svga: update dumping code with new GBS commands, etc
[mesa.git] / src / gallium / drivers / svga / svga_tgsi_insn.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_dump.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
32
33 #include "svga_tgsi_emit.h"
34 #include "svga_context.h"
35
36
37 static boolean emit_vs_postamble( struct svga_shader_emitter *emit );
38 static boolean emit_ps_postamble( struct svga_shader_emitter *emit );
39
40
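/**
 * Translate a TGSI opcode to the corresponding SVGA3D shader opcode.
 * Only opcodes with a direct 1:1 mapping are handled here; anything
 * else trips the assertion below.
 */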
41 static unsigned
42 translate_opcode(uint opcode)
43 {
44 switch (opcode) {
45 case TGSI_OPCODE_ABS: return SVGA3DOP_ABS;
46 case TGSI_OPCODE_ADD: return SVGA3DOP_ADD;
47 case TGSI_OPCODE_DP2A: return SVGA3DOP_DP2ADD;
48 case TGSI_OPCODE_DP3: return SVGA3DOP_DP3;
49 case TGSI_OPCODE_DP4: return SVGA3DOP_DP4;
50 case TGSI_OPCODE_FRC: return SVGA3DOP_FRC;
51 case TGSI_OPCODE_MAD: return SVGA3DOP_MAD;
52 case TGSI_OPCODE_MAX: return SVGA3DOP_MAX;
53 case TGSI_OPCODE_MIN: return SVGA3DOP_MIN;
54 case TGSI_OPCODE_MOV: return SVGA3DOP_MOV;
55 case TGSI_OPCODE_MUL: return SVGA3DOP_MUL;
56 case TGSI_OPCODE_NOP: return SVGA3DOP_NOP;
57 case TGSI_OPCODE_NRM4: return SVGA3DOP_NRM;
58 default:
59 assert(!"svga: unexpected opcode in translate_opcode()");
60 return SVGA3DOP_LAST_INST;
61 }
62 }
63
64
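/**
 * Translate a TGSI register file to the corresponding SVGA3D register file.
 */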
65 static unsigned
66 translate_file(unsigned file)
67 {
68 switch (file) {
69 case TGSI_FILE_TEMPORARY: return SVGA3DREG_TEMP;
70 case TGSI_FILE_INPUT: return SVGA3DREG_INPUT;
71 case TGSI_FILE_OUTPUT: return SVGA3DREG_OUTPUT; /* VS3.0+ only */
72 case TGSI_FILE_IMMEDIATE: return SVGA3DREG_CONST;
73 case TGSI_FILE_CONSTANT: return SVGA3DREG_CONST;
74 case TGSI_FILE_SAMPLER: return SVGA3DREG_SAMPLER;
75 case TGSI_FILE_ADDRESS: return SVGA3DREG_ADDR;
76 default:
77 assert(!"svga: unexpected register file in translate_file()");
78 return SVGA3DREG_TEMP;
79 }
80 }
81
82
83 /**
84 * Translate a TGSI destination register to an SVGA3DShaderDestToken.
85 * \param insn the TGSI instruction
86 * \param idx which TGSI dest register to translate (usually (always?) zero)
87 */
88 static SVGA3dShaderDestToken
89 translate_dst_register( struct svga_shader_emitter *emit,
90 const struct tgsi_full_instruction *insn,
91 unsigned idx )
92 {
93 const struct tgsi_full_dst_register *reg = &insn->Dst[idx];
94 SVGA3dShaderDestToken dest;
95
96 switch (reg->Register.File) {
97 case TGSI_FILE_OUTPUT:
98 /* Output registers encode semantic information in their name.
99 * Need to look up a table built at decl time:
100 */
101 dest = emit->output_map[reg->Register.Index];
102 break;
103
104 default:
105 {
106 unsigned index = reg->Register.Index;
107 assert(index < SVGA3D_TEMPREG_MAX);
108 index = MIN2(index, SVGA3D_TEMPREG_MAX - 1);
109 dest = dst_register(translate_file(reg->Register.File), index);
110 }
111 break;
112 }
113
114 if (reg->Register.Indirect) {
115 debug_warning("Indirect indexing of dest registers is not supported!\n");
116 }
117
118 dest.mask = reg->Register.WriteMask;
119 assert(dest.mask);
120
121 if (insn->Instruction.Saturate)
122 dest.dstMod = SVGA3DDSTMOD_SATURATE;
123
124 return dest;
125 }
126
127
128 /**
129 * Apply a swizzle to a src_register, returning a new src_register
130 * Ex: swizzle(SRC.ZZYY, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y)
131 * would return SRC.YYZZ
132 */
133 static struct src_register
134 swizzle(struct src_register src,
135 unsigned x, unsigned y, unsigned z, unsigned w)
136 {
137 assert(x < 4);
138 assert(y < 4);
139 assert(z < 4);
140 assert(w < 4);
141 x = (src.base.swizzle >> (x * 2)) & 0x3;
142 y = (src.base.swizzle >> (y * 2)) & 0x3;
143 z = (src.base.swizzle >> (z * 2)) & 0x3;
144 w = (src.base.swizzle >> (w * 2)) & 0x3;
145
146 src.base.swizzle = TRANSLATE_SWIZZLE(x, y, z, w);
147
148 return src;
149 }
150
151
152 /**
153 * Apply a "scalar" swizzle to a src_register returning a new
154 * src_register where all the swizzle terms are the same.
155 * Ex: scalar(SRC.WZYX, SWIZZLE_Y) would return SRC.ZZZZ
156 */
157 static struct src_register
158 scalar(struct src_register src, unsigned comp)
159 {
160 assert(comp < 4);
161 return swizzle( src, comp, comp, comp, comp );
162 }
163
164
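/**
 * Return TRUE if the current ARL instruction has an adjustment constant
 * recorded in emit->arl_consts.
 */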
165 static boolean
166 svga_arl_needs_adjustment( const struct svga_shader_emitter *emit )
167 {
168 int i;
169
170 for (i = 0; i < emit->num_arl_consts; ++i) {
171 if (emit->arl_consts[i].arl_num == emit->current_arl)
172 return TRUE;
173 }
174 return FALSE;
175 }
176
177
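/**
 * Return the adjustment value for the current ARL instruction, or zero
 * if there is none.
 */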
178 static int
179 svga_arl_adjustment( const struct svga_shader_emitter *emit )
180 {
181 int i;
182
183 for (i = 0; i < emit->num_arl_consts; ++i) {
184 if (emit->arl_consts[i].arl_num == emit->current_arl)
185 return emit->arl_consts[i].number;
186 }
187 return 0;
188 }
189
190
191 /**
192 * Translate a TGSI src register to a src_register.
193 */
194 static struct src_register
195 translate_src_register( const struct svga_shader_emitter *emit,
196 const struct tgsi_full_src_register *reg )
197 {
198 struct src_register src;
199
200 switch (reg->Register.File) {
201 case TGSI_FILE_INPUT:
202 /* Input registers are referred to by their semantic name rather
203 * than by index. Use the mapping built up from the decls:
204 */
205 src = emit->input_map[reg->Register.Index];
206 break;
207
208 case TGSI_FILE_IMMEDIATE:
209 /* Immediates are appended after TGSI constants in the D3D
210 * constant buffer.
211 */
212 src = src_register( translate_file( reg->Register.File ),
213 reg->Register.Index + emit->imm_start );
214 break;
215
216 default:
217 src = src_register( translate_file( reg->Register.File ),
218 reg->Register.Index );
219 break;
220 }
221
222 /* Indirect addressing.
223 */
224 if (reg->Register.Indirect) {
225 if (emit->unit == PIPE_SHADER_FRAGMENT) {
226 /* Pixel shaders have only loop registers for relative
227 * addressing into inputs. Ignore the redundant address
228 * register, the contents of aL should be in sync with it.
229 */
230 if (reg->Register.File == TGSI_FILE_INPUT) {
231 src.base.relAddr = 1;
232 src.indirect = src_token(SVGA3DREG_LOOP, 0);
233 }
234 }
235 else {
236 /* Constant buffers only.
237 */
238 if (reg->Register.File == TGSI_FILE_CONSTANT) {
239 /* we shift the offset towards the minimum */
240 if (svga_arl_needs_adjustment( emit )) {
241 src.base.num -= svga_arl_adjustment( emit );
242 }
243 src.base.relAddr = 1;
244
245 /* Not really sure what should go in the second token:
246 */
247 src.indirect = src_token( SVGA3DREG_ADDR,
248 reg->Indirect.Index );
249
250 src.indirect.swizzle = SWIZZLE_XXXX;
251 }
252 }
253 }
254
255 src = swizzle( src,
256 reg->Register.SwizzleX,
257 reg->Register.SwizzleY,
258 reg->Register.SwizzleZ,
259 reg->Register.SwizzleW );
260
261 /* src.mod isn't a bitfield, unfortunately:
262 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
263 */
264 if (reg->Register.Absolute) {
265 if (reg->Register.Negate)
266 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
267 else
268 src.base.srcMod = SVGA3DSRCMOD_ABS;
269 }
270 else {
271 if (reg->Register.Negate)
272 src.base.srcMod = SVGA3DSRCMOD_NEG;
273 else
274 src.base.srcMod = SVGA3DSRCMOD_NONE;
275 }
276
277 return src;
278 }
279
280
281 /*
282 * Get a temporary register.
283 * Note: if we exceed the temporary register limit we just use
284 * register SVGA3D_TEMPREG_MAX - 1.
285 */
286 static SVGA3dShaderDestToken
287 get_temp( struct svga_shader_emitter *emit )
288 {
289 int i = emit->nr_hw_temp + emit->internal_temp_count++;
290 if (i >= SVGA3D_TEMPREG_MAX) {
291 debug_warn_once("svga: Too many temporary registers used in shader\n");
292 i = SVGA3D_TEMPREG_MAX - 1;
293 }
294 return dst_register( SVGA3DREG_TEMP, i );
295 }
296
297
298 /**
299 * Release a single temp. Currently only effective if it was the last
300 * allocated temp, otherwise release will be delayed until the next
301 * call to reset_temp_regs().
302 */
303 static void
304 release_temp( struct svga_shader_emitter *emit,
305 SVGA3dShaderDestToken temp )
306 {
307 if (temp.num == emit->internal_temp_count - 1)
308 emit->internal_temp_count--;
309 }
310
311
312 /**
313 * Release all temps.
314 */
315 static void
316 reset_temp_regs(struct svga_shader_emitter *emit)
317 {
318 emit->internal_temp_count = 0;
319 }
320
321
322 /** Emit bytecode for a src_register */
323 static boolean
324 emit_src(struct svga_shader_emitter *emit, const struct src_register src)
325 {
326 if (src.base.relAddr) {
327 assert(src.base.reserved0);
328 assert(src.indirect.reserved0);
329 return (svga_shader_emit_dword( emit, src.base.value ) &&
330 svga_shader_emit_dword( emit, src.indirect.value ));
331 }
332 else {
333 assert(src.base.reserved0);
334 return svga_shader_emit_dword( emit, src.base.value );
335 }
336 }
337
338
339 /** Emit bytecode for a dst_register */
340 static boolean
341 emit_dst(struct svga_shader_emitter *emit, SVGA3dShaderDestToken dest)
342 {
343 assert(dest.reserved0);
344 assert(dest.mask);
345 return svga_shader_emit_dword( emit, dest.value );
346 }
347
348
349 /** Emit bytecode for a 1-operand instruction */
350 static boolean
351 emit_op1(struct svga_shader_emitter *emit,
352 SVGA3dShaderInstToken inst,
353 SVGA3dShaderDestToken dest,
354 struct src_register src0)
355 {
356 return (emit_instruction(emit, inst) &&
357 emit_dst(emit, dest) &&
358 emit_src(emit, src0));
359 }
360
361
362 /** Emit bytecode for a 2-operand instruction */
363 static boolean
364 emit_op2(struct svga_shader_emitter *emit,
365 SVGA3dShaderInstToken inst,
366 SVGA3dShaderDestToken dest,
367 struct src_register src0,
368 struct src_register src1)
369 {
370 return (emit_instruction(emit, inst) &&
371 emit_dst(emit, dest) &&
372 emit_src(emit, src0) &&
373 emit_src(emit, src1));
374 }
375
376
377 /** Emit bytecode for a 3-operand instruction */
378 static boolean
379 emit_op3(struct svga_shader_emitter *emit,
380 SVGA3dShaderInstToken inst,
381 SVGA3dShaderDestToken dest,
382 struct src_register src0,
383 struct src_register src1,
384 struct src_register src2)
385 {
386 return (emit_instruction(emit, inst) &&
387 emit_dst(emit, dest) &&
388 emit_src(emit, src0) &&
389 emit_src(emit, src1) &&
390 emit_src(emit, src2));
391 }
392
393
394 /** Emit bytecode for a 4-operand instruction */
395 static boolean
396 emit_op4(struct svga_shader_emitter *emit,
397 SVGA3dShaderInstToken inst,
398 SVGA3dShaderDestToken dest,
399 struct src_register src0,
400 struct src_register src1,
401 struct src_register src2,
402 struct src_register src3)
403 {
404 return (emit_instruction(emit, inst) &&
405 emit_dst(emit, dest) &&
406 emit_src(emit, src0) &&
407 emit_src(emit, src1) &&
408 emit_src(emit, src2) &&
409 emit_src(emit, src3));
410 }
411
412
413 /**
414 * Apply the absolute value modifier to the given src_register, returning
415 * a new src_register.
416 */
417 static struct src_register
418 absolute(struct src_register src)
419 {
420 src.base.srcMod = SVGA3DSRCMOD_ABS;
421 return src;
422 }
423
424
425 /**
426 * Apply the negation modifier to the given src_register, returning
427 * a new src_register.
428 */
429 static struct src_register
430 negate(struct src_register src)
431 {
432 switch (src.base.srcMod) {
433 case SVGA3DSRCMOD_ABS:
434 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
435 break;
436 case SVGA3DSRCMOD_ABSNEG:
437 src.base.srcMod = SVGA3DSRCMOD_ABS;
438 break;
439 case SVGA3DSRCMOD_NEG:
440 src.base.srcMod = SVGA3DSRCMOD_NONE;
441 break;
442 case SVGA3DSRCMOD_NONE:
443 src.base.srcMod = SVGA3DSRCMOD_NEG;
444 break;
445 }
446 return src;
447 }
448
449
450
451 /* Replace the src with the temporary specified in the dst, but copying
452 * only the necessary channels, and preserving the original swizzle (which is
453 * important given that several opcodes have constraints in the allowed
454 * swizzles).
455 */
456 static boolean
457 emit_repl(struct svga_shader_emitter *emit,
458 SVGA3dShaderDestToken dst,
459 struct src_register *src0)
460 {
461 unsigned src0_swizzle;
462 unsigned chan;
463
464 assert(SVGA3dShaderGetRegType(dst.value) == SVGA3DREG_TEMP);
465
466 src0_swizzle = src0->base.swizzle;
467
468 dst.mask = 0;
469 for (chan = 0; chan < 4; ++chan) {
470 unsigned swizzle = (src0_swizzle >> (chan *2)) & 0x3;
471 dst.mask |= 1 << swizzle;
472 }
473 assert(dst.mask);
474
475 src0->base.swizzle = SVGA3DSWIZZLE_NONE;
476
477 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, *src0 ))
478 return FALSE;
479
480 *src0 = src( dst );
481 src0->base.swizzle = src0_swizzle;
482
483 return TRUE;
484 }
485
486
487 /**
488 * Submit/emit an instruction with zero operands.
489 */
490 static boolean
491 submit_op0(struct svga_shader_emitter *emit,
492 SVGA3dShaderInstToken inst,
493 SVGA3dShaderDestToken dest)
494 {
495 return (emit_instruction( emit, inst ) &&
496 emit_dst( emit, dest ));
497 }
498
499
500 /**
501 * Submit/emit an instruction with one operand.
502 */
503 static boolean
504 submit_op1(struct svga_shader_emitter *emit,
505 SVGA3dShaderInstToken inst,
506 SVGA3dShaderDestToken dest,
507 struct src_register src0)
508 {
509 return emit_op1( emit, inst, dest, src0 );
510 }
511
512
513 /**
514 * Submit/emit an instruction with two operands.
515 *
516 * SVGA shaders may not refer to >1 constant register in a single
517 * instruction. This function checks for that usage and inserts a
518 * move to temporary if detected.
519 *
520 * The same applies to input registers -- at most a single input
521 * register may be read by any instruction.
522 */
523 static boolean
524 submit_op2(struct svga_shader_emitter *emit,
525 SVGA3dShaderInstToken inst,
526 SVGA3dShaderDestToken dest,
527 struct src_register src0,
528 struct src_register src1)
529 {
530 SVGA3dShaderDestToken temp;
531 SVGA3dShaderRegType type0, type1;
532 boolean need_temp = FALSE;
533
534 temp.value = 0;
535 type0 = SVGA3dShaderGetRegType( src0.base.value );
536 type1 = SVGA3dShaderGetRegType( src1.base.value );
537
538 if (type0 == SVGA3DREG_CONST &&
539 type1 == SVGA3DREG_CONST &&
540 src0.base.num != src1.base.num)
541 need_temp = TRUE;
542
543 if (type0 == SVGA3DREG_INPUT &&
544 type1 == SVGA3DREG_INPUT &&
545 src0.base.num != src1.base.num)
546 need_temp = TRUE;
547
548 if (need_temp) {
549 temp = get_temp( emit );
550
551 if (!emit_repl( emit, temp, &src0 ))
552 return FALSE;
553 }
554
555 if (!emit_op2( emit, inst, dest, src0, src1 ))
556 return FALSE;
557
558 if (need_temp)
559 release_temp( emit, temp );
560
561 return TRUE;
562 }
563
564
565 /**
566 * Submit/emit an instruction with three operands.
567 *
568 * SVGA shaders may not refer to >1 constant register in a single
569 * instruction. This function checks for that usage and inserts a
570 * move to temporary if detected.
571 */
572 static boolean
573 submit_op3(struct svga_shader_emitter *emit,
574 SVGA3dShaderInstToken inst,
575 SVGA3dShaderDestToken dest,
576 struct src_register src0,
577 struct src_register src1,
578 struct src_register src2)
579 {
580 SVGA3dShaderDestToken temp0;
581 SVGA3dShaderDestToken temp1;
582 boolean need_temp0 = FALSE;
583 boolean need_temp1 = FALSE;
584 SVGA3dShaderRegType type0, type1, type2;
585
586 temp0.value = 0;
587 temp1.value = 0;
588 type0 = SVGA3dShaderGetRegType( src0.base.value );
589 type1 = SVGA3dShaderGetRegType( src1.base.value );
590 type2 = SVGA3dShaderGetRegType( src2.base.value );
591
592 if (inst.op != SVGA3DOP_SINCOS) {
593 if (type0 == SVGA3DREG_CONST &&
594 ((type1 == SVGA3DREG_CONST && src0.base.num != src1.base.num) ||
595 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
596 need_temp0 = TRUE;
597
598 if (type1 == SVGA3DREG_CONST &&
599 (type2 == SVGA3DREG_CONST && src1.base.num != src2.base.num))
600 need_temp1 = TRUE;
601 }
602
603 if (type0 == SVGA3DREG_INPUT &&
604 ((type1 == SVGA3DREG_INPUT && src0.base.num != src1.base.num) ||
605 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
606 need_temp0 = TRUE;
607
608 if (type1 == SVGA3DREG_INPUT &&
609 (type2 == SVGA3DREG_INPUT && src1.base.num != src2.base.num))
610 need_temp1 = TRUE;
611
612 if (need_temp0) {
613 temp0 = get_temp( emit );
614
615 if (!emit_repl( emit, temp0, &src0 ))
616 return FALSE;
617 }
618
619 if (need_temp1) {
620 temp1 = get_temp( emit );
621
622 if (!emit_repl( emit, temp1, &src1 ))
623 return FALSE;
624 }
625
626 if (!emit_op3( emit, inst, dest, src0, src1, src2 ))
627 return FALSE;
628
629 if (need_temp1)
630 release_temp( emit, temp1 );
631 if (need_temp0)
632 release_temp( emit, temp0 );
633 return TRUE;
634 }
635
636
637 /**
638 * Submit/emit an instruction with four operands.
639 *
640 * SVGA shaders may not refer to >1 constant register in a single
641 * instruction. This function checks for that usage and inserts a
642 * move to temporary if detected.
643 */
644 static boolean
645 submit_op4(struct svga_shader_emitter *emit,
646 SVGA3dShaderInstToken inst,
647 SVGA3dShaderDestToken dest,
648 struct src_register src0,
649 struct src_register src1,
650 struct src_register src2,
651 struct src_register src3)
652 {
653 SVGA3dShaderDestToken temp0;
654 SVGA3dShaderDestToken temp3;
655 boolean need_temp0 = FALSE;
656 boolean need_temp3 = FALSE;
657 SVGA3dShaderRegType type0, type1, type2, type3;
658
659 temp0.value = 0;
660 temp3.value = 0;
661 type0 = SVGA3dShaderGetRegType( src0.base.value );
662 type1 = SVGA3dShaderGetRegType( src1.base.value );
663 type2 = SVGA3dShaderGetRegType( src2.base.value );
664 type3 = SVGA3dShaderGetRegType( src3.base.value );
665
666 /* Make life a little easier - this is only used by the TXD
667 * instruction, which is guaranteed to have a sampler register
668 * (never a constant or input) in at least one slot:
669 */
670 assert(type1 == SVGA3DREG_SAMPLER);
671
672 if (type0 == SVGA3DREG_CONST &&
673 ((type3 == SVGA3DREG_CONST && src0.base.num != src3.base.num) ||
674 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
675 need_temp0 = TRUE;
676
677 if (type3 == SVGA3DREG_CONST &&
678 (type2 == SVGA3DREG_CONST && src3.base.num != src2.base.num))
679 need_temp3 = TRUE;
680
681 if (type0 == SVGA3DREG_INPUT &&
682 ((type3 == SVGA3DREG_INPUT && src0.base.num != src3.base.num) ||
683 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
684 need_temp0 = TRUE;
685
686 if (type3 == SVGA3DREG_INPUT &&
687 (type2 == SVGA3DREG_INPUT && src3.base.num != src2.base.num))
688 need_temp3 = TRUE;
689
690 if (need_temp0) {
691 temp0 = get_temp( emit );
692
693 if (!emit_repl( emit, temp0, &src0 ))
694 return FALSE;
695 }
696
697 if (need_temp3) {
698 temp3 = get_temp( emit );
699
700 if (!emit_repl( emit, temp3, &src3 ))
701 return FALSE;
702 }
703
704 if (!emit_op4( emit, inst, dest, src0, src1, src2, src3 ))
705 return FALSE;
706
707 if (need_temp3)
708 release_temp( emit, temp3 );
709 if (need_temp0)
710 release_temp( emit, temp0 );
711 return TRUE;
712 }
713
714
715 /**
716 * Do the src and dest registers refer to the same register?
717 */
718 static boolean
719 alias_src_dst(struct src_register src,
720 SVGA3dShaderDestToken dst)
721 {
722 if (src.base.num != dst.num)
723 return FALSE;
724
725 if (SVGA3dShaderGetRegType(dst.value) !=
726 SVGA3dShaderGetRegType(src.base.value))
727 return FALSE;
728
729 return TRUE;
730 }
731
732
733 /**
734 * Helper for emitting SVGA immediate values using the SVGA3DOP_DEF[I]
735 * instructions.
736 */
737 static boolean
738 emit_def_const(struct svga_shader_emitter *emit,
739 SVGA3dShaderConstType type,
740 unsigned idx, float a, float b, float c, float d)
741 {
742 SVGA3DOpDefArgs def;
743 SVGA3dShaderInstToken opcode;
744
745 switch (type) {
746 case SVGA3D_CONST_TYPE_FLOAT:
747 opcode = inst_token( SVGA3DOP_DEF );
748 def.dst = dst_register( SVGA3DREG_CONST, idx );
749 def.constValues[0] = a;
750 def.constValues[1] = b;
751 def.constValues[2] = c;
752 def.constValues[3] = d;
753 break;
754 case SVGA3D_CONST_TYPE_INT:
755 opcode = inst_token( SVGA3DOP_DEFI );
756 def.dst = dst_register( SVGA3DREG_CONSTINT, idx );
757 def.constIValues[0] = (int)a;
758 def.constIValues[1] = (int)b;
759 def.constIValues[2] = (int)c;
760 def.constIValues[3] = (int)d;
761 break;
762 default:
763 assert(0);
764 opcode = inst_token( SVGA3DOP_NOP );
765 break;
766 }
767
768 if (!emit_instruction(emit, opcode) ||
769 !svga_shader_emit_dwords( emit, def.values, Elements(def.values)))
770 return FALSE;
771
772 return TRUE;
773 }
774
775
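/**
 * Emit the integer constant used by LOOP instructions:
 * {iteration count = 255, initial value = 0, step = 1, 0}.
 */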
776 static boolean
777 create_loop_const( struct svga_shader_emitter *emit )
778 {
779 unsigned idx = emit->nr_hw_int_const++;
780
781 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_INT, idx,
782 255, /* iteration count */
783 0, /* initial value */
784 1, /* step size */
785 0 /* not used, must be 0 */))
786 return FALSE;
787
788 emit->loop_const_idx = idx;
789 emit->created_loop_const = TRUE;
790
791 return TRUE;
792 }
793
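/**
 * Emit the float constants used for ARL adjustment, packing up to four
 * values per constant register and recording each value's register
 * index and swizzle in emit->arl_consts.
 */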
794 static boolean
795 create_arl_consts( struct svga_shader_emitter *emit )
796 {
797 int i;
798
799 for (i = 0; i < emit->num_arl_consts; i += 4) {
800 int j;
801 unsigned idx = emit->nr_hw_float_const++;
802 float vals[4];
803 for (j = 0; j < 4 && (j + i) < emit->num_arl_consts; ++j) {
804 vals[j] = (float) emit->arl_consts[i + j].number;
805 emit->arl_consts[i + j].idx = idx;
806 switch (j) {
807 case 0:
808 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_X;
809 break;
810 case 1:
811 emit->arl_consts[i + 1].swizzle = TGSI_SWIZZLE_Y;
812 break;
813 case 2:
814 emit->arl_consts[i + 2].swizzle = TGSI_SWIZZLE_Z;
815 break;
816 case 3:
817 emit->arl_consts[i + 3].swizzle = TGSI_SWIZZLE_W;
818 break;
819 }
820 }
821 while (j < 4)
822 vals[j++] = 0;
823
824 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
825 vals[0], vals[1],
826 vals[2], vals[3]))
827 return FALSE;
828 }
829
830 return TRUE;
831 }
832
833
834 /**
835 * Return the register which holds the pixel shader's front/back-
836 * facing value.
837 */
838 static struct src_register
839 get_vface( struct svga_shader_emitter *emit )
840 {
841 assert(emit->emitted_vface);
842 return src_register(SVGA3DREG_MISCTYPE, SVGA3DMISCREG_FACE);
843 }
844
845
846 /**
847 * Create/emit a "common" constant with values {0, 0.5, -1, 1}.
848 * We can swizzle this to produce other useful constants such as
849 * {0, 0, 0, 0}, {1, 1, 1, 1}, etc.
850 */
851 static boolean
852 create_common_immediate( struct svga_shader_emitter *emit )
853 {
854 unsigned idx = emit->nr_hw_float_const++;
855
856 /* Emit the constant (0, 0.5, -1, 1) and use swizzling to generate
857 * other useful vectors.
858 */
859 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
860 idx, 0.0f, 0.5f, -1.0f, 1.0f ))
861 return FALSE;
862
863 emit->common_immediate_idx = idx;
864 emit->created_common_immediate = TRUE;
865
866 return TRUE;
867 }
868
869
870 /**
871 * Return swizzle/position for the given value in the "common" immediate.
872 */
873 static inline unsigned
874 common_immediate_swizzle(float value)
875 {
876 if (value == 0.0f)
877 return TGSI_SWIZZLE_X;
878 else if (value == 0.5f)
879 return TGSI_SWIZZLE_Y;
880 else if (value == -1.0f)
881 return TGSI_SWIZZLE_Z;
882 else if (value == 1.0f)
883 return TGSI_SWIZZLE_W;
884 else {
885 assert(!"illegal value in common_immediate_swizzle");
886 return TGSI_SWIZZLE_X;
887 }
888 }
889
890
891 /**
892 * Returns an immediate reg where all the terms are either 0, 1, -1 or 0.5
893 */
894 static struct src_register
895 get_immediate(struct svga_shader_emitter *emit,
896 float x, float y, float z, float w)
897 {
898 unsigned sx = common_immediate_swizzle(x);
899 unsigned sy = common_immediate_swizzle(y);
900 unsigned sz = common_immediate_swizzle(z);
901 unsigned sw = common_immediate_swizzle(w);
902 assert(emit->created_common_immediate);
903 assert(emit->common_immediate_idx >= 0);
904 return swizzle(src_register(SVGA3DREG_CONST, emit->common_immediate_idx),
905 sx, sy, sz, sw);
906 }
907
908
909 /**
910 * returns {0, 0, 0, 0} immediate
911 */
912 static struct src_register
913 get_zero_immediate( struct svga_shader_emitter *emit )
914 {
915 assert(emit->created_common_immediate);
916 assert(emit->common_immediate_idx >= 0);
917 return swizzle(src_register( SVGA3DREG_CONST,
918 emit->common_immediate_idx),
919 0, 0, 0, 0);
920 }
921
922
923 /**
924 * returns {1, 1, 1, 1} immediate
925 */
926 static struct src_register
927 get_one_immediate( struct svga_shader_emitter *emit )
928 {
929 assert(emit->created_common_immediate);
930 assert(emit->common_immediate_idx >= 0);
931 return swizzle(src_register( SVGA3DREG_CONST,
932 emit->common_immediate_idx),
933 3, 3, 3, 3);
934 }
935
936
937 /**
938 * returns {0.5, 0.5, 0.5, 0.5} immediate
939 */
940 static struct src_register
941 get_half_immediate( struct svga_shader_emitter *emit )
942 {
943 assert(emit->created_common_immediate);
944 assert(emit->common_immediate_idx >= 0);
945 return swizzle(src_register(SVGA3DREG_CONST, emit->common_immediate_idx),
946 1, 1, 1, 1);
947 }
948
949
950 /**
951 * returns the loop const
952 */
953 static struct src_register
954 get_loop_const( struct svga_shader_emitter *emit )
955 {
956 assert(emit->created_loop_const);
957 assert(emit->loop_const_idx >= 0);
958 return src_register( SVGA3DREG_CONSTINT,
959 emit->loop_const_idx );
960 }
961
962
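/**
 * Return the constant register (with the proper swizzle) which holds the
 * adjustment value for the current ARL instruction.
 */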
963 static struct src_register
964 get_fake_arl_const( struct svga_shader_emitter *emit )
965 {
966 struct src_register reg;
967 int idx = 0, swizzle = 0, i;
968
969 for (i = 0; i < emit->num_arl_consts; ++ i) {
970 if (emit->arl_consts[i].arl_num == emit->current_arl) {
971 idx = emit->arl_consts[i].idx;
972 swizzle = emit->arl_consts[i].swizzle;
973 }
974 }
975
976 reg = src_register( SVGA3DREG_CONST, idx );
977 return scalar(reg, swizzle);
978 }
979
980
981 /**
982 * Return a register which holds the width and height of the texture
983 * currently bound to the given sampler.
984 */
985 static struct src_register
986 get_tex_dimensions( struct svga_shader_emitter *emit, int sampler_num )
987 {
988 int idx;
989 struct src_register reg;
990
991 /* the width/height indexes start right after constants */
992 idx = emit->key.fkey.tex[sampler_num].width_height_idx +
993 emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
994
995 reg = src_register( SVGA3DREG_CONST, idx );
996 return reg;
997 }
998
999
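/**
 * Translate a TGSI ARL instruction which needs adjustment by a constant:
 * copy the source into a temp, add the adjustment constant, then MOVA
 * the result into the address register.
 */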
1000 static boolean
1001 emit_fake_arl(struct svga_shader_emitter *emit,
1002 const struct tgsi_full_instruction *insn)
1003 {
1004 const struct src_register src0 =
1005 translate_src_register(emit, &insn->Src[0] );
1006 struct src_register src1 = get_fake_arl_const( emit );
1007 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1008 SVGA3dShaderDestToken tmp = get_temp( emit );
1009
1010 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
1011 return FALSE;
1012
1013 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), tmp, src( tmp ),
1014 src1))
1015 return FALSE;
1016
1017 /* replicate the original swizzle */
1018 src1 = src(tmp);
1019 src1.base.swizzle = src0.base.swizzle;
1020
1021 return submit_op1( emit, inst_token( SVGA3DOP_MOVA ),
1022 dst, src1 );
1023 }
1024
1025
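/**
 * Translate/emit a TGSI IF instruction as an SVGA3D IFC which compares
 * the source operand against zero (not-equal).
 */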
1026 static boolean
1027 emit_if(struct svga_shader_emitter *emit,
1028 const struct tgsi_full_instruction *insn)
1029 {
1030 struct src_register src0 =
1031 translate_src_register(emit, &insn->Src[0]);
1032 struct src_register zero = get_zero_immediate(emit);
1033 SVGA3dShaderInstToken if_token = inst_token( SVGA3DOP_IFC );
1034
1035 if_token.control = SVGA3DOPCOMPC_NE;
1036
1037 if (SVGA3dShaderGetRegType(src0.base.value) == SVGA3DREG_CONST) {
1038 /*
1039 * Max different constant registers readable per IFC instruction is 1.
1040 */
1041 SVGA3dShaderDestToken tmp = get_temp( emit );
1042
1043 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
1044 return FALSE;
1045
1046 src0 = scalar(src( tmp ), TGSI_SWIZZLE_X);
1047 }
1048
1049 emit->dynamic_branching_level++;
1050
1051 return (emit_instruction( emit, if_token ) &&
1052 emit_src( emit, src0 ) &&
1053 emit_src( emit, zero ) );
1054 }
1055
1056
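/** Translate/emit a TGSI ELSE instruction. */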
1057 static boolean
1058 emit_else(struct svga_shader_emitter *emit,
1059 const struct tgsi_full_instruction *insn)
1060 {
1061 return emit_instruction(emit, inst_token(SVGA3DOP_ELSE));
1062 }
1063
1064
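/** Translate/emit a TGSI ENDIF instruction. */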
1065 static boolean
1066 emit_endif(struct svga_shader_emitter *emit,
1067 const struct tgsi_full_instruction *insn)
1068 {
1069 emit->dynamic_branching_level--;
1070
1071 return emit_instruction(emit, inst_token(SVGA3DOP_ENDIF));
1072 }
1073
1074
1075 /**
1076 * Translate the following TGSI FLR instruction.
1077 * FLR DST, SRC
1078 * To the following SVGA3D instruction sequence.
1079 * FRC TMP, SRC
1080 * SUB DST, SRC, TMP
1081 */
1082 static boolean
1083 emit_floor(struct svga_shader_emitter *emit,
1084 const struct tgsi_full_instruction *insn )
1085 {
1086 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1087 const struct src_register src0 =
1088 translate_src_register(emit, &insn->Src[0] );
1089 SVGA3dShaderDestToken temp = get_temp( emit );
1090
1091 /* FRC TMP, SRC */
1092 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ), temp, src0 ))
1093 return FALSE;
1094
1095 /* SUB DST, SRC, TMP */
1096 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src0,
1097 negate( src( temp ) ) ))
1098 return FALSE;
1099
1100 return TRUE;
1101 }
1102
1103
1104 /**
1105 * Translate the following TGSI CEIL instruction.
1106 * CEIL DST, SRC
1107 * To the following SVGA3D instruction sequence.
1108 * FRC TMP, -SRC
1109 * ADD DST, SRC, TMP
1110 */
1111 static boolean
1112 emit_ceil(struct svga_shader_emitter *emit,
1113 const struct tgsi_full_instruction *insn)
1114 {
1115 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
1116 const struct src_register src0 =
1117 translate_src_register(emit, &insn->Src[0]);
1118 SVGA3dShaderDestToken temp = get_temp(emit);
1119
1120 /* FRC TMP, -SRC */
1121 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), temp, negate(src0)))
1122 return FALSE;
1123
1124 /* ADD DST, SRC, TMP */
1125 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), dst, src0, src(temp)))
1126 return FALSE;
1127
1128 return TRUE;
1129 }
1130
1131
1132 /**
1133 * Translate the following TGSI DIV instruction.
1134 * DIV DST.xy, SRC0, SRC1
1135 * To the following SVGA3D instruction sequence.
1136 * RCP TMP.x, SRC1.xxxx
1137 * RCP TMP.y, SRC1.yyyy
1138 * MUL DST.xy, SRC0, TMP
1139 */
1140 static boolean
1141 emit_div(struct svga_shader_emitter *emit,
1142 const struct tgsi_full_instruction *insn )
1143 {
1144 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1145 const struct src_register src0 =
1146 translate_src_register(emit, &insn->Src[0] );
1147 const struct src_register src1 =
1148 translate_src_register(emit, &insn->Src[1] );
1149 SVGA3dShaderDestToken temp = get_temp( emit );
1150 int i;
1151
1152 /* For each enabled element, perform a RCP instruction. Note that
1153 * RCP is scalar in SVGA3D:
1154 */
1155 for (i = 0; i < 4; i++) {
1156 unsigned channel = 1 << i;
1157 if (dst.mask & channel) {
1158 /* RCP TMP.?, SRC1.???? */
1159 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1160 writemask(temp, channel),
1161 scalar(src1, i) ))
1162 return FALSE;
1163 }
1164 }
1165
1166 /* Vector mul:
1167 * MUL DST, SRC0, TMP
1168 */
1169 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst, src0,
1170 src( temp ) ))
1171 return FALSE;
1172
1173 return TRUE;
1174 }
1175
1176
1177 /**
1178 * Translate the following TGSI DP2 instruction.
1179 * DP2 DST, SRC1, SRC2
1180 * To the following SVGA3D instruction sequence.
1181 * MUL TMP, SRC1, SRC2
1182 * ADD DST, TMP.xxxx, TMP.yyyy
1183 */
1184 static boolean
1185 emit_dp2(struct svga_shader_emitter *emit,
1186 const struct tgsi_full_instruction *insn )
1187 {
1188 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1189 const struct src_register src0 =
1190 translate_src_register(emit, &insn->Src[0]);
1191 const struct src_register src1 =
1192 translate_src_register(emit, &insn->Src[1]);
1193 SVGA3dShaderDestToken temp = get_temp( emit );
1194 struct src_register temp_src0, temp_src1;
1195
1196 /* MUL TMP, SRC1, SRC2 */
1197 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), temp, src0, src1 ))
1198 return FALSE;
1199
1200 temp_src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1201 temp_src1 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1202
1203 /* ADD DST, TMP.xxxx, TMP.yyyy */
1204 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1205 temp_src0, temp_src1 ))
1206 return FALSE;
1207
1208 return TRUE;
1209 }
1210
1211
1212 /**
1213 * Translate the following TGSI DPH instruction.
1214 * DPH DST, SRC1, SRC2
1215 * To the following SVGA3D instruction sequence.
1216 * DP3 TMP, SRC1, SRC2
1217 * ADD DST, TMP, SRC2.wwww
1218 */
1219 static boolean
1220 emit_dph(struct svga_shader_emitter *emit,
1221 const struct tgsi_full_instruction *insn )
1222 {
1223 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1224 const struct src_register src0 = translate_src_register(
1225 emit, &insn->Src[0] );
1226 struct src_register src1 =
1227 translate_src_register(emit, &insn->Src[1]);
1228 SVGA3dShaderDestToken temp = get_temp( emit );
1229
1230 /* DP3 TMP, SRC1, SRC2 */
1231 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src1 ))
1232 return FALSE;
1233
1234 src1 = scalar(src1, TGSI_SWIZZLE_W);
1235
1236 /* ADD DST, TMP, SRC2.wwww */
1237 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1238 src( temp ), src1 ))
1239 return FALSE;
1240
1241 return TRUE;
1242 }
1243
1244
1245 /**
1246 * Translate the following TGSI NRM instruction.
1247 * NRM DST, SRC
1248 * To the following SVGA3D instruction sequence.
1249 * DP3 TMP, SRC, SRC
1250 * RSQ TMP, TMP
1251 * MUL DST, SRC, TMP
1252 */
1253 static boolean
1254 emit_nrm(struct svga_shader_emitter *emit,
1255 const struct tgsi_full_instruction *insn)
1256 {
1257 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1258 const struct src_register src0 =
1259 translate_src_register(emit, &insn->Src[0]);
1260 SVGA3dShaderDestToken temp = get_temp( emit );
1261
1262 /* DP3 TMP, SRC, SRC */
1263 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src0 ))
1264 return FALSE;
1265
1266 /* RSQ TMP, TMP */
1267 if (!submit_op1( emit, inst_token( SVGA3DOP_RSQ ), temp, src( temp )))
1268 return FALSE;
1269
1270 /* MUL DST, SRC, TMP */
1271 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst,
1272 src0, src( temp )))
1273 return FALSE;
1274
1275 return TRUE;
1276 }
1277
1278
1279 /**
1280 * Sine / Cosine helper function.
1281 */
1282 static boolean
1283 do_emit_sincos(struct svga_shader_emitter *emit,
1284 SVGA3dShaderDestToken dst,
1285 struct src_register src0)
1286 {
1287 src0 = scalar(src0, TGSI_SWIZZLE_X);
1288 return submit_op1(emit, inst_token(SVGA3DOP_SINCOS), dst, src0);
1289 }
1290
1291
1292 /**
1293 * Translate/emit a TGSI SIN, COS or SCS instruction.
1294 */
1295 static boolean
1296 emit_sincos(struct svga_shader_emitter *emit,
1297 const struct tgsi_full_instruction *insn)
1298 {
1299 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1300 struct src_register src0 = translate_src_register(emit, &insn->Src[0]);
1301 SVGA3dShaderDestToken temp = get_temp( emit );
1302
1303 /* SCS TMP SRC */
1304 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_XY), src0 ))
1305 return FALSE;
1306
1307 /* MOV DST TMP */
1308 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src( temp ) ))
1309 return FALSE;
1310
1311 return TRUE;
1312 }
1313
1314
1315 /**
1316 * Translate TGSI SIN instruction into:
1317 * SCS TMP SRC
1318 * MOV DST TMP.yyyy
1319 */
1320 static boolean
1321 emit_sin(struct svga_shader_emitter *emit,
1322 const struct tgsi_full_instruction *insn )
1323 {
1324 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1325 struct src_register src0 =
1326 translate_src_register(emit, &insn->Src[0] );
1327 SVGA3dShaderDestToken temp = get_temp( emit );
1328
1329 /* SCS TMP SRC */
1330 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_Y), src0))
1331 return FALSE;
1332
1333 src0 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1334
1335 /* MOV DST TMP.yyyy */
1336 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1337 return FALSE;
1338
1339 return TRUE;
1340 }
1341
1342
1343 /*
1344 * Translate TGSI COS instruction into:
1345 * SCS TMP SRC
1346 * MOV DST TMP.xxxx
1347 */
1348 static boolean
1349 emit_cos(struct svga_shader_emitter *emit,
1350 const struct tgsi_full_instruction *insn)
1351 {
1352 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1353 struct src_register src0 =
1354 translate_src_register(emit, &insn->Src[0] );
1355 SVGA3dShaderDestToken temp = get_temp( emit );
1356
1357 /* SCS TMP SRC */
1358 if (!do_emit_sincos( emit, writemask(temp, TGSI_WRITEMASK_X), src0 ))
1359 return FALSE;
1360
1361 src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1362
1363 /* MOV DST TMP.xxxx */
1364 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1365 return FALSE;
1366
1367 return TRUE;
1368 }
1369
1370
1371 /**
1372 * Translate/emit TGSI SSG (Set Sign: -1, 0, +1) instruction.
1373 */
1374 static boolean
1375 emit_ssg(struct svga_shader_emitter *emit,
1376 const struct tgsi_full_instruction *insn)
1377 {
1378 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1379 struct src_register src0 =
1380 translate_src_register(emit, &insn->Src[0] );
1381 SVGA3dShaderDestToken temp0 = get_temp( emit );
1382 SVGA3dShaderDestToken temp1 = get_temp( emit );
1383 struct src_register zero, one;
1384
1385 if (emit->unit == PIPE_SHADER_VERTEX) {
1386 /* SGN DST, SRC0, TMP0, TMP1 */
1387 return submit_op3( emit, inst_token( SVGA3DOP_SGN ), dst, src0,
1388 src( temp0 ), src( temp1 ) );
1389 }
1390
1391 one = get_one_immediate(emit);
1392 zero = get_zero_immediate(emit);
1393
1394 /* CMP TMP0, SRC0, one, zero */
1395 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1396 writemask( temp0, dst.mask ), src0, one, zero ))
1397 return FALSE;
1398
1399 /* CMP TMP1, negate(SRC0), negate(one), zero */
1400 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1401 writemask( temp1, dst.mask ), negate( src0 ), negate( one ),
1402 zero ))
1403 return FALSE;
1404
1405 /* ADD DST, TMP0, TMP1 */
1406 return submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src( temp0 ),
1407 src( temp1 ) );
1408 }
1409
1410
1411 /**
1412 * Translate/emit TGSI SUB instruction as:
1413 * ADD DST, SRC0, negate(SRC1)
1414 */
1415 static boolean
1416 emit_sub(struct svga_shader_emitter *emit,
1417 const struct tgsi_full_instruction *insn)
1418 {
1419 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1420 struct src_register src0 = translate_src_register(
1421 emit, &insn->Src[0] );
1422 struct src_register src1 = translate_src_register(
1423 emit, &insn->Src[1] );
1424
1425 src1 = negate(src1);
1426
1427 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1428 src0, src1 ))
1429 return FALSE;
1430
1431 return TRUE;
1432 }
1433
1434
1435 /**
1436 * Translate/emit KILL_IF instruction (kill if any of X,Y,Z,W are negative).
1437 */
1438 static boolean
1439 emit_kill_if(struct svga_shader_emitter *emit,
1440 const struct tgsi_full_instruction *insn)
1441 {
1442 const struct tgsi_full_src_register *reg = &insn->Src[0];
1443 struct src_register src0, srcIn;
1444 const boolean special = (reg->Register.Absolute ||
1445 reg->Register.Negate ||
1446 reg->Register.Indirect ||
1447 reg->Register.SwizzleX != 0 ||
1448 reg->Register.SwizzleY != 1 ||
1449 reg->Register.SwizzleZ != 2 ||
1450 reg->Register.File != TGSI_FILE_TEMPORARY);
1451 SVGA3dShaderDestToken temp;
1452
1453 src0 = srcIn = translate_src_register( emit, reg );
1454
1455 if (special) {
1456 /* need a temp reg */
1457 temp = get_temp( emit );
1458 }
1459
1460 if (special) {
1461 /* move the source into a temp register */
1462 submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, src0);
1463
1464 src0 = src( temp );
1465 }
1466
1467 /* Do the texkill by checking if any of the XYZW components are < 0.
1468 * Note that ps_2_0 and later take XYZW into consideration, while ps_1_x
1469 * only uses XYZ. The MSDN documentation about this is incorrect.
1470 */
1471 if (!submit_op0( emit, inst_token( SVGA3DOP_TEXKILL ), dst(src0) ))
1472 return FALSE;
1473
1474 return TRUE;
1475 }
1476
1477
1478 /**
1479 * Translate/emit unconditional kill instruction (usually found inside
1480 * an IF/ELSE/ENDIF block).
1481 */
1482 static boolean
1483 emit_kill(struct svga_shader_emitter *emit,
1484 const struct tgsi_full_instruction *insn)
1485 {
1486 SVGA3dShaderDestToken temp;
1487 struct src_register one = get_one_immediate(emit);
1488 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_TEXKILL );
1489
1490 /* texkill doesn't allow negation on its operand, so let's move
1491 * the negated {1,1,1,1} immediate into a temp register */
1492 temp = get_temp( emit );
1493 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp,
1494 negate( one ) ))
1495 return FALSE;
1496
1497 return submit_op0( emit, inst, temp );
1498 }
1499
1500
1501 /**
1502 * Test if r1 and r2 are the same register.
1503 */
1504 static boolean
1505 same_register(struct src_register r1, struct src_register r2)
1506 {
1507 return (r1.base.num == r2.base.num &&
1508 r1.base.type_upper == r2.base.type_upper &&
1509 r1.base.type_lower == r2.base.type_lower);
1510 }
1511
1512
1513
1514 /**
1515 * Implement conditionals by initializing destination reg to 'fail',
1516 * then set predicate reg with UFOP_SETP, then move 'pass' to dest
1517 * based on predicate reg.
1518 *
1519 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1520 * MOV dst, fail
1521 * MOV dst, pass, p0
1522 */
1523 static boolean
1524 emit_conditional(struct svga_shader_emitter *emit,
1525 unsigned compare_func,
1526 SVGA3dShaderDestToken dst,
1527 struct src_register src0,
1528 struct src_register src1,
1529 struct src_register pass,
1530 struct src_register fail)
1531 {
1532 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1533 SVGA3dShaderInstToken setp_token, mov_token;
1534 setp_token = inst_token( SVGA3DOP_SETP );
1535
1536 switch (compare_func) {
1537 case PIPE_FUNC_NEVER:
1538 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1539 dst, fail );
1540 break;
1541 case PIPE_FUNC_LESS:
1542 setp_token.control = SVGA3DOPCOMP_LT;
1543 break;
1544 case PIPE_FUNC_EQUAL:
1545 setp_token.control = SVGA3DOPCOMP_EQ;
1546 break;
1547 case PIPE_FUNC_LEQUAL:
1548 setp_token.control = SVGA3DOPCOMP_LE;
1549 break;
1550 case PIPE_FUNC_GREATER:
1551 setp_token.control = SVGA3DOPCOMP_GT;
1552 break;
1553 case PIPE_FUNC_NOTEQUAL:
1554 setp_token.control = SVGA3DOPCOMPC_NE;
1555 break;
1556 case PIPE_FUNC_GEQUAL:
1557 setp_token.control = SVGA3DOPCOMP_GE;
1558 break;
1559 case PIPE_FUNC_ALWAYS:
1560 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1561 dst, pass );
1562 break;
1563 }
1564
1565 if (same_register(src(dst), pass)) {
1566 /* We'll get bad results if the dst and pass registers are the same
1567 * so use a temp register containing pass.
1568 */
1569 SVGA3dShaderDestToken temp = get_temp(emit);
1570 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, pass))
1571 return FALSE;
1572 pass = src(temp);
1573 }
1574
1575 /* SETP src0, COMPOP, src1 */
1576 if (!submit_op2( emit, setp_token, pred_reg,
1577 src0, src1 ))
1578 return FALSE;
1579
1580 mov_token = inst_token( SVGA3DOP_MOV );
1581
1582 /* MOV dst, fail */
1583 if (!submit_op1( emit, mov_token, dst,
1584 fail ))
1585 return FALSE;
1586
1587 /* MOV dst, pass (predicated)
1588 *
1589 * Note that the predicate reg (and possible modifiers) is passed
1590 * as the first source argument.
1591 */
1592 mov_token.predicated = 1;
1593 if (!submit_op2( emit, mov_token, dst,
1594 src( pred_reg ), pass ))
1595 return FALSE;
1596
1597 return TRUE;
1598 }
1599
1600
1601 /**
1602 * Helper for emitting 'selection' commands. Basically:
1603 * if (src0 OP src1)
1604 * dst = 1.0;
1605 * else
1606 * dst = 0.0;
1607 */
1608 static boolean
1609 emit_select(struct svga_shader_emitter *emit,
1610 unsigned compare_func,
1611 SVGA3dShaderDestToken dst,
1612 struct src_register src0,
1613 struct src_register src1 )
1614 {
1615 /* There are some SVGA instructions which implement some selects
1616 * directly, but they are only available in the vertex shader.
1617 */
1618 if (emit->unit == PIPE_SHADER_VERTEX) {
1619 switch (compare_func) {
1620 case PIPE_FUNC_GEQUAL:
1621 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src0, src1 );
1622 case PIPE_FUNC_LEQUAL:
1623 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src1, src0 );
1624 case PIPE_FUNC_GREATER:
1625 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src1, src0 );
1626 case PIPE_FUNC_LESS:
1627 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src0, src1 );
1628 default:
1629 break;
1630 }
1631 }
1632
1633 /* Otherwise, need to use the setp approach:
1634 */
1635 {
1636 struct src_register one, zero;
1637 /* 'pass' value is {1,1,1,1}, 'fail' value is {0,0,0,0} */
1638 zero = get_zero_immediate(emit);
1639 one = get_one_immediate(emit);
1640
1641 return emit_conditional(emit, compare_func, dst, src0, src1, one, zero);
1642 }
1643 }
1644
1645
1646 /**
1647 * Translate/emit a TGSI SEQ, SNE, SLT, SGE, etc. instruction.
1648 */
1649 static boolean
1650 emit_select_op(struct svga_shader_emitter *emit,
1651 unsigned compare,
1652 const struct tgsi_full_instruction *insn)
1653 {
1654 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1655 struct src_register src0 = translate_src_register(
1656 emit, &insn->Src[0] );
1657 struct src_register src1 = translate_src_register(
1658 emit, &insn->Src[1] );
1659
1660 return emit_select( emit, compare, dst, src0, src1 );
1661 }
1662
1663
1664 /**
1665 * Translate TGSI CMP instruction. Component-wise:
1666 * dst = (src0 < 0.0) ? src1 : src2
1667 */
1668 static boolean
1669 emit_cmp(struct svga_shader_emitter *emit,
1670 const struct tgsi_full_instruction *insn)
1671 {
1672 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1673 const struct src_register src0 =
1674 translate_src_register(emit, &insn->Src[0] );
1675 const struct src_register src1 =
1676 translate_src_register(emit, &insn->Src[1] );
1677 const struct src_register src2 =
1678 translate_src_register(emit, &insn->Src[2] );
1679
1680 if (emit->unit == PIPE_SHADER_VERTEX) {
1681 struct src_register zero = get_zero_immediate(emit);
1682 /* We used to simulate CMP with SLT+LRP. But that didn't work when
1683 * src1 or src2 was Inf/NaN. In particular, GLSL sqrt(0) failed
1684 * because it involves a CMP to handle the 0 case.
1685 * Use a conditional expression instead.
1686 */
1687 return emit_conditional(emit, PIPE_FUNC_LESS, dst,
1688 src0, zero, src1, src2);
1689 }
1690 else {
1691 assert(emit->unit == PIPE_SHADER_FRAGMENT);
1692
1693 /* CMP DST, SRC0, SRC2, SRC1 */
1694 return submit_op3( emit, inst_token( SVGA3DOP_CMP ), dst,
1695 src0, src2, src1);
1696 }
1697 }
1698
1699
1700 /**
1701 * Translate/emit 2-operand (coord, sampler) texture instructions.
1702 */
1703 static boolean
1704 emit_tex2(struct svga_shader_emitter *emit,
1705 const struct tgsi_full_instruction *insn,
1706 SVGA3dShaderDestToken dst)
1707 {
1708 SVGA3dShaderInstToken inst;
1709 struct src_register texcoord;
1710 struct src_register sampler;
1711 SVGA3dShaderDestToken tmp;
1712
1713 inst.value = 0;
1714
1715 switch (insn->Instruction.Opcode) {
1716 case TGSI_OPCODE_TEX:
1717 inst.op = SVGA3DOP_TEX;
1718 break;
1719 case TGSI_OPCODE_TXP:
1720 inst.op = SVGA3DOP_TEX;
1721 inst.control = SVGA3DOPCONT_PROJECT;
1722 break;
1723 case TGSI_OPCODE_TXB:
1724 inst.op = SVGA3DOP_TEX;
1725 inst.control = SVGA3DOPCONT_BIAS;
1726 break;
1727 case TGSI_OPCODE_TXL:
1728 inst.op = SVGA3DOP_TEXLDL;
1729 break;
1730 default:
1731 assert(0);
1732 return FALSE;
1733 }
1734
1735 texcoord = translate_src_register( emit, &insn->Src[0] );
1736 sampler = translate_src_register( emit, &insn->Src[1] );
1737
1738 if (emit->key.fkey.tex[sampler.base.num].unnormalized ||
1739 emit->dynamic_branching_level > 0)
1740 tmp = get_temp( emit );
1741
1742 /* Can't do mipmapping inside dynamic branch constructs. Force LOD
1743 * zero in that case.
1744 */
1745 if (emit->dynamic_branching_level > 0 &&
1746 inst.op == SVGA3DOP_TEX &&
1747 SVGA3dShaderGetRegType(texcoord.base.value) == SVGA3DREG_TEMP) {
1748 struct src_register zero = get_zero_immediate(emit);
1749
1750 /* MOV tmp, texcoord */
1751 if (!submit_op1( emit,
1752 inst_token( SVGA3DOP_MOV ),
1753 tmp,
1754 texcoord ))
1755 return FALSE;
1756
1757 /* MOV tmp.w, zero */
1758 if (!submit_op1( emit,
1759 inst_token( SVGA3DOP_MOV ),
1760 writemask( tmp, TGSI_WRITEMASK_W ),
1761 zero ))
1762 return FALSE;
1763
1764 texcoord = src( tmp );
1765 inst.op = SVGA3DOP_TEXLDL;
1766 }
1767
1768 /* Explicit normalization of texcoords:
1769 */
1770 if (emit->key.fkey.tex[sampler.base.num].unnormalized) {
1771 struct src_register wh = get_tex_dimensions( emit, sampler.base.num );
1772
1773 /* MUL tmp, SRC0, WH */
1774 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1775 tmp, texcoord, wh ))
1776 return FALSE;
1777
1778 texcoord = src( tmp );
1779 }
1780
1781 return submit_op2( emit, inst, dst, texcoord, sampler );
1782 }
1783
1784
1785 /**
1786 * Translate/emit 4-operand (coord, ddx, ddy, sampler) texture instructions.
1787 */
1788 static boolean
1789 emit_tex4(struct svga_shader_emitter *emit,
1790 const struct tgsi_full_instruction *insn,
1791 SVGA3dShaderDestToken dst )
1792 {
1793 SVGA3dShaderInstToken inst;
1794 struct src_register texcoord;
1795 struct src_register ddx;
1796 struct src_register ddy;
1797 struct src_register sampler;
1798
1799 texcoord = translate_src_register( emit, &insn->Src[0] );
1800 ddx = translate_src_register( emit, &insn->Src[1] );
1801 ddy = translate_src_register( emit, &insn->Src[2] );
1802 sampler = translate_src_register( emit, &insn->Src[3] );
1803
1804 inst.value = 0;
1805
1806 switch (insn->Instruction.Opcode) {
1807 case TGSI_OPCODE_TXD:
1808 inst.op = SVGA3DOP_TEXLDD; /* 4 args! */
1809 break;
1810 default:
1811 assert(0);
1812 return FALSE;
1813 }
1814
1815 return submit_op4( emit, inst, dst, texcoord, sampler, ddx, ddy );
1816 }
1817
1818
1819 /**
1820 * Emit texture swizzle code. We do this here since SVGA samplers don't
1821 * directly support swizzles.
1822 */
1823 static boolean
1824 emit_tex_swizzle(struct svga_shader_emitter *emit,
1825 SVGA3dShaderDestToken dst,
1826 struct src_register src,
1827 unsigned swizzle_x,
1828 unsigned swizzle_y,
1829 unsigned swizzle_z,
1830 unsigned swizzle_w)
1831 {
1832 const unsigned swizzleIn[4] = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
1833 unsigned srcSwizzle[4];
1834 unsigned srcWritemask = 0x0, zeroWritemask = 0x0, oneWritemask = 0x0;
1835 int i;
1836
1837 /* build writemasks and srcSwizzle terms */
1838 for (i = 0; i < 4; i++) {
1839 if (swizzleIn[i] == PIPE_SWIZZLE_ZERO) {
1840 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1841 zeroWritemask |= (1 << i);
1842 }
1843 else if (swizzleIn[i] == PIPE_SWIZZLE_ONE) {
1844 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1845 oneWritemask |= (1 << i);
1846 }
1847 else {
1848 srcSwizzle[i] = swizzleIn[i];
1849 srcWritemask |= (1 << i);
1850 }
1851 }
1852
1853 /* write x/y/z/w comps */
1854 if (dst.mask & srcWritemask) {
1855 if (!submit_op1(emit,
1856 inst_token(SVGA3DOP_MOV),
1857 writemask(dst, srcWritemask),
1858 swizzle(src,
1859 srcSwizzle[0],
1860 srcSwizzle[1],
1861 srcSwizzle[2],
1862 srcSwizzle[3])))
1863 return FALSE;
1864 }
1865
1866 /* write 0 comps */
1867 if (dst.mask & zeroWritemask) {
1868 if (!submit_op1(emit,
1869 inst_token(SVGA3DOP_MOV),
1870 writemask(dst, zeroWritemask),
1871 get_zero_immediate(emit)))
1872 return FALSE;
1873 }
1874
1875 /* write 1 comps */
1876 if (dst.mask & oneWritemask) {
1877 if (!submit_op1(emit,
1878 inst_token(SVGA3DOP_MOV),
1879 writemask(dst, oneWritemask),
1880 get_one_immediate(emit)))
1881 return FALSE;
1882 }
1883
1884 return TRUE;
1885 }
1886
1887
1888 /**
1889 * Translate/emit a TGSI texture sample instruction.
1890 */
1891 static boolean
1892 emit_tex(struct svga_shader_emitter *emit,
1893 const struct tgsi_full_instruction *insn)
1894 {
1895 SVGA3dShaderDestToken dst =
1896 translate_dst_register( emit, insn, 0 );
1897 struct src_register src0 =
1898 translate_src_register( emit, &insn->Src[0] );
1899 struct src_register src1 =
1900 translate_src_register( emit, &insn->Src[1] );
1901
1902 SVGA3dShaderDestToken tex_result;
1903 const unsigned unit = src1.base.num;
1904
1905 /* check for shadow samplers */
1906 boolean compare = (emit->key.fkey.tex[unit].compare_mode ==
1907 PIPE_TEX_COMPARE_R_TO_TEXTURE);
1908
1909 /* texture swizzle */
1910 boolean swizzle = (emit->key.fkey.tex[unit].swizzle_r != PIPE_SWIZZLE_RED ||
1911 emit->key.fkey.tex[unit].swizzle_g != PIPE_SWIZZLE_GREEN ||
1912 emit->key.fkey.tex[unit].swizzle_b != PIPE_SWIZZLE_BLUE ||
1913 emit->key.fkey.tex[unit].swizzle_a != PIPE_SWIZZLE_ALPHA);
1914
1915 boolean saturate = insn->Instruction.Saturate != TGSI_SAT_NONE;
1916
1917 /* If doing compare processing or tex swizzle or saturation, we need to put
1918 * the fetched color into a temporary so it can be used as a source later on.
1919 */
1920 if (compare || swizzle || saturate) {
1921 tex_result = get_temp( emit );
1922 }
1923 else {
1924 tex_result = dst;
1925 }
1926
1927 switch(insn->Instruction.Opcode) {
1928 case TGSI_OPCODE_TEX:
1929 case TGSI_OPCODE_TXB:
1930 case TGSI_OPCODE_TXP:
1931 case TGSI_OPCODE_TXL:
1932 if (!emit_tex2( emit, insn, tex_result ))
1933 return FALSE;
1934 break;
1935 case TGSI_OPCODE_TXD:
1936 if (!emit_tex4( emit, insn, tex_result ))
1937 return FALSE;
1938 break;
1939 default:
1940 assert(0);
1941 }
1942
1943 if (compare) {
1944 SVGA3dShaderDestToken dst2;
1945
1946 if (swizzle || saturate)
1947 dst2 = tex_result;
1948 else
1949 dst2 = dst;
1950
1951 if (dst.mask & TGSI_WRITEMASK_XYZ) {
1952 SVGA3dShaderDestToken src0_zdivw = get_temp( emit );
1953 /* When sampling a depth texture, the result of the comparison is in
1954 * the Y component.
1955 */
1956 struct src_register tex_src_x = scalar(src(tex_result), TGSI_SWIZZLE_Y);
1957 struct src_register r_coord;
1958
1959 if (insn->Instruction.Opcode == TGSI_OPCODE_TXP) {
1960 /* Divide texcoord R by Q */
1961 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1962 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1963 scalar(src0, TGSI_SWIZZLE_W) ))
1964 return FALSE;
1965
1966 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1967 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1968 scalar(src0, TGSI_SWIZZLE_Z),
1969 scalar(src(src0_zdivw), TGSI_SWIZZLE_X) ))
1970 return FALSE;
1971
1972 r_coord = scalar(src(src0_zdivw), TGSI_SWIZZLE_X);
1973 }
1974 else {
1975 r_coord = scalar(src0, TGSI_SWIZZLE_Z);
1976 }
1977
1978 /* Compare texture sample value against R component of texcoord */
1979 if (!emit_select(emit,
1980 emit->key.fkey.tex[unit].compare_func,
1981 writemask( dst2, TGSI_WRITEMASK_XYZ ),
1982 r_coord,
1983 tex_src_x))
1984 return FALSE;
1985 }
1986
1987 if (dst.mask & TGSI_WRITEMASK_W) {
1988 struct src_register one = get_one_immediate(emit);
1989
1990 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1991 writemask( dst2, TGSI_WRITEMASK_W ),
1992 one ))
1993 return FALSE;
1994 }
1995 }
1996
1997 if (saturate && !swizzle) {
1998 /* MOV_SAT real_dst, dst */
1999 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src(tex_result) ))
2000 return FALSE;
2001 }
2002 else if (swizzle) {
2003 /* swizzle from tex_result to dst (handles saturation too, if any) */
2004 if (!emit_tex_swizzle(emit, dst, src(tex_result),
2005 emit->key.fkey.tex[unit].swizzle_r,
2006 emit->key.fkey.tex[unit].swizzle_g,
2007 emit->key.fkey.tex[unit].swizzle_b,
2008 emit->key.fkey.tex[unit].swizzle_a))
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015
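/**
 * Translate/emit a TGSI BGNLOOP instruction, using the integer loop
 * constant emitted by create_loop_const().
 */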
2016 static boolean
2017 emit_bgnloop(struct svga_shader_emitter *emit,
2018 const struct tgsi_full_instruction *insn)
2019 {
2020 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_LOOP );
2021 struct src_register loop_reg = src_register( SVGA3DREG_LOOP, 0 );
2022 struct src_register const_int = get_loop_const( emit );
2023
2024 emit->dynamic_branching_level++;
2025
2026 return (emit_instruction( emit, inst ) &&
2027 emit_src( emit, loop_reg ) &&
2028 emit_src( emit, const_int ) );
2029 }
2030
2031
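     /**
      * Translate/emit TGSI ENDLOOP instruction.
      */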
2032 static boolean
2033 emit_endloop(struct svga_shader_emitter *emit,
2034 const struct tgsi_full_instruction *insn)
2035 {
2036 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_ENDLOOP );
2037
2038 emit->dynamic_branching_level--;
2039
2040 return emit_instruction( emit, inst );
2041 }
2042
2043
2044 /**
2045 * Translate/emit TGSI BREAK (out of loop) instruction.
2046 */
2047 static boolean
2048 emit_brk(struct svga_shader_emitter *emit,
2049 const struct tgsi_full_instruction *insn)
2050 {
2051 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_BREAK );
2052 return emit_instruction( emit, inst );
2053 }
2054
2055
2056 /**
2057 * Emit simple instruction which operates on one scalar value (not
2058 * a vector). Ex: LG2, RCP, RSQ.
2059 */
2060 static boolean
2061 emit_scalar_op1(struct svga_shader_emitter *emit,
2062 unsigned opcode,
2063 const struct tgsi_full_instruction *insn)
2064 {
2065 SVGA3dShaderInstToken inst;
2066 SVGA3dShaderDestToken dst;
2067 struct src_register src;
2068
2069 inst = inst_token( opcode );
2070 dst = translate_dst_register( emit, insn, 0 );
2071 src = translate_src_register( emit, &insn->Src[0] );
2072 src = scalar( src, TGSI_SWIZZLE_X );
2073
2074 return submit_op1( emit, inst, dst, src );
2075 }
2076
2077
2078 /**
2079 * Translate/emit a simple instruction (one which has no special-case
2080 * code) such as ADD, MUL, MIN, MAX.
2081 */
2082 static boolean
2083 emit_simple_instruction(struct svga_shader_emitter *emit,
2084 unsigned opcode,
2085 const struct tgsi_full_instruction *insn)
2086 {
2087 const struct tgsi_full_src_register *src = insn->Src;
2088 SVGA3dShaderInstToken inst;
2089 SVGA3dShaderDestToken dst;
2090
2091 inst = inst_token( opcode );
2092 dst = translate_dst_register( emit, insn, 0 );
2093
2094 switch (insn->Instruction.NumSrcRegs) {
2095 case 0:
2096 return submit_op0( emit, inst, dst );
2097 case 1:
2098 return submit_op1( emit, inst, dst,
2099 translate_src_register( emit, &src[0] ));
2100 case 2:
2101 return submit_op2( emit, inst, dst,
2102 translate_src_register( emit, &src[0] ),
2103 translate_src_register( emit, &src[1] ) );
2104 case 3:
2105 return submit_op3( emit, inst, dst,
2106 translate_src_register( emit, &src[0] ),
2107 translate_src_register( emit, &src[1] ),
2108 translate_src_register( emit, &src[2] ) );
2109 default:
2110 assert(0);
2111 return FALSE;
2112 }
2113 }
2114
2115
2116 /**
2117 * Translate/emit TGSI DDX, DDY instructions.
2118 */
2119 static boolean
2120 emit_deriv(struct svga_shader_emitter *emit,
2121 const struct tgsi_full_instruction *insn )
2122 {
2123 if (emit->dynamic_branching_level > 0 &&
2124 insn->Src[0].Register.File == TGSI_FILE_TEMPORARY)
2125 {
2126 SVGA3dShaderDestToken dst =
2127 translate_dst_register( emit, insn, 0 );
2128
2129       /* Derivative opcodes are not valid inside dynamic branching; work
2130        * around this by zeroing out the destination.
2131 */
2132 if (!submit_op1(emit,
2133 inst_token( SVGA3DOP_MOV ),
2134 dst,
2135 get_zero_immediate(emit)))
2136 return FALSE;
2137
2138 return TRUE;
2139 }
2140 else {
2141 unsigned opcode;
2142 const struct tgsi_full_src_register *reg = &insn->Src[0];
2143 SVGA3dShaderInstToken inst;
2144 SVGA3dShaderDestToken dst;
2145 struct src_register src0;
2146
2147 switch (insn->Instruction.Opcode) {
2148 case TGSI_OPCODE_DDX:
2149 opcode = SVGA3DOP_DSX;
2150 break;
2151 case TGSI_OPCODE_DDY:
2152 opcode = SVGA3DOP_DSY;
2153 break;
2154 default:
2155 return FALSE;
2156 }
2157
2158 inst = inst_token( opcode );
2159 dst = translate_dst_register( emit, insn, 0 );
2160 src0 = translate_src_register( emit, reg );
2161
2162       /* We cannot use negate or abs modifiers on the source of a dsx/dsy instruction.
2163 */
2164 if (reg->Register.Absolute ||
2165 reg->Register.Negate) {
2166 SVGA3dShaderDestToken temp = get_temp( emit );
2167
2168 if (!emit_repl( emit, temp, &src0 ))
2169 return FALSE;
2170 }
2171
2172 return submit_op1( emit, inst, dst, src0 );
2173 }
2174 }
2175
2176
2177 /**
2178 * Translate/emit ARL (Address Register Load) instruction. Used to
2179 * move a value into the special 'address' register. Used to implement
2180 * indirect/variable indexing into arrays.
2181 */
2182 static boolean
2183 emit_arl(struct svga_shader_emitter *emit,
2184 const struct tgsi_full_instruction *insn)
2185 {
2186 ++emit->current_arl;
2187 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2188 /* MOVA not present in pixel shader instruction set.
2189 * Ignore this instruction altogether since it is
2190 * only used for loop counters -- and for that
2191 * we reference aL directly.
2192 */
2193 return TRUE;
2194 }
2195 if (svga_arl_needs_adjustment( emit )) {
2196 return emit_fake_arl( emit, insn );
2197 } else {
2198 /* no need to adjust, just emit straight arl */
2199 return emit_simple_instruction(emit, SVGA3DOP_MOVA, insn);
2200 }
2201 }
2202
2203
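     /**
      * Translate/emit TGSI POW instruction (scalar x^y).
      */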
2204 static boolean
2205 emit_pow(struct svga_shader_emitter *emit,
2206 const struct tgsi_full_instruction *insn)
2207 {
2208 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2209 struct src_register src0 = translate_src_register(
2210 emit, &insn->Src[0] );
2211 struct src_register src1 = translate_src_register(
2212 emit, &insn->Src[1] );
2213 boolean need_tmp = FALSE;
2214
2215 /* POW can only output to a temporary */
2216 if (insn->Dst[0].Register.File != TGSI_FILE_TEMPORARY)
2217 need_tmp = TRUE;
2218
2219 /* POW src1 must not be the same register as dst */
2220 if (alias_src_dst( src1, dst ))
2221 need_tmp = TRUE;
2222
2223 /* it's a scalar op */
2224 src0 = scalar( src0, TGSI_SWIZZLE_X );
2225 src1 = scalar( src1, TGSI_SWIZZLE_X );
2226
2227 if (need_tmp) {
2228 SVGA3dShaderDestToken tmp =
2229 writemask(get_temp( emit ), TGSI_WRITEMASK_X );
2230
2231 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ), tmp, src0, src1))
2232 return FALSE;
2233
2234 return submit_op1(emit, inst_token( SVGA3DOP_MOV ),
2235 dst, scalar(src(tmp), 0) );
2236 }
2237 else {
2238 return submit_op2(emit, inst_token( SVGA3DOP_POW ), dst, src0, src1);
2239 }
2240 }
2241
2242
2243 /**
2244 * Translate/emit TGSI XPD (vector cross product) instruction.
2245 */
2246 static boolean
2247 emit_xpd(struct svga_shader_emitter *emit,
2248 const struct tgsi_full_instruction *insn)
2249 {
2250 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2251 const struct src_register src0 = translate_src_register(
2252 emit, &insn->Src[0] );
2253 const struct src_register src1 = translate_src_register(
2254 emit, &insn->Src[1] );
2255 boolean need_dst_tmp = FALSE;
2256
2257 /* XPD can only output to a temporary */
2258 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP)
2259 need_dst_tmp = TRUE;
2260
2261    /* The dst reg must not be the same as src0 or src1 */
2262 if (alias_src_dst(src0, dst) ||
2263 alias_src_dst(src1, dst))
2264 need_dst_tmp = TRUE;
2265
2266 if (need_dst_tmp) {
2267 SVGA3dShaderDestToken tmp = get_temp( emit );
2268
2269 /* Obey DX9 restrictions on mask:
2270 */
2271 tmp.mask = dst.mask & TGSI_WRITEMASK_XYZ;
2272
2273 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), tmp, src0, src1))
2274 return FALSE;
2275
2276 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
2277 return FALSE;
2278 }
2279 else {
2280 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), dst, src0, src1))
2281 return FALSE;
2282 }
2283
2284 /* Need to emit 1.0 to dst.w?
2285 */
2286 if (dst.mask & TGSI_WRITEMASK_W) {
2287 struct src_register one = get_one_immediate( emit );
2288
2289 if (!submit_op1(emit,
2290 inst_token( SVGA3DOP_MOV ),
2291 writemask(dst, TGSI_WRITEMASK_W),
2292 one))
2293 return FALSE;
2294 }
2295
2296 return TRUE;
2297 }
2298
2299
2300 /**
2301 * Emit a LRP (linear interpolation) instruction.
2302 */
2303 static boolean
2304 submit_lrp(struct svga_shader_emitter *emit,
2305 SVGA3dShaderDestToken dst,
2306 struct src_register src0,
2307 struct src_register src1,
2308 struct src_register src2)
2309 {
2310 SVGA3dShaderDestToken tmp;
2311 boolean need_dst_tmp = FALSE;
2312
2313 /* The dst reg must be a temporary, and not be the same as src0 or src2 */
2314 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
2315 alias_src_dst(src0, dst) ||
2316 alias_src_dst(src2, dst))
2317 need_dst_tmp = TRUE;
2318
2319 if (need_dst_tmp) {
2320 tmp = get_temp( emit );
2321 tmp.mask = dst.mask;
2322 }
2323 else {
2324 tmp = dst;
2325 }
2326
2327 if (!submit_op3(emit, inst_token( SVGA3DOP_LRP ), tmp, src0, src1, src2))
2328 return FALSE;
2329
2330 if (need_dst_tmp) {
2331 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
2332 return FALSE;
2333 }
2334
2335 return TRUE;
2336 }
2337
2338
2339 /**
2340 * Translate/emit LRP (Linear Interpolation) instruction.
2341 */
2342 static boolean
2343 emit_lrp(struct svga_shader_emitter *emit,
2344 const struct tgsi_full_instruction *insn)
2345 {
2346 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2347 const struct src_register src0 = translate_src_register(
2348 emit, &insn->Src[0] );
2349 const struct src_register src1 = translate_src_register(
2350 emit, &insn->Src[1] );
2351 const struct src_register src2 = translate_src_register(
2352 emit, &insn->Src[2] );
2353
2354 return submit_lrp(emit, dst, src0, src1, src2);
2355 }
2356
2357 /**
2358 * Translate/emit DST (Distance function) instruction.
2359 */
2360 static boolean
2361 emit_dst_insn(struct svga_shader_emitter *emit,
2362 const struct tgsi_full_instruction *insn)
2363 {
2364 if (emit->unit == PIPE_SHADER_VERTEX) {
2365 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
2366 */
2367 return emit_simple_instruction(emit, SVGA3DOP_DST, insn);
2368 }
2369 else {
2370 /* result[0] = 1 * 1;
2371 * result[1] = a[1] * b[1];
2372 * result[2] = a[2] * 1;
2373 * result[3] = 1 * b[3];
2374 */
2375 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2376 SVGA3dShaderDestToken tmp;
2377 const struct src_register src0 = translate_src_register(
2378 emit, &insn->Src[0] );
2379 const struct src_register src1 = translate_src_register(
2380 emit, &insn->Src[1] );
2381 boolean need_tmp = FALSE;
2382
2383 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
2384 alias_src_dst(src0, dst) ||
2385 alias_src_dst(src1, dst))
2386 need_tmp = TRUE;
2387
2388 if (need_tmp) {
2389 tmp = get_temp( emit );
2390 }
2391 else {
2392 tmp = dst;
2393 }
2394
2395 /* tmp.xw = 1.0
2396 */
2397 if (tmp.mask & TGSI_WRITEMASK_XW) {
2398 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2399 writemask(tmp, TGSI_WRITEMASK_XW ),
2400 get_one_immediate(emit)))
2401 return FALSE;
2402 }
2403
2404 /* tmp.yz = src0
2405 */
2406 if (tmp.mask & TGSI_WRITEMASK_YZ) {
2407 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2408 writemask(tmp, TGSI_WRITEMASK_YZ ),
2409 src0))
2410 return FALSE;
2411 }
2412
2413 /* tmp.yw = tmp * src1
2414 */
2415 if (tmp.mask & TGSI_WRITEMASK_YW) {
2416 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2417 writemask(tmp, TGSI_WRITEMASK_YW ),
2418 src(tmp),
2419 src1))
2420 return FALSE;
2421 }
2422
2423 /* dst = tmp
2424 */
2425 if (need_tmp) {
2426 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2427 dst,
2428 src(tmp)))
2429 return FALSE;
2430 }
2431 }
2432
2433 return TRUE;
2434 }
2435
2436
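     /**
      * Translate/emit TGSI EXP instruction: dst.x = 2^floor(src.x),
      * dst.y = src.x - floor(src.x), dst.z = 2^src.x (partial precision),
      * dst.w = 1.
      */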
2437 static boolean
2438 emit_exp(struct svga_shader_emitter *emit,
2439 const struct tgsi_full_instruction *insn)
2440 {
2441 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2442 struct src_register src0 =
2443 translate_src_register( emit, &insn->Src[0] );
2444 SVGA3dShaderDestToken fraction;
2445
2446 if (dst.mask & TGSI_WRITEMASK_Y)
2447 fraction = dst;
2448 else if (dst.mask & TGSI_WRITEMASK_X)
2449 fraction = get_temp( emit );
2450 else
2451 fraction.value = 0;
2452
2453    /* If x or y is being written, put src0 - floor(src0) in fraction.y.
2454 */
2455 if (dst.mask & TGSI_WRITEMASK_XY) {
2456 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2457 writemask( fraction, TGSI_WRITEMASK_Y ),
2458 src0 ))
2459 return FALSE;
2460 }
2461
2462 /* If x is being written, fill it with 2 ^ floor(src0).
2463 */
2464 if (dst.mask & TGSI_WRITEMASK_X) {
2465 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2466 writemask( dst, TGSI_WRITEMASK_X ),
2467 src0,
2468 scalar( negate( src( fraction ) ), TGSI_SWIZZLE_Y ) ) )
2469 return FALSE;
2470
2471 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2472 writemask( dst, TGSI_WRITEMASK_X ),
2473 scalar( src( dst ), TGSI_SWIZZLE_X ) ) )
2474 return FALSE;
2475
2476 if (!(dst.mask & TGSI_WRITEMASK_Y))
2477 release_temp( emit, fraction );
2478 }
2479
2480 /* If z is being written, fill it with 2 ^ src0 (partial precision).
2481 */
2482 if (dst.mask & TGSI_WRITEMASK_Z) {
2483 if (!submit_op1( emit, inst_token( SVGA3DOP_EXPP ),
2484 writemask( dst, TGSI_WRITEMASK_Z ),
2485 src0 ) )
2486 return FALSE;
2487 }
2488
2489 /* If w is being written, fill it with one.
2490 */
2491 if (dst.mask & TGSI_WRITEMASK_W) {
2492 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2493 writemask(dst, TGSI_WRITEMASK_W),
2494 get_one_immediate(emit)))
2495 return FALSE;
2496 }
2497
2498 return TRUE;
2499 }
2500
2501
2502 /**
2503 * Translate/emit LIT (Lighting helper) instruction.
2504 */
2505 static boolean
2506 emit_lit(struct svga_shader_emitter *emit,
2507 const struct tgsi_full_instruction *insn)
2508 {
2509 if (emit->unit == PIPE_SHADER_VERTEX) {
2510 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
2511 */
2512 return emit_simple_instruction(emit, SVGA3DOP_LIT, insn);
2513 }
2514 else {
2515       /* D3D vs. GL semantics can be fairly easily accommodated by
2516 * variations on this sequence.
2517 *
2518 * GL:
2519 * tmp.y = src.x
2520 * tmp.z = pow(src.y,src.w)
2521 * p0 = src0.xxxx > 0
2522 * result = zero.wxxw
2523 * (p0) result.yz = tmp
2524 *
2525 * D3D:
2526 * tmp.y = src.x
2527 * tmp.z = pow(src.y,src.w)
2528 * p0 = src0.xxyy > 0
2529 * result = zero.wxxw
2530 * (p0) result.yz = tmp
2531 *
2532 * Will implement the GL version for now.
2533 */
2534 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2535 SVGA3dShaderDestToken tmp = get_temp( emit );
2536 const struct src_register src0 = translate_src_register(
2537 emit, &insn->Src[0] );
2538
2539 /* tmp = pow(src.y, src.w)
2540 */
2541 if (dst.mask & TGSI_WRITEMASK_Z) {
2542 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ),
2543 tmp,
2544 scalar(src0, 1),
2545 scalar(src0, 3)))
2546 return FALSE;
2547 }
2548
2549 /* tmp.y = src.x
2550 */
2551 if (dst.mask & TGSI_WRITEMASK_Y) {
2552 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2553 writemask(tmp, TGSI_WRITEMASK_Y ),
2554 scalar(src0, 0)))
2555 return FALSE;
2556 }
2557
2558       /* Can't quite do this with emit_conditional() due to the extra
2559        * writemask on the predicated MOV:
2560 */
2561 {
2562 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
2563 SVGA3dShaderInstToken setp_token, mov_token;
2564 struct src_register predsrc;
2565
2566 setp_token = inst_token( SVGA3DOP_SETP );
2567 mov_token = inst_token( SVGA3DOP_MOV );
2568
2569 setp_token.control = SVGA3DOPCOMP_GT;
2570
2571 /* D3D vs GL semantics:
2572 */
2573 if (0)
2574 predsrc = swizzle(src0, 0, 0, 1, 1); /* D3D */
2575 else
2576 predsrc = swizzle(src0, 0, 0, 0, 0); /* GL */
2577
2578          /* SETP pred_reg, predsrc, GT, {0}.x */
2579 if (!submit_op2( emit, setp_token, pred_reg,
2580 predsrc,
2581 get_zero_immediate(emit)))
2582 return FALSE;
2583
2584 /* MOV dst, fail */
2585 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst,
2586 get_immediate(emit, 1.0f, 0.0f, 0.0f, 1.0f)))
2587 return FALSE;
2588
2589 /* MOV dst.yz, tmp (predicated)
2590 *
2591 * Note that the predicate reg (and possible modifiers) is passed
2592 * as the first source argument.
2593 */
2594 if (dst.mask & TGSI_WRITEMASK_YZ) {
2595 mov_token.predicated = 1;
2596 if (!submit_op2( emit, mov_token,
2597 writemask(dst, TGSI_WRITEMASK_YZ),
2598 src( pred_reg ), src( tmp ) ))
2599 return FALSE;
2600 }
2601 }
2602 }
2603
2604 return TRUE;
2605 }
2606
2607
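     /**
      * Translate/emit TGSI EX2 (2^x, scalar) instruction.
      */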
2608 static boolean
2609 emit_ex2(struct svga_shader_emitter *emit,
2610 const struct tgsi_full_instruction *insn)
2611 {
2612 SVGA3dShaderInstToken inst;
2613 SVGA3dShaderDestToken dst;
2614 struct src_register src0;
2615
2616 inst = inst_token( SVGA3DOP_EXP );
2617 dst = translate_dst_register( emit, insn, 0 );
2618 src0 = translate_src_register( emit, &insn->Src[0] );
2619 src0 = scalar( src0, TGSI_SWIZZLE_X );
2620
2621 if (dst.mask != TGSI_WRITEMASK_XYZW) {
2622 SVGA3dShaderDestToken tmp = get_temp( emit );
2623
2624 if (!submit_op1( emit, inst, tmp, src0 ))
2625 return FALSE;
2626
2627 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2628 dst,
2629 scalar( src( tmp ), TGSI_SWIZZLE_X ) );
2630 }
2631
2632 return submit_op1( emit, inst, dst, src0 );
2633 }
2634
2635
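     /**
      * Translate/emit TGSI LOG instruction: dst.x = floor(log2|src.x|),
      * dst.y = |src.x| / 2^floor(log2|src.x|), dst.z = log2|src.x|, dst.w = 1.
      */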
2636 static boolean
2637 emit_log(struct svga_shader_emitter *emit,
2638 const struct tgsi_full_instruction *insn)
2639 {
2640 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2641 struct src_register src0 =
2642 translate_src_register( emit, &insn->Src[0] );
2643 SVGA3dShaderDestToken abs_tmp;
2644 struct src_register abs_src0;
2645 SVGA3dShaderDestToken log2_abs;
2646
2647 abs_tmp.value = 0;
2648
2649 if (dst.mask & TGSI_WRITEMASK_Z)
2650 log2_abs = dst;
2651 else if (dst.mask & TGSI_WRITEMASK_XY)
2652 log2_abs = get_temp( emit );
2653 else
2654 log2_abs.value = 0;
2655
2656    /* If x, y or z is being written, put log2( abs( src0 ) ) in log2_abs.z.
2657 */
2658 if (dst.mask & TGSI_WRITEMASK_XYZ) {
2659 if (!src0.base.srcMod || src0.base.srcMod == SVGA3DSRCMOD_ABS)
2660 abs_src0 = src0;
2661 else {
2662 abs_tmp = get_temp( emit );
2663
2664 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2665 abs_tmp,
2666 src0 ) )
2667 return FALSE;
2668
2669 abs_src0 = src( abs_tmp );
2670 }
2671
2672 abs_src0 = absolute( scalar( abs_src0, TGSI_SWIZZLE_X ) );
2673
2674 if (!submit_op1( emit, inst_token( SVGA3DOP_LOG ),
2675 writemask( log2_abs, TGSI_WRITEMASK_Z ),
2676 abs_src0 ) )
2677 return FALSE;
2678 }
2679
2680 if (dst.mask & TGSI_WRITEMASK_XY) {
2681 SVGA3dShaderDestToken floor_log2;
2682
2683 if (dst.mask & TGSI_WRITEMASK_X)
2684 floor_log2 = dst;
2685 else
2686 floor_log2 = get_temp( emit );
2687
2688 /* If x is being written, fill it with floor( log2( abs( src0 ) ) ).
2689 */
2690 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2691 writemask( floor_log2, TGSI_WRITEMASK_X ),
2692 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ) ) )
2693 return FALSE;
2694
2695 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2696 writemask( floor_log2, TGSI_WRITEMASK_X ),
2697 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ),
2698 negate( src( floor_log2 ) ) ) )
2699 return FALSE;
2700
2701 /* If y is being written, fill it with
2702 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
2703 */
2704 if (dst.mask & TGSI_WRITEMASK_Y) {
2705 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2706 writemask( dst, TGSI_WRITEMASK_Y ),
2707 negate( scalar( src( floor_log2 ),
2708 TGSI_SWIZZLE_X ) ) ) )
2709 return FALSE;
2710
2711 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2712 writemask( dst, TGSI_WRITEMASK_Y ),
2713 src( dst ),
2714 abs_src0 ) )
2715 return FALSE;
2716 }
2717
2718 if (!(dst.mask & TGSI_WRITEMASK_X))
2719 release_temp( emit, floor_log2 );
2720
2721 if (!(dst.mask & TGSI_WRITEMASK_Z))
2722 release_temp( emit, log2_abs );
2723 }
2724
2725 if (dst.mask & TGSI_WRITEMASK_XYZ && src0.base.srcMod &&
2726 src0.base.srcMod != SVGA3DSRCMOD_ABS)
2727 release_temp( emit, abs_tmp );
2728
2729 /* If w is being written, fill it with one.
2730 */
2731 if (dst.mask & TGSI_WRITEMASK_W) {
2732 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2733 writemask(dst, TGSI_WRITEMASK_W),
2734 get_one_immediate(emit)))
2735 return FALSE;
2736 }
2737
2738 return TRUE;
2739 }
2740
2741
2742 /**
2743 * Translate TGSI TRUNC or ROUND instruction.
2744 * We need to truncate toward zero. Ex: trunc(-1.9) = -1
2745 * Different approaches are needed for VS versus PS.
2746 */
2747 static boolean
2748 emit_trunc_round(struct svga_shader_emitter *emit,
2749 const struct tgsi_full_instruction *insn,
2750 boolean round)
2751 {
2752 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
2753 const struct src_register src0 =
2754 translate_src_register(emit, &insn->Src[0] );
2755 SVGA3dShaderDestToken t1 = get_temp(emit);
2756
2757 if (round) {
2758 SVGA3dShaderDestToken t0 = get_temp(emit);
2759 struct src_register half = get_half_immediate(emit);
2760
2761 /* t0 = abs(src0) + 0.5 */
2762 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t0,
2763 absolute(src0), half))
2764 return FALSE;
2765
2766 /* t1 = fract(t0) */
2767 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, src(t0)))
2768 return FALSE;
2769
2770 /* t1 = t0 - t1 */
2771 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, src(t0),
2772 negate(src(t1))))
2773 return FALSE;
2774 }
2775 else {
2776 /* trunc */
2777
2778 /* t1 = fract(abs(src0)) */
2779 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, absolute(src0)))
2780 return FALSE;
2781
2782 /* t1 = abs(src0) - t1 */
2783 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, absolute(src0),
2784 negate(src(t1))))
2785 return FALSE;
2786 }
2787
2788 /*
2789 * Now we need to multiply t1 by the sign of the original value.
2790 */
2791 if (emit->unit == PIPE_SHADER_VERTEX) {
2792 /* For VS: use SGN instruction */
2793 /* Need two extra/dummy registers: */
2794 SVGA3dShaderDestToken t2 = get_temp(emit), t3 = get_temp(emit),
2795 t4 = get_temp(emit);
2796
2797 /* t2 = sign(src0) */
2798 if (!submit_op3(emit, inst_token(SVGA3DOP_SGN), t2, src0,
2799 src(t3), src(t4)))
2800 return FALSE;
2801
2802 /* dst = t1 * t2 */
2803 if (!submit_op2(emit, inst_token(SVGA3DOP_MUL), dst, src(t1), src(t2)))
2804 return FALSE;
2805 }
2806 else {
2807 /* For FS: Use CMP instruction */
2808 return submit_op3(emit, inst_token( SVGA3DOP_CMP ), dst,
2809 src0, src(t1), negate(src(t1)));
2810 }
2811
2812 return TRUE;
2813 }
2814
2815
2816 /**
2817 * Translate/emit "begin subroutine" instruction/marker/label.
2818 */
2819 static boolean
2820 emit_bgnsub(struct svga_shader_emitter *emit,
2821 unsigned position,
2822 const struct tgsi_full_instruction *insn)
2823 {
2824 unsigned i;
2825
2826 /* Note that we've finished the main function and are now emitting
2827 * subroutines. This affects how we terminate the generated
2828 * shader.
2829 */
2830 emit->in_main_func = FALSE;
2831
2832 for (i = 0; i < emit->nr_labels; i++) {
2833 if (emit->label[i] == position) {
2834 return (emit_instruction( emit, inst_token( SVGA3DOP_RET ) ) &&
2835 emit_instruction( emit, inst_token( SVGA3DOP_LABEL ) ) &&
2836 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2837 }
2838 }
2839
2840 assert(0);
2841 return TRUE;
2842 }
2843
2844
2845 /**
2846 * Translate/emit subroutine call instruction.
2847 */
2848 static boolean
2849 emit_call(struct svga_shader_emitter *emit,
2850 const struct tgsi_full_instruction *insn)
2851 {
2852 unsigned position = insn->Label.Label;
2853 unsigned i;
2854
2855 for (i = 0; i < emit->nr_labels; i++) {
2856 if (emit->label[i] == position)
2857 break;
2858 }
2859
2860 if (emit->nr_labels == Elements(emit->label))
2861 return FALSE;
2862
2863 if (i == emit->nr_labels) {
2864 emit->label[i] = position;
2865 emit->nr_labels++;
2866 }
2867
2868 return (emit_instruction( emit, inst_token( SVGA3DOP_CALL ) ) &&
2869 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2870 }
2871
2872
2873 /**
2874  * Called at the end of the shader: emit the special "fix-up"/postamble
2875  * code for the vertex or fragment shader.
2876 */
2877 static boolean
2878 emit_end(struct svga_shader_emitter *emit)
2879 {
2880 if (emit->unit == PIPE_SHADER_VERTEX) {
2881 return emit_vs_postamble( emit );
2882 }
2883 else {
2884 return emit_ps_postamble( emit );
2885 }
2886 }
2887
2888
2889 /**
2890 * Translate any TGSI instruction to SVGA.
2891 */
2892 static boolean
2893 svga_emit_instruction(struct svga_shader_emitter *emit,
2894 unsigned position,
2895 const struct tgsi_full_instruction *insn)
2896 {
2897 switch (insn->Instruction.Opcode) {
2898
2899 case TGSI_OPCODE_ARL:
2900 return emit_arl( emit, insn );
2901
2902 case TGSI_OPCODE_TEX:
2903 case TGSI_OPCODE_TXB:
2904 case TGSI_OPCODE_TXP:
2905 case TGSI_OPCODE_TXL:
2906 case TGSI_OPCODE_TXD:
2907 return emit_tex( emit, insn );
2908
2909 case TGSI_OPCODE_DDX:
2910 case TGSI_OPCODE_DDY:
2911 return emit_deriv( emit, insn );
2912
2913 case TGSI_OPCODE_BGNSUB:
2914 return emit_bgnsub( emit, position, insn );
2915
2916 case TGSI_OPCODE_ENDSUB:
2917 return TRUE;
2918
2919 case TGSI_OPCODE_CAL:
2920 return emit_call( emit, insn );
2921
2922 case TGSI_OPCODE_FLR:
2923 return emit_floor( emit, insn );
2924
2925 case TGSI_OPCODE_TRUNC:
2926 return emit_trunc_round( emit, insn, FALSE );
2927
2928 case TGSI_OPCODE_ROUND:
2929 return emit_trunc_round( emit, insn, TRUE );
2930
2931 case TGSI_OPCODE_CEIL:
2932 return emit_ceil( emit, insn );
2933
2934 case TGSI_OPCODE_CMP:
2935 return emit_cmp( emit, insn );
2936
2937 case TGSI_OPCODE_DIV:
2938 return emit_div( emit, insn );
2939
2940 case TGSI_OPCODE_DP2:
2941 return emit_dp2( emit, insn );
2942
2943 case TGSI_OPCODE_DPH:
2944 return emit_dph( emit, insn );
2945
2946 case TGSI_OPCODE_NRM:
2947 return emit_nrm( emit, insn );
2948
2949 case TGSI_OPCODE_COS:
2950 return emit_cos( emit, insn );
2951
2952 case TGSI_OPCODE_SIN:
2953 return emit_sin( emit, insn );
2954
2955 case TGSI_OPCODE_SCS:
2956 return emit_sincos( emit, insn );
2957
2958 case TGSI_OPCODE_END:
2959 /* TGSI always finishes the main func with an END */
2960 return emit_end( emit );
2961
2962 case TGSI_OPCODE_KILL_IF:
2963 return emit_kill_if( emit, insn );
2964
2965 /* Selection opcodes. The underlying language is fairly
2966 * non-orthogonal about these.
2967 */
2968 case TGSI_OPCODE_SEQ:
2969 return emit_select_op( emit, PIPE_FUNC_EQUAL, insn );
2970
2971 case TGSI_OPCODE_SNE:
2972 return emit_select_op( emit, PIPE_FUNC_NOTEQUAL, insn );
2973
2974 case TGSI_OPCODE_SGT:
2975 return emit_select_op( emit, PIPE_FUNC_GREATER, insn );
2976
2977 case TGSI_OPCODE_SGE:
2978 return emit_select_op( emit, PIPE_FUNC_GEQUAL, insn );
2979
2980 case TGSI_OPCODE_SLT:
2981 return emit_select_op( emit, PIPE_FUNC_LESS, insn );
2982
2983 case TGSI_OPCODE_SLE:
2984 return emit_select_op( emit, PIPE_FUNC_LEQUAL, insn );
2985
2986 case TGSI_OPCODE_SUB:
2987 return emit_sub( emit, insn );
2988
2989 case TGSI_OPCODE_POW:
2990 return emit_pow( emit, insn );
2991
2992 case TGSI_OPCODE_EX2:
2993 return emit_ex2( emit, insn );
2994
2995 case TGSI_OPCODE_EXP:
2996 return emit_exp( emit, insn );
2997
2998 case TGSI_OPCODE_LOG:
2999 return emit_log( emit, insn );
3000
3001 case TGSI_OPCODE_LG2:
3002 return emit_scalar_op1( emit, SVGA3DOP_LOG, insn );
3003
3004 case TGSI_OPCODE_RSQ:
3005 return emit_scalar_op1( emit, SVGA3DOP_RSQ, insn );
3006
3007 case TGSI_OPCODE_RCP:
3008 return emit_scalar_op1( emit, SVGA3DOP_RCP, insn );
3009
3010 case TGSI_OPCODE_CONT:
3011 /* not expected (we return PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED = 0) */
3012 return FALSE;
3013
3014 case TGSI_OPCODE_RET:
3015 /* This is a noop -- we tell mesa that we can't support RET
3016 * within a function (early return), so this will always be
3017 * followed by an ENDSUB.
3018 */
3019 return TRUE;
3020
3021 /* These aren't actually used by any of the frontends we care
3022 * about:
3023 */
3024 case TGSI_OPCODE_CLAMP:
3025 case TGSI_OPCODE_AND:
3026 case TGSI_OPCODE_OR:
3027 case TGSI_OPCODE_I2F:
3028 case TGSI_OPCODE_NOT:
3029 case TGSI_OPCODE_SHL:
3030 case TGSI_OPCODE_ISHR:
3031 case TGSI_OPCODE_XOR:
3032 return FALSE;
3033
3034 case TGSI_OPCODE_IF:
3035 return emit_if( emit, insn );
3036 case TGSI_OPCODE_ELSE:
3037 return emit_else( emit, insn );
3038 case TGSI_OPCODE_ENDIF:
3039 return emit_endif( emit, insn );
3040
3041 case TGSI_OPCODE_BGNLOOP:
3042 return emit_bgnloop( emit, insn );
3043 case TGSI_OPCODE_ENDLOOP:
3044 return emit_endloop( emit, insn );
3045 case TGSI_OPCODE_BRK:
3046 return emit_brk( emit, insn );
3047
3048 case TGSI_OPCODE_XPD:
3049 return emit_xpd( emit, insn );
3050
3051 case TGSI_OPCODE_KILL:
3052 return emit_kill( emit, insn );
3053
3054 case TGSI_OPCODE_DST:
3055 return emit_dst_insn( emit, insn );
3056
3057 case TGSI_OPCODE_LIT:
3058 return emit_lit( emit, insn );
3059
3060 case TGSI_OPCODE_LRP:
3061 return emit_lrp( emit, insn );
3062
3063 case TGSI_OPCODE_SSG:
3064 return emit_ssg( emit, insn );
3065
3066 default:
3067 {
3068 unsigned opcode = translate_opcode(insn->Instruction.Opcode);
3069
3070 if (opcode == SVGA3DOP_LAST_INST)
3071 return FALSE;
3072
3073 if (!emit_simple_instruction( emit, opcode, insn ))
3074 return FALSE;
3075 }
3076 }
3077
3078 return TRUE;
3079 }
3080
3081
3082 /**
3083 * Translate/emit a TGSI IMMEDIATE declaration.
3084 * An immediate vector is a constant that's hard-coded into the shader.
3085 */
3086 static boolean
3087 svga_emit_immediate(struct svga_shader_emitter *emit,
3088 const struct tgsi_full_immediate *imm)
3089 {
3090 static const float id[4] = {0,0,0,1};
3091 float value[4];
3092 unsigned i;
3093
3094 assert(1 <= imm->Immediate.NrTokens && imm->Immediate.NrTokens <= 5);
3095 for (i = 0; i < imm->Immediate.NrTokens - 1; i++) {
3096 float f = imm->u[i].Float;
3097 value[i] = util_is_inf_or_nan(f) ? 0.0f : f;
3098 }
3099
3100    /* If the immediate has fewer than four values, fill in the remaining
3101 * positions from id={0,0,0,1}.
3102 */
3103 for ( ; i < 4; i++ )
3104 value[i] = id[i];
3105
3106 return emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
3107 emit->imm_start + emit->internal_imm_count++,
3108 value[0], value[1], value[2], value[3]);
3109 }
3110
3111
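     /**
      * Allocate the next hardware float constant, emit a DEF for it and
      * return a src_register which references it.
      */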
3112 static boolean
3113 make_immediate(struct svga_shader_emitter *emit,
3114 float a, float b, float c, float d,
3115 struct src_register *out )
3116 {
3117 unsigned idx = emit->nr_hw_float_const++;
3118
3119 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
3120 idx, a, b, c, d ))
3121 return FALSE;
3122
3123 *out = src_register( SVGA3DREG_CONST, idx );
3124
3125 return TRUE;
3126 }
3127
3128
3129 /**
3130 * Emit special VS instructions at top of shader.
3131 */
3132 static boolean
3133 emit_vs_preamble(struct svga_shader_emitter *emit)
3134 {
3135 if (!emit->key.vkey.need_prescale) {
3136 if (!make_immediate( emit, 0, 0, .5, .5,
3137 &emit->imm_0055))
3138 return FALSE;
3139 }
3140
3141 return TRUE;
3142 }
3143
3144
3145 /**
3146 * Emit special PS instructions at top of shader.
3147 */
3148 static boolean
3149 emit_ps_preamble(struct svga_shader_emitter *emit)
3150 {
3151 if (emit->ps_reads_pos && emit->info.reads_z) {
3152 /*
3153 * Assemble the position from various bits of inputs. Depth and W are
3154        * passed in a texcoord because D3D's vPos does not hold Z or W.
3155        * Also fix up the perspective interpolation.
3156 *
3157 * temp_pos.xy = vPos.xy
3158 * temp_pos.w = rcp(texcoord1.w);
3159 * temp_pos.z = texcoord1.z * temp_pos.w;
3160 */
3161 if (!submit_op1( emit,
3162 inst_token(SVGA3DOP_MOV),
3163 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_XY ),
3164 emit->ps_true_pos ))
3165 return FALSE;
3166
3167 if (!submit_op1( emit,
3168 inst_token(SVGA3DOP_RCP),
3169 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_W ),
3170 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_W ) ))
3171 return FALSE;
3172
3173 if (!submit_op2( emit,
3174 inst_token(SVGA3DOP_MUL),
3175 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_Z ),
3176 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_Z ),
3177 scalar( src(emit->ps_temp_pos), TGSI_SWIZZLE_W ) ))
3178 return FALSE;
3179 }
3180
3181 return TRUE;
3182 }
3183
3184
3185 /**
3186 * Emit special PS instructions at end of shader.
3187 */
3188 static boolean
3189 emit_ps_postamble(struct svga_shader_emitter *emit)
3190 {
3191 unsigned i;
3192
3193 /* PS oDepth is incredibly fragile and it's very hard to catch the
3194 * types of usage that break it during shader emit. Easier just to
3195 * redirect the main program to a temporary and then only touch
3196 * oDepth with a hand-crafted MOV below.
3197 */
3198 if (SVGA3dShaderGetRegType(emit->true_pos.value) != 0) {
3199 if (!submit_op1( emit,
3200 inst_token(SVGA3DOP_MOV),
3201 emit->true_pos,
3202 scalar(src(emit->temp_pos), TGSI_SWIZZLE_Z) ))
3203 return FALSE;
3204 }
3205
3206 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
3207 if (SVGA3dShaderGetRegType(emit->true_color_output[i].value) != 0) {
3208 /* Potentially override output colors with white for XOR
3209 * logicop workaround.
3210 */
3211 if (emit->unit == PIPE_SHADER_FRAGMENT &&
3212 emit->key.fkey.white_fragments) {
3213 struct src_register one = get_one_immediate(emit);
3214
3215 if (!submit_op1( emit,
3216 inst_token(SVGA3DOP_MOV),
3217 emit->true_color_output[i],
3218 one ))
3219 return FALSE;
3220 }
3221 else if (emit->unit == PIPE_SHADER_FRAGMENT &&
3222 i < emit->key.fkey.write_color0_to_n_cbufs) {
3223 /* Write temp color output [0] to true output [i] */
3224 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV),
3225 emit->true_color_output[i],
3226 src(emit->temp_color_output[0]))) {
3227 return FALSE;
3228 }
3229 }
3230 else {
3231 if (!submit_op1( emit,
3232 inst_token(SVGA3DOP_MOV),
3233 emit->true_color_output[i],
3234 src(emit->temp_color_output[i]) ))
3235 return FALSE;
3236 }
3237 }
3238 }
3239
3240 return TRUE;
3241 }
3242
3243
3244 /**
3245 * Emit special VS instructions at end of shader.
3246 */
3247 static boolean
3248 emit_vs_postamble(struct svga_shader_emitter *emit)
3249 {
3250 /* PSIZ output is incredibly fragile and it's very hard to catch
3251 * the types of usage that break it during shader emit. Easier
3252 * just to redirect the main program to a temporary and then only
3253 * touch PSIZ with a hand-crafted MOV below.
3254 */
3255 if (SVGA3dShaderGetRegType(emit->true_psiz.value) != 0) {
3256 if (!submit_op1( emit,
3257 inst_token(SVGA3DOP_MOV),
3258 emit->true_psiz,
3259 scalar(src(emit->temp_psiz), TGSI_SWIZZLE_X) ))
3260 return FALSE;
3261 }
3262
3263 /* Need to perform various manipulations on vertex position to cope
3264 * with the different GL and D3D clip spaces.
3265 */
3266 if (emit->key.vkey.need_prescale) {
3267 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3268 SVGA3dShaderDestToken depth = emit->depth_pos;
3269 SVGA3dShaderDestToken pos = emit->true_pos;
3270 unsigned offset = emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
3271 struct src_register prescale_scale = src_register( SVGA3DREG_CONST,
3272 offset + 0 );
3273 struct src_register prescale_trans = src_register( SVGA3DREG_CONST,
3274 offset + 1 );
3275
3276 if (!submit_op1( emit,
3277 inst_token(SVGA3DOP_MOV),
3278 writemask(depth, TGSI_WRITEMASK_W),
3279 scalar(src(temp_pos), TGSI_SWIZZLE_W) ))
3280 return FALSE;
3281
3282 /* MUL temp_pos.xyz, temp_pos, prescale.scale
3283 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
3284 * --> Note that prescale.trans.w == 0
3285 */
3286 if (!submit_op2( emit,
3287 inst_token(SVGA3DOP_MUL),
3288 writemask(temp_pos, TGSI_WRITEMASK_XYZ),
3289 src(temp_pos),
3290 prescale_scale ))
3291 return FALSE;
3292
3293 if (!submit_op3( emit,
3294 inst_token(SVGA3DOP_MAD),
3295 pos,
3296 swizzle(src(temp_pos), 3, 3, 3, 3),
3297 prescale_trans,
3298 src(temp_pos)))
3299 return FALSE;
3300
3301 /* Also write to depth value */
3302 if (!submit_op3( emit,
3303 inst_token(SVGA3DOP_MAD),
3304 writemask(depth, TGSI_WRITEMASK_Z),
3305 swizzle(src(temp_pos), 3, 3, 3, 3),
3306 prescale_trans,
3307 src(temp_pos) ))
3308 return FALSE;
3309 }
3310 else {
3311 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3312 SVGA3dShaderDestToken depth = emit->depth_pos;
3313 SVGA3dShaderDestToken pos = emit->true_pos;
3314 struct src_register imm_0055 = emit->imm_0055;
3315
3316 /* Adjust GL clipping coordinate space to hardware (D3D-style):
3317 *
3318 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
3319 * MOV result.position, temp_pos
3320 */
3321 if (!submit_op2( emit,
3322 inst_token(SVGA3DOP_DP4),
3323 writemask(temp_pos, TGSI_WRITEMASK_Z),
3324 imm_0055,
3325 src(temp_pos) ))
3326 return FALSE;
3327
3328 if (!submit_op1( emit,
3329 inst_token(SVGA3DOP_MOV),
3330 pos,
3331 src(temp_pos) ))
3332 return FALSE;
3333
3334 /* Move the manipulated depth into the extra texcoord reg */
3335 if (!submit_op1( emit,
3336 inst_token(SVGA3DOP_MOV),
3337 writemask(depth, TGSI_WRITEMASK_ZW),
3338 src(temp_pos) ))
3339 return FALSE;
3340 }
3341
3342 return TRUE;
3343 }
3344
3345
3346 /**
3347 * For the pixel shader: emit the code which chooses the front
3348 * or back face color depending on triangle orientation.
3349 * This happens at the top of the fragment shader.
3350 *
3351 * 0: IF VFACE :4
3352 * 1: COLOR = FrontColor;
3353 * 2: ELSE
3354 * 3: COLOR = BackColor;
3355 * 4: ENDIF
3356 */
3357 static boolean
3358 emit_light_twoside(struct svga_shader_emitter *emit)
3359 {
3360 struct src_register vface, zero;
3361 struct src_register front[2];
3362 struct src_register back[2];
3363 SVGA3dShaderDestToken color[2];
3364 int count = emit->internal_color_count;
3365 int i;
3366 SVGA3dShaderInstToken if_token;
3367
3368 if (count == 0)
3369 return TRUE;
3370
3371 vface = get_vface( emit );
3372 zero = get_zero_immediate(emit);
3373
3374 /* Can't use get_temp() to allocate the color reg as such
3375 * temporaries will be reclaimed after each instruction by the call
3376 * to reset_temp_regs().
3377 */
3378 for (i = 0; i < count; i++) {
3379 color[i] = dst_register( SVGA3DREG_TEMP, emit->nr_hw_temp++ );
3380 front[i] = emit->input_map[emit->internal_color_idx[i]];
3381
3382 /* Back is always the next input:
3383 */
3384 back[i] = front[i];
3385 back[i].base.num = front[i].base.num + 1;
3386
3387 /* Reassign the input_map to the actual front-face color:
3388 */
3389 emit->input_map[emit->internal_color_idx[i]] = src(color[i]);
3390 }
3391
3392 if_token = inst_token( SVGA3DOP_IFC );
3393
3394 if (emit->key.fkey.front_ccw)
3395 if_token.control = SVGA3DOPCOMP_LT;
3396 else
3397 if_token.control = SVGA3DOPCOMP_GT;
3398
3399 if (!(emit_instruction( emit, if_token ) &&
3400 emit_src( emit, vface ) &&
3401 emit_src( emit, zero ) ))
3402 return FALSE;
3403
3404 for (i = 0; i < count; i++) {
3405 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], front[i] ))
3406 return FALSE;
3407 }
3408
3409 if (!(emit_instruction( emit, inst_token( SVGA3DOP_ELSE))))
3410 return FALSE;
3411
3412 for (i = 0; i < count; i++) {
3413 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], back[i] ))
3414 return FALSE;
3415 }
3416
3417 if (!emit_instruction( emit, inst_token( SVGA3DOP_ENDIF ) ))
3418 return FALSE;
3419
3420 return TRUE;
3421 }
3422
3423
3424 /**
3425 * Emit special setup code for the front/back face register in the FS.
3426 * 0: SETP_GT TEMP, VFACE, 0
3427 * where TEMP is a fake frontface register
3428 */
3429 static boolean
3430 emit_frontface(struct svga_shader_emitter *emit)
3431 {
3432 struct src_register vface;
3433 SVGA3dShaderDestToken temp;
3434 struct src_register pass, fail;
3435
3436 vface = get_vface( emit );
3437
3438 /* Can't use get_temp() to allocate the fake frontface reg as such
3439 * temporaries will be reclaimed after each instruction by the call
3440 * to reset_temp_regs().
3441 */
3442 temp = dst_register( SVGA3DREG_TEMP,
3443 emit->nr_hw_temp++ );
3444
3445 if (emit->key.fkey.front_ccw) {
3446 pass = get_zero_immediate(emit);
3447 fail = get_one_immediate(emit);
3448 } else {
3449 pass = get_one_immediate(emit);
3450 fail = get_zero_immediate(emit);
3451 }
3452
3453 if (!emit_conditional(emit, PIPE_FUNC_GREATER,
3454 temp, vface, get_zero_immediate(emit),
3455 pass, fail))
3456 return FALSE;
3457
3458    /* Reassign the input_map entry to the fake front-face register:
3459 */
3460 emit->input_map[emit->internal_frontface_idx] = src(temp);
3461
3462 return TRUE;
3463 }
3464
3465
3466 /**
3467 * Emit code to invert the T component of the incoming texture coordinate.
3468 * This is used for drawing point sprites when
3469 * pipe_rasterizer_state::sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT.
3470 */
3471 static boolean
3472 emit_inverted_texcoords(struct svga_shader_emitter *emit)
3473 {
3474 unsigned inverted_texcoords = emit->inverted_texcoords;
3475
3476 while (inverted_texcoords) {
3477 const unsigned unit = ffs(inverted_texcoords) - 1;
3478
3479 assert(emit->inverted_texcoords & (1 << unit));
3480
3481 assert(unit < Elements(emit->ps_true_texcoord));
3482
3483 assert(unit < Elements(emit->ps_inverted_texcoord_input));
3484
3485 assert(emit->ps_inverted_texcoord_input[unit]
3486 < Elements(emit->input_map));
3487
3488 /* inverted = coord * (1, -1, 1, 1) + (0, 1, 0, 0) */
3489 if (!submit_op3(emit,
3490 inst_token(SVGA3DOP_MAD),
3491 dst(emit->ps_inverted_texcoord[unit]),
3492 emit->ps_true_texcoord[unit],
3493 get_immediate(emit, 1.0f, -1.0f, 1.0f, 1.0f),
3494 get_immediate(emit, 0.0f, 1.0f, 0.0f, 0.0f)))
3495 return FALSE;
3496
3497 /* Reassign the input_map entry to the new texcoord register */
3498 emit->input_map[emit->ps_inverted_texcoord_input[unit]] =
3499 emit->ps_inverted_texcoord[unit];
3500
3501 inverted_texcoords &= ~(1 << unit);
3502 }
3503
3504 return TRUE;
3505 }
3506
3507
3508 /**
3509 * Determine if we need to create the "common" immediate value which is
3510 * used for generating useful vector constants such as {0,0,0,0} and
3511 * {1,1,1,1}.
3512 * We could just do this all the time except that we want to conserve
3513 * registers whenever possible.
3514 */
3515 static boolean
3516 needs_to_create_common_immediate(const struct svga_shader_emitter *emit)
3517 {
3518 unsigned i;
3519
3520 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3521 if (emit->key.fkey.light_twoside)
3522 return TRUE;
3523
3524 if (emit->key.fkey.white_fragments)
3525 return TRUE;
3526
3527 if (emit->emit_frontface)
3528 return TRUE;
3529
3530 if (emit->info.opcode_count[TGSI_OPCODE_DST] >= 1 ||
3531 emit->info.opcode_count[TGSI_OPCODE_SSG] >= 1 ||
3532 emit->info.opcode_count[TGSI_OPCODE_LIT] >= 1)
3533 return TRUE;
3534
3535 if (emit->inverted_texcoords)
3536 return TRUE;
3537
3538 /* look for any PIPE_SWIZZLE_ZERO/ONE terms */
3539 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3540 if (emit->key.fkey.tex[i].swizzle_r > PIPE_SWIZZLE_ALPHA ||
3541 emit->key.fkey.tex[i].swizzle_g > PIPE_SWIZZLE_ALPHA ||
3542 emit->key.fkey.tex[i].swizzle_b > PIPE_SWIZZLE_ALPHA ||
3543 emit->key.fkey.tex[i].swizzle_a > PIPE_SWIZZLE_ALPHA)
3544 return TRUE;
3545 }
3546
3547 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3548 if (emit->key.fkey.tex[i].compare_mode
3549 == PIPE_TEX_COMPARE_R_TO_TEXTURE)
3550 return TRUE;
3551 }
3552 }
3553
3554 if (emit->unit == PIPE_SHADER_VERTEX) {
3555 if (emit->info.opcode_count[TGSI_OPCODE_CMP] >= 1)
3556 return TRUE;
3557 }
3558
3559 if (emit->info.opcode_count[TGSI_OPCODE_IF] >= 1 ||
3560 emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1 ||
3561 emit->info.opcode_count[TGSI_OPCODE_DDX] >= 1 ||
3562 emit->info.opcode_count[TGSI_OPCODE_DDY] >= 1 ||
3563 emit->info.opcode_count[TGSI_OPCODE_ROUND] >= 1 ||
3564 emit->info.opcode_count[TGSI_OPCODE_SGE] >= 1 ||
3565 emit->info.opcode_count[TGSI_OPCODE_SGT] >= 1 ||
3566 emit->info.opcode_count[TGSI_OPCODE_SLE] >= 1 ||
3567 emit->info.opcode_count[TGSI_OPCODE_SLT] >= 1 ||
3568 emit->info.opcode_count[TGSI_OPCODE_SNE] >= 1 ||
3569 emit->info.opcode_count[TGSI_OPCODE_SEQ] >= 1 ||
3570 emit->info.opcode_count[TGSI_OPCODE_EXP] >= 1 ||
3571 emit->info.opcode_count[TGSI_OPCODE_LOG] >= 1 ||
3572 emit->info.opcode_count[TGSI_OPCODE_XPD] >= 1 ||
3573 emit->info.opcode_count[TGSI_OPCODE_KILL] >= 1)
3574 return TRUE;
3575
3576 return FALSE;
3577 }
3578
3579
3580 /**
3581 * Do we need to create a looping constant?
3582 */
3583 static boolean
3584 needs_to_create_loop_const(const struct svga_shader_emitter *emit)
3585 {
3586 return (emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1);
3587 }
3588
3589
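     /**
      * Do we need to create constants for ARL-relative (indirect) addressing?
      */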
3590 static boolean
3591 needs_to_create_arl_consts(const struct svga_shader_emitter *emit)
3592 {
3593 return (emit->num_arl_consts > 0);
3594 }
3595
3596
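     /**
      * Record the most negative register index used with the given ARL so
      * that an adjustment constant can be created for it later.
      */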
3597 static boolean
3598 pre_parse_add_indirect( struct svga_shader_emitter *emit,
3599 int num, int current_arl)
3600 {
3601 int i;
3602 assert(num < 0);
3603
3604 for (i = 0; i < emit->num_arl_consts; ++i) {
3605 if (emit->arl_consts[i].arl_num == current_arl)
3606 break;
3607 }
3608 /* new entry */
3609 if (emit->num_arl_consts == i) {
3610 ++emit->num_arl_consts;
3611 }
3612 emit->arl_consts[i].number = (emit->arl_consts[i].number > num) ?
3613 num :
3614 emit->arl_consts[i].number;
3615 emit->arl_consts[i].arl_num = current_arl;
3616 return TRUE;
3617 }
3618
3619
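     /**
      * Check each source operand of an instruction for indirect (ARL-relative)
      * addressing with a negative index.
      */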
3620 static boolean
3621 pre_parse_instruction( struct svga_shader_emitter *emit,
3622 const struct tgsi_full_instruction *insn,
3623 int current_arl)
3624 {
3625 if (insn->Src[0].Register.Indirect &&
3626 insn->Src[0].Indirect.File == TGSI_FILE_ADDRESS) {
3627 const struct tgsi_full_src_register *reg = &insn->Src[0];
3628 if (reg->Register.Index < 0) {
3629 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3630 }
3631 }
3632
3633 if (insn->Src[1].Register.Indirect &&
3634 insn->Src[1].Indirect.File == TGSI_FILE_ADDRESS) {
3635 const struct tgsi_full_src_register *reg = &insn->Src[1];
3636 if (reg->Register.Index < 0) {
3637 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3638 }
3639 }
3640
3641 if (insn->Src[2].Register.Indirect &&
3642 insn->Src[2].Indirect.File == TGSI_FILE_ADDRESS) {
3643 const struct tgsi_full_src_register *reg = &insn->Src[2];
3644 if (reg->Register.Index < 0) {
3645 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3646 }
3647 }
3648
3649 return TRUE;
3650 }
3651
3652
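     /**
      * Pre-pass over the TGSI tokens to find ARL instructions and negative
      * ARL-relative indices before any code is emitted.
      */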
3653 static boolean
3654 pre_parse_tokens( struct svga_shader_emitter *emit,
3655 const struct tgsi_token *tokens )
3656 {
3657 struct tgsi_parse_context parse;
3658 int current_arl = 0;
3659
3660 tgsi_parse_init( &parse, tokens );
3661
3662 while (!tgsi_parse_end_of_tokens( &parse )) {
3663 tgsi_parse_token( &parse );
3664 switch (parse.FullToken.Token.Type) {
3665 case TGSI_TOKEN_TYPE_IMMEDIATE:
3666 case TGSI_TOKEN_TYPE_DECLARATION:
3667 break;
3668 case TGSI_TOKEN_TYPE_INSTRUCTION:
3669 if (parse.FullToken.FullInstruction.Instruction.Opcode ==
3670 TGSI_OPCODE_ARL) {
3671 ++current_arl;
3672 }
3673 if (!pre_parse_instruction( emit, &parse.FullToken.FullInstruction,
3674 current_arl ))
3675 return FALSE;
3676 break;
3677 default:
3678 break;
3679 }
3680
3681 }
3682 return TRUE;
3683 }
3684
3685
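     /**
      * Emit the common immediates, loop constant, ARL constants and the
      * per-unit preamble code (plus two-sided lighting, front-face and
      * inverted-texcoord setup for fragment shaders) needed by the shader body.
      */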
3686 static boolean
3687 svga_shader_emit_helpers(struct svga_shader_emitter *emit)
3688 {
3689 if (needs_to_create_common_immediate( emit )) {
3690 create_common_immediate( emit );
3691 }
3692 if (needs_to_create_loop_const( emit )) {
3693 create_loop_const( emit );
3694 }
3695 if (needs_to_create_arl_consts( emit )) {
3696 create_arl_consts( emit );
3697 }
3698
3699 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3700 if (!emit_ps_preamble( emit ))
3701 return FALSE;
3702
3703 if (emit->key.fkey.light_twoside) {
3704 if (!emit_light_twoside( emit ))
3705 return FALSE;
3706 }
3707 if (emit->emit_frontface) {
3708 if (!emit_frontface( emit ))
3709 return FALSE;
3710 }
3711 if (emit->inverted_texcoords) {
3712 if (!emit_inverted_texcoords( emit ))
3713 return FALSE;
3714 }
3715 }
3716
3717 return TRUE;
3718 }
3719
3720
3721 /**
3722  * This is the main entry point into the TGSI instruction translator.
3723 * Translate TGSI shader tokens into an SVGA shader.
3724 */
3725 boolean
3726 svga_shader_emit_instructions(struct svga_shader_emitter *emit,
3727 const struct tgsi_token *tokens)
3728 {
3729 struct tgsi_parse_context parse;
3730 boolean ret = TRUE;
3731 boolean helpers_emitted = FALSE;
3732 unsigned line_nr = 0;
3733
3734 tgsi_parse_init( &parse, tokens );
3735 emit->internal_imm_count = 0;
3736
3737 if (emit->unit == PIPE_SHADER_VERTEX) {
3738 ret = emit_vs_preamble( emit );
3739 if (!ret)
3740 goto done;
3741 }
3742
3743 pre_parse_tokens(emit, tokens);
3744
3745 while (!tgsi_parse_end_of_tokens( &parse )) {
3746 tgsi_parse_token( &parse );
3747
3748 switch (parse.FullToken.Token.Type) {
3749 case TGSI_TOKEN_TYPE_IMMEDIATE:
3750 ret = svga_emit_immediate( emit, &parse.FullToken.FullImmediate );
3751 if (!ret)
3752 goto done;
3753 break;
3754
3755 case TGSI_TOKEN_TYPE_DECLARATION:
3756 ret = svga_translate_decl_sm30( emit, &parse.FullToken.FullDeclaration );
3757 if (!ret)
3758 goto done;
3759 break;
3760
3761 case TGSI_TOKEN_TYPE_INSTRUCTION:
3762 if (!helpers_emitted) {
3763 if (!svga_shader_emit_helpers( emit ))
3764 goto done;
3765 helpers_emitted = TRUE;
3766 }
3767 ret = svga_emit_instruction( emit,
3768 line_nr++,
3769 &parse.FullToken.FullInstruction );
3770 if (!ret)
3771 goto done;
3772 break;
3773 default:
3774 break;
3775 }
3776
3777 reset_temp_regs( emit );
3778 }
3779
3780    /* Need to terminate the current subroutine. Note that the
3781     * hardware doesn't tolerate shaders whose subroutines aren't
3782     * terminated with RET before the final END.
3783 */
3784 if (!emit->in_main_func) {
3785 ret = emit_instruction( emit, inst_token( SVGA3DOP_RET ) );
3786 if (!ret)
3787 goto done;
3788 }
3789
3790 assert(emit->dynamic_branching_level == 0);
3791
3792 /* Need to terminate the whole shader:
3793 */
3794 ret = emit_instruction( emit, inst_token( SVGA3DOP_END ) );
3795 if (!ret)
3796 goto done;
3797
3798 done:
3799 tgsi_parse_free( &parse );
3800 return ret;
3801 }