mesa.git: src/gallium/drivers/svga/svga_tgsi_insn.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_dump.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
32
33 #include "svga_tgsi_emit.h"
34 #include "svga_context.h"
35
36
37 static boolean emit_vs_postamble( struct svga_shader_emitter *emit );
38 static boolean emit_ps_postamble( struct svga_shader_emitter *emit );
39
40
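/**
 * Map a TGSI opcode to the corresponding SVGA3D shader opcode.
 * Only opcodes with a direct one-to-one equivalent are handled here;
 * anything else is expanded by the emit_* helpers later in this file.
 */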
41 static unsigned
42 translate_opcode(uint opcode)
43 {
44 switch (opcode) {
45 case TGSI_OPCODE_ABS: return SVGA3DOP_ABS;
46 case TGSI_OPCODE_ADD: return SVGA3DOP_ADD;
47 case TGSI_OPCODE_DP2A: return SVGA3DOP_DP2ADD;
48 case TGSI_OPCODE_DP3: return SVGA3DOP_DP3;
49 case TGSI_OPCODE_DP4: return SVGA3DOP_DP4;
50 case TGSI_OPCODE_FRC: return SVGA3DOP_FRC;
51 case TGSI_OPCODE_MAD: return SVGA3DOP_MAD;
52 case TGSI_OPCODE_MAX: return SVGA3DOP_MAX;
53 case TGSI_OPCODE_MIN: return SVGA3DOP_MIN;
54 case TGSI_OPCODE_MOV: return SVGA3DOP_MOV;
55 case TGSI_OPCODE_MUL: return SVGA3DOP_MUL;
56 case TGSI_OPCODE_NOP: return SVGA3DOP_NOP;
57 default:
58 assert(!"svga: unexpected opcode in translate_opcode()");
59 return SVGA3DOP_LAST_INST;
60 }
61 }
62
63
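/**
 * Map a TGSI register file to the corresponding SVGA3D register file.
 */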
64 static unsigned
65 translate_file(unsigned file)
66 {
67 switch (file) {
68 case TGSI_FILE_TEMPORARY: return SVGA3DREG_TEMP;
69 case TGSI_FILE_INPUT: return SVGA3DREG_INPUT;
70 case TGSI_FILE_OUTPUT: return SVGA3DREG_OUTPUT; /* VS3.0+ only */
71 case TGSI_FILE_IMMEDIATE: return SVGA3DREG_CONST;
72 case TGSI_FILE_CONSTANT: return SVGA3DREG_CONST;
73 case TGSI_FILE_SAMPLER: return SVGA3DREG_SAMPLER;
74 case TGSI_FILE_ADDRESS: return SVGA3DREG_ADDR;
75 default:
76 assert(!"svga: unexpected register file in translate_file()");
77 return SVGA3DREG_TEMP;
78 }
79 }
80
81
82 /**
83 * Translate a TGSI destination register to an SVGA3DShaderDestToken.
84 * \param insn the TGSI instruction
85 * \param idx which TGSI dest register to translate (usually (always?) zero)
86 */
87 static SVGA3dShaderDestToken
88 translate_dst_register( struct svga_shader_emitter *emit,
89 const struct tgsi_full_instruction *insn,
90 unsigned idx )
91 {
92 const struct tgsi_full_dst_register *reg = &insn->Dst[idx];
93 SVGA3dShaderDestToken dest;
94
95 switch (reg->Register.File) {
96 case TGSI_FILE_OUTPUT:
97 /* Output registers encode semantic information in their name.
 98       * We need to look up the mapping table built at declaration time:
99 */
100 dest = emit->output_map[reg->Register.Index];
101 break;
102
103 default:
104 {
105 unsigned index = reg->Register.Index;
106 assert(index < SVGA3D_TEMPREG_MAX);
107 index = MIN2(index, SVGA3D_TEMPREG_MAX - 1);
108 dest = dst_register(translate_file(reg->Register.File), index);
109 }
110 break;
111 }
112
113 if (reg->Register.Indirect) {
114 debug_warning("Indirect indexing of dest registers is not supported!\n");
115 }
116
117 dest.mask = reg->Register.WriteMask;
118 assert(dest.mask);
119
120 if (insn->Instruction.Saturate)
121 dest.dstMod = SVGA3DDSTMOD_SATURATE;
122
123 return dest;
124 }
125
126
127 /**
128 * Apply a swizzle to a src_register, returning a new src_register
129 * Ex: swizzle(SRC.ZZYY, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y)
130 * would return SRC.YYZZ
131 */
132 static struct src_register
133 swizzle(struct src_register src,
134 unsigned x, unsigned y, unsigned z, unsigned w)
135 {
136 assert(x < 4);
137 assert(y < 4);
138 assert(z < 4);
139 assert(w < 4);
140 x = (src.base.swizzle >> (x * 2)) & 0x3;
141 y = (src.base.swizzle >> (y * 2)) & 0x3;
142 z = (src.base.swizzle >> (z * 2)) & 0x3;
143 w = (src.base.swizzle >> (w * 2)) & 0x3;
144
145 src.base.swizzle = TRANSLATE_SWIZZLE(x, y, z, w);
146
147 return src;
148 }
149
150
151 /**
152 * Apply a "scalar" swizzle to a src_register returning a new
153 * src_register where all the swizzle terms are the same.
154 * Ex: scalar(SRC.WZYX, SWIZZLE_Y) would return SRC.ZZZZ
155 */
156 static struct src_register
157 scalar(struct src_register src, unsigned comp)
158 {
159 assert(comp < 4);
160 return swizzle( src, comp, comp, comp, comp );
161 }
162
163
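/**
 * Return TRUE if the current ARL has a constant recorded in
 * emit->arl_consts; if so, constant-relative addressing is rebased by
 * that amount (see translate_src_register and emit_fake_arl).
 */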
164 static boolean
165 svga_arl_needs_adjustment( const struct svga_shader_emitter *emit )
166 {
167 int i;
168
169 for (i = 0; i < emit->num_arl_consts; ++i) {
170 if (emit->arl_consts[i].arl_num == emit->current_arl)
171 return TRUE;
172 }
173 return FALSE;
174 }
175
176
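/**
 * Return the constant recorded for the current ARL, or 0 if none.
 * This is the amount subtracted from constant-relative register
 * indices in translate_src_register().
 */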
177 static int
178 svga_arl_adjustment( const struct svga_shader_emitter *emit )
179 {
180 int i;
181
182 for (i = 0; i < emit->num_arl_consts; ++i) {
183 if (emit->arl_consts[i].arl_num == emit->current_arl)
184 return emit->arl_consts[i].number;
185 }
186 return 0;
187 }
188
189
190 /**
191 * Translate a TGSI src register to a src_register.
192 */
193 static struct src_register
194 translate_src_register( const struct svga_shader_emitter *emit,
195 const struct tgsi_full_src_register *reg )
196 {
197 struct src_register src;
198
199 switch (reg->Register.File) {
200 case TGSI_FILE_INPUT:
201 /* Input registers are referred to by their semantic name rather
 202       * than by index. Use the mapping built up from the declarations:
203 */
204 src = emit->input_map[reg->Register.Index];
205 break;
206
207 case TGSI_FILE_IMMEDIATE:
208 /* Immediates are appended after TGSI constants in the D3D
209 * constant buffer.
210 */
211 src = src_register( translate_file( reg->Register.File ),
212 reg->Register.Index + emit->imm_start );
213 break;
214
215 default:
216 src = src_register( translate_file( reg->Register.File ),
217 reg->Register.Index );
218 break;
219 }
220
221 /* Indirect addressing.
222 */
223 if (reg->Register.Indirect) {
224 if (emit->unit == PIPE_SHADER_FRAGMENT) {
225 /* Pixel shaders have only loop registers for relative
226 * addressing into inputs. Ignore the redundant address
 227             * register; the contents of aL should be in sync with it.
228 */
229 if (reg->Register.File == TGSI_FILE_INPUT) {
230 src.base.relAddr = 1;
231 src.indirect = src_token(SVGA3DREG_LOOP, 0);
232 }
233 }
234 else {
235 /* Constant buffers only.
236 */
237 if (reg->Register.File == TGSI_FILE_CONSTANT) {
238 /* we shift the offset towards the minimum */
239 if (svga_arl_needs_adjustment( emit )) {
240 src.base.num -= svga_arl_adjustment( emit );
241 }
242 src.base.relAddr = 1;
243
244 /* Not really sure what should go in the second token:
245 */
246 src.indirect = src_token( SVGA3DREG_ADDR,
247 reg->Indirect.Index );
248
249 src.indirect.swizzle = SWIZZLE_XXXX;
250 }
251 }
252 }
253
254 src = swizzle( src,
255 reg->Register.SwizzleX,
256 reg->Register.SwizzleY,
257 reg->Register.SwizzleZ,
258 reg->Register.SwizzleW );
259
260 /* src.mod isn't a bitfield, unfortunately:
261 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
262 */
263 if (reg->Register.Absolute) {
264 if (reg->Register.Negate)
265 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
266 else
267 src.base.srcMod = SVGA3DSRCMOD_ABS;
268 }
269 else {
270 if (reg->Register.Negate)
271 src.base.srcMod = SVGA3DSRCMOD_NEG;
272 else
273 src.base.srcMod = SVGA3DSRCMOD_NONE;
274 }
275
276 return src;
277 }
278
279
280 /*
281 * Get a temporary register.
282 * Note: if we exceed the temporary register limit we just use
283 * register SVGA3D_TEMPREG_MAX - 1.
284 */
285 static SVGA3dShaderDestToken
286 get_temp( struct svga_shader_emitter *emit )
287 {
288 int i = emit->nr_hw_temp + emit->internal_temp_count++;
289 if (i >= SVGA3D_TEMPREG_MAX) {
290 debug_warn_once("svga: Too many temporary registers used in shader\n");
291 i = SVGA3D_TEMPREG_MAX - 1;
292 }
293 return dst_register( SVGA3DREG_TEMP, i );
294 }
295
296
297 /**
298 * Release a single temp. Currently only effective if it was the last
299 * allocated temp, otherwise release will be delayed until the next
300 * call to reset_temp_regs().
301 */
302 static void
303 release_temp( struct svga_shader_emitter *emit,
304 SVGA3dShaderDestToken temp )
305 {
306 if (temp.num == emit->internal_temp_count - 1)
307 emit->internal_temp_count--;
308 }
309
310
311 /**
312 * Release all temps.
313 */
314 static void
315 reset_temp_regs(struct svga_shader_emitter *emit)
316 {
317 emit->internal_temp_count = 0;
318 }
319
320
321 /** Emit bytecode for a src_register */
322 static boolean
323 emit_src(struct svga_shader_emitter *emit, const struct src_register src)
324 {
325 if (src.base.relAddr) {
326 assert(src.base.reserved0);
327 assert(src.indirect.reserved0);
328 return (svga_shader_emit_dword( emit, src.base.value ) &&
329 svga_shader_emit_dword( emit, src.indirect.value ));
330 }
331 else {
332 assert(src.base.reserved0);
333 return svga_shader_emit_dword( emit, src.base.value );
334 }
335 }
336
337
338 /** Emit bytecode for a dst_register */
339 static boolean
340 emit_dst(struct svga_shader_emitter *emit, SVGA3dShaderDestToken dest)
341 {
342 assert(dest.reserved0);
343 assert(dest.mask);
344 return svga_shader_emit_dword( emit, dest.value );
345 }
346
347
348 /** Emit bytecode for a 1-operand instruction */
349 static boolean
350 emit_op1(struct svga_shader_emitter *emit,
351 SVGA3dShaderInstToken inst,
352 SVGA3dShaderDestToken dest,
353 struct src_register src0)
354 {
355 return (emit_instruction(emit, inst) &&
356 emit_dst(emit, dest) &&
357 emit_src(emit, src0));
358 }
359
360
361 /** Emit bytecode for a 2-operand instruction */
362 static boolean
363 emit_op2(struct svga_shader_emitter *emit,
364 SVGA3dShaderInstToken inst,
365 SVGA3dShaderDestToken dest,
366 struct src_register src0,
367 struct src_register src1)
368 {
369 return (emit_instruction(emit, inst) &&
370 emit_dst(emit, dest) &&
371 emit_src(emit, src0) &&
372 emit_src(emit, src1));
373 }
374
375
376 /** Emit bytecode for a 3-operand instruction */
377 static boolean
378 emit_op3(struct svga_shader_emitter *emit,
379 SVGA3dShaderInstToken inst,
380 SVGA3dShaderDestToken dest,
381 struct src_register src0,
382 struct src_register src1,
383 struct src_register src2)
384 {
385 return (emit_instruction(emit, inst) &&
386 emit_dst(emit, dest) &&
387 emit_src(emit, src0) &&
388 emit_src(emit, src1) &&
389 emit_src(emit, src2));
390 }
391
392
393 /** Emit bytecode for a 4-operand instruction */
394 static boolean
395 emit_op4(struct svga_shader_emitter *emit,
396 SVGA3dShaderInstToken inst,
397 SVGA3dShaderDestToken dest,
398 struct src_register src0,
399 struct src_register src1,
400 struct src_register src2,
401 struct src_register src3)
402 {
403 return (emit_instruction(emit, inst) &&
404 emit_dst(emit, dest) &&
405 emit_src(emit, src0) &&
406 emit_src(emit, src1) &&
407 emit_src(emit, src2) &&
408 emit_src(emit, src3));
409 }
410
411
412 /**
413 * Apply the absolute value modifier to the given src_register, returning
414 * a new src_register.
415 */
416 static struct src_register
417 absolute(struct src_register src)
418 {
419 src.base.srcMod = SVGA3DSRCMOD_ABS;
420 return src;
421 }
422
423
424 /**
425 * Apply the negation modifier to the given src_register, returning
426 * a new src_register.
427 */
428 static struct src_register
429 negate(struct src_register src)
430 {
431 switch (src.base.srcMod) {
432 case SVGA3DSRCMOD_ABS:
433 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
434 break;
435 case SVGA3DSRCMOD_ABSNEG:
436 src.base.srcMod = SVGA3DSRCMOD_ABS;
437 break;
438 case SVGA3DSRCMOD_NEG:
439 src.base.srcMod = SVGA3DSRCMOD_NONE;
440 break;
441 case SVGA3DSRCMOD_NONE:
442 src.base.srcMod = SVGA3DSRCMOD_NEG;
443 break;
444 }
445 return src;
446 }
447
448
449
450 /* Replace the src with the temporary specified in the dst, but copying
451 * only the necessary channels, and preserving the original swizzle (which is
452 * important given that several opcodes have constraints in the allowed
453 * swizzles).
454 */
455 static boolean
456 emit_repl(struct svga_shader_emitter *emit,
457 SVGA3dShaderDestToken dst,
458 struct src_register *src0)
459 {
460 unsigned src0_swizzle;
461 unsigned chan;
462
463 assert(SVGA3dShaderGetRegType(dst.value) == SVGA3DREG_TEMP);
464
465 src0_swizzle = src0->base.swizzle;
466
467 dst.mask = 0;
468 for (chan = 0; chan < 4; ++chan) {
469 unsigned swizzle = (src0_swizzle >> (chan *2)) & 0x3;
470 dst.mask |= 1 << swizzle;
471 }
472 assert(dst.mask);
473
474 src0->base.swizzle = SVGA3DSWIZZLE_NONE;
475
476 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, *src0 ))
477 return FALSE;
478
479 *src0 = src( dst );
480 src0->base.swizzle = src0_swizzle;
481
482 return TRUE;
483 }
484
485
486 /**
487 * Submit/emit an instruction with zero operands.
488 */
489 static boolean
490 submit_op0(struct svga_shader_emitter *emit,
491 SVGA3dShaderInstToken inst,
492 SVGA3dShaderDestToken dest)
493 {
494 return (emit_instruction( emit, inst ) &&
495 emit_dst( emit, dest ));
496 }
497
498
499 /**
500 * Submit/emit an instruction with one operand.
501 */
502 static boolean
503 submit_op1(struct svga_shader_emitter *emit,
504 SVGA3dShaderInstToken inst,
505 SVGA3dShaderDestToken dest,
506 struct src_register src0)
507 {
508 return emit_op1( emit, inst, dest, src0 );
509 }
510
511
512 /**
513 * Submit/emit an instruction with two operands.
514 *
515 * SVGA shaders may not refer to >1 constant register in a single
516 * instruction. This function checks for that usage and inserts a
517 * move to temporary if detected.
518 *
519 * The same applies to input registers -- at most a single input
520 * register may be read by any instruction.
521 */
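/* For example, "ADD dst, c0, c1" would be emitted as
 *    MOV tmp, c0
 *    ADD dst, tmp, c1
 * so that the ADD itself reads only one constant register.
 */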
522 static boolean
523 submit_op2(struct svga_shader_emitter *emit,
524 SVGA3dShaderInstToken inst,
525 SVGA3dShaderDestToken dest,
526 struct src_register src0,
527 struct src_register src1)
528 {
529 SVGA3dShaderDestToken temp;
530 SVGA3dShaderRegType type0, type1;
531 boolean need_temp = FALSE;
532
533 temp.value = 0;
534 type0 = SVGA3dShaderGetRegType( src0.base.value );
535 type1 = SVGA3dShaderGetRegType( src1.base.value );
536
537 if (type0 == SVGA3DREG_CONST &&
538 type1 == SVGA3DREG_CONST &&
539 src0.base.num != src1.base.num)
540 need_temp = TRUE;
541
542 if (type0 == SVGA3DREG_INPUT &&
543 type1 == SVGA3DREG_INPUT &&
544 src0.base.num != src1.base.num)
545 need_temp = TRUE;
546
547 if (need_temp) {
548 temp = get_temp( emit );
549
550 if (!emit_repl( emit, temp, &src0 ))
551 return FALSE;
552 }
553
554 if (!emit_op2( emit, inst, dest, src0, src1 ))
555 return FALSE;
556
557 if (need_temp)
558 release_temp( emit, temp );
559
560 return TRUE;
561 }
562
563
564 /**
565 * Submit/emit an instruction with three operands.
566 *
567 * SVGA shaders may not refer to >1 constant register in a single
568 * instruction. This function checks for that usage and inserts a
569 * move to temporary if detected.
570 */
571 static boolean
572 submit_op3(struct svga_shader_emitter *emit,
573 SVGA3dShaderInstToken inst,
574 SVGA3dShaderDestToken dest,
575 struct src_register src0,
576 struct src_register src1,
577 struct src_register src2)
578 {
579 SVGA3dShaderDestToken temp0;
580 SVGA3dShaderDestToken temp1;
581 boolean need_temp0 = FALSE;
582 boolean need_temp1 = FALSE;
583 SVGA3dShaderRegType type0, type1, type2;
584
585 temp0.value = 0;
586 temp1.value = 0;
587 type0 = SVGA3dShaderGetRegType( src0.base.value );
588 type1 = SVGA3dShaderGetRegType( src1.base.value );
589 type2 = SVGA3dShaderGetRegType( src2.base.value );
590
591 if (inst.op != SVGA3DOP_SINCOS) {
592 if (type0 == SVGA3DREG_CONST &&
593 ((type1 == SVGA3DREG_CONST && src0.base.num != src1.base.num) ||
594 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
595 need_temp0 = TRUE;
596
597 if (type1 == SVGA3DREG_CONST &&
598 (type2 == SVGA3DREG_CONST && src1.base.num != src2.base.num))
599 need_temp1 = TRUE;
600 }
601
602 if (type0 == SVGA3DREG_INPUT &&
603 ((type1 == SVGA3DREG_INPUT && src0.base.num != src1.base.num) ||
604 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
605 need_temp0 = TRUE;
606
607 if (type1 == SVGA3DREG_INPUT &&
608 (type2 == SVGA3DREG_INPUT && src1.base.num != src2.base.num))
609 need_temp1 = TRUE;
610
611 if (need_temp0) {
612 temp0 = get_temp( emit );
613
614 if (!emit_repl( emit, temp0, &src0 ))
615 return FALSE;
616 }
617
618 if (need_temp1) {
619 temp1 = get_temp( emit );
620
621 if (!emit_repl( emit, temp1, &src1 ))
622 return FALSE;
623 }
624
625 if (!emit_op3( emit, inst, dest, src0, src1, src2 ))
626 return FALSE;
627
628 if (need_temp1)
629 release_temp( emit, temp1 );
630 if (need_temp0)
631 release_temp( emit, temp0 );
632 return TRUE;
633 }
634
635
636 /**
637 * Submit/emit an instruction with four operands.
638 *
639 * SVGA shaders may not refer to >1 constant register in a single
640 * instruction. This function checks for that usage and inserts a
641 * move to temporary if detected.
642 */
643 static boolean
644 submit_op4(struct svga_shader_emitter *emit,
645 SVGA3dShaderInstToken inst,
646 SVGA3dShaderDestToken dest,
647 struct src_register src0,
648 struct src_register src1,
649 struct src_register src2,
650 struct src_register src3)
651 {
652 SVGA3dShaderDestToken temp0;
653 SVGA3dShaderDestToken temp3;
654 boolean need_temp0 = FALSE;
655 boolean need_temp3 = FALSE;
656 SVGA3dShaderRegType type0, type1, type2, type3;
657
658 temp0.value = 0;
659 temp3.value = 0;
660 type0 = SVGA3dShaderGetRegType( src0.base.value );
661 type1 = SVGA3dShaderGetRegType( src1.base.value );
662 type2 = SVGA3dShaderGetRegType( src2.base.value );
 663       type3 = SVGA3dShaderGetRegType( src3.base.value );
664
 665       /* Make life a little easier - this is only used by the TXD
 666        * instruction, whose second operand is always a sampler, so at least
 667        * one slot is guaranteed not to hold a constant or input register:
 668        */
669 assert(type1 == SVGA3DREG_SAMPLER);
670
671 if (type0 == SVGA3DREG_CONST &&
672 ((type3 == SVGA3DREG_CONST && src0.base.num != src3.base.num) ||
673 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
674 need_temp0 = TRUE;
675
676 if (type3 == SVGA3DREG_CONST &&
677 (type2 == SVGA3DREG_CONST && src3.base.num != src2.base.num))
678 need_temp3 = TRUE;
679
680 if (type0 == SVGA3DREG_INPUT &&
681 ((type3 == SVGA3DREG_INPUT && src0.base.num != src3.base.num) ||
682 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
683 need_temp0 = TRUE;
684
685 if (type3 == SVGA3DREG_INPUT &&
686 (type2 == SVGA3DREG_INPUT && src3.base.num != src2.base.num))
687 need_temp3 = TRUE;
688
689 if (need_temp0) {
690 temp0 = get_temp( emit );
691
692 if (!emit_repl( emit, temp0, &src0 ))
693 return FALSE;
694 }
695
696 if (need_temp3) {
697 temp3 = get_temp( emit );
698
699 if (!emit_repl( emit, temp3, &src3 ))
700 return FALSE;
701 }
702
703 if (!emit_op4( emit, inst, dest, src0, src1, src2, src3 ))
704 return FALSE;
705
706 if (need_temp3)
707 release_temp( emit, temp3 );
708 if (need_temp0)
709 release_temp( emit, temp0 );
710 return TRUE;
711 }
712
713
714 /**
715 * Do the src and dest registers refer to the same register?
716 */
717 static boolean
718 alias_src_dst(struct src_register src,
719 SVGA3dShaderDestToken dst)
720 {
721 if (src.base.num != dst.num)
722 return FALSE;
723
724 if (SVGA3dShaderGetRegType(dst.value) !=
725 SVGA3dShaderGetRegType(src.base.value))
726 return FALSE;
727
728 return TRUE;
729 }
730
731
732 /**
733 * Helper for emitting SVGA immediate values using the SVGA3DOP_DEF[I]
734 * instructions.
735 */
736 static boolean
737 emit_def_const(struct svga_shader_emitter *emit,
738 SVGA3dShaderConstType type,
739 unsigned idx, float a, float b, float c, float d)
740 {
741 SVGA3DOpDefArgs def;
742 SVGA3dShaderInstToken opcode;
743
744 switch (type) {
745 case SVGA3D_CONST_TYPE_FLOAT:
746 opcode = inst_token( SVGA3DOP_DEF );
747 def.dst = dst_register( SVGA3DREG_CONST, idx );
748 def.constValues[0] = a;
749 def.constValues[1] = b;
750 def.constValues[2] = c;
751 def.constValues[3] = d;
752 break;
753 case SVGA3D_CONST_TYPE_INT:
754 opcode = inst_token( SVGA3DOP_DEFI );
755 def.dst = dst_register( SVGA3DREG_CONSTINT, idx );
756 def.constIValues[0] = (int)a;
757 def.constIValues[1] = (int)b;
758 def.constIValues[2] = (int)c;
759 def.constIValues[3] = (int)d;
760 break;
761 default:
762 assert(0);
763 opcode = inst_token( SVGA3DOP_NOP );
764 break;
765 }
766
767 if (!emit_instruction(emit, opcode) ||
768 !svga_shader_emit_dwords( emit, def.values, Elements(def.values)))
769 return FALSE;
770
771 return TRUE;
772 }
773
774
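/**
 * Emit the integer constant used by LOOP instructions:
 * {iteration count = 255, initial value = 0, step = 1}.
 */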
775 static boolean
776 create_loop_const( struct svga_shader_emitter *emit )
777 {
778 unsigned idx = emit->nr_hw_int_const++;
779
780 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_INT, idx,
781 255, /* iteration count */
782 0, /* initial value */
783 1, /* step size */
784 0 /* not used, must be 0 */))
785 return FALSE;
786
787 emit->loop_const_idx = idx;
788 emit->created_loop_const = TRUE;
789
790 return TRUE;
791 }
792
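/**
 * Emit float constants holding the values referenced by ARL
 * instructions, packed four to a constant register, recording each
 * value's register index and component so get_fake_arl_const() can
 * retrieve it later.
 */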
793 static boolean
794 create_arl_consts( struct svga_shader_emitter *emit )
795 {
796 int i;
797
798 for (i = 0; i < emit->num_arl_consts; i += 4) {
799 int j;
800 unsigned idx = emit->nr_hw_float_const++;
801 float vals[4];
802 for (j = 0; j < 4 && (j + i) < emit->num_arl_consts; ++j) {
803 vals[j] = (float) emit->arl_consts[i + j].number;
804 emit->arl_consts[i + j].idx = idx;
 805             switch (j) {
 806             case 0:
 807                emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_X;
 808                break;
 809             case 1:
 810                emit->arl_consts[i + 1].swizzle = TGSI_SWIZZLE_Y;
 811                break;
 812             case 2:
 813                emit->arl_consts[i + 2].swizzle = TGSI_SWIZZLE_Z;
 814                break;
 815             case 3:
 816                emit->arl_consts[i + 3].swizzle = TGSI_SWIZZLE_W;
 817                break;
 818             }
819 }
820 while (j < 4)
821 vals[j++] = 0;
822
823 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
824 vals[0], vals[1],
825 vals[2], vals[3]))
826 return FALSE;
827 }
828
829 return TRUE;
830 }
831
832
833 /**
 834  * Return the register which holds the pixel shader's front/back-
835 * facing value.
836 */
837 static struct src_register
838 get_vface( struct svga_shader_emitter *emit )
839 {
840 assert(emit->emitted_vface);
841 return src_register(SVGA3DREG_MISCTYPE, SVGA3DMISCREG_FACE);
842 }
843
844
845 /**
846 * Create/emit a "common" constant with values {0, 0.5, -1, 1}.
847 * We can swizzle this to produce other useful constants such as
848 * {0, 0, 0, 0}, {1, 1, 1, 1}, etc.
849 */
850 static boolean
851 create_common_immediate( struct svga_shader_emitter *emit )
852 {
853 unsigned idx = emit->nr_hw_float_const++;
854
855 /* Emit the constant (0, 0.5, -1, 1) and use swizzling to generate
856 * other useful vectors.
857 */
858 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
859 idx, 0.0f, 0.5f, -1.0f, 1.0f ))
860 return FALSE;
861 emit->common_immediate_idx[0] = idx;
862 idx++;
863
864 /* Emit constant {2, 0, 0, 0} (only the 2 is used for now) */
865 if (emit->key.vkey.adjust_attrib_range) {
866 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
867 idx, 2.0f, 0.0f, 0.0f, 0.0f ))
868 return FALSE;
869 emit->common_immediate_idx[1] = idx;
870 }
871 else {
872 emit->common_immediate_idx[1] = -1;
873 }
874
875 emit->created_common_immediate = TRUE;
876
877 return TRUE;
878 }
879
880
881 /**
882 * Return swizzle/position for the given value in the "common" immediate.
883 */
884 static inline unsigned
885 common_immediate_swizzle(float value)
886 {
887 if (value == 0.0f)
888 return TGSI_SWIZZLE_X;
889 else if (value == 0.5f)
890 return TGSI_SWIZZLE_Y;
891 else if (value == -1.0f)
892 return TGSI_SWIZZLE_Z;
893 else if (value == 1.0f)
894 return TGSI_SWIZZLE_W;
895 else {
896 assert(!"illegal value in common_immediate_swizzle");
897 return TGSI_SWIZZLE_X;
898 }
899 }
900
901
902 /**
 903  * Returns an immediate reg where each component is one of 0, 0.5, -1 or 1
904 */
905 static struct src_register
906 get_immediate(struct svga_shader_emitter *emit,
907 float x, float y, float z, float w)
908 {
909 unsigned sx = common_immediate_swizzle(x);
910 unsigned sy = common_immediate_swizzle(y);
911 unsigned sz = common_immediate_swizzle(z);
912 unsigned sw = common_immediate_swizzle(w);
913 assert(emit->created_common_immediate);
914 assert(emit->common_immediate_idx[0] >= 0);
915 return swizzle(src_register(SVGA3DREG_CONST, emit->common_immediate_idx[0]),
916 sx, sy, sz, sw);
917 }
918
919
920 /**
921 * returns {0, 0, 0, 0} immediate
922 */
923 static struct src_register
924 get_zero_immediate( struct svga_shader_emitter *emit )
925 {
926 assert(emit->created_common_immediate);
927 assert(emit->common_immediate_idx[0] >= 0);
928 return swizzle(src_register( SVGA3DREG_CONST,
929 emit->common_immediate_idx[0]),
930 0, 0, 0, 0);
931 }
932
933
934 /**
935 * returns {1, 1, 1, 1} immediate
936 */
937 static struct src_register
938 get_one_immediate( struct svga_shader_emitter *emit )
939 {
940 assert(emit->created_common_immediate);
941 assert(emit->common_immediate_idx[0] >= 0);
942 return swizzle(src_register( SVGA3DREG_CONST,
943 emit->common_immediate_idx[0]),
944 3, 3, 3, 3);
945 }
946
947
948 /**
949 * returns {0.5, 0.5, 0.5, 0.5} immediate
950 */
951 static struct src_register
952 get_half_immediate( struct svga_shader_emitter *emit )
953 {
954 assert(emit->created_common_immediate);
955 assert(emit->common_immediate_idx[0] >= 0);
956 return swizzle(src_register(SVGA3DREG_CONST, emit->common_immediate_idx[0]),
957 1, 1, 1, 1);
958 }
959
960
961 /**
962 * returns {2, 2, 2, 2} immediate
963 */
964 static struct src_register
965 get_two_immediate( struct svga_shader_emitter *emit )
966 {
967 /* Note we use the second common immediate here */
968 assert(emit->created_common_immediate);
969 assert(emit->common_immediate_idx[1] >= 0);
970 return swizzle(src_register( SVGA3DREG_CONST,
971 emit->common_immediate_idx[1]),
972 0, 0, 0, 0);
973 }
974
975
976 /**
977 * returns the loop const
978 */
979 static struct src_register
980 get_loop_const( struct svga_shader_emitter *emit )
981 {
982 assert(emit->created_loop_const);
983 assert(emit->loop_const_idx >= 0);
984 return src_register( SVGA3DREG_CONSTINT,
985 emit->loop_const_idx );
986 }
987
988
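/**
 * Return a scalar src_register referencing the constant recorded for
 * the current ARL by create_arl_consts().
 */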
989 static struct src_register
990 get_fake_arl_const( struct svga_shader_emitter *emit )
991 {
992 struct src_register reg;
993 int idx = 0, swizzle = 0, i;
994
995 for (i = 0; i < emit->num_arl_consts; ++ i) {
996 if (emit->arl_consts[i].arl_num == emit->current_arl) {
997 idx = emit->arl_consts[i].idx;
998 swizzle = emit->arl_consts[i].swizzle;
999 }
1000 }
1001
1002 reg = src_register( SVGA3DREG_CONST, idx );
1003 return scalar(reg, swizzle);
1004 }
1005
1006
1007 /**
1008 * Return a register which holds the width and height of the texture
1009 * currently bound to the given sampler.
1010 */
1011 static struct src_register
1012 get_tex_dimensions( struct svga_shader_emitter *emit, int sampler_num )
1013 {
1014 int idx;
1015 struct src_register reg;
1016
1017 /* the width/height indexes start right after constants */
1018 idx = emit->key.fkey.tex[sampler_num].width_height_idx +
1019 emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
1020
1021 reg = src_register( SVGA3DREG_CONST, idx );
1022 return reg;
1023 }
1024
1025
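/**
 * Emit a TGSI ARL whose value involves one of the recorded ARL
 * constants: add the constant back onto the source before the MOVA,
 * mirroring the rebasing done in translate_src_register().
 */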
1026 static boolean
1027 emit_fake_arl(struct svga_shader_emitter *emit,
1028 const struct tgsi_full_instruction *insn)
1029 {
1030 const struct src_register src0 =
1031 translate_src_register(emit, &insn->Src[0] );
1032 struct src_register src1 = get_fake_arl_const( emit );
1033 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1034 SVGA3dShaderDestToken tmp = get_temp( emit );
1035
1036 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
1037 return FALSE;
1038
1039 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), tmp, src( tmp ),
1040 src1))
1041 return FALSE;
1042
1043 /* replicate the original swizzle */
1044 src1 = src(tmp);
1045 src1.base.swizzle = src0.base.swizzle;
1046
1047 return submit_op1( emit, inst_token( SVGA3DOP_MOVA ),
1048 dst, src1 );
1049 }
1050
1051
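/**
 * Translate/emit a TGSI IF instruction as an SVGA3D IFC comparing the
 * source against zero with a not-equal test.
 */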
1052 static boolean
1053 emit_if(struct svga_shader_emitter *emit,
1054 const struct tgsi_full_instruction *insn)
1055 {
1056 struct src_register src0 =
1057 translate_src_register(emit, &insn->Src[0]);
1058 struct src_register zero = get_zero_immediate(emit);
1059 SVGA3dShaderInstToken if_token = inst_token( SVGA3DOP_IFC );
1060
1061 if_token.control = SVGA3DOPCOMPC_NE;
1062
1063 if (SVGA3dShaderGetRegType(src0.base.value) == SVGA3DREG_CONST) {
 1064       /* IFC can read at most one distinct constant register; the zero
 1065        * immediate is also a constant, so move a constant src0 to a temp.
 1066        */
1067 SVGA3dShaderDestToken tmp = get_temp( emit );
1068
1069 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
1070 return FALSE;
1071
1072 src0 = scalar(src( tmp ), TGSI_SWIZZLE_X);
1073 }
1074
1075 emit->dynamic_branching_level++;
1076
1077 return (emit_instruction( emit, if_token ) &&
1078 emit_src( emit, src0 ) &&
1079 emit_src( emit, zero ) );
1080 }
1081
1082
1083 static boolean
1084 emit_else(struct svga_shader_emitter *emit,
1085 const struct tgsi_full_instruction *insn)
1086 {
1087 return emit_instruction(emit, inst_token(SVGA3DOP_ELSE));
1088 }
1089
1090
1091 static boolean
1092 emit_endif(struct svga_shader_emitter *emit,
1093 const struct tgsi_full_instruction *insn)
1094 {
1095 emit->dynamic_branching_level--;
1096
1097 return emit_instruction(emit, inst_token(SVGA3DOP_ENDIF));
1098 }
1099
1100
1101 /**
1102 * Translate the following TGSI FLR instruction.
1103 * FLR DST, SRC
1104 * To the following SVGA3D instruction sequence.
1105 * FRC TMP, SRC
1106 * SUB DST, SRC, TMP
1107 */
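/* This relies on floor(x) = x - frac(x); the SUB is expressed below as
 * an ADD with a negated source.
 */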
1108 static boolean
1109 emit_floor(struct svga_shader_emitter *emit,
1110 const struct tgsi_full_instruction *insn )
1111 {
1112 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1113 const struct src_register src0 =
1114 translate_src_register(emit, &insn->Src[0] );
1115 SVGA3dShaderDestToken temp = get_temp( emit );
1116
1117 /* FRC TMP, SRC */
1118 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ), temp, src0 ))
1119 return FALSE;
1120
1121 /* SUB DST, SRC, TMP */
1122 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src0,
1123 negate( src( temp ) ) ))
1124 return FALSE;
1125
1126 return TRUE;
1127 }
1128
1129
1130 /**
1131 * Translate the following TGSI CEIL instruction.
1132 * CEIL DST, SRC
1133 * To the following SVGA3D instruction sequence.
1134 * FRC TMP, -SRC
1135 * ADD DST, SRC, TMP
1136 */
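/* This relies on ceil(x) = x + frac(-x), with frac() always non-negative. */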
1137 static boolean
1138 emit_ceil(struct svga_shader_emitter *emit,
1139 const struct tgsi_full_instruction *insn)
1140 {
1141 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
1142 const struct src_register src0 =
1143 translate_src_register(emit, &insn->Src[0]);
1144 SVGA3dShaderDestToken temp = get_temp(emit);
1145
1146 /* FRC TMP, -SRC */
1147 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), temp, negate(src0)))
1148 return FALSE;
1149
1150 /* ADD DST, SRC, TMP */
1151 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), dst, src0, src(temp)))
1152 return FALSE;
1153
1154 return TRUE;
1155 }
1156
1157
1158 /**
1159 * Translate the following TGSI DIV instruction.
1160 * DIV DST.xy, SRC0, SRC1
1161 * To the following SVGA3D instruction sequence.
1162 * RCP TMP.x, SRC1.xxxx
1163 * RCP TMP.y, SRC1.yyyy
1164 * MUL DST.xy, SRC0, TMP
1165 */
1166 static boolean
1167 emit_div(struct svga_shader_emitter *emit,
1168 const struct tgsi_full_instruction *insn )
1169 {
1170 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1171 const struct src_register src0 =
1172 translate_src_register(emit, &insn->Src[0] );
1173 const struct src_register src1 =
1174 translate_src_register(emit, &insn->Src[1] );
1175 SVGA3dShaderDestToken temp = get_temp( emit );
1176 int i;
1177
1178 /* For each enabled element, perform a RCP instruction. Note that
1179 * RCP is scalar in SVGA3D:
1180 */
1181 for (i = 0; i < 4; i++) {
1182 unsigned channel = 1 << i;
1183 if (dst.mask & channel) {
1184 /* RCP TMP.?, SRC1.???? */
1185 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1186 writemask(temp, channel),
1187 scalar(src1, i) ))
1188 return FALSE;
1189 }
1190 }
1191
1192 /* Vector mul:
1193 * MUL DST, SRC0, TMP
1194 */
1195 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst, src0,
1196 src( temp ) ))
1197 return FALSE;
1198
1199 return TRUE;
1200 }
1201
1202
1203 /**
1204 * Translate the following TGSI DP2 instruction.
1205 * DP2 DST, SRC1, SRC2
1206 * To the following SVGA3D instruction sequence.
1207 * MUL TMP, SRC1, SRC2
1208 * ADD DST, TMP.xxxx, TMP.yyyy
1209 */
1210 static boolean
1211 emit_dp2(struct svga_shader_emitter *emit,
1212 const struct tgsi_full_instruction *insn )
1213 {
1214 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1215 const struct src_register src0 =
1216 translate_src_register(emit, &insn->Src[0]);
1217 const struct src_register src1 =
1218 translate_src_register(emit, &insn->Src[1]);
1219 SVGA3dShaderDestToken temp = get_temp( emit );
1220 struct src_register temp_src0, temp_src1;
1221
1222 /* MUL TMP, SRC1, SRC2 */
1223 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), temp, src0, src1 ))
1224 return FALSE;
1225
1226 temp_src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1227 temp_src1 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1228
1229 /* ADD DST, TMP.xxxx, TMP.yyyy */
1230 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1231 temp_src0, temp_src1 ))
1232 return FALSE;
1233
1234 return TRUE;
1235 }
1236
1237
1238 /**
1239 * Translate the following TGSI DPH instruction.
1240 * DPH DST, SRC1, SRC2
1241 * To the following SVGA3D instruction sequence.
1242 * DP3 TMP, SRC1, SRC2
1243 * ADD DST, TMP, SRC2.wwww
1244 */
1245 static boolean
1246 emit_dph(struct svga_shader_emitter *emit,
1247 const struct tgsi_full_instruction *insn )
1248 {
1249 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1250 const struct src_register src0 = translate_src_register(
1251 emit, &insn->Src[0] );
1252 struct src_register src1 =
1253 translate_src_register(emit, &insn->Src[1]);
1254 SVGA3dShaderDestToken temp = get_temp( emit );
1255
1256 /* DP3 TMP, SRC1, SRC2 */
1257 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src1 ))
1258 return FALSE;
1259
1260 src1 = scalar(src1, TGSI_SWIZZLE_W);
1261
1262 /* ADD DST, TMP, SRC2.wwww */
1263 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1264 src( temp ), src1 ))
1265 return FALSE;
1266
1267 return TRUE;
1268 }
1269
1270
1271 /**
1272 * Sine / Cosine helper function.
1273 */
1274 static boolean
1275 do_emit_sincos(struct svga_shader_emitter *emit,
1276 SVGA3dShaderDestToken dst,
1277 struct src_register src0)
1278 {
1279 src0 = scalar(src0, TGSI_SWIZZLE_X);
1280 return submit_op1(emit, inst_token(SVGA3DOP_SINCOS), dst, src0);
1281 }
1282
1283
1284 /**
 1285  * Translate/emit a TGSI SIN, COS or SCS instruction.
1286 */
1287 static boolean
1288 emit_sincos(struct svga_shader_emitter *emit,
1289 const struct tgsi_full_instruction *insn)
1290 {
1291 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1292 struct src_register src0 = translate_src_register(emit, &insn->Src[0]);
1293 SVGA3dShaderDestToken temp = get_temp( emit );
1294
1295 /* SCS TMP SRC */
1296 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_XY), src0 ))
1297 return FALSE;
1298
1299 /* MOV DST TMP */
1300 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src( temp ) ))
1301 return FALSE;
1302
1303 return TRUE;
1304 }
1305
1306
1307 /**
1308 * Translate TGSI SIN instruction into:
1309 * SCS TMP SRC
1310 * MOV DST TMP.yyyy
1311 */
1312 static boolean
1313 emit_sin(struct svga_shader_emitter *emit,
1314 const struct tgsi_full_instruction *insn )
1315 {
1316 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1317 struct src_register src0 =
1318 translate_src_register(emit, &insn->Src[0] );
1319 SVGA3dShaderDestToken temp = get_temp( emit );
1320
1321 /* SCS TMP SRC */
1322 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_Y), src0))
1323 return FALSE;
1324
1325 src0 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1326
1327 /* MOV DST TMP.yyyy */
1328 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1329 return FALSE;
1330
1331 return TRUE;
1332 }
1333
1334
1335 /*
1336 * Translate TGSI COS instruction into:
1337 * SCS TMP SRC
1338 * MOV DST TMP.xxxx
1339 */
1340 static boolean
1341 emit_cos(struct svga_shader_emitter *emit,
1342 const struct tgsi_full_instruction *insn)
1343 {
1344 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1345 struct src_register src0 =
1346 translate_src_register(emit, &insn->Src[0] );
1347 SVGA3dShaderDestToken temp = get_temp( emit );
1348
1349 /* SCS TMP SRC */
1350 if (!do_emit_sincos( emit, writemask(temp, TGSI_WRITEMASK_X), src0 ))
1351 return FALSE;
1352
1353 src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1354
1355 /* MOV DST TMP.xxxx */
1356 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1357 return FALSE;
1358
1359 return TRUE;
1360 }
1361
1362
1363 /**
1364 * Translate/emit TGSI SSG (Set Sign: -1, 0, +1) instruction.
1365 */
1366 static boolean
1367 emit_ssg(struct svga_shader_emitter *emit,
1368 const struct tgsi_full_instruction *insn)
1369 {
1370 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1371 struct src_register src0 =
1372 translate_src_register(emit, &insn->Src[0] );
1373 SVGA3dShaderDestToken temp0 = get_temp( emit );
1374 SVGA3dShaderDestToken temp1 = get_temp( emit );
1375 struct src_register zero, one;
1376
1377 if (emit->unit == PIPE_SHADER_VERTEX) {
1378 /* SGN DST, SRC0, TMP0, TMP1 */
1379 return submit_op3( emit, inst_token( SVGA3DOP_SGN ), dst, src0,
1380 src( temp0 ), src( temp1 ) );
1381 }
1382
1383 one = get_one_immediate(emit);
1384 zero = get_zero_immediate(emit);
1385
1386 /* CMP TMP0, SRC0, one, zero */
1387 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1388 writemask( temp0, dst.mask ), src0, one, zero ))
1389 return FALSE;
1390
1391 /* CMP TMP1, negate(SRC0), negate(one), zero */
1392 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1393 writemask( temp1, dst.mask ), negate( src0 ), negate( one ),
1394 zero ))
1395 return FALSE;
1396
1397 /* ADD DST, TMP0, TMP1 */
1398 return submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src( temp0 ),
1399 src( temp1 ) );
1400 }
1401
1402
1403 /**
1404 * Translate/emit TGSI SUB instruction as:
1405 * ADD DST, SRC0, negate(SRC1)
1406 */
1407 static boolean
1408 emit_sub(struct svga_shader_emitter *emit,
1409 const struct tgsi_full_instruction *insn)
1410 {
1411 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1412 struct src_register src0 = translate_src_register(
1413 emit, &insn->Src[0] );
1414 struct src_register src1 = translate_src_register(
1415 emit, &insn->Src[1] );
1416
1417 src1 = negate(src1);
1418
1419 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1420 src0, src1 ))
1421 return FALSE;
1422
1423 return TRUE;
1424 }
1425
1426
1427 /**
1428 * Translate/emit KILL_IF instruction (kill if any of X,Y,Z,W are negative).
1429 */
1430 static boolean
1431 emit_kill_if(struct svga_shader_emitter *emit,
1432 const struct tgsi_full_instruction *insn)
1433 {
1434 const struct tgsi_full_src_register *reg = &insn->Src[0];
1435 struct src_register src0, srcIn;
1436 const boolean special = (reg->Register.Absolute ||
1437 reg->Register.Negate ||
1438 reg->Register.Indirect ||
1439 reg->Register.SwizzleX != 0 ||
1440 reg->Register.SwizzleY != 1 ||
1441 reg->Register.SwizzleZ != 2 ||
1442 reg->Register.File != TGSI_FILE_TEMPORARY);
1443 SVGA3dShaderDestToken temp;
1444
1445 src0 = srcIn = translate_src_register( emit, reg );
1446
1447 if (special) {
1448 /* need a temp reg */
1449 temp = get_temp( emit );
1450 }
1451
1452 if (special) {
1453 /* move the source into a temp register */
1454 submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, src0);
1455
1456 src0 = src( temp );
1457 }
1458
1459 /* Do the texkill by checking if any of the XYZW components are < 0.
 1460     * Note that ps_2_0 and later take XYZW into consideration, while ps_1_x
 1461     * only uses XYZ. The MSDN documentation about this is incorrect.
1462 */
1463 if (!submit_op0( emit, inst_token( SVGA3DOP_TEXKILL ), dst(src0) ))
1464 return FALSE;
1465
1466 return TRUE;
1467 }
1468
1469
1470 /**
1471 * Translate/emit unconditional kill instruction (usually found inside
1472 * an IF/ELSE/ENDIF block).
1473 */
1474 static boolean
1475 emit_kill(struct svga_shader_emitter *emit,
1476 const struct tgsi_full_instruction *insn)
1477 {
1478 SVGA3dShaderDestToken temp;
1479 struct src_register one = get_one_immediate(emit);
1480 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_TEXKILL );
1481
 1482    /* texkill doesn't allow negation on the operand, so let's move the
 1483     * negated {1} vector into a temp register */
1484 temp = get_temp( emit );
1485 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp,
1486 negate( one ) ))
1487 return FALSE;
1488
1489 return submit_op0( emit, inst, temp );
1490 }
1491
1492
1493 /**
1494 * Test if r1 and r2 are the same register.
1495 */
1496 static boolean
1497 same_register(struct src_register r1, struct src_register r2)
1498 {
1499 return (r1.base.num == r2.base.num &&
1500 r1.base.type_upper == r2.base.type_upper &&
1501 r1.base.type_lower == r2.base.type_lower);
1502 }
1503
1504
1505
1506 /**
1507 * Implement conditionals by initializing destination reg to 'fail',
1508 * then set predicate reg with UFOP_SETP, then move 'pass' to dest
1509 * based on predicate reg.
1510 *
1511 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1512 * MOV dst, fail
1513 * MOV dst, pass, p0
1514 */
1515 static boolean
1516 emit_conditional(struct svga_shader_emitter *emit,
1517 unsigned compare_func,
1518 SVGA3dShaderDestToken dst,
1519 struct src_register src0,
1520 struct src_register src1,
1521 struct src_register pass,
1522 struct src_register fail)
1523 {
1524 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1525 SVGA3dShaderInstToken setp_token;
1526
1527 switch (compare_func) {
1528 case PIPE_FUNC_NEVER:
1529 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1530 dst, fail );
1531 break;
1532 case PIPE_FUNC_LESS:
1533 setp_token = inst_token_setp(SVGA3DOPCOMP_LT);
1534 break;
1535 case PIPE_FUNC_EQUAL:
1536 setp_token = inst_token_setp(SVGA3DOPCOMP_EQ);
1537 break;
1538 case PIPE_FUNC_LEQUAL:
1539 setp_token = inst_token_setp(SVGA3DOPCOMP_LE);
1540 break;
1541 case PIPE_FUNC_GREATER:
1542 setp_token = inst_token_setp(SVGA3DOPCOMP_GT);
1543 break;
1544 case PIPE_FUNC_NOTEQUAL:
1545 setp_token = inst_token_setp(SVGA3DOPCOMPC_NE);
1546 break;
1547 case PIPE_FUNC_GEQUAL:
1548 setp_token = inst_token_setp(SVGA3DOPCOMP_GE);
1549 break;
1550 case PIPE_FUNC_ALWAYS:
1551 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1552 dst, pass );
1553 break;
1554 }
1555
1556 if (same_register(src(dst), pass)) {
1557 /* We'll get bad results if the dst and pass registers are the same
1558 * so use a temp register containing pass.
1559 */
1560 SVGA3dShaderDestToken temp = get_temp(emit);
1561 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, pass))
1562 return FALSE;
1563 pass = src(temp);
1564 }
1565
1566 /* SETP src0, COMPOP, src1 */
1567 if (!submit_op2( emit, setp_token, pred_reg,
1568 src0, src1 ))
1569 return FALSE;
1570
1571 /* MOV dst, fail */
1572 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV), dst, fail))
1573 return FALSE;
1574
1575 /* MOV dst, pass (predicated)
1576 *
1577 * Note that the predicate reg (and possible modifiers) is passed
1578 * as the first source argument.
1579 */
1580 if (!submit_op2(emit,
1581 inst_token_predicated(SVGA3DOP_MOV), dst,
1582 src(pred_reg), pass))
1583 return FALSE;
1584
1585 return TRUE;
1586 }
1587
1588
1589 /**
 1590  * Helper for emitting 'selection' commands. Basically:
1591 * if (src0 OP src1)
1592 * dst = 1.0;
1593 * else
1594 * dst = 0.0;
1595 */
1596 static boolean
1597 emit_select(struct svga_shader_emitter *emit,
1598 unsigned compare_func,
1599 SVGA3dShaderDestToken dst,
1600 struct src_register src0,
1601 struct src_register src1 )
1602 {
1603 /* There are some SVGA instructions which implement some selects
1604 * directly, but they are only available in the vertex shader.
1605 */
1606 if (emit->unit == PIPE_SHADER_VERTEX) {
1607 switch (compare_func) {
1608 case PIPE_FUNC_GEQUAL:
1609 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src0, src1 );
1610 case PIPE_FUNC_LEQUAL:
1611 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src1, src0 );
1612 case PIPE_FUNC_GREATER:
1613 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src1, src0 );
1614 case PIPE_FUNC_LESS:
1615 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src0, src1 );
1616 default:
1617 break;
1618 }
1619 }
1620
1621 /* Otherwise, need to use the setp approach:
1622 */
1623 {
1624 struct src_register one, zero;
 1625       /* pass value = {1,1,1,1}, fail value = {0,0,0,0} */
1626 zero = get_zero_immediate(emit);
1627 one = get_one_immediate(emit);
1628
1629 return emit_conditional(emit, compare_func, dst, src0, src1, one, zero);
1630 }
1631 }
1632
1633
1634 /**
1635 * Translate/emit a TGSI SEQ, SNE, SLT, SGE, etc. instruction.
1636 */
1637 static boolean
1638 emit_select_op(struct svga_shader_emitter *emit,
1639 unsigned compare,
1640 const struct tgsi_full_instruction *insn)
1641 {
1642 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1643 struct src_register src0 = translate_src_register(
1644 emit, &insn->Src[0] );
1645 struct src_register src1 = translate_src_register(
1646 emit, &insn->Src[1] );
1647
1648 return emit_select( emit, compare, dst, src0, src1 );
1649 }
1650
1651
1652 /**
1653 * Translate TGSI CMP instruction. Component-wise:
1654 * dst = (src0 < 0.0) ? src1 : src2
1655 */
1656 static boolean
1657 emit_cmp(struct svga_shader_emitter *emit,
1658 const struct tgsi_full_instruction *insn)
1659 {
1660 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1661 const struct src_register src0 =
1662 translate_src_register(emit, &insn->Src[0] );
1663 const struct src_register src1 =
1664 translate_src_register(emit, &insn->Src[1] );
1665 const struct src_register src2 =
1666 translate_src_register(emit, &insn->Src[2] );
1667
1668 if (emit->unit == PIPE_SHADER_VERTEX) {
1669 struct src_register zero = get_zero_immediate(emit);
1670 /* We used to simulate CMP with SLT+LRP. But that didn't work when
1671 * src1 or src2 was Inf/NaN. In particular, GLSL sqrt(0) failed
1672 * because it involves a CMP to handle the 0 case.
1673 * Use a conditional expression instead.
1674 */
1675 return emit_conditional(emit, PIPE_FUNC_LESS, dst,
1676 src0, zero, src1, src2);
1677 }
1678 else {
1679 assert(emit->unit == PIPE_SHADER_FRAGMENT);
1680
1681 /* CMP DST, SRC0, SRC2, SRC1 */
1682 return submit_op3( emit, inst_token( SVGA3DOP_CMP ), dst,
1683 src0, src2, src1);
1684 }
1685 }
1686
1687
1688 /**
1689 * Translate/emit 2-operand (coord, sampler) texture instructions.
1690 */
1691 static boolean
1692 emit_tex2(struct svga_shader_emitter *emit,
1693 const struct tgsi_full_instruction *insn,
1694 SVGA3dShaderDestToken dst)
1695 {
1696 SVGA3dShaderInstToken inst;
1697 struct src_register texcoord;
1698 struct src_register sampler;
1699 SVGA3dShaderDestToken tmp;
1700
1701 inst.value = 0;
1702
1703 switch (insn->Instruction.Opcode) {
1704 case TGSI_OPCODE_TEX:
1705 inst.op = SVGA3DOP_TEX;
1706 break;
1707 case TGSI_OPCODE_TXP:
1708 inst.op = SVGA3DOP_TEX;
1709 inst.control = SVGA3DOPCONT_PROJECT;
1710 break;
1711 case TGSI_OPCODE_TXB:
1712 inst.op = SVGA3DOP_TEX;
1713 inst.control = SVGA3DOPCONT_BIAS;
1714 break;
1715 case TGSI_OPCODE_TXL:
1716 inst.op = SVGA3DOP_TEXLDL;
1717 break;
1718 default:
1719 assert(0);
1720 return FALSE;
1721 }
1722
1723 texcoord = translate_src_register( emit, &insn->Src[0] );
1724 sampler = translate_src_register( emit, &insn->Src[1] );
1725
1726 if (emit->key.fkey.tex[sampler.base.num].unnormalized ||
1727 emit->dynamic_branching_level > 0)
1728 tmp = get_temp( emit );
1729
1730 /* Can't do mipmapping inside dynamic branch constructs. Force LOD
1731 * zero in that case.
1732 */
1733 if (emit->dynamic_branching_level > 0 &&
1734 inst.op == SVGA3DOP_TEX &&
1735 SVGA3dShaderGetRegType(texcoord.base.value) == SVGA3DREG_TEMP) {
1736 struct src_register zero = get_zero_immediate(emit);
1737
1738 /* MOV tmp, texcoord */
1739 if (!submit_op1( emit,
1740 inst_token( SVGA3DOP_MOV ),
1741 tmp,
1742 texcoord ))
1743 return FALSE;
1744
1745 /* MOV tmp.w, zero */
1746 if (!submit_op1( emit,
1747 inst_token( SVGA3DOP_MOV ),
1748 writemask( tmp, TGSI_WRITEMASK_W ),
1749 zero ))
1750 return FALSE;
1751
1752 texcoord = src( tmp );
1753 inst.op = SVGA3DOP_TEXLDL;
1754 }
1755
1756 /* Explicit normalization of texcoords:
1757 */
1758 if (emit->key.fkey.tex[sampler.base.num].unnormalized) {
1759 struct src_register wh = get_tex_dimensions( emit, sampler.base.num );
1760
1761 /* MUL tmp, SRC0, WH */
1762 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1763 tmp, texcoord, wh ))
1764 return FALSE;
1765
1766 texcoord = src( tmp );
1767 }
1768
1769 return submit_op2( emit, inst, dst, texcoord, sampler );
1770 }
1771
1772
1773 /**
1774 * Translate/emit 4-operand (coord, ddx, ddy, sampler) texture instructions.
1775 */
1776 static boolean
1777 emit_tex4(struct svga_shader_emitter *emit,
1778 const struct tgsi_full_instruction *insn,
1779 SVGA3dShaderDestToken dst )
1780 {
1781 SVGA3dShaderInstToken inst;
1782 struct src_register texcoord;
1783 struct src_register ddx;
1784 struct src_register ddy;
1785 struct src_register sampler;
1786
1787 texcoord = translate_src_register( emit, &insn->Src[0] );
1788 ddx = translate_src_register( emit, &insn->Src[1] );
1789 ddy = translate_src_register( emit, &insn->Src[2] );
1790 sampler = translate_src_register( emit, &insn->Src[3] );
1791
1792 inst.value = 0;
1793
1794 switch (insn->Instruction.Opcode) {
1795 case TGSI_OPCODE_TXD:
1796 inst.op = SVGA3DOP_TEXLDD; /* 4 args! */
1797 break;
1798 default:
1799 assert(0);
1800 return FALSE;
1801 }
1802
1803 return submit_op4( emit, inst, dst, texcoord, sampler, ddx, ddy );
1804 }
1805
1806
1807 /**
1808 * Emit texture swizzle code. We do this here since SVGA samplers don't
1809 * directly support swizzles.
1810 */
1811 static boolean
1812 emit_tex_swizzle(struct svga_shader_emitter *emit,
1813 SVGA3dShaderDestToken dst,
1814 struct src_register src,
1815 unsigned swizzle_x,
1816 unsigned swizzle_y,
1817 unsigned swizzle_z,
1818 unsigned swizzle_w)
1819 {
1820 const unsigned swizzleIn[4] = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
1821 unsigned srcSwizzle[4];
1822 unsigned srcWritemask = 0x0, zeroWritemask = 0x0, oneWritemask = 0x0;
1823 int i;
1824
1825 /* build writemasks and srcSwizzle terms */
1826 for (i = 0; i < 4; i++) {
1827 if (swizzleIn[i] == PIPE_SWIZZLE_ZERO) {
1828 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1829 zeroWritemask |= (1 << i);
1830 }
1831 else if (swizzleIn[i] == PIPE_SWIZZLE_ONE) {
1832 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1833 oneWritemask |= (1 << i);
1834 }
1835 else {
1836 srcSwizzle[i] = swizzleIn[i];
1837 srcWritemask |= (1 << i);
1838 }
1839 }
1840
1841 /* write x/y/z/w comps */
1842 if (dst.mask & srcWritemask) {
1843 if (!submit_op1(emit,
1844 inst_token(SVGA3DOP_MOV),
1845 writemask(dst, srcWritemask),
1846 swizzle(src,
1847 srcSwizzle[0],
1848 srcSwizzle[1],
1849 srcSwizzle[2],
1850 srcSwizzle[3])))
1851 return FALSE;
1852 }
1853
1854 /* write 0 comps */
1855 if (dst.mask & zeroWritemask) {
1856 if (!submit_op1(emit,
1857 inst_token(SVGA3DOP_MOV),
1858 writemask(dst, zeroWritemask),
1859 get_zero_immediate(emit)))
1860 return FALSE;
1861 }
1862
1863 /* write 1 comps */
1864 if (dst.mask & oneWritemask) {
1865 if (!submit_op1(emit,
1866 inst_token(SVGA3DOP_MOV),
1867 writemask(dst, oneWritemask),
1868 get_one_immediate(emit)))
1869 return FALSE;
1870 }
1871
1872 return TRUE;
1873 }
1874
1875
1876 /**
1877 * Translate/emit a TGSI texture sample instruction.
1878 */
1879 static boolean
1880 emit_tex(struct svga_shader_emitter *emit,
1881 const struct tgsi_full_instruction *insn)
1882 {
1883 SVGA3dShaderDestToken dst =
1884 translate_dst_register( emit, insn, 0 );
1885 struct src_register src0 =
1886 translate_src_register( emit, &insn->Src[0] );
1887 struct src_register src1 =
1888 translate_src_register( emit, &insn->Src[1] );
1889
1890 SVGA3dShaderDestToken tex_result;
1891 const unsigned unit = src1.base.num;
1892
1893 /* check for shadow samplers */
1894 boolean compare = (emit->key.fkey.tex[unit].compare_mode ==
1895 PIPE_TEX_COMPARE_R_TO_TEXTURE);
1896
1897 /* texture swizzle */
1898 boolean swizzle = (emit->key.fkey.tex[unit].swizzle_r != PIPE_SWIZZLE_RED ||
1899 emit->key.fkey.tex[unit].swizzle_g != PIPE_SWIZZLE_GREEN ||
1900 emit->key.fkey.tex[unit].swizzle_b != PIPE_SWIZZLE_BLUE ||
1901 emit->key.fkey.tex[unit].swizzle_a != PIPE_SWIZZLE_ALPHA);
1902
1903 boolean saturate = insn->Instruction.Saturate;
1904
1905 /* If doing compare processing or tex swizzle or saturation, we need to put
1906 * the fetched color into a temporary so it can be used as a source later on.
1907 */
1908 if (compare || swizzle || saturate) {
1909 tex_result = get_temp( emit );
1910 }
1911 else {
1912 tex_result = dst;
1913 }
1914
1915 switch(insn->Instruction.Opcode) {
1916 case TGSI_OPCODE_TEX:
1917 case TGSI_OPCODE_TXB:
1918 case TGSI_OPCODE_TXP:
1919 case TGSI_OPCODE_TXL:
1920 if (!emit_tex2( emit, insn, tex_result ))
1921 return FALSE;
1922 break;
1923 case TGSI_OPCODE_TXD:
1924 if (!emit_tex4( emit, insn, tex_result ))
1925 return FALSE;
1926 break;
1927 default:
1928 assert(0);
1929 }
1930
1931 if (compare) {
1932 SVGA3dShaderDestToken dst2;
1933
1934 if (swizzle || saturate)
1935 dst2 = tex_result;
1936 else
1937 dst2 = dst;
1938
1939 if (dst.mask & TGSI_WRITEMASK_XYZ) {
1940 SVGA3dShaderDestToken src0_zdivw = get_temp( emit );
1941 /* When sampling a depth texture, the result of the comparison is in
1942 * the Y component.
1943 */
1944 struct src_register tex_src_x = scalar(src(tex_result), TGSI_SWIZZLE_Y);
1945 struct src_register r_coord;
1946
1947 if (insn->Instruction.Opcode == TGSI_OPCODE_TXP) {
1948 /* Divide texcoord R by Q */
1949 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1950 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1951 scalar(src0, TGSI_SWIZZLE_W) ))
1952 return FALSE;
1953
1954 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1955 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1956 scalar(src0, TGSI_SWIZZLE_Z),
1957 scalar(src(src0_zdivw), TGSI_SWIZZLE_X) ))
1958 return FALSE;
1959
1960 r_coord = scalar(src(src0_zdivw), TGSI_SWIZZLE_X);
1961 }
1962 else {
1963 r_coord = scalar(src0, TGSI_SWIZZLE_Z);
1964 }
1965
1966 /* Compare texture sample value against R component of texcoord */
1967 if (!emit_select(emit,
1968 emit->key.fkey.tex[unit].compare_func,
1969 writemask( dst2, TGSI_WRITEMASK_XYZ ),
1970 r_coord,
1971 tex_src_x))
1972 return FALSE;
1973 }
1974
1975 if (dst.mask & TGSI_WRITEMASK_W) {
1976 struct src_register one = get_one_immediate(emit);
1977
1978 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1979 writemask( dst2, TGSI_WRITEMASK_W ),
1980 one ))
1981 return FALSE;
1982 }
1983 }
1984
1985 if (saturate && !swizzle) {
1986 /* MOV_SAT real_dst, dst */
1987 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src(tex_result) ))
1988 return FALSE;
1989 }
1990 else if (swizzle) {
1991 /* swizzle from tex_result to dst (handles saturation too, if any) */
1992 emit_tex_swizzle(emit,
1993 dst, src(tex_result),
1994 emit->key.fkey.tex[unit].swizzle_r,
1995 emit->key.fkey.tex[unit].swizzle_g,
1996 emit->key.fkey.tex[unit].swizzle_b,
1997 emit->key.fkey.tex[unit].swizzle_a);
1998 }
1999
2000 return TRUE;
2001 }
2002
2003
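/**
 * Translate/emit TGSI BGNLOOP as an SVGA3D LOOP instruction driven by
 * the integer constant created by create_loop_const().
 */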
2004 static boolean
2005 emit_bgnloop(struct svga_shader_emitter *emit,
2006 const struct tgsi_full_instruction *insn)
2007 {
2008 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_LOOP );
2009 struct src_register loop_reg = src_register( SVGA3DREG_LOOP, 0 );
2010 struct src_register const_int = get_loop_const( emit );
2011
2012 emit->dynamic_branching_level++;
2013
2014 return (emit_instruction( emit, inst ) &&
2015 emit_src( emit, loop_reg ) &&
2016 emit_src( emit, const_int ) );
2017 }
2018
2019
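/**
 * Translate/emit TGSI ENDLOOP instruction: emit SVGA ENDLOOP and drop the
 * dynamic branching level.
 */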
2020 static boolean
2021 emit_endloop(struct svga_shader_emitter *emit,
2022 const struct tgsi_full_instruction *insn)
2023 {
2024 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_ENDLOOP );
2025
2026 emit->dynamic_branching_level--;
2027
2028 return emit_instruction( emit, inst );
2029 }
2030
2031
2032 /**
2033 * Translate/emit TGSI BREAK (out of loop) instruction.
2034 */
2035 static boolean
2036 emit_brk(struct svga_shader_emitter *emit,
2037 const struct tgsi_full_instruction *insn)
2038 {
2039 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_BREAK );
2040 return emit_instruction( emit, inst );
2041 }
2042
2043
2044 /**
2045 * Emit simple instruction which operates on one scalar value (not
2046 * a vector). Ex: LG2, RCP, RSQ.
2047 */
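/*
 * For example (illustrative): TGSI "RCP dst, src" is emitted as
 * "RCP dst, src.xxxx" -- only the (replicated) X component of the source
 * is consumed.
 */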
2048 static boolean
2049 emit_scalar_op1(struct svga_shader_emitter *emit,
2050 unsigned opcode,
2051 const struct tgsi_full_instruction *insn)
2052 {
2053 SVGA3dShaderInstToken inst;
2054 SVGA3dShaderDestToken dst;
2055 struct src_register src;
2056
2057 inst = inst_token( opcode );
2058 dst = translate_dst_register( emit, insn, 0 );
2059 src = translate_src_register( emit, &insn->Src[0] );
2060 src = scalar( src, TGSI_SWIZZLE_X );
2061
2062 return submit_op1( emit, inst, dst, src );
2063 }
2064
2065
2066 /**
2067 * Translate/emit a simple instruction (one which has no special-case
2068 * code) such as ADD, MUL, MIN, MAX.
2069 */
2070 static boolean
2071 emit_simple_instruction(struct svga_shader_emitter *emit,
2072 unsigned opcode,
2073 const struct tgsi_full_instruction *insn)
2074 {
2075 const struct tgsi_full_src_register *src = insn->Src;
2076 SVGA3dShaderInstToken inst;
2077 SVGA3dShaderDestToken dst;
2078
2079 inst = inst_token( opcode );
2080 dst = translate_dst_register( emit, insn, 0 );
2081
2082 switch (insn->Instruction.NumSrcRegs) {
2083 case 0:
2084 return submit_op0( emit, inst, dst );
2085 case 1:
2086 return submit_op1( emit, inst, dst,
2087 translate_src_register( emit, &src[0] ));
2088 case 2:
2089 return submit_op2( emit, inst, dst,
2090 translate_src_register( emit, &src[0] ),
2091 translate_src_register( emit, &src[1] ) );
2092 case 3:
2093 return submit_op3( emit, inst, dst,
2094 translate_src_register( emit, &src[0] ),
2095 translate_src_register( emit, &src[1] ),
2096 translate_src_register( emit, &src[2] ) );
2097 default:
2098 assert(0);
2099 return FALSE;
2100 }
2101 }
2102
2103
2104 /**
2105 * Translate/emit TGSI DDX, DDY instructions.
2106 */
2107 static boolean
2108 emit_deriv(struct svga_shader_emitter *emit,
2109 const struct tgsi_full_instruction *insn )
2110 {
2111 if (emit->dynamic_branching_level > 0 &&
2112 insn->Src[0].Register.File == TGSI_FILE_TEMPORARY)
2113 {
2114 SVGA3dShaderDestToken dst =
2115 translate_dst_register( emit, insn, 0 );
2116
2117          /* Deriv opcodes are not valid inside dynamic branching; work around
2118           * this by zeroing out the destination.
2119 */
2120 if (!submit_op1(emit,
2121 inst_token( SVGA3DOP_MOV ),
2122 dst,
2123 get_zero_immediate(emit)))
2124 return FALSE;
2125
2126 return TRUE;
2127 }
2128 else {
2129 unsigned opcode;
2130 const struct tgsi_full_src_register *reg = &insn->Src[0];
2131 SVGA3dShaderInstToken inst;
2132 SVGA3dShaderDestToken dst;
2133 struct src_register src0;
2134
2135 switch (insn->Instruction.Opcode) {
2136 case TGSI_OPCODE_DDX:
2137 opcode = SVGA3DOP_DSX;
2138 break;
2139 case TGSI_OPCODE_DDY:
2140 opcode = SVGA3DOP_DSY;
2141 break;
2142 default:
2143 return FALSE;
2144 }
2145
2146 inst = inst_token( opcode );
2147 dst = translate_dst_register( emit, insn, 0 );
2148 src0 = translate_src_register( emit, reg );
2149
2150       /* We cannot use negate or abs on the source of a dsx/dsy instruction.
2151 */
2152 if (reg->Register.Absolute ||
2153 reg->Register.Negate) {
2154 SVGA3dShaderDestToken temp = get_temp( emit );
2155
2156 if (!emit_repl( emit, temp, &src0 ))
2157 return FALSE;
2158 }
2159
2160 return submit_op1( emit, inst, dst, src0 );
2161 }
2162 }
2163
2164
2165 /**
2166 * Translate/emit ARL (Address Register Load) instruction. Used to
2167 * move a value into the special 'address' register. Used to implement
2168 * indirect/variable indexing into arrays.
2169 */
2170 static boolean
2171 emit_arl(struct svga_shader_emitter *emit,
2172 const struct tgsi_full_instruction *insn)
2173 {
2174 ++emit->current_arl;
2175 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2176 /* MOVA not present in pixel shader instruction set.
2177 * Ignore this instruction altogether since it is
2178 * only used for loop counters -- and for that
2179 * we reference aL directly.
2180 */
2181 return TRUE;
2182 }
2183 if (svga_arl_needs_adjustment( emit )) {
2184 return emit_fake_arl( emit, insn );
2185 } else {
2186 /* no need to adjust, just emit straight arl */
2187 return emit_simple_instruction(emit, SVGA3DOP_MOVA, insn);
2188 }
2189 }
2190
2191
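/**
 * Translate/emit TGSI POW instruction (scalar x^y).
 * Works around two D3D restrictions: POW may only write a temporary, and
 * its second source must not alias the destination.
 */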
2192 static boolean
2193 emit_pow(struct svga_shader_emitter *emit,
2194 const struct tgsi_full_instruction *insn)
2195 {
2196 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2197 struct src_register src0 = translate_src_register(
2198 emit, &insn->Src[0] );
2199 struct src_register src1 = translate_src_register(
2200 emit, &insn->Src[1] );
2201 boolean need_tmp = FALSE;
2202
2203 /* POW can only output to a temporary */
2204 if (insn->Dst[0].Register.File != TGSI_FILE_TEMPORARY)
2205 need_tmp = TRUE;
2206
2207 /* POW src1 must not be the same register as dst */
2208 if (alias_src_dst( src1, dst ))
2209 need_tmp = TRUE;
2210
2211 /* it's a scalar op */
2212 src0 = scalar( src0, TGSI_SWIZZLE_X );
2213 src1 = scalar( src1, TGSI_SWIZZLE_X );
2214
2215 if (need_tmp) {
2216 SVGA3dShaderDestToken tmp =
2217 writemask(get_temp( emit ), TGSI_WRITEMASK_X );
2218
2219 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ), tmp, src0, src1))
2220 return FALSE;
2221
2222 return submit_op1(emit, inst_token( SVGA3DOP_MOV ),
2223 dst, scalar(src(tmp), 0) );
2224 }
2225 else {
2226 return submit_op2(emit, inst_token( SVGA3DOP_POW ), dst, src0, src1);
2227 }
2228 }
2229
2230
2231 /**
2232 * Translate/emit TGSI XPD (vector cross product) instruction.
2233 */
2234 static boolean
2235 emit_xpd(struct svga_shader_emitter *emit,
2236 const struct tgsi_full_instruction *insn)
2237 {
2238 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2239 const struct src_register src0 = translate_src_register(
2240 emit, &insn->Src[0] );
2241 const struct src_register src1 = translate_src_register(
2242 emit, &insn->Src[1] );
2243 boolean need_dst_tmp = FALSE;
2244
2245 /* XPD can only output to a temporary */
2246 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP)
2247 need_dst_tmp = TRUE;
2248
2249    /* The dst reg must not be the same as src0 or src1 */
2250 if (alias_src_dst(src0, dst) ||
2251 alias_src_dst(src1, dst))
2252 need_dst_tmp = TRUE;
2253
2254 if (need_dst_tmp) {
2255 SVGA3dShaderDestToken tmp = get_temp( emit );
2256
2257 /* Obey DX9 restrictions on mask:
2258 */
2259 tmp.mask = dst.mask & TGSI_WRITEMASK_XYZ;
2260
2261 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), tmp, src0, src1))
2262 return FALSE;
2263
2264 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
2265 return FALSE;
2266 }
2267 else {
2268 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), dst, src0, src1))
2269 return FALSE;
2270 }
2271
2272 /* Need to emit 1.0 to dst.w?
2273 */
2274 if (dst.mask & TGSI_WRITEMASK_W) {
2275 struct src_register one = get_one_immediate( emit );
2276
2277 if (!submit_op1(emit,
2278 inst_token( SVGA3DOP_MOV ),
2279 writemask(dst, TGSI_WRITEMASK_W),
2280 one))
2281 return FALSE;
2282 }
2283
2284 return TRUE;
2285 }
2286
2287
2288 /**
2289 * Emit a LRP (linear interpolation) instruction.
2290 */
2291 static boolean
2292 submit_lrp(struct svga_shader_emitter *emit,
2293 SVGA3dShaderDestToken dst,
2294 struct src_register src0,
2295 struct src_register src1,
2296 struct src_register src2)
2297 {
2298 SVGA3dShaderDestToken tmp;
2299 boolean need_dst_tmp = FALSE;
2300
2301 /* The dst reg must be a temporary, and not be the same as src0 or src2 */
2302 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
2303 alias_src_dst(src0, dst) ||
2304 alias_src_dst(src2, dst))
2305 need_dst_tmp = TRUE;
2306
2307 if (need_dst_tmp) {
2308 tmp = get_temp( emit );
2309 tmp.mask = dst.mask;
2310 }
2311 else {
2312 tmp = dst;
2313 }
2314
2315 if (!submit_op3(emit, inst_token( SVGA3DOP_LRP ), tmp, src0, src1, src2))
2316 return FALSE;
2317
2318 if (need_dst_tmp) {
2319 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
2320 return FALSE;
2321 }
2322
2323 return TRUE;
2324 }
2325
2326
2327 /**
2328 * Translate/emit LRP (Linear Interpolation) instruction.
2329 */
2330 static boolean
2331 emit_lrp(struct svga_shader_emitter *emit,
2332 const struct tgsi_full_instruction *insn)
2333 {
2334 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2335 const struct src_register src0 = translate_src_register(
2336 emit, &insn->Src[0] );
2337 const struct src_register src1 = translate_src_register(
2338 emit, &insn->Src[1] );
2339 const struct src_register src2 = translate_src_register(
2340 emit, &insn->Src[2] );
2341
2342 return submit_lrp(emit, dst, src0, src1, src2);
2343 }
2344
2345 /**
2346 * Translate/emit DST (Distance function) instruction.
2347 */
2348 static boolean
2349 emit_dst_insn(struct svga_shader_emitter *emit,
2350 const struct tgsi_full_instruction *insn)
2351 {
2352 if (emit->unit == PIPE_SHADER_VERTEX) {
2353 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
2354 */
2355 return emit_simple_instruction(emit, SVGA3DOP_DST, insn);
2356 }
2357 else {
2358 /* result[0] = 1 * 1;
2359 * result[1] = a[1] * b[1];
2360 * result[2] = a[2] * 1;
2361 * result[3] = 1 * b[3];
2362 */
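      /* A classic (illustrative) use is distance attenuation: with
       * a = (?, d*d, d*d, ?) and b = (?, 1/d, ?, 1/d) the result is
       * (1, d, d*d, 1/d).
       */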
2363 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2364 SVGA3dShaderDestToken tmp;
2365 const struct src_register src0 = translate_src_register(
2366 emit, &insn->Src[0] );
2367 const struct src_register src1 = translate_src_register(
2368 emit, &insn->Src[1] );
2369 boolean need_tmp = FALSE;
2370
2371 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
2372 alias_src_dst(src0, dst) ||
2373 alias_src_dst(src1, dst))
2374 need_tmp = TRUE;
2375
2376 if (need_tmp) {
2377 tmp = get_temp( emit );
2378 }
2379 else {
2380 tmp = dst;
2381 }
2382
2383 /* tmp.xw = 1.0
2384 */
2385 if (tmp.mask & TGSI_WRITEMASK_XW) {
2386 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2387 writemask(tmp, TGSI_WRITEMASK_XW ),
2388 get_one_immediate(emit)))
2389 return FALSE;
2390 }
2391
2392 /* tmp.yz = src0
2393 */
2394 if (tmp.mask & TGSI_WRITEMASK_YZ) {
2395 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2396 writemask(tmp, TGSI_WRITEMASK_YZ ),
2397 src0))
2398 return FALSE;
2399 }
2400
2401 /* tmp.yw = tmp * src1
2402 */
2403 if (tmp.mask & TGSI_WRITEMASK_YW) {
2404 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2405 writemask(tmp, TGSI_WRITEMASK_YW ),
2406 src(tmp),
2407 src1))
2408 return FALSE;
2409 }
2410
2411 /* dst = tmp
2412 */
2413 if (need_tmp) {
2414 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2415 dst,
2416 src(tmp)))
2417 return FALSE;
2418 }
2419 }
2420
2421 return TRUE;
2422 }
2423
2424
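/**
 * Translate/emit the TGSI EXP instruction (the legacy "partial" exponential):
 *   dst.x = 2 ^ floor(src.x)
 *   dst.y = src.x - floor(src.x)
 *   dst.z = 2 ^ src.x   (partial precision)
 *   dst.w = 1.0
 */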
2425 static boolean
2426 emit_exp(struct svga_shader_emitter *emit,
2427 const struct tgsi_full_instruction *insn)
2428 {
2429 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2430 struct src_register src0 =
2431 translate_src_register( emit, &insn->Src[0] );
2432 SVGA3dShaderDestToken fraction;
2433
2434 if (dst.mask & TGSI_WRITEMASK_Y)
2435 fraction = dst;
2436 else if (dst.mask & TGSI_WRITEMASK_X)
2437 fraction = get_temp( emit );
2438 else
2439 fraction.value = 0;
2440
2441    /* If x or y is being written, compute the fraction src0 - floor(src0);
2441     * the x path below needs it too.
2442 */
2443 if (dst.mask & TGSI_WRITEMASK_XY) {
2444 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2445 writemask( fraction, TGSI_WRITEMASK_Y ),
2446 src0 ))
2447 return FALSE;
2448 }
2449
2450 /* If x is being written, fill it with 2 ^ floor(src0).
2451 */
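   /* floor(src0) is formed as src0 - frac(src0) (the fraction was computed
    * just above into the Y channel), then a full-precision EXP yields
    * 2 ^ floor(src0).
    */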
2452 if (dst.mask & TGSI_WRITEMASK_X) {
2453 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2454 writemask( dst, TGSI_WRITEMASK_X ),
2455 src0,
2456 scalar( negate( src( fraction ) ), TGSI_SWIZZLE_Y ) ) )
2457 return FALSE;
2458
2459 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2460 writemask( dst, TGSI_WRITEMASK_X ),
2461 scalar( src( dst ), TGSI_SWIZZLE_X ) ) )
2462 return FALSE;
2463
2464 if (!(dst.mask & TGSI_WRITEMASK_Y))
2465 release_temp( emit, fraction );
2466 }
2467
2468 /* If z is being written, fill it with 2 ^ src0 (partial precision).
2469 */
2470 if (dst.mask & TGSI_WRITEMASK_Z) {
2471 if (!submit_op1( emit, inst_token( SVGA3DOP_EXPP ),
2472 writemask( dst, TGSI_WRITEMASK_Z ),
2473 src0 ) )
2474 return FALSE;
2475 }
2476
2477 /* If w is being written, fill it with one.
2478 */
2479 if (dst.mask & TGSI_WRITEMASK_W) {
2480 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2481 writemask(dst, TGSI_WRITEMASK_W),
2482 get_one_immediate(emit)))
2483 return FALSE;
2484 }
2485
2486 return TRUE;
2487 }
2488
2489
2490 /**
2491 * Translate/emit LIT (Lighting helper) instruction.
2492 */
2493 static boolean
2494 emit_lit(struct svga_shader_emitter *emit,
2495 const struct tgsi_full_instruction *insn)
2496 {
2497 if (emit->unit == PIPE_SHADER_VERTEX) {
2498 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
2499 */
2500 return emit_simple_instruction(emit, SVGA3DOP_LIT, insn);
2501 }
2502 else {
2503       /* D3D vs. GL semantics can be fairly easily accommodated by
2504 * variations on this sequence.
2505 *
2506 * GL:
2507 * tmp.y = src.x
2508 * tmp.z = pow(src.y,src.w)
2509 * p0 = src0.xxxx > 0
2510 * result = zero.wxxw
2511 * (p0) result.yz = tmp
2512 *
2513 * D3D:
2514 * tmp.y = src.x
2515 * tmp.z = pow(src.y,src.w)
2516 * p0 = src0.xxyy > 0
2517 * result = zero.wxxw
2518 * (p0) result.yz = tmp
2519 *
2520 * Will implement the GL version for now.
2521 */
2522 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2523 SVGA3dShaderDestToken tmp = get_temp( emit );
2524 const struct src_register src0 = translate_src_register(
2525 emit, &insn->Src[0] );
2526
2527 /* tmp = pow(src.y, src.w)
2528 */
2529 if (dst.mask & TGSI_WRITEMASK_Z) {
2530 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ),
2531 tmp,
2532 scalar(src0, 1),
2533 scalar(src0, 3)))
2534 return FALSE;
2535 }
2536
2537 /* tmp.y = src.x
2538 */
2539 if (dst.mask & TGSI_WRITEMASK_Y) {
2540 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2541 writemask(tmp, TGSI_WRITEMASK_Y ),
2542 scalar(src0, 0)))
2543 return FALSE;
2544 }
2545
2546 /* Can't quite do this with emit conditional due to the extra
2547 * writemask on the predicated mov:
2548 */
2549 {
2550 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
2551 struct src_register predsrc;
2552
2553 /* D3D vs GL semantics:
2554 */
2555 if (0)
2556 predsrc = swizzle(src0, 0, 0, 1, 1); /* D3D */
2557 else
2558 predsrc = swizzle(src0, 0, 0, 0, 0); /* GL */
2559
2560          /* SETP_GT p0, predsrc, {0}.x */
2561 if (!submit_op2( emit,
2562 inst_token_setp(SVGA3DOPCOMP_GT),
2563 pred_reg,
2564 predsrc,
2565 get_zero_immediate(emit)))
2566 return FALSE;
2567
2568 /* MOV dst, fail */
2569 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst,
2570 get_immediate(emit, 1.0f, 0.0f, 0.0f, 1.0f)))
2571 return FALSE;
2572
2573 /* MOV dst.yz, tmp (predicated)
2574 *
2575 * Note that the predicate reg (and possible modifiers) is passed
2576 * as the first source argument.
2577 */
2578 if (dst.mask & TGSI_WRITEMASK_YZ) {
2579 if (!submit_op2( emit,
2580 inst_token_predicated(SVGA3DOP_MOV),
2581 writemask(dst, TGSI_WRITEMASK_YZ),
2582 src( pred_reg ), src( tmp ) ))
2583 return FALSE;
2584 }
2585 }
2586 }
2587
2588 return TRUE;
2589 }
2590
2591
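/**
 * Translate/emit TGSI EX2 (2^x, full precision) via the SVGA EXP opcode.
 * When the destination has a partial writemask, the result is computed in a
 * temporary first and then moved into place.
 */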
2592 static boolean
2593 emit_ex2(struct svga_shader_emitter *emit,
2594 const struct tgsi_full_instruction *insn)
2595 {
2596 SVGA3dShaderInstToken inst;
2597 SVGA3dShaderDestToken dst;
2598 struct src_register src0;
2599
2600 inst = inst_token( SVGA3DOP_EXP );
2601 dst = translate_dst_register( emit, insn, 0 );
2602 src0 = translate_src_register( emit, &insn->Src[0] );
2603 src0 = scalar( src0, TGSI_SWIZZLE_X );
2604
2605 if (dst.mask != TGSI_WRITEMASK_XYZW) {
2606 SVGA3dShaderDestToken tmp = get_temp( emit );
2607
2608 if (!submit_op1( emit, inst, tmp, src0 ))
2609 return FALSE;
2610
2611 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2612 dst,
2613 scalar( src( tmp ), TGSI_SWIZZLE_X ) );
2614 }
2615
2616 return submit_op1( emit, inst, dst, src0 );
2617 }
2618
2619
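/**
 * Translate/emit the TGSI LOG instruction (the legacy "partial" logarithm):
 *   dst.x = floor(log2(abs(src.x)))
 *   dst.y = abs(src.x) / (2 ^ floor(log2(abs(src.x))))
 *   dst.z = log2(abs(src.x))
 *   dst.w = 1.0
 */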
2620 static boolean
2621 emit_log(struct svga_shader_emitter *emit,
2622 const struct tgsi_full_instruction *insn)
2623 {
2624 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2625 struct src_register src0 =
2626 translate_src_register( emit, &insn->Src[0] );
2627 SVGA3dShaderDestToken abs_tmp;
2628 struct src_register abs_src0;
2629 SVGA3dShaderDestToken log2_abs;
2630
2631 abs_tmp.value = 0;
2632
2633 if (dst.mask & TGSI_WRITEMASK_Z)
2634 log2_abs = dst;
2635 else if (dst.mask & TGSI_WRITEMASK_XY)
2636 log2_abs = get_temp( emit );
2637 else
2638 log2_abs.value = 0;
2639
2640    /* If x, y or z is being written, compute log2( abs( src0 ) ); the x and
2640     * y results below are derived from it.
2641 */
2642 if (dst.mask & TGSI_WRITEMASK_XYZ) {
2643 if (!src0.base.srcMod || src0.base.srcMod == SVGA3DSRCMOD_ABS)
2644 abs_src0 = src0;
2645 else {
2646 abs_tmp = get_temp( emit );
2647
2648 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2649 abs_tmp,
2650 src0 ) )
2651 return FALSE;
2652
2653 abs_src0 = src( abs_tmp );
2654 }
2655
2656 abs_src0 = absolute( scalar( abs_src0, TGSI_SWIZZLE_X ) );
2657
2658 if (!submit_op1( emit, inst_token( SVGA3DOP_LOG ),
2659 writemask( log2_abs, TGSI_WRITEMASK_Z ),
2660 abs_src0 ) )
2661 return FALSE;
2662 }
2663
2664 if (dst.mask & TGSI_WRITEMASK_XY) {
2665 SVGA3dShaderDestToken floor_log2;
2666
2667 if (dst.mask & TGSI_WRITEMASK_X)
2668 floor_log2 = dst;
2669 else
2670 floor_log2 = get_temp( emit );
2671
2672       /* Compute floor( log2( abs( src0 ) ) ): into x if x is written,
2672        * otherwise into a temp used for the y computation.
2673 */
2674 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2675 writemask( floor_log2, TGSI_WRITEMASK_X ),
2676 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ) ) )
2677 return FALSE;
2678
2679 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2680 writemask( floor_log2, TGSI_WRITEMASK_X ),
2681 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ),
2682 negate( src( floor_log2 ) ) ) )
2683 return FALSE;
2684
2685 /* If y is being written, fill it with
2686 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
2687 */
2688 if (dst.mask & TGSI_WRITEMASK_Y) {
2689 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2690 writemask( dst, TGSI_WRITEMASK_Y ),
2691 negate( scalar( src( floor_log2 ),
2692 TGSI_SWIZZLE_X ) ) ) )
2693 return FALSE;
2694
2695 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2696 writemask( dst, TGSI_WRITEMASK_Y ),
2697 src( dst ),
2698 abs_src0 ) )
2699 return FALSE;
2700 }
2701
2702 if (!(dst.mask & TGSI_WRITEMASK_X))
2703 release_temp( emit, floor_log2 );
2704
2705 if (!(dst.mask & TGSI_WRITEMASK_Z))
2706 release_temp( emit, log2_abs );
2707 }
2708
2709 if (dst.mask & TGSI_WRITEMASK_XYZ && src0.base.srcMod &&
2710 src0.base.srcMod != SVGA3DSRCMOD_ABS)
2711 release_temp( emit, abs_tmp );
2712
2713 /* If w is being written, fill it with one.
2714 */
2715 if (dst.mask & TGSI_WRITEMASK_W) {
2716 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2717 writemask(dst, TGSI_WRITEMASK_W),
2718 get_one_immediate(emit)))
2719 return FALSE;
2720 }
2721
2722 return TRUE;
2723 }
2724
2725
2726 /**
2727 * Translate TGSI TRUNC or ROUND instruction.
2728 * We need to truncate toward zero. Ex: trunc(-1.9) = -1
2729 * Different approaches are needed for VS versus PS.
2730 */
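/*
 * Illustrative walk-through of trunc(-1.9) in the fragment-shader path:
 *   t1 = frac(abs(-1.9))      = 0.9
 *   t1 = abs(-1.9) - t1       = 1.0
 *   dst = CMP(src0, t1, -t1)  = -1.0   (src0 < 0 selects the negated value)
 */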
2731 static boolean
2732 emit_trunc_round(struct svga_shader_emitter *emit,
2733 const struct tgsi_full_instruction *insn,
2734 boolean round)
2735 {
2736 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
2737 const struct src_register src0 =
2738 translate_src_register(emit, &insn->Src[0] );
2739 SVGA3dShaderDestToken t1 = get_temp(emit);
2740
2741 if (round) {
2742 SVGA3dShaderDestToken t0 = get_temp(emit);
2743 struct src_register half = get_half_immediate(emit);
2744
2745 /* t0 = abs(src0) + 0.5 */
2746 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t0,
2747 absolute(src0), half))
2748 return FALSE;
2749
2750 /* t1 = fract(t0) */
2751 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, src(t0)))
2752 return FALSE;
2753
2754 /* t1 = t0 - t1 */
2755 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, src(t0),
2756 negate(src(t1))))
2757 return FALSE;
2758 }
2759 else {
2760 /* trunc */
2761
2762 /* t1 = fract(abs(src0)) */
2763 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, absolute(src0)))
2764 return FALSE;
2765
2766 /* t1 = abs(src0) - t1 */
2767 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, absolute(src0),
2768 negate(src(t1))))
2769 return FALSE;
2770 }
2771
2772 /*
2773 * Now we need to multiply t1 by the sign of the original value.
2774 */
2775 if (emit->unit == PIPE_SHADER_VERTEX) {
2776 /* For VS: use SGN instruction */
2777 /* Need two extra/dummy registers: */
2778 SVGA3dShaderDestToken t2 = get_temp(emit), t3 = get_temp(emit),
2779 t4 = get_temp(emit);
2780
2781 /* t2 = sign(src0) */
2782 if (!submit_op3(emit, inst_token(SVGA3DOP_SGN), t2, src0,
2783 src(t3), src(t4)))
2784 return FALSE;
2785
2786 /* dst = t1 * t2 */
2787 if (!submit_op2(emit, inst_token(SVGA3DOP_MUL), dst, src(t1), src(t2)))
2788 return FALSE;
2789 }
2790 else {
2791 /* For FS: Use CMP instruction */
2792 return submit_op3(emit, inst_token( SVGA3DOP_CMP ), dst,
2793 src0, src(t1), negate(src(t1)));
2794 }
2795
2796 return TRUE;
2797 }
2798
2799
2800 /**
2801 * Translate/emit "begin subroutine" instruction/marker/label.
2802 */
2803 static boolean
2804 emit_bgnsub(struct svga_shader_emitter *emit,
2805 unsigned position,
2806 const struct tgsi_full_instruction *insn)
2807 {
2808 unsigned i;
2809
2810 /* Note that we've finished the main function and are now emitting
2811 * subroutines. This affects how we terminate the generated
2812 * shader.
2813 */
2814 emit->in_main_func = FALSE;
2815
2816 for (i = 0; i < emit->nr_labels; i++) {
2817 if (emit->label[i] == position) {
2818 return (emit_instruction( emit, inst_token( SVGA3DOP_RET ) ) &&
2819 emit_instruction( emit, inst_token( SVGA3DOP_LABEL ) ) &&
2820 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2821 }
2822 }
2823
2824 assert(0);
2825 return TRUE;
2826 }
2827
2828
2829 /**
2830 * Translate/emit subroutine call instruction.
2831 */
2832 static boolean
2833 emit_call(struct svga_shader_emitter *emit,
2834 const struct tgsi_full_instruction *insn)
2835 {
2836 unsigned position = insn->Label.Label;
2837 unsigned i;
2838
2839 for (i = 0; i < emit->nr_labels; i++) {
2840 if (emit->label[i] == position)
2841 break;
2842 }
2843
2844 if (emit->nr_labels == Elements(emit->label))
2845 return FALSE;
2846
2847 if (i == emit->nr_labels) {
2848 emit->label[i] = position;
2849 emit->nr_labels++;
2850 }
2851
2852 return (emit_instruction( emit, inst_token( SVGA3DOP_CALL ) ) &&
2853 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2854 }
2855
2856
2857 /**
2858  * Called at the end of the shader: emit the special "fix-up" (postamble)
2859  * code for the vertex or fragment shader.
2860 */
2861 static boolean
2862 emit_end(struct svga_shader_emitter *emit)
2863 {
2864 if (emit->unit == PIPE_SHADER_VERTEX) {
2865 return emit_vs_postamble( emit );
2866 }
2867 else {
2868 return emit_ps_postamble( emit );
2869 }
2870 }
2871
2872
2873 /**
2874 * Translate any TGSI instruction to SVGA.
2875 */
2876 static boolean
2877 svga_emit_instruction(struct svga_shader_emitter *emit,
2878 unsigned position,
2879 const struct tgsi_full_instruction *insn)
2880 {
2881 switch (insn->Instruction.Opcode) {
2882
2883 case TGSI_OPCODE_ARL:
2884 return emit_arl( emit, insn );
2885
2886 case TGSI_OPCODE_TEX:
2887 case TGSI_OPCODE_TXB:
2888 case TGSI_OPCODE_TXP:
2889 case TGSI_OPCODE_TXL:
2890 case TGSI_OPCODE_TXD:
2891 return emit_tex( emit, insn );
2892
2893 case TGSI_OPCODE_DDX:
2894 case TGSI_OPCODE_DDY:
2895 return emit_deriv( emit, insn );
2896
2897 case TGSI_OPCODE_BGNSUB:
2898 return emit_bgnsub( emit, position, insn );
2899
2900 case TGSI_OPCODE_ENDSUB:
2901 return TRUE;
2902
2903 case TGSI_OPCODE_CAL:
2904 return emit_call( emit, insn );
2905
2906 case TGSI_OPCODE_FLR:
2907 return emit_floor( emit, insn );
2908
2909 case TGSI_OPCODE_TRUNC:
2910 return emit_trunc_round( emit, insn, FALSE );
2911
2912 case TGSI_OPCODE_ROUND:
2913 return emit_trunc_round( emit, insn, TRUE );
2914
2915 case TGSI_OPCODE_CEIL:
2916 return emit_ceil( emit, insn );
2917
2918 case TGSI_OPCODE_CMP:
2919 return emit_cmp( emit, insn );
2920
2921 case TGSI_OPCODE_DIV:
2922 return emit_div( emit, insn );
2923
2924 case TGSI_OPCODE_DP2:
2925 return emit_dp2( emit, insn );
2926
2927 case TGSI_OPCODE_DPH:
2928 return emit_dph( emit, insn );
2929
2930 case TGSI_OPCODE_COS:
2931 return emit_cos( emit, insn );
2932
2933 case TGSI_OPCODE_SIN:
2934 return emit_sin( emit, insn );
2935
2936 case TGSI_OPCODE_SCS:
2937 return emit_sincos( emit, insn );
2938
2939 case TGSI_OPCODE_END:
2940 /* TGSI always finishes the main func with an END */
2941 return emit_end( emit );
2942
2943 case TGSI_OPCODE_KILL_IF:
2944 return emit_kill_if( emit, insn );
2945
2946 /* Selection opcodes. The underlying language is fairly
2947 * non-orthogonal about these.
2948 */
2949 case TGSI_OPCODE_SEQ:
2950 return emit_select_op( emit, PIPE_FUNC_EQUAL, insn );
2951
2952 case TGSI_OPCODE_SNE:
2953 return emit_select_op( emit, PIPE_FUNC_NOTEQUAL, insn );
2954
2955 case TGSI_OPCODE_SGT:
2956 return emit_select_op( emit, PIPE_FUNC_GREATER, insn );
2957
2958 case TGSI_OPCODE_SGE:
2959 return emit_select_op( emit, PIPE_FUNC_GEQUAL, insn );
2960
2961 case TGSI_OPCODE_SLT:
2962 return emit_select_op( emit, PIPE_FUNC_LESS, insn );
2963
2964 case TGSI_OPCODE_SLE:
2965 return emit_select_op( emit, PIPE_FUNC_LEQUAL, insn );
2966
2967 case TGSI_OPCODE_SUB:
2968 return emit_sub( emit, insn );
2969
2970 case TGSI_OPCODE_POW:
2971 return emit_pow( emit, insn );
2972
2973 case TGSI_OPCODE_EX2:
2974 return emit_ex2( emit, insn );
2975
2976 case TGSI_OPCODE_EXP:
2977 return emit_exp( emit, insn );
2978
2979 case TGSI_OPCODE_LOG:
2980 return emit_log( emit, insn );
2981
2982 case TGSI_OPCODE_LG2:
2983 return emit_scalar_op1( emit, SVGA3DOP_LOG, insn );
2984
2985 case TGSI_OPCODE_RSQ:
2986 return emit_scalar_op1( emit, SVGA3DOP_RSQ, insn );
2987
2988 case TGSI_OPCODE_RCP:
2989 return emit_scalar_op1( emit, SVGA3DOP_RCP, insn );
2990
2991 case TGSI_OPCODE_CONT:
2992 /* not expected (we return PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED = 0) */
2993 return FALSE;
2994
2995 case TGSI_OPCODE_RET:
2996 /* This is a noop -- we tell mesa that we can't support RET
2997 * within a function (early return), so this will always be
2998 * followed by an ENDSUB.
2999 */
3000 return TRUE;
3001
3002 /* These aren't actually used by any of the frontends we care
3003 * about:
3004 */
3005 case TGSI_OPCODE_CLAMP:
3006 case TGSI_OPCODE_AND:
3007 case TGSI_OPCODE_OR:
3008 case TGSI_OPCODE_I2F:
3009 case TGSI_OPCODE_NOT:
3010 case TGSI_OPCODE_SHL:
3011 case TGSI_OPCODE_ISHR:
3012 case TGSI_OPCODE_XOR:
3013 return FALSE;
3014
3015 case TGSI_OPCODE_IF:
3016 return emit_if( emit, insn );
3017 case TGSI_OPCODE_ELSE:
3018 return emit_else( emit, insn );
3019 case TGSI_OPCODE_ENDIF:
3020 return emit_endif( emit, insn );
3021
3022 case TGSI_OPCODE_BGNLOOP:
3023 return emit_bgnloop( emit, insn );
3024 case TGSI_OPCODE_ENDLOOP:
3025 return emit_endloop( emit, insn );
3026 case TGSI_OPCODE_BRK:
3027 return emit_brk( emit, insn );
3028
3029 case TGSI_OPCODE_XPD:
3030 return emit_xpd( emit, insn );
3031
3032 case TGSI_OPCODE_KILL:
3033 return emit_kill( emit, insn );
3034
3035 case TGSI_OPCODE_DST:
3036 return emit_dst_insn( emit, insn );
3037
3038 case TGSI_OPCODE_LIT:
3039 return emit_lit( emit, insn );
3040
3041 case TGSI_OPCODE_LRP:
3042 return emit_lrp( emit, insn );
3043
3044 case TGSI_OPCODE_SSG:
3045 return emit_ssg( emit, insn );
3046
3047 default:
3048 {
3049 unsigned opcode = translate_opcode(insn->Instruction.Opcode);
3050
3051 if (opcode == SVGA3DOP_LAST_INST)
3052 return FALSE;
3053
3054 if (!emit_simple_instruction( emit, opcode, insn ))
3055 return FALSE;
3056 }
3057 }
3058
3059 return TRUE;
3060 }
3061
3062
3063 /**
3064 * Translate/emit a TGSI IMMEDIATE declaration.
3065 * An immediate vector is a constant that's hard-coded into the shader.
3066 */
3067 static boolean
3068 svga_emit_immediate(struct svga_shader_emitter *emit,
3069 const struct tgsi_full_immediate *imm)
3070 {
3071 static const float id[4] = {0,0,0,1};
3072 float value[4];
3073 unsigned i;
3074
3075 assert(1 <= imm->Immediate.NrTokens && imm->Immediate.NrTokens <= 5);
3076 for (i = 0; i < imm->Immediate.NrTokens - 1; i++) {
3077 float f = imm->u[i].Float;
3078 value[i] = util_is_inf_or_nan(f) ? 0.0f : f;
3079 }
3080
3081    /* If the immediate has fewer than four values, fill in the remaining
3082 * positions from id={0,0,0,1}.
3083 */
3084 for ( ; i < 4; i++ )
3085 value[i] = id[i];
3086
3087 return emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
3088 emit->imm_start + emit->internal_imm_count++,
3089 value[0], value[1], value[2], value[3]);
3090 }
3091
3092
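/**
 * Allocate the next hardware float constant register, emit a DEF for the
 * given four values and return a src_register referencing it.
 */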
3093 static boolean
3094 make_immediate(struct svga_shader_emitter *emit,
3095 float a, float b, float c, float d,
3096 struct src_register *out )
3097 {
3098 unsigned idx = emit->nr_hw_float_const++;
3099
3100 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
3101 idx, a, b, c, d ))
3102 return FALSE;
3103
3104 *out = src_register( SVGA3DREG_CONST, idx );
3105
3106 return TRUE;
3107 }
3108
3109
3110 /**
3111 * Emit special VS instructions at top of shader.
3112 */
3113 static boolean
3114 emit_vs_preamble(struct svga_shader_emitter *emit)
3115 {
3116 if (!emit->key.vkey.need_prescale) {
3117 if (!make_immediate( emit, 0, 0, .5, .5,
3118 &emit->imm_0055))
3119 return FALSE;
3120 }
3121
3122 return TRUE;
3123 }
3124
3125
3126 /**
3127 * Emit special PS instructions at top of shader.
3128 */
3129 static boolean
3130 emit_ps_preamble(struct svga_shader_emitter *emit)
3131 {
3132 if (emit->ps_reads_pos && emit->info.reads_z) {
3133 /*
3134        * Assemble the position from various bits of inputs. Depth and W are
3135        * passed in a texcoord because D3D's vPos does not hold Z or W.
3136        * Also fix up the perspective interpolation.
3137 *
3138 * temp_pos.xy = vPos.xy
3139 * temp_pos.w = rcp(texcoord1.w);
3140 * temp_pos.z = texcoord1.z * temp_pos.w;
3141 */
3142 if (!submit_op1( emit,
3143 inst_token(SVGA3DOP_MOV),
3144 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_XY ),
3145 emit->ps_true_pos ))
3146 return FALSE;
3147
3148 if (!submit_op1( emit,
3149 inst_token(SVGA3DOP_RCP),
3150 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_W ),
3151 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_W ) ))
3152 return FALSE;
3153
3154 if (!submit_op2( emit,
3155 inst_token(SVGA3DOP_MUL),
3156 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_Z ),
3157 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_Z ),
3158 scalar( src(emit->ps_temp_pos), TGSI_SWIZZLE_W ) ))
3159 return FALSE;
3160 }
3161
3162 return TRUE;
3163 }
3164
3165
3166 /**
3167 * Emit special PS instructions at end of shader.
3168 */
3169 static boolean
3170 emit_ps_postamble(struct svga_shader_emitter *emit)
3171 {
3172 unsigned i;
3173
3174 /* PS oDepth is incredibly fragile and it's very hard to catch the
3175 * types of usage that break it during shader emit. Easier just to
3176 * redirect the main program to a temporary and then only touch
3177 * oDepth with a hand-crafted MOV below.
3178 */
3179 if (SVGA3dShaderGetRegType(emit->true_pos.value) != 0) {
3180 if (!submit_op1( emit,
3181 inst_token(SVGA3DOP_MOV),
3182 emit->true_pos,
3183 scalar(src(emit->temp_pos), TGSI_SWIZZLE_Z) ))
3184 return FALSE;
3185 }
3186
3187 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
3188 if (SVGA3dShaderGetRegType(emit->true_color_output[i].value) != 0) {
3189 /* Potentially override output colors with white for XOR
3190 * logicop workaround.
3191 */
3192 if (emit->unit == PIPE_SHADER_FRAGMENT &&
3193 emit->key.fkey.white_fragments) {
3194 struct src_register one = get_one_immediate(emit);
3195
3196 if (!submit_op1( emit,
3197 inst_token(SVGA3DOP_MOV),
3198 emit->true_color_output[i],
3199 one ))
3200 return FALSE;
3201 }
3202 else if (emit->unit == PIPE_SHADER_FRAGMENT &&
3203 i < emit->key.fkey.write_color0_to_n_cbufs) {
3204 /* Write temp color output [0] to true output [i] */
3205 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV),
3206 emit->true_color_output[i],
3207 src(emit->temp_color_output[0]))) {
3208 return FALSE;
3209 }
3210 }
3211 else {
3212 if (!submit_op1( emit,
3213 inst_token(SVGA3DOP_MOV),
3214 emit->true_color_output[i],
3215 src(emit->temp_color_output[i]) ))
3216 return FALSE;
3217 }
3218 }
3219 }
3220
3221 return TRUE;
3222 }
3223
3224
3225 /**
3226 * Emit special VS instructions at end of shader.
3227 */
3228 static boolean
3229 emit_vs_postamble(struct svga_shader_emitter *emit)
3230 {
3231 /* PSIZ output is incredibly fragile and it's very hard to catch
3232 * the types of usage that break it during shader emit. Easier
3233 * just to redirect the main program to a temporary and then only
3234 * touch PSIZ with a hand-crafted MOV below.
3235 */
3236 if (SVGA3dShaderGetRegType(emit->true_psiz.value) != 0) {
3237 if (!submit_op1( emit,
3238 inst_token(SVGA3DOP_MOV),
3239 emit->true_psiz,
3240 scalar(src(emit->temp_psiz), TGSI_SWIZZLE_X) ))
3241 return FALSE;
3242 }
3243
3244 /* Need to perform various manipulations on vertex position to cope
3245 * with the different GL and D3D clip spaces.
3246 */
3247 if (emit->key.vkey.need_prescale) {
3248 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3249 SVGA3dShaderDestToken depth = emit->depth_pos;
3250 SVGA3dShaderDestToken pos = emit->true_pos;
3251 unsigned offset = emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
3252 struct src_register prescale_scale = src_register( SVGA3DREG_CONST,
3253 offset + 0 );
3254 struct src_register prescale_trans = src_register( SVGA3DREG_CONST,
3255 offset + 1 );
3256
3257 if (!submit_op1( emit,
3258 inst_token(SVGA3DOP_MOV),
3259 writemask(depth, TGSI_WRITEMASK_W),
3260 scalar(src(temp_pos), TGSI_SWIZZLE_W) ))
3261 return FALSE;
3262
3263 /* MUL temp_pos.xyz, temp_pos, prescale.scale
3264 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
3265 * --> Note that prescale.trans.w == 0
3266 */
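      /* Since prescale.trans.w == 0, the MAD leaves result.position.w equal
       * to temp_pos.w; only x/y/z are scaled and translated.
       */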
3267 if (!submit_op2( emit,
3268 inst_token(SVGA3DOP_MUL),
3269 writemask(temp_pos, TGSI_WRITEMASK_XYZ),
3270 src(temp_pos),
3271 prescale_scale ))
3272 return FALSE;
3273
3274 if (!submit_op3( emit,
3275 inst_token(SVGA3DOP_MAD),
3276 pos,
3277 swizzle(src(temp_pos), 3, 3, 3, 3),
3278 prescale_trans,
3279 src(temp_pos)))
3280 return FALSE;
3281
3282 /* Also write to depth value */
3283 if (!submit_op3( emit,
3284 inst_token(SVGA3DOP_MAD),
3285 writemask(depth, TGSI_WRITEMASK_Z),
3286 swizzle(src(temp_pos), 3, 3, 3, 3),
3287 prescale_trans,
3288 src(temp_pos) ))
3289 return FALSE;
3290 }
3291 else {
3292 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3293 SVGA3dShaderDestToken depth = emit->depth_pos;
3294 SVGA3dShaderDestToken pos = emit->true_pos;
3295 struct src_register imm_0055 = emit->imm_0055;
3296
3297 /* Adjust GL clipping coordinate space to hardware (D3D-style):
3298 *
3299 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
3300 * MOV result.position, temp_pos
3301 */
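      /* The DP4 with {0,0,.5,.5} computes z' = (z + w) / 2, mapping GL's
       * [-w, w] clip-space depth range onto D3D's [0, w].
       */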
3302 if (!submit_op2( emit,
3303 inst_token(SVGA3DOP_DP4),
3304 writemask(temp_pos, TGSI_WRITEMASK_Z),
3305 imm_0055,
3306 src(temp_pos) ))
3307 return FALSE;
3308
3309 if (!submit_op1( emit,
3310 inst_token(SVGA3DOP_MOV),
3311 pos,
3312 src(temp_pos) ))
3313 return FALSE;
3314
3315 /* Move the manipulated depth into the extra texcoord reg */
3316 if (!submit_op1( emit,
3317 inst_token(SVGA3DOP_MOV),
3318 writemask(depth, TGSI_WRITEMASK_ZW),
3319 src(temp_pos) ))
3320 return FALSE;
3321 }
3322
3323 return TRUE;
3324 }
3325
3326
3327 /**
3328 * For the pixel shader: emit the code which chooses the front
3329 * or back face color depending on triangle orientation.
3330 * This happens at the top of the fragment shader.
3331 *
3332 * 0: IF VFACE :4
3333 * 1: COLOR = FrontColor;
3334 * 2: ELSE
3335 * 3: COLOR = BackColor;
3336 * 4: ENDIF
3337 */
3338 static boolean
3339 emit_light_twoside(struct svga_shader_emitter *emit)
3340 {
3341 struct src_register vface, zero;
3342 struct src_register front[2];
3343 struct src_register back[2];
3344 SVGA3dShaderDestToken color[2];
3345 int count = emit->internal_color_count;
3346 int i;
3347 SVGA3dShaderInstToken if_token;
3348
3349 if (count == 0)
3350 return TRUE;
3351
3352 vface = get_vface( emit );
3353 zero = get_zero_immediate(emit);
3354
3355 /* Can't use get_temp() to allocate the color reg as such
3356 * temporaries will be reclaimed after each instruction by the call
3357 * to reset_temp_regs().
3358 */
3359 for (i = 0; i < count; i++) {
3360 color[i] = dst_register( SVGA3DREG_TEMP, emit->nr_hw_temp++ );
3361 front[i] = emit->input_map[emit->internal_color_idx[i]];
3362
3363 /* Back is always the next input:
3364 */
3365 back[i] = front[i];
3366 back[i].base.num = front[i].base.num + 1;
3367
3368 /* Reassign the input_map to the actual front-face color:
3369 */
3370 emit->input_map[emit->internal_color_idx[i]] = src(color[i]);
3371 }
3372
3373 if_token = inst_token( SVGA3DOP_IFC );
3374
3375 if (emit->key.fkey.front_ccw)
3376 if_token.control = SVGA3DOPCOMP_LT;
3377 else
3378 if_token.control = SVGA3DOPCOMP_GT;
3379
3380 if (!(emit_instruction( emit, if_token ) &&
3381 emit_src( emit, vface ) &&
3382 emit_src( emit, zero ) ))
3383 return FALSE;
3384
3385 for (i = 0; i < count; i++) {
3386 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], front[i] ))
3387 return FALSE;
3388 }
3389
3390 if (!(emit_instruction( emit, inst_token( SVGA3DOP_ELSE))))
3391 return FALSE;
3392
3393 for (i = 0; i < count; i++) {
3394 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], back[i] ))
3395 return FALSE;
3396 }
3397
3398 if (!emit_instruction( emit, inst_token( SVGA3DOP_ENDIF ) ))
3399 return FALSE;
3400
3401 return TRUE;
3402 }
3403
3404
3405 /**
3406 * Emit special setup code for the front/back face register in the FS.
3407 * 0: SETP_GT TEMP, VFACE, 0
3408 * where TEMP is a fake frontface register
3409 */
3410 static boolean
3411 emit_frontface(struct svga_shader_emitter *emit)
3412 {
3413 struct src_register vface;
3414 SVGA3dShaderDestToken temp;
3415 struct src_register pass, fail;
3416
3417 vface = get_vface( emit );
3418
3419 /* Can't use get_temp() to allocate the fake frontface reg as such
3420 * temporaries will be reclaimed after each instruction by the call
3421 * to reset_temp_regs().
3422 */
3423 temp = dst_register( SVGA3DREG_TEMP,
3424 emit->nr_hw_temp++ );
3425
3426 if (emit->key.fkey.front_ccw) {
3427 pass = get_zero_immediate(emit);
3428 fail = get_one_immediate(emit);
3429 } else {
3430 pass = get_one_immediate(emit);
3431 fail = get_zero_immediate(emit);
3432 }
3433
3434 if (!emit_conditional(emit, PIPE_FUNC_GREATER,
3435 temp, vface, get_zero_immediate(emit),
3436 pass, fail))
3437 return FALSE;
3438
3439 /* Reassign the input_map to the actual front-face color:
3440 */
3441 emit->input_map[emit->internal_frontface_idx] = src(temp);
3442
3443 return TRUE;
3444 }
3445
3446
3447 /**
3448 * Emit code to invert the T component of the incoming texture coordinate.
3449 * This is used for drawing point sprites when
3450 * pipe_rasterizer_state::sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT.
3451 */
3452 static boolean
3453 emit_inverted_texcoords(struct svga_shader_emitter *emit)
3454 {
3455 unsigned inverted_texcoords = emit->inverted_texcoords;
3456
3457 while (inverted_texcoords) {
3458 const unsigned unit = ffs(inverted_texcoords) - 1;
3459
3460 assert(emit->inverted_texcoords & (1 << unit));
3461
3462 assert(unit < Elements(emit->ps_true_texcoord));
3463
3464 assert(unit < Elements(emit->ps_inverted_texcoord_input));
3465
3466 assert(emit->ps_inverted_texcoord_input[unit]
3467 < Elements(emit->input_map));
3468
3469       /* inverted = coord * (1, -1, 1, 1) + (0, 1, 0, 0), i.e. t' = 1 - t */
3470 if (!submit_op3(emit,
3471 inst_token(SVGA3DOP_MAD),
3472 dst(emit->ps_inverted_texcoord[unit]),
3473 emit->ps_true_texcoord[unit],
3474 get_immediate(emit, 1.0f, -1.0f, 1.0f, 1.0f),
3475 get_immediate(emit, 0.0f, 1.0f, 0.0f, 0.0f)))
3476 return FALSE;
3477
3478 /* Reassign the input_map entry to the new texcoord register */
3479 emit->input_map[emit->ps_inverted_texcoord_input[unit]] =
3480 emit->ps_inverted_texcoord[unit];
3481
3482 inverted_texcoords &= ~(1 << unit);
3483 }
3484
3485 return TRUE;
3486 }
3487
3488
3489 /**
3490 * Emit code to adjust vertex shader inputs/attributes:
3491 * - Change range from [0,1] to [-1,1] (for normalized byte/short attribs).
3492 * - Set attrib W component = 1.
3493 */
3494 static boolean
3495 emit_adjusted_vertex_attribs(struct svga_shader_emitter *emit)
3496 {
3497 unsigned adjust_mask = (emit->key.vkey.adjust_attrib_range |
3498 emit->key.vkey.adjust_attrib_w_1);
3499
3500 while (adjust_mask) {
3501 /* Adjust vertex attrib range and/or set W component = 1 */
3502 const unsigned index = u_bit_scan(&adjust_mask);
3503 struct src_register tmp;
3504
3505 /* allocate a temp reg */
3506 tmp = src_register(SVGA3DREG_TEMP, emit->nr_hw_temp);
3507 emit->nr_hw_temp++;
3508
3509 if (emit->key.vkey.adjust_attrib_range & (1 << index)) {
3510 /* The vertex input/attribute is supposed to be a signed value in
3511 * the range [-1,1] but we actually fetched/converted it to the
3512 * range [0,1]. This most likely happens when the app specifies a
3513 * signed byte attribute but we interpreted it as unsigned bytes.
3514 * See also svga_translate_vertex_format().
3515 *
3516 * Here, we emit some extra instructions to adjust
3517 * the attribute values from [0,1] to [-1,1].
3518 *
3519 * The adjustment we implement is:
3520 * new_attrib = attrib * 2.0;
3521 * if (attrib >= 0.5)
3522 * new_attrib = new_attrib - 2.0;
3523 * This isn't exactly right (it's off by a bit or so) but close enough.
3524 */
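         /* Illustrative numbers (not from the original comments): a signed
          * byte of -64 arrives as roughly 192/255 = 0.753; 0.753 * 2 = 1.506,
          * and since 0.753 >= 0.5 we subtract 2.0 to get -0.494, close to the
          * intended -64/127 = -0.504.
          */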
3525 SVGA3dShaderDestToken pred_reg = dst_register(SVGA3DREG_PREDICATE, 0);
3526
3527 /* tmp = attrib * 2.0 */
3528 if (!submit_op2(emit,
3529 inst_token(SVGA3DOP_MUL),
3530 dst(tmp),
3531 emit->input_map[index],
3532 get_two_immediate(emit)))
3533 return FALSE;
3534
3535 /* pred = (attrib >= 0.5) */
3536 if (!submit_op2(emit,
3537 inst_token_setp(SVGA3DOPCOMP_GE),
3538 pred_reg,
3539 emit->input_map[index], /* vert attrib */
3540 get_half_immediate(emit))) /* 0.5 */
3541 return FALSE;
3542
3543 /* sub(pred) tmp, tmp, 2.0 */
3544 if (!submit_op3(emit,
3545 inst_token_predicated(SVGA3DOP_SUB),
3546 dst(tmp),
3547 src(pred_reg),
3548 tmp,
3549 get_two_immediate(emit)))
3550 return FALSE;
3551 }
3552 else {
3553 /* just copy the vertex input attrib to the temp register */
3554 if (!submit_op1(emit,
3555 inst_token(SVGA3DOP_MOV),
3556 dst(tmp),
3557 emit->input_map[index]))
3558 return FALSE;
3559 }
3560
3561 if (emit->key.vkey.adjust_attrib_w_1 & (1 << index)) {
3562 /* move 1 into W position of tmp */
3563 if (!submit_op1(emit,
3564 inst_token(SVGA3DOP_MOV),
3565 writemask(dst(tmp), TGSI_WRITEMASK_W),
3566 get_one_immediate(emit)))
3567 return FALSE;
3568 }
3569
3570 /* Reassign the input_map entry to the new tmp register */
3571 emit->input_map[index] = tmp;
3572 }
3573
3574 return TRUE;
3575 }
3576
3577
3578 /**
3579 * Determine if we need to create the "common" immediate value which is
3580 * used for generating useful vector constants such as {0,0,0,0} and
3581 * {1,1,1,1}.
3582 * We could just do this all the time except that we want to conserve
3583 * registers whenever possible.
3584 */
3585 static boolean
3586 needs_to_create_common_immediate(const struct svga_shader_emitter *emit)
3587 {
3588 unsigned i;
3589
3590 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3591 if (emit->key.fkey.light_twoside)
3592 return TRUE;
3593
3594 if (emit->key.fkey.white_fragments)
3595 return TRUE;
3596
3597 if (emit->emit_frontface)
3598 return TRUE;
3599
3600 if (emit->info.opcode_count[TGSI_OPCODE_DST] >= 1 ||
3601 emit->info.opcode_count[TGSI_OPCODE_SSG] >= 1 ||
3602 emit->info.opcode_count[TGSI_OPCODE_LIT] >= 1)
3603 return TRUE;
3604
3605 if (emit->inverted_texcoords)
3606 return TRUE;
3607
3608 /* look for any PIPE_SWIZZLE_ZERO/ONE terms */
3609 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3610 if (emit->key.fkey.tex[i].swizzle_r > PIPE_SWIZZLE_ALPHA ||
3611 emit->key.fkey.tex[i].swizzle_g > PIPE_SWIZZLE_ALPHA ||
3612 emit->key.fkey.tex[i].swizzle_b > PIPE_SWIZZLE_ALPHA ||
3613 emit->key.fkey.tex[i].swizzle_a > PIPE_SWIZZLE_ALPHA)
3614 return TRUE;
3615 }
3616
3617 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3618 if (emit->key.fkey.tex[i].compare_mode
3619 == PIPE_TEX_COMPARE_R_TO_TEXTURE)
3620 return TRUE;
3621 }
3622 }
3623 else if (emit->unit == PIPE_SHADER_VERTEX) {
3624 if (emit->info.opcode_count[TGSI_OPCODE_CMP] >= 1)
3625 return TRUE;
3626 if (emit->key.vkey.adjust_attrib_range ||
3627 emit->key.vkey.adjust_attrib_w_1)
3628 return TRUE;
3629 }
3630
3631 if (emit->info.opcode_count[TGSI_OPCODE_IF] >= 1 ||
3632 emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1 ||
3633 emit->info.opcode_count[TGSI_OPCODE_DDX] >= 1 ||
3634 emit->info.opcode_count[TGSI_OPCODE_DDY] >= 1 ||
3635 emit->info.opcode_count[TGSI_OPCODE_ROUND] >= 1 ||
3636 emit->info.opcode_count[TGSI_OPCODE_SGE] >= 1 ||
3637 emit->info.opcode_count[TGSI_OPCODE_SGT] >= 1 ||
3638 emit->info.opcode_count[TGSI_OPCODE_SLE] >= 1 ||
3639 emit->info.opcode_count[TGSI_OPCODE_SLT] >= 1 ||
3640 emit->info.opcode_count[TGSI_OPCODE_SNE] >= 1 ||
3641 emit->info.opcode_count[TGSI_OPCODE_SEQ] >= 1 ||
3642 emit->info.opcode_count[TGSI_OPCODE_EXP] >= 1 ||
3643 emit->info.opcode_count[TGSI_OPCODE_LOG] >= 1 ||
3644 emit->info.opcode_count[TGSI_OPCODE_XPD] >= 1 ||
3645 emit->info.opcode_count[TGSI_OPCODE_KILL] >= 1)
3646 return TRUE;
3647
3648 return FALSE;
3649 }
3650
3651
3652 /**
3653 * Do we need to create a looping constant?
3654 */
3655 static boolean
3656 needs_to_create_loop_const(const struct svga_shader_emitter *emit)
3657 {
3658 return (emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1);
3659 }
3660
3661
3662 static boolean
3663 needs_to_create_arl_consts(const struct svga_shader_emitter *emit)
3664 {
3665 return (emit->num_arl_consts > 0);
3666 }
3667
3668
3669 static boolean
3670 pre_parse_add_indirect( struct svga_shader_emitter *emit,
3671 int num, int current_arl)
3672 {
3673 int i;
3674 assert(num < 0);
3675
3676 for (i = 0; i < emit->num_arl_consts; ++i) {
3677 if (emit->arl_consts[i].arl_num == current_arl)
3678 break;
3679 }
3680 /* new entry */
3681 if (emit->num_arl_consts == i) {
3682 ++emit->num_arl_consts;
3683 }
3684 emit->arl_consts[i].number = (emit->arl_consts[i].number > num) ?
3685 num :
3686 emit->arl_consts[i].number;
3687 emit->arl_consts[i].arl_num = current_arl;
3688 return TRUE;
3689 }
3690
3691
3692 static boolean
3693 pre_parse_instruction( struct svga_shader_emitter *emit,
3694 const struct tgsi_full_instruction *insn,
3695 int current_arl)
3696 {
3697 if (insn->Src[0].Register.Indirect &&
3698 insn->Src[0].Indirect.File == TGSI_FILE_ADDRESS) {
3699 const struct tgsi_full_src_register *reg = &insn->Src[0];
3700 if (reg->Register.Index < 0) {
3701 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3702 }
3703 }
3704
3705 if (insn->Src[1].Register.Indirect &&
3706 insn->Src[1].Indirect.File == TGSI_FILE_ADDRESS) {
3707 const struct tgsi_full_src_register *reg = &insn->Src[1];
3708 if (reg->Register.Index < 0) {
3709 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3710 }
3711 }
3712
3713 if (insn->Src[2].Register.Indirect &&
3714 insn->Src[2].Indirect.File == TGSI_FILE_ADDRESS) {
3715 const struct tgsi_full_src_register *reg = &insn->Src[2];
3716 if (reg->Register.Index < 0) {
3717 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3718 }
3719 }
3720
3721 return TRUE;
3722 }
3723
3724
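/**
 * Scan the token stream ahead of the main translation pass: count ARL
 * instructions and record any negative indirect-addressing offsets so the
 * matching ARL compensation constants can be created later.
 */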
3725 static boolean
3726 pre_parse_tokens( struct svga_shader_emitter *emit,
3727 const struct tgsi_token *tokens )
3728 {
3729 struct tgsi_parse_context parse;
3730 int current_arl = 0;
3731
3732 tgsi_parse_init( &parse, tokens );
3733
3734 while (!tgsi_parse_end_of_tokens( &parse )) {
3735 tgsi_parse_token( &parse );
3736 switch (parse.FullToken.Token.Type) {
3737 case TGSI_TOKEN_TYPE_IMMEDIATE:
3738 case TGSI_TOKEN_TYPE_DECLARATION:
3739 break;
3740 case TGSI_TOKEN_TYPE_INSTRUCTION:
3741 if (parse.FullToken.FullInstruction.Instruction.Opcode ==
3742 TGSI_OPCODE_ARL) {
3743 ++current_arl;
3744 }
3745 if (!pre_parse_instruction( emit, &parse.FullToken.FullInstruction,
3746 current_arl ))
3747 return FALSE;
3748 break;
3749 default:
3750 break;
3751 }
3752
3753 }
3754 return TRUE;
3755 }
3756
3757
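/**
 * Emit the shared helper code at the top of the shader: the common/loop/ARL
 * constants, plus per-unit setup (PS preamble, two-sided lighting,
 * front-face and inverted-texcoord fixups for fragment shaders; attribute
 * adjustments for vertex shaders).
 */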
3758 static boolean
3759 svga_shader_emit_helpers(struct svga_shader_emitter *emit)
3760 {
3761 if (needs_to_create_common_immediate( emit )) {
3762 create_common_immediate( emit );
3763 }
3764 if (needs_to_create_loop_const( emit )) {
3765 create_loop_const( emit );
3766 }
3767 if (needs_to_create_arl_consts( emit )) {
3768 create_arl_consts( emit );
3769 }
3770
3771 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3772 if (!emit_ps_preamble( emit ))
3773 return FALSE;
3774
3775 if (emit->key.fkey.light_twoside) {
3776 if (!emit_light_twoside( emit ))
3777 return FALSE;
3778 }
3779 if (emit->emit_frontface) {
3780 if (!emit_frontface( emit ))
3781 return FALSE;
3782 }
3783 if (emit->inverted_texcoords) {
3784 if (!emit_inverted_texcoords( emit ))
3785 return FALSE;
3786 }
3787 }
3788 else {
3789 assert(emit->unit == PIPE_SHADER_VERTEX);
3790 if (emit->key.vkey.adjust_attrib_range ||
3791 emit->key.vkey.adjust_attrib_w_1) {
3792 if (!emit_adjusted_vertex_attribs(emit))
3793 return FALSE;
3794 }
3795 }
3796
3797
3798 return TRUE;
3799 }
3800
3801
3802 /**
3803  * This is the main entry point into the TGSI instruction translator.
3804 * Translate TGSI shader tokens into an SVGA shader.
3805 */
3806 boolean
3807 svga_shader_emit_instructions(struct svga_shader_emitter *emit,
3808 const struct tgsi_token *tokens)
3809 {
3810 struct tgsi_parse_context parse;
3811 boolean ret = TRUE;
3812 boolean helpers_emitted = FALSE;
3813 unsigned line_nr = 0;
3814
3815 tgsi_parse_init( &parse, tokens );
3816 emit->internal_imm_count = 0;
3817
3818 if (emit->unit == PIPE_SHADER_VERTEX) {
3819 ret = emit_vs_preamble( emit );
3820 if (!ret)
3821 goto done;
3822 }
3823
3824 pre_parse_tokens(emit, tokens);
3825
3826 while (!tgsi_parse_end_of_tokens( &parse )) {
3827 tgsi_parse_token( &parse );
3828
3829 switch (parse.FullToken.Token.Type) {
3830 case TGSI_TOKEN_TYPE_IMMEDIATE:
3831 ret = svga_emit_immediate( emit, &parse.FullToken.FullImmediate );
3832 if (!ret)
3833 goto done;
3834 break;
3835
3836 case TGSI_TOKEN_TYPE_DECLARATION:
3837 ret = svga_translate_decl_sm30( emit, &parse.FullToken.FullDeclaration );
3838 if (!ret)
3839 goto done;
3840 break;
3841
3842 case TGSI_TOKEN_TYPE_INSTRUCTION:
3843 if (!helpers_emitted) {
3844 if (!svga_shader_emit_helpers( emit ))
3845 goto done;
3846 helpers_emitted = TRUE;
3847 }
3848 ret = svga_emit_instruction( emit,
3849 line_nr++,
3850 &parse.FullToken.FullInstruction );
3851 if (!ret)
3852 goto done;
3853 break;
3854 default:
3855 break;
3856 }
3857
3858 reset_temp_regs( emit );
3859 }
3860
3861    /* Need to terminate the current subroutine. Note that the
3862     * hardware requires subroutines to be terminated with RET,
3863     * and the whole shader with END.
3864 */
3865 if (!emit->in_main_func) {
3866 ret = emit_instruction( emit, inst_token( SVGA3DOP_RET ) );
3867 if (!ret)
3868 goto done;
3869 }
3870
3871 assert(emit->dynamic_branching_level == 0);
3872
3873 /* Need to terminate the whole shader:
3874 */
3875 ret = emit_instruction( emit, inst_token( SVGA3DOP_END ) );
3876 if (!ret)
3877 goto done;
3878
3879 done:
3880 tgsi_parse_free( &parse );
3881 return ret;
3882 }