svga: add comments, etc to svga_tgsi_insn.c code
[mesa.git] / src / gallium / drivers / svga / svga_tgsi_insn.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_dump.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
32
33 #include "svga_tgsi_emit.h"
34 #include "svga_context.h"
35
36
37 static boolean emit_vs_postamble( struct svga_shader_emitter *emit );
38 static boolean emit_ps_postamble( struct svga_shader_emitter *emit );
39
40
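/**
 * Map a TGSI opcode to the corresponding SVGA3D shader opcode.
 * Only opcodes with a direct one-to-one mapping are listed here;
 * anything else is presumably lowered to an instruction sequence by
 * one of the emit_* helpers later in this file.
 */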
41 static unsigned
42 translate_opcode(uint opcode)
43 {
44 switch (opcode) {
45 case TGSI_OPCODE_ABS: return SVGA3DOP_ABS;
46 case TGSI_OPCODE_ADD: return SVGA3DOP_ADD;
47 case TGSI_OPCODE_DP2A: return SVGA3DOP_DP2ADD;
48 case TGSI_OPCODE_DP3: return SVGA3DOP_DP3;
49 case TGSI_OPCODE_DP4: return SVGA3DOP_DP4;
50 case TGSI_OPCODE_FRC: return SVGA3DOP_FRC;
51 case TGSI_OPCODE_MAD: return SVGA3DOP_MAD;
52 case TGSI_OPCODE_MAX: return SVGA3DOP_MAX;
53 case TGSI_OPCODE_MIN: return SVGA3DOP_MIN;
54 case TGSI_OPCODE_MOV: return SVGA3DOP_MOV;
55 case TGSI_OPCODE_MUL: return SVGA3DOP_MUL;
56 case TGSI_OPCODE_NOP: return SVGA3DOP_NOP;
57 case TGSI_OPCODE_NRM4: return SVGA3DOP_NRM;
58 default:
59 assert(!"svga: unexpected opcode in translate_opcode()");
60 return SVGA3DOP_LAST_INST;
61 }
62 }
63
64
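/**
 * Map a TGSI register file to the corresponding SVGA3D register file.
 * Note that both TGSI constants and TGSI immediates land in the D3D
 * constant file; translate_src_register() offsets immediate indices
 * by emit->imm_start to keep the two ranges apart.
 */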
65 static unsigned
66 translate_file(unsigned file)
67 {
68 switch (file) {
69 case TGSI_FILE_TEMPORARY: return SVGA3DREG_TEMP;
70 case TGSI_FILE_INPUT: return SVGA3DREG_INPUT;
71 case TGSI_FILE_OUTPUT: return SVGA3DREG_OUTPUT; /* VS3.0+ only */
72 case TGSI_FILE_IMMEDIATE: return SVGA3DREG_CONST;
73 case TGSI_FILE_CONSTANT: return SVGA3DREG_CONST;
74 case TGSI_FILE_SAMPLER: return SVGA3DREG_SAMPLER;
75 case TGSI_FILE_ADDRESS: return SVGA3DREG_ADDR;
76 default:
77 assert(!"svga: unexpected register file in translate_file()");
78 return SVGA3DREG_TEMP;
79 }
80 }
81
82
83 /**
84 * Translate a TGSI destination register to an SVGA3DShaderDestToken.
85 * \param insn the TGSI instruction
86 * \param idx which TGSI dest register to translate (usually (always?) zero)
87 */
88 static SVGA3dShaderDestToken
89 translate_dst_register( struct svga_shader_emitter *emit,
90 const struct tgsi_full_instruction *insn,
91 unsigned idx )
92 {
93 const struct tgsi_full_dst_register *reg = &insn->Dst[idx];
94 SVGA3dShaderDestToken dest;
95
96 switch (reg->Register.File) {
97 case TGSI_FILE_OUTPUT:
98 /* Output registers encode semantic information in their name.
99 * Need to look up a table built at declaration time:
100 */
101 dest = emit->output_map[reg->Register.Index];
102 break;
103
104 default:
105 {
106 unsigned index = reg->Register.Index;
107 assert(index < SVGA3D_TEMPREG_MAX);
108 index = MIN2(index, SVGA3D_TEMPREG_MAX - 1);
109 dest = dst_register(translate_file(reg->Register.File), index);
110 }
111 break;
112 }
113
114 if (reg->Register.Indirect) {
115 debug_warning("Indirect indexing of dest registers is not supported!\n");
116 }
117
118 dest.mask = reg->Register.WriteMask;
119 assert(dest.mask);
120
121 if (insn->Instruction.Saturate)
122 dest.dstMod = SVGA3DDSTMOD_SATURATE;
123
124 return dest;
125 }
126
127
128 /**
129 * Apply a swizzle to a src_register, returning a new src_register
130 * Ex: swizzle(SRC.ZZYY, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y)
131 * would return SRC.YYZZ
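 * Each channel selector is a 2-bit field in src.base.swizzle, so the
 * new selector for a channel is simply the old selector found at the
 * position named by the corresponding argument.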
132 */
133 static struct src_register
134 swizzle(struct src_register src,
135 unsigned x, unsigned y, unsigned z, unsigned w)
136 {
137 assert(x < 4);
138 assert(y < 4);
139 assert(z < 4);
140 assert(w < 4);
141 x = (src.base.swizzle >> (x * 2)) & 0x3;
142 y = (src.base.swizzle >> (y * 2)) & 0x3;
143 z = (src.base.swizzle >> (z * 2)) & 0x3;
144 w = (src.base.swizzle >> (w * 2)) & 0x3;
145
146 src.base.swizzle = TRANSLATE_SWIZZLE(x, y, z, w);
147
148 return src;
149 }
150
151
152 /**
153 * Apply a "scalar" swizzle to a src_register returning a new
154 * src_register where all the swizzle terms are the same.
155 * Ex: scalar(SRC.WZYX, SWIZZLE_Y) would return SRC.ZZZZ
156 */
157 static struct src_register
158 scalar(struct src_register src, unsigned comp)
159 {
160 assert(comp < 4);
161 return swizzle( src, comp, comp, comp, comp );
162 }
163
164
165 static boolean
166 svga_arl_needs_adjustment( const struct svga_shader_emitter *emit )
167 {
168 int i;
169
170 for (i = 0; i < emit->num_arl_consts; ++i) {
171 if (emit->arl_consts[i].arl_num == emit->current_arl)
172 return TRUE;
173 }
174 return FALSE;
175 }
176
177
178 static int
179 svga_arl_adjustment( const struct svga_shader_emitter *emit )
180 {
181 int i;
182
183 for (i = 0; i < emit->num_arl_consts; ++i) {
184 if (emit->arl_consts[i].arl_num == emit->current_arl)
185 return emit->arl_consts[i].number;
186 }
187 return 0;
188 }
189
190
191 /**
192 * Translate a TGSI src register to a src_register.
193 */
194 static struct src_register
195 translate_src_register( const struct svga_shader_emitter *emit,
196 const struct tgsi_full_src_register *reg )
197 {
198 struct src_register src;
199
200 switch (reg->Register.File) {
201 case TGSI_FILE_INPUT:
202 /* Input registers are referred to by their semantic name rather
203 * than by index. Use the mapping built up from the declarations:
204 */
205 src = emit->input_map[reg->Register.Index];
206 break;
207
208 case TGSI_FILE_IMMEDIATE:
209 /* Immediates are appended after TGSI constants in the D3D
210 * constant buffer.
211 */
212 src = src_register( translate_file( reg->Register.File ),
213 reg->Register.Index + emit->imm_start );
214 break;
215
216 default:
217 src = src_register( translate_file( reg->Register.File ),
218 reg->Register.Index );
219 break;
220 }
221
222 /* Indirect addressing.
223 */
224 if (reg->Register.Indirect) {
225 if (emit->unit == PIPE_SHADER_FRAGMENT) {
226 /* Pixel shaders have only loop registers for relative
227 * addressing into inputs. Ignore the redundant address
228 * register; the contents of aL should be in sync with it.
229 */
230 if (reg->Register.File == TGSI_FILE_INPUT) {
231 src.base.relAddr = 1;
232 src.indirect = src_token(SVGA3DREG_LOOP, 0);
233 }
234 }
235 else {
236 /* Constant buffers only.
237 */
238 if (reg->Register.File == TGSI_FILE_CONSTANT) {
239 /* we shift the offset towards the minimum */
240 if (svga_arl_needs_adjustment( emit )) {
241 src.base.num -= svga_arl_adjustment( emit );
242 }
243 src.base.relAddr = 1;
244
245 /* Not really sure what should go in the second token:
246 */
247 src.indirect = src_token( SVGA3DREG_ADDR,
248 reg->Indirect.Index );
249
250 src.indirect.swizzle = SWIZZLE_XXXX;
251 }
252 }
253 }
254
255 src = swizzle( src,
256 reg->Register.SwizzleX,
257 reg->Register.SwizzleY,
258 reg->Register.SwizzleZ,
259 reg->Register.SwizzleW );
260
261 /* src.mod isn't a bitfield, unfortunately:
262 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
263 */
264 if (reg->Register.Absolute) {
265 if (reg->Register.Negate)
266 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
267 else
268 src.base.srcMod = SVGA3DSRCMOD_ABS;
269 }
270 else {
271 if (reg->Register.Negate)
272 src.base.srcMod = SVGA3DSRCMOD_NEG;
273 else
274 src.base.srcMod = SVGA3DSRCMOD_NONE;
275 }
276
277 return src;
278 }
279
280
281 /*
282 * Get a temporary register.
283 * Note: if we exceed the temporary register limit we just use
284 * register SVGA3D_TEMPREG_MAX - 1.
285 */
286 static SVGA3dShaderDestToken
287 get_temp( struct svga_shader_emitter *emit )
288 {
289 int i = emit->nr_hw_temp + emit->internal_temp_count++;
290 assert(i < SVGA3D_TEMPREG_MAX);
291 i = MIN2(i, SVGA3D_TEMPREG_MAX - 1);
292 return dst_register( SVGA3DREG_TEMP, i );
293 }
294
295
296 /**
297 * Release a single temp. Currently only effective if it was the last
298 * allocated temp, otherwise release will be delayed until the next
299 * call to reset_temp_regs().
300 */
301 static void
302 release_temp( struct svga_shader_emitter *emit,
303 SVGA3dShaderDestToken temp )
304 {
305 if (temp.num == emit->internal_temp_count - 1)
306 emit->internal_temp_count--;
307 }
308
309
310 /**
311 * Release all temps.
312 */
313 static void
314 reset_temp_regs(struct svga_shader_emitter *emit)
315 {
316 emit->internal_temp_count = 0;
317 }
318
319
320 /** Emit bytecode for a src_register */
321 static boolean
322 emit_src(struct svga_shader_emitter *emit, const struct src_register src)
323 {
324 if (src.base.relAddr) {
325 assert(src.base.reserved0);
326 assert(src.indirect.reserved0);
327 return (svga_shader_emit_dword( emit, src.base.value ) &&
328 svga_shader_emit_dword( emit, src.indirect.value ));
329 }
330 else {
331 assert(src.base.reserved0);
332 return svga_shader_emit_dword( emit, src.base.value );
333 }
334 }
335
336
337 /** Emit bytecode for a dst_register */
338 static boolean
339 emit_dst(struct svga_shader_emitter *emit, SVGA3dShaderDestToken dest)
340 {
341 assert(dest.reserved0);
342 assert(dest.mask);
343 return svga_shader_emit_dword( emit, dest.value );
344 }
345
346
347 /** Emit bytecode for a 1-operand instruction */
348 static boolean
349 emit_op1(struct svga_shader_emitter *emit,
350 SVGA3dShaderInstToken inst,
351 SVGA3dShaderDestToken dest,
352 struct src_register src0)
353 {
354 return (emit_instruction(emit, inst) &&
355 emit_dst(emit, dest) &&
356 emit_src(emit, src0));
357 }
358
359
360 /** Emit bytecode for a 2-operand instruction */
361 static boolean
362 emit_op2(struct svga_shader_emitter *emit,
363 SVGA3dShaderInstToken inst,
364 SVGA3dShaderDestToken dest,
365 struct src_register src0,
366 struct src_register src1)
367 {
368 return (emit_instruction(emit, inst) &&
369 emit_dst(emit, dest) &&
370 emit_src(emit, src0) &&
371 emit_src(emit, src1));
372 }
373
374
375 /** Emit bytecode for a 3-operand instruction */
376 static boolean
377 emit_op3(struct svga_shader_emitter *emit,
378 SVGA3dShaderInstToken inst,
379 SVGA3dShaderDestToken dest,
380 struct src_register src0,
381 struct src_register src1,
382 struct src_register src2)
383 {
384 return (emit_instruction(emit, inst) &&
385 emit_dst(emit, dest) &&
386 emit_src(emit, src0) &&
387 emit_src(emit, src1) &&
388 emit_src(emit, src2));
389 }
390
391
392 /** Emit bytecode for a 4-operand instruction */
393 static boolean
394 emit_op4(struct svga_shader_emitter *emit,
395 SVGA3dShaderInstToken inst,
396 SVGA3dShaderDestToken dest,
397 struct src_register src0,
398 struct src_register src1,
399 struct src_register src2,
400 struct src_register src3)
401 {
402 return (emit_instruction(emit, inst) &&
403 emit_dst(emit, dest) &&
404 emit_src(emit, src0) &&
405 emit_src(emit, src1) &&
406 emit_src(emit, src2) &&
407 emit_src(emit, src3));
408 }
409
410
411 /**
412 * Apply the absolute value modifier to the given src_register, returning
413 * a new src_register.
414 */
415 static struct src_register
416 absolute(struct src_register src)
417 {
418 src.base.srcMod = SVGA3DSRCMOD_ABS;
419 return src;
420 }
421
422
423 /**
424 * Apply the negation modifier to the given src_register, returning
425 * a new src_register.
426 */
427 static struct src_register
428 negate(struct src_register src)
429 {
430 switch (src.base.srcMod) {
431 case SVGA3DSRCMOD_ABS:
432 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
433 break;
434 case SVGA3DSRCMOD_ABSNEG:
435 src.base.srcMod = SVGA3DSRCMOD_ABS;
436 break;
437 case SVGA3DSRCMOD_NEG:
438 src.base.srcMod = SVGA3DSRCMOD_NONE;
439 break;
440 case SVGA3DSRCMOD_NONE:
441 src.base.srcMod = SVGA3DSRCMOD_NEG;
442 break;
443 }
444 return src;
445 }
446
447
448
449 /* Replace the src with the temporary specified in the dst, but copying
450 * only the necessary channels, and preserving the original swizzle (which is
451 * important given that several opcodes have constraints in the allowed
452 * swizzles).
453 */
454 static boolean
455 emit_repl(struct svga_shader_emitter *emit,
456 SVGA3dShaderDestToken dst,
457 struct src_register *src0)
458 {
459 unsigned src0_swizzle;
460 unsigned chan;
461
462 assert(SVGA3dShaderGetRegType(dst.value) == SVGA3DREG_TEMP);
463
464 src0_swizzle = src0->base.swizzle;
465
466 dst.mask = 0;
467 for (chan = 0; chan < 4; ++chan) {
468 unsigned swizzle = (src0_swizzle >> (chan *2)) & 0x3;
469 dst.mask |= 1 << swizzle;
470 }
471 assert(dst.mask);
472
473 src0->base.swizzle = SVGA3DSWIZZLE_NONE;
474
475 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, *src0 ))
476 return FALSE;
477
478 *src0 = src( dst );
479 src0->base.swizzle = src0_swizzle;
480
481 return TRUE;
482 }
483
484
485 /**
486 * Submit/emit an instruction with zero operands.
487 */
488 static boolean
489 submit_op0(struct svga_shader_emitter *emit,
490 SVGA3dShaderInstToken inst,
491 SVGA3dShaderDestToken dest)
492 {
493 return (emit_instruction( emit, inst ) &&
494 emit_dst( emit, dest ));
495 }
496
497
498 /**
499 * Submit/emit an instruction with one operand.
500 */
501 static boolean
502 submit_op1(struct svga_shader_emitter *emit,
503 SVGA3dShaderInstToken inst,
504 SVGA3dShaderDestToken dest,
505 struct src_register src0)
506 {
507 return emit_op1( emit, inst, dest, src0 );
508 }
509
510
511 /**
512 * Submit/emit an instruction with two operands.
513 *
514 * SVGA shaders may not refer to >1 constant register in a single
515 * instruction. This function checks for that usage and inserts a
516 * move to temporary if detected.
517 *
518 * The same applies to input registers -- at most a single input
519 * register may be read by any instruction.
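 *
 * For example (illustrative only), something like
 *   ADD TEMP0, CONST0, CONST1
 * can't be emitted directly; emit_repl() first copies src0 into a
 * temporary and the ADD then reads the temp and a single constant.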
520 */
521 static boolean
522 submit_op2(struct svga_shader_emitter *emit,
523 SVGA3dShaderInstToken inst,
524 SVGA3dShaderDestToken dest,
525 struct src_register src0,
526 struct src_register src1)
527 {
528 SVGA3dShaderDestToken temp;
529 SVGA3dShaderRegType type0, type1;
530 boolean need_temp = FALSE;
531
532 temp.value = 0;
533 type0 = SVGA3dShaderGetRegType( src0.base.value );
534 type1 = SVGA3dShaderGetRegType( src1.base.value );
535
536 if (type0 == SVGA3DREG_CONST &&
537 type1 == SVGA3DREG_CONST &&
538 src0.base.num != src1.base.num)
539 need_temp = TRUE;
540
541 if (type0 == SVGA3DREG_INPUT &&
542 type1 == SVGA3DREG_INPUT &&
543 src0.base.num != src1.base.num)
544 need_temp = TRUE;
545
546 if (need_temp) {
547 temp = get_temp( emit );
548
549 if (!emit_repl( emit, temp, &src0 ))
550 return FALSE;
551 }
552
553 if (!emit_op2( emit, inst, dest, src0, src1 ))
554 return FALSE;
555
556 if (need_temp)
557 release_temp( emit, temp );
558
559 return TRUE;
560 }
561
562
563 /**
564 * Submit/emit an instruction with three operands.
565 *
566 * SVGA shaders may not refer to >1 constant register in a single
567 * instruction. This function checks for that usage and inserts a
568 * move to temporary if detected.
569 */
570 static boolean
571 submit_op3(struct svga_shader_emitter *emit,
572 SVGA3dShaderInstToken inst,
573 SVGA3dShaderDestToken dest,
574 struct src_register src0,
575 struct src_register src1,
576 struct src_register src2)
577 {
578 SVGA3dShaderDestToken temp0;
579 SVGA3dShaderDestToken temp1;
580 boolean need_temp0 = FALSE;
581 boolean need_temp1 = FALSE;
582 SVGA3dShaderRegType type0, type1, type2;
583
584 temp0.value = 0;
585 temp1.value = 0;
586 type0 = SVGA3dShaderGetRegType( src0.base.value );
587 type1 = SVGA3dShaderGetRegType( src1.base.value );
588 type2 = SVGA3dShaderGetRegType( src2.base.value );
589
590 if (inst.op != SVGA3DOP_SINCOS) {
591 if (type0 == SVGA3DREG_CONST &&
592 ((type1 == SVGA3DREG_CONST && src0.base.num != src1.base.num) ||
593 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
594 need_temp0 = TRUE;
595
596 if (type1 == SVGA3DREG_CONST &&
597 (type2 == SVGA3DREG_CONST && src1.base.num != src2.base.num))
598 need_temp1 = TRUE;
599 }
600
601 if (type0 == SVGA3DREG_INPUT &&
602 ((type1 == SVGA3DREG_INPUT && src0.base.num != src1.base.num) ||
603 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
604 need_temp0 = TRUE;
605
606 if (type1 == SVGA3DREG_INPUT &&
607 (type2 == SVGA3DREG_INPUT && src1.base.num != src2.base.num))
608 need_temp1 = TRUE;
609
610 if (need_temp0) {
611 temp0 = get_temp( emit );
612
613 if (!emit_repl( emit, temp0, &src0 ))
614 return FALSE;
615 }
616
617 if (need_temp1) {
618 temp1 = get_temp( emit );
619
620 if (!emit_repl( emit, temp1, &src1 ))
621 return FALSE;
622 }
623
624 if (!emit_op3( emit, inst, dest, src0, src1, src2 ))
625 return FALSE;
626
627 if (need_temp1)
628 release_temp( emit, temp1 );
629 if (need_temp0)
630 release_temp( emit, temp0 );
631 return TRUE;
632 }
633
634
635 /**
636 * Submit/emit an instruction with four operands.
637 *
638 * SVGA shaders may not refer to >1 constant register in a single
639 * instruction. This function checks for that usage and inserts a
640 * move to temporary if detected.
641 */
642 static boolean
643 submit_op4(struct svga_shader_emitter *emit,
644 SVGA3dShaderInstToken inst,
645 SVGA3dShaderDestToken dest,
646 struct src_register src0,
647 struct src_register src1,
648 struct src_register src2,
649 struct src_register src3)
650 {
651 SVGA3dShaderDestToken temp0;
652 SVGA3dShaderDestToken temp3;
653 boolean need_temp0 = FALSE;
654 boolean need_temp3 = FALSE;
655 SVGA3dShaderRegType type0, type1, type2, type3;
656
657 temp0.value = 0;
658 temp3.value = 0;
659 type0 = SVGA3dShaderGetRegType( src0.base.value );
660 type1 = SVGA3dShaderGetRegType( src1.base.value );
661 type2 = SVGA3dShaderGetRegType( src2.base.value );
662 type3 = SVGA3dShaderGetRegType( src3.base.value );
663
664 /* Make life a little easier - this is only used by the TXD
665 * instruction, which is guaranteed to have its sampler (and therefore
666 * neither a constant nor an input reg) in at least one slot:
667 */
668 assert(type1 == SVGA3DREG_SAMPLER);
669
670 if (type0 == SVGA3DREG_CONST &&
671 ((type3 == SVGA3DREG_CONST && src0.base.num != src3.base.num) ||
672 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
673 need_temp0 = TRUE;
674
675 if (type3 == SVGA3DREG_CONST &&
676 (type2 == SVGA3DREG_CONST && src3.base.num != src2.base.num))
677 need_temp3 = TRUE;
678
679 if (type0 == SVGA3DREG_INPUT &&
680 ((type3 == SVGA3DREG_INPUT && src0.base.num != src3.base.num) ||
681 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
682 need_temp0 = TRUE;
683
684 if (type3 == SVGA3DREG_INPUT &&
685 (type2 == SVGA3DREG_INPUT && src3.base.num != src2.base.num))
686 need_temp3 = TRUE;
687
688 if (need_temp0) {
689 temp0 = get_temp( emit );
690
691 if (!emit_repl( emit, temp0, &src0 ))
692 return FALSE;
693 }
694
695 if (need_temp3) {
696 temp3 = get_temp( emit );
697
698 if (!emit_repl( emit, temp3, &src3 ))
699 return FALSE;
700 }
701
702 if (!emit_op4( emit, inst, dest, src0, src1, src2, src3 ))
703 return FALSE;
704
705 if (need_temp3)
706 release_temp( emit, temp3 );
707 if (need_temp0)
708 release_temp( emit, temp0 );
709 return TRUE;
710 }
711
712
713 /**
714 * Do the src and dest registers refer to the same register?
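 * Only the register number and register type are compared; swizzles,
 * writemasks and modifiers are ignored.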
715 */
716 static boolean
717 alias_src_dst(struct src_register src,
718 SVGA3dShaderDestToken dst)
719 {
720 if (src.base.num != dst.num)
721 return FALSE;
722
723 if (SVGA3dShaderGetRegType(dst.value) !=
724 SVGA3dShaderGetRegType(src.base.value))
725 return FALSE;
726
727 return TRUE;
728 }
729
730
731 /**
732 * Translate/emit a LRP (linear interpolation) instruction.
733 */
734 static boolean
735 submit_lrp(struct svga_shader_emitter *emit,
736 SVGA3dShaderDestToken dst,
737 struct src_register src0,
738 struct src_register src1,
739 struct src_register src2)
740 {
741 SVGA3dShaderDestToken tmp;
742 boolean need_dst_tmp = FALSE;
743
744 /* The dst reg must be a temporary, and not be the same as src0 or src2 */
745 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
746 alias_src_dst(src0, dst) ||
747 alias_src_dst(src2, dst))
748 need_dst_tmp = TRUE;
749
750 if (need_dst_tmp) {
751 tmp = get_temp( emit );
752 tmp.mask = dst.mask;
753 }
754 else {
755 tmp = dst;
756 }
757
758 if (!submit_op3(emit, inst_token( SVGA3DOP_LRP ), tmp, src0, src1, src2))
759 return FALSE;
760
761 if (need_dst_tmp) {
762 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
763 return FALSE;
764 }
765
766 return TRUE;
767 }
768
769
770 /**
771 * Helper for emitting SVGA immediate values using the SVGA3DOP_DEF[I]
772 * instructions.
773 */
774 static boolean
775 emit_def_const(struct svga_shader_emitter *emit,
776 SVGA3dShaderConstType type,
777 unsigned idx, float a, float b, float c, float d)
778 {
779 SVGA3DOpDefArgs def;
780 SVGA3dShaderInstToken opcode;
781
782 switch (type) {
783 case SVGA3D_CONST_TYPE_FLOAT:
784 opcode = inst_token( SVGA3DOP_DEF );
785 def.dst = dst_register( SVGA3DREG_CONST, idx );
786 def.constValues[0] = a;
787 def.constValues[1] = b;
788 def.constValues[2] = c;
789 def.constValues[3] = d;
790 break;
791 case SVGA3D_CONST_TYPE_INT:
792 opcode = inst_token( SVGA3DOP_DEFI );
793 def.dst = dst_register( SVGA3DREG_CONSTINT, idx );
794 def.constIValues[0] = (int)a;
795 def.constIValues[1] = (int)b;
796 def.constIValues[2] = (int)c;
797 def.constIValues[3] = (int)d;
798 break;
799 default:
800 assert(0);
801 opcode = inst_token( SVGA3DOP_NOP );
802 break;
803 }
804
805 if (!emit_instruction(emit, opcode) ||
806 !svga_shader_emit_dwords( emit, def.values, Elements(def.values)))
807 return FALSE;
808
809 return TRUE;
810 }
811
812
813 /**
814 * Create/emit a constant with values {0, 0.5, -1, 1}.
815 * We can swizzle this to produce other useful constants such as
816 * {0, 0, 0, 0}, {1, 1, 1, 1}, etc.
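 * For example, swizzling with (X,X,X,W) yields {0, 0, 0, 1} and with
 * (W,W,W,Z) yields {1, 1, 1, -1}; see the get_*_immediate() helpers
 * below for the swizzles actually used.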
817 */
818 static boolean
819 create_zero_immediate( struct svga_shader_emitter *emit )
820 {
821 unsigned idx = emit->nr_hw_float_const++;
822
823 /* Emit the constant (0, 0.5, -1, 1) and use swizzling to generate
824 * other useful vectors.
825 */
826 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
827 idx, 0, 0.5, -1, 1 ))
828 return FALSE;
829
830 emit->zero_immediate_idx = idx;
831 emit->created_zero_immediate = TRUE;
832
833 return TRUE;
834 }
835
836
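/**
 * Emit the integer constant used for loop translation:
 * {max iteration count, initial counter value, step size, 0}.
 * emit_bgnloop() hands this constant to the SVGA3D LOOP instruction.
 */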
837 static boolean
838 create_loop_const( struct svga_shader_emitter *emit )
839 {
840 unsigned idx = emit->nr_hw_int_const++;
841
842 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_INT, idx,
843 255, /* iteration count */
844 0, /* initial value */
845 1, /* step size */
846 0 /* not used, must be 0 */))
847 return FALSE;
848
849 emit->loop_const_idx = idx;
850 emit->created_loop_const = TRUE;
851
852 return TRUE;
853 }
854
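/**
 * Pack the shader's ARL constants into float constant registers, four
 * values per register.  Each ARL constant records the hardware
 * register index and the X/Y/Z/W component it occupies, for later use
 * by get_fake_arl_const().
 */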
855 static boolean
856 create_arl_consts( struct svga_shader_emitter *emit )
857 {
858 int i;
859
860 for (i = 0; i < emit->num_arl_consts; i += 4) {
861 int j;
862 unsigned idx = emit->nr_hw_float_const++;
863 float vals[4];
864 for (j = 0; j < 4 && (j + i) < emit->num_arl_consts; ++j) {
865 vals[j] = (float) emit->arl_consts[i + j].number;
866 emit->arl_consts[i + j].idx = idx;
867 switch (j) {
868 case 0:
869 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_X;
870 break;
871 case 1:
872 emit->arl_consts[i + 1].swizzle = TGSI_SWIZZLE_Y;
873 break;
874 case 2:
875 emit->arl_consts[i + 2].swizzle = TGSI_SWIZZLE_Z;
876 break;
877 case 3:
878 emit->arl_consts[i + 3].swizzle = TGSI_SWIZZLE_W;
879 break;
880 }
881 }
882 while (j < 4)
883 vals[j++] = 0;
884
885 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
886 vals[0], vals[1],
887 vals[2], vals[3]))
888 return FALSE;
889 }
890
891 return TRUE;
892 }
893
894
895 /**
896 * Return the register which holds the pixel shader's front/back-
897 * facing value.
898 */
899 static struct src_register
900 get_vface( struct svga_shader_emitter *emit )
901 {
902 assert(emit->emitted_vface);
903 return src_register(SVGA3DREG_MISCTYPE, SVGA3DMISCREG_FACE);
904 }
905
906
907 /**
908 * returns {0, 0, 0, 1} immediate
909 */
910 static struct src_register
911 get_zero_immediate( struct svga_shader_emitter *emit )
912 {
913 assert(emit->created_zero_immediate);
914 assert(emit->zero_immediate_idx >= 0);
915 return swizzle(src_register( SVGA3DREG_CONST,
916 emit->zero_immediate_idx),
917 0, 0, 0, 3);
918 }
919
920
921 /**
922 * returns {1, 1, 1, -1} immediate
923 */
924 static struct src_register
925 get_pos_neg_one_immediate( struct svga_shader_emitter *emit )
926 {
927 assert(emit->created_zero_immediate);
928 assert(emit->zero_immediate_idx >= 0);
929 return swizzle(src_register( SVGA3DREG_CONST,
930 emit->zero_immediate_idx),
931 3, 3, 3, 2);
932 }
933
934
935 /**
936 * returns {0.5, 0.5, 0.5, 0.5} immediate
937 */
938 static struct src_register
939 get_half_immediate( struct svga_shader_emitter *emit )
940 {
941 assert(emit->created_zero_immediate);
942 assert(emit->zero_immediate_idx >= 0);
943 return swizzle(src_register(SVGA3DREG_CONST, emit->zero_immediate_idx),
944 1, 1, 1, 1);
945 }
946
947
948 /**
949 * returns the loop const
950 */
951 static struct src_register
952 get_loop_const( struct svga_shader_emitter *emit )
953 {
954 assert(emit->created_loop_const);
955 assert(emit->loop_const_idx >= 0);
956 return src_register( SVGA3DREG_CONSTINT,
957 emit->loop_const_idx );
958 }
959
960
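/**
 * Return the constant register (with the appropriate scalar swizzle)
 * holding the value recorded for the current ARL instruction by
 * create_arl_consts().
 */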
961 static struct src_register
962 get_fake_arl_const( struct svga_shader_emitter *emit )
963 {
964 struct src_register reg;
965 int idx = 0, swizzle = 0, i;
966
967 for (i = 0; i < emit->num_arl_consts; ++ i) {
968 if (emit->arl_consts[i].arl_num == emit->current_arl) {
969 idx = emit->arl_consts[i].idx;
970 swizzle = emit->arl_consts[i].swizzle;
971 }
972 }
973
974 reg = src_register( SVGA3DREG_CONST, idx );
975 return scalar(reg, swizzle);
976 }
977
978
979 /**
980 * Return a register which holds the width and height of the texture
981 * currently bound to the given sampler.
982 */
983 static struct src_register
984 get_tex_dimensions( struct svga_shader_emitter *emit, int sampler_num )
985 {
986 int idx;
987 struct src_register reg;
988
989 /* the width/height indexes start right after constants */
990 idx = emit->key.fkey.tex[sampler_num].width_height_idx +
991 emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
992
993 reg = src_register( SVGA3DREG_CONST, idx );
994 return reg;
995 }
996
997
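/**
 * Emit an ARL-style address load as a three-instruction sequence:
 * MOV the source into a temp, ADD the matching ARL constant (from
 * get_fake_arl_const()), then MOVA the result into the address
 * register.
 */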
998 static boolean
999 emit_fake_arl(struct svga_shader_emitter *emit,
1000 const struct tgsi_full_instruction *insn)
1001 {
1002 const struct src_register src0 =
1003 translate_src_register(emit, &insn->Src[0] );
1004 struct src_register src1 = get_fake_arl_const( emit );
1005 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1006 SVGA3dShaderDestToken tmp = get_temp( emit );
1007
1008 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
1009 return FALSE;
1010
1011 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), tmp, src( tmp ),
1012 src1))
1013 return FALSE;
1014
1015 /* replicate the original swizzle */
1016 src1 = src(tmp);
1017 src1.base.swizzle = src0.base.swizzle;
1018
1019 return submit_op1( emit, inst_token( SVGA3DOP_MOVA ),
1020 dst, src1 );
1021 }
1022
1023
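/**
 * Translate/emit a TGSI IF instruction as an SVGA3D IFC, comparing
 * the condition against zero with a "not equal" test.
 */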
1024 static boolean
1025 emit_if(struct svga_shader_emitter *emit,
1026 const struct tgsi_full_instruction *insn)
1027 {
1028 struct src_register src0 =
1029 translate_src_register(emit, &insn->Src[0]);
1030 struct src_register zero = get_zero_immediate( emit );
1031 SVGA3dShaderInstToken if_token = inst_token( SVGA3DOP_IFC );
1032
1033 if_token.control = SVGA3DOPCOMPC_NE;
1034 zero = scalar(zero, TGSI_SWIZZLE_X);
1035
1036 if (SVGA3dShaderGetRegType(src0.base.value) == SVGA3DREG_CONST) {
1037 /*
1038 * An IFC instruction may read at most one distinct constant register.
1039 */
1040 SVGA3dShaderDestToken tmp = get_temp( emit );
1041
1042 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
1043 return FALSE;
1044
1045 src0 = scalar(src( tmp ), TGSI_SWIZZLE_X);
1046 }
1047
1048 emit->dynamic_branching_level++;
1049
1050 return (emit_instruction( emit, if_token ) &&
1051 emit_src( emit, src0 ) &&
1052 emit_src( emit, zero ) );
1053 }
1054
1055
1056 static boolean
1057 emit_endif(struct svga_shader_emitter *emit,
1058 const struct tgsi_full_instruction *insn)
1059 {
1060 emit->dynamic_branching_level--;
1061
1062 return emit_instruction(emit, inst_token(SVGA3DOP_ENDIF));
1063 }
1064
1065
1066 static boolean
1067 emit_else(struct svga_shader_emitter *emit,
1068 const struct tgsi_full_instruction *insn)
1069 {
1070 return emit_instruction(emit, inst_token(SVGA3DOP_ELSE));
1071 }
1072
1073
1074 /**
1075 * Translate the following TGSI FLR instruction.
1076 * FLR DST, SRC
1077 * To the following SVGA3D instruction sequence.
1078 * FRC TMP, SRC
1079 * SUB DST, SRC, TMP
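 * For example, with SRC = -1.3, FRC yields 0.7 (the fraction
 * x - floor(x)) and the subtract gives -1.3 - 0.7 = -2.0 = floor(-1.3).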
1080 */
1081 static boolean
1082 emit_floor(struct svga_shader_emitter *emit,
1083 const struct tgsi_full_instruction *insn )
1084 {
1085 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1086 const struct src_register src0 =
1087 translate_src_register(emit, &insn->Src[0] );
1088 SVGA3dShaderDestToken temp = get_temp( emit );
1089
1090 /* FRC TMP, SRC */
1091 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ), temp, src0 ))
1092 return FALSE;
1093
1094 /* SUB DST, SRC, TMP */
1095 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src0,
1096 negate( src( temp ) ) ))
1097 return FALSE;
1098
1099 return TRUE;
1100 }
1101
1102
1103 /**
1104 * Translate the following TGSI CEIL instruction.
1105 * CEIL DST, SRC
1106 * To the following SVGA3D instruction sequence.
1107 * FRC TMP, -SRC
1108 * ADD DST, SRC, TMP
1109 */
1110 static boolean
1111 emit_ceil(struct svga_shader_emitter *emit,
1112 const struct tgsi_full_instruction *insn)
1113 {
1114 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
1115 const struct src_register src0 =
1116 translate_src_register(emit, &insn->Src[0]);
1117 SVGA3dShaderDestToken temp = get_temp(emit);
1118
1119 /* FRC TMP, -SRC */
1120 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), temp, negate(src0)))
1121 return FALSE;
1122
1123 /* ADD DST, SRC, TMP */
1124 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), dst, src0, src(temp)))
1125 return FALSE;
1126
1127 return TRUE;
1128 }
1129
1130
1131 /**
1132 * Translate the following TGSI DIV instruction.
1133 * DIV DST.xy, SRC0, SRC1
1134 * To the following SVGA3D instruction sequence.
1135 * RCP TMP.x, SRC1.xxxx
1136 * RCP TMP.y, SRC1.yyyy
1137 * MUL DST.xy, SRC0, TMP
1138 */
1139 static boolean
1140 emit_div(struct svga_shader_emitter *emit,
1141 const struct tgsi_full_instruction *insn )
1142 {
1143 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1144 const struct src_register src0 =
1145 translate_src_register(emit, &insn->Src[0] );
1146 const struct src_register src1 =
1147 translate_src_register(emit, &insn->Src[1] );
1148 SVGA3dShaderDestToken temp = get_temp( emit );
1149 int i;
1150
1151 /* For each enabled element, perform a RCP instruction. Note that
1152 * RCP is scalar in SVGA3D:
1153 */
1154 for (i = 0; i < 4; i++) {
1155 unsigned channel = 1 << i;
1156 if (dst.mask & channel) {
1157 /* RCP TMP.?, SRC1.???? */
1158 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1159 writemask(temp, channel),
1160 scalar(src1, i) ))
1161 return FALSE;
1162 }
1163 }
1164
1165 /* Vector mul:
1166 * MUL DST, SRC0, TMP
1167 */
1168 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst, src0,
1169 src( temp ) ))
1170 return FALSE;
1171
1172 return TRUE;
1173 }
1174
1175
1176 /**
1177 * Translate the following TGSI DP2 instruction.
1178 * DP2 DST, SRC1, SRC2
1179 * To the following SVGA3D instruction sequence.
1180 * MUL TMP, SRC1, SRC2
1181 * ADD DST, TMP.xxxx, TMP.yyyy
1182 */
1183 static boolean
1184 emit_dp2(struct svga_shader_emitter *emit,
1185 const struct tgsi_full_instruction *insn )
1186 {
1187 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1188 const struct src_register src0 =
1189 translate_src_register(emit, &insn->Src[0]);
1190 const struct src_register src1 =
1191 translate_src_register(emit, &insn->Src[1]);
1192 SVGA3dShaderDestToken temp = get_temp( emit );
1193 struct src_register temp_src0, temp_src1;
1194
1195 /* MUL TMP, SRC1, SRC2 */
1196 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), temp, src0, src1 ))
1197 return FALSE;
1198
1199 temp_src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1200 temp_src1 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1201
1202 /* ADD DST, TMP.xxxx, TMP.yyyy */
1203 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1204 temp_src0, temp_src1 ))
1205 return FALSE;
1206
1207 return TRUE;
1208 }
1209
1210
1211 /**
1212 * Translate the following TGSI DPH instruction.
1213 * DPH DST, SRC1, SRC2
1214 * To the following SVGA3D instruction sequence.
1215 * DP3 TMP, SRC1, SRC2
1216 * ADD DST, TMP, SRC2.wwww
1217 */
1218 static boolean
1219 emit_dph(struct svga_shader_emitter *emit,
1220 const struct tgsi_full_instruction *insn )
1221 {
1222 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1223 const struct src_register src0 = translate_src_register(
1224 emit, &insn->Src[0] );
1225 struct src_register src1 =
1226 translate_src_register(emit, &insn->Src[1]);
1227 SVGA3dShaderDestToken temp = get_temp( emit );
1228
1229 /* DP3 TMP, SRC1, SRC2 */
1230 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src1 ))
1231 return FALSE;
1232
1233 src1 = scalar(src1, TGSI_SWIZZLE_W);
1234
1235 /* ADD DST, TMP, SRC2.wwww */
1236 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1237 src( temp ), src1 ))
1238 return FALSE;
1239
1240 return TRUE;
1241 }
1242
1243
1244 /**
1245 * Translate the following TGSI NRM instruction.
1246 * NRM DST, SRC
1247 * To the following SVGA3D instruction sequence.
1248 * DP3 TMP, SRC, SRC
1249 * RSQ TMP, TMP
1250 * MUL DST, SRC, TMP
1251 */
1252 static boolean
1253 emit_nrm(struct svga_shader_emitter *emit,
1254 const struct tgsi_full_instruction *insn)
1255 {
1256 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1257 const struct src_register src0 =
1258 translate_src_register(emit, &insn->Src[0]);
1259 SVGA3dShaderDestToken temp = get_temp( emit );
1260
1261 /* DP3 TMP, SRC, SRC */
1262 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src0 ))
1263 return FALSE;
1264
1265 /* RSQ TMP, TMP */
1266 if (!submit_op1( emit, inst_token( SVGA3DOP_RSQ ), temp, src( temp )))
1267 return FALSE;
1268
1269 /* MUL DST, SRC, TMP */
1270 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst,
1271 src0, src( temp )))
1272 return FALSE;
1273
1274 return TRUE;
1275 }
1276
1277
1278 /**
1279 * Sine / Cosine helper function.
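 * SINCOS writes cos(x) to the X channel and sin(x) to the Y channel;
 * emit_cos() and emit_sin() below pick out the channel they need.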
1280 */
1281 static boolean
1282 do_emit_sincos(struct svga_shader_emitter *emit,
1283 SVGA3dShaderDestToken dst,
1284 struct src_register src0)
1285 {
1286 src0 = scalar(src0, TGSI_SWIZZLE_X);
1287 return submit_op1(emit, inst_token(SVGA3DOP_SINCOS), dst, src0);
1288 }
1289
1290
1291 /**
1292 * Translate/emit a TGSI SIN, COS or SCS instruction.
1293 */
1294 static boolean
1295 emit_sincos(struct svga_shader_emitter *emit,
1296 const struct tgsi_full_instruction *insn)
1297 {
1298 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1299 struct src_register src0 = translate_src_register(emit, &insn->Src[0]);
1300 SVGA3dShaderDestToken temp = get_temp( emit );
1301
1302 /* SCS TMP SRC */
1303 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_XY), src0 ))
1304 return FALSE;
1305
1306 /* MOV DST TMP */
1307 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src( temp ) ))
1308 return FALSE;
1309
1310 return TRUE;
1311 }
1312
1313
1314 /**
1315 * Translate TGSI SIN instruction into:
1316 * SCS TMP SRC
1317 * MOV DST TMP.yyyy
1318 */
1319 static boolean
1320 emit_sin(struct svga_shader_emitter *emit,
1321 const struct tgsi_full_instruction *insn )
1322 {
1323 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1324 struct src_register src0 =
1325 translate_src_register(emit, &insn->Src[0] );
1326 SVGA3dShaderDestToken temp = get_temp( emit );
1327
1328 /* SCS TMP SRC */
1329 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_Y), src0))
1330 return FALSE;
1331
1332 src0 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1333
1334 /* MOV DST TMP.yyyy */
1335 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1336 return FALSE;
1337
1338 return TRUE;
1339 }
1340
1341
1342 /**
1343 * Translate TGSI COS instruction into:
1344 * SCS TMP SRC
1345 * MOV DST TMP.xxxx
1346 */
1347 static boolean
1348 emit_cos(struct svga_shader_emitter *emit,
1349 const struct tgsi_full_instruction *insn)
1350 {
1351 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1352 struct src_register src0 =
1353 translate_src_register(emit, &insn->Src[0] );
1354 SVGA3dShaderDestToken temp = get_temp( emit );
1355
1356 /* SCS TMP SRC */
1357 if (!do_emit_sincos( emit, writemask(temp, TGSI_WRITEMASK_X), src0 ))
1358 return FALSE;
1359
1360 src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1361
1362 /* MOV DST TMP.xxxx */
1363 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1364 return FALSE;
1365
1366 return TRUE;
1367 }
1368
1369
1370 /**
1371 * Translate/emit TGSI SSG (Set Sign: -1, 0, +1) instruction.
1372 */
1373 static boolean
1374 emit_ssg(struct svga_shader_emitter *emit,
1375 const struct tgsi_full_instruction *insn)
1376 {
1377 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1378 struct src_register src0 =
1379 translate_src_register(emit, &insn->Src[0] );
1380 SVGA3dShaderDestToken temp0 = get_temp( emit );
1381 SVGA3dShaderDestToken temp1 = get_temp( emit );
1382 struct src_register zero, one;
1383
1384 if (emit->unit == PIPE_SHADER_VERTEX) {
1385 /* SGN DST, SRC0, TMP0, TMP1 */
1386 return submit_op3( emit, inst_token( SVGA3DOP_SGN ), dst, src0,
1387 src( temp0 ), src( temp1 ) );
1388 }
1389
1390 zero = get_zero_immediate( emit );
1391 one = scalar( zero, TGSI_SWIZZLE_W );
1392 zero = scalar( zero, TGSI_SWIZZLE_X );
1393
1394 /* CMP TMP0, SRC0, one, zero */
1395 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1396 writemask( temp0, dst.mask ), src0, one, zero ))
1397 return FALSE;
1398
1399 /* CMP TMP1, negate(SRC0), negate(one), zero */
1400 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1401 writemask( temp1, dst.mask ), negate( src0 ), negate( one ),
1402 zero ))
1403 return FALSE;
1404
1405 /* ADD DST, TMP0, TMP1 */
1406 return submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src( temp0 ),
1407 src( temp1 ) );
1408 }
1409
1410
1411 /**
1412 * Translate/emit TGSI SUB instruction as:
1413 * ADD DST, SRC0, negate(SRC1)
1414 */
1415 static boolean
1416 emit_sub(struct svga_shader_emitter *emit,
1417 const struct tgsi_full_instruction *insn)
1418 {
1419 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1420 struct src_register src0 = translate_src_register(
1421 emit, &insn->Src[0] );
1422 struct src_register src1 = translate_src_register(
1423 emit, &insn->Src[1] );
1424
1425 src1 = negate(src1);
1426
1427 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1428 src0, src1 ))
1429 return FALSE;
1430
1431 return TRUE;
1432 }
1433
1434
1435 /**
1436 * Translate/emit KILL_IF instruction (kill if any of X,Y,Z,W are negative).
1437 */
1438 static boolean
1439 emit_kill_if(struct svga_shader_emitter *emit,
1440 const struct tgsi_full_instruction *insn)
1441 {
1442 const struct tgsi_full_src_register *reg = &insn->Src[0];
1443 struct src_register src0, srcIn;
1444 const boolean special = (reg->Register.Absolute ||
1445 reg->Register.Negate ||
1446 reg->Register.Indirect ||
1447 reg->Register.SwizzleX != 0 ||
1448 reg->Register.SwizzleY != 1 ||
1449 reg->Register.SwizzleZ != 2 ||
1450 reg->Register.File != TGSI_FILE_TEMPORARY);
1451 SVGA3dShaderDestToken temp;
1452
1453 src0 = srcIn = translate_src_register( emit, reg );
1454
1455 if (special) {
1456 /* need a temp reg */
1457 temp = get_temp( emit );
1458 }
1459
1460 if (special) {
1461 /* move the source into a temp register */
1462 submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, src0);
1463
1464 src0 = src( temp );
1465 }
1466
1467 /* Do the texkill by checking if any of the XYZW components are < 0.
1468 * Note that ps_2_0 and later take XYZW into consideration, while ps_1_x
1469 * only uses XYZ. The MSDN documentation about this is incorrect.
1470 */
1471 if (!submit_op0( emit, inst_token( SVGA3DOP_TEXKILL ), dst(src0) ))
1472 return FALSE;
1473
1474 return TRUE;
1475 }
1476
1477
1478 /**
1479 * Translate/emit unconditional kill instruction (usually found inside
1480 * an IF/ELSE/ENDIF block).
1481 */
1482 static boolean
1483 emit_kill(struct svga_shader_emitter *emit,
1484 const struct tgsi_full_instruction *insn)
1485 {
1486 SVGA3dShaderDestToken temp;
1487 struct src_register one = scalar( get_zero_immediate( emit ),
1488 TGSI_SWIZZLE_W );
1489 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_TEXKILL );
1490
1491 /* texkill doesn't allow negation on its operand, so move the
1492 * negated {1} into a temp register first. */
1493 temp = get_temp( emit );
1494 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp,
1495 negate( one ) ))
1496 return FALSE;
1497
1498 return submit_op0( emit, inst, temp );
1499 }
1500
1501
1502 /**
1503 * Test if r1 and r2 are the same register.
1504 */
1505 static boolean
1506 same_register(struct src_register r1, struct src_register r2)
1507 {
1508 return (r1.base.num == r2.base.num &&
1509 r1.base.type_upper == r2.base.type_upper &&
1510 r1.base.type_lower == r2.base.type_lower);
1511 }
1512
1513
1514
1515 /**
1516 * Implement conditionals by initializing destination reg to 'fail',
1517 * then set predicate reg with UFOP_SETP, then move 'pass' to dest
1518 * based on predicate reg.
1519 *
1520 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1521 * MOV dst, fail
1522 * MOV dst, pass, p0
1523 */
1524 static boolean
1525 emit_conditional(struct svga_shader_emitter *emit,
1526 unsigned compare_func,
1527 SVGA3dShaderDestToken dst,
1528 struct src_register src0,
1529 struct src_register src1,
1530 struct src_register pass,
1531 struct src_register fail)
1532 {
1533 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1534 SVGA3dShaderInstToken setp_token, mov_token;
1535 setp_token = inst_token( SVGA3DOP_SETP );
1536
1537 switch (compare_func) {
1538 case PIPE_FUNC_NEVER:
1539 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1540 dst, fail );
1541 break;
1542 case PIPE_FUNC_LESS:
1543 setp_token.control = SVGA3DOPCOMP_LT;
1544 break;
1545 case PIPE_FUNC_EQUAL:
1546 setp_token.control = SVGA3DOPCOMP_EQ;
1547 break;
1548 case PIPE_FUNC_LEQUAL:
1549 setp_token.control = SVGA3DOPCOMP_LE;
1550 break;
1551 case PIPE_FUNC_GREATER:
1552 setp_token.control = SVGA3DOPCOMP_GT;
1553 break;
1554 case PIPE_FUNC_NOTEQUAL:
1555 setp_token.control = SVGA3DOPCOMPC_NE;
1556 break;
1557 case PIPE_FUNC_GEQUAL:
1558 setp_token.control = SVGA3DOPCOMP_GE;
1559 break;
1560 case PIPE_FUNC_ALWAYS:
1561 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1562 dst, pass );
1563 break;
1564 }
1565
1566 if (same_register(src(dst), pass)) {
1567 /* We'll get bad results if the dst and pass registers are the same
1568 * so use a temp register containing pass.
1569 */
1570 SVGA3dShaderDestToken temp = get_temp(emit);
1571 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, pass))
1572 return FALSE;
1573 pass = src(temp);
1574 }
1575
1576 /* SETP src0, COMPOP, src1 */
1577 if (!submit_op2( emit, setp_token, pred_reg,
1578 src0, src1 ))
1579 return FALSE;
1580
1581 mov_token = inst_token( SVGA3DOP_MOV );
1582
1583 /* MOV dst, fail */
1584 if (!submit_op1( emit, mov_token, dst,
1585 fail ))
1586 return FALSE;
1587
1588 /* MOV dst, pass (predicated)
1589 *
1590 * Note that the predicate reg (and possible modifiers) is passed
1591 * as the first source argument.
1592 */
1593 mov_token.predicated = 1;
1594 if (!submit_op2( emit, mov_token, dst,
1595 src( pred_reg ), pass ))
1596 return FALSE;
1597
1598 return TRUE;
1599 }
1600
1601
1602 /**
1603 * Helper for emitting 'selection' commands. Basically:
1604 * if (src0 OP src1)
1605 * dst = 1.0;
1606 * else
1607 * dst = 0.0;
1608 */
1609 static boolean
1610 emit_select(struct svga_shader_emitter *emit,
1611 unsigned compare_func,
1612 SVGA3dShaderDestToken dst,
1613 struct src_register src0,
1614 struct src_register src1 )
1615 {
1616 /* There are some SVGA instructions which implement some selects
1617 * directly, but they are only available in the vertex shader.
1618 */
1619 if (emit->unit == PIPE_SHADER_VERTEX) {
1620 switch (compare_func) {
1621 case PIPE_FUNC_GEQUAL:
1622 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src0, src1 );
1623 case PIPE_FUNC_LEQUAL:
1624 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src1, src0 );
1625 case PIPE_FUNC_GREATER:
1626 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src1, src0 );
1627 case PIPE_FUNC_LESS:
1628 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src0, src1 );
1629 default:
1630 break;
1631 }
1632 }
1633
1634 /* Otherwise, need to use the setp approach:
1635 */
1636 {
1637 struct src_register one, zero;
1638 /* zero immediate is 0,0,0,1 */
1639 zero = get_zero_immediate( emit );
1640 one = scalar( zero, TGSI_SWIZZLE_W );
1641 zero = scalar( zero, TGSI_SWIZZLE_X );
1642
1643 return emit_conditional(emit, compare_func, dst, src0, src1, one, zero);
1644 }
1645 }
1646
1647
1648 /**
1649 * Translate/emit a TGSI SEQ, SNE, SLT, SGE, etc. instruction.
1650 */
1651 static boolean
1652 emit_select_op(struct svga_shader_emitter *emit,
1653 unsigned compare,
1654 const struct tgsi_full_instruction *insn)
1655 {
1656 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1657 struct src_register src0 = translate_src_register(
1658 emit, &insn->Src[0] );
1659 struct src_register src1 = translate_src_register(
1660 emit, &insn->Src[1] );
1661
1662 return emit_select( emit, compare, dst, src0, src1 );
1663 }
1664
1665
1666 /**
1667 * Translate TGSI CMP instruction. Component-wise:
1668 * dst = (src0 < 0.0) ? src1 : src2
1669 */
1670 static boolean
1671 emit_cmp(struct svga_shader_emitter *emit,
1672 const struct tgsi_full_instruction *insn)
1673 {
1674 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1675 const struct src_register src0 =
1676 translate_src_register(emit, &insn->Src[0] );
1677 const struct src_register src1 =
1678 translate_src_register(emit, &insn->Src[1] );
1679 const struct src_register src2 =
1680 translate_src_register(emit, &insn->Src[2] );
1681
1682 if (emit->unit == PIPE_SHADER_VERTEX) {
1683 struct src_register zero =
1684 scalar(get_zero_immediate(emit), TGSI_SWIZZLE_X);
1685 /* We used to simulate CMP with SLT+LRP. But that didn't work when
1686 * src1 or src2 was Inf/NaN. In particular, GLSL sqrt(0) failed
1687 * because it involves a CMP to handle the 0 case.
1688 * Use a conditional expression instead.
1689 */
1690 return emit_conditional(emit, PIPE_FUNC_LESS, dst,
1691 src0, zero, src1, src2);
1692 }
1693 else {
1694 assert(emit->unit == PIPE_SHADER_FRAGMENT);
1695
1696 /* CMP DST, SRC0, SRC2, SRC1 */
1697 return submit_op3( emit, inst_token( SVGA3DOP_CMP ), dst,
1698 src0, src2, src1);
1699 }
1700 }
1701
1702
1703 /**
1704 * Translate/emit 2-operand (coord, sampler) texture instructions.
1705 */
1706 static boolean
1707 emit_tex2(struct svga_shader_emitter *emit,
1708 const struct tgsi_full_instruction *insn,
1709 SVGA3dShaderDestToken dst)
1710 {
1711 SVGA3dShaderInstToken inst;
1712 struct src_register texcoord;
1713 struct src_register sampler;
1714 SVGA3dShaderDestToken tmp;
1715
1716 inst.value = 0;
1717
1718 switch (insn->Instruction.Opcode) {
1719 case TGSI_OPCODE_TEX:
1720 inst.op = SVGA3DOP_TEX;
1721 break;
1722 case TGSI_OPCODE_TXP:
1723 inst.op = SVGA3DOP_TEX;
1724 inst.control = SVGA3DOPCONT_PROJECT;
1725 break;
1726 case TGSI_OPCODE_TXB:
1727 inst.op = SVGA3DOP_TEX;
1728 inst.control = SVGA3DOPCONT_BIAS;
1729 break;
1730 case TGSI_OPCODE_TXL:
1731 inst.op = SVGA3DOP_TEXLDL;
1732 break;
1733 default:
1734 assert(0);
1735 return FALSE;
1736 }
1737
1738 texcoord = translate_src_register( emit, &insn->Src[0] );
1739 sampler = translate_src_register( emit, &insn->Src[1] );
1740
1741 if (emit->key.fkey.tex[sampler.base.num].unnormalized ||
1742 emit->dynamic_branching_level > 0)
1743 tmp = get_temp( emit );
1744
1745 /* Can't do mipmapping inside dynamic branch constructs. Force LOD
1746 * zero in that case.
1747 */
1748 if (emit->dynamic_branching_level > 0 &&
1749 inst.op == SVGA3DOP_TEX &&
1750 SVGA3dShaderGetRegType(texcoord.base.value) == SVGA3DREG_TEMP) {
1751 struct src_register zero = get_zero_immediate( emit );
1752
1753 /* MOV tmp, texcoord */
1754 if (!submit_op1( emit,
1755 inst_token( SVGA3DOP_MOV ),
1756 tmp,
1757 texcoord ))
1758 return FALSE;
1759
1760 /* MOV tmp.w, zero */
1761 if (!submit_op1( emit,
1762 inst_token( SVGA3DOP_MOV ),
1763 writemask( tmp, TGSI_WRITEMASK_W ),
1764 scalar( zero, TGSI_SWIZZLE_X )))
1765 return FALSE;
1766
1767 texcoord = src( tmp );
1768 inst.op = SVGA3DOP_TEXLDL;
1769 }
1770
1771 /* Explicit normalization of texcoords:
1772 */
1773 if (emit->key.fkey.tex[sampler.base.num].unnormalized) {
1774 struct src_register wh = get_tex_dimensions( emit, sampler.base.num );
1775
1776 /* MUL tmp, SRC0, WH */
1777 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1778 tmp, texcoord, wh ))
1779 return FALSE;
1780
1781 texcoord = src( tmp );
1782 }
1783
1784 return submit_op2( emit, inst, dst, texcoord, sampler );
1785 }
1786
1787
1788 /**
1789 * Translate/emit 4-operand (coord, ddx, ddy, sampler) texture instructions.
1790 */
1791 static boolean
1792 emit_tex4(struct svga_shader_emitter *emit,
1793 const struct tgsi_full_instruction *insn,
1794 SVGA3dShaderDestToken dst )
1795 {
1796 SVGA3dShaderInstToken inst;
1797 struct src_register texcoord;
1798 struct src_register ddx;
1799 struct src_register ddy;
1800 struct src_register sampler;
1801
1802 texcoord = translate_src_register( emit, &insn->Src[0] );
1803 ddx = translate_src_register( emit, &insn->Src[1] );
1804 ddy = translate_src_register( emit, &insn->Src[2] );
1805 sampler = translate_src_register( emit, &insn->Src[3] );
1806
1807 inst.value = 0;
1808
1809 switch (insn->Instruction.Opcode) {
1810 case TGSI_OPCODE_TXD:
1811 inst.op = SVGA3DOP_TEXLDD; /* 4 args! */
1812 break;
1813 default:
1814 assert(0);
1815 return FALSE;
1816 }
1817
1818 return submit_op4( emit, inst, dst, texcoord, sampler, ddx, ddy );
1819 }
1820
1821
1822 /**
1823 * Emit texture swizzle code. We do this here since SVGA samplers don't
1824 * directly support swizzles.
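 * Components selecting ZERO or ONE are filled from the {0, 0, 0, 1}
 * immediate; the remaining components are copied with a single
 * swizzled MOV.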
1825 */
1826 static boolean
1827 emit_tex_swizzle(struct svga_shader_emitter *emit,
1828 SVGA3dShaderDestToken dst,
1829 struct src_register src,
1830 unsigned swizzle_x,
1831 unsigned swizzle_y,
1832 unsigned swizzle_z,
1833 unsigned swizzle_w)
1834 {
1835 const unsigned swizzleIn[4] = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
1836 unsigned srcSwizzle[4];
1837 unsigned srcWritemask = 0x0, zeroWritemask = 0x0, oneWritemask = 0x0;
1838 int i;
1839
1840 /* build writemasks and srcSwizzle terms */
1841 for (i = 0; i < 4; i++) {
1842 if (swizzleIn[i] == PIPE_SWIZZLE_ZERO) {
1843 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1844 zeroWritemask |= (1 << i);
1845 }
1846 else if (swizzleIn[i] == PIPE_SWIZZLE_ONE) {
1847 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1848 oneWritemask |= (1 << i);
1849 }
1850 else {
1851 srcSwizzle[i] = swizzleIn[i];
1852 srcWritemask |= (1 << i);
1853 }
1854 }
1855
1856 /* write x/y/z/w comps */
1857 if (dst.mask & srcWritemask) {
1858 if (!submit_op1(emit,
1859 inst_token(SVGA3DOP_MOV),
1860 writemask(dst, srcWritemask),
1861 swizzle(src,
1862 srcSwizzle[0],
1863 srcSwizzle[1],
1864 srcSwizzle[2],
1865 srcSwizzle[3])))
1866 return FALSE;
1867 }
1868
1869 /* write 0 comps */
1870 if (dst.mask & zeroWritemask) {
1871 if (!submit_op1(emit,
1872 inst_token(SVGA3DOP_MOV),
1873 writemask(dst, zeroWritemask),
1874 scalar(get_zero_immediate(emit), TGSI_SWIZZLE_X)))
1875 return FALSE;
1876 }
1877
1878 /* write 1 comps */
1879 if (dst.mask & oneWritemask) {
1880 if (!submit_op1(emit,
1881 inst_token(SVGA3DOP_MOV),
1882 writemask(dst, oneWritemask),
1883 scalar(get_zero_immediate(emit), TGSI_SWIZZLE_W)))
1884 return FALSE;
1885 }
1886
1887 return TRUE;
1888 }
1889
1890
1891 /**
1892 * Translate/emit a TGSI texture sample instruction.
1893 */
1894 static boolean
1895 emit_tex(struct svga_shader_emitter *emit,
1896 const struct tgsi_full_instruction *insn)
1897 {
1898 SVGA3dShaderDestToken dst =
1899 translate_dst_register( emit, insn, 0 );
1900 struct src_register src0 =
1901 translate_src_register( emit, &insn->Src[0] );
1902 struct src_register src1 =
1903 translate_src_register( emit, &insn->Src[1] );
1904
1905 SVGA3dShaderDestToken tex_result;
1906 const unsigned unit = src1.base.num;
1907
1908 /* check for shadow samplers */
1909 boolean compare = (emit->key.fkey.tex[unit].compare_mode ==
1910 PIPE_TEX_COMPARE_R_TO_TEXTURE);
1911
1912 /* texture swizzle */
1913 boolean swizzle = (emit->key.fkey.tex[unit].swizzle_r != PIPE_SWIZZLE_RED ||
1914 emit->key.fkey.tex[unit].swizzle_g != PIPE_SWIZZLE_GREEN ||
1915 emit->key.fkey.tex[unit].swizzle_b != PIPE_SWIZZLE_BLUE ||
1916 emit->key.fkey.tex[unit].swizzle_a != PIPE_SWIZZLE_ALPHA);
1917
1918 boolean saturate = insn->Instruction.Saturate != TGSI_SAT_NONE;
1919
1920 /* If doing compare processing or tex swizzle or saturation, we need to put
1921 * the fetched color into a temporary so it can be used as a source later on.
1922 */
1923 if (compare || swizzle || saturate) {
1924 tex_result = get_temp( emit );
1925 }
1926 else {
1927 tex_result = dst;
1928 }
1929
1930 switch(insn->Instruction.Opcode) {
1931 case TGSI_OPCODE_TEX:
1932 case TGSI_OPCODE_TXB:
1933 case TGSI_OPCODE_TXP:
1934 case TGSI_OPCODE_TXL:
1935 if (!emit_tex2( emit, insn, tex_result ))
1936 return FALSE;
1937 break;
1938 case TGSI_OPCODE_TXD:
1939 if (!emit_tex4( emit, insn, tex_result ))
1940 return FALSE;
1941 break;
1942 default:
1943 assert(0);
1944 }
1945
1946 if (compare) {
1947 SVGA3dShaderDestToken dst2;
1948
1949 if (swizzle || saturate)
1950 dst2 = tex_result;
1951 else
1952 dst2 = dst;
1953
1954 if (dst.mask & TGSI_WRITEMASK_XYZ) {
1955 SVGA3dShaderDestToken src0_zdivw = get_temp( emit );
1956 /* When sampling a depth texture, the result of the comparison is in
1957 * the Y component.
1958 */
1959 struct src_register tex_src_x = scalar(src(tex_result), TGSI_SWIZZLE_Y);
1960 struct src_register r_coord;
1961
1962 if (insn->Instruction.Opcode == TGSI_OPCODE_TXP) {
1963 /* Divide texcoord R by Q */
1964 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1965 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1966 scalar(src0, TGSI_SWIZZLE_W) ))
1967 return FALSE;
1968
1969 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1970 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1971 scalar(src0, TGSI_SWIZZLE_Z),
1972 scalar(src(src0_zdivw), TGSI_SWIZZLE_X) ))
1973 return FALSE;
1974
1975 r_coord = scalar(src(src0_zdivw), TGSI_SWIZZLE_X);
1976 }
1977 else {
1978 r_coord = scalar(src0, TGSI_SWIZZLE_Z);
1979 }
1980
1981 /* Compare texture sample value against R component of texcoord */
1982 if (!emit_select(emit,
1983 emit->key.fkey.tex[unit].compare_func,
1984 writemask( dst2, TGSI_WRITEMASK_XYZ ),
1985 r_coord,
1986 tex_src_x))
1987 return FALSE;
1988 }
1989
1990 if (dst.mask & TGSI_WRITEMASK_W) {
1991 struct src_register one =
1992 scalar( get_zero_immediate( emit ), TGSI_SWIZZLE_W );
1993
1994 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1995 writemask( dst2, TGSI_WRITEMASK_W ),
1996 one ))
1997 return FALSE;
1998 }
1999 }
2000
2001 if (saturate && !swizzle) {
2002 /* MOV_SAT real_dst, dst */
2003 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src(tex_result) ))
2004 return FALSE;
2005 }
2006 else if (swizzle) {
2007 /* swizzle from tex_result to dst (handles saturation too, if any) */
2008 emit_tex_swizzle(emit,
2009 dst, src(tex_result),
2010 emit->key.fkey.tex[unit].swizzle_r,
2011 emit->key.fkey.tex[unit].swizzle_g,
2012 emit->key.fkey.tex[unit].swizzle_b,
2013 emit->key.fkey.tex[unit].swizzle_a);
2014 }
2015
2016 return TRUE;
2017 }
2018
2019
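/**
 * Translate/emit TGSI BGNLOOP as the hardware LOOP instruction, which
 * references the aL loop-counter register and an integer loop-control
 * constant provided by get_loop_const().
 */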
2020 static boolean
2021 emit_bgnloop(struct svga_shader_emitter *emit,
2022 const struct tgsi_full_instruction *insn)
2023 {
2024 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_LOOP );
2025 struct src_register loop_reg = src_register( SVGA3DREG_LOOP, 0 );
2026 struct src_register const_int = get_loop_const( emit );
2027
2028 emit->dynamic_branching_level++;
2029
2030 return (emit_instruction( emit, inst ) &&
2031 emit_src( emit, loop_reg ) &&
2032 emit_src( emit, const_int ) );
2033 }
2034
2035
2036 static boolean
2037 emit_endloop(struct svga_shader_emitter *emit,
2038 const struct tgsi_full_instruction *insn)
2039 {
2040 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_ENDLOOP );
2041
2042 emit->dynamic_branching_level--;
2043
2044 return emit_instruction( emit, inst );
2045 }
2046
2047
2048 /**
2049 * Translate/emit TGSI BREAK (out of loop) instruction.
2050 */
2051 static boolean
2052 emit_brk(struct svga_shader_emitter *emit,
2053 const struct tgsi_full_instruction *insn)
2054 {
2055 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_BREAK );
2056 return emit_instruction( emit, inst );
2057 }
2058
2059
2060 /**
2061  * Emit a simple instruction that operates on one scalar value (not
2062  * a vector).  Ex: LG2, RCP, RSQ.
2063 */
2064 static boolean
2065 emit_scalar_op1(struct svga_shader_emitter *emit,
2066 unsigned opcode,
2067 const struct tgsi_full_instruction *insn)
2068 {
2069 SVGA3dShaderInstToken inst;
2070 SVGA3dShaderDestToken dst;
2071 struct src_register src;
2072
2073 inst = inst_token( opcode );
2074 dst = translate_dst_register( emit, insn, 0 );
2075 src = translate_src_register( emit, &insn->Src[0] );
2076 src = scalar( src, TGSI_SWIZZLE_X );
2077
2078 return submit_op1( emit, inst, dst, src );
2079 }
2080
2081
2082 /**
2083 * Translate/emit a simple instruction (one which has no special-case
2084 * code) such as ADD, MUL, MIN, MAX.
2085 */
2086 static boolean
2087 emit_simple_instruction(struct svga_shader_emitter *emit,
2088 unsigned opcode,
2089 const struct tgsi_full_instruction *insn)
2090 {
2091 const struct tgsi_full_src_register *src = insn->Src;
2092 SVGA3dShaderInstToken inst;
2093 SVGA3dShaderDestToken dst;
2094
2095 inst = inst_token( opcode );
2096 dst = translate_dst_register( emit, insn, 0 );
2097
2098 switch (insn->Instruction.NumSrcRegs) {
2099 case 0:
2100 return submit_op0( emit, inst, dst );
2101 case 1:
2102 return submit_op1( emit, inst, dst,
2103 translate_src_register( emit, &src[0] ));
2104 case 2:
2105 return submit_op2( emit, inst, dst,
2106 translate_src_register( emit, &src[0] ),
2107 translate_src_register( emit, &src[1] ) );
2108 case 3:
2109 return submit_op3( emit, inst, dst,
2110 translate_src_register( emit, &src[0] ),
2111 translate_src_register( emit, &src[1] ),
2112 translate_src_register( emit, &src[2] ) );
2113 default:
2114 assert(0);
2115 return FALSE;
2116 }
2117 }
2118
2119
2120 /**
2121 * Translate/emit TGSI DDX, DDY instructions.
2122 */
2123 static boolean
2124 emit_deriv(struct svga_shader_emitter *emit,
2125 const struct tgsi_full_instruction *insn )
2126 {
2127 if (emit->dynamic_branching_level > 0 &&
2128 insn->Src[0].Register.File == TGSI_FILE_TEMPORARY)
2129 {
2130 struct src_register zero = get_zero_immediate( emit );
2131 SVGA3dShaderDestToken dst =
2132 translate_dst_register( emit, insn, 0 );
2133
2134       /* Derivative opcodes are not valid inside dynamic branching; work
2135        * around this by zeroing out the destination.
2136 */
2137 if (!submit_op1(emit,
2138 inst_token( SVGA3DOP_MOV ),
2139 dst,
2140 scalar(zero, TGSI_SWIZZLE_X)))
2141 return FALSE;
2142
2143 return TRUE;
2144 }
2145 else {
2146 unsigned opcode;
2147 const struct tgsi_full_src_register *reg = &insn->Src[0];
2148 SVGA3dShaderInstToken inst;
2149 SVGA3dShaderDestToken dst;
2150 struct src_register src0;
2151
2152 switch (insn->Instruction.Opcode) {
2153 case TGSI_OPCODE_DDX:
2154 opcode = SVGA3DOP_DSX;
2155 break;
2156 case TGSI_OPCODE_DDY:
2157 opcode = SVGA3DOP_DSY;
2158 break;
2159 default:
2160 return FALSE;
2161 }
2162
2163 inst = inst_token( opcode );
2164 dst = translate_dst_register( emit, insn, 0 );
2165 src0 = translate_src_register( emit, reg );
2166
2167       /* We cannot use negate or abs modifiers on the source of a dsx/dsy
2168        * instruction; such a source is moved into a temporary below. */
2169 if (reg->Register.Absolute ||
2170 reg->Register.Negate) {
2171 SVGA3dShaderDestToken temp = get_temp( emit );
2172
2173 if (!emit_repl( emit, temp, &src0 ))
2174 return FALSE;
2175 }
2176
2177 return submit_op1( emit, inst, dst, src0 );
2178 }
2179 }
2180
2181
2182 /**
2183  * Translate/emit the ARL (Address Register Load) instruction.  It moves
2184  * a value into the special 'address' register, which is used to implement
2185  * indirect/variable indexing into arrays.
2186 */
2187 static boolean
2188 emit_arl(struct svga_shader_emitter *emit,
2189 const struct tgsi_full_instruction *insn)
2190 {
2191 ++emit->current_arl;
2192 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2193 /* MOVA not present in pixel shader instruction set.
2194 * Ignore this instruction altogether since it is
2195 * only used for loop counters -- and for that
2196 * we reference aL directly.
2197 */
2198 return TRUE;
2199 }
2200 if (svga_arl_needs_adjustment( emit )) {
2201 return emit_fake_arl( emit, insn );
2202 } else {
2203 /* no need to adjust, just emit straight arl */
2204 return emit_simple_instruction(emit, SVGA3DOP_MOVA, insn);
2205 }
2206 }
2207
2208
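/**
 * Translate/emit the TGSI POW instruction.  The hardware POW is a scalar
 * operation, so both sources are reduced to their X components; a temporary
 * destination is used when the TGSI destination is not a temporary register
 * or aliases src1.
 */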
2209 static boolean
2210 emit_pow(struct svga_shader_emitter *emit,
2211 const struct tgsi_full_instruction *insn)
2212 {
2213 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2214 struct src_register src0 = translate_src_register(
2215 emit, &insn->Src[0] );
2216 struct src_register src1 = translate_src_register(
2217 emit, &insn->Src[1] );
2218 boolean need_tmp = FALSE;
2219
2220 /* POW can only output to a temporary */
2221 if (insn->Dst[0].Register.File != TGSI_FILE_TEMPORARY)
2222 need_tmp = TRUE;
2223
2224 /* POW src1 must not be the same register as dst */
2225 if (alias_src_dst( src1, dst ))
2226 need_tmp = TRUE;
2227
2228 /* it's a scalar op */
2229 src0 = scalar( src0, TGSI_SWIZZLE_X );
2230 src1 = scalar( src1, TGSI_SWIZZLE_X );
2231
2232 if (need_tmp) {
2233 SVGA3dShaderDestToken tmp =
2234 writemask(get_temp( emit ), TGSI_WRITEMASK_X );
2235
2236 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ), tmp, src0, src1))
2237 return FALSE;
2238
2239 return submit_op1(emit, inst_token( SVGA3DOP_MOV ),
2240 dst, scalar(src(tmp), 0) );
2241 }
2242 else {
2243 return submit_op2(emit, inst_token( SVGA3DOP_POW ), dst, src0, src1);
2244 }
2245 }
2246
2247
2248 /**
2249 * Translate/emit TGSI XPD (vector cross product) instruction.
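 * Emitted with the hardware CRS opcode, i.e. the usual cross product on the
 * xyz components:
 *   dst.x = src0.y*src1.z - src0.z*src1.y
 *   dst.y = src0.z*src1.x - src0.x*src1.z
 *   dst.z = src0.x*src1.y - src0.y*src1.x
 * and, when written, dst.w is set to 1.0.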
2250 */
2251 static boolean
2252 emit_xpd(struct svga_shader_emitter *emit,
2253 const struct tgsi_full_instruction *insn)
2254 {
2255 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2256 const struct src_register src0 = translate_src_register(
2257 emit, &insn->Src[0] );
2258 const struct src_register src1 = translate_src_register(
2259 emit, &insn->Src[1] );
2260 boolean need_dst_tmp = FALSE;
2261
2262 /* XPD can only output to a temporary */
2263 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP)
2264 need_dst_tmp = TRUE;
2265
2266    /* The dst reg must not be the same as src0 or src1 */
2267 if (alias_src_dst(src0, dst) ||
2268 alias_src_dst(src1, dst))
2269 need_dst_tmp = TRUE;
2270
2271 if (need_dst_tmp) {
2272 SVGA3dShaderDestToken tmp = get_temp( emit );
2273
2274 /* Obey DX9 restrictions on mask:
2275 */
2276 tmp.mask = dst.mask & TGSI_WRITEMASK_XYZ;
2277
2278 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), tmp, src0, src1))
2279 return FALSE;
2280
2281 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
2282 return FALSE;
2283 }
2284 else {
2285 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), dst, src0, src1))
2286 return FALSE;
2287 }
2288
2289 /* Need to emit 1.0 to dst.w?
2290 */
2291 if (dst.mask & TGSI_WRITEMASK_W) {
2292 struct src_register zero = get_zero_immediate( emit );
2293
2294 if (!submit_op1(emit,
2295 inst_token( SVGA3DOP_MOV ),
2296 writemask(dst, TGSI_WRITEMASK_W),
2297 zero))
2298 return FALSE;
2299 }
2300
2301 return TRUE;
2302 }
2303
2304
2305 /**
2306 * Translate/emit LRP (Linear Interpolation) instruction.
2307 */
2308 static boolean
2309 emit_lrp(struct svga_shader_emitter *emit,
2310 const struct tgsi_full_instruction *insn)
2311 {
2312 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2313 const struct src_register src0 = translate_src_register(
2314 emit, &insn->Src[0] );
2315 const struct src_register src1 = translate_src_register(
2316 emit, &insn->Src[1] );
2317 const struct src_register src2 = translate_src_register(
2318 emit, &insn->Src[2] );
2319
2320 return submit_lrp(emit, dst, src0, src1, src2);
2321 }
2322
2323 /**
2324 * Translate/emit DST (Distance function) instruction.
2325 */
2326 static boolean
2327 emit_dst_insn(struct svga_shader_emitter *emit,
2328 const struct tgsi_full_instruction *insn)
2329 {
2330 if (emit->unit == PIPE_SHADER_VERTEX) {
2331 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
2332 */
2333 return emit_simple_instruction(emit, SVGA3DOP_DST, insn);
2334 }
2335 else {
2336 /* result[0] = 1 * 1;
2337 * result[1] = a[1] * b[1];
2338 * result[2] = a[2] * 1;
2339 * result[3] = 1 * b[3];
2340 */
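      /* The sequence below builds this without a native DST:
       * tmp.xw = 1, tmp.yz = src0, then tmp.yw *= src1, which yields
       * (1, a.y*b.y, a.z, b.w).
       */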
2341 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2342 SVGA3dShaderDestToken tmp;
2343 const struct src_register src0 = translate_src_register(
2344 emit, &insn->Src[0] );
2345 const struct src_register src1 = translate_src_register(
2346 emit, &insn->Src[1] );
2347 struct src_register zero = get_zero_immediate( emit );
2348 boolean need_tmp = FALSE;
2349
2350 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
2351 alias_src_dst(src0, dst) ||
2352 alias_src_dst(src1, dst))
2353 need_tmp = TRUE;
2354
2355 if (need_tmp) {
2356 tmp = get_temp( emit );
2357 }
2358 else {
2359 tmp = dst;
2360 }
2361
2362 /* tmp.xw = 1.0
2363 */
2364 if (tmp.mask & TGSI_WRITEMASK_XW) {
2365 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2366 writemask(tmp, TGSI_WRITEMASK_XW ),
2367 scalar( zero, 3 )))
2368 return FALSE;
2369 }
2370
2371 /* tmp.yz = src0
2372 */
2373 if (tmp.mask & TGSI_WRITEMASK_YZ) {
2374 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2375 writemask(tmp, TGSI_WRITEMASK_YZ ),
2376 src0))
2377 return FALSE;
2378 }
2379
2380 /* tmp.yw = tmp * src1
2381 */
2382 if (tmp.mask & TGSI_WRITEMASK_YW) {
2383 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2384 writemask(tmp, TGSI_WRITEMASK_YW ),
2385 src(tmp),
2386 src1))
2387 return FALSE;
2388 }
2389
2390 /* dst = tmp
2391 */
2392 if (need_tmp) {
2393 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2394 dst,
2395 src(tmp)))
2396 return FALSE;
2397 }
2398 }
2399
2400 return TRUE;
2401 }
2402
2403
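/**
 * Translate/emit the TGSI EXP instruction, which decomposes 2^src.x into
 * several channels:
 *   dst.x = 2^floor(src.x)                 dst.y = src.x - floor(src.x)
 *   dst.z ~= 2^src.x (partial precision)   dst.w = 1.0
 * Illustrative example: src.x = 2.6 gives dst ~= (4.0, 0.6, 6.06, 1.0).
 */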
2404 static boolean
2405 emit_exp(struct svga_shader_emitter *emit,
2406 const struct tgsi_full_instruction *insn)
2407 {
2408 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2409 struct src_register src0 =
2410 translate_src_register( emit, &insn->Src[0] );
2411 struct src_register zero = get_zero_immediate( emit );
2412 SVGA3dShaderDestToken fraction;
2413
2414 if (dst.mask & TGSI_WRITEMASK_Y)
2415 fraction = dst;
2416 else if (dst.mask & TGSI_WRITEMASK_X)
2417 fraction = get_temp( emit );
2418 else
2419 fraction.value = 0;
2420
2421    /* If x or y is being written, fill fraction.y with src0 - floor(src0);
2422     * the x computation below needs this value too. */
2423 if (dst.mask & TGSI_WRITEMASK_XY) {
2424 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2425 writemask( fraction, TGSI_WRITEMASK_Y ),
2426 src0 ))
2427 return FALSE;
2428 }
2429
2430 /* If x is being written, fill it with 2 ^ floor(src0).
2431 */
2432 if (dst.mask & TGSI_WRITEMASK_X) {
2433 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2434 writemask( dst, TGSI_WRITEMASK_X ),
2435 src0,
2436 scalar( negate( src( fraction ) ), TGSI_SWIZZLE_Y ) ) )
2437 return FALSE;
2438
2439 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2440 writemask( dst, TGSI_WRITEMASK_X ),
2441 scalar( src( dst ), TGSI_SWIZZLE_X ) ) )
2442 return FALSE;
2443
2444 if (!(dst.mask & TGSI_WRITEMASK_Y))
2445 release_temp( emit, fraction );
2446 }
2447
2448 /* If z is being written, fill it with 2 ^ src0 (partial precision).
2449 */
2450 if (dst.mask & TGSI_WRITEMASK_Z) {
2451 if (!submit_op1( emit, inst_token( SVGA3DOP_EXPP ),
2452 writemask( dst, TGSI_WRITEMASK_Z ),
2453 src0 ) )
2454 return FALSE;
2455 }
2456
2457 /* If w is being written, fill it with one.
2458 */
2459 if (dst.mask & TGSI_WRITEMASK_W) {
2460 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2461 writemask(dst, TGSI_WRITEMASK_W),
2462 scalar( zero, TGSI_SWIZZLE_W ) ))
2463 return FALSE;
2464 }
2465
2466 return TRUE;
2467 }
2468
2469
2470 /**
2471 * Translate/emit LIT (Lighting helper) instruction.
2472 */
2473 static boolean
2474 emit_lit(struct svga_shader_emitter *emit,
2475 const struct tgsi_full_instruction *insn)
2476 {
2477 if (emit->unit == PIPE_SHADER_VERTEX) {
2478 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
2479 */
2480 return emit_simple_instruction(emit, SVGA3DOP_LIT, insn);
2481 }
2482 else {
2483       /* D3D vs. GL semantics can be fairly easily accommodated by
2484 * variations on this sequence.
2485 *
2486 * GL:
2487 * tmp.y = src.x
2488 * tmp.z = pow(src.y,src.w)
2489 * p0 = src0.xxxx > 0
2490 * result = zero.wxxw
2491 * (p0) result.yz = tmp
2492 *
2493 * D3D:
2494 * tmp.y = src.x
2495 * tmp.z = pow(src.y,src.w)
2496 * p0 = src0.xxyy > 0
2497 * result = zero.wxxw
2498 * (p0) result.yz = tmp
2499 *
2500 * Will implement the GL version for now.
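       *
       * Net effect of the GL path: result = (1, src.x, pow(src.y, src.w), 1)
       * when src.x > 0, otherwise result = (1, 0, 0, 1).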
2501 */
2502 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2503 SVGA3dShaderDestToken tmp = get_temp( emit );
2504 const struct src_register src0 = translate_src_register(
2505 emit, &insn->Src[0] );
2506 struct src_register zero = get_zero_immediate( emit );
2507
2508 /* tmp = pow(src.y, src.w)
2509 */
2510 if (dst.mask & TGSI_WRITEMASK_Z) {
2511 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ),
2512 tmp,
2513 scalar(src0, 1),
2514 scalar(src0, 3)))
2515 return FALSE;
2516 }
2517
2518 /* tmp.y = src.x
2519 */
2520 if (dst.mask & TGSI_WRITEMASK_Y) {
2521 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2522 writemask(tmp, TGSI_WRITEMASK_Y ),
2523 scalar(src0, 0)))
2524 return FALSE;
2525 }
2526
2527 /* Can't quite do this with emit conditional due to the extra
2528 * writemask on the predicated mov:
2529 */
2530 {
2531 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
2532 SVGA3dShaderInstToken setp_token, mov_token;
2533 struct src_register predsrc;
2534
2535 setp_token = inst_token( SVGA3DOP_SETP );
2536 mov_token = inst_token( SVGA3DOP_MOV );
2537
2538 setp_token.control = SVGA3DOPCOMP_GT;
2539
2540 /* D3D vs GL semantics:
2541 */
2542 if (0)
2543 predsrc = swizzle(src0, 0, 0, 1, 1); /* D3D */
2544 else
2545 predsrc = swizzle(src0, 0, 0, 0, 0); /* GL */
2546
2547          /* SETP_GT p0, predsrc, {0}.x */
2548 if (!submit_op2( emit, setp_token, pred_reg,
2549 predsrc,
2550 swizzle(zero, 0, 0, 0, 0) ))
2551 return FALSE;
2552
2553 /* MOV dst, fail */
2554 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst,
2555 swizzle(zero, 3, 0, 0, 3 )))
2556 return FALSE;
2557
2558 /* MOV dst.yz, tmp (predicated)
2559 *
2560 * Note that the predicate reg (and possible modifiers) is passed
2561 * as the first source argument.
2562 */
2563 if (dst.mask & TGSI_WRITEMASK_YZ) {
2564 mov_token.predicated = 1;
2565 if (!submit_op2( emit, mov_token,
2566 writemask(dst, TGSI_WRITEMASK_YZ),
2567 src( pred_reg ), src( tmp ) ))
2568 return FALSE;
2569 }
2570 }
2571 }
2572
2573 return TRUE;
2574 }
2575
2576
2577 static boolean
2578 emit_ex2(struct svga_shader_emitter *emit,
2579 const struct tgsi_full_instruction *insn)
2580 {
2581 SVGA3dShaderInstToken inst;
2582 SVGA3dShaderDestToken dst;
2583 struct src_register src0;
2584
2585 inst = inst_token( SVGA3DOP_EXP );
2586 dst = translate_dst_register( emit, insn, 0 );
2587 src0 = translate_src_register( emit, &insn->Src[0] );
2588 src0 = scalar( src0, TGSI_SWIZZLE_X );
2589
2590 if (dst.mask != TGSI_WRITEMASK_XYZW) {
2591 SVGA3dShaderDestToken tmp = get_temp( emit );
2592
2593 if (!submit_op1( emit, inst, tmp, src0 ))
2594 return FALSE;
2595
2596 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2597 dst,
2598 scalar( src( tmp ), TGSI_SWIZZLE_X ) );
2599 }
2600
2601 return submit_op1( emit, inst, dst, src0 );
2602 }
2603
2604
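/**
 * Translate/emit the TGSI LOG instruction, which decomposes the base-2 log
 * of the absolute value of the source:
 *   dst.x = floor(log2(|src.x|))   dst.y = |src.x| / 2^floor(log2(|src.x|))
 *   dst.z = log2(|src.x|)          dst.w = 1.0
 * Illustrative example: src.x = 10 gives dst ~= (3.0, 1.25, 3.32, 1.0).
 */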
2605 static boolean
2606 emit_log(struct svga_shader_emitter *emit,
2607 const struct tgsi_full_instruction *insn)
2608 {
2609 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2610 struct src_register src0 =
2611 translate_src_register( emit, &insn->Src[0] );
2612 struct src_register zero = get_zero_immediate( emit );
2613 SVGA3dShaderDestToken abs_tmp;
2614 struct src_register abs_src0;
2615 SVGA3dShaderDestToken log2_abs;
2616
2617 abs_tmp.value = 0;
2618
2619 if (dst.mask & TGSI_WRITEMASK_Z)
2620 log2_abs = dst;
2621 else if (dst.mask & TGSI_WRITEMASK_XY)
2622 log2_abs = get_temp( emit );
2623 else
2624 log2_abs.value = 0;
2625
2626    /* If x, y or z is being written we need log2( abs( src0 ) ); it goes
2627     * into log2_abs (dst itself when z is written, otherwise a temp). */
2628 if (dst.mask & TGSI_WRITEMASK_XYZ) {
2629 if (!src0.base.srcMod || src0.base.srcMod == SVGA3DSRCMOD_ABS)
2630 abs_src0 = src0;
2631 else {
2632 abs_tmp = get_temp( emit );
2633
2634 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2635 abs_tmp,
2636 src0 ) )
2637 return FALSE;
2638
2639 abs_src0 = src( abs_tmp );
2640 }
2641
2642 abs_src0 = absolute( scalar( abs_src0, TGSI_SWIZZLE_X ) );
2643
2644 if (!submit_op1( emit, inst_token( SVGA3DOP_LOG ),
2645 writemask( log2_abs, TGSI_WRITEMASK_Z ),
2646 abs_src0 ) )
2647 return FALSE;
2648 }
2649
2650 if (dst.mask & TGSI_WRITEMASK_XY) {
2651 SVGA3dShaderDestToken floor_log2;
2652
2653 if (dst.mask & TGSI_WRITEMASK_X)
2654 floor_log2 = dst;
2655 else
2656 floor_log2 = get_temp( emit );
2657
2658       /* Compute floor( log2( abs( src0 ) ) ) in floor_log2.x; x takes this
2659        * value directly and the y term below needs it too. */
2660 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2661 writemask( floor_log2, TGSI_WRITEMASK_X ),
2662 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ) ) )
2663 return FALSE;
2664
2665 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2666 writemask( floor_log2, TGSI_WRITEMASK_X ),
2667 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ),
2668 negate( src( floor_log2 ) ) ) )
2669 return FALSE;
2670
2671 /* If y is being written, fill it with
2672 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
2673 */
2674 if (dst.mask & TGSI_WRITEMASK_Y) {
2675 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2676 writemask( dst, TGSI_WRITEMASK_Y ),
2677 negate( scalar( src( floor_log2 ),
2678 TGSI_SWIZZLE_X ) ) ) )
2679 return FALSE;
2680
2681 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2682 writemask( dst, TGSI_WRITEMASK_Y ),
2683 src( dst ),
2684 abs_src0 ) )
2685 return FALSE;
2686 }
2687
2688 if (!(dst.mask & TGSI_WRITEMASK_X))
2689 release_temp( emit, floor_log2 );
2690
2691 if (!(dst.mask & TGSI_WRITEMASK_Z))
2692 release_temp( emit, log2_abs );
2693 }
2694
2695 if (dst.mask & TGSI_WRITEMASK_XYZ && src0.base.srcMod &&
2696 src0.base.srcMod != SVGA3DSRCMOD_ABS)
2697 release_temp( emit, abs_tmp );
2698
2699 /* If w is being written, fill it with one.
2700 */
2701 if (dst.mask & TGSI_WRITEMASK_W) {
2702 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2703 writemask(dst, TGSI_WRITEMASK_W),
2704 scalar( zero, TGSI_SWIZZLE_W ) ))
2705 return FALSE;
2706 }
2707
2708 return TRUE;
2709 }
2710
2711
2712 /**
2713 * Translate TGSI TRUNC or ROUND instruction.
2714 * We need to truncate toward zero. Ex: trunc(-1.9) = -1
2715 * Different approaches are needed for VS versus PS.
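 * Illustrative arithmetic for the sequences below:
 *   ROUND(-1.7):  t0 = |-1.7| + 0.5 = 2.2;  t1 = t0 - frc(t0) = 2.0;
 *                 result = sign(-1.7) * t1 = -2.0
 *   TRUNC(-1.9):  t1 = |-1.9| - frc(|-1.9|) = 1.0;
 *                 result = sign(-1.9) * t1 = -1.0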
2716 */
2717 static boolean
2718 emit_trunc_round(struct svga_shader_emitter *emit,
2719 const struct tgsi_full_instruction *insn,
2720 boolean round)
2721 {
2722 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
2723 const struct src_register src0 =
2724 translate_src_register(emit, &insn->Src[0] );
2725 SVGA3dShaderDestToken t1 = get_temp(emit);
2726
2727 if (round) {
2728 SVGA3dShaderDestToken t0 = get_temp(emit);
2729 struct src_register half = get_half_immediate(emit);
2730
2731 /* t0 = abs(src0) + 0.5 */
2732 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t0,
2733 absolute(src0), half))
2734 return FALSE;
2735
2736 /* t1 = fract(t0) */
2737 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, src(t0)))
2738 return FALSE;
2739
2740 /* t1 = t0 - t1 */
2741 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, src(t0),
2742 negate(src(t1))))
2743 return FALSE;
2744 }
2745 else {
2746 /* trunc */
2747
2748 /* t1 = fract(abs(src0)) */
2749 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, absolute(src0)))
2750 return FALSE;
2751
2752 /* t1 = abs(src0) - t1 */
2753 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, absolute(src0),
2754 negate(src(t1))))
2755 return FALSE;
2756 }
2757
2758 /*
2759 * Now we need to multiply t1 by the sign of the original value.
2760 */
2761 if (emit->unit == PIPE_SHADER_VERTEX) {
2762 /* For VS: use SGN instruction */
2763 /* Need two extra/dummy registers: */
2764 SVGA3dShaderDestToken t2 = get_temp(emit), t3 = get_temp(emit),
2765 t4 = get_temp(emit);
2766
2767 /* t2 = sign(src0) */
2768 if (!submit_op3(emit, inst_token(SVGA3DOP_SGN), t2, src0,
2769 src(t3), src(t4)))
2770 return FALSE;
2771
2772 /* dst = t1 * t2 */
2773 if (!submit_op2(emit, inst_token(SVGA3DOP_MUL), dst, src(t1), src(t2)))
2774 return FALSE;
2775 }
2776 else {
2777 /* For FS: Use CMP instruction */
2778 return submit_op3(emit, inst_token( SVGA3DOP_CMP ), dst,
2779 src0, src(t1), negate(src(t1)));
2780 }
2781
2782 return TRUE;
2783 }
2784
2785
2786 /**
2787 * Translate/emit "begin subroutine" instruction/marker/label.
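 * Emitted as RET (terminating the preceding function) followed by a LABEL
 * token whose index was assigned when the matching CAL was translated in
 * emit_call().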
2788 */
2789 static boolean
2790 emit_bgnsub(struct svga_shader_emitter *emit,
2791 unsigned position,
2792 const struct tgsi_full_instruction *insn)
2793 {
2794 unsigned i;
2795
2796 /* Note that we've finished the main function and are now emitting
2797 * subroutines. This affects how we terminate the generated
2798 * shader.
2799 */
2800 emit->in_main_func = FALSE;
2801
2802 for (i = 0; i < emit->nr_labels; i++) {
2803 if (emit->label[i] == position) {
2804 return (emit_instruction( emit, inst_token( SVGA3DOP_RET ) ) &&
2805 emit_instruction( emit, inst_token( SVGA3DOP_LABEL ) ) &&
2806 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2807 }
2808 }
2809
2810 assert(0);
2811 return TRUE;
2812 }
2813
2814
2815 /**
2816 * Translate/emit subroutine call instruction.
2817 */
2818 static boolean
2819 emit_call(struct svga_shader_emitter *emit,
2820 const struct tgsi_full_instruction *insn)
2821 {
2822 unsigned position = insn->Label.Label;
2823 unsigned i;
2824
2825 for (i = 0; i < emit->nr_labels; i++) {
2826 if (emit->label[i] == position)
2827 break;
2828 }
2829
2830 if (emit->nr_labels == Elements(emit->label))
2831 return FALSE;
2832
2833 if (i == emit->nr_labels) {
2834 emit->label[i] = position;
2835 emit->nr_labels++;
2836 }
2837
2838 return (emit_instruction( emit, inst_token( SVGA3DOP_CALL ) ) &&
2839 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2840 }
2841
2842
2843 /**
2844 * Called at the end of the shader. Actually, emit special "fix-up"
2845 * code for the vertex/fragment shader.
2846 */
2847 static boolean
2848 emit_end(struct svga_shader_emitter *emit)
2849 {
2850 if (emit->unit == PIPE_SHADER_VERTEX) {
2851 return emit_vs_postamble( emit );
2852 }
2853 else {
2854 return emit_ps_postamble( emit );
2855 }
2856 }
2857
2858
2859 /**
2860 * Translate any TGSI instruction to SVGA.
2861 */
2862 static boolean
2863 svga_emit_instruction(struct svga_shader_emitter *emit,
2864 unsigned position,
2865 const struct tgsi_full_instruction *insn)
2866 {
2867 switch (insn->Instruction.Opcode) {
2868
2869 case TGSI_OPCODE_ARL:
2870 return emit_arl( emit, insn );
2871
2872 case TGSI_OPCODE_TEX:
2873 case TGSI_OPCODE_TXB:
2874 case TGSI_OPCODE_TXP:
2875 case TGSI_OPCODE_TXL:
2876 case TGSI_OPCODE_TXD:
2877 return emit_tex( emit, insn );
2878
2879 case TGSI_OPCODE_DDX:
2880 case TGSI_OPCODE_DDY:
2881 return emit_deriv( emit, insn );
2882
2883 case TGSI_OPCODE_BGNSUB:
2884 return emit_bgnsub( emit, position, insn );
2885
2886 case TGSI_OPCODE_ENDSUB:
2887 return TRUE;
2888
2889 case TGSI_OPCODE_CAL:
2890 return emit_call( emit, insn );
2891
2892 case TGSI_OPCODE_FLR:
2893 return emit_floor( emit, insn );
2894
2895 case TGSI_OPCODE_TRUNC:
2896 return emit_trunc_round( emit, insn, FALSE );
2897
2898 case TGSI_OPCODE_ROUND:
2899 return emit_trunc_round( emit, insn, TRUE );
2900
2901 case TGSI_OPCODE_CEIL:
2902 return emit_ceil( emit, insn );
2903
2904 case TGSI_OPCODE_CMP:
2905 return emit_cmp( emit, insn );
2906
2907 case TGSI_OPCODE_DIV:
2908 return emit_div( emit, insn );
2909
2910 case TGSI_OPCODE_DP2:
2911 return emit_dp2( emit, insn );
2912
2913 case TGSI_OPCODE_DPH:
2914 return emit_dph( emit, insn );
2915
2916 case TGSI_OPCODE_NRM:
2917 return emit_nrm( emit, insn );
2918
2919 case TGSI_OPCODE_COS:
2920 return emit_cos( emit, insn );
2921
2922 case TGSI_OPCODE_SIN:
2923 return emit_sin( emit, insn );
2924
2925 case TGSI_OPCODE_SCS:
2926 return emit_sincos( emit, insn );
2927
2928 case TGSI_OPCODE_END:
2929 /* TGSI always finishes the main func with an END */
2930 return emit_end( emit );
2931
2932 case TGSI_OPCODE_KILL_IF:
2933 return emit_kill_if( emit, insn );
2934
2935 /* Selection opcodes. The underlying language is fairly
2936 * non-orthogonal about these.
2937 */
2938 case TGSI_OPCODE_SEQ:
2939 return emit_select_op( emit, PIPE_FUNC_EQUAL, insn );
2940
2941 case TGSI_OPCODE_SNE:
2942 return emit_select_op( emit, PIPE_FUNC_NOTEQUAL, insn );
2943
2944 case TGSI_OPCODE_SGT:
2945 return emit_select_op( emit, PIPE_FUNC_GREATER, insn );
2946
2947 case TGSI_OPCODE_SGE:
2948 return emit_select_op( emit, PIPE_FUNC_GEQUAL, insn );
2949
2950 case TGSI_OPCODE_SLT:
2951 return emit_select_op( emit, PIPE_FUNC_LESS, insn );
2952
2953 case TGSI_OPCODE_SLE:
2954 return emit_select_op( emit, PIPE_FUNC_LEQUAL, insn );
2955
2956 case TGSI_OPCODE_SUB:
2957 return emit_sub( emit, insn );
2958
2959 case TGSI_OPCODE_POW:
2960 return emit_pow( emit, insn );
2961
2962 case TGSI_OPCODE_EX2:
2963 return emit_ex2( emit, insn );
2964
2965 case TGSI_OPCODE_EXP:
2966 return emit_exp( emit, insn );
2967
2968 case TGSI_OPCODE_LOG:
2969 return emit_log( emit, insn );
2970
2971 case TGSI_OPCODE_LG2:
2972 return emit_scalar_op1( emit, SVGA3DOP_LOG, insn );
2973
2974 case TGSI_OPCODE_RSQ:
2975 return emit_scalar_op1( emit, SVGA3DOP_RSQ, insn );
2976
2977 case TGSI_OPCODE_RCP:
2978 return emit_scalar_op1( emit, SVGA3DOP_RCP, insn );
2979
2980 case TGSI_OPCODE_CONT:
2981 /* not expected (we return PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED = 0) */
2982 return FALSE;
2983
2984 case TGSI_OPCODE_RET:
2985 /* This is a noop -- we tell mesa that we can't support RET
2986 * within a function (early return), so this will always be
2987 * followed by an ENDSUB.
2988 */
2989 return TRUE;
2990
2991 /* These aren't actually used by any of the frontends we care
2992 * about:
2993 */
2994 case TGSI_OPCODE_CLAMP:
2995 case TGSI_OPCODE_AND:
2996 case TGSI_OPCODE_OR:
2997 case TGSI_OPCODE_I2F:
2998 case TGSI_OPCODE_NOT:
2999 case TGSI_OPCODE_SHL:
3000 case TGSI_OPCODE_ISHR:
3001 case TGSI_OPCODE_XOR:
3002 return FALSE;
3003
3004 case TGSI_OPCODE_IF:
3005 return emit_if( emit, insn );
3006 case TGSI_OPCODE_ELSE:
3007 return emit_else( emit, insn );
3008 case TGSI_OPCODE_ENDIF:
3009 return emit_endif( emit, insn );
3010
3011 case TGSI_OPCODE_BGNLOOP:
3012 return emit_bgnloop( emit, insn );
3013 case TGSI_OPCODE_ENDLOOP:
3014 return emit_endloop( emit, insn );
3015 case TGSI_OPCODE_BRK:
3016 return emit_brk( emit, insn );
3017
3018 case TGSI_OPCODE_XPD:
3019 return emit_xpd( emit, insn );
3020
3021 case TGSI_OPCODE_KILL:
3022 return emit_kill( emit, insn );
3023
3024 case TGSI_OPCODE_DST:
3025 return emit_dst_insn( emit, insn );
3026
3027 case TGSI_OPCODE_LIT:
3028 return emit_lit( emit, insn );
3029
3030 case TGSI_OPCODE_LRP:
3031 return emit_lrp( emit, insn );
3032
3033 case TGSI_OPCODE_SSG:
3034 return emit_ssg( emit, insn );
3035
3036 default:
3037 {
3038 unsigned opcode = translate_opcode(insn->Instruction.Opcode);
3039
3040 if (opcode == SVGA3DOP_LAST_INST)
3041 return FALSE;
3042
3043 if (!emit_simple_instruction( emit, opcode, insn ))
3044 return FALSE;
3045 }
3046 }
3047
3048 return TRUE;
3049 }
3050
3051
3052 /**
3053 * Translate/emit a TGSI IMMEDIATE declaration.
3054 * An immediate vector is a constant that's hard-coded into the shader.
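 * Inf/NaN components are replaced with zero and immediates with fewer than
 * four components are padded from {0,0,0,1}, so e.g. a one-component
 * immediate {2.5} becomes the constant (2.5, 0, 0, 1).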
3055 */
3056 static boolean
3057 svga_emit_immediate(struct svga_shader_emitter *emit,
3058 const struct tgsi_full_immediate *imm)
3059 {
3060 static const float id[4] = {0,0,0,1};
3061 float value[4];
3062 unsigned i;
3063
3064 assert(1 <= imm->Immediate.NrTokens && imm->Immediate.NrTokens <= 5);
3065 for (i = 0; i < imm->Immediate.NrTokens - 1; i++) {
3066 float f = imm->u[i].Float;
3067 value[i] = util_is_inf_or_nan(f) ? 0.0f : f;
3068 }
3069
3070    /* If the immediate has fewer than four values, fill in the remaining
3071 * positions from id={0,0,0,1}.
3072 */
3073 for ( ; i < 4; i++ )
3074 value[i] = id[i];
3075
3076 return emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
3077 emit->imm_start + emit->internal_imm_count++,
3078 value[0], value[1], value[2], value[3]);
3079 }
3080
3081
3082 static boolean
3083 make_immediate(struct svga_shader_emitter *emit,
3084 float a, float b, float c, float d,
3085 struct src_register *out )
3086 {
3087 unsigned idx = emit->nr_hw_float_const++;
3088
3089 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
3090 idx, a, b, c, d ))
3091 return FALSE;
3092
3093 *out = src_register( SVGA3DREG_CONST, idx );
3094
3095 return TRUE;
3096 }
3097
3098
3099 /**
3100 * Emit special VS instructions at top of shader.
3101 */
3102 static boolean
3103 emit_vs_preamble(struct svga_shader_emitter *emit)
3104 {
3105 if (!emit->key.vkey.need_prescale) {
3106 if (!make_immediate( emit, 0, 0, .5, .5,
3107 &emit->imm_0055))
3108 return FALSE;
3109 }
3110
3111 return TRUE;
3112 }
3113
3114
3115 /**
3116 * Emit special PS instructions at top of shader.
3117 */
3118 static boolean
3119 emit_ps_preamble(struct svga_shader_emitter *emit)
3120 {
3121 if (emit->ps_reads_pos && emit->info.reads_z) {
3122 /*
3123        * Assemble the position from various bits of inputs.  Depth and W are
3124        * passed in a texcoord because D3D's vPos does not hold Z or W.
3125        * Also fix up the perspective interpolation.
3126 *
3127 * temp_pos.xy = vPos.xy
3128 * temp_pos.w = rcp(texcoord1.w);
3129 * temp_pos.z = texcoord1.z * temp_pos.w;
3130 */
3131 if (!submit_op1( emit,
3132 inst_token(SVGA3DOP_MOV),
3133 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_XY ),
3134 emit->ps_true_pos ))
3135 return FALSE;
3136
3137 if (!submit_op1( emit,
3138 inst_token(SVGA3DOP_RCP),
3139 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_W ),
3140 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_W ) ))
3141 return FALSE;
3142
3143 if (!submit_op2( emit,
3144 inst_token(SVGA3DOP_MUL),
3145 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_Z ),
3146 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_Z ),
3147 scalar( src(emit->ps_temp_pos), TGSI_SWIZZLE_W ) ))
3148 return FALSE;
3149 }
3150
3151 return TRUE;
3152 }
3153
3154
3155 /**
3156 * Emit special PS instructions at end of shader.
3157 */
3158 static boolean
3159 emit_ps_postamble(struct svga_shader_emitter *emit)
3160 {
3161 unsigned i;
3162
3163 /* PS oDepth is incredibly fragile and it's very hard to catch the
3164 * types of usage that break it during shader emit. Easier just to
3165 * redirect the main program to a temporary and then only touch
3166 * oDepth with a hand-crafted MOV below.
3167 */
3168 if (SVGA3dShaderGetRegType(emit->true_pos.value) != 0) {
3169 if (!submit_op1( emit,
3170 inst_token(SVGA3DOP_MOV),
3171 emit->true_pos,
3172 scalar(src(emit->temp_pos), TGSI_SWIZZLE_Z) ))
3173 return FALSE;
3174 }
3175
3176 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
3177 if (SVGA3dShaderGetRegType(emit->true_color_output[i].value) != 0) {
3178 /* Potentially override output colors with white for XOR
3179 * logicop workaround.
3180 */
3181 if (emit->unit == PIPE_SHADER_FRAGMENT &&
3182 emit->key.fkey.white_fragments) {
3183 struct src_register one = scalar( get_zero_immediate( emit ),
3184 TGSI_SWIZZLE_W );
3185
3186 if (!submit_op1( emit,
3187 inst_token(SVGA3DOP_MOV),
3188 emit->true_color_output[i],
3189 one ))
3190 return FALSE;
3191 }
3192 else if (emit->unit == PIPE_SHADER_FRAGMENT &&
3193 i < emit->key.fkey.write_color0_to_n_cbufs) {
3194 /* Write temp color output [0] to true output [i] */
3195 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV),
3196 emit->true_color_output[i],
3197 src(emit->temp_color_output[0]))) {
3198 return FALSE;
3199 }
3200 }
3201 else {
3202 if (!submit_op1( emit,
3203 inst_token(SVGA3DOP_MOV),
3204 emit->true_color_output[i],
3205 src(emit->temp_color_output[i]) ))
3206 return FALSE;
3207 }
3208 }
3209 }
3210
3211 return TRUE;
3212 }
3213
3214
3215 /**
3216 * Emit special VS instructions at end of shader.
3217 */
3218 static boolean
3219 emit_vs_postamble(struct svga_shader_emitter *emit)
3220 {
3221 /* PSIZ output is incredibly fragile and it's very hard to catch
3222 * the types of usage that break it during shader emit. Easier
3223 * just to redirect the main program to a temporary and then only
3224 * touch PSIZ with a hand-crafted MOV below.
3225 */
3226 if (SVGA3dShaderGetRegType(emit->true_psiz.value) != 0) {
3227 if (!submit_op1( emit,
3228 inst_token(SVGA3DOP_MOV),
3229 emit->true_psiz,
3230 scalar(src(emit->temp_psiz), TGSI_SWIZZLE_X) ))
3231 return FALSE;
3232 }
3233
3234 /* Need to perform various manipulations on vertex position to cope
3235 * with the different GL and D3D clip spaces.
3236 */
3237 if (emit->key.vkey.need_prescale) {
3238 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3239 SVGA3dShaderDestToken depth = emit->depth_pos;
3240 SVGA3dShaderDestToken pos = emit->true_pos;
3241 unsigned offset = emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
3242 struct src_register prescale_scale = src_register( SVGA3DREG_CONST,
3243 offset + 0 );
3244 struct src_register prescale_trans = src_register( SVGA3DREG_CONST,
3245 offset + 1 );
3246
3247 if (!submit_op1( emit,
3248 inst_token(SVGA3DOP_MOV),
3249 writemask(depth, TGSI_WRITEMASK_W),
3250 scalar(src(temp_pos), TGSI_SWIZZLE_W) ))
3251 return FALSE;
3252
3253 /* MUL temp_pos.xyz, temp_pos, prescale.scale
3254 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
3255 * --> Note that prescale.trans.w == 0
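          * i.e. pos' = temp_pos * prescale.scale + temp_pos.w * prescale.trans,
          * where prescale.scale/trans are two constant registers set up by
          * the driver just past the shader's own constants.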
3256 */
3257 if (!submit_op2( emit,
3258 inst_token(SVGA3DOP_MUL),
3259 writemask(temp_pos, TGSI_WRITEMASK_XYZ),
3260 src(temp_pos),
3261 prescale_scale ))
3262 return FALSE;
3263
3264 if (!submit_op3( emit,
3265 inst_token(SVGA3DOP_MAD),
3266 pos,
3267 swizzle(src(temp_pos), 3, 3, 3, 3),
3268 prescale_trans,
3269 src(temp_pos)))
3270 return FALSE;
3271
3272 /* Also write to depth value */
3273 if (!submit_op3( emit,
3274 inst_token(SVGA3DOP_MAD),
3275 writemask(depth, TGSI_WRITEMASK_Z),
3276 swizzle(src(temp_pos), 3, 3, 3, 3),
3277 prescale_trans,
3278 src(temp_pos) ))
3279 return FALSE;
3280 }
3281 else {
3282 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3283 SVGA3dShaderDestToken depth = emit->depth_pos;
3284 SVGA3dShaderDestToken pos = emit->true_pos;
3285 struct src_register imm_0055 = emit->imm_0055;
3286
3287 /* Adjust GL clipping coordinate space to hardware (D3D-style):
3288 *
3289 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
3290 * MOV result.position, temp_pos
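       *
       * The DP4 computes z' = 0.5*z + 0.5*w, remapping GL's clip-space
       * z range of [-w, w] to the D3D-style range of [0, w].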
3291 */
3292 if (!submit_op2( emit,
3293 inst_token(SVGA3DOP_DP4),
3294 writemask(temp_pos, TGSI_WRITEMASK_Z),
3295 imm_0055,
3296 src(temp_pos) ))
3297 return FALSE;
3298
3299 if (!submit_op1( emit,
3300 inst_token(SVGA3DOP_MOV),
3301 pos,
3302 src(temp_pos) ))
3303 return FALSE;
3304
3305 /* Move the manipulated depth into the extra texcoord reg */
3306 if (!submit_op1( emit,
3307 inst_token(SVGA3DOP_MOV),
3308 writemask(depth, TGSI_WRITEMASK_ZW),
3309 src(temp_pos) ))
3310 return FALSE;
3311 }
3312
3313 return TRUE;
3314 }
3315
3316
3317 /**
3318 * For the pixel shader: emit the code which chooses the front
3319 * or back face color depending on triangle orientation.
3320 * This happens at the top of the fragment shader.
3321 *
3322 * 0: IF VFACE :4
3323 * 1: COLOR = FrontColor;
3324 * 2: ELSE
3325 * 3: COLOR = BackColor;
3326 * 4: ENDIF
3327 */
3328 static boolean
3329 emit_light_twoside(struct svga_shader_emitter *emit)
3330 {
3331 struct src_register vface, zero;
3332 struct src_register front[2];
3333 struct src_register back[2];
3334 SVGA3dShaderDestToken color[2];
3335 int count = emit->internal_color_count;
3336 int i;
3337 SVGA3dShaderInstToken if_token;
3338
3339 if (count == 0)
3340 return TRUE;
3341
3342 vface = get_vface( emit );
3343 zero = get_zero_immediate( emit );
3344
3345 /* Can't use get_temp() to allocate the color reg as such
3346 * temporaries will be reclaimed after each instruction by the call
3347 * to reset_temp_regs().
3348 */
3349 for (i = 0; i < count; i++) {
3350 color[i] = dst_register( SVGA3DREG_TEMP, emit->nr_hw_temp++ );
3351 front[i] = emit->input_map[emit->internal_color_idx[i]];
3352
3353 /* Back is always the next input:
3354 */
3355 back[i] = front[i];
3356 back[i].base.num = front[i].base.num + 1;
3357
3358 /* Reassign the input_map to the actual front-face color:
3359 */
3360 emit->input_map[emit->internal_color_idx[i]] = src(color[i]);
3361 }
3362
3363 if_token = inst_token( SVGA3DOP_IFC );
3364
3365 if (emit->key.fkey.front_ccw)
3366 if_token.control = SVGA3DOPCOMP_LT;
3367 else
3368 if_token.control = SVGA3DOPCOMP_GT;
3369
3370 zero = scalar(zero, TGSI_SWIZZLE_X);
3371
3372 if (!(emit_instruction( emit, if_token ) &&
3373 emit_src( emit, vface ) &&
3374 emit_src( emit, zero ) ))
3375 return FALSE;
3376
3377 for (i = 0; i < count; i++) {
3378 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], front[i] ))
3379 return FALSE;
3380 }
3381
3382 if (!(emit_instruction( emit, inst_token( SVGA3DOP_ELSE))))
3383 return FALSE;
3384
3385 for (i = 0; i < count; i++) {
3386 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], back[i] ))
3387 return FALSE;
3388 }
3389
3390 if (!emit_instruction( emit, inst_token( SVGA3DOP_ENDIF ) ))
3391 return FALSE;
3392
3393 return TRUE;
3394 }
3395
3396
3397 /**
3398 * Emit special setup code for the front/back face register in the FS.
3399 * 0: SETP_GT TEMP, VFACE, 0
3400 * where TEMP is a fake frontface register
3401 */
3402 static boolean
3403 emit_frontface(struct svga_shader_emitter *emit)
3404 {
3405 struct src_register vface, zero;
3406 SVGA3dShaderDestToken temp;
3407 struct src_register pass, fail;
3408
3409 vface = get_vface( emit );
3410 zero = get_zero_immediate( emit );
3411
3412 /* Can't use get_temp() to allocate the fake frontface reg as such
3413 * temporaries will be reclaimed after each instruction by the call
3414 * to reset_temp_regs().
3415 */
3416 temp = dst_register( SVGA3DREG_TEMP,
3417 emit->nr_hw_temp++ );
3418
3419 if (emit->key.fkey.front_ccw) {
3420 pass = scalar( zero, TGSI_SWIZZLE_X );
3421 fail = scalar( zero, TGSI_SWIZZLE_W );
3422 } else {
3423 pass = scalar( zero, TGSI_SWIZZLE_W );
3424 fail = scalar( zero, TGSI_SWIZZLE_X );
3425 }
3426
3427 if (!emit_conditional(emit, PIPE_FUNC_GREATER,
3428 temp, vface, scalar( zero, TGSI_SWIZZLE_X ),
3429 pass, fail))
3430 return FALSE;
3431
3432 /* Reassign the input_map to the actual front-face color:
3433 */
3434 emit->input_map[emit->internal_frontface_idx] = src(temp);
3435
3436 return TRUE;
3437 }
3438
3439
3440 /**
3441 * Emit code to invert the T component of the incoming texture coordinate.
3442 * This is used for drawing point sprites when
3443 * pipe_rasterizer_state::sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT.
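 * The MAD below computes coord * (1, -1, 1, 1) + (0, 1, 0, 0), i.e.
 * t' = 1 - t, leaving the S, R and Q components unchanged.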
3444 */
3445 static boolean
3446 emit_inverted_texcoords(struct svga_shader_emitter *emit)
3447 {
3448 struct src_register zero = get_zero_immediate(emit);
3449 struct src_register pos_neg_one = get_pos_neg_one_immediate( emit );
3450 unsigned inverted_texcoords = emit->inverted_texcoords;
3451
3452 while (inverted_texcoords) {
3453 const unsigned unit = ffs(inverted_texcoords) - 1;
3454
3455 assert(emit->inverted_texcoords & (1 << unit));
3456
3457 assert(unit < Elements(emit->ps_true_texcoord));
3458
3459 assert(unit < Elements(emit->ps_inverted_texcoord_input));
3460
3461 assert(emit->ps_inverted_texcoord_input[unit]
3462 < Elements(emit->input_map));
3463
3464 /* inverted = coord * (1, -1, 1, 1) + (0, 1, 0, 0) */
3465 if (!submit_op3(emit,
3466 inst_token(SVGA3DOP_MAD),
3467 dst(emit->ps_inverted_texcoord[unit]),
3468 emit->ps_true_texcoord[unit],
3469 swizzle(pos_neg_one, 0, 3, 0, 0), /* (1, -1, 1, 1) */
3470 swizzle(zero, 0, 3, 0, 0))) /* (0, 1, 0, 0) */
3471 return FALSE;
3472
3473 /* Reassign the input_map entry to the new texcoord register */
3474 emit->input_map[emit->ps_inverted_texcoord_input[unit]] =
3475 emit->ps_inverted_texcoord[unit];
3476
3477 inverted_texcoords &= ~(1 << unit);
3478 }
3479
3480 return TRUE;
3481 }
3482
3483
3484 /**
3485 * Determine if we need to emit an immediate value with zeros.
3486 * We could just do this all the time except that we want to conserve
3487 * registers whenever possible.
3488 */
3489 static boolean
3490 needs_to_create_zero(const struct svga_shader_emitter *emit)
3491 {
3492 unsigned i;
3493
3494 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3495 if (emit->key.fkey.light_twoside)
3496 return TRUE;
3497
3498 if (emit->key.fkey.white_fragments)
3499 return TRUE;
3500
3501 if (emit->emit_frontface)
3502 return TRUE;
3503
3504 if (emit->info.opcode_count[TGSI_OPCODE_DST] >= 1 ||
3505 emit->info.opcode_count[TGSI_OPCODE_SSG] >= 1 ||
3506 emit->info.opcode_count[TGSI_OPCODE_LIT] >= 1)
3507 return TRUE;
3508
3509 if (emit->inverted_texcoords)
3510 return TRUE;
3511
3512 /* look for any PIPE_SWIZZLE_ZERO/ONE terms */
3513 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3514 if (emit->key.fkey.tex[i].swizzle_r > PIPE_SWIZZLE_ALPHA ||
3515 emit->key.fkey.tex[i].swizzle_g > PIPE_SWIZZLE_ALPHA ||
3516 emit->key.fkey.tex[i].swizzle_b > PIPE_SWIZZLE_ALPHA ||
3517 emit->key.fkey.tex[i].swizzle_a > PIPE_SWIZZLE_ALPHA)
3518 return TRUE;
3519 }
3520
3521 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3522 if (emit->key.fkey.tex[i].compare_mode
3523 == PIPE_TEX_COMPARE_R_TO_TEXTURE)
3524 return TRUE;
3525 }
3526 }
3527
3528 if (emit->unit == PIPE_SHADER_VERTEX) {
3529 if (emit->info.opcode_count[TGSI_OPCODE_CMP] >= 1)
3530 return TRUE;
3531 }
3532
3533 if (emit->info.opcode_count[TGSI_OPCODE_IF] >= 1 ||
3534 emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1 ||
3535 emit->info.opcode_count[TGSI_OPCODE_DDX] >= 1 ||
3536 emit->info.opcode_count[TGSI_OPCODE_DDY] >= 1 ||
3537 emit->info.opcode_count[TGSI_OPCODE_ROUND] >= 1 ||
3538 emit->info.opcode_count[TGSI_OPCODE_SGE] >= 1 ||
3539 emit->info.opcode_count[TGSI_OPCODE_SGT] >= 1 ||
3540 emit->info.opcode_count[TGSI_OPCODE_SLE] >= 1 ||
3541 emit->info.opcode_count[TGSI_OPCODE_SLT] >= 1 ||
3542 emit->info.opcode_count[TGSI_OPCODE_SNE] >= 1 ||
3543 emit->info.opcode_count[TGSI_OPCODE_SEQ] >= 1 ||
3544 emit->info.opcode_count[TGSI_OPCODE_EXP] >= 1 ||
3545 emit->info.opcode_count[TGSI_OPCODE_LOG] >= 1 ||
3546 emit->info.opcode_count[TGSI_OPCODE_XPD] >= 1 ||
3547 emit->info.opcode_count[TGSI_OPCODE_KILL] >= 1)
3548 return TRUE;
3549
3550 return FALSE;
3551 }
3552
3553
3554 /**
3555 * Do we need to create a looping constant?
3556 */
3557 static boolean
3558 needs_to_create_loop_const(const struct svga_shader_emitter *emit)
3559 {
3560 return (emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1);
3561 }
3562
3563
3564 static boolean
3565 needs_to_create_arl_consts(const struct svga_shader_emitter *emit)
3566 {
3567 return (emit->num_arl_consts > 0);
3568 }
3569
3570
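/**
 * Pre-parse helper: remember that address-register-relative indexing was
 * seen with the negative base index 'num' for ARL number 'current_arl'.
 * The smallest (most negative) index per ARL is kept so that an adjustment
 * constant can later be created by create_arl_consts().
 */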
3571 static boolean
3572 pre_parse_add_indirect( struct svga_shader_emitter *emit,
3573 int num, int current_arl)
3574 {
3575 int i;
3576 assert(num < 0);
3577
3578 for (i = 0; i < emit->num_arl_consts; ++i) {
3579 if (emit->arl_consts[i].arl_num == current_arl)
3580 break;
3581 }
3582 /* new entry */
3583 if (emit->num_arl_consts == i) {
3584 ++emit->num_arl_consts;
3585 }
3586 emit->arl_consts[i].number = (emit->arl_consts[i].number > num) ?
3587 num :
3588 emit->arl_consts[i].number;
3589 emit->arl_consts[i].arl_num = current_arl;
3590 return TRUE;
3591 }
3592
3593
3594 static boolean
3595 pre_parse_instruction( struct svga_shader_emitter *emit,
3596 const struct tgsi_full_instruction *insn,
3597 int current_arl)
3598 {
3599 if (insn->Src[0].Register.Indirect &&
3600 insn->Src[0].Indirect.File == TGSI_FILE_ADDRESS) {
3601 const struct tgsi_full_src_register *reg = &insn->Src[0];
3602 if (reg->Register.Index < 0) {
3603 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3604 }
3605 }
3606
3607 if (insn->Src[1].Register.Indirect &&
3608 insn->Src[1].Indirect.File == TGSI_FILE_ADDRESS) {
3609 const struct tgsi_full_src_register *reg = &insn->Src[1];
3610 if (reg->Register.Index < 0) {
3611 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3612 }
3613 }
3614
3615 if (insn->Src[2].Register.Indirect &&
3616 insn->Src[2].Indirect.File == TGSI_FILE_ADDRESS) {
3617 const struct tgsi_full_src_register *reg = &insn->Src[2];
3618 if (reg->Register.Index < 0) {
3619 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3620 }
3621 }
3622
3623 return TRUE;
3624 }
3625
3626
3627 static boolean
3628 pre_parse_tokens( struct svga_shader_emitter *emit,
3629 const struct tgsi_token *tokens )
3630 {
3631 struct tgsi_parse_context parse;
3632 int current_arl = 0;
3633
3634 tgsi_parse_init( &parse, tokens );
3635
3636 while (!tgsi_parse_end_of_tokens( &parse )) {
3637 tgsi_parse_token( &parse );
3638 switch (parse.FullToken.Token.Type) {
3639 case TGSI_TOKEN_TYPE_IMMEDIATE:
3640 case TGSI_TOKEN_TYPE_DECLARATION:
3641 break;
3642 case TGSI_TOKEN_TYPE_INSTRUCTION:
3643 if (parse.FullToken.FullInstruction.Instruction.Opcode ==
3644 TGSI_OPCODE_ARL) {
3645 ++current_arl;
3646 }
3647 if (!pre_parse_instruction( emit, &parse.FullToken.FullInstruction,
3648 current_arl ))
3649 return FALSE;
3650 break;
3651 default:
3652 break;
3653 }
3654
3655 }
3656 return TRUE;
3657 }
3658
3659
3660 static boolean
3661 svga_shader_emit_helpers(struct svga_shader_emitter *emit)
3662 {
3663 if (needs_to_create_zero( emit )) {
3664 create_zero_immediate( emit );
3665 }
3666 if (needs_to_create_loop_const( emit )) {
3667 create_loop_const( emit );
3668 }
3669 if (needs_to_create_arl_consts( emit )) {
3670 create_arl_consts( emit );
3671 }
3672
3673 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3674 if (!emit_ps_preamble( emit ))
3675 return FALSE;
3676
3677 if (emit->key.fkey.light_twoside) {
3678 if (!emit_light_twoside( emit ))
3679 return FALSE;
3680 }
3681 if (emit->emit_frontface) {
3682 if (!emit_frontface( emit ))
3683 return FALSE;
3684 }
3685 if (emit->inverted_texcoords) {
3686 if (!emit_inverted_texcoords( emit ))
3687 return FALSE;
3688 }
3689 }
3690
3691 return TRUE;
3692 }
3693
3694
3695 /**
3696  * This is the main entry point into the TGSI instruction translator.
3697 * Translate TGSI shader tokens into an SVGA shader.
3698 */
3699 boolean
3700 svga_shader_emit_instructions(struct svga_shader_emitter *emit,
3701 const struct tgsi_token *tokens)
3702 {
3703 struct tgsi_parse_context parse;
3704 boolean ret = TRUE;
3705 boolean helpers_emitted = FALSE;
3706 unsigned line_nr = 0;
3707
3708 tgsi_parse_init( &parse, tokens );
3709 emit->internal_imm_count = 0;
3710
3711 if (emit->unit == PIPE_SHADER_VERTEX) {
3712 ret = emit_vs_preamble( emit );
3713 if (!ret)
3714 goto done;
3715 }
3716
3717 pre_parse_tokens(emit, tokens);
3718
3719 while (!tgsi_parse_end_of_tokens( &parse )) {
3720 tgsi_parse_token( &parse );
3721
3722 switch (parse.FullToken.Token.Type) {
3723 case TGSI_TOKEN_TYPE_IMMEDIATE:
3724 ret = svga_emit_immediate( emit, &parse.FullToken.FullImmediate );
3725 if (!ret)
3726 goto done;
3727 break;
3728
3729 case TGSI_TOKEN_TYPE_DECLARATION:
3730 ret = svga_translate_decl_sm30( emit, &parse.FullToken.FullDeclaration );
3731 if (!ret)
3732 goto done;
3733 break;
3734
3735 case TGSI_TOKEN_TYPE_INSTRUCTION:
3736 if (!helpers_emitted) {
3737 if (!svga_shader_emit_helpers( emit ))
3738 goto done;
3739 helpers_emitted = TRUE;
3740 }
3741 ret = svga_emit_instruction( emit,
3742 line_nr++,
3743 &parse.FullToken.FullInstruction );
3744 if (!ret)
3745 goto done;
3746 break;
3747 default:
3748 break;
3749 }
3750
3751 reset_temp_regs( emit );
3752 }
3753
3754    /* If we finished inside a subroutine, terminate it here.  The hardware
3755     * requires shaders that contain subroutines to terminate with RET
3756     * followed by END.
3757 */
3758 if (!emit->in_main_func) {
3759 ret = emit_instruction( emit, inst_token( SVGA3DOP_RET ) );
3760 if (!ret)
3761 goto done;
3762 }
3763
3764 assert(emit->dynamic_branching_level == 0);
3765
3766 /* Need to terminate the whole shader:
3767 */
3768 ret = emit_instruction( emit, inst_token( SVGA3DOP_END ) );
3769 if (!ret)
3770 goto done;
3771
3772 done:
3773 tgsi_parse_free( &parse );
3774 return ret;
3775 }