src/gallium/drivers/svga/svga_tgsi_insn.c (mesa.git)
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_dump.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
32
33 #include "svga_tgsi_emit.h"
34 #include "svga_context.h"
35
36
37 static boolean emit_vs_postamble( struct svga_shader_emitter *emit );
38 static boolean emit_ps_postamble( struct svga_shader_emitter *emit );
39
40
41 static unsigned
42 translate_opcode(uint opcode)
43 {
44 switch (opcode) {
45 case TGSI_OPCODE_ABS: return SVGA3DOP_ABS;
46 case TGSI_OPCODE_ADD: return SVGA3DOP_ADD;
47 case TGSI_OPCODE_DP2A: return SVGA3DOP_DP2ADD;
48 case TGSI_OPCODE_DP3: return SVGA3DOP_DP3;
49 case TGSI_OPCODE_DP4: return SVGA3DOP_DP4;
50 case TGSI_OPCODE_FRC: return SVGA3DOP_FRC;
51 case TGSI_OPCODE_MAD: return SVGA3DOP_MAD;
52 case TGSI_OPCODE_MAX: return SVGA3DOP_MAX;
53 case TGSI_OPCODE_MIN: return SVGA3DOP_MIN;
54 case TGSI_OPCODE_MOV: return SVGA3DOP_MOV;
55 case TGSI_OPCODE_MUL: return SVGA3DOP_MUL;
56 case TGSI_OPCODE_NOP: return SVGA3DOP_NOP;
57 case TGSI_OPCODE_NRM4: return SVGA3DOP_NRM;
58 default:
59 assert(!"svga: unexpected opcode in translate_opcode()");
60 return SVGA3DOP_LAST_INST;
61 }
62 }
63
64
65 static unsigned
66 translate_file(unsigned file)
67 {
68 switch (file) {
69 case TGSI_FILE_TEMPORARY: return SVGA3DREG_TEMP;
70 case TGSI_FILE_INPUT: return SVGA3DREG_INPUT;
71 case TGSI_FILE_OUTPUT: return SVGA3DREG_OUTPUT; /* VS3.0+ only */
72 case TGSI_FILE_IMMEDIATE: return SVGA3DREG_CONST;
73 case TGSI_FILE_CONSTANT: return SVGA3DREG_CONST;
74 case TGSI_FILE_SAMPLER: return SVGA3DREG_SAMPLER;
75 case TGSI_FILE_ADDRESS: return SVGA3DREG_ADDR;
76 default:
77 assert(!"svga: unexpected register file in translate_file()");
78 return SVGA3DREG_TEMP;
79 }
80 }
81
82
83 /**
84 * Translate a TGSI destination register to an SVGA3DShaderDestToken.
85 * \param insn the TGSI instruction
86 * \param idx which TGSI dest register to translate (usually (always?) zero)
87 */
88 static SVGA3dShaderDestToken
89 translate_dst_register( struct svga_shader_emitter *emit,
90 const struct tgsi_full_instruction *insn,
91 unsigned idx )
92 {
93 const struct tgsi_full_dst_register *reg = &insn->Dst[idx];
94 SVGA3dShaderDestToken dest;
95
96 switch (reg->Register.File) {
97 case TGSI_FILE_OUTPUT:
98 /* Output registers encode semantic information in their name.
99 * Need to look up a table built at decl time:
100 */
101 dest = emit->output_map[reg->Register.Index];
102 break;
103
104 default:
105 {
106 unsigned index = reg->Register.Index;
107 assert(index < SVGA3D_TEMPREG_MAX);
108 index = MIN2(index, SVGA3D_TEMPREG_MAX - 1);
109 dest = dst_register(translate_file(reg->Register.File), index);
110 }
111 break;
112 }
113
114 if (reg->Register.Indirect) {
115 debug_warning("Indirect indexing of dest registers is not supported!\n");
116 }
117
118 dest.mask = reg->Register.WriteMask;
119 assert(dest.mask);
120
121 if (insn->Instruction.Saturate)
122 dest.dstMod = SVGA3DDSTMOD_SATURATE;
123
124 return dest;
125 }
126
127
128 /**
129 * Apply a swizzle to a src_register, returning a new src_register
130 * Ex: swizzle(SRC.ZZYY, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y)
131 * would return SRC.YYZZ
132 */
133 static struct src_register
134 swizzle(struct src_register src,
135 unsigned x, unsigned y, unsigned z, unsigned w)
136 {
137 assert(x < 4);
138 assert(y < 4);
139 assert(z < 4);
140 assert(w < 4);
141 x = (src.base.swizzle >> (x * 2)) & 0x3;
142 y = (src.base.swizzle >> (y * 2)) & 0x3;
143 z = (src.base.swizzle >> (z * 2)) & 0x3;
144 w = (src.base.swizzle >> (w * 2)) & 0x3;
145
146 src.base.swizzle = TRANSLATE_SWIZZLE(x, y, z, w);
147
148 return src;
149 }
150
151
152 /**
153 * Apply a "scalar" swizzle to a src_register returning a new
154 * src_register where all the swizzle terms are the same.
155 * Ex: scalar(SRC.WZYX, SWIZZLE_Y) would return SRC.ZZZZ
156 */
157 static struct src_register
158 scalar(struct src_register src, unsigned comp)
159 {
160 assert(comp < 4);
161 return swizzle( src, comp, comp, comp, comp );
162 }
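
/**
 * A small composition sketch for the two helpers above.  The values in the
 * comments follow directly from the definitions; the function itself is
 * illustrative only and is not used by the emitter.
 */
static inline struct src_register
example_swizzle_composition(struct src_register reg)
{
   /* Suppose reg currently reads .ZZYY (terms Z,Z,Y,Y).  Applying
    * swizzle(reg, Z, W, X, Y) picks terms 2,3,0,1 of that swizzle,
    * so the result reads .YYZZ -- matching the example on swizzle().
    */
   struct src_register tmp = swizzle(reg,
                                     TGSI_SWIZZLE_Z, TGSI_SWIZZLE_W,
                                     TGSI_SWIZZLE_X, TGSI_SWIZZLE_Y);

   /* scalar(tmp, W) replicates tmp's fourth term, so the final register
    * reads .ZZZZ of the underlying register.
    */
   return scalar(tmp, TGSI_SWIZZLE_W);
}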
163
164
165 static boolean
166 svga_arl_needs_adjustment( const struct svga_shader_emitter *emit )
167 {
168 int i;
169
170 for (i = 0; i < emit->num_arl_consts; ++i) {
171 if (emit->arl_consts[i].arl_num == emit->current_arl)
172 return TRUE;
173 }
174 return FALSE;
175 }
176
177
178 static int
179 svga_arl_adjustment( const struct svga_shader_emitter *emit )
180 {
181 int i;
182
183 for (i = 0; i < emit->num_arl_consts; ++i) {
184 if (emit->arl_consts[i].arl_num == emit->current_arl)
185 return emit->arl_consts[i].number;
186 }
187 return 0;
188 }
189
190
191 /**
192 * Translate a TGSI src register to a src_register.
193 */
194 static struct src_register
195 translate_src_register( const struct svga_shader_emitter *emit,
196 const struct tgsi_full_src_register *reg )
197 {
198 struct src_register src;
199
200 switch (reg->Register.File) {
201 case TGSI_FILE_INPUT:
202 /* Input registers are referred to by their semantic name rather
203 * than by index. Use the mapping built up from the decls:
204 */
205 src = emit->input_map[reg->Register.Index];
206 break;
207
208 case TGSI_FILE_IMMEDIATE:
209 /* Immediates are appended after TGSI constants in the D3D
210 * constant buffer.
211 */
212 src = src_register( translate_file( reg->Register.File ),
213 reg->Register.Index + emit->imm_start );
214 break;
215
216 default:
217 src = src_register( translate_file( reg->Register.File ),
218 reg->Register.Index );
219 break;
220 }
221
222 /* Indirect addressing.
223 */
224 if (reg->Register.Indirect) {
225 if (emit->unit == PIPE_SHADER_FRAGMENT) {
226 /* Pixel shaders have only loop registers for relative
227 * addressing into inputs. Ignore the redundant address
228 * register; the contents of aL should be in sync with it.
229 */
230 if (reg->Register.File == TGSI_FILE_INPUT) {
231 src.base.relAddr = 1;
232 src.indirect = src_token(SVGA3DREG_LOOP, 0);
233 }
234 }
235 else {
236 /* Constant buffers only.
237 */
238 if (reg->Register.File == TGSI_FILE_CONSTANT) {
239 /* we shift the offset towards the minimum */
240 if (svga_arl_needs_adjustment( emit )) {
241 src.base.num -= svga_arl_adjustment( emit );
242 }
243 src.base.relAddr = 1;
244
245 /* Not really sure what should go in the second token:
246 */
247 src.indirect = src_token( SVGA3DREG_ADDR,
248 reg->Indirect.Index );
249
250 src.indirect.swizzle = SWIZZLE_XXXX;
251 }
252 }
253 }
254
255 src = swizzle( src,
256 reg->Register.SwizzleX,
257 reg->Register.SwizzleY,
258 reg->Register.SwizzleZ,
259 reg->Register.SwizzleW );
260
261 /* src.mod isn't a bitfield, unfortunately:
262 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
263 */
264 if (reg->Register.Absolute) {
265 if (reg->Register.Negate)
266 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
267 else
268 src.base.srcMod = SVGA3DSRCMOD_ABS;
269 }
270 else {
271 if (reg->Register.Negate)
272 src.base.srcMod = SVGA3DSRCMOD_NEG;
273 else
274 src.base.srcMod = SVGA3DSRCMOD_NONE;
275 }
276
277 return src;
278 }
279
280
281 /*
282 * Get a temporary register.
283 * Note: if we exceed the temporary register limit we just use
284 * register SVGA3D_TEMPREG_MAX - 1.
285 */
286 static SVGA3dShaderDestToken
287 get_temp( struct svga_shader_emitter *emit )
288 {
289 int i = emit->nr_hw_temp + emit->internal_temp_count++;
290 if (i >= SVGA3D_TEMPREG_MAX) {
291 debug_warn_once("svga: Too many temporary registers used in shader\n");
292 i = SVGA3D_TEMPREG_MAX - 1;
293 }
294 return dst_register( SVGA3DREG_TEMP, i );
295 }
296
297
298 /**
299 * Release a single temp. Currently only effective if it was the last
300 * allocated temp, otherwise release will be delayed until the next
301 * call to reset_temp_regs().
302 */
303 static void
304 release_temp( struct svga_shader_emitter *emit,
305 SVGA3dShaderDestToken temp )
306 {
307 if (temp.num == emit->internal_temp_count - 1)
308 emit->internal_temp_count--;
309 }
310
311
312 /**
313 * Release all temps.
314 */
315 static void
316 reset_temp_regs(struct svga_shader_emitter *emit)
317 {
318 emit->internal_temp_count = 0;
319 }
320
321
322 /** Emit bytecode for a src_register */
323 static boolean
324 emit_src(struct svga_shader_emitter *emit, const struct src_register src)
325 {
326 if (src.base.relAddr) {
327 assert(src.base.reserved0);
328 assert(src.indirect.reserved0);
329 return (svga_shader_emit_dword( emit, src.base.value ) &&
330 svga_shader_emit_dword( emit, src.indirect.value ));
331 }
332 else {
333 assert(src.base.reserved0);
334 return svga_shader_emit_dword( emit, src.base.value );
335 }
336 }
337
338
339 /** Emit bytecode for a dst_register */
340 static boolean
341 emit_dst(struct svga_shader_emitter *emit, SVGA3dShaderDestToken dest)
342 {
343 assert(dest.reserved0);
344 assert(dest.mask);
345 return svga_shader_emit_dword( emit, dest.value );
346 }
347
348
349 /** Emit bytecode for a 1-operand instruction */
350 static boolean
351 emit_op1(struct svga_shader_emitter *emit,
352 SVGA3dShaderInstToken inst,
353 SVGA3dShaderDestToken dest,
354 struct src_register src0)
355 {
356 return (emit_instruction(emit, inst) &&
357 emit_dst(emit, dest) &&
358 emit_src(emit, src0));
359 }
360
361
362 /** Emit bytecode for a 2-operand instruction */
363 static boolean
364 emit_op2(struct svga_shader_emitter *emit,
365 SVGA3dShaderInstToken inst,
366 SVGA3dShaderDestToken dest,
367 struct src_register src0,
368 struct src_register src1)
369 {
370 return (emit_instruction(emit, inst) &&
371 emit_dst(emit, dest) &&
372 emit_src(emit, src0) &&
373 emit_src(emit, src1));
374 }
375
376
377 /** Emit bytecode for a 3-operand instruction */
378 static boolean
379 emit_op3(struct svga_shader_emitter *emit,
380 SVGA3dShaderInstToken inst,
381 SVGA3dShaderDestToken dest,
382 struct src_register src0,
383 struct src_register src1,
384 struct src_register src2)
385 {
386 return (emit_instruction(emit, inst) &&
387 emit_dst(emit, dest) &&
388 emit_src(emit, src0) &&
389 emit_src(emit, src1) &&
390 emit_src(emit, src2));
391 }
392
393
394 /** Emit bytecode for a 4-operand instruction */
395 static boolean
396 emit_op4(struct svga_shader_emitter *emit,
397 SVGA3dShaderInstToken inst,
398 SVGA3dShaderDestToken dest,
399 struct src_register src0,
400 struct src_register src1,
401 struct src_register src2,
402 struct src_register src3)
403 {
404 return (emit_instruction(emit, inst) &&
405 emit_dst(emit, dest) &&
406 emit_src(emit, src0) &&
407 emit_src(emit, src1) &&
408 emit_src(emit, src2) &&
409 emit_src(emit, src3));
410 }
411
412
413 /**
414 * Apply the absolute value modifier to the given src_register, returning
415 * a new src_register.
416 */
417 static struct src_register
418 absolute(struct src_register src)
419 {
420 src.base.srcMod = SVGA3DSRCMOD_ABS;
421 return src;
422 }
423
424
425 /**
426 * Apply the negation modifier to the given src_register, returning
427 * a new src_register.
428 */
429 static struct src_register
430 negate(struct src_register src)
431 {
432 switch (src.base.srcMod) {
433 case SVGA3DSRCMOD_ABS:
434 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
435 break;
436 case SVGA3DSRCMOD_ABSNEG:
437 src.base.srcMod = SVGA3DSRCMOD_ABS;
438 break;
439 case SVGA3DSRCMOD_NEG:
440 src.base.srcMod = SVGA3DSRCMOD_NONE;
441 break;
442 case SVGA3DSRCMOD_NONE:
443 src.base.srcMod = SVGA3DSRCMOD_NEG;
444 break;
445 }
446 return src;
447 }
448
449
450
451 /* Replace the src with the temporary specified in the dst, but copying
452 * only the necessary channels, and preserving the original swizzle (which is
453 * important given that several opcodes have constraints in the allowed
454 * swizzles).
455 */
456 static boolean
457 emit_repl(struct svga_shader_emitter *emit,
458 SVGA3dShaderDestToken dst,
459 struct src_register *src0)
460 {
461 unsigned src0_swizzle;
462 unsigned chan;
463
464 assert(SVGA3dShaderGetRegType(dst.value) == SVGA3DREG_TEMP);
465
466 src0_swizzle = src0->base.swizzle;
467
468 dst.mask = 0;
469 for (chan = 0; chan < 4; ++chan) {
470 unsigned swizzle = (src0_swizzle >> (chan * 2)) & 0x3;
471 dst.mask |= 1 << swizzle;
472 }
473 assert(dst.mask);
474
475 src0->base.swizzle = SVGA3DSWIZZLE_NONE;
476
477 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, *src0 ))
478 return FALSE;
479
480 *src0 = src( dst );
481 src0->base.swizzle = src0_swizzle;
482
483 return TRUE;
484 }
485
486
487 /**
488 * Submit/emit an instruction with zero operands.
489 */
490 static boolean
491 submit_op0(struct svga_shader_emitter *emit,
492 SVGA3dShaderInstToken inst,
493 SVGA3dShaderDestToken dest)
494 {
495 return (emit_instruction( emit, inst ) &&
496 emit_dst( emit, dest ));
497 }
498
499
500 /**
501 * Submit/emit an instruction with one operand.
502 */
503 static boolean
504 submit_op1(struct svga_shader_emitter *emit,
505 SVGA3dShaderInstToken inst,
506 SVGA3dShaderDestToken dest,
507 struct src_register src0)
508 {
509 return emit_op1( emit, inst, dest, src0 );
510 }
511
512
513 /**
514 * Submit/emit an instruction with two operands.
515 *
516 * SVGA shaders may not refer to >1 constant register in a single
517 * instruction. This function checks for that usage and inserts a
518 * move to temporary if detected.
519 *
520 * The same applies to input registers -- at most a single input
521 * register may be read by any instruction.
522 */
523 static boolean
524 submit_op2(struct svga_shader_emitter *emit,
525 SVGA3dShaderInstToken inst,
526 SVGA3dShaderDestToken dest,
527 struct src_register src0,
528 struct src_register src1)
529 {
530 SVGA3dShaderDestToken temp;
531 SVGA3dShaderRegType type0, type1;
532 boolean need_temp = FALSE;
533
534 temp.value = 0;
535 type0 = SVGA3dShaderGetRegType( src0.base.value );
536 type1 = SVGA3dShaderGetRegType( src1.base.value );
537
538 if (type0 == SVGA3DREG_CONST &&
539 type1 == SVGA3DREG_CONST &&
540 src0.base.num != src1.base.num)
541 need_temp = TRUE;
542
543 if (type0 == SVGA3DREG_INPUT &&
544 type1 == SVGA3DREG_INPUT &&
545 src0.base.num != src1.base.num)
546 need_temp = TRUE;
547
548 if (need_temp) {
549 temp = get_temp( emit );
550
551 if (!emit_repl( emit, temp, &src0 ))
552 return FALSE;
553 }
554
555 if (!emit_op2( emit, inst, dest, src0, src1 ))
556 return FALSE;
557
558 if (need_temp)
559 release_temp( emit, temp );
560
561 return TRUE;
562 }
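
/**
 * Usage sketch for the restriction handled above: an ADD that reads two
 * different constant registers.  The register indexes are arbitrary
 * illustration values and this helper is not called by the translator.
 */
static inline boolean
example_add_two_consts(struct svga_shader_emitter *emit,
                       SVGA3dShaderDestToken dst)
{
   struct src_register c0 = src_register( SVGA3DREG_CONST, 0 );
   struct src_register c1 = src_register( SVGA3DREG_CONST, 1 );

   /* Because both operands are constants with different indexes,
    * submit_op2() will emit:
    *    MOV TEMPn, c0
    *    ADD dst, TEMPn, c1
    * instead of a single (illegal) two-constant ADD.
    */
   return submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, c0, c1 );
}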
563
564
565 /**
566 * Submit/emit an instruction with three operands.
567 *
568 * SVGA shaders may not refer to >1 constant register in a single
569 * instruction. This function checks for that usage and inserts a
570 * move to temporary if detected.
571 */
572 static boolean
573 submit_op3(struct svga_shader_emitter *emit,
574 SVGA3dShaderInstToken inst,
575 SVGA3dShaderDestToken dest,
576 struct src_register src0,
577 struct src_register src1,
578 struct src_register src2)
579 {
580 SVGA3dShaderDestToken temp0;
581 SVGA3dShaderDestToken temp1;
582 boolean need_temp0 = FALSE;
583 boolean need_temp1 = FALSE;
584 SVGA3dShaderRegType type0, type1, type2;
585
586 temp0.value = 0;
587 temp1.value = 0;
588 type0 = SVGA3dShaderGetRegType( src0.base.value );
589 type1 = SVGA3dShaderGetRegType( src1.base.value );
590 type2 = SVGA3dShaderGetRegType( src2.base.value );
591
592 if (inst.op != SVGA3DOP_SINCOS) {
593 if (type0 == SVGA3DREG_CONST &&
594 ((type1 == SVGA3DREG_CONST && src0.base.num != src1.base.num) ||
595 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
596 need_temp0 = TRUE;
597
598 if (type1 == SVGA3DREG_CONST &&
599 (type2 == SVGA3DREG_CONST && src1.base.num != src2.base.num))
600 need_temp1 = TRUE;
601 }
602
603 if (type0 == SVGA3DREG_INPUT &&
604 ((type1 == SVGA3DREG_INPUT && src0.base.num != src1.base.num) ||
605 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
606 need_temp0 = TRUE;
607
608 if (type1 == SVGA3DREG_INPUT &&
609 (type2 == SVGA3DREG_INPUT && src1.base.num != src2.base.num))
610 need_temp1 = TRUE;
611
612 if (need_temp0) {
613 temp0 = get_temp( emit );
614
615 if (!emit_repl( emit, temp0, &src0 ))
616 return FALSE;
617 }
618
619 if (need_temp1) {
620 temp1 = get_temp( emit );
621
622 if (!emit_repl( emit, temp1, &src1 ))
623 return FALSE;
624 }
625
626 if (!emit_op3( emit, inst, dest, src0, src1, src2 ))
627 return FALSE;
628
629 if (need_temp1)
630 release_temp( emit, temp1 );
631 if (need_temp0)
632 release_temp( emit, temp0 );
633 return TRUE;
634 }
635
636
637 /**
638 * Submit/emit an instruction with four operands.
639 *
640 * SVGA shaders may not refer to >1 constant register in a single
641 * instruction. This function checks for that usage and inserts a
642 * move to temporary if detected.
643 */
644 static boolean
645 submit_op4(struct svga_shader_emitter *emit,
646 SVGA3dShaderInstToken inst,
647 SVGA3dShaderDestToken dest,
648 struct src_register src0,
649 struct src_register src1,
650 struct src_register src2,
651 struct src_register src3)
652 {
653 SVGA3dShaderDestToken temp0;
654 SVGA3dShaderDestToken temp3;
655 boolean need_temp0 = FALSE;
656 boolean need_temp3 = FALSE;
657 SVGA3dShaderRegType type0, type1, type2, type3;
658
659 temp0.value = 0;
660 temp3.value = 0;
661 type0 = SVGA3dShaderGetRegType( src0.base.value );
662 type1 = SVGA3dShaderGetRegType( src1.base.value );
663 type2 = SVGA3dShaderGetRegType( src2.base.value );
664 type3 = SVGA3dShaderGetRegType( src3.base.value );
665
666 /* Make life a little easier - this is only used by the TXD
667 * instruction which is guaranteed not to have a constant/input reg
668 * in one slot at least:
669 */
670 assert(type1 == SVGA3DREG_SAMPLER);
671
672 if (type0 == SVGA3DREG_CONST &&
673 ((type3 == SVGA3DREG_CONST && src0.base.num != src3.base.num) ||
674 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
675 need_temp0 = TRUE;
676
677 if (type3 == SVGA3DREG_CONST &&
678 (type2 == SVGA3DREG_CONST && src3.base.num != src2.base.num))
679 need_temp3 = TRUE;
680
681 if (type0 == SVGA3DREG_INPUT &&
682 ((type3 == SVGA3DREG_INPUT && src0.base.num != src3.base.num) ||
683 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
684 need_temp0 = TRUE;
685
686 if (type3 == SVGA3DREG_INPUT &&
687 (type2 == SVGA3DREG_INPUT && src3.base.num != src2.base.num))
688 need_temp3 = TRUE;
689
690 if (need_temp0) {
691 temp0 = get_temp( emit );
692
693 if (!emit_repl( emit, temp0, &src0 ))
694 return FALSE;
695 }
696
697 if (need_temp3) {
698 temp3 = get_temp( emit );
699
700 if (!emit_repl( emit, temp3, &src3 ))
701 return FALSE;
702 }
703
704 if (!emit_op4( emit, inst, dest, src0, src1, src2, src3 ))
705 return FALSE;
706
707 if (need_temp3)
708 release_temp( emit, temp3 );
709 if (need_temp0)
710 release_temp( emit, temp0 );
711 return TRUE;
712 }
713
714
715 /**
716 * Do the src and dest registers refer to the same register?
717 */
718 static boolean
719 alias_src_dst(struct src_register src,
720 SVGA3dShaderDestToken dst)
721 {
722 if (src.base.num != dst.num)
723 return FALSE;
724
725 if (SVGA3dShaderGetRegType(dst.value) !=
726 SVGA3dShaderGetRegType(src.base.value))
727 return FALSE;
728
729 return TRUE;
730 }
731
732
733 /**
734 * Helper for emitting SVGA immediate values using the SVGA3DOP_DEF[I]
735 * instructions.
736 */
737 static boolean
738 emit_def_const(struct svga_shader_emitter *emit,
739 SVGA3dShaderConstType type,
740 unsigned idx, float a, float b, float c, float d)
741 {
742 SVGA3DOpDefArgs def;
743 SVGA3dShaderInstToken opcode;
744
745 switch (type) {
746 case SVGA3D_CONST_TYPE_FLOAT:
747 opcode = inst_token( SVGA3DOP_DEF );
748 def.dst = dst_register( SVGA3DREG_CONST, idx );
749 def.constValues[0] = a;
750 def.constValues[1] = b;
751 def.constValues[2] = c;
752 def.constValues[3] = d;
753 break;
754 case SVGA3D_CONST_TYPE_INT:
755 opcode = inst_token( SVGA3DOP_DEFI );
756 def.dst = dst_register( SVGA3DREG_CONSTINT, idx );
757 def.constIValues[0] = (int)a;
758 def.constIValues[1] = (int)b;
759 def.constIValues[2] = (int)c;
760 def.constIValues[3] = (int)d;
761 break;
762 default:
763 assert(0);
764 opcode = inst_token( SVGA3DOP_NOP );
765 break;
766 }
767
768 if (!emit_instruction(emit, opcode) ||
769 !svga_shader_emit_dwords( emit, def.values, Elements(def.values)))
770 return FALSE;
771
772 return TRUE;
773 }
774
775
776 static boolean
777 create_loop_const( struct svga_shader_emitter *emit )
778 {
779 unsigned idx = emit->nr_hw_int_const++;
780
781 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_INT, idx,
782 255, /* iteration count */
783 0, /* initial value */
784 1, /* step size */
785 0 /* not used, must be 0 */))
786 return FALSE;
787
788 emit->loop_const_idx = idx;
789 emit->created_loop_const = TRUE;
790
791 return TRUE;
792 }
793
794 static boolean
795 create_arl_consts( struct svga_shader_emitter *emit )
796 {
797 int i;
798
799 for (i = 0; i < emit->num_arl_consts; i += 4) {
800 int j;
801 unsigned idx = emit->nr_hw_float_const++;
802 float vals[4];
803 for (j = 0; j < 4 && (j + i) < emit->num_arl_consts; ++j) {
804 vals[j] = (float) emit->arl_consts[i + j].number;
805 emit->arl_consts[i + j].idx = idx;
806 switch (j) {
807 case 0:
808 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_X;
809 break;
810 case 1:
811 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Y;
812 break;
813 case 2:
814 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Z;
815 break;
816 case 3:
817 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_W;
818 break;
819 }
820 }
821 while (j < 4)
822 vals[j++] = 0;
823
824 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
825 vals[0], vals[1],
826 vals[2], vals[3]))
827 return FALSE;
828 }
829
830 return TRUE;
831 }
832
833
834 /**
835 * Return the register which holds the pixel shader's front/back-
836 * facing value.
837 */
838 static struct src_register
839 get_vface( struct svga_shader_emitter *emit )
840 {
841 assert(emit->emitted_vface);
842 return src_register(SVGA3DREG_MISCTYPE, SVGA3DMISCREG_FACE);
843 }
844
845
846 /**
847 * Create/emit a "common" constant with values {0, 0.5, -1, 1}.
848 * We can swizzle this to produce other useful constants such as
849 * {0, 0, 0, 0}, {1, 1, 1, 1}, etc.
850 */
851 static boolean
852 create_common_immediate( struct svga_shader_emitter *emit )
853 {
854 unsigned idx = emit->nr_hw_float_const++;
855
856 /* Emit the constant (0, 0.5, -1, 1) and use swizzling to generate
857 * other useful vectors.
858 */
859 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
860 idx, 0.0f, 0.5f, -1.0f, 1.0f ))
861 return FALSE;
862 emit->common_immediate_idx[0] = idx;
863 idx++;
864
865 /* Emit constant {2, 0, 0, 0} (only the 2 is used for now) */
866 if (emit->key.vkey.adjust_attrib_range) {
867 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
868 idx, 2.0f, 0.0f, 0.0f, 0.0f ))
869 return FALSE;
870 emit->common_immediate_idx[1] = idx;
871 }
872 else {
873 emit->common_immediate_idx[1] = -1;
874 }
875
876 emit->created_common_immediate = TRUE;
877
878 return TRUE;
879 }
880
881
882 /**
883 * Return swizzle/position for the given value in the "common" immediate.
884 */
885 static inline unsigned
886 common_immediate_swizzle(float value)
887 {
888 if (value == 0.0f)
889 return TGSI_SWIZZLE_X;
890 else if (value == 0.5f)
891 return TGSI_SWIZZLE_Y;
892 else if (value == -1.0f)
893 return TGSI_SWIZZLE_Z;
894 else if (value == 1.0f)
895 return TGSI_SWIZZLE_W;
896 else {
897 assert(!"illegal value in common_immediate_swizzle");
898 return TGSI_SWIZZLE_X;
899 }
900 }
901
902
903 /**
904 * Returns an immediate reg where each term is one of 0, 0.5, -1 or 1
905 */
906 static struct src_register
907 get_immediate(struct svga_shader_emitter *emit,
908 float x, float y, float z, float w)
909 {
910 unsigned sx = common_immediate_swizzle(x);
911 unsigned sy = common_immediate_swizzle(y);
912 unsigned sz = common_immediate_swizzle(z);
913 unsigned sw = common_immediate_swizzle(w);
914 assert(emit->created_common_immediate);
915 assert(emit->common_immediate_idx[0] >= 0);
916 return swizzle(src_register(SVGA3DREG_CONST, emit->common_immediate_idx[0]),
917 sx, sy, sz, sw);
918 }
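
/**
 * Example: get_immediate(emit, 1.0f, 0.0f, 0.0f, 0.5f) maps each requested
 * value onto the common constant {0, 0.5, -1, 1} and returns
 * CONST[common_immediate_idx[0]].wxxy -- no extra DEF instruction is needed.
 * Only the four values stored in that constant can be requested this way.
 */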
919
920
921 /**
922 * returns {0, 0, 0, 0} immediate
923 */
924 static struct src_register
925 get_zero_immediate( struct svga_shader_emitter *emit )
926 {
927 assert(emit->created_common_immediate);
928 assert(emit->common_immediate_idx[0] >= 0);
929 return swizzle(src_register( SVGA3DREG_CONST,
930 emit->common_immediate_idx[0]),
931 0, 0, 0, 0);
932 }
933
934
935 /**
936 * returns {1, 1, 1, 1} immediate
937 */
938 static struct src_register
939 get_one_immediate( struct svga_shader_emitter *emit )
940 {
941 assert(emit->created_common_immediate);
942 assert(emit->common_immediate_idx[0] >= 0);
943 return swizzle(src_register( SVGA3DREG_CONST,
944 emit->common_immediate_idx[0]),
945 3, 3, 3, 3);
946 }
947
948
949 /**
950 * returns {0.5, 0.5, 0.5, 0.5} immediate
951 */
952 static struct src_register
953 get_half_immediate( struct svga_shader_emitter *emit )
954 {
955 assert(emit->created_common_immediate);
956 assert(emit->common_immediate_idx[0] >= 0);
957 return swizzle(src_register(SVGA3DREG_CONST, emit->common_immediate_idx[0]),
958 1, 1, 1, 1);
959 }
960
961
962 /**
963 * returns {2, 2, 2, 2} immediate
964 */
965 static struct src_register
966 get_two_immediate( struct svga_shader_emitter *emit )
967 {
968 /* Note we use the second common immediate here */
969 assert(emit->created_common_immediate);
970 assert(emit->common_immediate_idx[1] >= 0);
971 return swizzle(src_register( SVGA3DREG_CONST,
972 emit->common_immediate_idx[1]),
973 0, 0, 0, 0);
974 }
975
976
977 /**
978 * returns the loop const
979 */
980 static struct src_register
981 get_loop_const( struct svga_shader_emitter *emit )
982 {
983 assert(emit->created_loop_const);
984 assert(emit->loop_const_idx >= 0);
985 return src_register( SVGA3DREG_CONSTINT,
986 emit->loop_const_idx );
987 }
988
989
990 static struct src_register
991 get_fake_arl_const( struct svga_shader_emitter *emit )
992 {
993 struct src_register reg;
994 int idx = 0, swizzle = 0, i;
995
996 for (i = 0; i < emit->num_arl_consts; ++ i) {
997 if (emit->arl_consts[i].arl_num == emit->current_arl) {
998 idx = emit->arl_consts[i].idx;
999 swizzle = emit->arl_consts[i].swizzle;
1000 }
1001 }
1002
1003 reg = src_register( SVGA3DREG_CONST, idx );
1004 return scalar(reg, swizzle);
1005 }
1006
1007
1008 /**
1009 * Return a register which holds the width and height of the texture
1010 * currently bound to the given sampler.
1011 */
1012 static struct src_register
1013 get_tex_dimensions( struct svga_shader_emitter *emit, int sampler_num )
1014 {
1015 int idx;
1016 struct src_register reg;
1017
1018 /* the width/height indexes start right after constants */
1019 idx = emit->key.fkey.tex[sampler_num].width_height_idx +
1020 emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
1021
1022 reg = src_register( SVGA3DREG_CONST, idx );
1023 return reg;
1024 }
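
/**
 * Worked example for the indexing above: if the shader's highest TGSI
 * constant index is 15 (so c0..c15 are in use) and width_height_idx is 2,
 * the dimensions for this sampler are fetched from c18 (2 + 15 + 1).
 */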
1025
1026
1027 static boolean
1028 emit_fake_arl(struct svga_shader_emitter *emit,
1029 const struct tgsi_full_instruction *insn)
1030 {
1031 const struct src_register src0 =
1032 translate_src_register(emit, &insn->Src[0] );
1033 struct src_register src1 = get_fake_arl_const( emit );
1034 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1035 SVGA3dShaderDestToken tmp = get_temp( emit );
1036
1037 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
1038 return FALSE;
1039
1040 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), tmp, src( tmp ),
1041 src1))
1042 return FALSE;
1043
1044 /* replicate the original swizzle */
1045 src1 = src(tmp);
1046 src1.base.swizzle = src0.base.swizzle;
1047
1048 return submit_op1( emit, inst_token( SVGA3DOP_MOVA ),
1049 dst, src1 );
1050 }
1051
1052
1053 static boolean
1054 emit_if(struct svga_shader_emitter *emit,
1055 const struct tgsi_full_instruction *insn)
1056 {
1057 struct src_register src0 =
1058 translate_src_register(emit, &insn->Src[0]);
1059 struct src_register zero = get_zero_immediate(emit);
1060 SVGA3dShaderInstToken if_token = inst_token( SVGA3DOP_IFC );
1061
1062 if_token.control = SVGA3DOPCOMPC_NE;
1063
1064 if (SVGA3dShaderGetRegType(src0.base.value) == SVGA3DREG_CONST) {
1065 /*
1066 * An IFC instruction can read at most one distinct constant register.
1067 */
1068 SVGA3dShaderDestToken tmp = get_temp( emit );
1069
1070 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
1071 return FALSE;
1072
1073 src0 = scalar(src( tmp ), TGSI_SWIZZLE_X);
1074 }
1075
1076 emit->dynamic_branching_level++;
1077
1078 return (emit_instruction( emit, if_token ) &&
1079 emit_src( emit, src0 ) &&
1080 emit_src( emit, zero ) );
1081 }
1082
1083
1084 static boolean
1085 emit_else(struct svga_shader_emitter *emit,
1086 const struct tgsi_full_instruction *insn)
1087 {
1088 return emit_instruction(emit, inst_token(SVGA3DOP_ELSE));
1089 }
1090
1091
1092 static boolean
1093 emit_endif(struct svga_shader_emitter *emit,
1094 const struct tgsi_full_instruction *insn)
1095 {
1096 emit->dynamic_branching_level--;
1097
1098 return emit_instruction(emit, inst_token(SVGA3DOP_ENDIF));
1099 }
1100
1101
1102 /**
1103 * Translate the following TGSI FLR instruction.
1104 * FLR DST, SRC
1105 * To the following SVGA3D instruction sequence.
1106 * FRC TMP, SRC
1107 * SUB DST, SRC, TMP
1108 */
1109 static boolean
1110 emit_floor(struct svga_shader_emitter *emit,
1111 const struct tgsi_full_instruction *insn )
1112 {
1113 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1114 const struct src_register src0 =
1115 translate_src_register(emit, &insn->Src[0] );
1116 SVGA3dShaderDestToken temp = get_temp( emit );
1117
1118 /* FRC TMP, SRC */
1119 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ), temp, src0 ))
1120 return FALSE;
1121
1122 /* SUB DST, SRC, TMP */
1123 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src0,
1124 negate( src( temp ) ) ))
1125 return FALSE;
1126
1127 return TRUE;
1128 }
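
/**
 * Worked example for the FLR expansion above with SRC = -1.25:
 * FRC returns the positive fraction 0.75 (x - floor(x)), and
 * SRC - TMP = -1.25 - 0.75 = -2.0, which is floor(-1.25).
 */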
1129
1130
1131 /**
1132 * Translate the following TGSI CEIL instruction.
1133 * CEIL DST, SRC
1134 * To the following SVGA3D instruction sequence.
1135 * FRC TMP, -SRC
1136 * ADD DST, SRC, TMP
1137 */
1138 static boolean
1139 emit_ceil(struct svga_shader_emitter *emit,
1140 const struct tgsi_full_instruction *insn)
1141 {
1142 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
1143 const struct src_register src0 =
1144 translate_src_register(emit, &insn->Src[0]);
1145 SVGA3dShaderDestToken temp = get_temp(emit);
1146
1147 /* FRC TMP, -SRC */
1148 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), temp, negate(src0)))
1149 return FALSE;
1150
1151 /* ADD DST, SRC, TMP */
1152 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), dst, src0, src(temp)))
1153 return FALSE;
1154
1155 return TRUE;
1156 }
1157
1158
1159 /**
1160 * Translate the following TGSI DIV instruction.
1161 * DIV DST.xy, SRC0, SRC1
1162 * To the following SVGA3D instruction sequence.
1163 * RCP TMP.x, SRC1.xxxx
1164 * RCP TMP.y, SRC1.yyyy
1165 * MUL DST.xy, SRC0, TMP
1166 */
1167 static boolean
1168 emit_div(struct svga_shader_emitter *emit,
1169 const struct tgsi_full_instruction *insn )
1170 {
1171 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1172 const struct src_register src0 =
1173 translate_src_register(emit, &insn->Src[0] );
1174 const struct src_register src1 =
1175 translate_src_register(emit, &insn->Src[1] );
1176 SVGA3dShaderDestToken temp = get_temp( emit );
1177 int i;
1178
1179 /* For each enabled element, perform an RCP instruction. Note that
1180 * RCP is scalar in SVGA3D:
1181 */
1182 for (i = 0; i < 4; i++) {
1183 unsigned channel = 1 << i;
1184 if (dst.mask & channel) {
1185 /* RCP TMP.?, SRC1.???? */
1186 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1187 writemask(temp, channel),
1188 scalar(src1, i) ))
1189 return FALSE;
1190 }
1191 }
1192
1193 /* Vector mul:
1194 * MUL DST, SRC0, TMP
1195 */
1196 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst, src0,
1197 src( temp ) ))
1198 return FALSE;
1199
1200 return TRUE;
1201 }
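
/**
 * Note on cost: since RCP is scalar, a full four-component DIV expands to
 * four RCPs plus one MUL; the write mask keeps the expansion minimal when
 * fewer channels are written (e.g. DIV DST.xy needs only two RCPs).
 */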
1202
1203
1204 /**
1205 * Translate the following TGSI DP2 instruction.
1206 * DP2 DST, SRC1, SRC2
1207 * To the following SVGA3D instruction sequence.
1208 * MUL TMP, SRC1, SRC2
1209 * ADD DST, TMP.xxxx, TMP.yyyy
1210 */
1211 static boolean
1212 emit_dp2(struct svga_shader_emitter *emit,
1213 const struct tgsi_full_instruction *insn )
1214 {
1215 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1216 const struct src_register src0 =
1217 translate_src_register(emit, &insn->Src[0]);
1218 const struct src_register src1 =
1219 translate_src_register(emit, &insn->Src[1]);
1220 SVGA3dShaderDestToken temp = get_temp( emit );
1221 struct src_register temp_src0, temp_src1;
1222
1223 /* MUL TMP, SRC1, SRC2 */
1224 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), temp, src0, src1 ))
1225 return FALSE;
1226
1227 temp_src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1228 temp_src1 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1229
1230 /* ADD DST, TMP.xxxx, TMP.yyyy */
1231 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1232 temp_src0, temp_src1 ))
1233 return FALSE;
1234
1235 return TRUE;
1236 }
1237
1238
1239 /**
1240 * Translate the following TGSI DPH instruction.
1241 * DPH DST, SRC1, SRC2
1242 * To the following SVGA3D instruction sequence.
1243 * DP3 TMP, SRC1, SRC2
1244 * ADD DST, TMP, SRC2.wwww
1245 */
1246 static boolean
1247 emit_dph(struct svga_shader_emitter *emit,
1248 const struct tgsi_full_instruction *insn )
1249 {
1250 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1251 const struct src_register src0 = translate_src_register(
1252 emit, &insn->Src[0] );
1253 struct src_register src1 =
1254 translate_src_register(emit, &insn->Src[1]);
1255 SVGA3dShaderDestToken temp = get_temp( emit );
1256
1257 /* DP3 TMP, SRC1, SRC2 */
1258 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src1 ))
1259 return FALSE;
1260
1261 src1 = scalar(src1, TGSI_SWIZZLE_W);
1262
1263 /* ADD DST, TMP, SRC2.wwww */
1264 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1265 src( temp ), src1 ))
1266 return FALSE;
1267
1268 return TRUE;
1269 }
1270
1271
1272 /**
1273 * Translate the following TGSI NRM instruction.
1274 * NRM DST, SRC
1275 * To the following SVGA3D instruction sequence.
1276 * DP3 TMP, SRC, SRC
1277 * RSQ TMP, TMP
1278 * MUL DST, SRC, TMP
1279 */
1280 static boolean
1281 emit_nrm(struct svga_shader_emitter *emit,
1282 const struct tgsi_full_instruction *insn)
1283 {
1284 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1285 const struct src_register src0 =
1286 translate_src_register(emit, &insn->Src[0]);
1287 SVGA3dShaderDestToken temp = get_temp( emit );
1288
1289 /* DP3 TMP, SRC, SRC */
1290 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src0 ))
1291 return FALSE;
1292
1293 /* RSQ TMP, TMP */
1294 if (!submit_op1( emit, inst_token( SVGA3DOP_RSQ ), temp, src( temp )))
1295 return FALSE;
1296
1297 /* MUL DST, SRC, TMP */
1298 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst,
1299 src0, src( temp )))
1300 return FALSE;
1301
1302 return TRUE;
1303 }
1304
1305
1306 /**
1307 * Sine / Cosine helper function.
1308 */
1309 static boolean
1310 do_emit_sincos(struct svga_shader_emitter *emit,
1311 SVGA3dShaderDestToken dst,
1312 struct src_register src0)
1313 {
1314 src0 = scalar(src0, TGSI_SWIZZLE_X);
1315 return submit_op1(emit, inst_token(SVGA3DOP_SINCOS), dst, src0);
1316 }
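
/**
 * SINCOS writes cos(src.x) to the destination's X channel and sin(src.x)
 * to its Y channel, which is why emit_cos() below reads .x and emit_sin()
 * reads .y from the temporary.
 */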
1317
1318
1319 /**
1320 * Translate/emit a TGSI SIN, COS or SCS instruction.
1321 */
1322 static boolean
1323 emit_sincos(struct svga_shader_emitter *emit,
1324 const struct tgsi_full_instruction *insn)
1325 {
1326 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1327 struct src_register src0 = translate_src_register(emit, &insn->Src[0]);
1328 SVGA3dShaderDestToken temp = get_temp( emit );
1329
1330 /* SCS TMP SRC */
1331 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_XY), src0 ))
1332 return FALSE;
1333
1334 /* MOV DST TMP */
1335 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src( temp ) ))
1336 return FALSE;
1337
1338 return TRUE;
1339 }
1340
1341
1342 /**
1343 * Translate TGSI SIN instruction into:
1344 * SCS TMP SRC
1345 * MOV DST TMP.yyyy
1346 */
1347 static boolean
1348 emit_sin(struct svga_shader_emitter *emit,
1349 const struct tgsi_full_instruction *insn )
1350 {
1351 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1352 struct src_register src0 =
1353 translate_src_register(emit, &insn->Src[0] );
1354 SVGA3dShaderDestToken temp = get_temp( emit );
1355
1356 /* SCS TMP SRC */
1357 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_Y), src0))
1358 return FALSE;
1359
1360 src0 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1361
1362 /* MOV DST TMP.yyyy */
1363 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1364 return FALSE;
1365
1366 return TRUE;
1367 }
1368
1369
1370 /*
1371 * Translate TGSI COS instruction into:
1372 * SCS TMP SRC
1373 * MOV DST TMP.xxxx
1374 */
1375 static boolean
1376 emit_cos(struct svga_shader_emitter *emit,
1377 const struct tgsi_full_instruction *insn)
1378 {
1379 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1380 struct src_register src0 =
1381 translate_src_register(emit, &insn->Src[0] );
1382 SVGA3dShaderDestToken temp = get_temp( emit );
1383
1384 /* SCS TMP SRC */
1385 if (!do_emit_sincos( emit, writemask(temp, TGSI_WRITEMASK_X), src0 ))
1386 return FALSE;
1387
1388 src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1389
1390 /* MOV DST TMP.xxxx */
1391 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1392 return FALSE;
1393
1394 return TRUE;
1395 }
1396
1397
1398 /**
1399 * Translate/emit TGSI SSG (Set Sign: -1, 0, +1) instruction.
1400 */
1401 static boolean
1402 emit_ssg(struct svga_shader_emitter *emit,
1403 const struct tgsi_full_instruction *insn)
1404 {
1405 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1406 struct src_register src0 =
1407 translate_src_register(emit, &insn->Src[0] );
1408 SVGA3dShaderDestToken temp0 = get_temp( emit );
1409 SVGA3dShaderDestToken temp1 = get_temp( emit );
1410 struct src_register zero, one;
1411
1412 if (emit->unit == PIPE_SHADER_VERTEX) {
1413 /* SGN DST, SRC0, TMP0, TMP1 */
1414 return submit_op3( emit, inst_token( SVGA3DOP_SGN ), dst, src0,
1415 src( temp0 ), src( temp1 ) );
1416 }
1417
1418 one = get_one_immediate(emit);
1419 zero = get_zero_immediate(emit);
1420
1421 /* CMP TMP0, SRC0, one, zero */
1422 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1423 writemask( temp0, dst.mask ), src0, one, zero ))
1424 return FALSE;
1425
1426 /* CMP TMP1, negate(SRC0), negate(one), zero */
1427 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1428 writemask( temp1, dst.mask ), negate( src0 ), negate( one ),
1429 zero ))
1430 return FALSE;
1431
1432 /* ADD DST, TMP0, TMP1 */
1433 return submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src( temp0 ),
1434 src( temp1 ) );
1435 }
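
/**
 * Worked example for the fragment-shader path above (CMP computes
 * dst = src0 >= 0 ? src1 : src2 per channel):
 *   x >  0:  TMP0 = 1, TMP1 =  0, sum =  1
 *   x == 0:  TMP0 = 1, TMP1 = -1, sum =  0
 *   x <  0:  TMP0 = 0, TMP1 = -1, sum = -1
 * which matches SSG's -1/0/+1 result.
 */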
1436
1437
1438 /**
1439 * Translate/emit TGSI SUB instruction as:
1440 * ADD DST, SRC0, negate(SRC1)
1441 */
1442 static boolean
1443 emit_sub(struct svga_shader_emitter *emit,
1444 const struct tgsi_full_instruction *insn)
1445 {
1446 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1447 struct src_register src0 = translate_src_register(
1448 emit, &insn->Src[0] );
1449 struct src_register src1 = translate_src_register(
1450 emit, &insn->Src[1] );
1451
1452 src1 = negate(src1);
1453
1454 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1455 src0, src1 ))
1456 return FALSE;
1457
1458 return TRUE;
1459 }
1460
1461
1462 /**
1463 * Translate/emit KILL_IF instruction (kill if any of X,Y,Z,W are negative).
1464 */
1465 static boolean
1466 emit_kill_if(struct svga_shader_emitter *emit,
1467 const struct tgsi_full_instruction *insn)
1468 {
1469 const struct tgsi_full_src_register *reg = &insn->Src[0];
1470 struct src_register src0, srcIn;
1471 const boolean special = (reg->Register.Absolute ||
1472 reg->Register.Negate ||
1473 reg->Register.Indirect ||
1474 reg->Register.SwizzleX != 0 ||
1475 reg->Register.SwizzleY != 1 ||
1476 reg->Register.SwizzleZ != 2 ||
1477 reg->Register.File != TGSI_FILE_TEMPORARY);
1478 SVGA3dShaderDestToken temp;
1479
1480 src0 = srcIn = translate_src_register( emit, reg );
1481
1482 if (special) {
1483 /* need a temp reg */
1484 temp = get_temp( emit );
1485 }
1486
1487 if (special) {
1488 /* move the source into a temp register */
1489 submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, src0);
1490
1491 src0 = src( temp );
1492 }
1493
1494 /* Do the texkill by checking if any of the XYZW components are < 0.
1495 * Note that ps_2_0 and later take XYZW into consideration, while ps_1_x
1496 * only uses XYZ. The MSDN documentation about this is incorrect.
1497 */
1498 if (!submit_op0( emit, inst_token( SVGA3DOP_TEXKILL ), dst(src0) ))
1499 return FALSE;
1500
1501 return TRUE;
1502 }
1503
1504
1505 /**
1506 * Translate/emit unconditional kill instruction (usually found inside
1507 * an IF/ELSE/ENDIF block).
1508 */
1509 static boolean
1510 emit_kill(struct svga_shader_emitter *emit,
1511 const struct tgsi_full_instruction *insn)
1512 {
1513 SVGA3dShaderDestToken temp;
1514 struct src_register one = get_one_immediate(emit);
1515 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_TEXKILL );
1516
1517 /* texkill doesn't allow negation on the operand, so let's move the
1518 * negated {1,1,1,1} constant into a temp register first. */
1519 temp = get_temp( emit );
1520 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp,
1521 negate( one ) ))
1522 return FALSE;
1523
1524 return submit_op0( emit, inst, temp );
1525 }
1526
1527
1528 /**
1529 * Test if r1 and r2 are the same register.
1530 */
1531 static boolean
1532 same_register(struct src_register r1, struct src_register r2)
1533 {
1534 return (r1.base.num == r2.base.num &&
1535 r1.base.type_upper == r2.base.type_upper &&
1536 r1.base.type_lower == r2.base.type_lower);
1537 }
1538
1539
1540
1541 /**
1542 * Implement conditionals by initializing destination reg to 'fail',
1543 * then set the predicate reg with SETP, then move 'pass' to dest
1544 * based on predicate reg.
1545 *
1546 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1547 * MOV dst, fail
1548 * MOV dst, pass, p0
1549 */
1550 static boolean
1551 emit_conditional(struct svga_shader_emitter *emit,
1552 unsigned compare_func,
1553 SVGA3dShaderDestToken dst,
1554 struct src_register src0,
1555 struct src_register src1,
1556 struct src_register pass,
1557 struct src_register fail)
1558 {
1559 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1560 SVGA3dShaderInstToken setp_token;
1561
1562 switch (compare_func) {
1563 case PIPE_FUNC_NEVER:
1564 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1565 dst, fail );
1566 break;
1567 case PIPE_FUNC_LESS:
1568 setp_token = inst_token_setp(SVGA3DOPCOMP_LT);
1569 break;
1570 case PIPE_FUNC_EQUAL:
1571 setp_token = inst_token_setp(SVGA3DOPCOMP_EQ);
1572 break;
1573 case PIPE_FUNC_LEQUAL:
1574 setp_token = inst_token_setp(SVGA3DOPCOMP_LE);
1575 break;
1576 case PIPE_FUNC_GREATER:
1577 setp_token = inst_token_setp(SVGA3DOPCOMP_GT);
1578 break;
1579 case PIPE_FUNC_NOTEQUAL:
1580 setp_token = inst_token_setp(SVGA3DOPCOMPC_NE);
1581 break;
1582 case PIPE_FUNC_GEQUAL:
1583 setp_token = inst_token_setp(SVGA3DOPCOMP_GE);
1584 break;
1585 case PIPE_FUNC_ALWAYS:
1586 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1587 dst, pass );
1588 break;
1589 }
1590
1591 if (same_register(src(dst), pass)) {
1592 /* We'll get bad results if the dst and pass registers are the same
1593 * so use a temp register containing pass.
1594 */
1595 SVGA3dShaderDestToken temp = get_temp(emit);
1596 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, pass))
1597 return FALSE;
1598 pass = src(temp);
1599 }
1600
1601 /* SETP src0, COMPOP, src1 */
1602 if (!submit_op2( emit, setp_token, pred_reg,
1603 src0, src1 ))
1604 return FALSE;
1605
1606 /* MOV dst, fail */
1607 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV), dst, fail))
1608 return FALSE;
1609
1610 /* MOV dst, pass (predicated)
1611 *
1612 * Note that the predicate reg (and possible modifiers) is passed
1613 * as the first source argument.
1614 */
1615 if (!submit_op2(emit,
1616 inst_token_predicated(SVGA3DOP_MOV), dst,
1617 src(pred_reg), pass))
1618 return FALSE;
1619
1620 return TRUE;
1621 }
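
/**
 * Minimal usage sketch for emit_conditional(): compute
 * dst = (src0 < src1) ? 1.0 : 0.0 via the SETP/predicated-MOV path.
 * This is essentially what emit_select() below does when no native
 * comparison instruction is available; the helper itself is illustrative
 * and unused.
 */
static inline boolean
example_set_on_less_than(struct svga_shader_emitter *emit,
                         SVGA3dShaderDestToken dst,
                         struct src_register src0,
                         struct src_register src1)
{
   return emit_conditional(emit, PIPE_FUNC_LESS, dst, src0, src1,
                           get_one_immediate(emit),
                           get_zero_immediate(emit));
}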
1622
1623
1624 /**
1625 * Helper for emitting 'selection' commands. Basically:
1626 * if (src0 OP src1)
1627 * dst = 1.0;
1628 * else
1629 * dst = 0.0;
1630 */
1631 static boolean
1632 emit_select(struct svga_shader_emitter *emit,
1633 unsigned compare_func,
1634 SVGA3dShaderDestToken dst,
1635 struct src_register src0,
1636 struct src_register src1 )
1637 {
1638 /* There are some SVGA instructions which implement some selects
1639 * directly, but they are only available in the vertex shader.
1640 */
1641 if (emit->unit == PIPE_SHADER_VERTEX) {
1642 switch (compare_func) {
1643 case PIPE_FUNC_GEQUAL:
1644 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src0, src1 );
1645 case PIPE_FUNC_LEQUAL:
1646 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src1, src0 );
1647 case PIPE_FUNC_GREATER:
1648 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src1, src0 );
1649 case PIPE_FUNC_LESS:
1650 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src0, src1 );
1651 default:
1652 break;
1653 }
1654 }
1655
1656 /* Otherwise, need to use the setp approach:
1657 */
1658 {
1659 struct src_register one, zero;
1660 /* use the {0,0,0,0} and {1,1,1,1} vectors from the common immediate */
1661 zero = get_zero_immediate(emit);
1662 one = get_one_immediate(emit);
1663
1664 return emit_conditional(emit, compare_func, dst, src0, src1, one, zero);
1665 }
1666 }
1667
1668
1669 /**
1670 * Translate/emit a TGSI SEQ, SNE, SLT, SGE, etc. instruction.
1671 */
1672 static boolean
1673 emit_select_op(struct svga_shader_emitter *emit,
1674 unsigned compare,
1675 const struct tgsi_full_instruction *insn)
1676 {
1677 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1678 struct src_register src0 = translate_src_register(
1679 emit, &insn->Src[0] );
1680 struct src_register src1 = translate_src_register(
1681 emit, &insn->Src[1] );
1682
1683 return emit_select( emit, compare, dst, src0, src1 );
1684 }
1685
1686
1687 /**
1688 * Translate TGSI CMP instruction. Component-wise:
1689 * dst = (src0 < 0.0) ? src1 : src2
1690 */
1691 static boolean
1692 emit_cmp(struct svga_shader_emitter *emit,
1693 const struct tgsi_full_instruction *insn)
1694 {
1695 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1696 const struct src_register src0 =
1697 translate_src_register(emit, &insn->Src[0] );
1698 const struct src_register src1 =
1699 translate_src_register(emit, &insn->Src[1] );
1700 const struct src_register src2 =
1701 translate_src_register(emit, &insn->Src[2] );
1702
1703 if (emit->unit == PIPE_SHADER_VERTEX) {
1704 struct src_register zero = get_zero_immediate(emit);
1705 /* We used to simulate CMP with SLT+LRP. But that didn't work when
1706 * src1 or src2 was Inf/NaN. In particular, GLSL sqrt(0) failed
1707 * because it involves a CMP to handle the 0 case.
1708 * Use a conditional expression instead.
1709 */
1710 return emit_conditional(emit, PIPE_FUNC_LESS, dst,
1711 src0, zero, src1, src2);
1712 }
1713 else {
1714 assert(emit->unit == PIPE_SHADER_FRAGMENT);
1715
1716 /* CMP DST, SRC0, SRC2, SRC1 */
1717 return submit_op3( emit, inst_token( SVGA3DOP_CMP ), dst,
1718 src0, src2, src1);
1719 }
1720 }
1721
1722
1723 /**
1724 * Translate/emit 2-operand (coord, sampler) texture instructions.
1725 */
1726 static boolean
1727 emit_tex2(struct svga_shader_emitter *emit,
1728 const struct tgsi_full_instruction *insn,
1729 SVGA3dShaderDestToken dst)
1730 {
1731 SVGA3dShaderInstToken inst;
1732 struct src_register texcoord;
1733 struct src_register sampler;
1734 SVGA3dShaderDestToken tmp;
1735
1736 inst.value = 0;
1737
1738 switch (insn->Instruction.Opcode) {
1739 case TGSI_OPCODE_TEX:
1740 inst.op = SVGA3DOP_TEX;
1741 break;
1742 case TGSI_OPCODE_TXP:
1743 inst.op = SVGA3DOP_TEX;
1744 inst.control = SVGA3DOPCONT_PROJECT;
1745 break;
1746 case TGSI_OPCODE_TXB:
1747 inst.op = SVGA3DOP_TEX;
1748 inst.control = SVGA3DOPCONT_BIAS;
1749 break;
1750 case TGSI_OPCODE_TXL:
1751 inst.op = SVGA3DOP_TEXLDL;
1752 break;
1753 default:
1754 assert(0);
1755 return FALSE;
1756 }
1757
1758 texcoord = translate_src_register( emit, &insn->Src[0] );
1759 sampler = translate_src_register( emit, &insn->Src[1] );
1760
1761 if (emit->key.fkey.tex[sampler.base.num].unnormalized ||
1762 emit->dynamic_branching_level > 0)
1763 tmp = get_temp( emit );
1764
1765 /* Can't do mipmapping inside dynamic branch constructs. Force LOD
1766 * zero in that case.
1767 */
1768 if (emit->dynamic_branching_level > 0 &&
1769 inst.op == SVGA3DOP_TEX &&
1770 SVGA3dShaderGetRegType(texcoord.base.value) == SVGA3DREG_TEMP) {
1771 struct src_register zero = get_zero_immediate(emit);
1772
1773 /* MOV tmp, texcoord */
1774 if (!submit_op1( emit,
1775 inst_token( SVGA3DOP_MOV ),
1776 tmp,
1777 texcoord ))
1778 return FALSE;
1779
1780 /* MOV tmp.w, zero */
1781 if (!submit_op1( emit,
1782 inst_token( SVGA3DOP_MOV ),
1783 writemask( tmp, TGSI_WRITEMASK_W ),
1784 zero ))
1785 return FALSE;
1786
1787 texcoord = src( tmp );
1788 inst.op = SVGA3DOP_TEXLDL;
1789 }
1790
1791 /* Explicit normalization of texcoords:
1792 */
1793 if (emit->key.fkey.tex[sampler.base.num].unnormalized) {
1794 struct src_register wh = get_tex_dimensions( emit, sampler.base.num );
1795
1796 /* MUL tmp, SRC0, WH */
1797 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1798 tmp, texcoord, wh ))
1799 return FALSE;
1800
1801 texcoord = src( tmp );
1802 }
1803
1804 return submit_op2( emit, inst, dst, texcoord, sampler );
1805 }
1806
1807
1808 /**
1809 * Translate/emit 4-operand (coord, ddx, ddy, sampler) texture instructions.
1810 */
1811 static boolean
1812 emit_tex4(struct svga_shader_emitter *emit,
1813 const struct tgsi_full_instruction *insn,
1814 SVGA3dShaderDestToken dst )
1815 {
1816 SVGA3dShaderInstToken inst;
1817 struct src_register texcoord;
1818 struct src_register ddx;
1819 struct src_register ddy;
1820 struct src_register sampler;
1821
1822 texcoord = translate_src_register( emit, &insn->Src[0] );
1823 ddx = translate_src_register( emit, &insn->Src[1] );
1824 ddy = translate_src_register( emit, &insn->Src[2] );
1825 sampler = translate_src_register( emit, &insn->Src[3] );
1826
1827 inst.value = 0;
1828
1829 switch (insn->Instruction.Opcode) {
1830 case TGSI_OPCODE_TXD:
1831 inst.op = SVGA3DOP_TEXLDD; /* 4 args! */
1832 break;
1833 default:
1834 assert(0);
1835 return FALSE;
1836 }
1837
1838 return submit_op4( emit, inst, dst, texcoord, sampler, ddx, ddy );
1839 }
1840
1841
1842 /**
1843 * Emit texture swizzle code. We do this here since SVGA samplers don't
1844 * directly support swizzles.
1845 */
1846 static boolean
1847 emit_tex_swizzle(struct svga_shader_emitter *emit,
1848 SVGA3dShaderDestToken dst,
1849 struct src_register src,
1850 unsigned swizzle_x,
1851 unsigned swizzle_y,
1852 unsigned swizzle_z,
1853 unsigned swizzle_w)
1854 {
1855 const unsigned swizzleIn[4] = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
1856 unsigned srcSwizzle[4];
1857 unsigned srcWritemask = 0x0, zeroWritemask = 0x0, oneWritemask = 0x0;
1858 int i;
1859
1860 /* build writemasks and srcSwizzle terms */
1861 for (i = 0; i < 4; i++) {
1862 if (swizzleIn[i] == PIPE_SWIZZLE_ZERO) {
1863 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1864 zeroWritemask |= (1 << i);
1865 }
1866 else if (swizzleIn[i] == PIPE_SWIZZLE_ONE) {
1867 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1868 oneWritemask |= (1 << i);
1869 }
1870 else {
1871 srcSwizzle[i] = swizzleIn[i];
1872 srcWritemask |= (1 << i);
1873 }
1874 }
1875
1876 /* write x/y/z/w comps */
1877 if (dst.mask & srcWritemask) {
1878 if (!submit_op1(emit,
1879 inst_token(SVGA3DOP_MOV),
1880 writemask(dst, srcWritemask),
1881 swizzle(src,
1882 srcSwizzle[0],
1883 srcSwizzle[1],
1884 srcSwizzle[2],
1885 srcSwizzle[3])))
1886 return FALSE;
1887 }
1888
1889 /* write 0 comps */
1890 if (dst.mask & zeroWritemask) {
1891 if (!submit_op1(emit,
1892 inst_token(SVGA3DOP_MOV),
1893 writemask(dst, zeroWritemask),
1894 get_zero_immediate(emit)))
1895 return FALSE;
1896 }
1897
1898 /* write 1 comps */
1899 if (dst.mask & oneWritemask) {
1900 if (!submit_op1(emit,
1901 inst_token(SVGA3DOP_MOV),
1902 writemask(dst, oneWritemask),
1903 get_one_immediate(emit)))
1904 return FALSE;
1905 }
1906
1907 return TRUE;
1908 }
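
/**
 * Example: for swizzle_r/g/b/a = (Z, Y, X, PIPE_SWIZZLE_ONE) the function
 * above emits
 *    MOV dst.xyz, src.zyx
 *    MOV dst.w, 1.0       (taken from the common immediate)
 * so ZERO/ONE terms never read the fetched texel at all.
 */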
1909
1910
1911 /**
1912 * Translate/emit a TGSI texture sample instruction.
1913 */
1914 static boolean
1915 emit_tex(struct svga_shader_emitter *emit,
1916 const struct tgsi_full_instruction *insn)
1917 {
1918 SVGA3dShaderDestToken dst =
1919 translate_dst_register( emit, insn, 0 );
1920 struct src_register src0 =
1921 translate_src_register( emit, &insn->Src[0] );
1922 struct src_register src1 =
1923 translate_src_register( emit, &insn->Src[1] );
1924
1925 SVGA3dShaderDestToken tex_result;
1926 const unsigned unit = src1.base.num;
1927
1928 /* check for shadow samplers */
1929 boolean compare = (emit->key.fkey.tex[unit].compare_mode ==
1930 PIPE_TEX_COMPARE_R_TO_TEXTURE);
1931
1932 /* texture swizzle */
1933 boolean swizzle = (emit->key.fkey.tex[unit].swizzle_r != PIPE_SWIZZLE_RED ||
1934 emit->key.fkey.tex[unit].swizzle_g != PIPE_SWIZZLE_GREEN ||
1935 emit->key.fkey.tex[unit].swizzle_b != PIPE_SWIZZLE_BLUE ||
1936 emit->key.fkey.tex[unit].swizzle_a != PIPE_SWIZZLE_ALPHA);
1937
1938 boolean saturate = insn->Instruction.Saturate != TGSI_SAT_NONE;
1939
1940 /* If doing compare processing or tex swizzle or saturation, we need to put
1941 * the fetched color into a temporary so it can be used as a source later on.
1942 */
1943 if (compare || swizzle || saturate) {
1944 tex_result = get_temp( emit );
1945 }
1946 else {
1947 tex_result = dst;
1948 }
1949
1950 switch(insn->Instruction.Opcode) {
1951 case TGSI_OPCODE_TEX:
1952 case TGSI_OPCODE_TXB:
1953 case TGSI_OPCODE_TXP:
1954 case TGSI_OPCODE_TXL:
1955 if (!emit_tex2( emit, insn, tex_result ))
1956 return FALSE;
1957 break;
1958 case TGSI_OPCODE_TXD:
1959 if (!emit_tex4( emit, insn, tex_result ))
1960 return FALSE;
1961 break;
1962 default:
1963 assert(0);
1964 }
1965
1966 if (compare) {
1967 SVGA3dShaderDestToken dst2;
1968
1969 if (swizzle || saturate)
1970 dst2 = tex_result;
1971 else
1972 dst2 = dst;
1973
1974 if (dst.mask & TGSI_WRITEMASK_XYZ) {
1975 SVGA3dShaderDestToken src0_zdivw = get_temp( emit );
1976 /* When sampling a depth texture, the result of the comparison is in
1977 * the Y component.
1978 */
1979 struct src_register tex_src_x = scalar(src(tex_result), TGSI_SWIZZLE_Y);
1980 struct src_register r_coord;
1981
1982 if (insn->Instruction.Opcode == TGSI_OPCODE_TXP) {
1983 /* Divide texcoord R by Q */
1984 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1985 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1986 scalar(src0, TGSI_SWIZZLE_W) ))
1987 return FALSE;
1988
1989 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1990 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1991 scalar(src0, TGSI_SWIZZLE_Z),
1992 scalar(src(src0_zdivw), TGSI_SWIZZLE_X) ))
1993 return FALSE;
1994
1995 r_coord = scalar(src(src0_zdivw), TGSI_SWIZZLE_X);
1996 }
1997 else {
1998 r_coord = scalar(src0, TGSI_SWIZZLE_Z);
1999 }
2000
2001 /* Compare texture sample value against R component of texcoord */
2002 if (!emit_select(emit,
2003 emit->key.fkey.tex[unit].compare_func,
2004 writemask( dst2, TGSI_WRITEMASK_XYZ ),
2005 r_coord,
2006 tex_src_x))
2007 return FALSE;
2008 }
2009
2010 if (dst.mask & TGSI_WRITEMASK_W) {
2011 struct src_register one = get_one_immediate(emit);
2012
2013 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2014 writemask( dst2, TGSI_WRITEMASK_W ),
2015 one ))
2016 return FALSE;
2017 }
2018 }
2019
2020 if (saturate && !swizzle) {
2021 /* MOV_SAT real_dst, dst */
2022 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src(tex_result) ))
2023 return FALSE;
2024 }
2025 else if (swizzle) {
2026 /* swizzle from tex_result to dst (handles saturation too, if any) */
2027 emit_tex_swizzle(emit,
2028 dst, src(tex_result),
2029 emit->key.fkey.tex[unit].swizzle_r,
2030 emit->key.fkey.tex[unit].swizzle_g,
2031 emit->key.fkey.tex[unit].swizzle_b,
2032 emit->key.fkey.tex[unit].swizzle_a);
2033 }
2034
2035 return TRUE;
2036 }
2037
2038
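/**
* Translate/emit TGSI BGNLOOP instruction (top of a loop).
* The LOOP token takes the loop counter register aL plus an integer
* constant register, presumably holding the iteration count/start/step,
* which get_loop_const() is assumed to supply.
*/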
2039 static boolean
2040 emit_bgnloop(struct svga_shader_emitter *emit,
2041 const struct tgsi_full_instruction *insn)
2042 {
2043 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_LOOP );
2044 struct src_register loop_reg = src_register( SVGA3DREG_LOOP, 0 );
2045 struct src_register const_int = get_loop_const( emit );
2046
2047 emit->dynamic_branching_level++;
2048
2049 return (emit_instruction( emit, inst ) &&
2050 emit_src( emit, loop_reg ) &&
2051 emit_src( emit, const_int ) );
2052 }
2053
2054
2055 static boolean
2056 emit_endloop(struct svga_shader_emitter *emit,
2057 const struct tgsi_full_instruction *insn)
2058 {
2059 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_ENDLOOP );
2060
2061 emit->dynamic_branching_level--;
2062
2063 return emit_instruction( emit, inst );
2064 }
2065
2066
2067 /**
2068 * Translate/emit TGSI BREAK (out of loop) instruction.
2069 */
2070 static boolean
2071 emit_brk(struct svga_shader_emitter *emit,
2072 const struct tgsi_full_instruction *insn)
2073 {
2074 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_BREAK );
2075 return emit_instruction( emit, inst );
2076 }
2077
2078
2079 /**
2080 * Emit simple instruction which operates on one scalar value (not
2081 * a vector). Ex: LG2, RCP, RSQ.
2082 */
2083 static boolean
2084 emit_scalar_op1(struct svga_shader_emitter *emit,
2085 unsigned opcode,
2086 const struct tgsi_full_instruction *insn)
2087 {
2088 SVGA3dShaderInstToken inst;
2089 SVGA3dShaderDestToken dst;
2090 struct src_register src;
2091
2092 inst = inst_token( opcode );
2093 dst = translate_dst_register( emit, insn, 0 );
2094 src = translate_src_register( emit, &insn->Src[0] );
2095 src = scalar( src, TGSI_SWIZZLE_X );
2096
2097 return submit_op1( emit, inst, dst, src );
2098 }
2099
2100
2101 /**
2102 * Translate/emit a simple instruction (one which has no special-case
2103 * code) such as ADD, MUL, MIN, MAX.
2104 */
2105 static boolean
2106 emit_simple_instruction(struct svga_shader_emitter *emit,
2107 unsigned opcode,
2108 const struct tgsi_full_instruction *insn)
2109 {
2110 const struct tgsi_full_src_register *src = insn->Src;
2111 SVGA3dShaderInstToken inst;
2112 SVGA3dShaderDestToken dst;
2113
2114 inst = inst_token( opcode );
2115 dst = translate_dst_register( emit, insn, 0 );
2116
2117 switch (insn->Instruction.NumSrcRegs) {
2118 case 0:
2119 return submit_op0( emit, inst, dst );
2120 case 1:
2121 return submit_op1( emit, inst, dst,
2122 translate_src_register( emit, &src[0] ));
2123 case 2:
2124 return submit_op2( emit, inst, dst,
2125 translate_src_register( emit, &src[0] ),
2126 translate_src_register( emit, &src[1] ) );
2127 case 3:
2128 return submit_op3( emit, inst, dst,
2129 translate_src_register( emit, &src[0] ),
2130 translate_src_register( emit, &src[1] ),
2131 translate_src_register( emit, &src[2] ) );
2132 default:
2133 assert(0);
2134 return FALSE;
2135 }
2136 }
2137
2138
2139 /**
2140 * Translate/emit TGSI DDX, DDY instructions.
2141 */
2142 static boolean
2143 emit_deriv(struct svga_shader_emitter *emit,
2144 const struct tgsi_full_instruction *insn )
2145 {
2146 if (emit->dynamic_branching_level > 0 &&
2147 insn->Src[0].Register.File == TGSI_FILE_TEMPORARY)
2148 {
2149 SVGA3dShaderDestToken dst =
2150 translate_dst_register( emit, insn, 0 );
2151
2152 /* Deriv opcodes are not valid inside dynamic branching; work around
2153 * this by zeroing out the destination.
2154 */
2155 if (!submit_op1(emit,
2156 inst_token( SVGA3DOP_MOV ),
2157 dst,
2158 get_zero_immediate(emit)))
2159 return FALSE;
2160
2161 return TRUE;
2162 }
2163 else {
2164 unsigned opcode;
2165 const struct tgsi_full_src_register *reg = &insn->Src[0];
2166 SVGA3dShaderInstToken inst;
2167 SVGA3dShaderDestToken dst;
2168 struct src_register src0;
2169
2170 switch (insn->Instruction.Opcode) {
2171 case TGSI_OPCODE_DDX:
2172 opcode = SVGA3DOP_DSX;
2173 break;
2174 case TGSI_OPCODE_DDY:
2175 opcode = SVGA3DOP_DSY;
2176 break;
2177 default:
2178 return FALSE;
2179 }
2180
2181 inst = inst_token( opcode );
2182 dst = translate_dst_register( emit, insn, 0 );
2183 src0 = translate_src_register( emit, reg );
2184
2185 /* We cannot use negate or abs modifiers on the source of a dsx/dsy instruction.
2186 */
2187 if (reg->Register.Absolute ||
2188 reg->Register.Negate) {
2189 SVGA3dShaderDestToken temp = get_temp( emit );
2190
2191 if (!emit_repl( emit, temp, &src0 ))
2192 return FALSE;
2193 }
2194
2195 return submit_op1( emit, inst, dst, src0 );
2196 }
2197 }
2198
2199
2200 /**
2201 * Translate/emit ARL (Address Register Load) instruction, which moves
2202 * a value into the special 'address' register. This is used to
2203 * implement indirect/variable indexing into arrays.
2204 */
2205 static boolean
2206 emit_arl(struct svga_shader_emitter *emit,
2207 const struct tgsi_full_instruction *insn)
2208 {
2209 ++emit->current_arl;
2210 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2211 /* MOVA is not present in the pixel shader instruction set.
2212 * Ignore this instruction altogether since it is
2213 * only used for loop counters -- and for that
2214 * we reference aL directly.
2215 */
2216 return TRUE;
2217 }
2218 if (svga_arl_needs_adjustment( emit )) {
2219 return emit_fake_arl( emit, insn );
2220 } else {
2221 /* no need to adjust, just emit straight arl */
2222 return emit_simple_instruction(emit, SVGA3DOP_MOVA, insn);
2223 }
2224 }
2225
2226
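/**
* Translate/emit TGSI POW instruction as a scalar SVGA3DOP_POW.
* When a temporary is required the emitted sequence is roughly:
*
*   POW tmp.x, src0.x, src1.x
*   MOV dst, tmp.x
*/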
2227 static boolean
2228 emit_pow(struct svga_shader_emitter *emit,
2229 const struct tgsi_full_instruction *insn)
2230 {
2231 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2232 struct src_register src0 = translate_src_register(
2233 emit, &insn->Src[0] );
2234 struct src_register src1 = translate_src_register(
2235 emit, &insn->Src[1] );
2236 boolean need_tmp = FALSE;
2237
2238 /* POW can only output to a temporary */
2239 if (insn->Dst[0].Register.File != TGSI_FILE_TEMPORARY)
2240 need_tmp = TRUE;
2241
2242 /* POW src1 must not be the same register as dst */
2243 if (alias_src_dst( src1, dst ))
2244 need_tmp = TRUE;
2245
2246 /* it's a scalar op */
2247 src0 = scalar( src0, TGSI_SWIZZLE_X );
2248 src1 = scalar( src1, TGSI_SWIZZLE_X );
2249
2250 if (need_tmp) {
2251 SVGA3dShaderDestToken tmp =
2252 writemask(get_temp( emit ), TGSI_WRITEMASK_X );
2253
2254 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ), tmp, src0, src1))
2255 return FALSE;
2256
2257 return submit_op1(emit, inst_token( SVGA3DOP_MOV ),
2258 dst, scalar(src(tmp), 0) );
2259 }
2260 else {
2261 return submit_op2(emit, inst_token( SVGA3DOP_POW ), dst, src0, src1);
2262 }
2263 }
2264
2265
2266 /**
2267 * Translate/emit TGSI XPD (vector cross product) instruction.
2268 */
2269 static boolean
2270 emit_xpd(struct svga_shader_emitter *emit,
2271 const struct tgsi_full_instruction *insn)
2272 {
2273 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2274 const struct src_register src0 = translate_src_register(
2275 emit, &insn->Src[0] );
2276 const struct src_register src1 = translate_src_register(
2277 emit, &insn->Src[1] );
2278 boolean need_dst_tmp = FALSE;
2279
2280 /* XPD can only output to a temporary */
2281 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP)
2282 need_dst_tmp = TRUE;
2283
2284 /* The dst reg must not be the same as src0 or src1 */
2285 if (alias_src_dst(src0, dst) ||
2286 alias_src_dst(src1, dst))
2287 need_dst_tmp = TRUE;
2288
2289 if (need_dst_tmp) {
2290 SVGA3dShaderDestToken tmp = get_temp( emit );
2291
2292 /* Obey DX9 restrictions on mask:
2293 */
2294 tmp.mask = dst.mask & TGSI_WRITEMASK_XYZ;
2295
2296 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), tmp, src0, src1))
2297 return FALSE;
2298
2299 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
2300 return FALSE;
2301 }
2302 else {
2303 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), dst, src0, src1))
2304 return FALSE;
2305 }
2306
2307 /* Need to emit 1.0 to dst.w?
2308 */
2309 if (dst.mask & TGSI_WRITEMASK_W) {
2310 struct src_register one = get_one_immediate( emit );
2311
2312 if (!submit_op1(emit,
2313 inst_token( SVGA3DOP_MOV ),
2314 writemask(dst, TGSI_WRITEMASK_W),
2315 one))
2316 return FALSE;
2317 }
2318
2319 return TRUE;
2320 }
2321
2322
2323 /**
2324 * Emit a LRP (linear interpolation) instruction.
2325 */
2326 static boolean
2327 submit_lrp(struct svga_shader_emitter *emit,
2328 SVGA3dShaderDestToken dst,
2329 struct src_register src0,
2330 struct src_register src1,
2331 struct src_register src2)
2332 {
2333 SVGA3dShaderDestToken tmp;
2334 boolean need_dst_tmp = FALSE;
2335
2336 /* The dst reg must be a temporary, and not be the same as src0 or src2 */
2337 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
2338 alias_src_dst(src0, dst) ||
2339 alias_src_dst(src2, dst))
2340 need_dst_tmp = TRUE;
2341
2342 if (need_dst_tmp) {
2343 tmp = get_temp( emit );
2344 tmp.mask = dst.mask;
2345 }
2346 else {
2347 tmp = dst;
2348 }
2349
2350 if (!submit_op3(emit, inst_token( SVGA3DOP_LRP ), tmp, src0, src1, src2))
2351 return FALSE;
2352
2353 if (need_dst_tmp) {
2354 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
2355 return FALSE;
2356 }
2357
2358 return TRUE;
2359 }
2360
2361
2362 /**
2363 * Translate/emit LRP (Linear Interpolation) instruction.
2364 */
2365 static boolean
2366 emit_lrp(struct svga_shader_emitter *emit,
2367 const struct tgsi_full_instruction *insn)
2368 {
2369 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2370 const struct src_register src0 = translate_src_register(
2371 emit, &insn->Src[0] );
2372 const struct src_register src1 = translate_src_register(
2373 emit, &insn->Src[1] );
2374 const struct src_register src2 = translate_src_register(
2375 emit, &insn->Src[2] );
2376
2377 return submit_lrp(emit, dst, src0, src1, src2);
2378 }
2379
2380 /**
2381 * Translate/emit DST (distance vector) instruction.
2382 */
2383 static boolean
2384 emit_dst_insn(struct svga_shader_emitter *emit,
2385 const struct tgsi_full_instruction *insn)
2386 {
2387 if (emit->unit == PIPE_SHADER_VERTEX) {
2388 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
2389 */
2390 return emit_simple_instruction(emit, SVGA3DOP_DST, insn);
2391 }
2392 else {
2393 /* result[0] = 1 * 1;
2394 * result[1] = a[1] * b[1];
2395 * result[2] = a[2] * 1;
2396 * result[3] = 1 * b[3];
2397 */
2398 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2399 SVGA3dShaderDestToken tmp;
2400 const struct src_register src0 = translate_src_register(
2401 emit, &insn->Src[0] );
2402 const struct src_register src1 = translate_src_register(
2403 emit, &insn->Src[1] );
2404 boolean need_tmp = FALSE;
2405
2406 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
2407 alias_src_dst(src0, dst) ||
2408 alias_src_dst(src1, dst))
2409 need_tmp = TRUE;
2410
2411 if (need_tmp) {
2412 tmp = get_temp( emit );
2413 }
2414 else {
2415 tmp = dst;
2416 }
2417
2418 /* tmp.xw = 1.0
2419 */
2420 if (tmp.mask & TGSI_WRITEMASK_XW) {
2421 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2422 writemask(tmp, TGSI_WRITEMASK_XW ),
2423 get_one_immediate(emit)))
2424 return FALSE;
2425 }
2426
2427 /* tmp.yz = src0
2428 */
2429 if (tmp.mask & TGSI_WRITEMASK_YZ) {
2430 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2431 writemask(tmp, TGSI_WRITEMASK_YZ ),
2432 src0))
2433 return FALSE;
2434 }
2435
2436 /* tmp.yw = tmp * src1
2437 */
2438 if (tmp.mask & TGSI_WRITEMASK_YW) {
2439 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2440 writemask(tmp, TGSI_WRITEMASK_YW ),
2441 src(tmp),
2442 src1))
2443 return FALSE;
2444 }
2445
2446 /* dst = tmp
2447 */
2448 if (need_tmp) {
2449 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2450 dst,
2451 src(tmp)))
2452 return FALSE;
2453 }
2454 }
2455
2456 return TRUE;
2457 }
2458
2459
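/**
* Translate/emit TGSI EXP instruction, which (per component of the
* writemask) produces roughly:
*
*   dst.x = 2 ^ floor(src0.x)
*   dst.y = src0.x - floor(src0.x)
*   dst.z = 2 ^ src0.x          (partial precision, via EXPP)
*   dst.w = 1.0
*/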
2460 static boolean
2461 emit_exp(struct svga_shader_emitter *emit,
2462 const struct tgsi_full_instruction *insn)
2463 {
2464 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2465 struct src_register src0 =
2466 translate_src_register( emit, &insn->Src[0] );
2467 SVGA3dShaderDestToken fraction;
2468
2469 if (dst.mask & TGSI_WRITEMASK_Y)
2470 fraction = dst;
2471 else if (dst.mask & TGSI_WRITEMASK_X)
2472 fraction = get_temp( emit );
2473 else
2474 fraction.value = 0;
2475
2476 /* If x or y is being written, compute src0 - floor(src0): y wants this
2477 * fraction directly and x needs it below to form floor(src0). */
2478 if (dst.mask & TGSI_WRITEMASK_XY) {
2479 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2480 writemask( fraction, TGSI_WRITEMASK_Y ),
2481 src0 ))
2482 return FALSE;
2483 }
2484
2485 /* If x is being written, fill it with 2 ^ floor(src0).
2486 */
2487 if (dst.mask & TGSI_WRITEMASK_X) {
2488 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2489 writemask( dst, TGSI_WRITEMASK_X ),
2490 src0,
2491 scalar( negate( src( fraction ) ), TGSI_SWIZZLE_Y ) ) )
2492 return FALSE;
2493
2494 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2495 writemask( dst, TGSI_WRITEMASK_X ),
2496 scalar( src( dst ), TGSI_SWIZZLE_X ) ) )
2497 return FALSE;
2498
2499 if (!(dst.mask & TGSI_WRITEMASK_Y))
2500 release_temp( emit, fraction );
2501 }
2502
2503 /* If z is being written, fill it with 2 ^ src0 (partial precision).
2504 */
2505 if (dst.mask & TGSI_WRITEMASK_Z) {
2506 if (!submit_op1( emit, inst_token( SVGA3DOP_EXPP ),
2507 writemask( dst, TGSI_WRITEMASK_Z ),
2508 src0 ) )
2509 return FALSE;
2510 }
2511
2512 /* If w is being written, fill it with one.
2513 */
2514 if (dst.mask & TGSI_WRITEMASK_W) {
2515 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2516 writemask(dst, TGSI_WRITEMASK_W),
2517 get_one_immediate(emit)))
2518 return FALSE;
2519 }
2520
2521 return TRUE;
2522 }
2523
2524
2525 /**
2526 * Translate/emit LIT (Lighting helper) instruction.
2527 */
2528 static boolean
2529 emit_lit(struct svga_shader_emitter *emit,
2530 const struct tgsi_full_instruction *insn)
2531 {
2532 if (emit->unit == PIPE_SHADER_VERTEX) {
2533 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
2534 */
2535 return emit_simple_instruction(emit, SVGA3DOP_LIT, insn);
2536 }
2537 else {
2538 /* D3D vs. GL semantics can be fairly easily accommodated by
2539 * variations on this sequence.
2540 *
2541 * GL:
2542 * tmp.y = src.x
2543 * tmp.z = pow(src.y,src.w)
2544 * p0 = src0.xxxx > 0
2545 * result = zero.wxxw
2546 * (p0) result.yz = tmp
2547 *
2548 * D3D:
2549 * tmp.y = src.x
2550 * tmp.z = pow(src.y,src.w)
2551 * p0 = src0.xxyy > 0
2552 * result = zero.wxxw
2553 * (p0) result.yz = tmp
2554 *
2555 * Will implement the GL version for now.
2556 */
2557 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2558 SVGA3dShaderDestToken tmp = get_temp( emit );
2559 const struct src_register src0 = translate_src_register(
2560 emit, &insn->Src[0] );
2561
2562 /* tmp = pow(src.y, src.w)
2563 */
2564 if (dst.mask & TGSI_WRITEMASK_Z) {
2565 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ),
2566 tmp,
2567 scalar(src0, 1),
2568 scalar(src0, 3)))
2569 return FALSE;
2570 }
2571
2572 /* tmp.y = src.x
2573 */
2574 if (dst.mask & TGSI_WRITEMASK_Y) {
2575 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2576 writemask(tmp, TGSI_WRITEMASK_Y ),
2577 scalar(src0, 0)))
2578 return FALSE;
2579 }
2580
2581 /* Can't quite do this with emit conditional due to the extra
2582 * writemask on the predicated mov:
2583 */
2584 {
2585 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
2586 struct src_register predsrc;
2587
2588 /* D3D vs GL semantics:
2589 */
2590 if (0)
2591 predsrc = swizzle(src0, 0, 0, 1, 1); /* D3D */
2592 else
2593 predsrc = swizzle(src0, 0, 0, 0, 0); /* GL */
2594
2595 /* SETP predsrc, GT, {0}.x  (src0.xxxx for GL, src0.xxyy for D3D) */
2596 if (!submit_op2( emit,
2597 inst_token_setp(SVGA3DOPCOMP_GT),
2598 pred_reg,
2599 predsrc,
2600 get_zero_immediate(emit)))
2601 return FALSE;
2602
2603 /* MOV dst, fail */
2604 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst,
2605 get_immediate(emit, 1.0f, 0.0f, 0.0f, 1.0f)))
2606 return FALSE;
2607
2608 /* MOV dst.yz, tmp (predicated)
2609 *
2610 * Note that the predicate reg (and possible modifiers) is passed
2611 * as the first source argument.
2612 */
2613 if (dst.mask & TGSI_WRITEMASK_YZ) {
2614 if (!submit_op2( emit,
2615 inst_token_predicated(SVGA3DOP_MOV),
2616 writemask(dst, TGSI_WRITEMASK_YZ),
2617 src( pred_reg ), src( tmp ) ))
2618 return FALSE;
2619 }
2620 }
2621 }
2622
2623 return TRUE;
2624 }
2625
2626
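/**
* Translate/emit TGSI EX2 (2 ^ src.x) via the scalar EXP instruction.
* With a partial writemask the result goes through a temporary first,
* presumably so the replicated scalar result only lands in the
* requested components of dst.
*/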
2627 static boolean
2628 emit_ex2(struct svga_shader_emitter *emit,
2629 const struct tgsi_full_instruction *insn)
2630 {
2631 SVGA3dShaderInstToken inst;
2632 SVGA3dShaderDestToken dst;
2633 struct src_register src0;
2634
2635 inst = inst_token( SVGA3DOP_EXP );
2636 dst = translate_dst_register( emit, insn, 0 );
2637 src0 = translate_src_register( emit, &insn->Src[0] );
2638 src0 = scalar( src0, TGSI_SWIZZLE_X );
2639
2640 if (dst.mask != TGSI_WRITEMASK_XYZW) {
2641 SVGA3dShaderDestToken tmp = get_temp( emit );
2642
2643 if (!submit_op1( emit, inst, tmp, src0 ))
2644 return FALSE;
2645
2646 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2647 dst,
2648 scalar( src( tmp ), TGSI_SWIZZLE_X ) );
2649 }
2650
2651 return submit_op1( emit, inst, dst, src0 );
2652 }
2653
2654
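/**
* Translate/emit TGSI LOG instruction, which (per component of the
* writemask) produces roughly:
*
*   dst.x = floor(log2(|src0.x|))
*   dst.y = |src0.x| / 2 ^ floor(log2(|src0.x|))
*   dst.z = log2(|src0.x|)
*   dst.w = 1.0
*/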
2655 static boolean
2656 emit_log(struct svga_shader_emitter *emit,
2657 const struct tgsi_full_instruction *insn)
2658 {
2659 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2660 struct src_register src0 =
2661 translate_src_register( emit, &insn->Src[0] );
2662 SVGA3dShaderDestToken abs_tmp;
2663 struct src_register abs_src0;
2664 SVGA3dShaderDestToken log2_abs;
2665
2666 abs_tmp.value = 0;
2667
2668 if (dst.mask & TGSI_WRITEMASK_Z)
2669 log2_abs = dst;
2670 else if (dst.mask & TGSI_WRITEMASK_XY)
2671 log2_abs = get_temp( emit );
2672 else
2673 log2_abs.value = 0;
2674
2675 /* If z is being written, fill it with log2( abs( src0 ) ).
2676 */
2677 if (dst.mask & TGSI_WRITEMASK_XYZ) {
2678 if (!src0.base.srcMod || src0.base.srcMod == SVGA3DSRCMOD_ABS)
2679 abs_src0 = src0;
2680 else {
2681 abs_tmp = get_temp( emit );
2682
2683 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2684 abs_tmp,
2685 src0 ) )
2686 return FALSE;
2687
2688 abs_src0 = src( abs_tmp );
2689 }
2690
2691 abs_src0 = absolute( scalar( abs_src0, TGSI_SWIZZLE_X ) );
2692
2693 if (!submit_op1( emit, inst_token( SVGA3DOP_LOG ),
2694 writemask( log2_abs, TGSI_WRITEMASK_Z ),
2695 abs_src0 ) )
2696 return FALSE;
2697 }
2698
2699 if (dst.mask & TGSI_WRITEMASK_XY) {
2700 SVGA3dShaderDestToken floor_log2;
2701
2702 if (dst.mask & TGSI_WRITEMASK_X)
2703 floor_log2 = dst;
2704 else
2705 floor_log2 = get_temp( emit );
2706
2707 /* If x is being written, fill it with floor( log2( abs( src0 ) ) ).
2708 */
2709 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2710 writemask( floor_log2, TGSI_WRITEMASK_X ),
2711 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ) ) )
2712 return FALSE;
2713
2714 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2715 writemask( floor_log2, TGSI_WRITEMASK_X ),
2716 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ),
2717 negate( src( floor_log2 ) ) ) )
2718 return FALSE;
2719
2720 /* If y is being written, fill it with
2721 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
2722 */
2723 if (dst.mask & TGSI_WRITEMASK_Y) {
2724 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2725 writemask( dst, TGSI_WRITEMASK_Y ),
2726 negate( scalar( src( floor_log2 ),
2727 TGSI_SWIZZLE_X ) ) ) )
2728 return FALSE;
2729
2730 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2731 writemask( dst, TGSI_WRITEMASK_Y ),
2732 src( dst ),
2733 abs_src0 ) )
2734 return FALSE;
2735 }
2736
2737 if (!(dst.mask & TGSI_WRITEMASK_X))
2738 release_temp( emit, floor_log2 );
2739
2740 if (!(dst.mask & TGSI_WRITEMASK_Z))
2741 release_temp( emit, log2_abs );
2742 }
2743
2744 if (dst.mask & TGSI_WRITEMASK_XYZ && src0.base.srcMod &&
2745 src0.base.srcMod != SVGA3DSRCMOD_ABS)
2746 release_temp( emit, abs_tmp );
2747
2748 /* If w is being written, fill it with one.
2749 */
2750 if (dst.mask & TGSI_WRITEMASK_W) {
2751 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2752 writemask(dst, TGSI_WRITEMASK_W),
2753 get_one_immediate(emit)))
2754 return FALSE;
2755 }
2756
2757 return TRUE;
2758 }
2759
2760
2761 /**
2762 * Translate TGSI TRUNC or ROUND instruction.
2763 * For TRUNC we truncate toward zero, e.g. trunc(-1.9) = -1; ROUND rounds to nearest.
2764 * Different approaches are needed for VS versus PS.
2765 */
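/*
* Worked example of the ROUND path below for src0 = -1.9:
*   t0 = |src0| + 0.5      = 2.4
*   t1 = frac(t0)          = 0.4
*   t1 = t0 - t1           = 2.0
*   dst = t1 * sign(src0)  = -2.0
*/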
2766 static boolean
2767 emit_trunc_round(struct svga_shader_emitter *emit,
2768 const struct tgsi_full_instruction *insn,
2769 boolean round)
2770 {
2771 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
2772 const struct src_register src0 =
2773 translate_src_register(emit, &insn->Src[0] );
2774 SVGA3dShaderDestToken t1 = get_temp(emit);
2775
2776 if (round) {
2777 SVGA3dShaderDestToken t0 = get_temp(emit);
2778 struct src_register half = get_half_immediate(emit);
2779
2780 /* t0 = abs(src0) + 0.5 */
2781 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t0,
2782 absolute(src0), half))
2783 return FALSE;
2784
2785 /* t1 = fract(t0) */
2786 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, src(t0)))
2787 return FALSE;
2788
2789 /* t1 = t0 - t1 */
2790 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, src(t0),
2791 negate(src(t1))))
2792 return FALSE;
2793 }
2794 else {
2795 /* trunc */
2796
2797 /* t1 = fract(abs(src0)) */
2798 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, absolute(src0)))
2799 return FALSE;
2800
2801 /* t1 = abs(src0) - t1 */
2802 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, absolute(src0),
2803 negate(src(t1))))
2804 return FALSE;
2805 }
2806
2807 /*
2808 * Now we need to multiply t1 by the sign of the original value.
2809 */
2810 if (emit->unit == PIPE_SHADER_VERTEX) {
2811 /* For VS: use SGN instruction */
2812 /* Need two extra/dummy registers: */
2813 SVGA3dShaderDestToken t2 = get_temp(emit), t3 = get_temp(emit),
2814 t4 = get_temp(emit);
2815
2816 /* t2 = sign(src0) */
2817 if (!submit_op3(emit, inst_token(SVGA3DOP_SGN), t2, src0,
2818 src(t3), src(t4)))
2819 return FALSE;
2820
2821 /* dst = t1 * t2 */
2822 if (!submit_op2(emit, inst_token(SVGA3DOP_MUL), dst, src(t1), src(t2)))
2823 return FALSE;
2824 }
2825 else {
2826 /* For FS: Use CMP instruction */
2827 return submit_op3(emit, inst_token( SVGA3DOP_CMP ), dst,
2828 src0, src(t1), negate(src(t1)));
2829 }
2830
2831 return TRUE;
2832 }
2833
2834
2835 /**
2836 * Translate/emit "begin subroutine" instruction/marker/label.
2837 */
2838 static boolean
2839 emit_bgnsub(struct svga_shader_emitter *emit,
2840 unsigned position,
2841 const struct tgsi_full_instruction *insn)
2842 {
2843 unsigned i;
2844
2845 /* Note that we've finished the main function and are now emitting
2846 * subroutines. This affects how we terminate the generated
2847 * shader.
2848 */
2849 emit->in_main_func = FALSE;
2850
2851 for (i = 0; i < emit->nr_labels; i++) {
2852 if (emit->label[i] == position) {
2853 return (emit_instruction( emit, inst_token( SVGA3DOP_RET ) ) &&
2854 emit_instruction( emit, inst_token( SVGA3DOP_LABEL ) ) &&
2855 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2856 }
2857 }
2858
2859 assert(0);
2860 return TRUE;
2861 }
2862
2863
2864 /**
2865 * Translate/emit subroutine call instruction.
2866 */
2867 static boolean
2868 emit_call(struct svga_shader_emitter *emit,
2869 const struct tgsi_full_instruction *insn)
2870 {
2871 unsigned position = insn->Label.Label;
2872 unsigned i;
2873
2874 for (i = 0; i < emit->nr_labels; i++) {
2875 if (emit->label[i] == position)
2876 break;
2877 }
2878
2879 if (emit->nr_labels == Elements(emit->label))
2880 return FALSE;
2881
2882 if (i == emit->nr_labels) {
2883 emit->label[i] = position;
2884 emit->nr_labels++;
2885 }
2886
2887 return (emit_instruction( emit, inst_token( SVGA3DOP_CALL ) ) &&
2888 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2889 }
2890
2891
2892 /**
2893 * Called at the end of the shader. Actually, emit special "fix-up"
2894 * code for the vertex/fragment shader.
2895 */
2896 static boolean
2897 emit_end(struct svga_shader_emitter *emit)
2898 {
2899 if (emit->unit == PIPE_SHADER_VERTEX) {
2900 return emit_vs_postamble( emit );
2901 }
2902 else {
2903 return emit_ps_postamble( emit );
2904 }
2905 }
2906
2907
2908 /**
2909 * Translate any TGSI instruction to SVGA.
2910 */
2911 static boolean
2912 svga_emit_instruction(struct svga_shader_emitter *emit,
2913 unsigned position,
2914 const struct tgsi_full_instruction *insn)
2915 {
2916 switch (insn->Instruction.Opcode) {
2917
2918 case TGSI_OPCODE_ARL:
2919 return emit_arl( emit, insn );
2920
2921 case TGSI_OPCODE_TEX:
2922 case TGSI_OPCODE_TXB:
2923 case TGSI_OPCODE_TXP:
2924 case TGSI_OPCODE_TXL:
2925 case TGSI_OPCODE_TXD:
2926 return emit_tex( emit, insn );
2927
2928 case TGSI_OPCODE_DDX:
2929 case TGSI_OPCODE_DDY:
2930 return emit_deriv( emit, insn );
2931
2932 case TGSI_OPCODE_BGNSUB:
2933 return emit_bgnsub( emit, position, insn );
2934
2935 case TGSI_OPCODE_ENDSUB:
2936 return TRUE;
2937
2938 case TGSI_OPCODE_CAL:
2939 return emit_call( emit, insn );
2940
2941 case TGSI_OPCODE_FLR:
2942 return emit_floor( emit, insn );
2943
2944 case TGSI_OPCODE_TRUNC:
2945 return emit_trunc_round( emit, insn, FALSE );
2946
2947 case TGSI_OPCODE_ROUND:
2948 return emit_trunc_round( emit, insn, TRUE );
2949
2950 case TGSI_OPCODE_CEIL:
2951 return emit_ceil( emit, insn );
2952
2953 case TGSI_OPCODE_CMP:
2954 return emit_cmp( emit, insn );
2955
2956 case TGSI_OPCODE_DIV:
2957 return emit_div( emit, insn );
2958
2959 case TGSI_OPCODE_DP2:
2960 return emit_dp2( emit, insn );
2961
2962 case TGSI_OPCODE_DPH:
2963 return emit_dph( emit, insn );
2964
2965 case TGSI_OPCODE_NRM:
2966 return emit_nrm( emit, insn );
2967
2968 case TGSI_OPCODE_COS:
2969 return emit_cos( emit, insn );
2970
2971 case TGSI_OPCODE_SIN:
2972 return emit_sin( emit, insn );
2973
2974 case TGSI_OPCODE_SCS:
2975 return emit_sincos( emit, insn );
2976
2977 case TGSI_OPCODE_END:
2978 /* TGSI always finishes the main func with an END */
2979 return emit_end( emit );
2980
2981 case TGSI_OPCODE_KILL_IF:
2982 return emit_kill_if( emit, insn );
2983
2984 /* Selection opcodes. The underlying language is fairly
2985 * non-orthogonal about these.
2986 */
2987 case TGSI_OPCODE_SEQ:
2988 return emit_select_op( emit, PIPE_FUNC_EQUAL, insn );
2989
2990 case TGSI_OPCODE_SNE:
2991 return emit_select_op( emit, PIPE_FUNC_NOTEQUAL, insn );
2992
2993 case TGSI_OPCODE_SGT:
2994 return emit_select_op( emit, PIPE_FUNC_GREATER, insn );
2995
2996 case TGSI_OPCODE_SGE:
2997 return emit_select_op( emit, PIPE_FUNC_GEQUAL, insn );
2998
2999 case TGSI_OPCODE_SLT:
3000 return emit_select_op( emit, PIPE_FUNC_LESS, insn );
3001
3002 case TGSI_OPCODE_SLE:
3003 return emit_select_op( emit, PIPE_FUNC_LEQUAL, insn );
3004
3005 case TGSI_OPCODE_SUB:
3006 return emit_sub( emit, insn );
3007
3008 case TGSI_OPCODE_POW:
3009 return emit_pow( emit, insn );
3010
3011 case TGSI_OPCODE_EX2:
3012 return emit_ex2( emit, insn );
3013
3014 case TGSI_OPCODE_EXP:
3015 return emit_exp( emit, insn );
3016
3017 case TGSI_OPCODE_LOG:
3018 return emit_log( emit, insn );
3019
3020 case TGSI_OPCODE_LG2:
3021 return emit_scalar_op1( emit, SVGA3DOP_LOG, insn );
3022
3023 case TGSI_OPCODE_RSQ:
3024 return emit_scalar_op1( emit, SVGA3DOP_RSQ, insn );
3025
3026 case TGSI_OPCODE_RCP:
3027 return emit_scalar_op1( emit, SVGA3DOP_RCP, insn );
3028
3029 case TGSI_OPCODE_CONT:
3030 /* not expected (we return PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED = 0) */
3031 return FALSE;
3032
3033 case TGSI_OPCODE_RET:
3034 /* This is a noop -- we tell mesa that we can't support RET
3035 * within a function (early return), so this will always be
3036 * followed by an ENDSUB.
3037 */
3038 return TRUE;
3039
3040 /* These aren't actually used by any of the frontends we care
3041 * about:
3042 */
3043 case TGSI_OPCODE_CLAMP:
3044 case TGSI_OPCODE_AND:
3045 case TGSI_OPCODE_OR:
3046 case TGSI_OPCODE_I2F:
3047 case TGSI_OPCODE_NOT:
3048 case TGSI_OPCODE_SHL:
3049 case TGSI_OPCODE_ISHR:
3050 case TGSI_OPCODE_XOR:
3051 return FALSE;
3052
3053 case TGSI_OPCODE_IF:
3054 return emit_if( emit, insn );
3055 case TGSI_OPCODE_ELSE:
3056 return emit_else( emit, insn );
3057 case TGSI_OPCODE_ENDIF:
3058 return emit_endif( emit, insn );
3059
3060 case TGSI_OPCODE_BGNLOOP:
3061 return emit_bgnloop( emit, insn );
3062 case TGSI_OPCODE_ENDLOOP:
3063 return emit_endloop( emit, insn );
3064 case TGSI_OPCODE_BRK:
3065 return emit_brk( emit, insn );
3066
3067 case TGSI_OPCODE_XPD:
3068 return emit_xpd( emit, insn );
3069
3070 case TGSI_OPCODE_KILL:
3071 return emit_kill( emit, insn );
3072
3073 case TGSI_OPCODE_DST:
3074 return emit_dst_insn( emit, insn );
3075
3076 case TGSI_OPCODE_LIT:
3077 return emit_lit( emit, insn );
3078
3079 case TGSI_OPCODE_LRP:
3080 return emit_lrp( emit, insn );
3081
3082 case TGSI_OPCODE_SSG:
3083 return emit_ssg( emit, insn );
3084
3085 default:
3086 {
3087 unsigned opcode = translate_opcode(insn->Instruction.Opcode);
3088
3089 if (opcode == SVGA3DOP_LAST_INST)
3090 return FALSE;
3091
3092 if (!emit_simple_instruction( emit, opcode, insn ))
3093 return FALSE;
3094 }
3095 }
3096
3097 return TRUE;
3098 }
3099
3100
3101 /**
3102 * Translate/emit a TGSI IMMEDIATE declaration.
3103 * An immediate vector is a constant that's hard-coded into the shader.
3104 */
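/*
* For example, a one-component immediate such as { 0.5 } is padded
* from id[] below and emitted as the constant { 0.5, 0, 0, 1 }.
*/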
3105 static boolean
3106 svga_emit_immediate(struct svga_shader_emitter *emit,
3107 const struct tgsi_full_immediate *imm)
3108 {
3109 static const float id[4] = {0,0,0,1};
3110 float value[4];
3111 unsigned i;
3112
3113 assert(1 <= imm->Immediate.NrTokens && imm->Immediate.NrTokens <= 5);
3114 for (i = 0; i < imm->Immediate.NrTokens - 1; i++) {
3115 float f = imm->u[i].Float;
3116 value[i] = util_is_inf_or_nan(f) ? 0.0f : f;
3117 }
3118
3119 /* If the immediate has fewer than four values, fill in the remaining
3120 * positions from id={0,0,0,1}.
3121 */
3122 for ( ; i < 4; i++ )
3123 value[i] = id[i];
3124
3125 return emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
3126 emit->imm_start + emit->internal_imm_count++,
3127 value[0], value[1], value[2], value[3]);
3128 }
3129
3130
3131 static boolean
3132 make_immediate(struct svga_shader_emitter *emit,
3133 float a, float b, float c, float d,
3134 struct src_register *out )
3135 {
3136 unsigned idx = emit->nr_hw_float_const++;
3137
3138 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
3139 idx, a, b, c, d ))
3140 return FALSE;
3141
3142 *out = src_register( SVGA3DREG_CONST, idx );
3143
3144 return TRUE;
3145 }
3146
3147
3148 /**
3149 * Emit special VS instructions at top of shader.
3150 */
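/*
* The {0, 0, 0.5, 0.5} immediate created here is consumed by
* emit_vs_postamble(): a DP4 against it remaps the GL clip-space z
* in [-w, w] to the D3D-style range [0, w] (z' = 0.5 * z + 0.5 * w).
*/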
3151 static boolean
3152 emit_vs_preamble(struct svga_shader_emitter *emit)
3153 {
3154 if (!emit->key.vkey.need_prescale) {
3155 if (!make_immediate( emit, 0, 0, .5, .5,
3156 &emit->imm_0055))
3157 return FALSE;
3158 }
3159
3160 return TRUE;
3161 }
3162
3163
3164 /**
3165 * Emit special PS instructions at top of shader.
3166 */
3167 static boolean
3168 emit_ps_preamble(struct svga_shader_emitter *emit)
3169 {
3170 if (emit->ps_reads_pos && emit->info.reads_z) {
3171 /*
3172 * Assemble the position from various bits of inputs. Depth and W are
3173 * passed in a texcoord because D3D's vPos does not hold Z or W.
3174 * Also fix up the perspective interpolation.
3175 *
3176 * temp_pos.xy = vPos.xy
3177 * temp_pos.w = rcp(texcoord1.w);
3178 * temp_pos.z = texcoord1.z * temp_pos.w;
3179 */
3180 if (!submit_op1( emit,
3181 inst_token(SVGA3DOP_MOV),
3182 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_XY ),
3183 emit->ps_true_pos ))
3184 return FALSE;
3185
3186 if (!submit_op1( emit,
3187 inst_token(SVGA3DOP_RCP),
3188 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_W ),
3189 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_W ) ))
3190 return FALSE;
3191
3192 if (!submit_op2( emit,
3193 inst_token(SVGA3DOP_MUL),
3194 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_Z ),
3195 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_Z ),
3196 scalar( src(emit->ps_temp_pos), TGSI_SWIZZLE_W ) ))
3197 return FALSE;
3198 }
3199
3200 return TRUE;
3201 }
3202
3203
3204 /**
3205 * Emit special PS instructions at end of shader.
3206 */
3207 static boolean
3208 emit_ps_postamble(struct svga_shader_emitter *emit)
3209 {
3210 unsigned i;
3211
3212 /* PS oDepth is incredibly fragile and it's very hard to catch the
3213 * types of usage that break it during shader emit. Easier just to
3214 * redirect the main program to a temporary and then only touch
3215 * oDepth with a hand-crafted MOV below.
3216 */
3217 if (SVGA3dShaderGetRegType(emit->true_pos.value) != 0) {
3218 if (!submit_op1( emit,
3219 inst_token(SVGA3DOP_MOV),
3220 emit->true_pos,
3221 scalar(src(emit->temp_pos), TGSI_SWIZZLE_Z) ))
3222 return FALSE;
3223 }
3224
3225 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
3226 if (SVGA3dShaderGetRegType(emit->true_color_output[i].value) != 0) {
3227 /* Potentially override output colors with white for XOR
3228 * logicop workaround.
3229 */
3230 if (emit->unit == PIPE_SHADER_FRAGMENT &&
3231 emit->key.fkey.white_fragments) {
3232 struct src_register one = get_one_immediate(emit);
3233
3234 if (!submit_op1( emit,
3235 inst_token(SVGA3DOP_MOV),
3236 emit->true_color_output[i],
3237 one ))
3238 return FALSE;
3239 }
3240 else if (emit->unit == PIPE_SHADER_FRAGMENT &&
3241 i < emit->key.fkey.write_color0_to_n_cbufs) {
3242 /* Write temp color output [0] to true output [i] */
3243 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV),
3244 emit->true_color_output[i],
3245 src(emit->temp_color_output[0]))) {
3246 return FALSE;
3247 }
3248 }
3249 else {
3250 if (!submit_op1( emit,
3251 inst_token(SVGA3DOP_MOV),
3252 emit->true_color_output[i],
3253 src(emit->temp_color_output[i]) ))
3254 return FALSE;
3255 }
3256 }
3257 }
3258
3259 return TRUE;
3260 }
3261
3262
3263 /**
3264 * Emit special VS instructions at end of shader.
3265 */
3266 static boolean
3267 emit_vs_postamble(struct svga_shader_emitter *emit)
3268 {
3269 /* PSIZ output is incredibly fragile and it's very hard to catch
3270 * the types of usage that break it during shader emit. Easier
3271 * just to redirect the main program to a temporary and then only
3272 * touch PSIZ with a hand-crafted MOV below.
3273 */
3274 if (SVGA3dShaderGetRegType(emit->true_psiz.value) != 0) {
3275 if (!submit_op1( emit,
3276 inst_token(SVGA3DOP_MOV),
3277 emit->true_psiz,
3278 scalar(src(emit->temp_psiz), TGSI_SWIZZLE_X) ))
3279 return FALSE;
3280 }
3281
3282 /* Need to perform various manipulations on vertex position to cope
3283 * with the different GL and D3D clip spaces.
3284 */
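/* In the prescale case the code below effectively computes
* pos = temp_pos * prescale.scale + temp_pos.w * prescale.trans,
* using scale/translate constants stored just past the user constants.
*/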
3285 if (emit->key.vkey.need_prescale) {
3286 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3287 SVGA3dShaderDestToken depth = emit->depth_pos;
3288 SVGA3dShaderDestToken pos = emit->true_pos;
3289 unsigned offset = emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
3290 struct src_register prescale_scale = src_register( SVGA3DREG_CONST,
3291 offset + 0 );
3292 struct src_register prescale_trans = src_register( SVGA3DREG_CONST,
3293 offset + 1 );
3294
3295 if (!submit_op1( emit,
3296 inst_token(SVGA3DOP_MOV),
3297 writemask(depth, TGSI_WRITEMASK_W),
3298 scalar(src(temp_pos), TGSI_SWIZZLE_W) ))
3299 return FALSE;
3300
3301 /* MUL temp_pos.xyz, temp_pos, prescale.scale
3302 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
3303 * --> Note that prescale.trans.w == 0
3304 */
3305 if (!submit_op2( emit,
3306 inst_token(SVGA3DOP_MUL),
3307 writemask(temp_pos, TGSI_WRITEMASK_XYZ),
3308 src(temp_pos),
3309 prescale_scale ))
3310 return FALSE;
3311
3312 if (!submit_op3( emit,
3313 inst_token(SVGA3DOP_MAD),
3314 pos,
3315 swizzle(src(temp_pos), 3, 3, 3, 3),
3316 prescale_trans,
3317 src(temp_pos)))
3318 return FALSE;
3319
3320 /* Also write to depth value */
3321 if (!submit_op3( emit,
3322 inst_token(SVGA3DOP_MAD),
3323 writemask(depth, TGSI_WRITEMASK_Z),
3324 swizzle(src(temp_pos), 3, 3, 3, 3),
3325 prescale_trans,
3326 src(temp_pos) ))
3327 return FALSE;
3328 }
3329 else {
3330 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3331 SVGA3dShaderDestToken depth = emit->depth_pos;
3332 SVGA3dShaderDestToken pos = emit->true_pos;
3333 struct src_register imm_0055 = emit->imm_0055;
3334
3335 /* Adjust GL clipping coordinate space to hardware (D3D-style):
3336 *
3337 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
3338 * MOV result.position, temp_pos
3339 */
3340 if (!submit_op2( emit,
3341 inst_token(SVGA3DOP_DP4),
3342 writemask(temp_pos, TGSI_WRITEMASK_Z),
3343 imm_0055,
3344 src(temp_pos) ))
3345 return FALSE;
3346
3347 if (!submit_op1( emit,
3348 inst_token(SVGA3DOP_MOV),
3349 pos,
3350 src(temp_pos) ))
3351 return FALSE;
3352
3353 /* Move the manipulated depth into the extra texcoord reg */
3354 if (!submit_op1( emit,
3355 inst_token(SVGA3DOP_MOV),
3356 writemask(depth, TGSI_WRITEMASK_ZW),
3357 src(temp_pos) ))
3358 return FALSE;
3359 }
3360
3361 return TRUE;
3362 }
3363
3364
3365 /**
3366 * For the pixel shader: emit the code which chooses the front
3367 * or back face color depending on triangle orientation.
3368 * This happens at the top of the fragment shader.
3369 *
3370 * 0: IF VFACE :4
3371 * 1: COLOR = FrontColor;
3372 * 2: ELSE
3373 * 3: COLOR = BackColor;
3374 * 4: ENDIF
3375 */
3376 static boolean
3377 emit_light_twoside(struct svga_shader_emitter *emit)
3378 {
3379 struct src_register vface, zero;
3380 struct src_register front[2];
3381 struct src_register back[2];
3382 SVGA3dShaderDestToken color[2];
3383 int count = emit->internal_color_count;
3384 int i;
3385 SVGA3dShaderInstToken if_token;
3386
3387 if (count == 0)
3388 return TRUE;
3389
3390 vface = get_vface( emit );
3391 zero = get_zero_immediate(emit);
3392
3393 /* Can't use get_temp() to allocate the color reg as such
3394 * temporaries will be reclaimed after each instruction by the call
3395 * to reset_temp_regs().
3396 */
3397 for (i = 0; i < count; i++) {
3398 color[i] = dst_register( SVGA3DREG_TEMP, emit->nr_hw_temp++ );
3399 front[i] = emit->input_map[emit->internal_color_idx[i]];
3400
3401 /* Back is always the next input:
3402 */
3403 back[i] = front[i];
3404 back[i].base.num = front[i].base.num + 1;
3405
3406 /* Reassign the input_map to the actual front-face color:
3407 */
3408 emit->input_map[emit->internal_color_idx[i]] = src(color[i]);
3409 }
3410
3411 if_token = inst_token( SVGA3DOP_IFC );
3412
3413 if (emit->key.fkey.front_ccw)
3414 if_token.control = SVGA3DOPCOMP_LT;
3415 else
3416 if_token.control = SVGA3DOPCOMP_GT;
3417
3418 if (!(emit_instruction( emit, if_token ) &&
3419 emit_src( emit, vface ) &&
3420 emit_src( emit, zero ) ))
3421 return FALSE;
3422
3423 for (i = 0; i < count; i++) {
3424 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], front[i] ))
3425 return FALSE;
3426 }
3427
3428 if (!(emit_instruction( emit, inst_token( SVGA3DOP_ELSE))))
3429 return FALSE;
3430
3431 for (i = 0; i < count; i++) {
3432 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], back[i] ))
3433 return FALSE;
3434 }
3435
3436 if (!emit_instruction( emit, inst_token( SVGA3DOP_ENDIF ) ))
3437 return FALSE;
3438
3439 return TRUE;
3440 }
3441
3442
3443 /**
3444 * Emit special setup code for the front/back face register in the FS.
3445 * 0: SETP_GT TEMP, VFACE, 0
3446 * where TEMP is a fake frontface register
3447 */
3448 static boolean
3449 emit_frontface(struct svga_shader_emitter *emit)
3450 {
3451 struct src_register vface;
3452 SVGA3dShaderDestToken temp;
3453 struct src_register pass, fail;
3454
3455 vface = get_vface( emit );
3456
3457 /* Can't use get_temp() to allocate the fake frontface reg as such
3458 * temporaries will be reclaimed after each instruction by the call
3459 * to reset_temp_regs().
3460 */
3461 temp = dst_register( SVGA3DREG_TEMP,
3462 emit->nr_hw_temp++ );
3463
3464 if (emit->key.fkey.front_ccw) {
3465 pass = get_zero_immediate(emit);
3466 fail = get_one_immediate(emit);
3467 } else {
3468 pass = get_one_immediate(emit);
3469 fail = get_zero_immediate(emit);
3470 }
3471
3472 if (!emit_conditional(emit, PIPE_FUNC_GREATER,
3473 temp, vface, get_zero_immediate(emit),
3474 pass, fail))
3475 return FALSE;
3476
3477 /* Reassign the input_map to the actual front-face color:
3478 */
3479 emit->input_map[emit->internal_frontface_idx] = src(temp);
3480
3481 return TRUE;
3482 }
3483
3484
3485 /**
3486 * Emit code to invert the T component of the incoming texture coordinate.
3487 * This is used for drawing point sprites when
3488 * pipe_rasterizer_state::sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT.
3489 */
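/*
* The MAD emitted below computes (s, 1 - t, r, q); e.g. an incoming
* t of 0.25 becomes 0.75.
*/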
3490 static boolean
3491 emit_inverted_texcoords(struct svga_shader_emitter *emit)
3492 {
3493 unsigned inverted_texcoords = emit->inverted_texcoords;
3494
3495 while (inverted_texcoords) {
3496 const unsigned unit = ffs(inverted_texcoords) - 1;
3497
3498 assert(emit->inverted_texcoords & (1 << unit));
3499
3500 assert(unit < Elements(emit->ps_true_texcoord));
3501
3502 assert(unit < Elements(emit->ps_inverted_texcoord_input));
3503
3504 assert(emit->ps_inverted_texcoord_input[unit]
3505 < Elements(emit->input_map));
3506
3507 /* inverted = coord * (1, -1, 1, 1) + (0, 1, 0, 0) */
3508 if (!submit_op3(emit,
3509 inst_token(SVGA3DOP_MAD),
3510 dst(emit->ps_inverted_texcoord[unit]),
3511 emit->ps_true_texcoord[unit],
3512 get_immediate(emit, 1.0f, -1.0f, 1.0f, 1.0f),
3513 get_immediate(emit, 0.0f, 1.0f, 0.0f, 0.0f)))
3514 return FALSE;
3515
3516 /* Reassign the input_map entry to the new texcoord register */
3517 emit->input_map[emit->ps_inverted_texcoord_input[unit]] =
3518 emit->ps_inverted_texcoord[unit];
3519
3520 inverted_texcoords &= ~(1 << unit);
3521 }
3522
3523 return TRUE;
3524 }
3525
3526
3527 /**
3528 * Emit code to adjust vertex shader inputs/attributes:
3529 * - Change range from [0,1] to [-1,1] (for normalized byte/short attribs).
3530 * - Set attrib W component = 1.
3531 */
3532 static boolean
3533 emit_adjusted_vertex_attribs(struct svga_shader_emitter *emit)
3534 {
3535 unsigned adjust_mask = (emit->key.vkey.adjust_attrib_range |
3536 emit->key.vkey.adjust_attrib_w_1);
3537
3538 while (adjust_mask) {
3539 /* Adjust vertex attrib range and/or set W component = 1 */
3540 const unsigned index = u_bit_scan(&adjust_mask);
3541 struct src_register tmp;
3542
3543 /* allocate a temp reg */
3544 tmp = src_register(SVGA3DREG_TEMP, emit->nr_hw_temp);
3545 emit->nr_hw_temp++;
3546
3547 if (emit->key.vkey.adjust_attrib_range & (1 << index)) {
3548 /* The vertex input/attribute is supposed to be a signed value in
3549 * the range [-1,1] but we actually fetched/converted it to the
3550 * range [0,1]. This most likely happens when the app specifies a
3551 * signed byte attribute but we interpreted it as unsigned bytes.
3552 * See also svga_translate_vertex_format().
3553 *
3554 * Here, we emit some extra instructions to adjust
3555 * the attribute values from [0,1] to [-1,1].
3556 *
3557 * The adjustment we implement is:
3558 * new_attrib = attrib * 2.0;
3559 * if (attrib >= 0.5)
3560 * new_attrib = new_attrib - 2.0;
3561 * This isn't exactly right (it's off by a bit or so) but close enough.
3562 */
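/*
* Worked example: a signed byte 0x80 (intended value -1.0) fetched as
* unsigned becomes 128/255 = ~0.502; 0.502 * 2.0 = ~1.004, and since
* 0.502 >= 0.5 we subtract 2.0, giving ~-0.996 -- close to -1.0.
*/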
3563 SVGA3dShaderDestToken pred_reg = dst_register(SVGA3DREG_PREDICATE, 0);
3564
3565 /* tmp = attrib * 2.0 */
3566 if (!submit_op2(emit,
3567 inst_token(SVGA3DOP_MUL),
3568 dst(tmp),
3569 emit->input_map[index],
3570 get_two_immediate(emit)))
3571 return FALSE;
3572
3573 /* pred = (attrib >= 0.5) */
3574 if (!submit_op2(emit,
3575 inst_token_setp(SVGA3DOPCOMP_GE),
3576 pred_reg,
3577 emit->input_map[index], /* vert attrib */
3578 get_half_immediate(emit))) /* 0.5 */
3579 return FALSE;
3580
3581 /* sub(pred) tmp, tmp, 2.0 */
3582 if (!submit_op3(emit,
3583 inst_token_predicated(SVGA3DOP_SUB),
3584 dst(tmp),
3585 src(pred_reg),
3586 tmp,
3587 get_two_immediate(emit)))
3588 return FALSE;
3589 }
3590 else {
3591 /* just copy the vertex input attrib to the temp register */
3592 if (!submit_op1(emit,
3593 inst_token(SVGA3DOP_MOV),
3594 dst(tmp),
3595 emit->input_map[index]))
3596 return FALSE;
3597 }
3598
3599 if (emit->key.vkey.adjust_attrib_w_1 & (1 << index)) {
3600 /* move 1 into W position of tmp */
3601 if (!submit_op1(emit,
3602 inst_token(SVGA3DOP_MOV),
3603 writemask(dst(tmp), TGSI_WRITEMASK_W),
3604 get_one_immediate(emit)))
3605 return FALSE;
3606 }
3607
3608 /* Reassign the input_map entry to the new tmp register */
3609 emit->input_map[index] = tmp;
3610 }
3611
3612 return TRUE;
3613 }
3614
3615
3616 /**
3617 * Determine if we need to create the "common" immediate value which is
3618 * used for generating useful vector constants such as {0,0,0,0} and
3619 * {1,1,1,1}.
3620 * We could just do this all the time except that we want to conserve
3621 * registers whenever possible.
3622 */
3623 static boolean
3624 needs_to_create_common_immediate(const struct svga_shader_emitter *emit)
3625 {
3626 unsigned i;
3627
3628 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3629 if (emit->key.fkey.light_twoside)
3630 return TRUE;
3631
3632 if (emit->key.fkey.white_fragments)
3633 return TRUE;
3634
3635 if (emit->emit_frontface)
3636 return TRUE;
3637
3638 if (emit->info.opcode_count[TGSI_OPCODE_DST] >= 1 ||
3639 emit->info.opcode_count[TGSI_OPCODE_SSG] >= 1 ||
3640 emit->info.opcode_count[TGSI_OPCODE_LIT] >= 1)
3641 return TRUE;
3642
3643 if (emit->inverted_texcoords)
3644 return TRUE;
3645
3646 /* look for any PIPE_SWIZZLE_ZERO/ONE terms */
3647 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3648 if (emit->key.fkey.tex[i].swizzle_r > PIPE_SWIZZLE_ALPHA ||
3649 emit->key.fkey.tex[i].swizzle_g > PIPE_SWIZZLE_ALPHA ||
3650 emit->key.fkey.tex[i].swizzle_b > PIPE_SWIZZLE_ALPHA ||
3651 emit->key.fkey.tex[i].swizzle_a > PIPE_SWIZZLE_ALPHA)
3652 return TRUE;
3653 }
3654
3655 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3656 if (emit->key.fkey.tex[i].compare_mode
3657 == PIPE_TEX_COMPARE_R_TO_TEXTURE)
3658 return TRUE;
3659 }
3660 }
3661 else if (emit->unit == PIPE_SHADER_VERTEX) {
3662 if (emit->info.opcode_count[TGSI_OPCODE_CMP] >= 1)
3663 return TRUE;
3664 if (emit->key.vkey.adjust_attrib_range ||
3665 emit->key.vkey.adjust_attrib_w_1)
3666 return TRUE;
3667 }
3668
3669 if (emit->info.opcode_count[TGSI_OPCODE_IF] >= 1 ||
3670 emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1 ||
3671 emit->info.opcode_count[TGSI_OPCODE_DDX] >= 1 ||
3672 emit->info.opcode_count[TGSI_OPCODE_DDY] >= 1 ||
3673 emit->info.opcode_count[TGSI_OPCODE_ROUND] >= 1 ||
3674 emit->info.opcode_count[TGSI_OPCODE_SGE] >= 1 ||
3675 emit->info.opcode_count[TGSI_OPCODE_SGT] >= 1 ||
3676 emit->info.opcode_count[TGSI_OPCODE_SLE] >= 1 ||
3677 emit->info.opcode_count[TGSI_OPCODE_SLT] >= 1 ||
3678 emit->info.opcode_count[TGSI_OPCODE_SNE] >= 1 ||
3679 emit->info.opcode_count[TGSI_OPCODE_SEQ] >= 1 ||
3680 emit->info.opcode_count[TGSI_OPCODE_EXP] >= 1 ||
3681 emit->info.opcode_count[TGSI_OPCODE_LOG] >= 1 ||
3682 emit->info.opcode_count[TGSI_OPCODE_XPD] >= 1 ||
3683 emit->info.opcode_count[TGSI_OPCODE_KILL] >= 1)
3684 return TRUE;
3685
3686 return FALSE;
3687 }
3688
3689
3690 /**
3691 * Do we need to create a looping constant?
3692 */
3693 static boolean
3694 needs_to_create_loop_const(const struct svga_shader_emitter *emit)
3695 {
3696 return (emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1);
3697 }
3698
3699
3700 static boolean
3701 needs_to_create_arl_consts(const struct svga_shader_emitter *emit)
3702 {
3703 return (emit->num_arl_consts > 0);
3704 }
3705
3706
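/**
* Record the most negative relative (indirect) register index seen for
* the given ARL instance. The per-ARL minimum gathered here is
* presumably what svga_arl_needs_adjustment()/emit_fake_arl() use to
* bias the address register so indirect accesses stay in range.
*/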
3707 static boolean
3708 pre_parse_add_indirect( struct svga_shader_emitter *emit,
3709 int num, int current_arl)
3710 {
3711 int i;
3712 assert(num < 0);
3713
3714 for (i = 0; i < emit->num_arl_consts; ++i) {
3715 if (emit->arl_consts[i].arl_num == current_arl)
3716 break;
3717 }
3718 /* new entry */
3719 if (emit->num_arl_consts == i) {
3720 ++emit->num_arl_consts;
3721 }
3722 emit->arl_consts[i].number = (emit->arl_consts[i].number > num) ?
3723 num :
3724 emit->arl_consts[i].number;
3725 emit->arl_consts[i].arl_num = current_arl;
3726 return TRUE;
3727 }
3728
3729
3730 static boolean
3731 pre_parse_instruction( struct svga_shader_emitter *emit,
3732 const struct tgsi_full_instruction *insn,
3733 int current_arl)
3734 {
3735 if (insn->Src[0].Register.Indirect &&
3736 insn->Src[0].Indirect.File == TGSI_FILE_ADDRESS) {
3737 const struct tgsi_full_src_register *reg = &insn->Src[0];
3738 if (reg->Register.Index < 0) {
3739 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3740 }
3741 }
3742
3743 if (insn->Src[1].Register.Indirect &&
3744 insn->Src[1].Indirect.File == TGSI_FILE_ADDRESS) {
3745 const struct tgsi_full_src_register *reg = &insn->Src[1];
3746 if (reg->Register.Index < 0) {
3747 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3748 }
3749 }
3750
3751 if (insn->Src[2].Register.Indirect &&
3752 insn->Src[2].Indirect.File == TGSI_FILE_ADDRESS) {
3753 const struct tgsi_full_src_register *reg = &insn->Src[2];
3754 if (reg->Register.Index < 0) {
3755 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3756 }
3757 }
3758
3759 return TRUE;
3760 }
3761
3762
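/**
* Pre-scan the TGSI token stream: count ARL instructions and note any
* negative indirect register indices that follow each one (see
* pre_parse_add_indirect() above).
*/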
3763 static boolean
3764 pre_parse_tokens( struct svga_shader_emitter *emit,
3765 const struct tgsi_token *tokens )
3766 {
3767 struct tgsi_parse_context parse;
3768 int current_arl = 0;
3769
3770 tgsi_parse_init( &parse, tokens );
3771
3772 while (!tgsi_parse_end_of_tokens( &parse )) {
3773 tgsi_parse_token( &parse );
3774 switch (parse.FullToken.Token.Type) {
3775 case TGSI_TOKEN_TYPE_IMMEDIATE:
3776 case TGSI_TOKEN_TYPE_DECLARATION:
3777 break;
3778 case TGSI_TOKEN_TYPE_INSTRUCTION:
3779 if (parse.FullToken.FullInstruction.Instruction.Opcode ==
3780 TGSI_OPCODE_ARL) {
3781 ++current_arl;
3782 }
3783 if (!pre_parse_instruction( emit, &parse.FullToken.FullInstruction,
3784 current_arl ))
3785 return FALSE;
3786 break;
3787 default:
3788 break;
3789 }
3790
3791 }
3792 return TRUE;
3793 }
3794
3795
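/**
* Emit the shared immediates/constants plus the per-unit helper code
* (two-sided lighting, front-face setup and inverted texcoords for the
* fragment shader; attribute adjustments for the vertex shader).
* Called once, just before the first TGSI instruction is translated.
*/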
3796 static boolean
3797 svga_shader_emit_helpers(struct svga_shader_emitter *emit)
3798 {
3799 if (needs_to_create_common_immediate( emit )) {
3800 create_common_immediate( emit );
3801 }
3802 if (needs_to_create_loop_const( emit )) {
3803 create_loop_const( emit );
3804 }
3805 if (needs_to_create_arl_consts( emit )) {
3806 create_arl_consts( emit );
3807 }
3808
3809 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3810 if (!emit_ps_preamble( emit ))
3811 return FALSE;
3812
3813 if (emit->key.fkey.light_twoside) {
3814 if (!emit_light_twoside( emit ))
3815 return FALSE;
3816 }
3817 if (emit->emit_frontface) {
3818 if (!emit_frontface( emit ))
3819 return FALSE;
3820 }
3821 if (emit->inverted_texcoords) {
3822 if (!emit_inverted_texcoords( emit ))
3823 return FALSE;
3824 }
3825 }
3826 else {
3827 assert(emit->unit == PIPE_SHADER_VERTEX);
3828 if (emit->key.vkey.adjust_attrib_range ||
3829 emit->key.vkey.adjust_attrib_w_1) {
3830 if (!emit_adjusted_vertex_attribs(emit))
3831 return FALSE;
3832 }
3833 }
3834
3835
3836 return TRUE;
3837 }
3838
3839
3840 /**
3841 * This is the main entrypoint into the TGSI instruction translator.
3842 * Translate TGSI shader tokens into an SVGA shader.
3843 */
3844 boolean
3845 svga_shader_emit_instructions(struct svga_shader_emitter *emit,
3846 const struct tgsi_token *tokens)
3847 {
3848 struct tgsi_parse_context parse;
3849 boolean ret = TRUE;
3850 boolean helpers_emitted = FALSE;
3851 unsigned line_nr = 0;
3852
3853 tgsi_parse_init( &parse, tokens );
3854 emit->internal_imm_count = 0;
3855
3856 if (emit->unit == PIPE_SHADER_VERTEX) {
3857 ret = emit_vs_preamble( emit );
3858 if (!ret)
3859 goto done;
3860 }
3861
3862 pre_parse_tokens(emit, tokens);
3863
3864 while (!tgsi_parse_end_of_tokens( &parse )) {
3865 tgsi_parse_token( &parse );
3866
3867 switch (parse.FullToken.Token.Type) {
3868 case TGSI_TOKEN_TYPE_IMMEDIATE:
3869 ret = svga_emit_immediate( emit, &parse.FullToken.FullImmediate );
3870 if (!ret)
3871 goto done;
3872 break;
3873
3874 case TGSI_TOKEN_TYPE_DECLARATION:
3875 ret = svga_translate_decl_sm30( emit, &parse.FullToken.FullDeclaration );
3876 if (!ret)
3877 goto done;
3878 break;
3879
3880 case TGSI_TOKEN_TYPE_INSTRUCTION:
3881 if (!helpers_emitted) {
3882 if (!svga_shader_emit_helpers( emit ))
3883 goto done;
3884 helpers_emitted = TRUE;
3885 }
3886 ret = svga_emit_instruction( emit,
3887 line_nr++,
3888 &parse.FullToken.FullInstruction );
3889 if (!ret)
3890 goto done;
3891 break;
3892 default:
3893 break;
3894 }
3895
3896 reset_temp_regs( emit );
3897 }
3898
3899 /* Need to terminate the current subroutine. Note that the
3900 * hardware doesn't tolerate sub-routines that fail to terminate
3901 * with RET before the final END.
3902 */
3903 if (!emit->in_main_func) {
3904 ret = emit_instruction( emit, inst_token( SVGA3DOP_RET ) );
3905 if (!ret)
3906 goto done;
3907 }
3908
3909 assert(emit->dynamic_branching_level == 0);
3910
3911 /* Need to terminate the whole shader:
3912 */
3913 ret = emit_instruction( emit, inst_token( SVGA3DOP_END ) );
3914 if (!ret)
3915 goto done;
3916
3917 done:
3918 tgsi_parse_free( &parse );
3919 return ret;
3920 }