tgsi: rename the TGSI fragment kill opcodes
[mesa.git] / src / gallium / drivers / svga / svga_tgsi_insn.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_dump.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
32
33 #include "svga_tgsi_emit.h"
34 #include "svga_context.h"
35
36
37 static boolean emit_vs_postamble( struct svga_shader_emitter *emit );
38 static boolean emit_ps_postamble( struct svga_shader_emitter *emit );
39
40
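/**
 * Map a TGSI opcode to the equivalent SVGA3D shader opcode.  Only opcodes
 * with a direct 1:1 translation are handled here; everything else is
 * expanded by the emit_* helpers further below.
 */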
41 static unsigned
42 translate_opcode(uint opcode)
43 {
44 switch (opcode) {
45 case TGSI_OPCODE_ABS: return SVGA3DOP_ABS;
46 case TGSI_OPCODE_ADD: return SVGA3DOP_ADD;
47 case TGSI_OPCODE_DP2A: return SVGA3DOP_DP2ADD;
48 case TGSI_OPCODE_DP3: return SVGA3DOP_DP3;
49 case TGSI_OPCODE_DP4: return SVGA3DOP_DP4;
50 case TGSI_OPCODE_FRC: return SVGA3DOP_FRC;
51 case TGSI_OPCODE_MAD: return SVGA3DOP_MAD;
52 case TGSI_OPCODE_MAX: return SVGA3DOP_MAX;
53 case TGSI_OPCODE_MIN: return SVGA3DOP_MIN;
54 case TGSI_OPCODE_MOV: return SVGA3DOP_MOV;
55 case TGSI_OPCODE_MUL: return SVGA3DOP_MUL;
56 case TGSI_OPCODE_NOP: return SVGA3DOP_NOP;
57 case TGSI_OPCODE_NRM4: return SVGA3DOP_NRM;
58 default:
59 debug_printf("Unknown opcode %u\n", opcode);
60 assert( 0 );
61 return SVGA3DOP_LAST_INST;
62 }
63 }
64
65
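/**
 * Map a TGSI register file to the corresponding SVGA3D register file.
 */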
66 static unsigned
67 translate_file(unsigned file)
68 {
69 switch (file) {
70 case TGSI_FILE_TEMPORARY: return SVGA3DREG_TEMP;
71 case TGSI_FILE_INPUT: return SVGA3DREG_INPUT;
72 case TGSI_FILE_OUTPUT: return SVGA3DREG_OUTPUT; /* VS3.0+ only */
73 case TGSI_FILE_IMMEDIATE: return SVGA3DREG_CONST;
74 case TGSI_FILE_CONSTANT: return SVGA3DREG_CONST;
75 case TGSI_FILE_SAMPLER: return SVGA3DREG_SAMPLER;
76 case TGSI_FILE_ADDRESS: return SVGA3DREG_ADDR;
77 default:
78 assert( 0 );
79 return SVGA3DREG_TEMP;
80 }
81 }
82
83
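/**
 * Translate a TGSI destination register to an SVGA3D destination token,
 * using the output mapping built at declaration time and applying the
 * write mask and optional saturate modifier.
 */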
84 static SVGA3dShaderDestToken
85 translate_dst_register( struct svga_shader_emitter *emit,
86 const struct tgsi_full_instruction *insn,
87 unsigned idx )
88 {
89 const struct tgsi_full_dst_register *reg = &insn->Dst[idx];
90 SVGA3dShaderDestToken dest;
91
92 switch (reg->Register.File) {
93 case TGSI_FILE_OUTPUT:
94 /* Output registers encode semantic information in their name.
95 * Need to look up a table built at decl time:
96 */
97 dest = emit->output_map[reg->Register.Index];
98 break;
99
100 default:
101 {
102 unsigned index = reg->Register.Index;
103 assert(index < SVGA3D_TEMPREG_MAX);
104 index = MIN2(index, SVGA3D_TEMPREG_MAX - 1);
105 dest = dst_register(translate_file(reg->Register.File), index);
106 }
107 break;
108 }
109
110 dest.mask = reg->Register.WriteMask;
111 assert(dest.mask);
112
113 if (insn->Instruction.Saturate)
114 dest.dstMod = SVGA3DDSTMOD_SATURATE;
115
116 return dest;
117 }
118
119
120 /**
121 * Apply a swizzle to a src_register, returning a new src_register
122 * Ex: swizzle(SRC.ZZYY, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y)
123 * would return SRC.YYZZ
124 */
125 static struct src_register
126 swizzle(struct src_register src,
127 unsigned x, unsigned y, unsigned z, unsigned w)
128 {
129 assert(x < 4);
130 assert(y < 4);
131 assert(z < 4);
132 assert(w < 4);
133 x = (src.base.swizzle >> (x * 2)) & 0x3;
134 y = (src.base.swizzle >> (y * 2)) & 0x3;
135 z = (src.base.swizzle >> (z * 2)) & 0x3;
136 w = (src.base.swizzle >> (w * 2)) & 0x3;
137
138 src.base.swizzle = TRANSLATE_SWIZZLE(x, y, z, w);
139
140 return src;
141 }
142
143
144 /**
145 * Apply a "scalar" swizzle to a src_register returning a new
146 * src_register where all the swizzle terms are the same.
147 * Ex: scalar(SRC.WZYX, SWIZZLE_Y) would return SRC.ZZZZ
148 */
149 static struct src_register
150 scalar(struct src_register src, unsigned comp)
151 {
152 assert(comp < 4);
153 return swizzle( src, comp, comp, comp, comp );
154 }
155
156
157 static boolean
158 svga_arl_needs_adjustment( const struct svga_shader_emitter *emit )
159 {
160 int i;
161
162 for (i = 0; i < emit->num_arl_consts; ++i) {
163 if (emit->arl_consts[i].arl_num == emit->current_arl)
164 return TRUE;
165 }
166 return FALSE;
167 }
168
169
170 static int
171 svga_arl_adjustment( const struct svga_shader_emitter *emit )
172 {
173 int i;
174
175 for (i = 0; i < emit->num_arl_consts; ++i) {
176 if (emit->arl_consts[i].arl_num == emit->current_arl)
177 return emit->arl_consts[i].number;
178 }
179 return 0;
180 }
181
182
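/**
 * Translate a TGSI source register to an SVGA3D source token: map the
 * register file and index, handle indirect addressing, and apply the
 * swizzle and absolute/negate modifiers.
 */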
183 static struct src_register
184 translate_src_register( const struct svga_shader_emitter *emit,
185 const struct tgsi_full_src_register *reg )
186 {
187 struct src_register src;
188
189 switch (reg->Register.File) {
190 case TGSI_FILE_INPUT:
191 /* Input registers are referred to by their semantic name rather
192 * than by index. Use the mapping built up from the decls:
193 */
194 src = emit->input_map[reg->Register.Index];
195 break;
196
197 case TGSI_FILE_IMMEDIATE:
198 /* Immediates are appended after TGSI constants in the D3D
199 * constant buffer.
200 */
201 src = src_register( translate_file( reg->Register.File ),
202 reg->Register.Index + emit->imm_start );
203 break;
204
205 default:
206 src = src_register( translate_file( reg->Register.File ),
207 reg->Register.Index );
208 break;
209 }
210
211 /* Indirect addressing.
212 */
213 if (reg->Register.Indirect) {
214 if (emit->unit == PIPE_SHADER_FRAGMENT) {
215 /* Pixel shaders have only loop registers for relative
216 * addressing into inputs. Ignore the redundant address
217 * register, the contents of aL should be in sync with it.
218 */
219 if (reg->Register.File == TGSI_FILE_INPUT) {
220 src.base.relAddr = 1;
221 src.indirect = src_token(SVGA3DREG_LOOP, 0);
222 }
223 }
224 else {
225 /* Constant buffers only.
226 */
227 if (reg->Register.File == TGSI_FILE_CONSTANT) {
228 /* we shift the offset towards the minimum */
229 if (svga_arl_needs_adjustment( emit )) {
230 src.base.num -= svga_arl_adjustment( emit );
231 }
232 src.base.relAddr = 1;
233
234 /* Not really sure what should go in the second token:
235 */
236 src.indirect = src_token( SVGA3DREG_ADDR,
237 reg->Indirect.Index );
238
239 src.indirect.swizzle = SWIZZLE_XXXX;
240 }
241 }
242 }
243
244 src = swizzle( src,
245 reg->Register.SwizzleX,
246 reg->Register.SwizzleY,
247 reg->Register.SwizzleZ,
248 reg->Register.SwizzleW );
249
250 /* src.mod isn't a bitfield, unfortunately:
251 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
252 */
253 if (reg->Register.Absolute) {
254 if (reg->Register.Negate)
255 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
256 else
257 src.base.srcMod = SVGA3DSRCMOD_ABS;
258 }
259 else {
260 if (reg->Register.Negate)
261 src.base.srcMod = SVGA3DSRCMOD_NEG;
262 else
263 src.base.srcMod = SVGA3DSRCMOD_NONE;
264 }
265
266 return src;
267 }
268
269
270 /*
271 * Get a temporary register.
272 * Note: if we exceed the temporary register limit we just use
273 * register SVGA3D_TEMPREG_MAX - 1.
274 */
275 static SVGA3dShaderDestToken
276 get_temp( struct svga_shader_emitter *emit )
277 {
278 int i = emit->nr_hw_temp + emit->internal_temp_count++;
279 assert(i < SVGA3D_TEMPREG_MAX);
280 i = MIN2(i, SVGA3D_TEMPREG_MAX - 1);
281 return dst_register( SVGA3DREG_TEMP, i );
282 }
283
284
285 /**
286 * Release a single temp. Currently only effective if it was the last
287 * allocated temp, otherwise release will be delayed until the next
288 * call to reset_temp_regs().
289 */
290 static void
291 release_temp( struct svga_shader_emitter *emit,
292 SVGA3dShaderDestToken temp )
293 {
294 if (temp.num == emit->internal_temp_count - 1)
295 emit->internal_temp_count--;
296 }
297
298
299 static void
300 reset_temp_regs(struct svga_shader_emitter *emit)
301 {
302 emit->internal_temp_count = 0;
303 }
304
305
306 /** Emit bytecode for a src_register */
307 static boolean
308 emit_src(struct svga_shader_emitter *emit, const struct src_register src)
309 {
310 if (src.base.relAddr) {
311 assert(src.base.reserved0);
312 assert(src.indirect.reserved0);
313 return (svga_shader_emit_dword( emit, src.base.value ) &&
314 svga_shader_emit_dword( emit, src.indirect.value ));
315 }
316 else {
317 assert(src.base.reserved0);
318 return svga_shader_emit_dword( emit, src.base.value );
319 }
320 }
321
322
323 /** Emit bytecode for a dst_register */
324 static boolean
325 emit_dst(struct svga_shader_emitter *emit, SVGA3dShaderDestToken dest)
326 {
327 assert(dest.reserved0);
328 assert(dest.mask);
329 return svga_shader_emit_dword( emit, dest.value );
330 }
331
332
333 /** Emit bytecode for a 1-operand instruction */
334 static boolean
335 emit_op1(struct svga_shader_emitter *emit,
336 SVGA3dShaderInstToken inst,
337 SVGA3dShaderDestToken dest,
338 struct src_register src0)
339 {
340 return (emit_instruction(emit, inst) &&
341 emit_dst(emit, dest) &&
342 emit_src(emit, src0));
343 }
344
345
346 /** Emit bytecode for a 2-operand instruction */
347 static boolean
348 emit_op2(struct svga_shader_emitter *emit,
349 SVGA3dShaderInstToken inst,
350 SVGA3dShaderDestToken dest,
351 struct src_register src0,
352 struct src_register src1)
353 {
354 return (emit_instruction(emit, inst) &&
355 emit_dst(emit, dest) &&
356 emit_src(emit, src0) &&
357 emit_src(emit, src1));
358 }
359
360
361 /** Emit bytecode for a 3-operand instruction */
362 static boolean
363 emit_op3(struct svga_shader_emitter *emit,
364 SVGA3dShaderInstToken inst,
365 SVGA3dShaderDestToken dest,
366 struct src_register src0,
367 struct src_register src1,
368 struct src_register src2)
369 {
370 return (emit_instruction(emit, inst) &&
371 emit_dst(emit, dest) &&
372 emit_src(emit, src0) &&
373 emit_src(emit, src1) &&
374 emit_src(emit, src2));
375 }
376
377
378 /** Emit bytecode for a 4-operand instruction */
379 static boolean
380 emit_op4(struct svga_shader_emitter *emit,
381 SVGA3dShaderInstToken inst,
382 SVGA3dShaderDestToken dest,
383 struct src_register src0,
384 struct src_register src1,
385 struct src_register src2,
386 struct src_register src3)
387 {
388 return (emit_instruction(emit, inst) &&
389 emit_dst(emit, dest) &&
390 emit_src(emit, src0) &&
391 emit_src(emit, src1) &&
392 emit_src(emit, src2) &&
393 emit_src(emit, src3));
394 }
395
396
397 /**
398 * Apply the absolute value modifier to the given src_register, returning
399 * a new src_register.
400 */
401 static struct src_register
402 absolute(struct src_register src)
403 {
404 src.base.srcMod = SVGA3DSRCMOD_ABS;
405 return src;
406 }
407
408
409 /**
410 * Apply the negation modifier to the given src_register, returning
411 * a new src_register.
412 */
413 static struct src_register
414 negate(struct src_register src)
415 {
416 switch (src.base.srcMod) {
417 case SVGA3DSRCMOD_ABS:
418 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
419 break;
420 case SVGA3DSRCMOD_ABSNEG:
421 src.base.srcMod = SVGA3DSRCMOD_ABS;
422 break;
423 case SVGA3DSRCMOD_NEG:
424 src.base.srcMod = SVGA3DSRCMOD_NONE;
425 break;
426 case SVGA3DSRCMOD_NONE:
427 src.base.srcMod = SVGA3DSRCMOD_NEG;
428 break;
429 }
430 return src;
431 }
432
433
434
435 /* Replace the src with the temporary specified in the dst, but copying
436 * only the necessary channels, and preserving the original swizzle (which is
437 * important given that several opcodes have constraints in the allowed
438 * swizzles).
439 */
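/* For example (illustrative): if src0 carries the .yyzz swizzle, only the
 * Y and Z channels of the temporary are written, and the returned source
 * keeps the .yyzz swizzle so any opcode-specific swizzle constraint still
 * holds.
 */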
440 static boolean
441 emit_repl(struct svga_shader_emitter *emit,
442 SVGA3dShaderDestToken dst,
443 struct src_register *src0)
444 {
445 unsigned src0_swizzle;
446 unsigned chan;
447
448 assert(SVGA3dShaderGetRegType(dst.value) == SVGA3DREG_TEMP);
449
450 src0_swizzle = src0->base.swizzle;
451
452 dst.mask = 0;
453 for (chan = 0; chan < 4; ++chan) {
454 unsigned swizzle = (src0_swizzle >> (chan *2)) & 0x3;
455 dst.mask |= 1 << swizzle;
456 }
457 assert(dst.mask);
458
459 src0->base.swizzle = SVGA3DSWIZZLE_NONE;
460
461 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, *src0 ))
462 return FALSE;
463
464 *src0 = src( dst );
465 src0->base.swizzle = src0_swizzle;
466
467 return TRUE;
468 }
469
470
471 static boolean
472 submit_op0(struct svga_shader_emitter *emit,
473 SVGA3dShaderInstToken inst,
474 SVGA3dShaderDestToken dest)
475 {
476 return (emit_instruction( emit, inst ) &&
477 emit_dst( emit, dest ));
478 }
479
480
481 static boolean
482 submit_op1(struct svga_shader_emitter *emit,
483 SVGA3dShaderInstToken inst,
484 SVGA3dShaderDestToken dest,
485 struct src_register src0)
486 {
487 return emit_op1( emit, inst, dest, src0 );
488 }
489
490
491 /**
492 * SVGA shaders may not refer to >1 constant register in a single
493 * instruction. This function checks for that usage and inserts a
494 * move to temporary if detected.
495 *
496 * The same applies to input registers -- at most a single input
497 * register may be read by any instruction.
498 */
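/* Illustrative example: a TGSI instruction such as
 * ADD TEMP[0], CONST[1], CONST[2]
 * reads two distinct constant registers, so it would be emitted roughly as
 * MOV t, c1
 * ADD r0, t, c2
 * with the first constant copied into a temporary.
 */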
499 static boolean
500 submit_op2(struct svga_shader_emitter *emit,
501 SVGA3dShaderInstToken inst,
502 SVGA3dShaderDestToken dest,
503 struct src_register src0,
504 struct src_register src1)
505 {
506 SVGA3dShaderDestToken temp;
507 SVGA3dShaderRegType type0, type1;
508 boolean need_temp = FALSE;
509
510 temp.value = 0;
511 type0 = SVGA3dShaderGetRegType( src0.base.value );
512 type1 = SVGA3dShaderGetRegType( src1.base.value );
513
514 if (type0 == SVGA3DREG_CONST &&
515 type1 == SVGA3DREG_CONST &&
516 src0.base.num != src1.base.num)
517 need_temp = TRUE;
518
519 if (type0 == SVGA3DREG_INPUT &&
520 type1 == SVGA3DREG_INPUT &&
521 src0.base.num != src1.base.num)
522 need_temp = TRUE;
523
524 if (need_temp) {
525 temp = get_temp( emit );
526
527 if (!emit_repl( emit, temp, &src0 ))
528 return FALSE;
529 }
530
531 if (!emit_op2( emit, inst, dest, src0, src1 ))
532 return FALSE;
533
534 if (need_temp)
535 release_temp( emit, temp );
536
537 return TRUE;
538 }
539
540
541 /**
542 * SVGA shaders may not refer to >1 constant register in a single
543 * instruction. This function checks for that usage and inserts a
544 * move to temporary if detected.
545 */
546 static boolean
547 submit_op3(struct svga_shader_emitter *emit,
548 SVGA3dShaderInstToken inst,
549 SVGA3dShaderDestToken dest,
550 struct src_register src0,
551 struct src_register src1,
552 struct src_register src2)
553 {
554 SVGA3dShaderDestToken temp0;
555 SVGA3dShaderDestToken temp1;
556 boolean need_temp0 = FALSE;
557 boolean need_temp1 = FALSE;
558 SVGA3dShaderRegType type0, type1, type2;
559
560 temp0.value = 0;
561 temp1.value = 0;
562 type0 = SVGA3dShaderGetRegType( src0.base.value );
563 type1 = SVGA3dShaderGetRegType( src1.base.value );
564 type2 = SVGA3dShaderGetRegType( src2.base.value );
565
566 if (inst.op != SVGA3DOP_SINCOS) {
567 if (type0 == SVGA3DREG_CONST &&
568 ((type1 == SVGA3DREG_CONST && src0.base.num != src1.base.num) ||
569 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
570 need_temp0 = TRUE;
571
572 if (type1 == SVGA3DREG_CONST &&
573 (type2 == SVGA3DREG_CONST && src1.base.num != src2.base.num))
574 need_temp1 = TRUE;
575 }
576
577 if (type0 == SVGA3DREG_INPUT &&
578 ((type1 == SVGA3DREG_INPUT && src0.base.num != src1.base.num) ||
579 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
580 need_temp0 = TRUE;
581
582 if (type1 == SVGA3DREG_INPUT &&
583 (type2 == SVGA3DREG_INPUT && src1.base.num != src2.base.num))
584 need_temp1 = TRUE;
585
586 if (need_temp0) {
587 temp0 = get_temp( emit );
588
589 if (!emit_repl( emit, temp0, &src0 ))
590 return FALSE;
591 }
592
593 if (need_temp1) {
594 temp1 = get_temp( emit );
595
596 if (!emit_repl( emit, temp1, &src1 ))
597 return FALSE;
598 }
599
600 if (!emit_op3( emit, inst, dest, src0, src1, src2 ))
601 return FALSE;
602
603 if (need_temp1)
604 release_temp( emit, temp1 );
605 if (need_temp0)
606 release_temp( emit, temp0 );
607 return TRUE;
608 }
609
610
611 /**
612 * SVGA shaders may not refer to >1 constant register in a single
613 * instruction. This function checks for that usage and inserts a
614 * move to temporary if detected.
615 */
616 static boolean
617 submit_op4(struct svga_shader_emitter *emit,
618 SVGA3dShaderInstToken inst,
619 SVGA3dShaderDestToken dest,
620 struct src_register src0,
621 struct src_register src1,
622 struct src_register src2,
623 struct src_register src3)
624 {
625 SVGA3dShaderDestToken temp0;
626 SVGA3dShaderDestToken temp3;
627 boolean need_temp0 = FALSE;
628 boolean need_temp3 = FALSE;
629 SVGA3dShaderRegType type0, type1, type2, type3;
630
631 temp0.value = 0;
632 temp3.value = 0;
633 type0 = SVGA3dShaderGetRegType( src0.base.value );
634 type1 = SVGA3dShaderGetRegType( src1.base.value );
635 type2 = SVGA3dShaderGetRegType( src2.base.value );
636 type3 = SVGA3dShaderGetRegType( src3.base.value );
637
638 /* Make life a little easier - this is only used by the TXD
639 * instruction, whose sampler operand (src1) is guaranteed not to be
640 * a constant or input register:
641 */
642 assert(type1 == SVGA3DREG_SAMPLER);
643
644 if (type0 == SVGA3DREG_CONST &&
645 ((type3 == SVGA3DREG_CONST && src0.base.num != src3.base.num) ||
646 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
647 need_temp0 = TRUE;
648
649 if (type3 == SVGA3DREG_CONST &&
650 (type2 == SVGA3DREG_CONST && src3.base.num != src2.base.num))
651 need_temp3 = TRUE;
652
653 if (type0 == SVGA3DREG_INPUT &&
654 ((type3 == SVGA3DREG_INPUT && src0.base.num != src3.base.num) ||
655 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
656 need_temp0 = TRUE;
657
658 if (type3 == SVGA3DREG_INPUT &&
659 (type2 == SVGA3DREG_INPUT && src3.base.num != src2.base.num))
660 need_temp3 = TRUE;
661
662 if (need_temp0) {
663 temp0 = get_temp( emit );
664
665 if (!emit_repl( emit, temp0, &src0 ))
666 return FALSE;
667 }
668
669 if (need_temp3) {
670 temp3 = get_temp( emit );
671
672 if (!emit_repl( emit, temp3, &src3 ))
673 return FALSE;
674 }
675
676 if (!emit_op4( emit, inst, dest, src0, src1, src2, src3 ))
677 return FALSE;
678
679 if (need_temp3)
680 release_temp( emit, temp3 );
681 if (need_temp0)
682 release_temp( emit, temp0 );
683 return TRUE;
684 }
685
686
687 /**
688 * Do the src and dest registers refer to the same register?
689 */
690 static boolean
691 alias_src_dst(struct src_register src,
692 SVGA3dShaderDestToken dst)
693 {
694 if (src.base.num != dst.num)
695 return FALSE;
696
697 if (SVGA3dShaderGetRegType(dst.value) !=
698 SVGA3dShaderGetRegType(src.base.value))
699 return FALSE;
700
701 return TRUE;
702 }
703
704
705 static boolean
706 submit_lrp(struct svga_shader_emitter *emit,
707 SVGA3dShaderDestToken dst,
708 struct src_register src0,
709 struct src_register src1,
710 struct src_register src2)
711 {
712 SVGA3dShaderDestToken tmp;
713 boolean need_dst_tmp = FALSE;
714
715 /* The dst reg must be a temporary, and not be the same as src0 or src2 */
716 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
717 alias_src_dst(src0, dst) ||
718 alias_src_dst(src2, dst))
719 need_dst_tmp = TRUE;
720
721 if (need_dst_tmp) {
722 tmp = get_temp( emit );
723 tmp.mask = dst.mask;
724 }
725 else {
726 tmp = dst;
727 }
728
729 if (!submit_op3(emit, inst_token( SVGA3DOP_LRP ), tmp, src0, src1, src2))
730 return FALSE;
731
732 if (need_dst_tmp) {
733 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
734 return FALSE;
735 }
736
737 return TRUE;
738 }
739
740
741 static boolean
742 emit_def_const(struct svga_shader_emitter *emit,
743 SVGA3dShaderConstType type,
744 unsigned idx, float a, float b, float c, float d)
745 {
746 SVGA3DOpDefArgs def;
747 SVGA3dShaderInstToken opcode;
748
749 switch (type) {
750 case SVGA3D_CONST_TYPE_FLOAT:
751 opcode = inst_token( SVGA3DOP_DEF );
752 def.dst = dst_register( SVGA3DREG_CONST, idx );
753 def.constValues[0] = a;
754 def.constValues[1] = b;
755 def.constValues[2] = c;
756 def.constValues[3] = d;
757 break;
758 case SVGA3D_CONST_TYPE_INT:
759 opcode = inst_token( SVGA3DOP_DEFI );
760 def.dst = dst_register( SVGA3DREG_CONSTINT, idx );
761 def.constIValues[0] = (int)a;
762 def.constIValues[1] = (int)b;
763 def.constIValues[2] = (int)c;
764 def.constIValues[3] = (int)d;
765 break;
766 default:
767 assert(0);
768 opcode = inst_token( SVGA3DOP_NOP );
769 break;
770 }
771
772 if (!emit_instruction(emit, opcode) ||
773 !svga_shader_emit_dwords( emit, def.values, Elements(def.values)))
774 return FALSE;
775
776 return TRUE;
777 }
778
779
780 static boolean
781 create_zero_immediate( struct svga_shader_emitter *emit )
782 {
783 unsigned idx = emit->nr_hw_float_const++;
784
785 /* Emit the constant (0, 0.5, -1, 1) and use swizzling to generate
786 * other useful vectors.
787 */
788 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
789 idx, 0, 0.5, -1, 1 ))
790 return FALSE;
791
792 emit->zero_immediate_idx = idx;
793 emit->created_zero_immediate = TRUE;
794
795 return TRUE;
796 }
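/* Note (illustrative): since the constant holds (0, 0.5, -1, 1), simple
 * swizzles of it produce other handy vectors, e.g.
 * swizzle(c, 0, 0, 0, 3) -> {0, 0, 0, 1} (see get_zero_immediate)
 * swizzle(c, 3, 3, 3, 2) -> {1, 1, 1, -1} (see get_pos_neg_one_immediate)
 * swizzle(c, 1, 1, 1, 1) -> {0.5, 0.5, 0.5, 0.5} (see get_half_immediate)
 */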
797
798
799 static boolean
800 create_loop_const( struct svga_shader_emitter *emit )
801 {
802 unsigned idx = emit->nr_hw_int_const++;
803
804 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_INT, idx,
805 255, /* iteration count */
806 0, /* initial value */
807 1, /* step size */
808 0 /* not used, must be 0 */))
809 return FALSE;
810
811 emit->loop_const_idx = idx;
812 emit->created_loop_const = TRUE;
813
814 return TRUE;
815 }
816
817 static boolean
818 create_arl_consts( struct svga_shader_emitter *emit )
819 {
820 int i;
821
822 for (i = 0; i < emit->num_arl_consts; i += 4) {
823 int j;
824 unsigned idx = emit->nr_hw_float_const++;
825 float vals[4];
826 for (j = 0; j < 4 && (j + i) < emit->num_arl_consts; ++j) {
827 vals[j] = (float) emit->arl_consts[i + j].number;
828 emit->arl_consts[i + j].idx = idx;
829 switch (j) {
830 case 0:
831 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_X;
832 break;
833 case 1:
834 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Y;
835 break;
836 case 2:
837 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Z;
838 break;
839 case 3:
840 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_W;
841 break;
842 }
843 }
844 while (j < 4)
845 vals[j++] = 0;
846
847 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
848 vals[0], vals[1],
849 vals[2], vals[3]))
850 return FALSE;
851 }
852
853 return TRUE;
854 }
855
856
857 /**
858 * Return the register which holds the pixel shader's front/back-
859 * facing value.
860 */
861 static struct src_register
862 get_vface( struct svga_shader_emitter *emit )
863 {
864 assert(emit->emitted_vface);
865 return src_register(SVGA3DREG_MISCTYPE, SVGA3DMISCREG_FACE);
866 }
867
868
869 /**
870 * returns {0, 0, 0, 1} immediate
871 */
872 static struct src_register
873 get_zero_immediate( struct svga_shader_emitter *emit )
874 {
875 assert(emit->created_zero_immediate);
876 assert(emit->zero_immediate_idx >= 0);
877 return swizzle(src_register( SVGA3DREG_CONST,
878 emit->zero_immediate_idx),
879 0, 0, 0, 3);
880 }
881
882
883 /**
884 * returns {1, 1, 1, -1} immediate
885 */
886 static struct src_register
887 get_pos_neg_one_immediate( struct svga_shader_emitter *emit )
888 {
889 assert(emit->created_zero_immediate);
890 assert(emit->zero_immediate_idx >= 0);
891 return swizzle(src_register( SVGA3DREG_CONST,
892 emit->zero_immediate_idx),
893 3, 3, 3, 2);
894 }
895
896
897 /**
898 * returns {0.5, 0.5, 0.5, 0.5} immediate
899 */
900 static struct src_register
901 get_half_immediate( struct svga_shader_emitter *emit )
902 {
903 assert(emit->created_zero_immediate);
904 assert(emit->zero_immediate_idx >= 0);
905 return swizzle(src_register(SVGA3DREG_CONST, emit->zero_immediate_idx),
906 1, 1, 1, 1);
907 }
908
909
910 /**
911 * returns the loop const
912 */
913 static struct src_register
914 get_loop_const( struct svga_shader_emitter *emit )
915 {
916 assert(emit->created_loop_const);
917 assert(emit->loop_const_idx >= 0);
918 return src_register( SVGA3DREG_CONSTINT,
919 emit->loop_const_idx );
920 }
921
922
923 static struct src_register
924 get_fake_arl_const( struct svga_shader_emitter *emit )
925 {
926 struct src_register reg;
927 int idx = 0, swizzle = 0, i;
928
929 for (i = 0; i < emit->num_arl_consts; ++ i) {
930 if (emit->arl_consts[i].arl_num == emit->current_arl) {
931 idx = emit->arl_consts[i].idx;
932 swizzle = emit->arl_consts[i].swizzle;
933 }
934 }
935
936 reg = src_register( SVGA3DREG_CONST, idx );
937 return scalar(reg, swizzle);
938 }
939
940
941 /**
942 * Return the register which holds the current dimensions of the
943 * texture bound to the given sampler
944 */
945 static struct src_register
946 get_tex_dimensions( struct svga_shader_emitter *emit, int sampler_num )
947 {
948 int idx;
949 struct src_register reg;
950
951 /* the width/height indexes start right after constants */
952 idx = emit->key.fkey.tex[sampler_num].width_height_idx +
953 emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
954
955 reg = src_register( SVGA3DREG_CONST, idx );
956 return reg;
957 }
958
959
960 static boolean
961 emit_fake_arl(struct svga_shader_emitter *emit,
962 const struct tgsi_full_instruction *insn)
963 {
964 const struct src_register src0 =
965 translate_src_register(emit, &insn->Src[0] );
966 struct src_register src1 = get_fake_arl_const( emit );
967 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
968 SVGA3dShaderDestToken tmp = get_temp( emit );
969
970 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
971 return FALSE;
972
973 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), tmp, src( tmp ),
974 src1))
975 return FALSE;
976
977 /* replicate the original swizzle */
978 src1 = src(tmp);
979 src1.base.swizzle = src0.base.swizzle;
980
981 return submit_op1( emit, inst_token( SVGA3DOP_MOVA ),
982 dst, src1 );
983 }
984
985
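/**
 * Translate TGSI IF: emit an IFC instruction that tests the condition
 * operand against zero (not-equal), copying the operand through a
 * temporary first when it lives in a constant register.
 */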
986 static boolean
987 emit_if(struct svga_shader_emitter *emit,
988 const struct tgsi_full_instruction *insn)
989 {
990 struct src_register src0 =
991 translate_src_register(emit, &insn->Src[0]);
992 struct src_register zero = get_zero_immediate( emit );
993 SVGA3dShaderInstToken if_token = inst_token( SVGA3DOP_IFC );
994
995 if_token.control = SVGA3DOPCOMPC_NE;
996 zero = scalar(zero, TGSI_SWIZZLE_X);
997
998 if (SVGA3dShaderGetRegType(src0.base.value) == SVGA3DREG_CONST) {
999 /*
1000 * Max different constant registers readable per IFC instruction is 1.
1001 */
1002 SVGA3dShaderDestToken tmp = get_temp( emit );
1003
1004 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
1005 return FALSE;
1006
1007 src0 = scalar(src( tmp ), TGSI_SWIZZLE_X);
1008 }
1009
1010 emit->dynamic_branching_level++;
1011
1012 return (emit_instruction( emit, if_token ) &&
1013 emit_src( emit, src0 ) &&
1014 emit_src( emit, zero ) );
1015 }
1016
1017
1018 static boolean
1019 emit_endif(struct svga_shader_emitter *emit,
1020 const struct tgsi_full_instruction *insn)
1021 {
1022 emit->dynamic_branching_level--;
1023
1024 return emit_instruction(emit, inst_token(SVGA3DOP_ENDIF));
1025 }
1026
1027
1028 static boolean
1029 emit_else(struct svga_shader_emitter *emit,
1030 const struct tgsi_full_instruction *insn)
1031 {
1032 return emit_instruction(emit, inst_token(SVGA3DOP_ELSE));
1033 }
1034
1035
1036 /**
1037 * Translate the following TGSI FLR instruction.
1038 * FLR DST, SRC
1039 * To the following SVGA3D instruction sequence.
1040 * FRC TMP, SRC
1041 * SUB DST, SRC, TMP
1042 */
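/* Worked example (illustrative): for SRC = -1.3, FRC yields 0.7 and
 * SRC - TMP = -2.0 = floor(-1.3); for SRC = 2.25, FRC yields 0.25 and the
 * result is 2.0.
 */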
1043 static boolean
1044 emit_floor(struct svga_shader_emitter *emit,
1045 const struct tgsi_full_instruction *insn )
1046 {
1047 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1048 const struct src_register src0 =
1049 translate_src_register(emit, &insn->Src[0] );
1050 SVGA3dShaderDestToken temp = get_temp( emit );
1051
1052 /* FRC TMP, SRC */
1053 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ), temp, src0 ))
1054 return FALSE;
1055
1056 /* SUB DST, SRC, TMP */
1057 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src0,
1058 negate( src( temp ) ) ))
1059 return FALSE;
1060
1061 return TRUE;
1062 }
1063
1064
1065 /**
1066 * Translate the following TGSI CEIL instruction.
1067 * CEIL DST, SRC
1068 * To the following SVGA3D instruction sequence.
1069 * FRC TMP, -SRC
1070 * ADD DST, SRC, TMP
1071 */
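/* Worked example (illustrative): for SRC = 1.3, FRC(-1.3) yields 0.7 and
 * SRC + TMP = 2.0 = ceil(1.3); for SRC = -1.3, FRC(1.3) yields 0.3 and the
 * result is -1.0.
 */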
1072 static boolean
1073 emit_ceil(struct svga_shader_emitter *emit,
1074 const struct tgsi_full_instruction *insn)
1075 {
1076 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
1077 const struct src_register src0 =
1078 translate_src_register(emit, &insn->Src[0]);
1079 SVGA3dShaderDestToken temp = get_temp(emit);
1080
1081 /* FRC TMP, -SRC */
1082 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), temp, negate(src0)))
1083 return FALSE;
1084
1085 /* ADD DST, SRC, TMP */
1086 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), dst, src0, src(temp)))
1087 return FALSE;
1088
1089 return TRUE;
1090 }
1091
1092
1093 /**
1094 * Translate the following TGSI DIV instruction.
1095 * DIV DST.xy, SRC0, SRC1
1096 * To the following SVGA3D instruction sequence.
1097 * RCP TMP.x, SRC1.xxxx
1098 * RCP TMP.y, SRC1.yyyy
1099 * MUL DST.xy, SRC0, TMP
1100 */
1101 static boolean
1102 emit_div(struct svga_shader_emitter *emit,
1103 const struct tgsi_full_instruction *insn )
1104 {
1105 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1106 const struct src_register src0 =
1107 translate_src_register(emit, &insn->Src[0] );
1108 const struct src_register src1 =
1109 translate_src_register(emit, &insn->Src[1] );
1110 SVGA3dShaderDestToken temp = get_temp( emit );
1111 int i;
1112
1113 /* For each enabled element, perform an RCP instruction. Note that
1114 * RCP is scalar in SVGA3D:
1115 */
1116 for (i = 0; i < 4; i++) {
1117 unsigned channel = 1 << i;
1118 if (dst.mask & channel) {
1119 /* RCP TMP.?, SRC1.???? */
1120 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1121 writemask(temp, channel),
1122 scalar(src1, i) ))
1123 return FALSE;
1124 }
1125 }
1126
1127 /* Vector mul:
1128 * MUL DST, SRC0, TMP
1129 */
1130 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst, src0,
1131 src( temp ) ))
1132 return FALSE;
1133
1134 return TRUE;
1135 }
1136
1137
1138 /**
1139 * Translate the following TGSI DP2 instruction.
1140 * DP2 DST, SRC1, SRC2
1141 * To the following SVGA3D instruction sequence.
1142 * MUL TMP, SRC1, SRC2
1143 * ADD DST, TMP.xxxx, TMP.yyyy
1144 */
1145 static boolean
1146 emit_dp2(struct svga_shader_emitter *emit,
1147 const struct tgsi_full_instruction *insn )
1148 {
1149 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1150 const struct src_register src0 =
1151 translate_src_register(emit, &insn->Src[0]);
1152 const struct src_register src1 =
1153 translate_src_register(emit, &insn->Src[1]);
1154 SVGA3dShaderDestToken temp = get_temp( emit );
1155 struct src_register temp_src0, temp_src1;
1156
1157 /* MUL TMP, SRC1, SRC2 */
1158 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), temp, src0, src1 ))
1159 return FALSE;
1160
1161 temp_src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1162 temp_src1 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1163
1164 /* ADD DST, TMP.xxxx, TMP.yyyy */
1165 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1166 temp_src0, temp_src1 ))
1167 return FALSE;
1168
1169 return TRUE;
1170 }
1171
1172
1173 /**
1174 * Translate the following TGSI DPH instruction.
1175 * DPH DST, SRC1, SRC2
1176 * To the following SVGA3D instruction sequence.
1177 * DP3 TMP, SRC1, SRC2
1178 * ADD DST, TMP, SRC2.wwww
1179 */
1180 static boolean
1181 emit_dph(struct svga_shader_emitter *emit,
1182 const struct tgsi_full_instruction *insn )
1183 {
1184 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1185 const struct src_register src0 = translate_src_register(
1186 emit, &insn->Src[0] );
1187 struct src_register src1 =
1188 translate_src_register(emit, &insn->Src[1]);
1189 SVGA3dShaderDestToken temp = get_temp( emit );
1190
1191 /* DP3 TMP, SRC1, SRC2 */
1192 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src1 ))
1193 return FALSE;
1194
1195 src1 = scalar(src1, TGSI_SWIZZLE_W);
1196
1197 /* ADD DST, TMP, SRC2.wwww */
1198 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1199 src( temp ), src1 ))
1200 return FALSE;
1201
1202 return TRUE;
1203 }
1204
1205
1206 /**
1207 * Translate the following TGSI NRM instruction.
1208 * NRM DST, SRC
1209 * To the following SVGA3D instruction sequence.
1210 * DP3 TMP, SRC, SRC
1211 * RSQ TMP, TMP
1212 * MUL DST, SRC, TMP
1213 */
1214 static boolean
1215 emit_nrm(struct svga_shader_emitter *emit,
1216 const struct tgsi_full_instruction *insn)
1217 {
1218 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1219 const struct src_register src0 =
1220 translate_src_register(emit, &insn->Src[0]);
1221 SVGA3dShaderDestToken temp = get_temp( emit );
1222
1223 /* DP3 TMP, SRC, SRC */
1224 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src0 ))
1225 return FALSE;
1226
1227 /* RSQ TMP, TMP */
1228 if (!submit_op1( emit, inst_token( SVGA3DOP_RSQ ), temp, src( temp )))
1229 return FALSE;
1230
1231 /* MUL DST, SRC, TMP */
1232 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst,
1233 src0, src( temp )))
1234 return FALSE;
1235
1236 return TRUE;
1237 }
1238
1239
1240 static boolean
1241 do_emit_sincos(struct svga_shader_emitter *emit,
1242 SVGA3dShaderDestToken dst,
1243 struct src_register src0)
1244 {
1245 src0 = scalar(src0, TGSI_SWIZZLE_X);
1246 return submit_op1(emit, inst_token(SVGA3DOP_SINCOS), dst, src0);
1247 }
1248
1249
1250 static boolean
1251 emit_sincos(struct svga_shader_emitter *emit,
1252 const struct tgsi_full_instruction *insn)
1253 {
1254 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1255 struct src_register src0 = translate_src_register(emit, &insn->Src[0]);
1256 SVGA3dShaderDestToken temp = get_temp( emit );
1257
1258 /* SCS TMP SRC */
1259 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_XY), src0 ))
1260 return FALSE;
1261
1262 /* MOV DST TMP */
1263 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src( temp ) ))
1264 return FALSE;
1265
1266 return TRUE;
1267 }
1268
1269
1270 /**
1271 * SCS TMP SRC
1272 * MOV DST TMP.yyyy
1273 */
1274 static boolean
1275 emit_sin(struct svga_shader_emitter *emit,
1276 const struct tgsi_full_instruction *insn )
1277 {
1278 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1279 struct src_register src0 =
1280 translate_src_register(emit, &insn->Src[0] );
1281 SVGA3dShaderDestToken temp = get_temp( emit );
1282
1283 /* SCS TMP SRC */
1284 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_Y), src0))
1285 return FALSE;
1286
1287 src0 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1288
1289 /* MOV DST TMP.yyyy */
1290 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1291 return FALSE;
1292
1293 return TRUE;
1294 }
1295
1296 /*
1297 * SCS TMP SRC
1298 * MOV DST TMP.xxxx
1299 */
1300 static boolean
1301 emit_cos(struct svga_shader_emitter *emit,
1302 const struct tgsi_full_instruction *insn)
1303 {
1304 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1305 struct src_register src0 =
1306 translate_src_register(emit, &insn->Src[0] );
1307 SVGA3dShaderDestToken temp = get_temp( emit );
1308
1309 /* SCS TMP SRC */
1310 if (!do_emit_sincos( emit, writemask(temp, TGSI_WRITEMASK_X), src0 ))
1311 return FALSE;
1312
1313 src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1314
1315 /* MOV DST TMP.xxxx */
1316 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1317 return FALSE;
1318
1319 return TRUE;
1320 }
1321
1322
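/**
 * Translate TGSI SSG (set sign): DST is 1 where SRC is positive, 0 where
 * it is zero and -1 where it is negative.  Vertex shaders use the native
 * SGN opcode; fragment shaders build the result from two CMPs and an ADD.
 */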
1323 static boolean
1324 emit_ssg(struct svga_shader_emitter *emit,
1325 const struct tgsi_full_instruction *insn)
1326 {
1327 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1328 struct src_register src0 =
1329 translate_src_register(emit, &insn->Src[0] );
1330 SVGA3dShaderDestToken temp0 = get_temp( emit );
1331 SVGA3dShaderDestToken temp1 = get_temp( emit );
1332 struct src_register zero, one;
1333
1334 if (emit->unit == PIPE_SHADER_VERTEX) {
1335 /* SGN DST, SRC0, TMP0, TMP1 */
1336 return submit_op3( emit, inst_token( SVGA3DOP_SGN ), dst, src0,
1337 src( temp0 ), src( temp1 ) );
1338 }
1339
1340 zero = get_zero_immediate( emit );
1341 one = scalar( zero, TGSI_SWIZZLE_W );
1342 zero = scalar( zero, TGSI_SWIZZLE_X );
1343
1344 /* CMP TMP0, SRC0, one, zero */
1345 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1346 writemask( temp0, dst.mask ), src0, one, zero ))
1347 return FALSE;
1348
1349 /* CMP TMP1, negate(SRC0), negate(one), zero */
1350 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1351 writemask( temp1, dst.mask ), negate( src0 ), negate( one ),
1352 zero ))
1353 return FALSE;
1354
1355 /* ADD DST, TMP0, TMP1 */
1356 return submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src( temp0 ),
1357 src( temp1 ) );
1358 }
1359
1360
1361 /**
1362 * Translate TGSI SUB as: ADD DST, SRC0, negate(SRC1)
1363 */
1364 static boolean
1365 emit_sub(struct svga_shader_emitter *emit,
1366 const struct tgsi_full_instruction *insn)
1367 {
1368 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1369 struct src_register src0 = translate_src_register(
1370 emit, &insn->Src[0] );
1371 struct src_register src1 = translate_src_register(
1372 emit, &insn->Src[1] );
1373
1374 src1 = negate(src1);
1375
1376 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1377 src0, src1 ))
1378 return FALSE;
1379
1380 return TRUE;
1381 }
1382
1383
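/**
 * Translate conditional fragment kill (KILL_IF): TEXKILL discards the
 * fragment when any of the x/y/z components of its operand is negative,
 * so a second TEXKILL on a temporary holding src.wwww is emitted when the
 * W component is not already duplicated into one of the xyz positions.
 */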
1384 static boolean
1385 emit_kill_if(struct svga_shader_emitter *emit,
1386 const struct tgsi_full_instruction *insn)
1387 {
1388 const struct tgsi_full_src_register *reg = &insn->Src[0];
1389 struct src_register src0, srcIn;
1390 /* is the W component tested in another position? */
1391 const boolean w_tested = (reg->Register.SwizzleW == reg->Register.SwizzleX ||
1392 reg->Register.SwizzleW == reg->Register.SwizzleY ||
1393 reg->Register.SwizzleW == reg->Register.SwizzleZ);
1394 const boolean special = (reg->Register.Absolute ||
1395 reg->Register.Negate ||
1396 reg->Register.Indirect ||
1397 reg->Register.SwizzleX != 0 ||
1398 reg->Register.SwizzleY != 1 ||
1399 reg->Register.SwizzleZ != 2 ||
1400 reg->Register.File != TGSI_FILE_TEMPORARY);
1401 SVGA3dShaderDestToken temp;
1402
1403 src0 = srcIn = translate_src_register( emit, reg );
1404
1405 if (special || !w_tested) {
1406 /* need a temp reg */
1407 temp = get_temp( emit );
1408 }
1409
1410 if (special) {
1411 /* move the source into a temp register */
1412 submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1413 writemask( temp, TGSI_WRITEMASK_XYZ ),
1414 src0 );
1415
1416 src0 = src( temp );
1417 }
1418
1419 /* do the texkill (on the xyz components) */
1420 if (!submit_op0( emit, inst_token( SVGA3DOP_TEXKILL ), dst(src0) ))
1421 return FALSE;
1422
1423 if (!w_tested) {
1424 /* need to emit a second texkill to test the W component */
1425 /* put src.wwww into temp register */
1426 if (!submit_op1(emit,
1427 inst_token( SVGA3DOP_MOV ),
1428 writemask( temp, TGSI_WRITEMASK_XYZ ),
1429 scalar(srcIn, TGSI_SWIZZLE_W)))
1430 return FALSE;
1431
1432 /* second texkill */
1433 if (!submit_op0( emit, inst_token( SVGA3DOP_TEXKILL ), temp ))
1434 return FALSE;
1435 }
1436
1437 return TRUE;
1438 }
1439
1440
1441 /**
1442 * unconditional kill
1443 */
1444 static boolean
1445 emit_kill(struct svga_shader_emitter *emit,
1446 const struct tgsi_full_instruction *insn)
1447 {
1448 SVGA3dShaderDestToken temp;
1449 struct src_register one = scalar( get_zero_immediate( emit ),
1450 TGSI_SWIZZLE_W );
1451 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_TEXKILL );
1452
1453 /* texkill doesn't allow negation on its operand, so let's move
1454 * the negated {1} value into a temp register */
1455 temp = get_temp( emit );
1456 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp,
1457 negate( one ) ))
1458 return FALSE;
1459
1460 return submit_op0( emit, inst, temp );
1461 }
1462
1463
1464 /**
1465 * Test if r1 and r2 are the same register.
1466 */
1467 static boolean
1468 same_register(struct src_register r1, struct src_register r2)
1469 {
1470 return (r1.base.num == r2.base.num &&
1471 r1.base.type_upper == r2.base.type_upper &&
1472 r1.base.type_lower == r2.base.type_lower);
1473 }
1474
1475
1476
1477 /* Implement conditionals by initializing destination reg to 'fail',
1478 * then set predicate reg with UFOP_SETP, then move 'pass' to dest
1479 * based on predicate reg.
1480 *
1481 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1482 * MOV dst, fail
1483 * MOV dst, pass, p0
1484 */
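/* For instance, emit_select() below uses this with pass = {1} and
 * fail = {0} to build SLT/SGE-style comparisons in the fragment shader.
 */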
1485 static boolean
1486 emit_conditional(struct svga_shader_emitter *emit,
1487 unsigned compare_func,
1488 SVGA3dShaderDestToken dst,
1489 struct src_register src0,
1490 struct src_register src1,
1491 struct src_register pass,
1492 struct src_register fail)
1493 {
1494 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1495 SVGA3dShaderInstToken setp_token, mov_token;
1496 setp_token = inst_token( SVGA3DOP_SETP );
1497
1498 switch (compare_func) {
1499 case PIPE_FUNC_NEVER:
1500 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1501 dst, fail );
1502 break;
1503 case PIPE_FUNC_LESS:
1504 setp_token.control = SVGA3DOPCOMP_LT;
1505 break;
1506 case PIPE_FUNC_EQUAL:
1507 setp_token.control = SVGA3DOPCOMP_EQ;
1508 break;
1509 case PIPE_FUNC_LEQUAL:
1510 setp_token.control = SVGA3DOPCOMP_LE;
1511 break;
1512 case PIPE_FUNC_GREATER:
1513 setp_token.control = SVGA3DOPCOMP_GT;
1514 break;
1515 case PIPE_FUNC_NOTEQUAL:
1516 setp_token.control = SVGA3DOPCOMPC_NE;
1517 break;
1518 case PIPE_FUNC_GEQUAL:
1519 setp_token.control = SVGA3DOPCOMP_GE;
1520 break;
1521 case PIPE_FUNC_ALWAYS:
1522 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1523 dst, pass );
1524 break;
1525 }
1526
1527 if (same_register(src(dst), pass)) {
1528 /* We'll get bad results if the dst and pass registers are the same
1529 * so use a temp register containing pass.
1530 */
1531 SVGA3dShaderDestToken temp = get_temp(emit);
1532 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, pass))
1533 return FALSE;
1534 pass = src(temp);
1535 }
1536
1537 /* SETP src0, COMPOP, src1 */
1538 if (!submit_op2( emit, setp_token, pred_reg,
1539 src0, src1 ))
1540 return FALSE;
1541
1542 mov_token = inst_token( SVGA3DOP_MOV );
1543
1544 /* MOV dst, fail */
1545 if (!submit_op1( emit, mov_token, dst,
1546 fail ))
1547 return FALSE;
1548
1549 /* MOV dst, pass (predicated)
1550 *
1551 * Note that the predicate reg (and possible modifiers) is passed
1552 * as the first source argument.
1553 */
1554 mov_token.predicated = 1;
1555 if (!submit_op2( emit, mov_token, dst,
1556 src( pred_reg ), pass ))
1557 return FALSE;
1558
1559 return TRUE;
1560 }
1561
1562
1563 static boolean
1564 emit_select(struct svga_shader_emitter *emit,
1565 unsigned compare_func,
1566 SVGA3dShaderDestToken dst,
1567 struct src_register src0,
1568 struct src_register src1 )
1569 {
1570 /* There are some SVGA instructions which implement some selects
1571 * directly, but they are only available in the vertex shader.
1572 */
1573 if (emit->unit == PIPE_SHADER_VERTEX) {
1574 switch (compare_func) {
1575 case PIPE_FUNC_GEQUAL:
1576 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src0, src1 );
1577 case PIPE_FUNC_LEQUAL:
1578 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src1, src0 );
1579 case PIPE_FUNC_GREATER:
1580 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src1, src0 );
1581 case PIPE_FUNC_LESS:
1582 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src0, src1 );
1583 default:
1584 break;
1585 }
1586 }
1587
1588 /* Otherwise, need to use the setp approach:
1589 */
1590 {
1591 struct src_register one, zero;
1592 /* zero immediate is 0,0,0,1 */
1593 zero = get_zero_immediate( emit );
1594 one = scalar( zero, TGSI_SWIZZLE_W );
1595 zero = scalar( zero, TGSI_SWIZZLE_X );
1596
1597 return emit_conditional(
1598 emit,
1599 compare_func,
1600 dst,
1601 src0,
1602 src1,
1603 one, zero);
1604 }
1605 }
1606
1607
1608 static boolean
1609 emit_select_op(struct svga_shader_emitter *emit,
1610 unsigned compare,
1611 const struct tgsi_full_instruction *insn)
1612 {
1613 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1614 struct src_register src0 = translate_src_register(
1615 emit, &insn->Src[0] );
1616 struct src_register src1 = translate_src_register(
1617 emit, &insn->Src[1] );
1618
1619 return emit_select( emit, compare, dst, src0, src1 );
1620 }
1621
1622
1623 /**
1624 * Translate TGSI CMP instruction.
1625 */
1626 static boolean
1627 emit_cmp(struct svga_shader_emitter *emit,
1628 const struct tgsi_full_instruction *insn)
1629 {
1630 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1631 const struct src_register src0 =
1632 translate_src_register(emit, &insn->Src[0] );
1633 const struct src_register src1 =
1634 translate_src_register(emit, &insn->Src[1] );
1635 const struct src_register src2 =
1636 translate_src_register(emit, &insn->Src[2] );
1637
1638 if (emit->unit == PIPE_SHADER_VERTEX) {
1639 struct src_register zero =
1640 scalar(get_zero_immediate(emit), TGSI_SWIZZLE_X);
1641 /* We used to simulate CMP with SLT+LRP. But that didn't work when
1642 * src1 or src2 was Inf/NaN. In particular, GLSL sqrt(0) failed
1643 * because it involves a CMP to handle the 0 case.
1644 * Use a conditional expression instead.
1645 */
1646 return emit_conditional(emit, PIPE_FUNC_LESS, dst,
1647 src0, zero, src1, src2);
1648 }
1649 else {
1650 assert(emit->unit == PIPE_SHADER_FRAGMENT);
1651
1652 /* CMP DST, SRC0, SRC2, SRC1 */
1653 return submit_op3( emit, inst_token( SVGA3DOP_CMP ), dst,
1654 src0, src2, src1);
1655 }
1656 }
1657
1658
1659 /**
1660 * Translate two-operand texture instructions (TEX, TXP, TXB, TXL) to SVGA3D form.
1661 */
1662 static boolean
1663 emit_tex2(struct svga_shader_emitter *emit,
1664 const struct tgsi_full_instruction *insn,
1665 SVGA3dShaderDestToken dst)
1666 {
1667 SVGA3dShaderInstToken inst;
1668 struct src_register texcoord;
1669 struct src_register sampler;
1670 SVGA3dShaderDestToken tmp;
1671
1672 inst.value = 0;
1673
1674 switch (insn->Instruction.Opcode) {
1675 case TGSI_OPCODE_TEX:
1676 inst.op = SVGA3DOP_TEX;
1677 break;
1678 case TGSI_OPCODE_TXP:
1679 inst.op = SVGA3DOP_TEX;
1680 inst.control = SVGA3DOPCONT_PROJECT;
1681 break;
1682 case TGSI_OPCODE_TXB:
1683 inst.op = SVGA3DOP_TEX;
1684 inst.control = SVGA3DOPCONT_BIAS;
1685 break;
1686 case TGSI_OPCODE_TXL:
1687 inst.op = SVGA3DOP_TEXLDL;
1688 break;
1689 default:
1690 assert(0);
1691 return FALSE;
1692 }
1693
1694 texcoord = translate_src_register( emit, &insn->Src[0] );
1695 sampler = translate_src_register( emit, &insn->Src[1] );
1696
1697 if (emit->key.fkey.tex[sampler.base.num].unnormalized ||
1698 emit->dynamic_branching_level > 0)
1699 tmp = get_temp( emit );
1700
1701 /* Can't do mipmapping inside dynamic branch constructs. Force LOD
1702 * zero in that case.
1703 */
1704 if (emit->dynamic_branching_level > 0 &&
1705 inst.op == SVGA3DOP_TEX &&
1706 SVGA3dShaderGetRegType(texcoord.base.value) == SVGA3DREG_TEMP) {
1707 struct src_register zero = get_zero_immediate( emit );
1708
1709 /* MOV tmp, texcoord */
1710 if (!submit_op1( emit,
1711 inst_token( SVGA3DOP_MOV ),
1712 tmp,
1713 texcoord ))
1714 return FALSE;
1715
1716 /* MOV tmp.w, zero */
1717 if (!submit_op1( emit,
1718 inst_token( SVGA3DOP_MOV ),
1719 writemask( tmp, TGSI_WRITEMASK_W ),
1720 scalar( zero, TGSI_SWIZZLE_X )))
1721 return FALSE;
1722
1723 texcoord = src( tmp );
1724 inst.op = SVGA3DOP_TEXLDL;
1725 }
1726
1727 /* Explicit normalization of texcoords:
1728 */
1729 if (emit->key.fkey.tex[sampler.base.num].unnormalized) {
1730 struct src_register wh = get_tex_dimensions( emit, sampler.base.num );
1731
1732 /* MUL tmp, SRC0, WH */
1733 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1734 tmp, texcoord, wh ))
1735 return FALSE;
1736
1737 texcoord = src( tmp );
1738 }
1739
1740 return submit_op2( emit, inst, dst, texcoord, sampler );
1741 }
1742
1743
1744 /**
1745 * Translate the four-operand TXD texture instruction to SVGA3D form.
1746 */
1747 static boolean
1748 emit_tex4(struct svga_shader_emitter *emit,
1749 const struct tgsi_full_instruction *insn,
1750 SVGA3dShaderDestToken dst )
1751 {
1752 SVGA3dShaderInstToken inst;
1753 struct src_register texcoord;
1754 struct src_register ddx;
1755 struct src_register ddy;
1756 struct src_register sampler;
1757
1758 texcoord = translate_src_register( emit, &insn->Src[0] );
1759 ddx = translate_src_register( emit, &insn->Src[1] );
1760 ddy = translate_src_register( emit, &insn->Src[2] );
1761 sampler = translate_src_register( emit, &insn->Src[3] );
1762
1763 inst.value = 0;
1764
1765 switch (insn->Instruction.Opcode) {
1766 case TGSI_OPCODE_TXD:
1767 inst.op = SVGA3DOP_TEXLDD; /* 4 args! */
1768 break;
1769 default:
1770 assert(0);
1771 return FALSE;
1772 }
1773
1774 return submit_op4( emit, inst, dst, texcoord, sampler, ddx, ddy );
1775 }
1776
1777
1778 /**
1779 * Emit texture swizzle code.
1780 */
1781 static boolean
1782 emit_tex_swizzle(struct svga_shader_emitter *emit,
1783 SVGA3dShaderDestToken dst,
1784 struct src_register src,
1785 unsigned swizzle_x,
1786 unsigned swizzle_y,
1787 unsigned swizzle_z,
1788 unsigned swizzle_w)
1789 {
1790 const unsigned swizzleIn[4] = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
1791 unsigned srcSwizzle[4];
1792 unsigned srcWritemask = 0x0, zeroWritemask = 0x0, oneWritemask = 0x0;
1793 int i;
1794
1795 /* build writemasks and srcSwizzle terms */
1796 for (i = 0; i < 4; i++) {
1797 if (swizzleIn[i] == PIPE_SWIZZLE_ZERO) {
1798 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1799 zeroWritemask |= (1 << i);
1800 }
1801 else if (swizzleIn[i] == PIPE_SWIZZLE_ONE) {
1802 srcSwizzle[i] = TGSI_SWIZZLE_X + i;
1803 oneWritemask |= (1 << i);
1804 }
1805 else {
1806 srcSwizzle[i] = swizzleIn[i];
1807 srcWritemask |= (1 << i);
1808 }
1809 }
1810
1811 /* write x/y/z/w comps */
1812 if (dst.mask & srcWritemask) {
1813 if (!submit_op1(emit,
1814 inst_token(SVGA3DOP_MOV),
1815 writemask(dst, srcWritemask),
1816 swizzle(src,
1817 srcSwizzle[0],
1818 srcSwizzle[1],
1819 srcSwizzle[2],
1820 srcSwizzle[3])))
1821 return FALSE;
1822 }
1823
1824 /* write 0 comps */
1825 if (dst.mask & zeroWritemask) {
1826 if (!submit_op1(emit,
1827 inst_token(SVGA3DOP_MOV),
1828 writemask(dst, zeroWritemask),
1829 scalar(get_zero_immediate(emit), TGSI_SWIZZLE_X)))
1830 return FALSE;
1831 }
1832
1833 /* write 1 comps */
1834 if (dst.mask & oneWritemask) {
1835 if (!submit_op1(emit,
1836 inst_token(SVGA3DOP_MOV),
1837 writemask(dst, oneWritemask),
1838 scalar(get_zero_immediate(emit), TGSI_SWIZZLE_W)))
1839 return FALSE;
1840 }
1841
1842 return TRUE;
1843 }
1844
1845
1846 static boolean
1847 emit_tex(struct svga_shader_emitter *emit,
1848 const struct tgsi_full_instruction *insn)
1849 {
1850 SVGA3dShaderDestToken dst =
1851 translate_dst_register( emit, insn, 0 );
1852 struct src_register src0 =
1853 translate_src_register( emit, &insn->Src[0] );
1854 struct src_register src1 =
1855 translate_src_register( emit, &insn->Src[1] );
1856
1857 SVGA3dShaderDestToken tex_result;
1858 const unsigned unit = src1.base.num;
1859
1860 /* check for shadow samplers */
1861 boolean compare = (emit->key.fkey.tex[unit].compare_mode ==
1862 PIPE_TEX_COMPARE_R_TO_TEXTURE);
1863
1864 /* texture swizzle */
1865 boolean swizzle = (emit->key.fkey.tex[unit].swizzle_r != PIPE_SWIZZLE_RED ||
1866 emit->key.fkey.tex[unit].swizzle_g != PIPE_SWIZZLE_GREEN ||
1867 emit->key.fkey.tex[unit].swizzle_b != PIPE_SWIZZLE_BLUE ||
1868 emit->key.fkey.tex[unit].swizzle_a != PIPE_SWIZZLE_ALPHA);
1869
1870 boolean saturate = insn->Instruction.Saturate != TGSI_SAT_NONE;
1871
1872 /* If doing compare processing or tex swizzle or saturation, we need to put
1873 * the fetched color into a temporary so it can be used as a source later on.
1874 */
1875 if (compare || swizzle || saturate) {
1876 tex_result = get_temp( emit );
1877 }
1878 else {
1879 tex_result = dst;
1880 }
1881
1882 switch(insn->Instruction.Opcode) {
1883 case TGSI_OPCODE_TEX:
1884 case TGSI_OPCODE_TXB:
1885 case TGSI_OPCODE_TXP:
1886 case TGSI_OPCODE_TXL:
1887 if (!emit_tex2( emit, insn, tex_result ))
1888 return FALSE;
1889 break;
1890 case TGSI_OPCODE_TXD:
1891 if (!emit_tex4( emit, insn, tex_result ))
1892 return FALSE;
1893 break;
1894 default:
1895 assert(0);
1896 }
1897
1898 if (compare) {
1899 SVGA3dShaderDestToken dst2;
1900
1901 if (swizzle || saturate)
1902 dst2 = tex_result;
1903 else
1904 dst2 = dst;
1905
1906 if (dst.mask & TGSI_WRITEMASK_XYZ) {
1907 SVGA3dShaderDestToken src0_zdivw = get_temp( emit );
1908 /* When sampling a depth texture, the result of the comparison is in
1909 * the Y component.
1910 */
1911 struct src_register tex_src_x = scalar(src(tex_result), TGSI_SWIZZLE_Y);
1912 struct src_register r_coord;
1913
1914 if (insn->Instruction.Opcode == TGSI_OPCODE_TXP) {
1915 /* Divide texcoord R by Q */
1916 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1917 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1918 scalar(src0, TGSI_SWIZZLE_W) ))
1919 return FALSE;
1920
1921 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1922 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1923 scalar(src0, TGSI_SWIZZLE_Z),
1924 scalar(src(src0_zdivw), TGSI_SWIZZLE_X) ))
1925 return FALSE;
1926
1927 r_coord = scalar(src(src0_zdivw), TGSI_SWIZZLE_X);
1928 }
1929 else {
1930 r_coord = scalar(src0, TGSI_SWIZZLE_Z);
1931 }
1932
1933 /* Compare texture sample value against R component of texcoord */
1934 if (!emit_select(emit,
1935 emit->key.fkey.tex[unit].compare_func,
1936 writemask( dst2, TGSI_WRITEMASK_XYZ ),
1937 r_coord,
1938 tex_src_x))
1939 return FALSE;
1940 }
1941
1942 if (dst.mask & TGSI_WRITEMASK_W) {
1943 struct src_register one =
1944 scalar( get_zero_immediate( emit ), TGSI_SWIZZLE_W );
1945
1946 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1947 writemask( dst2, TGSI_WRITEMASK_W ),
1948 one ))
1949 return FALSE;
1950 }
1951 }
1952
1953 if (saturate && !swizzle) {
1954 /* MOV_SAT real_dst, dst */
1955 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src(tex_result) ))
1956 return FALSE;
1957 }
1958 else if (swizzle) {
1959 /* swizzle from tex_result to dst (handles saturation too, if any) */
1960 emit_tex_swizzle(emit,
1961 dst, src(tex_result),
1962 emit->key.fkey.tex[unit].swizzle_r,
1963 emit->key.fkey.tex[unit].swizzle_g,
1964 emit->key.fkey.tex[unit].swizzle_b,
1965 emit->key.fkey.tex[unit].swizzle_a);
1966 }
1967
1968 return TRUE;
1969 }
1970
1971
1972 static boolean
1973 emit_bgnloop2(struct svga_shader_emitter *emit,
1974 const struct tgsi_full_instruction *insn)
1975 {
1976 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_LOOP );
1977 struct src_register loop_reg = src_register( SVGA3DREG_LOOP, 0 );
1978 struct src_register const_int = get_loop_const( emit );
1979
1980 emit->dynamic_branching_level++;
1981
1982 return (emit_instruction( emit, inst ) &&
1983 emit_src( emit, loop_reg ) &&
1984 emit_src( emit, const_int ) );
1985 }
1986
1987
1988 static boolean
1989 emit_endloop2(struct svga_shader_emitter *emit,
1990 const struct tgsi_full_instruction *insn)
1991 {
1992 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_ENDLOOP );
1993
1994 emit->dynamic_branching_level--;
1995
1996 return emit_instruction( emit, inst );
1997 }
1998
1999
2000 static boolean
2001 emit_brk(struct svga_shader_emitter *emit,
2002 const struct tgsi_full_instruction *insn)
2003 {
2004 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_BREAK );
2005 return emit_instruction( emit, inst );
2006 }
2007
2008
2009 static boolean
2010 emit_scalar_op1(struct svga_shader_emitter *emit,
2011 unsigned opcode,
2012 const struct tgsi_full_instruction *insn)
2013 {
2014 SVGA3dShaderInstToken inst;
2015 SVGA3dShaderDestToken dst;
2016 struct src_register src;
2017
2018 inst = inst_token( opcode );
2019 dst = translate_dst_register( emit, insn, 0 );
2020 src = translate_src_register( emit, &insn->Src[0] );
2021 src = scalar( src, TGSI_SWIZZLE_X );
2022
2023 return submit_op1( emit, inst, dst, src );
2024 }
2025
2026
2027 static boolean
2028 emit_simple_instruction(struct svga_shader_emitter *emit,
2029 unsigned opcode,
2030 const struct tgsi_full_instruction *insn)
2031 {
2032 const struct tgsi_full_src_register *src = insn->Src;
2033 SVGA3dShaderInstToken inst;
2034 SVGA3dShaderDestToken dst;
2035
2036 inst = inst_token( opcode );
2037 dst = translate_dst_register( emit, insn, 0 );
2038
2039 switch (insn->Instruction.NumSrcRegs) {
2040 case 0:
2041 return submit_op0( emit, inst, dst );
2042 case 1:
2043 return submit_op1( emit, inst, dst,
2044 translate_src_register( emit, &src[0] ));
2045 case 2:
2046 return submit_op2( emit, inst, dst,
2047 translate_src_register( emit, &src[0] ),
2048 translate_src_register( emit, &src[1] ) );
2049 case 3:
2050 return submit_op3( emit, inst, dst,
2051 translate_src_register( emit, &src[0] ),
2052 translate_src_register( emit, &src[1] ),
2053 translate_src_register( emit, &src[2] ) );
2054 default:
2055 assert(0);
2056 return FALSE;
2057 }
2058 }
2059
2060
2061 static boolean
2062 emit_deriv(struct svga_shader_emitter *emit,
2063 const struct tgsi_full_instruction *insn )
2064 {
2065 if (emit->dynamic_branching_level > 0 &&
2066 insn->Src[0].Register.File == TGSI_FILE_TEMPORARY)
2067 {
2068 struct src_register zero = get_zero_immediate( emit );
2069 SVGA3dShaderDestToken dst =
2070 translate_dst_register( emit, insn, 0 );
2071
2072       /* Deriv opcodes are not valid inside dynamic branching; work
2073        * around this by zeroing out the destination.
2074        */
2075 if (!submit_op1(emit,
2076 inst_token( SVGA3DOP_MOV ),
2077 dst,
2078 scalar(zero, TGSI_SWIZZLE_X)))
2079 return FALSE;
2080
2081 return TRUE;
2082 }
2083 else {
2084 unsigned opcode;
2085 const struct tgsi_full_src_register *reg = &insn->Src[0];
2086 SVGA3dShaderInstToken inst;
2087 SVGA3dShaderDestToken dst;
2088 struct src_register src0;
2089
2090 switch (insn->Instruction.Opcode) {
2091 case TGSI_OPCODE_DDX:
2092 opcode = SVGA3DOP_DSX;
2093 break;
2094 case TGSI_OPCODE_DDY:
2095 opcode = SVGA3DOP_DSY;
2096 break;
2097 default:
2098 return FALSE;
2099 }
2100
2101 inst = inst_token( opcode );
2102 dst = translate_dst_register( emit, insn, 0 );
2103 src0 = translate_src_register( emit, reg );
2104
2105       /* We cannot use negate or abs modifiers on the source of a dsx/dsy instruction.
2106        */
2107 if (reg->Register.Absolute ||
2108 reg->Register.Negate) {
2109 SVGA3dShaderDestToken temp = get_temp( emit );
2110
2111 if (!emit_repl( emit, temp, &src0 ))
2112 return FALSE;
2113 }
2114
2115 return submit_op1( emit, inst, dst, src0 );
2116 }
2117 }
2118
2119
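/**
 * Translate TGSI ARL.  Fragment shaders ignore it (loop counters use aL
 * directly); vertex shaders emit either MOVA or, when
 * svga_arl_needs_adjustment() says so, the emit_fake_arl() sequence.
 */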
2120 static boolean
2121 emit_arl(struct svga_shader_emitter *emit,
2122 const struct tgsi_full_instruction *insn)
2123 {
2124 ++emit->current_arl;
2125 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2126 /* MOVA not present in pixel shader instruction set.
2127 * Ignore this instruction altogether since it is
2128 * only used for loop counters -- and for that
2129 * we reference aL directly.
2130 */
2131 return TRUE;
2132 }
2133 if (svga_arl_needs_adjustment( emit )) {
2134 return emit_fake_arl( emit, insn );
2135 } else {
2136 /* no need to adjust, just emit straight arl */
2137 return emit_simple_instruction(emit, SVGA3DOP_MOVA, insn);
2138 }
2139 }
2140
2141
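/**
 * Translate TGSI POW.  SVGA3D POW is a scalar operation that can only
 * write to a temporary and whose second source must not alias the
 * destination, so the result goes through a temp when needed.
 */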
2142 static boolean
2143 emit_pow(struct svga_shader_emitter *emit,
2144 const struct tgsi_full_instruction *insn)
2145 {
2146 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2147 struct src_register src0 = translate_src_register(
2148 emit, &insn->Src[0] );
2149 struct src_register src1 = translate_src_register(
2150 emit, &insn->Src[1] );
2151 boolean need_tmp = FALSE;
2152
2153 /* POW can only output to a temporary */
2154 if (insn->Dst[0].Register.File != TGSI_FILE_TEMPORARY)
2155 need_tmp = TRUE;
2156
2157 /* POW src1 must not be the same register as dst */
2158 if (alias_src_dst( src1, dst ))
2159 need_tmp = TRUE;
2160
2161 /* it's a scalar op */
2162 src0 = scalar( src0, TGSI_SWIZZLE_X );
2163 src1 = scalar( src1, TGSI_SWIZZLE_X );
2164
2165 if (need_tmp) {
2166 SVGA3dShaderDestToken tmp =
2167 writemask(get_temp( emit ), TGSI_WRITEMASK_X );
2168
2169 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ), tmp, src0, src1))
2170 return FALSE;
2171
2172 return submit_op1(emit, inst_token( SVGA3DOP_MOV ),
2173 dst, scalar(src(tmp), 0) );
2174 }
2175 else {
2176 return submit_op2(emit, inst_token( SVGA3DOP_POW ), dst, src0, src1);
2177 }
2178 }
2179
2180
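/**
 * Translate TGSI XPD (cross product) using the SVGA3D CRS instruction,
 * routing the result through a temporary when CRS's destination
 * restrictions apply, and writing 1.0 to dst.w when requested.
 */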
2181 static boolean
2182 emit_xpd(struct svga_shader_emitter *emit,
2183 const struct tgsi_full_instruction *insn)
2184 {
2185 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2186 const struct src_register src0 = translate_src_register(
2187 emit, &insn->Src[0] );
2188 const struct src_register src1 = translate_src_register(
2189 emit, &insn->Src[1] );
2190 boolean need_dst_tmp = FALSE;
2191
2192 /* XPD can only output to a temporary */
2193 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP)
2194 need_dst_tmp = TRUE;
2195
2196    /* The dst reg must not be the same as src0 or src1 */
2197 if (alias_src_dst(src0, dst) ||
2198 alias_src_dst(src1, dst))
2199 need_dst_tmp = TRUE;
2200
2201 if (need_dst_tmp) {
2202 SVGA3dShaderDestToken tmp = get_temp( emit );
2203
2204 /* Obey DX9 restrictions on mask:
2205 */
2206 tmp.mask = dst.mask & TGSI_WRITEMASK_XYZ;
2207
2208 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), tmp, src0, src1))
2209 return FALSE;
2210
2211 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
2212 return FALSE;
2213 }
2214 else {
2215 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), dst, src0, src1))
2216 return FALSE;
2217 }
2218
2219 /* Need to emit 1.0 to dst.w?
2220 */
2221 if (dst.mask & TGSI_WRITEMASK_W) {
2222 struct src_register zero = get_zero_immediate( emit );
2223
2224 if (!submit_op1(emit,
2225 inst_token( SVGA3DOP_MOV ),
2226 writemask(dst, TGSI_WRITEMASK_W),
2227 zero))
2228 return FALSE;
2229 }
2230
2231 return TRUE;
2232 }
2233
2234
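/**
 * Translate TGSI LRP (linear interpolation,
 * dst = src0 * src1 + (1 - src0) * src2) via the submit_lrp() helper.
 */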
2235 static boolean
2236 emit_lrp(struct svga_shader_emitter *emit,
2237 const struct tgsi_full_instruction *insn)
2238 {
2239 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2240 const struct src_register src0 = translate_src_register(
2241 emit, &insn->Src[0] );
2242 const struct src_register src1 = translate_src_register(
2243 emit, &insn->Src[1] );
2244 const struct src_register src2 = translate_src_register(
2245 emit, &insn->Src[2] );
2246
2247 return submit_lrp(emit, dst, src0, src1, src2);
2248 }
2249
2250
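/**
 * Translate TGSI DST.  Vertex shaders use the native SVGA3D DST
 * instruction; fragment shaders open-code the distance vector
 * (1, src0.y*src1.y, src0.z, src1.w) with MOV/MUL.
 */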
2251 static boolean
2252 emit_dst_insn(struct svga_shader_emitter *emit,
2253 const struct tgsi_full_instruction *insn)
2254 {
2255 if (emit->unit == PIPE_SHADER_VERTEX) {
2256 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
2257 */
2258 return emit_simple_instruction(emit, SVGA3DOP_DST, insn);
2259 }
2260 else {
2261 /* result[0] = 1 * 1;
2262 * result[1] = a[1] * b[1];
2263 * result[2] = a[2] * 1;
2264 * result[3] = 1 * b[3];
2265 */
2266 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2267 SVGA3dShaderDestToken tmp;
2268 const struct src_register src0 = translate_src_register(
2269 emit, &insn->Src[0] );
2270 const struct src_register src1 = translate_src_register(
2271 emit, &insn->Src[1] );
2272 struct src_register zero = get_zero_immediate( emit );
2273 boolean need_tmp = FALSE;
2274
2275 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
2276 alias_src_dst(src0, dst) ||
2277 alias_src_dst(src1, dst))
2278 need_tmp = TRUE;
2279
2280 if (need_tmp) {
2281 tmp = get_temp( emit );
2282 }
2283 else {
2284 tmp = dst;
2285 }
2286
2287 /* tmp.xw = 1.0
2288 */
2289 if (tmp.mask & TGSI_WRITEMASK_XW) {
2290 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2291 writemask(tmp, TGSI_WRITEMASK_XW ),
2292 scalar( zero, 3 )))
2293 return FALSE;
2294 }
2295
2296 /* tmp.yz = src0
2297 */
2298 if (tmp.mask & TGSI_WRITEMASK_YZ) {
2299 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2300 writemask(tmp, TGSI_WRITEMASK_YZ ),
2301 src0))
2302 return FALSE;
2303 }
2304
2305 /* tmp.yw = tmp * src1
2306 */
2307 if (tmp.mask & TGSI_WRITEMASK_YW) {
2308 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2309 writemask(tmp, TGSI_WRITEMASK_YW ),
2310 src(tmp),
2311 src1))
2312 return FALSE;
2313 }
2314
2315 /* dst = tmp
2316 */
2317 if (need_tmp) {
2318 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2319 dst,
2320 src(tmp)))
2321 return FALSE;
2322 }
2323 }
2324
2325 return TRUE;
2326 }
2327
2328
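/**
 * Translate TGSI EXP, which per component computes
 *   x = 2^floor(src.x), y = src.x - floor(src.x),
 *   z = 2^src.x (partial precision), w = 1.0,
 * decomposed here into FRC/ADD/EXP/EXPP/MOV guarded by the writemask.
 */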
2329 static boolean
2330 emit_exp(struct svga_shader_emitter *emit,
2331 const struct tgsi_full_instruction *insn)
2332 {
2333 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2334 struct src_register src0 =
2335 translate_src_register( emit, &insn->Src[0] );
2336 struct src_register zero = get_zero_immediate( emit );
2337 SVGA3dShaderDestToken fraction;
2338
2339 if (dst.mask & TGSI_WRITEMASK_Y)
2340 fraction = dst;
2341 else if (dst.mask & TGSI_WRITEMASK_X)
2342 fraction = get_temp( emit );
2343 else
2344 fraction.value = 0;
2345
2346    /* If x or y is being written, compute the fraction src0 - floor(src0);
2347     * y gets it directly and x uses it below to form floor(src0).  */
2348 if (dst.mask & TGSI_WRITEMASK_XY) {
2349 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2350 writemask( fraction, TGSI_WRITEMASK_Y ),
2351 src0 ))
2352 return FALSE;
2353 }
2354
2355 /* If x is being written, fill it with 2 ^ floor(src0).
2356 */
2357 if (dst.mask & TGSI_WRITEMASK_X) {
2358 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2359 writemask( dst, TGSI_WRITEMASK_X ),
2360 src0,
2361 scalar( negate( src( fraction ) ), TGSI_SWIZZLE_Y ) ) )
2362 return FALSE;
2363
2364 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2365 writemask( dst, TGSI_WRITEMASK_X ),
2366 scalar( src( dst ), TGSI_SWIZZLE_X ) ) )
2367 return FALSE;
2368
2369 if (!(dst.mask & TGSI_WRITEMASK_Y))
2370 release_temp( emit, fraction );
2371 }
2372
2373 /* If z is being written, fill it with 2 ^ src0 (partial precision).
2374 */
2375 if (dst.mask & TGSI_WRITEMASK_Z) {
2376 if (!submit_op1( emit, inst_token( SVGA3DOP_EXPP ),
2377 writemask( dst, TGSI_WRITEMASK_Z ),
2378 src0 ) )
2379 return FALSE;
2380 }
2381
2382 /* If w is being written, fill it with one.
2383 */
2384 if (dst.mask & TGSI_WRITEMASK_W) {
2385 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2386 writemask(dst, TGSI_WRITEMASK_W),
2387 scalar( zero, TGSI_SWIZZLE_W ) ))
2388 return FALSE;
2389 }
2390
2391 return TRUE;
2392 }
2393
2394
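/**
 * Translate TGSI LIT.  Vertex shaders use the native SVGA3D LIT
 * instruction; fragment shaders open-code it with POW, MOV and a
 * predicated move as described in the comment below.
 */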
2395 static boolean
2396 emit_lit(struct svga_shader_emitter *emit,
2397 const struct tgsi_full_instruction *insn)
2398 {
2399 if (emit->unit == PIPE_SHADER_VERTEX) {
2400 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
2401 */
2402 return emit_simple_instruction(emit, SVGA3DOP_LIT, insn);
2403 }
2404 else {
2405       /* D3D vs. GL semantics can be fairly easily accommodated by
2406 * variations on this sequence.
2407 *
2408 * GL:
2409 * tmp.y = src.x
2410 * tmp.z = pow(src.y,src.w)
2411 * p0 = src0.xxxx > 0
2412 * result = zero.wxxw
2413 * (p0) result.yz = tmp
2414 *
2415 * D3D:
2416 * tmp.y = src.x
2417 * tmp.z = pow(src.y,src.w)
2418 * p0 = src0.xxyy > 0
2419 * result = zero.wxxw
2420 * (p0) result.yz = tmp
2421 *
2422 * Will implement the GL version for now.
2423 */
2424 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2425 SVGA3dShaderDestToken tmp = get_temp( emit );
2426 const struct src_register src0 = translate_src_register(
2427 emit, &insn->Src[0] );
2428 struct src_register zero = get_zero_immediate( emit );
2429
2430 /* tmp = pow(src.y, src.w)
2431 */
2432 if (dst.mask & TGSI_WRITEMASK_Z) {
2433 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ),
2434 tmp,
2435 scalar(src0, 1),
2436 scalar(src0, 3)))
2437 return FALSE;
2438 }
2439
2440 /* tmp.y = src.x
2441 */
2442 if (dst.mask & TGSI_WRITEMASK_Y) {
2443 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2444 writemask(tmp, TGSI_WRITEMASK_Y ),
2445 scalar(src0, 0)))
2446 return FALSE;
2447 }
2448
2449       /* Can't quite do this with emit_conditional() due to the extra
2450 * writemask on the predicated mov:
2451 */
2452 {
2453 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
2454 SVGA3dShaderInstToken setp_token, mov_token;
2455 struct src_register predsrc;
2456
2457 setp_token = inst_token( SVGA3DOP_SETP );
2458 mov_token = inst_token( SVGA3DOP_MOV );
2459
2460 setp_token.control = SVGA3DOPCOMP_GT;
2461
2462 /* D3D vs GL semantics:
2463 */
2464 if (0)
2465 predsrc = swizzle(src0, 0, 0, 1, 1); /* D3D */
2466 else
2467 predsrc = swizzle(src0, 0, 0, 0, 0); /* GL */
2468
2469 /* SETP src0.xxyy, GT, {0}.x */
2470 if (!submit_op2( emit, setp_token, pred_reg,
2471 predsrc,
2472 swizzle(zero, 0, 0, 0, 0) ))
2473 return FALSE;
2474
2475 /* MOV dst, fail */
2476 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst,
2477 swizzle(zero, 3, 0, 0, 3 )))
2478 return FALSE;
2479
2480 /* MOV dst.yz, tmp (predicated)
2481 *
2482 * Note that the predicate reg (and possible modifiers) is passed
2483 * as the first source argument.
2484 */
2485 if (dst.mask & TGSI_WRITEMASK_YZ) {
2486 mov_token.predicated = 1;
2487 if (!submit_op2( emit, mov_token,
2488 writemask(dst, TGSI_WRITEMASK_YZ),
2489 src( pred_reg ), src( tmp ) ))
2490 return FALSE;
2491 }
2492 }
2493 }
2494
2495 return TRUE;
2496 }
2497
2498
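/**
 * Translate TGSI EX2 (2^x) using the scalar SVGA3D EXP instruction.
 * If the destination writemask isn't XYZW the result is computed into a
 * temporary and then copied to the real destination.
 */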
2499 static boolean
2500 emit_ex2(struct svga_shader_emitter *emit,
2501 const struct tgsi_full_instruction *insn)
2502 {
2503 SVGA3dShaderInstToken inst;
2504 SVGA3dShaderDestToken dst;
2505 struct src_register src0;
2506
2507 inst = inst_token( SVGA3DOP_EXP );
2508 dst = translate_dst_register( emit, insn, 0 );
2509 src0 = translate_src_register( emit, &insn->Src[0] );
2510 src0 = scalar( src0, TGSI_SWIZZLE_X );
2511
2512 if (dst.mask != TGSI_WRITEMASK_XYZW) {
2513 SVGA3dShaderDestToken tmp = get_temp( emit );
2514
2515 if (!submit_op1( emit, inst, tmp, src0 ))
2516 return FALSE;
2517
2518 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2519 dst,
2520 scalar( src( tmp ), TGSI_SWIZZLE_X ) );
2521 }
2522
2523 return submit_op1( emit, inst, dst, src0 );
2524 }
2525
2526
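/**
 * Translate TGSI LOG, which per component computes
 *   x = floor(log2(|src.x|)), y = |src.x| / 2^floor(log2(|src.x|)),
 *   z = log2(|src.x|), w = 1.0,
 * decomposed here into LOG/FRC/ADD/EXP/MUL guarded by the writemask.
 */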
2527 static boolean
2528 emit_log(struct svga_shader_emitter *emit,
2529 const struct tgsi_full_instruction *insn)
2530 {
2531 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2532 struct src_register src0 =
2533 translate_src_register( emit, &insn->Src[0] );
2534 struct src_register zero = get_zero_immediate( emit );
2535 SVGA3dShaderDestToken abs_tmp;
2536 struct src_register abs_src0;
2537 SVGA3dShaderDestToken log2_abs;
2538
2539 abs_tmp.value = 0;
2540
2541 if (dst.mask & TGSI_WRITEMASK_Z)
2542 log2_abs = dst;
2543 else if (dst.mask & TGSI_WRITEMASK_XY)
2544 log2_abs = get_temp( emit );
2545 else
2546 log2_abs.value = 0;
2547
2548 /* If z is being written, fill it with log2( abs( src0 ) ).
2549 */
2550    /* If x, y or z is being written, compute log2( abs( src0 ) ); z gets it
2551     * directly and the x/y terms are derived from it below.  */
2552 abs_src0 = src0;
2553 else {
2554 abs_tmp = get_temp( emit );
2555
2556 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2557 abs_tmp,
2558 src0 ) )
2559 return FALSE;
2560
2561 abs_src0 = src( abs_tmp );
2562 }
2563
2564 abs_src0 = absolute( scalar( abs_src0, TGSI_SWIZZLE_X ) );
2565
2566 if (!submit_op1( emit, inst_token( SVGA3DOP_LOG ),
2567 writemask( log2_abs, TGSI_WRITEMASK_Z ),
2568 abs_src0 ) )
2569 return FALSE;
2570 }
2571
2572 if (dst.mask & TGSI_WRITEMASK_XY) {
2573 SVGA3dShaderDestToken floor_log2;
2574
2575 if (dst.mask & TGSI_WRITEMASK_X)
2576 floor_log2 = dst;
2577 else
2578 floor_log2 = get_temp( emit );
2579
2580 /* If x is being written, fill it with floor( log2( abs( src0 ) ) ).
2581 */
2582 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2583 writemask( floor_log2, TGSI_WRITEMASK_X ),
2584 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ) ) )
2585 return FALSE;
2586
2587 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2588 writemask( floor_log2, TGSI_WRITEMASK_X ),
2589 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ),
2590 negate( src( floor_log2 ) ) ) )
2591 return FALSE;
2592
2593 /* If y is being written, fill it with
2594 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
2595 */
2596 if (dst.mask & TGSI_WRITEMASK_Y) {
2597 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2598 writemask( dst, TGSI_WRITEMASK_Y ),
2599 negate( scalar( src( floor_log2 ),
2600 TGSI_SWIZZLE_X ) ) ) )
2601 return FALSE;
2602
2603 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2604 writemask( dst, TGSI_WRITEMASK_Y ),
2605 src( dst ),
2606 abs_src0 ) )
2607 return FALSE;
2608 }
2609
2610 if (!(dst.mask & TGSI_WRITEMASK_X))
2611 release_temp( emit, floor_log2 );
2612
2613 if (!(dst.mask & TGSI_WRITEMASK_Z))
2614 release_temp( emit, log2_abs );
2615 }
2616
2617 if (dst.mask & TGSI_WRITEMASK_XYZ && src0.base.srcMod &&
2618 src0.base.srcMod != SVGA3DSRCMOD_ABS)
2619 release_temp( emit, abs_tmp );
2620
2621 /* If w is being written, fill it with one.
2622 */
2623 if (dst.mask & TGSI_WRITEMASK_W) {
2624 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2625 writemask(dst, TGSI_WRITEMASK_W),
2626 scalar( zero, TGSI_SWIZZLE_W ) ))
2627 return FALSE;
2628 }
2629
2630 return TRUE;
2631 }
2632
2633
2634 /**
2635 * Translate TGSI TRUNC or ROUND instruction.
2636 * We need to truncate toward zero. Ex: trunc(-1.9) = -1
2637 * Different approaches are needed for VS versus PS.
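 * Rounding is computed as sign(x) * floor(|x| + 0.5), e.g. round(-1.6) = -2;
 * truncation as sign(x) * (|x| - frac(|x|)).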
2638 */
2639 static boolean
2640 emit_trunc_round(struct svga_shader_emitter *emit,
2641 const struct tgsi_full_instruction *insn,
2642 boolean round)
2643 {
2644 SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
2645 const struct src_register src0 =
2646 translate_src_register(emit, &insn->Src[0] );
2647 SVGA3dShaderDestToken t1 = get_temp(emit);
2648
2649 if (round) {
2650 SVGA3dShaderDestToken t0 = get_temp(emit);
2651 struct src_register half = get_half_immediate(emit);
2652
2653 /* t0 = abs(src0) + 0.5 */
2654 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t0,
2655 absolute(src0), half))
2656 return FALSE;
2657
2658 /* t1 = fract(t0) */
2659 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, src(t0)))
2660 return FALSE;
2661
2662 /* t1 = t0 - t1 */
2663 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, src(t0),
2664 negate(src(t1))))
2665 return FALSE;
2666 }
2667 else {
2668 /* trunc */
2669
2670 /* t1 = fract(abs(src0)) */
2671 if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, absolute(src0)))
2672 return FALSE;
2673
2674 /* t1 = abs(src0) - t1 */
2675 if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, absolute(src0),
2676 negate(src(t1))))
2677 return FALSE;
2678 }
2679
2680 /*
2681 * Now we need to multiply t1 by the sign of the original value.
2682 */
2683 if (emit->unit == PIPE_SHADER_VERTEX) {
2684 /* For VS: use SGN instruction */
2685 /* Need two extra/dummy registers: */
2686 SVGA3dShaderDestToken t2 = get_temp(emit), t3 = get_temp(emit),
2687 t4 = get_temp(emit);
2688
2689 /* t2 = sign(src0) */
2690 if (!submit_op3(emit, inst_token(SVGA3DOP_SGN), t2, src0,
2691 src(t3), src(t4)))
2692 return FALSE;
2693
2694 /* dst = t1 * t2 */
2695 if (!submit_op2(emit, inst_token(SVGA3DOP_MUL), dst, src(t1), src(t2)))
2696 return FALSE;
2697 }
2698 else {
2699 /* For FS: Use CMP instruction */
2700 return submit_op3(emit, inst_token( SVGA3DOP_CMP ), dst,
2701 src0, src(t1), negate(src(t1)));
2702 }
2703
2704 return TRUE;
2705 }
2706
2707
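/**
 * Translate TGSI BGNSUB: end the preceding code with RET, then emit a
 * LABEL using the label index that emit_call() recorded for this
 * instruction position.
 */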
2708 static boolean
2709 emit_bgnsub(struct svga_shader_emitter *emit,
2710 unsigned position,
2711 const struct tgsi_full_instruction *insn)
2712 {
2713 unsigned i;
2714
2715 /* Note that we've finished the main function and are now emitting
2716 * subroutines. This affects how we terminate the generated
2717 * shader.
2718 */
2719 emit->in_main_func = FALSE;
2720
2721 for (i = 0; i < emit->nr_labels; i++) {
2722 if (emit->label[i] == position) {
2723 return (emit_instruction( emit, inst_token( SVGA3DOP_RET ) ) &&
2724 emit_instruction( emit, inst_token( SVGA3DOP_LABEL ) ) &&
2725 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2726 }
2727 }
2728
2729 assert(0);
2730 return TRUE;
2731 }
2732
2733
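/**
 * Translate TGSI CAL: find or allocate a label slot for the callee's
 * instruction position and emit CALL with the matching LABEL register.
 */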
2734 static boolean
2735 emit_call(struct svga_shader_emitter *emit,
2736 const struct tgsi_full_instruction *insn)
2737 {
2738 unsigned position = insn->Label.Label;
2739 unsigned i;
2740
2741 for (i = 0; i < emit->nr_labels; i++) {
2742 if (emit->label[i] == position)
2743 break;
2744 }
2745
2746 if (emit->nr_labels == Elements(emit->label))
2747 return FALSE;
2748
2749 if (i == emit->nr_labels) {
2750 emit->label[i] = position;
2751 emit->nr_labels++;
2752 }
2753
2754 return (emit_instruction( emit, inst_token( SVGA3DOP_CALL ) ) &&
2755 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2756 }
2757
2758
2759 /**
2760 * Called at the end of the shader: emit the special "fix-up" postamble
2761 * code for the vertex or fragment shader.
2762 */
2763 static boolean
2764 emit_end(struct svga_shader_emitter *emit)
2765 {
2766 if (emit->unit == PIPE_SHADER_VERTEX) {
2767 return emit_vs_postamble( emit );
2768 }
2769 else {
2770 return emit_ps_postamble( emit );
2771 }
2772 }
2773
2774
2775
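/**
 * Translate one TGSI instruction, either through a specialized emit_*
 * helper above or, by default, through translate_opcode() and
 * emit_simple_instruction().
 */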
2776 static boolean
2777 svga_emit_instruction(struct svga_shader_emitter *emit,
2778 unsigned position,
2779 const struct tgsi_full_instruction *insn)
2780 {
2781 switch (insn->Instruction.Opcode) {
2782
2783 case TGSI_OPCODE_ARL:
2784 return emit_arl( emit, insn );
2785
2786 case TGSI_OPCODE_TEX:
2787 case TGSI_OPCODE_TXB:
2788 case TGSI_OPCODE_TXP:
2789 case TGSI_OPCODE_TXL:
2790 case TGSI_OPCODE_TXD:
2791 return emit_tex( emit, insn );
2792
2793 case TGSI_OPCODE_DDX:
2794 case TGSI_OPCODE_DDY:
2795 return emit_deriv( emit, insn );
2796
2797 case TGSI_OPCODE_BGNSUB:
2798 return emit_bgnsub( emit, position, insn );
2799
2800 case TGSI_OPCODE_ENDSUB:
2801 return TRUE;
2802
2803 case TGSI_OPCODE_CAL:
2804 return emit_call( emit, insn );
2805
2806 case TGSI_OPCODE_FLR:
2807 return emit_floor( emit, insn );
2808
2809 case TGSI_OPCODE_TRUNC:
2810 return emit_trunc_round( emit, insn, FALSE );
2811
2812 case TGSI_OPCODE_ROUND:
2813 return emit_trunc_round( emit, insn, TRUE );
2814
2815 case TGSI_OPCODE_CEIL:
2816 return emit_ceil( emit, insn );
2817
2818 case TGSI_OPCODE_CMP:
2819 return emit_cmp( emit, insn );
2820
2821 case TGSI_OPCODE_DIV:
2822 return emit_div( emit, insn );
2823
2824 case TGSI_OPCODE_DP2:
2825 return emit_dp2( emit, insn );
2826
2827 case TGSI_OPCODE_DPH:
2828 return emit_dph( emit, insn );
2829
2830 case TGSI_OPCODE_NRM:
2831 return emit_nrm( emit, insn );
2832
2833 case TGSI_OPCODE_COS:
2834 return emit_cos( emit, insn );
2835
2836 case TGSI_OPCODE_SIN:
2837 return emit_sin( emit, insn );
2838
2839 case TGSI_OPCODE_SCS:
2840 return emit_sincos( emit, insn );
2841
2842 case TGSI_OPCODE_END:
2843 /* TGSI always finishes the main func with an END */
2844 return emit_end( emit );
2845
2846 case TGSI_OPCODE_KILL_IF:
2847 return emit_kill_if( emit, insn );
2848
2849 /* Selection opcodes. The underlying language is fairly
2850 * non-orthogonal about these.
2851 */
2852 case TGSI_OPCODE_SEQ:
2853 return emit_select_op( emit, PIPE_FUNC_EQUAL, insn );
2854
2855 case TGSI_OPCODE_SNE:
2856 return emit_select_op( emit, PIPE_FUNC_NOTEQUAL, insn );
2857
2858 case TGSI_OPCODE_SGT:
2859 return emit_select_op( emit, PIPE_FUNC_GREATER, insn );
2860
2861 case TGSI_OPCODE_SGE:
2862 return emit_select_op( emit, PIPE_FUNC_GEQUAL, insn );
2863
2864 case TGSI_OPCODE_SLT:
2865 return emit_select_op( emit, PIPE_FUNC_LESS, insn );
2866
2867 case TGSI_OPCODE_SLE:
2868 return emit_select_op( emit, PIPE_FUNC_LEQUAL, insn );
2869
2870 case TGSI_OPCODE_SUB:
2871 return emit_sub( emit, insn );
2872
2873 case TGSI_OPCODE_POW:
2874 return emit_pow( emit, insn );
2875
2876 case TGSI_OPCODE_EX2:
2877 return emit_ex2( emit, insn );
2878
2879 case TGSI_OPCODE_EXP:
2880 return emit_exp( emit, insn );
2881
2882 case TGSI_OPCODE_LOG:
2883 return emit_log( emit, insn );
2884
2885 case TGSI_OPCODE_LG2:
2886 return emit_scalar_op1( emit, SVGA3DOP_LOG, insn );
2887
2888 case TGSI_OPCODE_RSQ:
2889 return emit_scalar_op1( emit, SVGA3DOP_RSQ, insn );
2890
2891 case TGSI_OPCODE_RCP:
2892 return emit_scalar_op1( emit, SVGA3DOP_RCP, insn );
2893
2894 case TGSI_OPCODE_CONT:
2895 case TGSI_OPCODE_RET:
2896 /* This is a noop -- we tell mesa that we can't support RET
2897 * within a function (early return), so this will always be
2898 * followed by an ENDSUB.
2899 */
2900 return TRUE;
2901
2902 /* These aren't actually used by any of the frontends we care
2903 * about:
2904 */
2905 case TGSI_OPCODE_CLAMP:
2906 case TGSI_OPCODE_AND:
2907 case TGSI_OPCODE_OR:
2908 case TGSI_OPCODE_I2F:
2909 case TGSI_OPCODE_NOT:
2910 case TGSI_OPCODE_SHL:
2911 case TGSI_OPCODE_ISHR:
2912 case TGSI_OPCODE_XOR:
2913 return FALSE;
2914
2915 case TGSI_OPCODE_IF:
2916 return emit_if( emit, insn );
2917 case TGSI_OPCODE_ELSE:
2918 return emit_else( emit, insn );
2919 case TGSI_OPCODE_ENDIF:
2920 return emit_endif( emit, insn );
2921
2922 case TGSI_OPCODE_BGNLOOP:
2923 return emit_bgnloop2( emit, insn );
2924 case TGSI_OPCODE_ENDLOOP:
2925 return emit_endloop2( emit, insn );
2926 case TGSI_OPCODE_BRK:
2927 return emit_brk( emit, insn );
2928
2929 case TGSI_OPCODE_XPD:
2930 return emit_xpd( emit, insn );
2931
2932 case TGSI_OPCODE_KILL:
2933 return emit_kill( emit, insn );
2934
2935 case TGSI_OPCODE_DST:
2936 return emit_dst_insn( emit, insn );
2937
2938 case TGSI_OPCODE_LIT:
2939 return emit_lit( emit, insn );
2940
2941 case TGSI_OPCODE_LRP:
2942 return emit_lrp( emit, insn );
2943
2944 case TGSI_OPCODE_SSG:
2945 return emit_ssg( emit, insn );
2946
2947 default:
2948 {
2949 unsigned opcode = translate_opcode(insn->Instruction.Opcode);
2950
2951 if (opcode == SVGA3DOP_LAST_INST)
2952 return FALSE;
2953
2954 if (!emit_simple_instruction( emit, opcode, insn ))
2955 return FALSE;
2956 }
2957 }
2958
2959 return TRUE;
2960 }
2961
2962
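/**
 * Emit a TGSI immediate as a float constant definition.  Missing
 * components default to (0,0,0,1) and Inf/NaN values are replaced by zero.
 */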
2963 static boolean
2964 svga_emit_immediate(struct svga_shader_emitter *emit,
2965 struct tgsi_full_immediate *imm)
2966 {
2967 static const float id[4] = {0,0,0,1};
2968 float value[4];
2969 unsigned i;
2970
2971 assert(1 <= imm->Immediate.NrTokens && imm->Immediate.NrTokens <= 5);
2972 for (i = 0; i < imm->Immediate.NrTokens - 1; i++) {
2973 float f = imm->u[i].Float;
2974 value[i] = util_is_inf_or_nan(f) ? 0.0f : f;
2975 }
2976
2977 for ( ; i < 4; i++ )
2978 value[i] = id[i];
2979
2980 return emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
2981 emit->imm_start + emit->internal_imm_count++,
2982 value[0], value[1], value[2], value[3]);
2983 }
2984
2985
2986 static boolean
2987 make_immediate(struct svga_shader_emitter *emit,
2988 float a, float b, float c, float d,
2989 struct src_register *out )
2990 {
2991 unsigned idx = emit->nr_hw_float_const++;
2992
2993 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
2994 idx, a, b, c, d ))
2995 return FALSE;
2996
2997 *out = src_register( SVGA3DREG_CONST, idx );
2998
2999 return TRUE;
3000 }
3001
3002
3003 static boolean
3004 emit_vs_preamble(struct svga_shader_emitter *emit)
3005 {
3006 if (!emit->key.vkey.need_prescale) {
3007 if (!make_immediate( emit, 0, 0, .5, .5,
3008 &emit->imm_0055))
3009 return FALSE;
3010 }
3011
3012 return TRUE;
3013 }
3014
3015
3016 static boolean
3017 emit_ps_preamble(struct svga_shader_emitter *emit)
3018 {
3019 if (emit->ps_reads_pos && emit->info.reads_z) {
3020 /*
3021        * Assemble the position from various bits of inputs. Depth and W are
3022        * passed in a texcoord because D3D's vPos does not hold Z or W.
3023        * Also fix up the perspective interpolation.
3024 *
3025 * temp_pos.xy = vPos.xy
3026 * temp_pos.w = rcp(texcoord1.w);
3027 * temp_pos.z = texcoord1.z * temp_pos.w;
3028 */
3029 if (!submit_op1( emit,
3030 inst_token(SVGA3DOP_MOV),
3031 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_XY ),
3032 emit->ps_true_pos ))
3033 return FALSE;
3034
3035 if (!submit_op1( emit,
3036 inst_token(SVGA3DOP_RCP),
3037 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_W ),
3038 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_W ) ))
3039 return FALSE;
3040
3041 if (!submit_op2( emit,
3042 inst_token(SVGA3DOP_MUL),
3043 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_Z ),
3044 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_Z ),
3045 scalar( src(emit->ps_temp_pos), TGSI_SWIZZLE_W ) ))
3046 return FALSE;
3047 }
3048
3049 return TRUE;
3050 }
3051
3052
3053 static boolean
3054 emit_ps_postamble(struct svga_shader_emitter *emit)
3055 {
3056 unsigned i;
3057
3058 /* PS oDepth is incredibly fragile and it's very hard to catch the
3059 * types of usage that break it during shader emit. Easier just to
3060 * redirect the main program to a temporary and then only touch
3061 * oDepth with a hand-crafted MOV below.
3062 */
3063 if (SVGA3dShaderGetRegType(emit->true_pos.value) != 0) {
3064 if (!submit_op1( emit,
3065 inst_token(SVGA3DOP_MOV),
3066 emit->true_pos,
3067 scalar(src(emit->temp_pos), TGSI_SWIZZLE_Z) ))
3068 return FALSE;
3069 }
3070
3071 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
3072 if (SVGA3dShaderGetRegType(emit->true_col[i].value) != 0) {
3073 /* Potentially override output colors with white for XOR
3074 * logicop workaround.
3075 */
3076 if (emit->unit == PIPE_SHADER_FRAGMENT &&
3077 emit->key.fkey.white_fragments) {
3078 struct src_register one = scalar( get_zero_immediate( emit ),
3079 TGSI_SWIZZLE_W );
3080
3081 if (!submit_op1( emit,
3082 inst_token(SVGA3DOP_MOV),
3083 emit->true_col[i],
3084 one ))
3085 return FALSE;
3086 }
3087 else {
3088 if (!submit_op1( emit,
3089 inst_token(SVGA3DOP_MOV),
3090 emit->true_col[i],
3091 src(emit->temp_col[i]) ))
3092 return FALSE;
3093 }
3094 }
3095 }
3096
3097 return TRUE;
3098 }
3099
3100
3101 static boolean
3102 emit_vs_postamble(struct svga_shader_emitter *emit)
3103 {
3104 /* PSIZ output is incredibly fragile and it's very hard to catch
3105 * the types of usage that break it during shader emit. Easier
3106 * just to redirect the main program to a temporary and then only
3107 * touch PSIZ with a hand-crafted MOV below.
3108 */
3109 if (SVGA3dShaderGetRegType(emit->true_psiz.value) != 0) {
3110 if (!submit_op1( emit,
3111 inst_token(SVGA3DOP_MOV),
3112 emit->true_psiz,
3113 scalar(src(emit->temp_psiz), TGSI_SWIZZLE_X) ))
3114 return FALSE;
3115 }
3116
3117 /* Need to perform various manipulations on vertex position to cope
3118 * with the different GL and D3D clip spaces.
3119 */
3120 if (emit->key.vkey.need_prescale) {
3121 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3122 SVGA3dShaderDestToken depth = emit->depth_pos;
3123 SVGA3dShaderDestToken pos = emit->true_pos;
3124 unsigned offset = emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
3125 struct src_register prescale_scale = src_register( SVGA3DREG_CONST,
3126 offset + 0 );
3127 struct src_register prescale_trans = src_register( SVGA3DREG_CONST,
3128 offset + 1 );
3129
3130 if (!submit_op1( emit,
3131 inst_token(SVGA3DOP_MOV),
3132 writemask(depth, TGSI_WRITEMASK_W),
3133 scalar(src(temp_pos), TGSI_SWIZZLE_W) ))
3134 return FALSE;
3135
3136 /* MUL temp_pos.xyz, temp_pos, prescale.scale
3137 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
3138 * --> Note that prescale.trans.w == 0
3139 */
3140 if (!submit_op2( emit,
3141 inst_token(SVGA3DOP_MUL),
3142 writemask(temp_pos, TGSI_WRITEMASK_XYZ),
3143 src(temp_pos),
3144 prescale_scale ))
3145 return FALSE;
3146
3147 if (!submit_op3( emit,
3148 inst_token(SVGA3DOP_MAD),
3149 pos,
3150 swizzle(src(temp_pos), 3, 3, 3, 3),
3151 prescale_trans,
3152 src(temp_pos)))
3153 return FALSE;
3154
3155 /* Also write to depth value */
3156 if (!submit_op3( emit,
3157 inst_token(SVGA3DOP_MAD),
3158 writemask(depth, TGSI_WRITEMASK_Z),
3159 swizzle(src(temp_pos), 3, 3, 3, 3),
3160 prescale_trans,
3161 src(temp_pos) ))
3162 return FALSE;
3163 }
3164 else {
3165 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
3166 SVGA3dShaderDestToken depth = emit->depth_pos;
3167 SVGA3dShaderDestToken pos = emit->true_pos;
3168 struct src_register imm_0055 = emit->imm_0055;
3169
3170 /* Adjust GL clipping coordinate space to hardware (D3D-style):
3171 *
3172 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
3173 * MOV result.position, temp_pos
3174 */
3175 if (!submit_op2( emit,
3176 inst_token(SVGA3DOP_DP4),
3177 writemask(temp_pos, TGSI_WRITEMASK_Z),
3178 imm_0055,
3179 src(temp_pos) ))
3180 return FALSE;
3181
3182 if (!submit_op1( emit,
3183 inst_token(SVGA3DOP_MOV),
3184 pos,
3185 src(temp_pos) ))
3186 return FALSE;
3187
3188 /* Move the manipulated depth into the extra texcoord reg */
3189 if (!submit_op1( emit,
3190 inst_token(SVGA3DOP_MOV),
3191 writemask(depth, TGSI_WRITEMASK_ZW),
3192 src(temp_pos) ))
3193 return FALSE;
3194 }
3195
3196 return TRUE;
3197 }
3198
3199
3200 /**
3201 * For the pixel shader: emit the code which chooses the front
3202 * or back face color depending on triangle orientation.
3203 *
3204 * 0: IF VFACE :4
3205 * 1: COLOR = FrontColor;
3206 * 2: ELSE
3207 * 3: COLOR = BackColor;
3208 * 4: ENDIF
3209 */
3210 static boolean
3211 emit_light_twoside(struct svga_shader_emitter *emit)
3212 {
3213 struct src_register vface, zero;
3214 struct src_register front[2];
3215 struct src_register back[2];
3216 SVGA3dShaderDestToken color[2];
3217 int count = emit->internal_color_count;
3218 int i;
3219 SVGA3dShaderInstToken if_token;
3220
3221 if (count == 0)
3222 return TRUE;
3223
3224 vface = get_vface( emit );
3225 zero = get_zero_immediate( emit );
3226
3227 /* Can't use get_temp() to allocate the color reg as such
3228 * temporaries will be reclaimed after each instruction by the call
3229 * to reset_temp_regs().
3230 */
3231 for (i = 0; i < count; i++) {
3232 color[i] = dst_register( SVGA3DREG_TEMP, emit->nr_hw_temp++ );
3233 front[i] = emit->input_map[emit->internal_color_idx[i]];
3234
3235 /* Back is always the next input:
3236 */
3237 back[i] = front[i];
3238 back[i].base.num = front[i].base.num + 1;
3239
3240 /* Reassign the input_map to the actual front-face color:
3241 */
3242 emit->input_map[emit->internal_color_idx[i]] = src(color[i]);
3243 }
3244
3245 if_token = inst_token( SVGA3DOP_IFC );
3246
3247 if (emit->key.fkey.front_ccw)
3248 if_token.control = SVGA3DOPCOMP_LT;
3249 else
3250 if_token.control = SVGA3DOPCOMP_GT;
3251
3252 zero = scalar(zero, TGSI_SWIZZLE_X);
3253
3254 if (!(emit_instruction( emit, if_token ) &&
3255 emit_src( emit, vface ) &&
3256 emit_src( emit, zero ) ))
3257 return FALSE;
3258
3259 for (i = 0; i < count; i++) {
3260 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], front[i] ))
3261 return FALSE;
3262 }
3263
3264 if (!(emit_instruction( emit, inst_token( SVGA3DOP_ELSE))))
3265 return FALSE;
3266
3267 for (i = 0; i < count; i++) {
3268 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], back[i] ))
3269 return FALSE;
3270 }
3271
3272 if (!emit_instruction( emit, inst_token( SVGA3DOP_ENDIF ) ))
3273 return FALSE;
3274
3275 return TRUE;
3276 }
3277
3278
3279 /**
3280 * 0: SETP_GT TEMP, VFACE, 0
3281 * where TEMP is a fake frontface register
3282 */
3283 static boolean
3284 emit_frontface(struct svga_shader_emitter *emit)
3285 {
3286 struct src_register vface, zero;
3287 SVGA3dShaderDestToken temp;
3288 struct src_register pass, fail;
3289
3290 vface = get_vface( emit );
3291 zero = get_zero_immediate( emit );
3292
3293 /* Can't use get_temp() to allocate the fake frontface reg as such
3294 * temporaries will be reclaimed after each instruction by the call
3295 * to reset_temp_regs().
3296 */
3297 temp = dst_register( SVGA3DREG_TEMP,
3298 emit->nr_hw_temp++ );
3299
3300 if (emit->key.fkey.front_ccw) {
3301 pass = scalar( zero, TGSI_SWIZZLE_X );
3302 fail = scalar( zero, TGSI_SWIZZLE_W );
3303 } else {
3304 pass = scalar( zero, TGSI_SWIZZLE_W );
3305 fail = scalar( zero, TGSI_SWIZZLE_X );
3306 }
3307
3308 if (!emit_conditional(emit, PIPE_FUNC_GREATER,
3309 temp, vface, scalar( zero, TGSI_SWIZZLE_X ),
3310 pass, fail))
3311 return FALSE;
3312
3313 /* Reassign the input_map to the actual front-face color:
3314 */
3315 emit->input_map[emit->internal_frontface_idx] = src(temp);
3316
3317 return TRUE;
3318 }
3319
3320
3321 /**
3322 * Emit code to invert the T component of the incoming texture coordinate.
3323 * This is used for drawing point sprites when
3324 * pipe_rasterizer_state::sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT.
3325 */
3326 static boolean
3327 emit_inverted_texcoords(struct svga_shader_emitter *emit)
3328 {
3329 struct src_register zero = get_zero_immediate(emit);
3330 struct src_register pos_neg_one = get_pos_neg_one_immediate( emit );
3331 unsigned inverted_texcoords = emit->inverted_texcoords;
3332
3333 while (inverted_texcoords) {
3334 const unsigned unit = ffs(inverted_texcoords) - 1;
3335
3336 assert(emit->inverted_texcoords & (1 << unit));
3337
3338 assert(unit < Elements(emit->ps_true_texcoord));
3339
3340 assert(unit < Elements(emit->ps_inverted_texcoord_input));
3341
3342 assert(emit->ps_inverted_texcoord_input[unit]
3343 < Elements(emit->input_map));
3344
3345 /* inverted = coord * (1, -1, 1, 1) + (0, 1, 0, 0) */
3346 if (!submit_op3(emit,
3347 inst_token(SVGA3DOP_MAD),
3348 dst(emit->ps_inverted_texcoord[unit]),
3349 emit->ps_true_texcoord[unit],
3350 swizzle(pos_neg_one, 0, 3, 0, 0), /* (1, -1, 1, 1) */
3351 swizzle(zero, 0, 3, 0, 0))) /* (0, 1, 0, 0) */
3352 return FALSE;
3353
3354 /* Reassign the input_map entry to the new texcoord register */
3355 emit->input_map[emit->ps_inverted_texcoord_input[unit]] =
3356 emit->ps_inverted_texcoord[unit];
3357
3358 inverted_texcoords &= ~(1 << unit);
3359 }
3360
3361 return TRUE;
3362 }
3363
3364
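/**
 * Determine whether the shader needs the helper "zero" immediate constant
 * used above for 0.0/1.0 operands, comparisons, texture swizzles, etc.,
 * so that it can be defined up front by create_zero_immediate().
 */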
3365 static boolean
3366 needs_to_create_zero( struct svga_shader_emitter *emit )
3367 {
3368 unsigned i;
3369
3370 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3371 if (emit->key.fkey.light_twoside)
3372 return TRUE;
3373
3374 if (emit->key.fkey.white_fragments)
3375 return TRUE;
3376
3377 if (emit->emit_frontface)
3378 return TRUE;
3379
3380 if (emit->info.opcode_count[TGSI_OPCODE_DST] >= 1 ||
3381 emit->info.opcode_count[TGSI_OPCODE_SSG] >= 1 ||
3382 emit->info.opcode_count[TGSI_OPCODE_LIT] >= 1)
3383 return TRUE;
3384
3385 if (emit->inverted_texcoords)
3386 return TRUE;
3387
3388 /* look for any PIPE_SWIZZLE_ZERO/ONE terms */
3389 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3390 if (emit->key.fkey.tex[i].swizzle_r > PIPE_SWIZZLE_ALPHA ||
3391 emit->key.fkey.tex[i].swizzle_g > PIPE_SWIZZLE_ALPHA ||
3392 emit->key.fkey.tex[i].swizzle_b > PIPE_SWIZZLE_ALPHA ||
3393 emit->key.fkey.tex[i].swizzle_a > PIPE_SWIZZLE_ALPHA)
3394 return TRUE;
3395 }
3396
3397 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3398 if (emit->key.fkey.tex[i].compare_mode
3399 == PIPE_TEX_COMPARE_R_TO_TEXTURE)
3400 return TRUE;
3401 }
3402 }
3403
3404 if (emit->unit == PIPE_SHADER_VERTEX) {
3405 if (emit->info.opcode_count[TGSI_OPCODE_CMP] >= 1)
3406 return TRUE;
3407 }
3408
3409 if (emit->info.opcode_count[TGSI_OPCODE_IF] >= 1 ||
3410 emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1 ||
3411 emit->info.opcode_count[TGSI_OPCODE_DDX] >= 1 ||
3412 emit->info.opcode_count[TGSI_OPCODE_DDY] >= 1 ||
3413 emit->info.opcode_count[TGSI_OPCODE_ROUND] >= 1 ||
3414 emit->info.opcode_count[TGSI_OPCODE_SGE] >= 1 ||
3415 emit->info.opcode_count[TGSI_OPCODE_SGT] >= 1 ||
3416 emit->info.opcode_count[TGSI_OPCODE_SLE] >= 1 ||
3417 emit->info.opcode_count[TGSI_OPCODE_SLT] >= 1 ||
3418 emit->info.opcode_count[TGSI_OPCODE_SNE] >= 1 ||
3419 emit->info.opcode_count[TGSI_OPCODE_SEQ] >= 1 ||
3420 emit->info.opcode_count[TGSI_OPCODE_EXP] >= 1 ||
3421 emit->info.opcode_count[TGSI_OPCODE_LOG] >= 1 ||
3422 emit->info.opcode_count[TGSI_OPCODE_XPD] >= 1 ||
3423 emit->info.opcode_count[TGSI_OPCODE_KILL] >= 1)
3424 return TRUE;
3425
3426 return FALSE;
3427 }
3428
3429
3430 static boolean
3431 needs_to_create_loop_const( struct svga_shader_emitter *emit )
3432 {
3433 return (emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1);
3434 }
3435
3436
3437 static boolean
3438 needs_to_create_arl_consts( struct svga_shader_emitter *emit )
3439 {
3440 return (emit->num_arl_consts > 0);
3441 }
3442
3443
3444 static boolean
3445 pre_parse_add_indirect( struct svga_shader_emitter *emit,
3446 int num, int current_arl)
3447 {
3448 int i;
3449 assert(num < 0);
3450
3451 for (i = 0; i < emit->num_arl_consts; ++i) {
3452 if (emit->arl_consts[i].arl_num == current_arl)
3453 break;
3454 }
3455 /* new entry */
3456 if (emit->num_arl_consts == i) {
3457 ++emit->num_arl_consts;
3458 }
3459 emit->arl_consts[i].number = (emit->arl_consts[i].number > num) ?
3460 num :
3461 emit->arl_consts[i].number;
3462 emit->arl_consts[i].arl_num = current_arl;
3463 return TRUE;
3464 }
3465
3466
3467 static boolean
3468 pre_parse_instruction( struct svga_shader_emitter *emit,
3469 const struct tgsi_full_instruction *insn,
3470 int current_arl)
3471 {
3472 if (insn->Src[0].Register.Indirect &&
3473 insn->Src[0].Indirect.File == TGSI_FILE_ADDRESS) {
3474 const struct tgsi_full_src_register *reg = &insn->Src[0];
3475 if (reg->Register.Index < 0) {
3476 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3477 }
3478 }
3479
3480 if (insn->Src[1].Register.Indirect &&
3481 insn->Src[1].Indirect.File == TGSI_FILE_ADDRESS) {
3482 const struct tgsi_full_src_register *reg = &insn->Src[1];
3483 if (reg->Register.Index < 0) {
3484 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3485 }
3486 }
3487
3488 if (insn->Src[2].Register.Indirect &&
3489 insn->Src[2].Indirect.File == TGSI_FILE_ADDRESS) {
3490 const struct tgsi_full_src_register *reg = &insn->Src[2];
3491 if (reg->Register.Index < 0) {
3492 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3493 }
3494 }
3495
3496 return TRUE;
3497 }
3498
3499
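/**
 * Pre-scan the TGSI tokens before emission, recording ARL results that are
 * used with negative indirect constant indices so the corresponding
 * adjustment constants can be created later by create_arl_consts().
 */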
3500 static boolean
3501 pre_parse_tokens( struct svga_shader_emitter *emit,
3502 const struct tgsi_token *tokens )
3503 {
3504 struct tgsi_parse_context parse;
3505 int current_arl = 0;
3506
3507 tgsi_parse_init( &parse, tokens );
3508
3509 while (!tgsi_parse_end_of_tokens( &parse )) {
3510 tgsi_parse_token( &parse );
3511 switch (parse.FullToken.Token.Type) {
3512 case TGSI_TOKEN_TYPE_IMMEDIATE:
3513 case TGSI_TOKEN_TYPE_DECLARATION:
3514 break;
3515 case TGSI_TOKEN_TYPE_INSTRUCTION:
3516 if (parse.FullToken.FullInstruction.Instruction.Opcode ==
3517 TGSI_OPCODE_ARL) {
3518 ++current_arl;
3519 }
3520 if (!pre_parse_instruction( emit, &parse.FullToken.FullInstruction,
3521 current_arl ))
3522 return FALSE;
3523 break;
3524 default:
3525 break;
3526 }
3527
3528 }
3529 return TRUE;
3530 }
3531
3532
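/**
 * Emit the helper constants and fix-up code (zero/loop/ARL constants,
 * two-sided lighting, front-face and inverted-texcoord setup) that the
 * main shader body depends on.
 */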
3533 static boolean
3534 svga_shader_emit_helpers(struct svga_shader_emitter *emit)
3535 {
3536 if (needs_to_create_zero( emit )) {
3537 create_zero_immediate( emit );
3538 }
3539 if (needs_to_create_loop_const( emit )) {
3540 create_loop_const( emit );
3541 }
3542 if (needs_to_create_arl_consts( emit )) {
3543 create_arl_consts( emit );
3544 }
3545
3546 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3547 if (!emit_ps_preamble( emit ))
3548 return FALSE;
3549
3550 if (emit->key.fkey.light_twoside) {
3551 if (!emit_light_twoside( emit ))
3552 return FALSE;
3553 }
3554 if (emit->emit_frontface) {
3555 if (!emit_frontface( emit ))
3556 return FALSE;
3557 }
3558 if (emit->inverted_texcoords) {
3559 if (!emit_inverted_texcoords( emit ))
3560 return FALSE;
3561 }
3562 }
3563
3564 return TRUE;
3565 }
3566
3567
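/**
 * Translate a complete TGSI token stream into SVGA3D form: emit the
 * preamble, immediates, declarations and instructions, then terminate
 * with RET (if subroutines were emitted) and END.
 */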
3568 boolean
3569 svga_shader_emit_instructions(struct svga_shader_emitter *emit,
3570 const struct tgsi_token *tokens)
3571 {
3572 struct tgsi_parse_context parse;
3573 boolean ret = TRUE;
3574 boolean helpers_emitted = FALSE;
3575 unsigned line_nr = 0;
3576
3577 tgsi_parse_init( &parse, tokens );
3578 emit->internal_imm_count = 0;
3579
3580 if (emit->unit == PIPE_SHADER_VERTEX) {
3581 ret = emit_vs_preamble( emit );
3582 if (!ret)
3583 goto done;
3584 }
3585
3586 pre_parse_tokens(emit, tokens);
3587
3588 while (!tgsi_parse_end_of_tokens( &parse )) {
3589 tgsi_parse_token( &parse );
3590
3591 switch (parse.FullToken.Token.Type) {
3592 case TGSI_TOKEN_TYPE_IMMEDIATE:
3593 ret = svga_emit_immediate( emit, &parse.FullToken.FullImmediate );
3594 if (!ret)
3595 goto done;
3596 break;
3597
3598 case TGSI_TOKEN_TYPE_DECLARATION:
3599 ret = svga_translate_decl_sm30( emit, &parse.FullToken.FullDeclaration );
3600 if (!ret)
3601 goto done;
3602 break;
3603
3604 case TGSI_TOKEN_TYPE_INSTRUCTION:
3605 if (!helpers_emitted) {
3606 if (!svga_shader_emit_helpers( emit ))
3607 goto done;
3608 helpers_emitted = TRUE;
3609 }
3610 ret = svga_emit_instruction( emit,
3611 line_nr++,
3612 &parse.FullToken.FullInstruction );
3613 if (!ret)
3614 goto done;
3615 break;
3616 default:
3617 break;
3618 }
3619
3620 reset_temp_regs( emit );
3621 }
3622
3623    /* Need to terminate the current subroutine.  Note that the
3624     * hardware doesn't tolerate shaders whose sub-routines aren't
3625     * terminated with RET+END.
3626 */
3627 if (!emit->in_main_func) {
3628 ret = emit_instruction( emit, inst_token( SVGA3DOP_RET ) );
3629 if (!ret)
3630 goto done;
3631 }
3632
3633 assert(emit->dynamic_branching_level == 0);
3634
3635 /* Need to terminate the whole shader:
3636 */
3637 ret = emit_instruction( emit, inst_token( SVGA3DOP_END ) );
3638 if (!ret)
3639 goto done;
3640
3641 done:
3642 tgsi_parse_free( &parse );
3643 return ret;
3644 }