0fc385a8df6e556f908549552fdf7ece1328e9da
[mesa.git] / src / gallium / drivers / svga / svga_tgsi_insn.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_dump.h"
29 #include "tgsi/tgsi_parse.h"
30 #include "util/u_memory.h"
31 #include "util/u_math.h"
32
33 #include "svga_tgsi_emit.h"
34 #include "svga_context.h"
35
36
37 static boolean emit_vs_postamble( struct svga_shader_emitter *emit );
38 static boolean emit_ps_postamble( struct svga_shader_emitter *emit );
39
40
41 static unsigned
42 translate_opcode(uint opcode)
43 {
44 switch (opcode) {
45 case TGSI_OPCODE_ABS: return SVGA3DOP_ABS;
46 case TGSI_OPCODE_ADD: return SVGA3DOP_ADD;
47 case TGSI_OPCODE_DP2A: return SVGA3DOP_DP2ADD;
48 case TGSI_OPCODE_DP3: return SVGA3DOP_DP3;
49 case TGSI_OPCODE_DP4: return SVGA3DOP_DP4;
50 case TGSI_OPCODE_FRC: return SVGA3DOP_FRC;
51 case TGSI_OPCODE_MAD: return SVGA3DOP_MAD;
52 case TGSI_OPCODE_MAX: return SVGA3DOP_MAX;
53 case TGSI_OPCODE_MIN: return SVGA3DOP_MIN;
54 case TGSI_OPCODE_MOV: return SVGA3DOP_MOV;
55 case TGSI_OPCODE_MUL: return SVGA3DOP_MUL;
56 case TGSI_OPCODE_NOP: return SVGA3DOP_NOP;
57 case TGSI_OPCODE_NRM4: return SVGA3DOP_NRM;
58 default:
59 debug_printf("Unkown opcode %u\n", opcode);
60 assert( 0 );
61 return SVGA3DOP_LAST_INST;
62 }
63 }
64
65
66 static unsigned
67 translate_file(unsigned file)
68 {
69 switch (file) {
70 case TGSI_FILE_TEMPORARY: return SVGA3DREG_TEMP;
71 case TGSI_FILE_INPUT: return SVGA3DREG_INPUT;
72 case TGSI_FILE_OUTPUT: return SVGA3DREG_OUTPUT; /* VS3.0+ only */
73 case TGSI_FILE_IMMEDIATE: return SVGA3DREG_CONST;
74 case TGSI_FILE_CONSTANT: return SVGA3DREG_CONST;
75 case TGSI_FILE_SAMPLER: return SVGA3DREG_SAMPLER;
76 case TGSI_FILE_ADDRESS: return SVGA3DREG_ADDR;
77 default:
78 assert( 0 );
79 return SVGA3DREG_TEMP;
80 }
81 }
82
83
/**
 * Translate the idx-th TGSI destination register of an instruction
 * into an SVGA3D destination token, including the write mask and the
 * saturate modifier.
 *
 * \param emit  shader emitter state (provides the output register map)
 * \param insn  the full TGSI instruction being translated
 * \param idx   which destination operand of the instruction to translate
 */
static SVGA3dShaderDestToken
translate_dst_register( struct svga_shader_emitter *emit,
                        const struct tgsi_full_instruction *insn,
                        unsigned idx )
{
   const struct tgsi_full_dst_register *reg = &insn->Dst[idx];
   SVGA3dShaderDestToken dest;

   switch (reg->Register.File) {
   case TGSI_FILE_OUTPUT:
      /* Output registers encode semantic information in their name.
       * Need to lookup a table built at decl time:
       */
      dest = emit->output_map[reg->Register.Index];
      break;

   default:
      {
         /* Clamp the index so we stay in range even when the assert
          * below is compiled out in release builds.
          */
         unsigned index = reg->Register.Index;
         assert(index < SVGA3D_TEMPREG_MAX);
         index = MIN2(index, SVGA3D_TEMPREG_MAX - 1);
         dest = dst_register(translate_file(reg->Register.File), index);
      }
      break;
   }

   /* Relative addressing of destination registers is not implemented;
    * warn instead of emitting bad bytecode.
    */
   if (reg->Register.Indirect) {
      debug_warning("Indirect indexing of dest registers is not supported!\n");
   }

   dest.mask = reg->Register.WriteMask;
   assert(dest.mask);

   if (insn->Instruction.Saturate)
      dest.dstMod = SVGA3DDSTMOD_SATURATE;

   return dest;
}
122
123
124 /**
125 * Apply a swizzle to a src_register, returning a new src_register
126 * Ex: swizzle(SRC.ZZYY, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y)
127 * would return SRC.YYZZ
128 */
129 static struct src_register
130 swizzle(struct src_register src,
131 unsigned x, unsigned y, unsigned z, unsigned w)
132 {
133 assert(x < 4);
134 assert(y < 4);
135 assert(z < 4);
136 assert(w < 4);
137 x = (src.base.swizzle >> (x * 2)) & 0x3;
138 y = (src.base.swizzle >> (y * 2)) & 0x3;
139 z = (src.base.swizzle >> (z * 2)) & 0x3;
140 w = (src.base.swizzle >> (w * 2)) & 0x3;
141
142 src.base.swizzle = TRANSLATE_SWIZZLE(x, y, z, w);
143
144 return src;
145 }
146
147
/**
 * Apply a "scalar" swizzle to a src_register returning a new
 * src_register where all the swizzle terms are the same.
 * Ex: scalar(SRC.WZYX, SWIZZLE_Y) would return SRC.ZZZZ
 *
 * \param comp  the component (0..3) to replicate across all channels
 */
static struct src_register
scalar(struct src_register src, unsigned comp)
{
   assert(comp < 4);
   return swizzle( src, comp, comp, comp, comp );
}
159
160
161 static boolean
162 svga_arl_needs_adjustment( const struct svga_shader_emitter *emit )
163 {
164 int i;
165
166 for (i = 0; i < emit->num_arl_consts; ++i) {
167 if (emit->arl_consts[i].arl_num == emit->current_arl)
168 return TRUE;
169 }
170 return FALSE;
171 }
172
173
174 static int
175 svga_arl_adjustment( const struct svga_shader_emitter *emit )
176 {
177 int i;
178
179 for (i = 0; i < emit->num_arl_consts; ++i) {
180 if (emit->arl_consts[i].arl_num == emit->current_arl)
181 return emit->arl_consts[i].number;
182 }
183 return 0;
184 }
185
186
/**
 * Translate a TGSI source register into an SVGA3D source token,
 * handling the register file mapping, relative (indirect) addressing,
 * the swizzle, and the negate/absolute modifiers.
 */
static struct src_register
translate_src_register( const struct svga_shader_emitter *emit,
                        const struct tgsi_full_src_register *reg )
{
   struct src_register src;

   switch (reg->Register.File) {
   case TGSI_FILE_INPUT:
      /* Input registers are referred to by their semantic name rather
       * than by index.  Use the mapping built up from the decls:
       */
      src = emit->input_map[reg->Register.Index];
      break;

   case TGSI_FILE_IMMEDIATE:
      /* Immediates are appended after TGSI constants in the D3D
       * constant buffer.
       */
      src = src_register( translate_file( reg->Register.File ),
                          reg->Register.Index + emit->imm_start );
      break;

   default:
      src = src_register( translate_file( reg->Register.File ),
                          reg->Register.Index );
      break;
   }

   /* Indirect addressing.
    */
   if (reg->Register.Indirect) {
      if (emit->unit == PIPE_SHADER_FRAGMENT) {
         /* Pixel shaders have only loop registers for relative
          * addressing into inputs. Ignore the redundant address
          * register, the contents of aL should be in sync with it.
          */
         if (reg->Register.File == TGSI_FILE_INPUT) {
            src.base.relAddr = 1;
            src.indirect = src_token(SVGA3DREG_LOOP, 0);
         }
      }
      else {
         /* Constant buffers only.
          */
         if (reg->Register.File == TGSI_FILE_CONSTANT) {
            /* we shift the offset towards the minimum */
            if (svga_arl_needs_adjustment( emit )) {
               src.base.num -= svga_arl_adjustment( emit );
            }
            src.base.relAddr = 1;

            /* Not really sure what should go in the second token:
             */
            src.indirect = src_token( SVGA3DREG_ADDR,
                                      reg->Indirect.Index );

            src.indirect.swizzle = SWIZZLE_XXXX;
         }
      }
   }

   /* Fold the TGSI swizzle into whatever swizzle the mapped register
    * already carried (e.g. from the input map).
    */
   src = swizzle( src,
                  reg->Register.SwizzleX,
                  reg->Register.SwizzleY,
                  reg->Register.SwizzleZ,
                  reg->Register.SwizzleW );

   /* src.mod isn't a bitfield, unfortunately:
    * See tgsi_util_get_full_src_register_sign_mode for implementation details.
    */
   if (reg->Register.Absolute) {
      if (reg->Register.Negate)
         src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
      else
         src.base.srcMod = SVGA3DSRCMOD_ABS;
   }
   else {
      if (reg->Register.Negate)
         src.base.srcMod = SVGA3DSRCMOD_NEG;
      else
         src.base.srcMod = SVGA3DSRCMOD_NONE;
   }

   return src;
}
272
273
274 /*
275 * Get a temporary register.
276 * Note: if we exceed the temporary register limit we just use
277 * register SVGA3D_TEMPREG_MAX - 1.
278 */
279 static SVGA3dShaderDestToken
280 get_temp( struct svga_shader_emitter *emit )
281 {
282 int i = emit->nr_hw_temp + emit->internal_temp_count++;
283 assert(i < SVGA3D_TEMPREG_MAX);
284 i = MIN2(i, SVGA3D_TEMPREG_MAX - 1);
285 return dst_register( SVGA3DREG_TEMP, i );
286 }
287
288
/**
 * Release a single temp.  Currently only effective if it was the last
 * allocated temp, otherwise release will be delayed until the next
 * call to reset_temp_regs().
 *
 * NOTE(review): get_temp() numbers temps as nr_hw_temp +
 * internal_temp_count, but this comparison uses internal_temp_count
 * alone, so it only matches when nr_hw_temp is zero -- verify whether
 * the nr_hw_temp offset should be included here.
 */
static void
release_temp( struct svga_shader_emitter *emit,
              SVGA3dShaderDestToken temp )
{
   if (temp.num == emit->internal_temp_count - 1)
      emit->internal_temp_count--;
}
301
302
/** Release all temporaries allocated via get_temp() since the last reset. */
static void
reset_temp_regs(struct svga_shader_emitter *emit)
{
   emit->internal_temp_count = 0;
}
308
309
310 /** Emit bytecode for a src_register */
311 static boolean
312 emit_src(struct svga_shader_emitter *emit, const struct src_register src)
313 {
314 if (src.base.relAddr) {
315 assert(src.base.reserved0);
316 assert(src.indirect.reserved0);
317 return (svga_shader_emit_dword( emit, src.base.value ) &&
318 svga_shader_emit_dword( emit, src.indirect.value ));
319 }
320 else {
321 assert(src.base.reserved0);
322 return svga_shader_emit_dword( emit, src.base.value );
323 }
324 }
325
326
327 /** Emit bytecode for a dst_register */
328 static boolean
329 emit_dst(struct svga_shader_emitter *emit, SVGA3dShaderDestToken dest)
330 {
331 assert(dest.reserved0);
332 assert(dest.mask);
333 return svga_shader_emit_dword( emit, dest.value );
334 }
335
336
337 /** Emit bytecode for a 1-operand instruction */
338 static boolean
339 emit_op1(struct svga_shader_emitter *emit,
340 SVGA3dShaderInstToken inst,
341 SVGA3dShaderDestToken dest,
342 struct src_register src0)
343 {
344 return (emit_instruction(emit, inst) &&
345 emit_dst(emit, dest) &&
346 emit_src(emit, src0));
347 }
348
349
350 /** Emit bytecode for a 2-operand instruction */
351 static boolean
352 emit_op2(struct svga_shader_emitter *emit,
353 SVGA3dShaderInstToken inst,
354 SVGA3dShaderDestToken dest,
355 struct src_register src0,
356 struct src_register src1)
357 {
358 return (emit_instruction(emit, inst) &&
359 emit_dst(emit, dest) &&
360 emit_src(emit, src0) &&
361 emit_src(emit, src1));
362 }
363
364
365 /** Emit bytecode for a 3-operand instruction */
366 static boolean
367 emit_op3(struct svga_shader_emitter *emit,
368 SVGA3dShaderInstToken inst,
369 SVGA3dShaderDestToken dest,
370 struct src_register src0,
371 struct src_register src1,
372 struct src_register src2)
373 {
374 return (emit_instruction(emit, inst) &&
375 emit_dst(emit, dest) &&
376 emit_src(emit, src0) &&
377 emit_src(emit, src1) &&
378 emit_src(emit, src2));
379 }
380
381
382 /** Emit bytecode for a 4-operand instruction */
383 static boolean
384 emit_op4(struct svga_shader_emitter *emit,
385 SVGA3dShaderInstToken inst,
386 SVGA3dShaderDestToken dest,
387 struct src_register src0,
388 struct src_register src1,
389 struct src_register src2,
390 struct src_register src3)
391 {
392 return (emit_instruction(emit, inst) &&
393 emit_dst(emit, dest) &&
394 emit_src(emit, src0) &&
395 emit_src(emit, src1) &&
396 emit_src(emit, src2) &&
397 emit_src(emit, src3));
398 }
399
400
/**
 * Apply the absolute value modifier to the given src_register, returning
 * a new src_register.
 * Note: any previously set modifier (including negation) is overwritten.
 */
static struct src_register
absolute(struct src_register src)
{
   src.base.srcMod = SVGA3DSRCMOD_ABS;
   return src;
}
411
412
413 /**
414 * Apply the negation modifier to the given src_register, returning
415 * a new src_register.
416 */
417 static struct src_register
418 negate(struct src_register src)
419 {
420 switch (src.base.srcMod) {
421 case SVGA3DSRCMOD_ABS:
422 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
423 break;
424 case SVGA3DSRCMOD_ABSNEG:
425 src.base.srcMod = SVGA3DSRCMOD_ABS;
426 break;
427 case SVGA3DSRCMOD_NEG:
428 src.base.srcMod = SVGA3DSRCMOD_NONE;
429 break;
430 case SVGA3DSRCMOD_NONE:
431 src.base.srcMod = SVGA3DSRCMOD_NEG;
432 break;
433 }
434 return src;
435 }
436
437
438
/* Replace the src with the temporary specified in the dst, but copying
 * only the necessary channels, and preserving the original swizzle (which is
 * important given that several opcodes have constraints in the allowed
 * swizzles).
 */
static boolean
emit_repl(struct svga_shader_emitter *emit,
          SVGA3dShaderDestToken dst,
          struct src_register *src0)
{
   unsigned src0_swizzle;
   unsigned chan;

   assert(SVGA3dShaderGetRegType(dst.value) == SVGA3DREG_TEMP);

   src0_swizzle = src0->base.swizzle;

   /* Build a writemask covering exactly the channels the swizzle reads. */
   dst.mask = 0;
   for (chan = 0; chan < 4; ++chan) {
      unsigned swizzle = (src0_swizzle >> (chan *2)) & 0x3;
      dst.mask |= 1 << swizzle;
   }
   assert(dst.mask);

   /* Copy with an identity swizzle, then reapply the original one below. */
   src0->base.swizzle = SVGA3DSWIZZLE_NONE;

   if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, *src0 ))
      return FALSE;

   /* Redirect the caller's source at the temp, restoring the swizzle. */
   *src0 = src( dst );
   src0->base.swizzle = src0_swizzle;

   return TRUE;
}
473
474
475 static boolean
476 submit_op0(struct svga_shader_emitter *emit,
477 SVGA3dShaderInstToken inst,
478 SVGA3dShaderDestToken dest)
479 {
480 return (emit_instruction( emit, inst ) &&
481 emit_dst( emit, dest ));
482 }
483
484
485 static boolean
486 submit_op1(struct svga_shader_emitter *emit,
487 SVGA3dShaderInstToken inst,
488 SVGA3dShaderDestToken dest,
489 struct src_register src0)
490 {
491 return emit_op1( emit, inst, dest, src0 );
492 }
493
494
/**
 * SVGA shaders may not refer to >1 constant register in a single
 * instruction.  This function checks for that usage and inserts a
 * move to temporary if detected.
 *
 * The same applies to input registers -- at most a single input
 * register may be read by any instruction.
 */
static boolean
submit_op2(struct svga_shader_emitter *emit,
           SVGA3dShaderInstToken inst,
           SVGA3dShaderDestToken dest,
           struct src_register src0,
           struct src_register src1)
{
   SVGA3dShaderDestToken temp;
   SVGA3dShaderRegType type0, type1;
   boolean need_temp = FALSE;

   temp.value = 0;
   type0 = SVGA3dShaderGetRegType( src0.base.value );
   type1 = SVGA3dShaderGetRegType( src1.base.value );

   /* Two different constant registers in one instruction: copy src0
    * through a temp first.
    */
   if (type0 == SVGA3DREG_CONST &&
       type1 == SVGA3DREG_CONST &&
       src0.base.num != src1.base.num)
      need_temp = TRUE;

   /* Likewise for two different input registers. */
   if (type0 == SVGA3DREG_INPUT &&
       type1 == SVGA3DREG_INPUT &&
       src0.base.num != src1.base.num)
      need_temp = TRUE;

   if (need_temp) {
      temp = get_temp( emit );

      if (!emit_repl( emit, temp, &src0 ))
         return FALSE;
   }

   if (!emit_op2( emit, inst, dest, src0, src1 ))
      return FALSE;

   if (need_temp)
      release_temp( emit, temp );

   return TRUE;
}
543
544
/**
 * SVGA shaders may not refer to >1 constant register in a single
 * instruction.  This function checks for that usage and inserts a
 * move to temporary if detected.
 */
static boolean
submit_op3(struct svga_shader_emitter *emit,
           SVGA3dShaderInstToken inst,
           SVGA3dShaderDestToken dest,
           struct src_register src0,
           struct src_register src1,
           struct src_register src2)
{
   SVGA3dShaderDestToken temp0;
   SVGA3dShaderDestToken temp1;
   boolean need_temp0 = FALSE;
   boolean need_temp1 = FALSE;
   SVGA3dShaderRegType type0, type1, type2;

   temp0.value = 0;
   temp1.value = 0;
   type0 = SVGA3dShaderGetRegType( src0.base.value );
   type1 = SVGA3dShaderGetRegType( src1.base.value );
   type2 = SVGA3dShaderGetRegType( src2.base.value );

   /* NOTE(review): SINCOS is exempted from the constant-collision
    * rewrite below -- presumably because its extra const operands must
    * be passed through verbatim; confirm against the SM2 SINCOS spec.
    */
   if (inst.op != SVGA3DOP_SINCOS) {
      if (type0 == SVGA3DREG_CONST &&
          ((type1 == SVGA3DREG_CONST && src0.base.num != src1.base.num) ||
           (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
         need_temp0 = TRUE;

      if (type1 == SVGA3DREG_CONST &&
          (type2 == SVGA3DREG_CONST && src1.base.num != src2.base.num))
         need_temp1 = TRUE;
   }

   /* Same single-register rule for inputs, with no opcode exemption. */
   if (type0 == SVGA3DREG_INPUT &&
       ((type1 == SVGA3DREG_INPUT && src0.base.num != src1.base.num) ||
        (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
      need_temp0 = TRUE;

   if (type1 == SVGA3DREG_INPUT &&
       (type2 == SVGA3DREG_INPUT && src1.base.num != src2.base.num))
      need_temp1 = TRUE;

   if (need_temp0) {
      temp0 = get_temp( emit );

      if (!emit_repl( emit, temp0, &src0 ))
         return FALSE;
   }

   if (need_temp1) {
      temp1 = get_temp( emit );

      if (!emit_repl( emit, temp1, &src1 ))
         return FALSE;
   }

   if (!emit_op3( emit, inst, dest, src0, src1, src2 ))
      return FALSE;

   /* Release in reverse allocation order (see release_temp()). */
   if (need_temp1)
      release_temp( emit, temp1 );
   if (need_temp0)
      release_temp( emit, temp0 );
   return TRUE;
}
613
614
615 /**
616 * SVGA shaders may not refer to >1 constant register in a single
617 * instruction. This function checks for that usage and inserts a
618 * move to temporary if detected.
619 */
620 static boolean
621 submit_op4(struct svga_shader_emitter *emit,
622 SVGA3dShaderInstToken inst,
623 SVGA3dShaderDestToken dest,
624 struct src_register src0,
625 struct src_register src1,
626 struct src_register src2,
627 struct src_register src3)
628 {
629 SVGA3dShaderDestToken temp0;
630 SVGA3dShaderDestToken temp3;
631 boolean need_temp0 = FALSE;
632 boolean need_temp3 = FALSE;
633 SVGA3dShaderRegType type0, type1, type2, type3;
634
635 temp0.value = 0;
636 temp3.value = 0;
637 type0 = SVGA3dShaderGetRegType( src0.base.value );
638 type1 = SVGA3dShaderGetRegType( src1.base.value );
639 type2 = SVGA3dShaderGetRegType( src2.base.value );
640 type3 = SVGA3dShaderGetRegType( src2.base.value );
641
642 /* Make life a little easier - this is only used by the TXD
643 * instruction which is guaranteed not to have a constant/input reg
644 * in one slot at least:
645 */
646 assert(type1 == SVGA3DREG_SAMPLER);
647
648 if (type0 == SVGA3DREG_CONST &&
649 ((type3 == SVGA3DREG_CONST && src0.base.num != src3.base.num) ||
650 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
651 need_temp0 = TRUE;
652
653 if (type3 == SVGA3DREG_CONST &&
654 (type2 == SVGA3DREG_CONST && src3.base.num != src2.base.num))
655 need_temp3 = TRUE;
656
657 if (type0 == SVGA3DREG_INPUT &&
658 ((type3 == SVGA3DREG_INPUT && src0.base.num != src3.base.num) ||
659 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
660 need_temp0 = TRUE;
661
662 if (type3 == SVGA3DREG_INPUT &&
663 (type2 == SVGA3DREG_INPUT && src3.base.num != src2.base.num))
664 need_temp3 = TRUE;
665
666 if (need_temp0) {
667 temp0 = get_temp( emit );
668
669 if (!emit_repl( emit, temp0, &src0 ))
670 return FALSE;
671 }
672
673 if (need_temp3) {
674 temp3 = get_temp( emit );
675
676 if (!emit_repl( emit, temp3, &src3 ))
677 return FALSE;
678 }
679
680 if (!emit_op4( emit, inst, dest, src0, src1, src2, src3 ))
681 return FALSE;
682
683 if (need_temp3)
684 release_temp( emit, temp3 );
685 if (need_temp0)
686 release_temp( emit, temp0 );
687 return TRUE;
688 }
689
690
691 /**
692 * Do the src and dest registers refer to the same register?
693 */
694 static boolean
695 alias_src_dst(struct src_register src,
696 SVGA3dShaderDestToken dst)
697 {
698 if (src.base.num != dst.num)
699 return FALSE;
700
701 if (SVGA3dShaderGetRegType(dst.value) !=
702 SVGA3dShaderGetRegType(src.base.value))
703 return FALSE;
704
705 return TRUE;
706 }
707
708
/**
 * Emit an LRP (linear interpolation) instruction, working around the
 * hardware restriction that LRP's destination must be a temp register
 * distinct from src0 and src2: route the result through a fresh temp
 * and MOV it to the real destination when necessary.
 */
static boolean
submit_lrp(struct svga_shader_emitter *emit,
           SVGA3dShaderDestToken dst,
           struct src_register src0,
           struct src_register src1,
           struct src_register src2)
{
   SVGA3dShaderDestToken tmp;
   boolean need_dst_tmp = FALSE;

   /* The dst reg must be a temporary, and not be the same as src0 or src2 */
   if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
       alias_src_dst(src0, dst) ||
       alias_src_dst(src2, dst))
      need_dst_tmp = TRUE;

   if (need_dst_tmp) {
      tmp = get_temp( emit );
      tmp.mask = dst.mask;
   }
   else {
      tmp = dst;
   }

   if (!submit_op3(emit, inst_token( SVGA3DOP_LRP ), tmp, src0, src1, src2))
      return FALSE;

   /* Copy the result from the scratch temp to the real destination. */
   if (need_dst_tmp) {
      if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
         return FALSE;
   }

   return TRUE;
}
743
744
/**
 * Emit a DEF/DEFI instruction defining a literal constant register.
 *
 * \param type  SVGA3D_CONST_TYPE_FLOAT or SVGA3D_CONST_TYPE_INT
 * \param idx   index of the constant register being defined
 * \param a,b,c,d  the four component values (cast to int for DEFI)
 */
static boolean
emit_def_const(struct svga_shader_emitter *emit,
               SVGA3dShaderConstType type,
               unsigned idx, float a, float b, float c, float d)
{
   SVGA3DOpDefArgs def;
   SVGA3dShaderInstToken opcode;

   switch (type) {
   case SVGA3D_CONST_TYPE_FLOAT:
      opcode = inst_token( SVGA3DOP_DEF );
      def.dst = dst_register( SVGA3DREG_CONST, idx );
      def.constValues[0] = a;
      def.constValues[1] = b;
      def.constValues[2] = c;
      def.constValues[3] = d;
      break;
   case SVGA3D_CONST_TYPE_INT:
      opcode = inst_token( SVGA3DOP_DEFI );
      def.dst = dst_register( SVGA3DREG_CONSTINT, idx );
      def.constIValues[0] = (int)a;
      def.constIValues[1] = (int)b;
      def.constIValues[2] = (int)c;
      def.constIValues[3] = (int)d;
      break;
   default:
      assert(0);
      opcode = inst_token( SVGA3DOP_NOP );
      break;
   }

   /* The DEF payload (dst token + 4 values) follows the opcode token. */
   if (!emit_instruction(emit, opcode) ||
       !svga_shader_emit_dwords( emit, def.values, Elements(def.values)))
      return FALSE;

   return TRUE;
}
782
783
784 static boolean
785 create_zero_immediate( struct svga_shader_emitter *emit )
786 {
787 unsigned idx = emit->nr_hw_float_const++;
788
789 /* Emit the constant (0, 0.5, -1, 1) and use swizzling to generate
790 * other useful vectors.
791 */
792 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
793 idx, 0, 0.5, -1, 1 ))
794 return FALSE;
795
796 emit->zero_immediate_idx = idx;
797 emit->created_zero_immediate = TRUE;
798
799 return TRUE;
800 }
801
802
803 static boolean
804 create_loop_const( struct svga_shader_emitter *emit )
805 {
806 unsigned idx = emit->nr_hw_int_const++;
807
808 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_INT, idx,
809 255, /* iteration count */
810 0, /* initial value */
811 1, /* step size */
812 0 /* not used, must be 0 */))
813 return FALSE;
814
815 emit->loop_const_idx = idx;
816 emit->created_loop_const = TRUE;
817
818 return TRUE;
819 }
820
821 static boolean
822 create_arl_consts( struct svga_shader_emitter *emit )
823 {
824 int i;
825
826 for (i = 0; i < emit->num_arl_consts; i += 4) {
827 int j;
828 unsigned idx = emit->nr_hw_float_const++;
829 float vals[4];
830 for (j = 0; j < 4 && (j + i) < emit->num_arl_consts; ++j) {
831 vals[j] = (float) emit->arl_consts[i + j].number;
832 emit->arl_consts[i + j].idx = idx;
833 switch (j) {
834 case 0:
835 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_X;
836 break;
837 case 1:
838 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Y;
839 break;
840 case 2:
841 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Z;
842 break;
843 case 3:
844 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_W;
845 break;
846 }
847 }
848 while (j < 4)
849 vals[j++] = 0;
850
851 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
852 vals[0], vals[1],
853 vals[2], vals[3]))
854 return FALSE;
855 }
856
857 return TRUE;
858 }
859
860
/**
 * Return the register which holds the pixel shaders front/back-
 * facing value.
 */
static struct src_register
get_vface( struct svga_shader_emitter *emit )
{
   /* Only valid after the vFace declaration has been emitted. */
   assert(emit->emitted_vface);
   return src_register(SVGA3DREG_MISCTYPE, SVGA3DMISCREG_FACE);
}
871
872
873 /**
874 * returns {0, 0, 0, 1} immediate
875 */
876 static struct src_register
877 get_zero_immediate( struct svga_shader_emitter *emit )
878 {
879 assert(emit->created_zero_immediate);
880 assert(emit->zero_immediate_idx >= 0);
881 return swizzle(src_register( SVGA3DREG_CONST,
882 emit->zero_immediate_idx),
883 0, 0, 0, 3);
884 }
885
886
887 /**
888 * returns {1, 1, 1, -1} immediate
889 */
890 static struct src_register
891 get_pos_neg_one_immediate( struct svga_shader_emitter *emit )
892 {
893 assert(emit->created_zero_immediate);
894 assert(emit->zero_immediate_idx >= 0);
895 return swizzle(src_register( SVGA3DREG_CONST,
896 emit->zero_immediate_idx),
897 3, 3, 3, 2);
898 }
899
900
901 /**
902 * returns {0.5, 0.5, 0.5, 0.5} immediate
903 */
904 static struct src_register
905 get_half_immediate( struct svga_shader_emitter *emit )
906 {
907 assert(emit->created_zero_immediate);
908 assert(emit->zero_immediate_idx >= 0);
909 return swizzle(src_register(SVGA3DREG_CONST, emit->zero_immediate_idx),
910 1, 1, 1, 1);
911 }
912
913
914 /**
915 * returns the loop const
916 */
917 static struct src_register
918 get_loop_const( struct svga_shader_emitter *emit )
919 {
920 assert(emit->created_loop_const);
921 assert(emit->loop_const_idx >= 0);
922 return src_register( SVGA3DREG_CONSTINT,
923 emit->loop_const_idx );
924 }
925
926
927 static struct src_register
928 get_fake_arl_const( struct svga_shader_emitter *emit )
929 {
930 struct src_register reg;
931 int idx = 0, swizzle = 0, i;
932
933 for (i = 0; i < emit->num_arl_consts; ++ i) {
934 if (emit->arl_consts[i].arl_num == emit->current_arl) {
935 idx = emit->arl_consts[i].idx;
936 swizzle = emit->arl_consts[i].swizzle;
937 }
938 }
939
940 reg = src_register( SVGA3DREG_CONST, idx );
941 return scalar(reg, swizzle);
942 }
943
944
/**
 * Return the register which holds the current dimensions of the
 * texture bound to the given sampler
 */
static struct src_register
get_tex_dimensions( struct svga_shader_emitter *emit, int sampler_num )
{
   int idx;
   struct src_register reg;

   /* the width/height indexes start right after constants */
   idx = emit->key.fkey.tex[sampler_num].width_height_idx +
         emit->info.file_max[TGSI_FILE_CONSTANT] + 1;

   reg = src_register( SVGA3DREG_CONST, idx );
   return reg;
}
962
963
/**
 * Emulate TGSI ARL when the offset has been folded into a constant
 * (see create_arl_consts()): add the recorded adjustment to the source
 * and move the result into the address register with MOVA.
 */
static boolean
emit_fake_arl(struct svga_shader_emitter *emit,
              const struct tgsi_full_instruction *insn)
{
   const struct src_register src0 =
      translate_src_register(emit, &insn->Src[0] );
   struct src_register src1 = get_fake_arl_const( emit );
   SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
   SVGA3dShaderDestToken tmp = get_temp( emit );

   if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
      return FALSE;

   if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), tmp, src( tmp ),
                    src1))
      return FALSE;

   /* replicate the original swizzle */
   src1 = src(tmp);
   src1.base.swizzle = src0.base.swizzle;

   return submit_op1( emit, inst_token( SVGA3DOP_MOVA ),
                      dst, src1 );
}
988
989
/**
 * Translate TGSI IF to an SVGA3D IFC instruction comparing the
 * condition's x component against zero (branch taken when non-equal).
 */
static boolean
emit_if(struct svga_shader_emitter *emit,
        const struct tgsi_full_instruction *insn)
{
   struct src_register src0 =
      translate_src_register(emit, &insn->Src[0]);
   struct src_register zero = get_zero_immediate( emit );
   SVGA3dShaderInstToken if_token = inst_token( SVGA3DOP_IFC );

   if_token.control = SVGA3DOPCOMPC_NE;
   zero = scalar(zero, TGSI_SWIZZLE_X);

   if (SVGA3dShaderGetRegType(src0.base.value) == SVGA3DREG_CONST) {
      /*
       * Max different constant registers readable per IFC instruction is 1.
       * The zero immediate is already a constant, so copy src0 to a temp.
       */
      SVGA3dShaderDestToken tmp = get_temp( emit );

      if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
         return FALSE;

      src0 = scalar(src( tmp ), TGSI_SWIZZLE_X);
   }

   /* Track branch nesting; emit_endif() decrements. */
   emit->dynamic_branching_level++;

   return (emit_instruction( emit, if_token ) &&
           emit_src( emit, src0 ) &&
           emit_src( emit, zero ) );
}
1020
1021
1022 static boolean
1023 emit_endif(struct svga_shader_emitter *emit,
1024 const struct tgsi_full_instruction *insn)
1025 {
1026 emit->dynamic_branching_level--;
1027
1028 return emit_instruction(emit, inst_token(SVGA3DOP_ENDIF));
1029 }
1030
1031
1032 static boolean
1033 emit_else(struct svga_shader_emitter *emit,
1034 const struct tgsi_full_instruction *insn)
1035 {
1036 return emit_instruction(emit, inst_token(SVGA3DOP_ELSE));
1037 }
1038
1039
/**
 * Translate the following TGSI FLR instruction.
 *   FLR  DST, SRC
 * To the following SVGA3D instruction sequence.
 *   FRC  TMP, SRC
 *   SUB  DST, SRC, TMP
 * (SUB is expressed as ADD with a negated second operand.)
 */
static boolean
emit_floor(struct svga_shader_emitter *emit,
           const struct tgsi_full_instruction *insn )
{
   SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
   const struct src_register src0 =
      translate_src_register(emit, &insn->Src[0] );
   SVGA3dShaderDestToken temp = get_temp( emit );

   /* FRC  TMP, SRC */
   if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ), temp, src0 ))
      return FALSE;

   /* SUB  DST, SRC, TMP */
   if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src0,
                    negate( src( temp ) ) ))
      return FALSE;

   return TRUE;
}
1067
1068
/**
 * Translate the following TGSI CEIL instruction.
 *   CEIL  DST, SRC
 * To the following SVGA3D instruction sequence.
 *   FRC  TMP, -SRC
 *   ADD  DST, SRC, TMP
 * (ceil(x) == x + frac(-x))
 */
static boolean
emit_ceil(struct svga_shader_emitter *emit,
          const struct tgsi_full_instruction *insn)
{
   SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
   const struct src_register src0 =
      translate_src_register(emit, &insn->Src[0]);
   SVGA3dShaderDestToken temp = get_temp(emit);

   /* FRC  TMP, -SRC */
   if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), temp, negate(src0)))
      return FALSE;

   /* ADD  DST, SRC, TMP */
   if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), dst, src0, src(temp)))
      return FALSE;

   return TRUE;
}
1095
1096
/**
 * Translate the following TGSI DIV instruction.
 *   DIV  DST.xy, SRC0, SRC1
 * To the following SVGA3D instruction sequence.
 *   RCP  TMP.x, SRC1.xxxx
 *   RCP  TMP.y, SRC1.yyyy
 *   MUL  DST.xy, SRC0, TMP
 */
static boolean
emit_div(struct svga_shader_emitter *emit,
         const struct tgsi_full_instruction *insn )
{
   SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
   const struct src_register src0 =
      translate_src_register(emit, &insn->Src[0] );
   const struct src_register src1 =
      translate_src_register(emit, &insn->Src[1] );
   SVGA3dShaderDestToken temp = get_temp( emit );
   int i;

   /* For each enabled element, perform a RCP instruction.  Note that
    * RCP is scalar in SVGA3D:
    */
   for (i = 0; i < 4; i++) {
      unsigned channel = 1 << i;
      if (dst.mask & channel) {
         /* RCP  TMP.?, SRC1.???? */
         if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
                          writemask(temp, channel),
                          scalar(src1, i) ))
            return FALSE;
      }
   }

   /* Vector mul:
    * MUL  DST, SRC0, TMP
    */
   if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst, src0,
                    src( temp ) ))
      return FALSE;

   return TRUE;
}
1140
1141
/**
 * Translate the following TGSI DP2 instruction.
 *   DP2  DST, SRC1, SRC2
 * To the following SVGA3D instruction sequence.
 *   MUL  TMP, SRC1, SRC2
 *   ADD  DST, TMP.xxxx, TMP.yyyy
 */
static boolean
emit_dp2(struct svga_shader_emitter *emit,
         const struct tgsi_full_instruction *insn )
{
   SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
   const struct src_register src0 =
      translate_src_register(emit, &insn->Src[0]);
   const struct src_register src1 =
      translate_src_register(emit, &insn->Src[1]);
   SVGA3dShaderDestToken temp = get_temp( emit );
   struct src_register temp_src0, temp_src1;

   /* MUL  TMP, SRC1, SRC2 */
   if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), temp, src0, src1 ))
      return FALSE;

   temp_src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
   temp_src1 = scalar(src( temp ), TGSI_SWIZZLE_Y);

   /* ADD  DST, TMP.xxxx, TMP.yyyy */
   if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
                    temp_src0, temp_src1 ))
      return FALSE;

   return TRUE;
}
1175
1176
1177 /**
1178 * Translate the following TGSI DPH instruction.
1179 * DPH DST, SRC1, SRC2
1180 * To the following SVGA3D instruction sequence.
1181 * DP3 TMP, SRC1, SRC2
1182 * ADD DST, TMP, SRC2.wwww
1183 */
1184 static boolean
1185 emit_dph(struct svga_shader_emitter *emit,
1186 const struct tgsi_full_instruction *insn )
1187 {
1188 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1189 const struct src_register src0 = translate_src_register(
1190 emit, &insn->Src[0] );
1191 struct src_register src1 =
1192 translate_src_register(emit, &insn->Src[1]);
1193 SVGA3dShaderDestToken temp = get_temp( emit );
1194
1195 /* DP3 TMP, SRC1, SRC2 */
1196 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src1 ))
1197 return FALSE;
1198
1199 src1 = scalar(src1, TGSI_SWIZZLE_W);
1200
1201 /* ADD DST, TMP, SRC2.wwww */
1202 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1203 src( temp ), src1 ))
1204 return FALSE;
1205
1206 return TRUE;
1207 }
1208
1209
/**
 * Translate the following TGSI NRM instruction.
 * NRM DST, SRC
 * To the following SVGA3D instruction sequence.
 * DP3 TMP, SRC, SRC
 * RSQ TMP, TMP
 * MUL DST, SRC, TMP
 */
1218 static boolean
1219 emit_nrm(struct svga_shader_emitter *emit,
1220 const struct tgsi_full_instruction *insn)
1221 {
1222 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1223 const struct src_register src0 =
1224 translate_src_register(emit, &insn->Src[0]);
1225 SVGA3dShaderDestToken temp = get_temp( emit );
1226
1227 /* DP3 TMP, SRC, SRC */
1228 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src0 ))
1229 return FALSE;
1230
1231 /* RSQ TMP, TMP */
1232 if (!submit_op1( emit, inst_token( SVGA3DOP_RSQ ), temp, src( temp )))
1233 return FALSE;
1234
1235 /* MUL DST, SRC, TMP */
1236 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst,
1237 src0, src( temp )))
1238 return FALSE;
1239
1240 return TRUE;
1241 }
1242
1243
1244 static boolean
1245 do_emit_sincos(struct svga_shader_emitter *emit,
1246 SVGA3dShaderDestToken dst,
1247 struct src_register src0)
1248 {
1249 src0 = scalar(src0, TGSI_SWIZZLE_X);
1250 return submit_op1(emit, inst_token(SVGA3DOP_SINCOS), dst, src0);
1251 }
1252
1253
1254 static boolean
1255 emit_sincos(struct svga_shader_emitter *emit,
1256 const struct tgsi_full_instruction *insn)
1257 {
1258 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1259 struct src_register src0 = translate_src_register(emit, &insn->Src[0]);
1260 SVGA3dShaderDestToken temp = get_temp( emit );
1261
1262 /* SCS TMP SRC */
1263 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_XY), src0 ))
1264 return FALSE;
1265
1266 /* MOV DST TMP */
1267 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src( temp ) ))
1268 return FALSE;
1269
1270 return TRUE;
1271 }
1272
1273
1274 /**
1275 * SCS TMP SRC
1276 * MOV DST TMP.yyyy
1277 */
1278 static boolean
1279 emit_sin(struct svga_shader_emitter *emit,
1280 const struct tgsi_full_instruction *insn )
1281 {
1282 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1283 struct src_register src0 =
1284 translate_src_register(emit, &insn->Src[0] );
1285 SVGA3dShaderDestToken temp = get_temp( emit );
1286
1287 /* SCS TMP SRC */
1288 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_Y), src0))
1289 return FALSE;
1290
1291 src0 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1292
1293 /* MOV DST TMP.yyyy */
1294 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1295 return FALSE;
1296
1297 return TRUE;
1298 }
1299
1300 /*
1301 * SCS TMP SRC
1302 * MOV DST TMP.xxxx
1303 */
1304 static boolean
1305 emit_cos(struct svga_shader_emitter *emit,
1306 const struct tgsi_full_instruction *insn)
1307 {
1308 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1309 struct src_register src0 =
1310 translate_src_register(emit, &insn->Src[0] );
1311 SVGA3dShaderDestToken temp = get_temp( emit );
1312
1313 /* SCS TMP SRC */
1314 if (!do_emit_sincos( emit, writemask(temp, TGSI_WRITEMASK_X), src0 ))
1315 return FALSE;
1316
1317 src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1318
1319 /* MOV DST TMP.xxxx */
1320 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1321 return FALSE;
1322
1323 return TRUE;
1324 }
1325
1326
/**
 * Translate TGSI SSG (set sign): per enabled component,
 * dst = 1 for src > 0, -1 for src < 0, 0 for src == 0.
 */
static boolean
emit_ssg(struct svga_shader_emitter *emit,
         const struct tgsi_full_instruction *insn)
{
   SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
   struct src_register src0 =
      translate_src_register(emit, &insn->Src[0] );
   SVGA3dShaderDestToken temp0 = get_temp( emit );
   SVGA3dShaderDestToken temp1 = get_temp( emit );
   struct src_register zero, one;

   if (emit->unit == PIPE_SHADER_VERTEX) {
      /* Vertex shaders have a native SGN opcode; it requires two scratch
       * temporaries as extra operands.
       */
      /* SGN DST, SRC0, TMP0, TMP1 */
      return submit_op3( emit, inst_token( SVGA3DOP_SGN ), dst, src0,
                         src( temp0 ), src( temp1 ) );
   }

   /* Fragment shader path: the zero immediate is (0, 0, 0, 1), so W
    * supplies the constant 1.0 and X the constant 0.0.
    */
   zero = get_zero_immediate( emit );
   one = scalar( zero, TGSI_SWIZZLE_W );
   zero = scalar( zero, TGSI_SWIZZLE_X );

   /* Build the positive half: TMP0 = (src0 >= 0) ? 1 : 0.
    * NOTE(review): relies on D3D9-style CMP semantics (select arg2 where
    * arg1 >= 0, else arg3) -- confirm against SVGA3DOP_CMP definition.
    */
   /* CMP TMP0, SRC0, one, zero */
   if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
                    writemask( temp0, dst.mask ), src0, one, zero ))
      return FALSE;

   /* Negative half: TMP1 = (-src0 >= 0) ? -1 : 0. */
   /* CMP TMP1, negate(SRC0), negate(one), zero */
   if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
                    writemask( temp1, dst.mask ), negate( src0 ), negate( one ),
                    zero ))
      return FALSE;

   /* Sum the halves: 1 + 0, 0 + (-1), or 1 + (-1) = the sign value. */
   /* ADD DST, TMP0, TMP1 */
   return submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src( temp0 ),
                      src( temp1 ) );
}
1363
1364
/**
 * ADD DST, SRC0, negate(SRC1)
 */
1368 static boolean
1369 emit_sub(struct svga_shader_emitter *emit,
1370 const struct tgsi_full_instruction *insn)
1371 {
1372 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1373 struct src_register src0 = translate_src_register(
1374 emit, &insn->Src[0] );
1375 struct src_register src1 = translate_src_register(
1376 emit, &insn->Src[1] );
1377
1378 src1 = negate(src1);
1379
1380 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1381 src0, src1 ))
1382 return FALSE;
1383
1384 return TRUE;
1385 }
1386
1387
1388 static boolean
1389 emit_kill_if(struct svga_shader_emitter *emit,
1390 const struct tgsi_full_instruction *insn)
1391 {
1392 const struct tgsi_full_src_register *reg = &insn->Src[0];
1393 struct src_register src0, srcIn;
1394 /* is the W component tested in another position? */
1395 const boolean w_tested = (reg->Register.SwizzleW == reg->Register.SwizzleX ||
1396 reg->Register.SwizzleW == reg->Register.SwizzleY ||
1397 reg->Register.SwizzleW == reg->Register.SwizzleZ);
1398 const boolean special = (reg->Register.Absolute ||
1399 reg->Register.Negate ||
1400 reg->Register.Indirect ||
1401 reg->Register.SwizzleX != 0 ||
1402 reg->Register.SwizzleY != 1 ||
1403 reg->Register.SwizzleZ != 2 ||
1404 reg->Register.File != TGSI_FILE_TEMPORARY);
1405 SVGA3dShaderDestToken temp;
1406
1407 src0 = srcIn = translate_src_register( emit, reg );
1408
1409 if (special || !w_tested) {
1410 /* need a temp reg */
1411 temp = get_temp( emit );
1412 }
1413
1414 if (special) {
1415 /* move the source into a temp register */
1416 submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1417 writemask( temp, TGSI_WRITEMASK_XYZ ),
1418 src0 );
1419
1420 src0 = src( temp );
1421 }
1422
1423 /* do the texkill (on the xyz components) */
1424 if (!submit_op0( emit, inst_token( SVGA3DOP_TEXKILL ), dst(src0) ))
1425 return FALSE;
1426
1427 if (!w_tested) {
1428 /* need to emit a second texkill to test the W component */
1429 /* put src.wwww into temp register */
1430 if (!submit_op1(emit,
1431 inst_token( SVGA3DOP_MOV ),
1432 writemask( temp, TGSI_WRITEMASK_XYZ ),
1433 scalar(srcIn, TGSI_SWIZZLE_W)))
1434 return FALSE;
1435
1436 /* second texkill */
1437 if (!submit_op0( emit, inst_token( SVGA3DOP_TEXKILL ), temp ))
1438 return FALSE;
1439 }
1440
1441 return TRUE;
1442 }
1443
1444
1445 /**
1446 * unconditional kill
1447 */
1448 static boolean
1449 emit_kill(struct svga_shader_emitter *emit,
1450 const struct tgsi_full_instruction *insn)
1451 {
1452 SVGA3dShaderDestToken temp;
1453 struct src_register one = scalar( get_zero_immediate( emit ),
1454 TGSI_SWIZZLE_W );
1455 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_TEXKILL );
1456
1457 /* texkill doesn't allow negation on the operand so lets move
1458 * negation of {1} to a temp register */
1459 temp = get_temp( emit );
1460 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp,
1461 negate( one ) ))
1462 return FALSE;
1463
1464 return submit_op0( emit, inst, temp );
1465 }
1466
1467
1468 /**
1469 * Test if r1 and r2 are the same register.
1470 */
1471 static boolean
1472 same_register(struct src_register r1, struct src_register r2)
1473 {
1474 return (r1.base.num == r2.base.num &&
1475 r1.base.type_upper == r2.base.type_upper &&
1476 r1.base.type_lower == r2.base.type_lower);
1477 }
1478
1479
1480
1481 /* Implement conditionals by initializing destination reg to 'fail',
1482 * then set predicate reg with UFOP_SETP, then move 'pass' to dest
1483 * based on predicate reg.
1484 *
1485 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1486 * MOV dst, fail
1487 * MOV dst, pass, p0
1488 */
1489 static boolean
1490 emit_conditional(struct svga_shader_emitter *emit,
1491 unsigned compare_func,
1492 SVGA3dShaderDestToken dst,
1493 struct src_register src0,
1494 struct src_register src1,
1495 struct src_register pass,
1496 struct src_register fail)
1497 {
1498 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1499 SVGA3dShaderInstToken setp_token, mov_token;
1500 setp_token = inst_token( SVGA3DOP_SETP );
1501
1502 switch (compare_func) {
1503 case PIPE_FUNC_NEVER:
1504 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1505 dst, fail );
1506 break;
1507 case PIPE_FUNC_LESS:
1508 setp_token.control = SVGA3DOPCOMP_LT;
1509 break;
1510 case PIPE_FUNC_EQUAL:
1511 setp_token.control = SVGA3DOPCOMP_EQ;
1512 break;
1513 case PIPE_FUNC_LEQUAL:
1514 setp_token.control = SVGA3DOPCOMP_LE;
1515 break;
1516 case PIPE_FUNC_GREATER:
1517 setp_token.control = SVGA3DOPCOMP_GT;
1518 break;
1519 case PIPE_FUNC_NOTEQUAL:
1520 setp_token.control = SVGA3DOPCOMPC_NE;
1521 break;
1522 case PIPE_FUNC_GEQUAL:
1523 setp_token.control = SVGA3DOPCOMP_GE;
1524 break;
1525 case PIPE_FUNC_ALWAYS:
1526 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1527 dst, pass );
1528 break;
1529 }
1530
1531 if (same_register(src(dst), pass)) {
1532 /* We'll get bad results if the dst and pass registers are the same
1533 * so use a temp register containing pass.
1534 */
1535 SVGA3dShaderDestToken temp = get_temp(emit);
1536 if (!submit_op1(emit, inst_token(SVGA3DOP_MOV), temp, pass))
1537 return FALSE;
1538 pass = src(temp);
1539 }
1540
1541 /* SETP src0, COMPOP, src1 */
1542 if (!submit_op2( emit, setp_token, pred_reg,
1543 src0, src1 ))
1544 return FALSE;
1545
1546 mov_token = inst_token( SVGA3DOP_MOV );
1547
1548 /* MOV dst, fail */
1549 if (!submit_op1( emit, mov_token, dst,
1550 fail ))
1551 return FALSE;
1552
1553 /* MOV dst, pass (predicated)
1554 *
1555 * Note that the predicate reg (and possible modifiers) is passed
1556 * as the first source argument.
1557 */
1558 mov_token.predicated = 1;
1559 if (!submit_op2( emit, mov_token, dst,
1560 src( pred_reg ), pass ))
1561 return FALSE;
1562
1563 return TRUE;
1564 }
1565
1566
1567 static boolean
1568 emit_select(struct svga_shader_emitter *emit,
1569 unsigned compare_func,
1570 SVGA3dShaderDestToken dst,
1571 struct src_register src0,
1572 struct src_register src1 )
1573 {
1574 /* There are some SVGA instructions which implement some selects
1575 * directly, but they are only available in the vertex shader.
1576 */
1577 if (emit->unit == PIPE_SHADER_VERTEX) {
1578 switch (compare_func) {
1579 case PIPE_FUNC_GEQUAL:
1580 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src0, src1 );
1581 case PIPE_FUNC_LEQUAL:
1582 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src1, src0 );
1583 case PIPE_FUNC_GREATER:
1584 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src1, src0 );
1585 case PIPE_FUNC_LESS:
1586 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src0, src1 );
1587 default:
1588 break;
1589 }
1590 }
1591
1592 /* Otherwise, need to use the setp approach:
1593 */
1594 {
1595 struct src_register one, zero;
1596 /* zero immediate is 0,0,0,1 */
1597 zero = get_zero_immediate( emit );
1598 one = scalar( zero, TGSI_SWIZZLE_W );
1599 zero = scalar( zero, TGSI_SWIZZLE_X );
1600
1601 return emit_conditional(
1602 emit,
1603 compare_func,
1604 dst,
1605 src0,
1606 src1,
1607 one, zero);
1608 }
1609 }
1610
1611
1612 static boolean
1613 emit_select_op(struct svga_shader_emitter *emit,
1614 unsigned compare,
1615 const struct tgsi_full_instruction *insn)
1616 {
1617 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1618 struct src_register src0 = translate_src_register(
1619 emit, &insn->Src[0] );
1620 struct src_register src1 = translate_src_register(
1621 emit, &insn->Src[1] );
1622
1623 return emit_select( emit, compare, dst, src0, src1 );
1624 }
1625
1626
1627 /**
1628 * Translate TGSI CMP instruction.
1629 */
1630 static boolean
1631 emit_cmp(struct svga_shader_emitter *emit,
1632 const struct tgsi_full_instruction *insn)
1633 {
1634 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1635 const struct src_register src0 =
1636 translate_src_register(emit, &insn->Src[0] );
1637 const struct src_register src1 =
1638 translate_src_register(emit, &insn->Src[1] );
1639 const struct src_register src2 =
1640 translate_src_register(emit, &insn->Src[2] );
1641
1642 if (emit->unit == PIPE_SHADER_VERTEX) {
1643 struct src_register zero =
1644 scalar(get_zero_immediate(emit), TGSI_SWIZZLE_X);
1645 /* We used to simulate CMP with SLT+LRP. But that didn't work when
1646 * src1 or src2 was Inf/NaN. In particular, GLSL sqrt(0) failed
1647 * because it involves a CMP to handle the 0 case.
1648 * Use a conditional expression instead.
1649 */
1650 return emit_conditional(emit, PIPE_FUNC_LESS, dst,
1651 src0, zero, src1, src2);
1652 }
1653 else {
1654 assert(emit->unit == PIPE_SHADER_FRAGMENT);
1655
1656 /* CMP DST, SRC0, SRC2, SRC1 */
1657 return submit_op3( emit, inst_token( SVGA3DOP_CMP ), dst,
1658 src0, src2, src1);
1659 }
1660 }
1661
1662
1663 /**
1664 * Translate texture instructions to SVGA3D representation.
1665 */
static boolean
emit_tex2(struct svga_shader_emitter *emit,
          const struct tgsi_full_instruction *insn,
          SVGA3dShaderDestToken dst)
{
   SVGA3dShaderInstToken inst;
   struct src_register texcoord;
   struct src_register sampler;
   SVGA3dShaderDestToken tmp;

   inst.value = 0;

   /* Map the TGSI opcode onto the SVGA3D opcode plus control bits
    * (projective divide / LOD bias).
    */
   switch (insn->Instruction.Opcode) {
   case TGSI_OPCODE_TEX:
      inst.op = SVGA3DOP_TEX;
      break;
   case TGSI_OPCODE_TXP:
      inst.op = SVGA3DOP_TEX;
      inst.control = SVGA3DOPCONT_PROJECT;
      break;
   case TGSI_OPCODE_TXB:
      inst.op = SVGA3DOP_TEX;
      inst.control = SVGA3DOPCONT_BIAS;
      break;
   case TGSI_OPCODE_TXL:
      inst.op = SVGA3DOP_TEXLDL;
      break;
   default:
      assert(0);
      return FALSE;
   }

   texcoord = translate_src_register( emit, &insn->Src[0] );
   sampler = translate_src_register( emit, &insn->Src[1] );

   /* A scratch register is allocated only when one of the two fix-ups
    * below will run; both fix-ups share the same temp.
    */
   if (emit->key.fkey.tex[sampler.base.num].unnormalized ||
       emit->dynamic_branching_level > 0)
      tmp = get_temp( emit );

   /* Can't do mipmapping inside dynamic branch constructs. Force LOD
    * zero in that case.
    */
   if (emit->dynamic_branching_level > 0 &&
       inst.op == SVGA3DOP_TEX &&
       SVGA3dShaderGetRegType(texcoord.base.value) == SVGA3DREG_TEMP) {
      struct src_register zero = get_zero_immediate( emit );

      /* MOV tmp, texcoord -- copy so we can overwrite the W component */
      if (!submit_op1( emit,
                       inst_token( SVGA3DOP_MOV ),
                       tmp,
                       texcoord ))
         return FALSE;

      /* MOV tmp.w, zero -- so the TEXLDL below samples LOD zero */
      if (!submit_op1( emit,
                       inst_token( SVGA3DOP_MOV ),
                       writemask( tmp, TGSI_WRITEMASK_W ),
                       scalar( zero, TGSI_SWIZZLE_X )))
         return FALSE;

      texcoord = src( tmp );
      inst.op = SVGA3DOP_TEXLDL;
   }

   /* Explicit normalization of texcoords:
    */
   if (emit->key.fkey.tex[sampler.base.num].unnormalized) {
      struct src_register wh = get_tex_dimensions( emit, sampler.base.num );

      /* MUL tmp, SRC0, WH -- scale the coords by the per-unit factors
       * returned by get_tex_dimensions (presumably reciprocal texture
       * dimensions -- TODO confirm against its definition).
       */
      if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
                       tmp, texcoord, wh ))
         return FALSE;

      texcoord = src( tmp );
   }

   return submit_op2( emit, inst, dst, texcoord, sampler );
}
1746
1747
1748 /**
1749 * Translate texture instructions to SVGA3D representation.
1750 */
1751 static boolean
1752 emit_tex4(struct svga_shader_emitter *emit,
1753 const struct tgsi_full_instruction *insn,
1754 SVGA3dShaderDestToken dst )
1755 {
1756 SVGA3dShaderInstToken inst;
1757 struct src_register texcoord;
1758 struct src_register ddx;
1759 struct src_register ddy;
1760 struct src_register sampler;
1761
1762 texcoord = translate_src_register( emit, &insn->Src[0] );
1763 ddx = translate_src_register( emit, &insn->Src[1] );
1764 ddy = translate_src_register( emit, &insn->Src[2] );
1765 sampler = translate_src_register( emit, &insn->Src[3] );
1766
1767 inst.value = 0;
1768
1769 switch (insn->Instruction.Opcode) {
1770 case TGSI_OPCODE_TXD:
1771 inst.op = SVGA3DOP_TEXLDD; /* 4 args! */
1772 break;
1773 default:
1774 assert(0);
1775 return FALSE;
1776 }
1777
1778 return submit_op4( emit, inst, dst, texcoord, sampler, ddx, ddy );
1779 }
1780
1781
1782 /**
1783 * Emit texture swizzle code.
1784 */
static boolean
emit_tex_swizzle(struct svga_shader_emitter *emit,
                 SVGA3dShaderDestToken dst,
                 struct src_register src,
                 unsigned swizzle_x,
                 unsigned swizzle_y,
                 unsigned swizzle_z,
                 unsigned swizzle_w)
{
   const unsigned swizzleIn[4] = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
   unsigned srcSwizzle[4];
   unsigned srcWritemask = 0x0, zeroWritemask = 0x0, oneWritemask = 0x0;
   int i;

   /* build writemasks and srcSwizzle terms: partition the four output
    * channels into those copied from src, those forced to 0 and those
    * forced to 1 -- each partition gets its own MOV below.
    */
   for (i = 0; i < 4; i++) {
      if (swizzleIn[i] == PIPE_SWIZZLE_ZERO) {
         /* identity swizzle is a placeholder; channel written by the
          * zero MOV below
          */
         srcSwizzle[i] = TGSI_SWIZZLE_X + i;
         zeroWritemask |= (1 << i);
      }
      else if (swizzleIn[i] == PIPE_SWIZZLE_ONE) {
         /* placeholder; channel written by the one MOV below */
         srcSwizzle[i] = TGSI_SWIZZLE_X + i;
         oneWritemask |= (1 << i);
      }
      else {
         /* genuine component selection from src */
         srcSwizzle[i] = swizzleIn[i];
         srcWritemask |= (1 << i);
      }
   }

   /* write x/y/z/w comps */
   if (dst.mask & srcWritemask) {
      if (!submit_op1(emit,
                      inst_token(SVGA3DOP_MOV),
                      writemask(dst, srcWritemask),
                      swizzle(src,
                              srcSwizzle[0],
                              srcSwizzle[1],
                              srcSwizzle[2],
                              srcSwizzle[3])))
         return FALSE;
   }

   /* write 0 comps -- the zero immediate's X component is 0.0 */
   if (dst.mask & zeroWritemask) {
      if (!submit_op1(emit,
                      inst_token(SVGA3DOP_MOV),
                      writemask(dst, zeroWritemask),
                      scalar(get_zero_immediate(emit), TGSI_SWIZZLE_X)))
         return FALSE;
   }

   /* write 1 comps -- the zero immediate's W component is 1.0 */
   if (dst.mask & oneWritemask) {
      if (!submit_op1(emit,
                      inst_token(SVGA3DOP_MOV),
                      writemask(dst, oneWritemask),
                      scalar(get_zero_immediate(emit), TGSI_SWIZZLE_W)))
         return FALSE;
   }

   return TRUE;
}
1848
1849
1850 static boolean
1851 emit_tex(struct svga_shader_emitter *emit,
1852 const struct tgsi_full_instruction *insn)
1853 {
1854 SVGA3dShaderDestToken dst =
1855 translate_dst_register( emit, insn, 0 );
1856 struct src_register src0 =
1857 translate_src_register( emit, &insn->Src[0] );
1858 struct src_register src1 =
1859 translate_src_register( emit, &insn->Src[1] );
1860
1861 SVGA3dShaderDestToken tex_result;
1862 const unsigned unit = src1.base.num;
1863
1864 /* check for shadow samplers */
1865 boolean compare = (emit->key.fkey.tex[unit].compare_mode ==
1866 PIPE_TEX_COMPARE_R_TO_TEXTURE);
1867
1868 /* texture swizzle */
1869 boolean swizzle = (emit->key.fkey.tex[unit].swizzle_r != PIPE_SWIZZLE_RED ||
1870 emit->key.fkey.tex[unit].swizzle_g != PIPE_SWIZZLE_GREEN ||
1871 emit->key.fkey.tex[unit].swizzle_b != PIPE_SWIZZLE_BLUE ||
1872 emit->key.fkey.tex[unit].swizzle_a != PIPE_SWIZZLE_ALPHA);
1873
1874 boolean saturate = insn->Instruction.Saturate != TGSI_SAT_NONE;
1875
1876 /* If doing compare processing or tex swizzle or saturation, we need to put
1877 * the fetched color into a temporary so it can be used as a source later on.
1878 */
1879 if (compare || swizzle || saturate) {
1880 tex_result = get_temp( emit );
1881 }
1882 else {
1883 tex_result = dst;
1884 }
1885
1886 switch(insn->Instruction.Opcode) {
1887 case TGSI_OPCODE_TEX:
1888 case TGSI_OPCODE_TXB:
1889 case TGSI_OPCODE_TXP:
1890 case TGSI_OPCODE_TXL:
1891 if (!emit_tex2( emit, insn, tex_result ))
1892 return FALSE;
1893 break;
1894 case TGSI_OPCODE_TXD:
1895 if (!emit_tex4( emit, insn, tex_result ))
1896 return FALSE;
1897 break;
1898 default:
1899 assert(0);
1900 }
1901
1902 if (compare) {
1903 SVGA3dShaderDestToken dst2;
1904
1905 if (swizzle || saturate)
1906 dst2 = tex_result;
1907 else
1908 dst2 = dst;
1909
1910 if (dst.mask & TGSI_WRITEMASK_XYZ) {
1911 SVGA3dShaderDestToken src0_zdivw = get_temp( emit );
1912 /* When sampling a depth texture, the result of the comparison is in
1913 * the Y component.
1914 */
1915 struct src_register tex_src_x = scalar(src(tex_result), TGSI_SWIZZLE_Y);
1916 struct src_register r_coord;
1917
1918 if (insn->Instruction.Opcode == TGSI_OPCODE_TXP) {
1919 /* Divide texcoord R by Q */
1920 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1921 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1922 scalar(src0, TGSI_SWIZZLE_W) ))
1923 return FALSE;
1924
1925 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1926 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1927 scalar(src0, TGSI_SWIZZLE_Z),
1928 scalar(src(src0_zdivw), TGSI_SWIZZLE_X) ))
1929 return FALSE;
1930
1931 r_coord = scalar(src(src0_zdivw), TGSI_SWIZZLE_X);
1932 }
1933 else {
1934 r_coord = scalar(src0, TGSI_SWIZZLE_Z);
1935 }
1936
1937 /* Compare texture sample value against R component of texcoord */
1938 if (!emit_select(emit,
1939 emit->key.fkey.tex[unit].compare_func,
1940 writemask( dst2, TGSI_WRITEMASK_XYZ ),
1941 r_coord,
1942 tex_src_x))
1943 return FALSE;
1944 }
1945
1946 if (dst.mask & TGSI_WRITEMASK_W) {
1947 struct src_register one =
1948 scalar( get_zero_immediate( emit ), TGSI_SWIZZLE_W );
1949
1950 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1951 writemask( dst2, TGSI_WRITEMASK_W ),
1952 one ))
1953 return FALSE;
1954 }
1955 }
1956
1957 if (saturate && !swizzle) {
1958 /* MOV_SAT real_dst, dst */
1959 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src(tex_result) ))
1960 return FALSE;
1961 }
1962 else if (swizzle) {
1963 /* swizzle from tex_result to dst (handles saturation too, if any) */
1964 emit_tex_swizzle(emit,
1965 dst, src(tex_result),
1966 emit->key.fkey.tex[unit].swizzle_r,
1967 emit->key.fkey.tex[unit].swizzle_g,
1968 emit->key.fkey.tex[unit].swizzle_b,
1969 emit->key.fkey.tex[unit].swizzle_a);
1970 }
1971
1972 return TRUE;
1973 }
1974
1975
1976 static boolean
1977 emit_bgnloop2(struct svga_shader_emitter *emit,
1978 const struct tgsi_full_instruction *insn)
1979 {
1980 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_LOOP );
1981 struct src_register loop_reg = src_register( SVGA3DREG_LOOP, 0 );
1982 struct src_register const_int = get_loop_const( emit );
1983
1984 emit->dynamic_branching_level++;
1985
1986 return (emit_instruction( emit, inst ) &&
1987 emit_src( emit, loop_reg ) &&
1988 emit_src( emit, const_int ) );
1989 }
1990
1991
1992 static boolean
1993 emit_endloop2(struct svga_shader_emitter *emit,
1994 const struct tgsi_full_instruction *insn)
1995 {
1996 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_ENDLOOP );
1997
1998 emit->dynamic_branching_level--;
1999
2000 return emit_instruction( emit, inst );
2001 }
2002
2003
2004 static boolean
2005 emit_brk(struct svga_shader_emitter *emit,
2006 const struct tgsi_full_instruction *insn)
2007 {
2008 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_BREAK );
2009 return emit_instruction( emit, inst );
2010 }
2011
2012
2013 static boolean
2014 emit_scalar_op1(struct svga_shader_emitter *emit,
2015 unsigned opcode,
2016 const struct tgsi_full_instruction *insn)
2017 {
2018 SVGA3dShaderInstToken inst;
2019 SVGA3dShaderDestToken dst;
2020 struct src_register src;
2021
2022 inst = inst_token( opcode );
2023 dst = translate_dst_register( emit, insn, 0 );
2024 src = translate_src_register( emit, &insn->Src[0] );
2025 src = scalar( src, TGSI_SWIZZLE_X );
2026
2027 return submit_op1( emit, inst, dst, src );
2028 }
2029
2030
2031 static boolean
2032 emit_simple_instruction(struct svga_shader_emitter *emit,
2033 unsigned opcode,
2034 const struct tgsi_full_instruction *insn)
2035 {
2036 const struct tgsi_full_src_register *src = insn->Src;
2037 SVGA3dShaderInstToken inst;
2038 SVGA3dShaderDestToken dst;
2039
2040 inst = inst_token( opcode );
2041 dst = translate_dst_register( emit, insn, 0 );
2042
2043 switch (insn->Instruction.NumSrcRegs) {
2044 case 0:
2045 return submit_op0( emit, inst, dst );
2046 case 1:
2047 return submit_op1( emit, inst, dst,
2048 translate_src_register( emit, &src[0] ));
2049 case 2:
2050 return submit_op2( emit, inst, dst,
2051 translate_src_register( emit, &src[0] ),
2052 translate_src_register( emit, &src[1] ) );
2053 case 3:
2054 return submit_op3( emit, inst, dst,
2055 translate_src_register( emit, &src[0] ),
2056 translate_src_register( emit, &src[1] ),
2057 translate_src_register( emit, &src[2] ) );
2058 default:
2059 assert(0);
2060 return FALSE;
2061 }
2062 }
2063
2064
/**
 * Translate TGSI DDX/DDY (screen-space derivative) instructions.
 *
 * Inside dynamic flow control the derivative opcodes are not valid, so
 * in that case the destination is simply zeroed.
 */
static boolean
emit_deriv(struct svga_shader_emitter *emit,
           const struct tgsi_full_instruction *insn )
{
   if (emit->dynamic_branching_level > 0 &&
       insn->Src[0].Register.File == TGSI_FILE_TEMPORARY)
   {
      struct src_register zero = get_zero_immediate( emit );
      SVGA3dShaderDestToken dst =
         translate_dst_register( emit, insn, 0 );

      /* Deriv opcodes not valid inside dynamic branching, workaround
       * by zeroing out the destination.
       */
      if (!submit_op1(emit,
                      inst_token( SVGA3DOP_MOV ),
                      dst,
                      scalar(zero, TGSI_SWIZZLE_X)))
         return FALSE;

      return TRUE;
   }
   else {
      unsigned opcode;
      const struct tgsi_full_src_register *reg = &insn->Src[0];
      SVGA3dShaderInstToken inst;
      SVGA3dShaderDestToken dst;
      struct src_register src0;

      /* Map TGSI derivative opcode to SVGA3D DSX/DSY. */
      switch (insn->Instruction.Opcode) {
      case TGSI_OPCODE_DDX:
         opcode = SVGA3DOP_DSX;
         break;
      case TGSI_OPCODE_DDY:
         opcode = SVGA3DOP_DSY;
         break;
      default:
         return FALSE;
      }

      inst = inst_token( opcode );
      dst = translate_dst_register( emit, insn, 0 );
      src0 = translate_src_register( emit, reg );

      /* We cannot use negate or abs on source to dsx/dsy instruction.
       */
      if (reg->Register.Absolute ||
          reg->Register.Negate) {
         SVGA3dShaderDestToken temp = get_temp( emit );

         /* NOTE(review): emit_repl presumably copies the modified source
          * into 'temp' and rewrites src0 to reference it -- confirm
          * against emit_repl's definition.
          */
         if (!emit_repl( emit, temp, &src0 ))
            return FALSE;
      }

      return submit_op1( emit, inst, dst, src0 );
   }
}
2122
2123
2124 static boolean
2125 emit_arl(struct svga_shader_emitter *emit,
2126 const struct tgsi_full_instruction *insn)
2127 {
2128 ++emit->current_arl;
2129 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2130 /* MOVA not present in pixel shader instruction set.
2131 * Ignore this instruction altogether since it is
2132 * only used for loop counters -- and for that
2133 * we reference aL directly.
2134 */
2135 return TRUE;
2136 }
2137 if (svga_arl_needs_adjustment( emit )) {
2138 return emit_fake_arl( emit, insn );
2139 } else {
2140 /* no need to adjust, just emit straight arl */
2141 return emit_simple_instruction(emit, SVGA3DOP_MOVA, insn);
2142 }
2143 }
2144
2145
2146 static boolean
2147 emit_pow(struct svga_shader_emitter *emit,
2148 const struct tgsi_full_instruction *insn)
2149 {
2150 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2151 struct src_register src0 = translate_src_register(
2152 emit, &insn->Src[0] );
2153 struct src_register src1 = translate_src_register(
2154 emit, &insn->Src[1] );
2155 boolean need_tmp = FALSE;
2156
2157 /* POW can only output to a temporary */
2158 if (insn->Dst[0].Register.File != TGSI_FILE_TEMPORARY)
2159 need_tmp = TRUE;
2160
2161 /* POW src1 must not be the same register as dst */
2162 if (alias_src_dst( src1, dst ))
2163 need_tmp = TRUE;
2164
2165 /* it's a scalar op */
2166 src0 = scalar( src0, TGSI_SWIZZLE_X );
2167 src1 = scalar( src1, TGSI_SWIZZLE_X );
2168
2169 if (need_tmp) {
2170 SVGA3dShaderDestToken tmp =
2171 writemask(get_temp( emit ), TGSI_WRITEMASK_X );
2172
2173 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ), tmp, src0, src1))
2174 return FALSE;
2175
2176 return submit_op1(emit, inst_token( SVGA3DOP_MOV ),
2177 dst, scalar(src(tmp), 0) );
2178 }
2179 else {
2180 return submit_op2(emit, inst_token( SVGA3DOP_POW ), dst, src0, src1);
2181 }
2182 }
2183
2184
/**
 * Translate TGSI XPD (cross product), emitted as SVGA3D CRS.
 * CRS can only write to a temporary register and its destination must
 * not alias either source, so a scratch temp is used when needed.
 */
static boolean
emit_xpd(struct svga_shader_emitter *emit,
         const struct tgsi_full_instruction *insn)
{
   SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
   const struct src_register src0 = translate_src_register(
      emit, &insn->Src[0] );
   const struct src_register src1 = translate_src_register(
      emit, &insn->Src[1] );
   boolean need_dst_tmp = FALSE;

   /* XPD can only output to a temporary */
   if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP)
      need_dst_tmp = TRUE;

   /* The dst reg must not be the same as src0 or src1*/
   if (alias_src_dst(src0, dst) ||
       alias_src_dst(src1, dst))
      need_dst_tmp = TRUE;

   if (need_dst_tmp) {
      SVGA3dShaderDestToken tmp = get_temp( emit );

      /* Obey DX9 restrictions on mask:
       */
      tmp.mask = dst.mask & TGSI_WRITEMASK_XYZ;

      if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), tmp, src0, src1))
         return FALSE;

      if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
         return FALSE;
   }
   else {
      if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), dst, src0, src1))
         return FALSE;
   }

   /* Need to emit 1.0 to dst.w?
    * The zero immediate is (0, 0, 0, 1), so this un-swizzled W-masked
    * MOV writes 1.0 into the W component.
    */
   if (dst.mask & TGSI_WRITEMASK_W) {
      struct src_register zero = get_zero_immediate( emit );

      if (!submit_op1(emit,
                      inst_token( SVGA3DOP_MOV ),
                      writemask(dst, TGSI_WRITEMASK_W),
                      zero))
         return FALSE;
   }

   return TRUE;
}
2237
2238
2239 static boolean
2240 emit_lrp(struct svga_shader_emitter *emit,
2241 const struct tgsi_full_instruction *insn)
2242 {
2243 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2244 const struct src_register src0 = translate_src_register(
2245 emit, &insn->Src[0] );
2246 const struct src_register src1 = translate_src_register(
2247 emit, &insn->Src[1] );
2248 const struct src_register src2 = translate_src_register(
2249 emit, &insn->Src[2] );
2250
2251 return submit_lrp(emit, dst, src0, src1, src2);
2252 }
2253
2254
/**
 * Translate the TGSI DST (distance vector) instruction.
 */
static boolean
emit_dst_insn(struct svga_shader_emitter *emit,
              const struct tgsi_full_instruction *insn)
{
   if (emit->unit == PIPE_SHADER_VERTEX) {
      /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
       */
      return emit_simple_instruction(emit, SVGA3DOP_DST, insn);
   }
   else {
      /* result[0] = 1 * 1;
       * result[1] = a[1] * b[1];
       * result[2] = a[2] * 1;
       * result[3] = 1 * b[3];
       */
      SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
      SVGA3dShaderDestToken tmp;
      const struct src_register src0 = translate_src_register(
         emit, &insn->Src[0] );
      const struct src_register src1 = translate_src_register(
         emit, &insn->Src[1] );
      struct src_register zero = get_zero_immediate( emit );
      boolean need_tmp = FALSE;

      /* Build in a temp when dst isn't a temp register or aliases a
       * source, since dst is read back by the MUL below.
       */
      if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
          alias_src_dst(src0, dst) ||
          alias_src_dst(src1, dst))
         need_tmp = TRUE;

      if (need_tmp) {
         tmp = get_temp( emit );
      }
      else {
         tmp = dst;
      }

      /* tmp.xw = 1.0  (component 3 of the zero immediate (0,0,0,1))
       */
      if (tmp.mask & TGSI_WRITEMASK_XW) {
         if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
                          writemask(tmp, TGSI_WRITEMASK_XW ),
                          scalar( zero, 3 )))
            return FALSE;
      }

      /* tmp.yz = src0
       */
      if (tmp.mask & TGSI_WRITEMASK_YZ) {
         if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
                          writemask(tmp, TGSI_WRITEMASK_YZ ),
                          src0))
            return FALSE;
      }

      /* tmp.yw = tmp * src1
       */
      if (tmp.mask & TGSI_WRITEMASK_YW) {
         if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
                          writemask(tmp, TGSI_WRITEMASK_YW ),
                          src(tmp),
                          src1))
            return FALSE;
      }

      /* dst = tmp
       */
      if (need_tmp) {
         if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
                          dst,
                          src(tmp)))
            return FALSE;
      }
   }

   return TRUE;
}
2331
2332
/**
 * Translate the TGSI EXP instruction (D3D-style four-component EXP):
 *   x = 2 ^ floor(src.x),  y = src.x - floor(src.x),
 *   z = 2 ^ src.x (partial precision),  w = 1.0
 * Each component is only computed if its writemask bit is set.
 */
static boolean
emit_exp(struct svga_shader_emitter *emit,
         const struct tgsi_full_instruction *insn)
{
   SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
   struct src_register src0 =
      translate_src_register( emit, &insn->Src[0] );
   struct src_register zero = get_zero_immediate( emit );
   SVGA3dShaderDestToken fraction;

   /* The fractional part lands in dst.y when y is written; if only x
    * is written the fraction is still needed, so use a temp for it.
    */
   if (dst.mask & TGSI_WRITEMASK_Y)
      fraction = dst;
   else if (dst.mask & TGSI_WRITEMASK_X)
      fraction = get_temp( emit );
   else
      fraction.value = 0;

   /* If x or y is being written, fill fraction.y with
    * src0 - floor(src0).
    */
   if (dst.mask & TGSI_WRITEMASK_XY) {
      if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
                       writemask( fraction, TGSI_WRITEMASK_Y ),
                       src0 ))
         return FALSE;
   }

   /* If x is being written, fill it with 2 ^ floor(src0).
    */
   if (dst.mask & TGSI_WRITEMASK_X) {
      /* floor(src0) == src0 - frc(src0) */
      if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
                       writemask( dst, TGSI_WRITEMASK_X ),
                       src0,
                       scalar( negate( src( fraction ) ), TGSI_SWIZZLE_Y ) ) )
         return FALSE;

      if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
                       writemask( dst, TGSI_WRITEMASK_X ),
                       scalar( src( dst ), TGSI_SWIZZLE_X ) ) )
         return FALSE;

      /* Free the fraction temp if it wasn't aliased onto dst. */
      if (!(dst.mask & TGSI_WRITEMASK_Y))
         release_temp( emit, fraction );
   }

   /* If z is being written, fill it with 2 ^ src0 (partial precision).
    */
   if (dst.mask & TGSI_WRITEMASK_Z) {
      if (!submit_op1( emit, inst_token( SVGA3DOP_EXPP ),
                       writemask( dst, TGSI_WRITEMASK_Z ),
                       src0 ) )
         return FALSE;
   }

   /* If w is being written, fill it with one (zero immediate .w).
    */
   if (dst.mask & TGSI_WRITEMASK_W) {
      if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
                       writemask(dst, TGSI_WRITEMASK_W),
                       scalar( zero, TGSI_SWIZZLE_W ) ))
         return FALSE;
   }

   return TRUE;
}
2397
2398
/**
 * Translate the TGSI LIT (lighting coefficients) instruction.
 * The VS has a native LIT opcode; the PS expansion is built from a
 * POW/MOV pair plus a predicated move.
 */
static boolean
emit_lit(struct svga_shader_emitter *emit,
         const struct tgsi_full_instruction *insn)
{
   if (emit->unit == PIPE_SHADER_VERTEX) {
      /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
       */
      return emit_simple_instruction(emit, SVGA3DOP_LIT, insn);
   }
   else {
      /* D3D vs. GL semantics can be fairly easily accommodated by
       * variations on this sequence.
       *
       * GL:
       *   tmp.y = src.x
       *   tmp.z = pow(src.y,src.w)
       *   p0 = src0.xxxx > 0
       *   result = zero.wxxw
       *   (p0) result.yz = tmp
       *
       * D3D:
       *   tmp.y = src.x
       *   tmp.z = pow(src.y,src.w)
       *   p0 = src0.xxyy > 0
       *   result = zero.wxxw
       *   (p0) result.yz = tmp
       *
       * Will implement the GL version for now.
       */
      SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
      SVGA3dShaderDestToken tmp = get_temp( emit );
      const struct src_register src0 = translate_src_register(
         emit, &insn->Src[0] );
      struct src_register zero = get_zero_immediate( emit );

      /* tmp = pow(src.y, src.w)
       */
      if (dst.mask & TGSI_WRITEMASK_Z) {
         if (!submit_op2(emit, inst_token( SVGA3DOP_POW ),
                         tmp,
                         scalar(src0, 1),
                         scalar(src0, 3)))
            return FALSE;
      }

      /* tmp.y = src.x
       */
      if (dst.mask & TGSI_WRITEMASK_Y) {
         if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
                          writemask(tmp, TGSI_WRITEMASK_Y ),
                          scalar(src0, 0)))
            return FALSE;
      }

      /* Can't quite do this with emit conditional due to the extra
       * writemask on the predicated mov:
       */
      {
         SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
         SVGA3dShaderInstToken setp_token, mov_token;
         struct src_register predsrc;

         setp_token = inst_token( SVGA3DOP_SETP );
         mov_token = inst_token( SVGA3DOP_MOV );

         /* Predicate set with a greater-than comparison. */
         setp_token.control = SVGA3DOPCOMP_GT;

         /* D3D vs GL semantics (GL branch is taken; the D3D variant
          * is kept for reference):
          */
         if (0)
            predsrc = swizzle(src0, 0, 0, 1, 1); /* D3D */
         else
            predsrc = swizzle(src0, 0, 0, 0, 0); /* GL */

         /* SETP src0.xxyy, GT, {0}.x */
         if (!submit_op2( emit, setp_token, pred_reg,
                          predsrc,
                          swizzle(zero, 0, 0, 0, 0) ))
            return FALSE;

         /* MOV dst, fail */
         if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst,
                          swizzle(zero, 3, 0, 0, 3 )))
            return FALSE;

         /* MOV dst.yz, tmp (predicated)
          *
          * Note that the predicate reg (and possible modifiers) is passed
          * as the first source argument.
          */
         if (dst.mask & TGSI_WRITEMASK_YZ) {
            mov_token.predicated = 1;
            if (!submit_op2( emit, mov_token,
                             writemask(dst, TGSI_WRITEMASK_YZ),
                             src( pred_reg ), src( tmp ) ))
               return FALSE;
         }
      }
   }

   return TRUE;
}
2501
2502
2503 static boolean
2504 emit_ex2(struct svga_shader_emitter *emit,
2505 const struct tgsi_full_instruction *insn)
2506 {
2507 SVGA3dShaderInstToken inst;
2508 SVGA3dShaderDestToken dst;
2509 struct src_register src0;
2510
2511 inst = inst_token( SVGA3DOP_EXP );
2512 dst = translate_dst_register( emit, insn, 0 );
2513 src0 = translate_src_register( emit, &insn->Src[0] );
2514 src0 = scalar( src0, TGSI_SWIZZLE_X );
2515
2516 if (dst.mask != TGSI_WRITEMASK_XYZW) {
2517 SVGA3dShaderDestToken tmp = get_temp( emit );
2518
2519 if (!submit_op1( emit, inst, tmp, src0 ))
2520 return FALSE;
2521
2522 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2523 dst,
2524 scalar( src( tmp ), TGSI_SWIZZLE_X ) );
2525 }
2526
2527 return submit_op1( emit, inst, dst, src0 );
2528 }
2529
2530
/**
 * Translate the TGSI LOG instruction:
 *   x = floor(log2(abs(src.x)))
 *   y = abs(src.x) / 2 ^ floor(log2(abs(src.x)))
 *   z = log2(abs(src.x))
 *   w = 1.0
 * Each component is only computed if its writemask bit is set.
 */
static boolean
emit_log(struct svga_shader_emitter *emit,
         const struct tgsi_full_instruction *insn)
{
   SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
   struct src_register src0 =
      translate_src_register( emit, &insn->Src[0] );
   struct src_register zero = get_zero_immediate( emit );
   SVGA3dShaderDestToken abs_tmp;
   struct src_register abs_src0;
   SVGA3dShaderDestToken log2_abs;

   abs_tmp.value = 0;

   /* log2(abs(src0)) lands directly in dst.z when z is written; the
    * x/y computations also need it, so fall back to a temp for them.
    */
   if (dst.mask & TGSI_WRITEMASK_Z)
      log2_abs = dst;
   else if (dst.mask & TGSI_WRITEMASK_XY)
      log2_abs = get_temp( emit );
   else
      log2_abs.value = 0;

   /* If x, y or z is being written, fill log2_abs.z with
    * log2( abs( src0 ) ).
    */
   if (dst.mask & TGSI_WRITEMASK_XYZ) {
      /* The absolute() modifier is applied directly when src0 has no
       * modifier (or already ABS); any other modifier is first
       * flushed through a MOV into a temp.
       */
      if (!src0.base.srcMod || src0.base.srcMod == SVGA3DSRCMOD_ABS)
         abs_src0 = src0;
      else {
         abs_tmp = get_temp( emit );

         if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
                          abs_tmp,
                          src0 ) )
            return FALSE;

         abs_src0 = src( abs_tmp );
      }

      abs_src0 = absolute( scalar( abs_src0, TGSI_SWIZZLE_X ) );

      if (!submit_op1( emit, inst_token( SVGA3DOP_LOG ),
                       writemask( log2_abs, TGSI_WRITEMASK_Z ),
                       abs_src0 ) )
         return FALSE;
   }

   if (dst.mask & TGSI_WRITEMASK_XY) {
      SVGA3dShaderDestToken floor_log2;

      if (dst.mask & TGSI_WRITEMASK_X)
         floor_log2 = dst;
      else
         floor_log2 = get_temp( emit );

      /* If x is being written, fill it with floor( log2( abs( src0 ) ) ),
       * computed as log2_abs.z - frc( log2_abs.z ).
       */
      if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
                       writemask( floor_log2, TGSI_WRITEMASK_X ),
                       scalar( src( log2_abs ), TGSI_SWIZZLE_Z ) ) )
         return FALSE;

      if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
                       writemask( floor_log2, TGSI_WRITEMASK_X ),
                       scalar( src( log2_abs ), TGSI_SWIZZLE_Z ),
                       negate( src( floor_log2 ) ) ) )
         return FALSE;

      /* If y is being written, fill it with
       * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
       */
      if (dst.mask & TGSI_WRITEMASK_Y) {
         if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
                          writemask( dst, TGSI_WRITEMASK_Y ),
                          negate( scalar( src( floor_log2 ),
                                          TGSI_SWIZZLE_X ) ) ) )
            return FALSE;

         if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
                          writemask( dst, TGSI_WRITEMASK_Y ),
                          src( dst ),
                          abs_src0 ) )
            return FALSE;
      }

      /* Release helper temps that weren't aliased onto dst. */
      if (!(dst.mask & TGSI_WRITEMASK_X))
         release_temp( emit, floor_log2 );

      if (!(dst.mask & TGSI_WRITEMASK_Z))
         release_temp( emit, log2_abs );
   }

   /* Release the modifier-flush temp if one was allocated above. */
   if (dst.mask & TGSI_WRITEMASK_XYZ && src0.base.srcMod &&
       src0.base.srcMod != SVGA3DSRCMOD_ABS)
      release_temp( emit, abs_tmp );

   /* If w is being written, fill it with one (zero immediate .w).
    */
   if (dst.mask & TGSI_WRITEMASK_W) {
      if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
                       writemask(dst, TGSI_WRITEMASK_W),
                       scalar( zero, TGSI_SWIZZLE_W ) ))
         return FALSE;
   }

   return TRUE;
}
2636
2637
/**
 * Translate TGSI TRUNC or ROUND instruction.
 * We need to truncate toward zero. Ex: trunc(-1.9) = -1
 * Different approaches are needed for VS versus PS.
 *
 * \param round  TRUE for ROUND (round-to-nearest, via abs + 0.5),
 *               FALSE for TRUNC (truncate toward zero)
 */
static boolean
emit_trunc_round(struct svga_shader_emitter *emit,
                 const struct tgsi_full_instruction *insn,
                 boolean round)
{
   SVGA3dShaderDestToken dst = translate_dst_register(emit, insn, 0);
   const struct src_register src0 =
      translate_src_register(emit, &insn->Src[0] );
   /* t1 ends up holding the rounded/truncated magnitude of src0. */
   SVGA3dShaderDestToken t1 = get_temp(emit);

   if (round) {
      SVGA3dShaderDestToken t0 = get_temp(emit);
      struct src_register half = get_half_immediate(emit);

      /* t0 = abs(src0) + 0.5 */
      if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t0,
                      absolute(src0), half))
         return FALSE;

      /* t1 = fract(t0) */
      if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, src(t0)))
         return FALSE;

      /* t1 = t0 - t1 */
      if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, src(t0),
                      negate(src(t1))))
         return FALSE;
   }
   else {
      /* trunc */

      /* t1 = fract(abs(src0)) */
      if (!submit_op1(emit, inst_token(SVGA3DOP_FRC), t1, absolute(src0)))
         return FALSE;

      /* t1 = abs(src0) - t1 */
      if (!submit_op2(emit, inst_token(SVGA3DOP_ADD), t1, absolute(src0),
                      negate(src(t1))))
         return FALSE;
   }

   /*
    * Now we need to multiply t1 by the sign of the original value.
    */
   if (emit->unit == PIPE_SHADER_VERTEX) {
      /* For VS: use SGN instruction */
      /* Need two extra/dummy registers: */
      SVGA3dShaderDestToken t2 = get_temp(emit), t3 = get_temp(emit),
         t4 = get_temp(emit);

      /* t2 = sign(src0) */
      if (!submit_op3(emit, inst_token(SVGA3DOP_SGN), t2, src0,
                      src(t3), src(t4)))
         return FALSE;

      /* dst = t1 * t2 */
      if (!submit_op2(emit, inst_token(SVGA3DOP_MUL), dst, src(t1), src(t2)))
         return FALSE;
   }
   else {
      /* For FS: use CMP, which selects t1 or -t1 based on the sign
       * of src0 (no SGN available in the PS profile).
       * NOTE(review): "no SGN in PS" is inferred from this split —
       * confirm against the SVGA3D opcode tables.
       */
      return submit_op3(emit, inst_token( SVGA3DOP_CMP ), dst,
                        src0, src(t1), negate(src(t1)));
   }

   return TRUE;
}
2710
2711
2712 static boolean
2713 emit_bgnsub(struct svga_shader_emitter *emit,
2714 unsigned position,
2715 const struct tgsi_full_instruction *insn)
2716 {
2717 unsigned i;
2718
2719 /* Note that we've finished the main function and are now emitting
2720 * subroutines. This affects how we terminate the generated
2721 * shader.
2722 */
2723 emit->in_main_func = FALSE;
2724
2725 for (i = 0; i < emit->nr_labels; i++) {
2726 if (emit->label[i] == position) {
2727 return (emit_instruction( emit, inst_token( SVGA3DOP_RET ) ) &&
2728 emit_instruction( emit, inst_token( SVGA3DOP_LABEL ) ) &&
2729 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2730 }
2731 }
2732
2733 assert(0);
2734 return TRUE;
2735 }
2736
2737
2738 static boolean
2739 emit_call(struct svga_shader_emitter *emit,
2740 const struct tgsi_full_instruction *insn)
2741 {
2742 unsigned position = insn->Label.Label;
2743 unsigned i;
2744
2745 for (i = 0; i < emit->nr_labels; i++) {
2746 if (emit->label[i] == position)
2747 break;
2748 }
2749
2750 if (emit->nr_labels == Elements(emit->label))
2751 return FALSE;
2752
2753 if (i == emit->nr_labels) {
2754 emit->label[i] = position;
2755 emit->nr_labels++;
2756 }
2757
2758 return (emit_instruction( emit, inst_token( SVGA3DOP_CALL ) ) &&
2759 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2760 }
2761
2762
2763 /**
2764 * Called at the end of the shader. Actually, emit special "fix-up"
2765 * code for the vertex/fragment shader.
2766 */
2767 static boolean
2768 emit_end(struct svga_shader_emitter *emit)
2769 {
2770 if (emit->unit == PIPE_SHADER_VERTEX) {
2771 return emit_vs_postamble( emit );
2772 }
2773 else {
2774 return emit_ps_postamble( emit );
2775 }
2776 }
2777
2778
2779
/**
 * Translate a single TGSI instruction into SVGA3D shader tokens,
 * dispatching on the TGSI opcode.
 *
 * \param position  index of the instruction in the token stream
 *                  (used to match subroutine labels in BGNSUB)
 * \return TRUE for success, FALSE for unsupported opcodes or
 *         emission failure
 */
static boolean
svga_emit_instruction(struct svga_shader_emitter *emit,
                      unsigned position,
                      const struct tgsi_full_instruction *insn)
{
   switch (insn->Instruction.Opcode) {

   case TGSI_OPCODE_ARL:
      return emit_arl( emit, insn );

   case TGSI_OPCODE_TEX:
   case TGSI_OPCODE_TXB:
   case TGSI_OPCODE_TXP:
   case TGSI_OPCODE_TXL:
   case TGSI_OPCODE_TXD:
      return emit_tex( emit, insn );

   case TGSI_OPCODE_DDX:
   case TGSI_OPCODE_DDY:
      return emit_deriv( emit, insn );

   case TGSI_OPCODE_BGNSUB:
      return emit_bgnsub( emit, position, insn );

   case TGSI_OPCODE_ENDSUB:
      return TRUE;

   case TGSI_OPCODE_CAL:
      return emit_call( emit, insn );

   case TGSI_OPCODE_FLR:
      return emit_floor( emit, insn );

   case TGSI_OPCODE_TRUNC:
      return emit_trunc_round( emit, insn, FALSE );

   case TGSI_OPCODE_ROUND:
      return emit_trunc_round( emit, insn, TRUE );

   case TGSI_OPCODE_CEIL:
      return emit_ceil( emit, insn );

   case TGSI_OPCODE_CMP:
      return emit_cmp( emit, insn );

   case TGSI_OPCODE_DIV:
      return emit_div( emit, insn );

   case TGSI_OPCODE_DP2:
      return emit_dp2( emit, insn );

   case TGSI_OPCODE_DPH:
      return emit_dph( emit, insn );

   case TGSI_OPCODE_NRM:
      return emit_nrm( emit, insn );

   case TGSI_OPCODE_COS:
      return emit_cos( emit, insn );

   case TGSI_OPCODE_SIN:
      return emit_sin( emit, insn );

   case TGSI_OPCODE_SCS:
      return emit_sincos( emit, insn );

   case TGSI_OPCODE_END:
      /* TGSI always finishes the main func with an END */
      return emit_end( emit );

   case TGSI_OPCODE_KILL_IF:
      return emit_kill_if( emit, insn );

   /* Selection opcodes. The underlying language is fairly
    * non-orthogonal about these.
    */
   case TGSI_OPCODE_SEQ:
      return emit_select_op( emit, PIPE_FUNC_EQUAL, insn );

   case TGSI_OPCODE_SNE:
      return emit_select_op( emit, PIPE_FUNC_NOTEQUAL, insn );

   case TGSI_OPCODE_SGT:
      return emit_select_op( emit, PIPE_FUNC_GREATER, insn );

   case TGSI_OPCODE_SGE:
      return emit_select_op( emit, PIPE_FUNC_GEQUAL, insn );

   case TGSI_OPCODE_SLT:
      return emit_select_op( emit, PIPE_FUNC_LESS, insn );

   case TGSI_OPCODE_SLE:
      return emit_select_op( emit, PIPE_FUNC_LEQUAL, insn );

   case TGSI_OPCODE_SUB:
      return emit_sub( emit, insn );

   case TGSI_OPCODE_POW:
      return emit_pow( emit, insn );

   case TGSI_OPCODE_EX2:
      return emit_ex2( emit, insn );

   case TGSI_OPCODE_EXP:
      return emit_exp( emit, insn );

   case TGSI_OPCODE_LOG:
      return emit_log( emit, insn );

   case TGSI_OPCODE_LG2:
      return emit_scalar_op1( emit, SVGA3DOP_LOG, insn );

   case TGSI_OPCODE_RSQ:
      return emit_scalar_op1( emit, SVGA3DOP_RSQ, insn );

   case TGSI_OPCODE_RCP:
      return emit_scalar_op1( emit, SVGA3DOP_RCP, insn );

   case TGSI_OPCODE_CONT:
      /* not expected (we return PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED = 0) */
      return FALSE;

   case TGSI_OPCODE_RET:
      /* This is a noop -- we tell mesa that we can't support RET
       * within a function (early return), so this will always be
       * followed by an ENDSUB.
       */
      return TRUE;

   /* These aren't actually used by any of the frontends we care
    * about:
    */
   case TGSI_OPCODE_CLAMP:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_NOT:
   case TGSI_OPCODE_SHL:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_XOR:
      return FALSE;

   case TGSI_OPCODE_IF:
      return emit_if( emit, insn );
   case TGSI_OPCODE_ELSE:
      return emit_else( emit, insn );
   case TGSI_OPCODE_ENDIF:
      return emit_endif( emit, insn );

   case TGSI_OPCODE_BGNLOOP:
      return emit_bgnloop2( emit, insn );
   case TGSI_OPCODE_ENDLOOP:
      return emit_endloop2( emit, insn );
   case TGSI_OPCODE_BRK:
      return emit_brk( emit, insn );

   case TGSI_OPCODE_XPD:
      return emit_xpd( emit, insn );

   case TGSI_OPCODE_KILL:
      return emit_kill( emit, insn );

   case TGSI_OPCODE_DST:
      return emit_dst_insn( emit, insn );

   case TGSI_OPCODE_LIT:
      return emit_lit( emit, insn );

   case TGSI_OPCODE_LRP:
      return emit_lrp( emit, insn );

   case TGSI_OPCODE_SSG:
      return emit_ssg( emit, insn );

   default:
      {
         /* Anything else maps 1:1 onto an SVGA3D opcode, or is
          * unsupported (translate_opcode() returns SVGA3DOP_LAST_INST).
          */
         unsigned opcode = translate_opcode(insn->Instruction.Opcode);

         if (opcode == SVGA3DOP_LAST_INST)
            return FALSE;

         if (!emit_simple_instruction( emit, opcode, insn ))
            return FALSE;
      }
   }

   return TRUE;
}
2968
2969
2970 static boolean
2971 svga_emit_immediate(struct svga_shader_emitter *emit,
2972 struct tgsi_full_immediate *imm)
2973 {
2974 static const float id[4] = {0,0,0,1};
2975 float value[4];
2976 unsigned i;
2977
2978 assert(1 <= imm->Immediate.NrTokens && imm->Immediate.NrTokens <= 5);
2979 for (i = 0; i < imm->Immediate.NrTokens - 1; i++) {
2980 float f = imm->u[i].Float;
2981 value[i] = util_is_inf_or_nan(f) ? 0.0f : f;
2982 }
2983
2984 for ( ; i < 4; i++ )
2985 value[i] = id[i];
2986
2987 return emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
2988 emit->imm_start + emit->internal_imm_count++,
2989 value[0], value[1], value[2], value[3]);
2990 }
2991
2992
2993 static boolean
2994 make_immediate(struct svga_shader_emitter *emit,
2995 float a, float b, float c, float d,
2996 struct src_register *out )
2997 {
2998 unsigned idx = emit->nr_hw_float_const++;
2999
3000 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
3001 idx, a, b, c, d ))
3002 return FALSE;
3003
3004 *out = src_register( SVGA3DREG_CONST, idx );
3005
3006 return TRUE;
3007 }
3008
3009
3010 static boolean
3011 emit_vs_preamble(struct svga_shader_emitter *emit)
3012 {
3013 if (!emit->key.vkey.need_prescale) {
3014 if (!make_immediate( emit, 0, 0, .5, .5,
3015 &emit->imm_0055))
3016 return FALSE;
3017 }
3018
3019 return TRUE;
3020 }
3021
3022
/**
 * Emit extra code at the start of a pixel shader: when the shader
 * reads the Z component of the fragment position, reassemble the
 * full position into ps_temp_pos from vPos plus an extra texcoord.
 */
static boolean
emit_ps_preamble(struct svga_shader_emitter *emit)
{
   if (emit->ps_reads_pos && emit->info.reads_z) {
      /*
       * Assemble the position from various bits of inputs. Depth and W
       * are passed in a texcoord because D3D's vPos does not hold Z or W.
       * Also fixup the perspective interpolation.
       *
       * temp_pos.xy = vPos.xy
       * temp_pos.w = rcp(texcoord1.w);
       * temp_pos.z = texcoord1.z * temp_pos.w;
       */
      if (!submit_op1( emit,
                       inst_token(SVGA3DOP_MOV),
                       writemask( emit->ps_temp_pos, TGSI_WRITEMASK_XY ),
                       emit->ps_true_pos ))
         return FALSE;

      if (!submit_op1( emit,
                       inst_token(SVGA3DOP_RCP),
                       writemask( emit->ps_temp_pos, TGSI_WRITEMASK_W ),
                       scalar( emit->ps_depth_pos, TGSI_SWIZZLE_W ) ))
         return FALSE;

      if (!submit_op2( emit,
                       inst_token(SVGA3DOP_MUL),
                       writemask( emit->ps_temp_pos, TGSI_WRITEMASK_Z ),
                       scalar( emit->ps_depth_pos, TGSI_SWIZZLE_Z ),
                       scalar( src(emit->ps_temp_pos), TGSI_SWIZZLE_W ) ))
         return FALSE;
   }

   return TRUE;
}
3058
3059
/**
 * Emit the pixel shader postamble: copy the temporary depth and color
 * results into the real output registers, optionally overriding the
 * colors with white for the XOR logicop workaround.
 */
static boolean
emit_ps_postamble(struct svga_shader_emitter *emit)
{
   unsigned i;

   /* PS oDepth is incredibly fragile and it's very hard to catch the
    * types of usage that break it during shader emit. Easier just to
    * redirect the main program to a temporary and then only touch
    * oDepth with a hand-crafted MOV below.
    */
   if (SVGA3dShaderGetRegType(emit->true_pos.value) != 0) {
      if (!submit_op1( emit,
                       inst_token(SVGA3DOP_MOV),
                       emit->true_pos,
                       scalar(src(emit->temp_pos), TGSI_SWIZZLE_Z) ))
         return FALSE;
   }

   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      /* Register type 0 means no real output bound at this slot. */
      if (SVGA3dShaderGetRegType(emit->true_col[i].value) != 0) {
         /* Potentially override output colors with white for XOR
          * logicop workaround.
          */
         if (emit->unit == PIPE_SHADER_FRAGMENT &&
             emit->key.fkey.white_fragments) {
            /* The zero immediate's .w component holds 1.0. */
            struct src_register one = scalar( get_zero_immediate( emit ),
                                              TGSI_SWIZZLE_W );

            if (!submit_op1( emit,
                             inst_token(SVGA3DOP_MOV),
                             emit->true_col[i],
                             one ))
               return FALSE;
         }
         else {
            if (!submit_op1( emit,
                             inst_token(SVGA3DOP_MOV),
                             emit->true_col[i],
                             src(emit->temp_col[i]) ))
               return FALSE;
         }
      }
   }

   return TRUE;
}
3106
3107
/**
 * Emit the vertex shader postamble: copy temporary PSIZ to the real
 * output and adjust the vertex position for the difference between
 * GL and D3D clip spaces (with or without prescale factors).
 */
static boolean
emit_vs_postamble(struct svga_shader_emitter *emit)
{
   /* PSIZ output is incredibly fragile and it's very hard to catch
    * the types of usage that break it during shader emit. Easier
    * just to redirect the main program to a temporary and then only
    * touch PSIZ with a hand-crafted MOV below.
    */
   if (SVGA3dShaderGetRegType(emit->true_psiz.value) != 0) {
      if (!submit_op1( emit,
                       inst_token(SVGA3DOP_MOV),
                       emit->true_psiz,
                       scalar(src(emit->temp_psiz), TGSI_SWIZZLE_X) ))
         return FALSE;
   }

   /* Need to perform various manipulations on vertex position to cope
    * with the different GL and D3D clip spaces.
    */
   if (emit->key.vkey.need_prescale) {
      SVGA3dShaderDestToken temp_pos = emit->temp_pos;
      SVGA3dShaderDestToken depth = emit->depth_pos;
      SVGA3dShaderDestToken pos = emit->true_pos;
      /* Prescale scale/translate constants live just past the last
       * shader-declared constant.
       */
      unsigned offset = emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
      struct src_register prescale_scale = src_register( SVGA3DREG_CONST,
                                                         offset + 0 );
      struct src_register prescale_trans = src_register( SVGA3DREG_CONST,
                                                         offset + 1 );

      /* Keep the unscaled W in the extra depth register. */
      if (!submit_op1( emit,
                       inst_token(SVGA3DOP_MOV),
                       writemask(depth, TGSI_WRITEMASK_W),
                       scalar(src(temp_pos), TGSI_SWIZZLE_W) ))
         return FALSE;

      /* MUL temp_pos.xyz, temp_pos, prescale.scale
       * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
       * --> Note that prescale.trans.w == 0
       */
      if (!submit_op2( emit,
                       inst_token(SVGA3DOP_MUL),
                       writemask(temp_pos, TGSI_WRITEMASK_XYZ),
                       src(temp_pos),
                       prescale_scale ))
         return FALSE;

      if (!submit_op3( emit,
                       inst_token(SVGA3DOP_MAD),
                       pos,
                       swizzle(src(temp_pos), 3, 3, 3, 3),
                       prescale_trans,
                       src(temp_pos)))
         return FALSE;

      /* Also write to depth value */
      if (!submit_op3( emit,
                       inst_token(SVGA3DOP_MAD),
                       writemask(depth, TGSI_WRITEMASK_Z),
                       swizzle(src(temp_pos), 3, 3, 3, 3),
                       prescale_trans,
                       src(temp_pos) ))
         return FALSE;
   }
   else {
      SVGA3dShaderDestToken temp_pos = emit->temp_pos;
      SVGA3dShaderDestToken depth = emit->depth_pos;
      SVGA3dShaderDestToken pos = emit->true_pos;
      struct src_register imm_0055 = emit->imm_0055;

      /* Adjust GL clipping coordinate space to hardware (D3D-style):
       *
       * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
       * MOV result.position, temp_pos
       */
      if (!submit_op2( emit,
                       inst_token(SVGA3DOP_DP4),
                       writemask(temp_pos, TGSI_WRITEMASK_Z),
                       imm_0055,
                       src(temp_pos) ))
         return FALSE;

      if (!submit_op1( emit,
                       inst_token(SVGA3DOP_MOV),
                       pos,
                       src(temp_pos) ))
         return FALSE;

      /* Move the manipulated depth into the extra texcoord reg */
      if (!submit_op1( emit,
                       inst_token(SVGA3DOP_MOV),
                       writemask(depth, TGSI_WRITEMASK_ZW),
                       src(temp_pos) ))
         return FALSE;
   }

   return TRUE;
}
3205
3206
3207 /**
3208 * For the pixel shader: emit the code which chooses the front
3209 * or back face color depending on triangle orientation.
3210 *
3211 * 0: IF VFACE :4
3212 * 1: COLOR = FrontColor;
3213 * 2: ELSE
3214 * 3: COLOR = BackColor;
3215 * 4: ENDIF
3216 */
static boolean
emit_light_twoside(struct svga_shader_emitter *emit)
{
   struct src_register vface, zero;
   struct src_register front[2];
   struct src_register back[2];
   SVGA3dShaderDestToken color[2];
   int count = emit->internal_color_count;
   int i;
   SVGA3dShaderInstToken if_token;

   /* Nothing to do if no color inputs were declared. */
   if (count == 0)
      return TRUE;

   vface = get_vface( emit );
   zero = get_zero_immediate( emit );

   /* Can't use get_temp() to allocate the color reg as such
    * temporaries will be reclaimed after each instruction by the call
    * to reset_temp_regs().
    */
   for (i = 0; i < count; i++) {
      color[i] = dst_register( SVGA3DREG_TEMP, emit->nr_hw_temp++ );
      front[i] = emit->input_map[emit->internal_color_idx[i]];

      /* Back is always the next input:
       */
      back[i] = front[i];
      back[i].base.num = front[i].base.num + 1;

      /* Reassign the input_map to the actual front-face color:
       */
      emit->input_map[emit->internal_color_idx[i]] = src(color[i]);
   }

   /* IFC compares VFACE against 0; the comparison direction depends
    * on which winding is considered front-facing.
    */
   if_token = inst_token( SVGA3DOP_IFC );

   if (emit->key.fkey.front_ccw)
      if_token.control = SVGA3DOPCOMP_LT;
   else
      if_token.control = SVGA3DOPCOMP_GT;

   zero = scalar(zero, TGSI_SWIZZLE_X);

   if (!(emit_instruction( emit, if_token ) &&
         emit_src( emit, vface ) &&
         emit_src( emit, zero ) ))
      return FALSE;

   /* Front-facing branch: copy the front colors. */
   for (i = 0; i < count; i++) {
      if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], front[i] ))
         return FALSE;
   }

   if (!(emit_instruction( emit, inst_token( SVGA3DOP_ELSE))))
      return FALSE;

   /* Back-facing branch: copy the back colors. */
   for (i = 0; i < count; i++) {
      if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], back[i] ))
         return FALSE;
   }

   if (!emit_instruction( emit, inst_token( SVGA3DOP_ENDIF ) ))
      return FALSE;

   return TRUE;
}
3284
3285
3286 /**
3287 * 0: SETP_GT TEMP, VFACE, 0
3288 * where TEMP is a fake frontface register
3289 */
static boolean
emit_frontface(struct svga_shader_emitter *emit)
{
   struct src_register vface, zero;
   SVGA3dShaderDestToken temp;
   struct src_register pass, fail;

   vface = get_vface( emit );
   zero = get_zero_immediate( emit );

   /* Can't use get_temp() to allocate the fake frontface reg as such
    * temporaries will be reclaimed after each instruction by the call
    * to reset_temp_regs().
    */
   temp = dst_register( SVGA3DREG_TEMP,
                        emit->nr_hw_temp++ );

   /* Pick 1.0 (zero immediate .w) or 0.0 (.x) for the frontface value,
    * swapped depending on which winding counts as front-facing.
    */
   if (emit->key.fkey.front_ccw) {
      pass = scalar( zero, TGSI_SWIZZLE_X );
      fail = scalar( zero, TGSI_SWIZZLE_W );
   } else {
      pass = scalar( zero, TGSI_SWIZZLE_W );
      fail = scalar( zero, TGSI_SWIZZLE_X );
   }

   /* temp = (vface > 0) ? pass : fail */
   if (!emit_conditional(emit, PIPE_FUNC_GREATER,
                         temp, vface, scalar( zero, TGSI_SWIZZLE_X ),
                         pass, fail))
      return FALSE;

   /* Reassign the input_map to the actual front-face color:
    */
   emit->input_map[emit->internal_frontface_idx] = src(temp);

   return TRUE;
}
3326
3327
3328 /**
3329 * Emit code to invert the T component of the incoming texture coordinate.
3330 * This is used for drawing point sprites when
3331 * pipe_rasterizer_state::sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT.
3332 */
static boolean
emit_inverted_texcoords(struct svga_shader_emitter *emit)
{
   struct src_register zero = get_zero_immediate(emit);
   struct src_register pos_neg_one = get_pos_neg_one_immediate( emit );
   unsigned inverted_texcoords = emit->inverted_texcoords;

   /* Handle one texcoord unit per iteration, clearing its bit. */
   while (inverted_texcoords) {
      const unsigned unit = ffs(inverted_texcoords) - 1;

      assert(emit->inverted_texcoords & (1 << unit));

      assert(unit < Elements(emit->ps_true_texcoord));

      assert(unit < Elements(emit->ps_inverted_texcoord_input));

      assert(emit->ps_inverted_texcoord_input[unit]
             < Elements(emit->input_map));

      /* inverted = coord * (1, -1, 1, 1) + (0, 1, 0, 0) */
      if (!submit_op3(emit,
                      inst_token(SVGA3DOP_MAD),
                      dst(emit->ps_inverted_texcoord[unit]),
                      emit->ps_true_texcoord[unit],
                      swizzle(pos_neg_one, 0, 3, 0, 0),  /* (1, -1, 1, 1) */
                      swizzle(zero, 0, 3, 0, 0)))  /* (0, 1, 0, 0) */
         return FALSE;

      /* Reassign the input_map entry to the new texcoord register */
      emit->input_map[emit->ps_inverted_texcoord_input[unit]] =
         emit->ps_inverted_texcoord[unit];

      inverted_texcoords &= ~(1 << unit);
   }

   return TRUE;
}
3370
3371
3372 static boolean
3373 needs_to_create_zero( struct svga_shader_emitter *emit )
3374 {
3375 unsigned i;
3376
3377 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3378 if (emit->key.fkey.light_twoside)
3379 return TRUE;
3380
3381 if (emit->key.fkey.white_fragments)
3382 return TRUE;
3383
3384 if (emit->emit_frontface)
3385 return TRUE;
3386
3387 if (emit->info.opcode_count[TGSI_OPCODE_DST] >= 1 ||
3388 emit->info.opcode_count[TGSI_OPCODE_SSG] >= 1 ||
3389 emit->info.opcode_count[TGSI_OPCODE_LIT] >= 1)
3390 return TRUE;
3391
3392 if (emit->inverted_texcoords)
3393 return TRUE;
3394
3395 /* look for any PIPE_SWIZZLE_ZERO/ONE terms */
3396 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3397 if (emit->key.fkey.tex[i].swizzle_r > PIPE_SWIZZLE_ALPHA ||
3398 emit->key.fkey.tex[i].swizzle_g > PIPE_SWIZZLE_ALPHA ||
3399 emit->key.fkey.tex[i].swizzle_b > PIPE_SWIZZLE_ALPHA ||
3400 emit->key.fkey.tex[i].swizzle_a > PIPE_SWIZZLE_ALPHA)
3401 return TRUE;
3402 }
3403
3404 for (i = 0; i < emit->key.fkey.num_textures; i++) {
3405 if (emit->key.fkey.tex[i].compare_mode
3406 == PIPE_TEX_COMPARE_R_TO_TEXTURE)
3407 return TRUE;
3408 }
3409 }
3410
3411 if (emit->unit == PIPE_SHADER_VERTEX) {
3412 if (emit->info.opcode_count[TGSI_OPCODE_CMP] >= 1)
3413 return TRUE;
3414 }
3415
3416 if (emit->info.opcode_count[TGSI_OPCODE_IF] >= 1 ||
3417 emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1 ||
3418 emit->info.opcode_count[TGSI_OPCODE_DDX] >= 1 ||
3419 emit->info.opcode_count[TGSI_OPCODE_DDY] >= 1 ||
3420 emit->info.opcode_count[TGSI_OPCODE_ROUND] >= 1 ||
3421 emit->info.opcode_count[TGSI_OPCODE_SGE] >= 1 ||
3422 emit->info.opcode_count[TGSI_OPCODE_SGT] >= 1 ||
3423 emit->info.opcode_count[TGSI_OPCODE_SLE] >= 1 ||
3424 emit->info.opcode_count[TGSI_OPCODE_SLT] >= 1 ||
3425 emit->info.opcode_count[TGSI_OPCODE_SNE] >= 1 ||
3426 emit->info.opcode_count[TGSI_OPCODE_SEQ] >= 1 ||
3427 emit->info.opcode_count[TGSI_OPCODE_EXP] >= 1 ||
3428 emit->info.opcode_count[TGSI_OPCODE_LOG] >= 1 ||
3429 emit->info.opcode_count[TGSI_OPCODE_XPD] >= 1 ||
3430 emit->info.opcode_count[TGSI_OPCODE_KILL] >= 1)
3431 return TRUE;
3432
3433 return FALSE;
3434 }
3435
3436
3437 static boolean
3438 needs_to_create_loop_const( struct svga_shader_emitter *emit )
3439 {
3440 return (emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1);
3441 }
3442
3443
3444 static boolean
3445 needs_to_create_arl_consts( struct svga_shader_emitter *emit )
3446 {
3447 return (emit->num_arl_consts > 0);
3448 }
3449
3450
3451 static boolean
3452 pre_parse_add_indirect( struct svga_shader_emitter *emit,
3453 int num, int current_arl)
3454 {
3455 int i;
3456 assert(num < 0);
3457
3458 for (i = 0; i < emit->num_arl_consts; ++i) {
3459 if (emit->arl_consts[i].arl_num == current_arl)
3460 break;
3461 }
3462 /* new entry */
3463 if (emit->num_arl_consts == i) {
3464 ++emit->num_arl_consts;
3465 }
3466 emit->arl_consts[i].number = (emit->arl_consts[i].number > num) ?
3467 num :
3468 emit->arl_consts[i].number;
3469 emit->arl_consts[i].arl_num = current_arl;
3470 return TRUE;
3471 }
3472
3473
3474 static boolean
3475 pre_parse_instruction( struct svga_shader_emitter *emit,
3476 const struct tgsi_full_instruction *insn,
3477 int current_arl)
3478 {
3479 if (insn->Src[0].Register.Indirect &&
3480 insn->Src[0].Indirect.File == TGSI_FILE_ADDRESS) {
3481 const struct tgsi_full_src_register *reg = &insn->Src[0];
3482 if (reg->Register.Index < 0) {
3483 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3484 }
3485 }
3486
3487 if (insn->Src[1].Register.Indirect &&
3488 insn->Src[1].Indirect.File == TGSI_FILE_ADDRESS) {
3489 const struct tgsi_full_src_register *reg = &insn->Src[1];
3490 if (reg->Register.Index < 0) {
3491 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3492 }
3493 }
3494
3495 if (insn->Src[2].Register.Indirect &&
3496 insn->Src[2].Indirect.File == TGSI_FILE_ADDRESS) {
3497 const struct tgsi_full_src_register *reg = &insn->Src[2];
3498 if (reg->Register.Index < 0) {
3499 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
3500 }
3501 }
3502
3503 return TRUE;
3504 }
3505
3506
3507 static boolean
3508 pre_parse_tokens( struct svga_shader_emitter *emit,
3509 const struct tgsi_token *tokens )
3510 {
3511 struct tgsi_parse_context parse;
3512 int current_arl = 0;
3513
3514 tgsi_parse_init( &parse, tokens );
3515
3516 while (!tgsi_parse_end_of_tokens( &parse )) {
3517 tgsi_parse_token( &parse );
3518 switch (parse.FullToken.Token.Type) {
3519 case TGSI_TOKEN_TYPE_IMMEDIATE:
3520 case TGSI_TOKEN_TYPE_DECLARATION:
3521 break;
3522 case TGSI_TOKEN_TYPE_INSTRUCTION:
3523 if (parse.FullToken.FullInstruction.Instruction.Opcode ==
3524 TGSI_OPCODE_ARL) {
3525 ++current_arl;
3526 }
3527 if (!pre_parse_instruction( emit, &parse.FullToken.FullInstruction,
3528 current_arl ))
3529 return FALSE;
3530 break;
3531 default:
3532 break;
3533 }
3534
3535 }
3536 return TRUE;
3537 }
3538
3539
3540 static boolean
3541 svga_shader_emit_helpers(struct svga_shader_emitter *emit)
3542 {
3543 if (needs_to_create_zero( emit )) {
3544 create_zero_immediate( emit );
3545 }
3546 if (needs_to_create_loop_const( emit )) {
3547 create_loop_const( emit );
3548 }
3549 if (needs_to_create_arl_consts( emit )) {
3550 create_arl_consts( emit );
3551 }
3552
3553 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3554 if (!emit_ps_preamble( emit ))
3555 return FALSE;
3556
3557 if (emit->key.fkey.light_twoside) {
3558 if (!emit_light_twoside( emit ))
3559 return FALSE;
3560 }
3561 if (emit->emit_frontface) {
3562 if (!emit_frontface( emit ))
3563 return FALSE;
3564 }
3565 if (emit->inverted_texcoords) {
3566 if (!emit_inverted_texcoords( emit ))
3567 return FALSE;
3568 }
3569 }
3570
3571 return TRUE;
3572 }
3573
3574
3575 boolean
3576 svga_shader_emit_instructions(struct svga_shader_emitter *emit,
3577 const struct tgsi_token *tokens)
3578 {
3579 struct tgsi_parse_context parse;
3580 boolean ret = TRUE;
3581 boolean helpers_emitted = FALSE;
3582 unsigned line_nr = 0;
3583
3584 tgsi_parse_init( &parse, tokens );
3585 emit->internal_imm_count = 0;
3586
3587 if (emit->unit == PIPE_SHADER_VERTEX) {
3588 ret = emit_vs_preamble( emit );
3589 if (!ret)
3590 goto done;
3591 }
3592
3593 pre_parse_tokens(emit, tokens);
3594
3595 while (!tgsi_parse_end_of_tokens( &parse )) {
3596 tgsi_parse_token( &parse );
3597
3598 switch (parse.FullToken.Token.Type) {
3599 case TGSI_TOKEN_TYPE_IMMEDIATE:
3600 ret = svga_emit_immediate( emit, &parse.FullToken.FullImmediate );
3601 if (!ret)
3602 goto done;
3603 break;
3604
3605 case TGSI_TOKEN_TYPE_DECLARATION:
3606 ret = svga_translate_decl_sm30( emit, &parse.FullToken.FullDeclaration );
3607 if (!ret)
3608 goto done;
3609 break;
3610
3611 case TGSI_TOKEN_TYPE_INSTRUCTION:
3612 if (!helpers_emitted) {
3613 if (!svga_shader_emit_helpers( emit ))
3614 goto done;
3615 helpers_emitted = TRUE;
3616 }
3617 ret = svga_emit_instruction( emit,
3618 line_nr++,
3619 &parse.FullToken.FullInstruction );
3620 if (!ret)
3621 goto done;
3622 break;
3623 default:
3624 break;
3625 }
3626
3627 reset_temp_regs( emit );
3628 }
3629
3630 /* Need to terminate the current subroutine. Note that the
3631 * hardware doesn't tolerate shaders without sub-routines
3632 * terminating with RET+END.
3633 */
3634 if (!emit->in_main_func) {
3635 ret = emit_instruction( emit, inst_token( SVGA3DOP_RET ) );
3636 if (!ret)
3637 goto done;
3638 }
3639
3640 assert(emit->dynamic_branching_level == 0);
3641
3642 /* Need to terminate the whole shader:
3643 */
3644 ret = emit_instruction( emit, inst_token( SVGA3DOP_END ) );
3645 if (!ret)
3646 goto done;
3647
3648 done:
3649 tgsi_parse_free( &parse );
3650 return ret;
3651 }