svga: Wip for passing depth in a texcoord
[mesa.git] / src / gallium / drivers / svga / svga_tgsi_insn.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_parse.h"
29 #include "util/u_memory.h"
30
31 #include "svga_tgsi_emit.h"
32 #include "svga_context.h"
33
34
35 static boolean emit_vs_postamble( struct svga_shader_emitter *emit );
36 static boolean emit_ps_postamble( struct svga_shader_emitter *emit );
37
38
39
40
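/* Translate a TGSI opcode into the SVGA3D opcode for the cases where
 * the two map one-to-one.  Opcodes that need to be expanded into
 * instruction sequences are handled by the emit_* helpers below.
 */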
41 static unsigned
42 translate_opcode(
43 uint opcode )
44 {
45 switch (opcode) {
46 case TGSI_OPCODE_ABS: return SVGA3DOP_ABS;
47 case TGSI_OPCODE_ADD: return SVGA3DOP_ADD;
48 case TGSI_OPCODE_BREAKC: return SVGA3DOP_BREAKC;
49 case TGSI_OPCODE_DP2A: return SVGA3DOP_DP2ADD;
50 case TGSI_OPCODE_DP3: return SVGA3DOP_DP3;
51 case TGSI_OPCODE_DP4: return SVGA3DOP_DP4;
52 case TGSI_OPCODE_FRC: return SVGA3DOP_FRC;
53 case TGSI_OPCODE_MAD: return SVGA3DOP_MAD;
54 case TGSI_OPCODE_MAX: return SVGA3DOP_MAX;
55 case TGSI_OPCODE_MIN: return SVGA3DOP_MIN;
56 case TGSI_OPCODE_MOV: return SVGA3DOP_MOV;
57 case TGSI_OPCODE_MUL: return SVGA3DOP_MUL;
58 case TGSI_OPCODE_NOP: return SVGA3DOP_NOP;
59 case TGSI_OPCODE_NRM4: return SVGA3DOP_NRM;
60 default:
61 debug_printf("Unkown opcode %u\n", opcode);
62 assert( 0 );
63 return SVGA3DOP_LAST_INST;
64 }
65 }
66
67
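/* Translate a TGSI register file to the corresponding SVGA3D register
 * type.  Note that both TGSI constants and immediates land in
 * SVGA3DREG_CONST.
 */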
68 static unsigned translate_file( unsigned file )
69 {
70 switch (file) {
71 case TGSI_FILE_TEMPORARY: return SVGA3DREG_TEMP;
72 case TGSI_FILE_INPUT: return SVGA3DREG_INPUT;
73 case TGSI_FILE_OUTPUT: return SVGA3DREG_OUTPUT; /* VS3.0+ only */
74 case TGSI_FILE_IMMEDIATE: return SVGA3DREG_CONST;
75 case TGSI_FILE_CONSTANT: return SVGA3DREG_CONST;
76 case TGSI_FILE_SAMPLER: return SVGA3DREG_SAMPLER;
77 case TGSI_FILE_ADDRESS: return SVGA3DREG_ADDR;
78 default:
79 assert( 0 );
80 return SVGA3DREG_TEMP;
81 }
82 }
83
84
85
86
87
88
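/* Translate a TGSI destination register into an SVGA3D destination
 * token, applying the output mapping, write mask and saturate modifier.
 */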
89 static SVGA3dShaderDestToken
90 translate_dst_register( struct svga_shader_emitter *emit,
91 const struct tgsi_full_instruction *insn,
92 unsigned idx )
93 {
94 const struct tgsi_full_dst_register *reg = &insn->Dst[idx];
95 SVGA3dShaderDestToken dest;
96
97 switch (reg->Register.File) {
98 case TGSI_FILE_OUTPUT:
99 /* Output registers encode semantic information in their name.
100 * Need to look up a table built at declaration time:
101 */
102 dest = emit->output_map[reg->Register.Index];
103 break;
104
105 default:
106 dest = dst_register( translate_file( reg->Register.File ),
107 reg->Register.Index );
108 break;
109 }
110
111 dest.mask = reg->Register.WriteMask;
112 assert(dest.mask);
113
114 if (insn->Instruction.Saturate)
115 dest.dstMod = SVGA3DDSTMOD_SATURATE;
116
117 return dest;
118 }
119
120
121 static struct src_register
122 swizzle( struct src_register src,
123 int x,
124 int y,
125 int z,
126 int w )
127 {
128 x = (src.base.swizzle >> (x * 2)) & 0x3;
129 y = (src.base.swizzle >> (y * 2)) & 0x3;
130 z = (src.base.swizzle >> (z * 2)) & 0x3;
131 w = (src.base.swizzle >> (w * 2)) & 0x3;
132
133 src.base.swizzle = TRANSLATE_SWIZZLE(x,y,z,w);
134
135 return src;
136 }
137
138 static struct src_register
139 scalar( struct src_register src,
140 int comp )
141 {
142 return swizzle( src, comp, comp, comp, comp );
143 }
144
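/* Check whether the ARL instruction currently being processed has an
 * associated constant recorded in arl_consts.  If so, relative constant
 * addressing is rebased by that amount and the ARL itself is emitted
 * through emit_fake_arl().
 */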
145 static INLINE boolean
146 svga_arl_needs_adjustment( const struct svga_shader_emitter *emit )
147 {
148 int i;
149
150 for (i = 0; i < emit->num_arl_consts; ++i) {
151 if (emit->arl_consts[i].arl_num == emit->current_arl)
152 return TRUE;
153 }
154 return FALSE;
155 }
156
157 static INLINE int
158 svga_arl_adjustment( const struct svga_shader_emitter *emit )
159 {
160 int i;
161
162 for (i = 0; i < emit->num_arl_consts; ++i) {
163 if (emit->arl_consts[i].arl_num == emit->current_arl)
164 return emit->arl_consts[i].number;
165 }
166 return 0;
167 }
168
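/* Translate a TGSI source register into an SVGA3D source token,
 * resolving the input/immediate mappings, indirect addressing,
 * swizzles and the negate/absolute modifiers.
 */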
169 static struct src_register
170 translate_src_register( const struct svga_shader_emitter *emit,
171 const struct tgsi_full_src_register *reg )
172 {
173 struct src_register src;
174
175 switch (reg->Register.File) {
176 case TGSI_FILE_INPUT:
177 /* Input registers are referred to by their semantic name rather
178 * than by index. Use the mapping built up from the decls:
179 */
180 src = emit->input_map[reg->Register.Index];
181 break;
182
183 case TGSI_FILE_IMMEDIATE:
184 /* Immediates are appended after TGSI constants in the D3D
185 * constant buffer.
186 */
187 src = src_register( translate_file( reg->Register.File ),
188 reg->Register.Index +
189 emit->imm_start );
190 break;
191
192 default:
193 src = src_register( translate_file( reg->Register.File ),
194 reg->Register.Index );
195
196 break;
197 }
198
199 /* Indirect addressing.
200 */
201 if (reg->Register.Indirect) {
202 if (emit->unit == PIPE_SHADER_FRAGMENT) {
203 /* Pixel shaders have only loop registers for relative
204 * addressing into inputs. Ignore the redundant address
205 * register; the contents of aL should be in sync with it.
206 */
207 if (reg->Register.File == TGSI_FILE_INPUT) {
208 src.base.relAddr = 1;
209 src.indirect = src_token(SVGA3DREG_LOOP, 0);
210 }
211 }
212 else {
213 /* Constant buffers only.
214 */
215 if (reg->Register.File == TGSI_FILE_CONSTANT) {
216 /* we shift the offset towards the minimum */
217 if (svga_arl_needs_adjustment( emit )) {
218 src.base.num -= svga_arl_adjustment( emit );
219 }
220 src.base.relAddr = 1;
221
222 /* Not really sure what should go in the second token:
223 */
224 src.indirect = src_token( SVGA3DREG_ADDR,
225 reg->Indirect.Index );
226
227 src.indirect.swizzle = SWIZZLE_XXXX;
228 }
229 }
230 }
231
232 src = swizzle( src,
233 reg->Register.SwizzleX,
234 reg->Register.SwizzleY,
235 reg->Register.SwizzleZ,
236 reg->Register.SwizzleW );
237
238 /* src.mod isn't a bitfield, unfortunately:
239 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
240 */
241 if (reg->Register.Absolute) {
242 if (reg->Register.Negate)
243 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
244 else
245 src.base.srcMod = SVGA3DSRCMOD_ABS;
246 }
247 else {
248 if (reg->Register.Negate)
249 src.base.srcMod = SVGA3DSRCMOD_NEG;
250 else
251 src.base.srcMod = SVGA3DSRCMOD_NONE;
252 }
253
254 return src;
255 }
256
257
258 /*
259 * Get a new temporary register (allocated past the shader's declared temps).
260 */
261 static INLINE SVGA3dShaderDestToken
262 get_temp( struct svga_shader_emitter *emit )
263 {
264 int i = emit->nr_hw_temp + emit->internal_temp_count++;
265
266 return dst_register( SVGA3DREG_TEMP, i );
267 }
268
269 /* Release a single temp. Currently only effective if it was the last
270 * allocated temp, otherwise release will be delayed until the next
271 * call to reset_temp_regs().
272 */
273 static INLINE void
274 release_temp( struct svga_shader_emitter *emit,
275 SVGA3dShaderDestToken temp )
276 {
277 if (temp.num == emit->internal_temp_count - 1)
278 emit->internal_temp_count--;
279 }
280
281 static void reset_temp_regs( struct svga_shader_emitter *emit )
282 {
283 emit->internal_temp_count = 0;
284 }
285
286
287 /* Replace the src with the temporary specified in the dst, but copying
288 * only the necessary channels, and preserving the original swizzle (which is
289 * important given that several opcodes have constraints in the allowed
290 * swizzles).
291 */
292 static boolean emit_repl( struct svga_shader_emitter *emit,
293 SVGA3dShaderDestToken dst,
294 struct src_register *src0)
295 {
296 unsigned src0_swizzle;
297 unsigned chan;
298
299 assert(SVGA3dShaderGetRegType(dst.value) == SVGA3DREG_TEMP);
300
301 src0_swizzle = src0->base.swizzle;
302
303 dst.mask = 0;
304 for (chan = 0; chan < 4; ++chan) {
305 unsigned swizzle = (src0_swizzle >> (chan *2)) & 0x3;
306 dst.mask |= 1 << swizzle;
307 }
308 assert(dst.mask);
309
310 src0->base.swizzle = SVGA3DSWIZZLE_NONE;
311
312 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, *src0 ))
313 return FALSE;
314
315 *src0 = src( dst );
316 src0->base.swizzle = src0_swizzle;
317
318 return TRUE;
319 }
320
321
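/* Emit an instruction with zero or one source operand.  No register
 * conflict checking is needed for these cases.
 */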
322 static boolean submit_op0( struct svga_shader_emitter *emit,
323 SVGA3dShaderInstToken inst,
324 SVGA3dShaderDestToken dest )
325 {
326 return (emit_instruction( emit, inst ) &&
327 emit_dst( emit, dest ));
328 }
329
330 static boolean submit_op1( struct svga_shader_emitter *emit,
331 SVGA3dShaderInstToken inst,
332 SVGA3dShaderDestToken dest,
333 struct src_register src0 )
334 {
335 return emit_op1( emit, inst, dest, src0 );
336 }
337
338
339 /* SVGA shaders may not refer to >1 constant register in a single
340 * instruction. This function checks for that usage and inserts a
341 * move to temporary if detected.
342 *
343 * The same applies to input registers -- at most a single input
344 * register may be read by any instruction.
345 */
346 static boolean submit_op2( struct svga_shader_emitter *emit,
347 SVGA3dShaderInstToken inst,
348 SVGA3dShaderDestToken dest,
349 struct src_register src0,
350 struct src_register src1 )
351 {
352 SVGA3dShaderDestToken temp;
353 SVGA3dShaderRegType type0, type1;
354 boolean need_temp = FALSE;
355
356 temp.value = 0;
357 type0 = SVGA3dShaderGetRegType( src0.base.value );
358 type1 = SVGA3dShaderGetRegType( src1.base.value );
359
360 if (type0 == SVGA3DREG_CONST &&
361 type1 == SVGA3DREG_CONST &&
362 src0.base.num != src1.base.num)
363 need_temp = TRUE;
364
365 if (type0 == SVGA3DREG_INPUT &&
366 type1 == SVGA3DREG_INPUT &&
367 src0.base.num != src1.base.num)
368 need_temp = TRUE;
369
370 if (need_temp) {
371 temp = get_temp( emit );
372
373 if (!emit_repl( emit, temp, &src0 ))
374 return FALSE;
375 }
376
377 if (!emit_op2( emit, inst, dest, src0, src1 ))
378 return FALSE;
379
380 if (need_temp)
381 release_temp( emit, temp );
382
383 return TRUE;
384 }
385
386
387 /* SVGA shaders may not refer to >1 constant register in a single
388 * instruction. This function checks for that usage and inserts a
389 * move to temporary if detected.
390 */
391 static boolean submit_op3( struct svga_shader_emitter *emit,
392 SVGA3dShaderInstToken inst,
393 SVGA3dShaderDestToken dest,
394 struct src_register src0,
395 struct src_register src1,
396 struct src_register src2 )
397 {
398 SVGA3dShaderDestToken temp0;
399 SVGA3dShaderDestToken temp1;
400 boolean need_temp0 = FALSE;
401 boolean need_temp1 = FALSE;
402 SVGA3dShaderRegType type0, type1, type2;
403
404 temp0.value = 0;
405 temp1.value = 0;
406 type0 = SVGA3dShaderGetRegType( src0.base.value );
407 type1 = SVGA3dShaderGetRegType( src1.base.value );
408 type2 = SVGA3dShaderGetRegType( src2.base.value );
409
410 if (inst.op != SVGA3DOP_SINCOS) {
411 if (type0 == SVGA3DREG_CONST &&
412 ((type1 == SVGA3DREG_CONST && src0.base.num != src1.base.num) ||
413 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
414 need_temp0 = TRUE;
415
416 if (type1 == SVGA3DREG_CONST &&
417 (type2 == SVGA3DREG_CONST && src1.base.num != src2.base.num))
418 need_temp1 = TRUE;
419 }
420
421 if (type0 == SVGA3DREG_INPUT &&
422 ((type1 == SVGA3DREG_INPUT && src0.base.num != src1.base.num) ||
423 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
424 need_temp0 = TRUE;
425
426 if (type1 == SVGA3DREG_INPUT &&
427 (type2 == SVGA3DREG_INPUT && src1.base.num != src2.base.num))
428 need_temp1 = TRUE;
429
430 if (need_temp0) {
431 temp0 = get_temp( emit );
432
433 if (!emit_repl( emit, temp0, &src0 ))
434 return FALSE;
435 }
436
437 if (need_temp1) {
438 temp1 = get_temp( emit );
439
440 if (!emit_repl( emit, temp1, &src1 ))
441 return FALSE;
442 }
443
444 if (!emit_op3( emit, inst, dest, src0, src1, src2 ))
445 return FALSE;
446
447 if (need_temp1)
448 release_temp( emit, temp1 );
449 if (need_temp0)
450 release_temp( emit, temp0 );
451 return TRUE;
452 }
453
454
455
456
457 /* SVGA shaders may not refer to >1 constant register in a single
458 * instruction. This function checks for that usage and inserts a
459 * move to temporary if detected.
460 */
461 static boolean submit_op4( struct svga_shader_emitter *emit,
462 SVGA3dShaderInstToken inst,
463 SVGA3dShaderDestToken dest,
464 struct src_register src0,
465 struct src_register src1,
466 struct src_register src2,
467 struct src_register src3)
468 {
469 SVGA3dShaderDestToken temp0;
470 SVGA3dShaderDestToken temp3;
471 boolean need_temp0 = FALSE;
472 boolean need_temp3 = FALSE;
473 SVGA3dShaderRegType type0, type1, type2, type3;
474
475 temp0.value = 0;
476 temp3.value = 0;
477 type0 = SVGA3dShaderGetRegType( src0.base.value );
478 type1 = SVGA3dShaderGetRegType( src1.base.value );
479 type2 = SVGA3dShaderGetRegType( src2.base.value );
480 type3 = SVGA3dShaderGetRegType( src3.base.value );
481
482 /* Make life a little easier - this is only used by the TXD
483 * instruction, which is guaranteed to carry a sampler (rather than a
484 * constant or input reg) in at least one slot:
485 */
486 assert(type1 == SVGA3DREG_SAMPLER);
487
488 if (type0 == SVGA3DREG_CONST &&
489 ((type3 == SVGA3DREG_CONST && src0.base.num != src3.base.num) ||
490 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
491 need_temp0 = TRUE;
492
493 if (type3 == SVGA3DREG_CONST &&
494 (type2 == SVGA3DREG_CONST && src3.base.num != src2.base.num))
495 need_temp3 = TRUE;
496
497 if (type0 == SVGA3DREG_INPUT &&
498 ((type3 == SVGA3DREG_INPUT && src0.base.num != src3.base.num) ||
499 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
500 need_temp0 = TRUE;
501
502 if (type3 == SVGA3DREG_INPUT &&
503 (type2 == SVGA3DREG_INPUT && src3.base.num != src2.base.num))
504 need_temp3 = TRUE;
505
506 if (need_temp0) {
507 temp0 = get_temp( emit );
508
509 if (!emit_repl( emit, temp0, &src0 ))
510 return FALSE;
511 }
512
513 if (need_temp3) {
514 temp3 = get_temp( emit );
515
516 if (!emit_repl( emit, temp3, &src3 ))
517 return FALSE;
518 }
519
520 if (!emit_op4( emit, inst, dest, src0, src1, src2, src3 ))
521 return FALSE;
522
523 if (need_temp3)
524 release_temp( emit, temp3 );
525 if (need_temp0)
526 release_temp( emit, temp0 );
527 return TRUE;
528 }
529
530
531 static boolean alias_src_dst( struct src_register src,
532 SVGA3dShaderDestToken dst )
533 {
534 if (src.base.num != dst.num)
535 return FALSE;
536
537 if (SVGA3dShaderGetRegType(dst.value) !=
538 SVGA3dShaderGetRegType(src.base.value))
539 return FALSE;
540
541 return TRUE;
542 }
543
544
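/* Emit an LRP instruction.  LRP requires that the destination is a
 * temporary and does not alias src0 or src2, so route the result
 * through a scratch temp when those conditions don't hold.
 */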
545 static boolean submit_lrp(struct svga_shader_emitter *emit,
546 SVGA3dShaderDestToken dst,
547 struct src_register src0,
548 struct src_register src1,
549 struct src_register src2)
550 {
551 SVGA3dShaderDestToken tmp;
552 boolean need_dst_tmp = FALSE;
553
554 /* The dst reg must be a temporary, and not be the same as src0 or src2 */
555 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
556 alias_src_dst(src0, dst) ||
557 alias_src_dst(src2, dst))
558 need_dst_tmp = TRUE;
559
560 if (need_dst_tmp) {
561 tmp = get_temp( emit );
562 tmp.mask = dst.mask;
563 }
564 else {
565 tmp = dst;
566 }
567
568 if (!submit_op3(emit, inst_token( SVGA3DOP_LRP ), tmp, src0, src1, src2))
569 return FALSE;
570
571 if (need_dst_tmp) {
572 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
573 return FALSE;
574 }
575
576 return TRUE;
577 }
578
579
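/* Emit a DEF (float) or DEFI (integer) token defining an immediate
 * constant register with the given four values.
 */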
580 static boolean emit_def_const( struct svga_shader_emitter *emit,
581 SVGA3dShaderConstType type,
582 unsigned idx,
583 float a,
584 float b,
585 float c,
586 float d )
587 {
588 SVGA3DOpDefArgs def;
589 SVGA3dShaderInstToken opcode;
590
591 switch (type) {
592 case SVGA3D_CONST_TYPE_FLOAT:
593 opcode = inst_token( SVGA3DOP_DEF );
594 def.dst = dst_register( SVGA3DREG_CONST, idx );
595 def.constValues[0] = a;
596 def.constValues[1] = b;
597 def.constValues[2] = c;
598 def.constValues[3] = d;
599 break;
600 case SVGA3D_CONST_TYPE_INT:
601 opcode = inst_token( SVGA3DOP_DEFI );
602 def.dst = dst_register( SVGA3DREG_CONSTINT, idx );
603 def.constIValues[0] = (int)a;
604 def.constIValues[1] = (int)b;
605 def.constIValues[2] = (int)c;
606 def.constIValues[3] = (int)d;
607 break;
608 default:
609 assert(0);
610 opcode = inst_token( SVGA3DOP_NOP );
611 break;
612 }
613
614 if (!emit_instruction(emit, opcode) ||
615 !svga_shader_emit_dwords( emit, def.values, Elements(def.values)))
616 return FALSE;
617
618 return TRUE;
619 }
620
621 static INLINE boolean
622 create_zero_immediate( struct svga_shader_emitter *emit )
623 {
624 unsigned idx = emit->nr_hw_float_const++;
625
626 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
627 idx, 0, 0, 0, 1 ))
628 return FALSE;
629
630 emit->zero_immediate_idx = idx;
631 emit->created_zero_immediate = TRUE;
632
633 return TRUE;
634 }
635
636 static INLINE boolean
637 create_loop_const( struct svga_shader_emitter *emit )
638 {
639 unsigned idx = emit->nr_hw_int_const++;
640
641 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_INT, idx,
642 255, /* iteration count */
643 0, /* initial value */
644 1, /* step size */
645 0 /* not used, must be 0 */))
646 return FALSE;
647
648 emit->loop_const_idx = idx;
649 emit->created_loop_const = TRUE;
650
651 return TRUE;
652 }
653
654 static INLINE boolean
655 create_sincos_consts( struct svga_shader_emitter *emit )
656 {
657 unsigned idx = emit->nr_hw_float_const++;
658
659 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
660 -1.5500992e-006f,
661 -2.1701389e-005f,
662 0.0026041667f,
663 0.00026041668f ))
664 return FALSE;
665
666 emit->sincos_consts_idx = idx;
667 idx = emit->nr_hw_float_const++;
668
669 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
670 -0.020833334f,
671 -0.12500000f,
672 1.0f,
673 0.50000000f ))
674 return FALSE;
675
676 emit->created_sincos_consts = TRUE;
677
678 return TRUE;
679 }
680
681 static INLINE boolean
682 create_arl_consts( struct svga_shader_emitter *emit )
683 {
684 int i;
685
686 for (i = 0; i < emit->num_arl_consts; i += 4) {
687 int j;
688 unsigned idx = emit->nr_hw_float_const++;
689 float vals[4];
690 for (j = 0; j < 4 && (j + i) < emit->num_arl_consts; ++j) {
691 vals[j] = emit->arl_consts[i + j].number;
692 emit->arl_consts[i + j].idx = idx;
693 switch (j) {
694 case 0:
695 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_X;
696 break;
697 case 1:
698 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Y;
699 break;
700 case 2:
701 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Z;
702 break;
703 case 3:
704 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_W;
705 break;
706 }
707 }
708 while (j < 4)
709 vals[j++] = 0;
710
711 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
712 vals[0], vals[1],
713 vals[2], vals[3]))
714 return FALSE;
715 }
716
717 return TRUE;
718 }
719
720 static INLINE struct src_register
721 get_vface( struct svga_shader_emitter *emit )
722 {
723 assert(emit->emitted_vface);
724 return src_register(SVGA3DREG_MISCTYPE,
725 SVGA3DMISCREG_FACE);
726 }
727
728 /* returns {0, 0, 0, 1} immediate */
729 static INLINE struct src_register
730 get_zero_immediate( struct svga_shader_emitter *emit )
731 {
732 assert(emit->created_zero_immediate);
733 assert(emit->zero_immediate_idx >= 0);
734 return src_register( SVGA3DREG_CONST,
735 emit->zero_immediate_idx );
736 }
737
738 /* returns the loop const */
739 static INLINE struct src_register
740 get_loop_const( struct svga_shader_emitter *emit )
741 {
742 assert(emit->created_loop_const);
743 assert(emit->loop_const_idx >= 0);
744 return src_register( SVGA3DREG_CONSTINT,
745 emit->loop_const_idx );
746 }
747
748 /* returns a sincos const */
749 static INLINE struct src_register
750 get_sincos_const( struct svga_shader_emitter *emit,
751 unsigned index )
752 {
753 assert(emit->created_sincos_consts);
754 assert(emit->sincos_consts_idx >= 0);
755 assert(index == 0 || index == 1);
756 return src_register( SVGA3DREG_CONST,
757 emit->sincos_consts_idx + index );
758 }
759
760 static INLINE struct src_register
761 get_fake_arl_const( struct svga_shader_emitter *emit )
762 {
763 struct src_register reg;
764 int idx = 0, swizzle = 0, i;
765
766 for (i = 0; i < emit->num_arl_consts; ++ i) {
767 if (emit->arl_consts[i].arl_num == emit->current_arl) {
768 idx = emit->arl_consts[i].idx;
769 swizzle = emit->arl_consts[i].swizzle;
770 }
771 }
772
773 reg = src_register( SVGA3DREG_CONST, idx );
774 return scalar(reg, swizzle);
775 }
776
777 static INLINE struct src_register
778 get_tex_dimensions( struct svga_shader_emitter *emit, int sampler_num )
779 {
780 int idx;
781 struct src_register reg;
782
783 /* the width/height indexes start right after constants */
784 idx = emit->key.fkey.tex[sampler_num].width_height_idx +
785 emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
786
787 reg = src_register( SVGA3DREG_CONST, idx );
788 return reg;
789 }
790
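/* Emit an ARL for which svga_arl_needs_adjustment() is true: copy the
 * source into a temp, add the matching ARL constant back in, then MOVA
 * the result into the address register, preserving the original swizzle.
 */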
791 static boolean emit_fake_arl(struct svga_shader_emitter *emit,
792 const struct tgsi_full_instruction *insn)
793 {
794 const struct src_register src0 = translate_src_register(
795 emit, &insn->Src[0] );
796 struct src_register src1 = get_fake_arl_const( emit );
797 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
798 SVGA3dShaderDestToken tmp = get_temp( emit );
799
800 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
801 return FALSE;
802
803 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), tmp, src( tmp ),
804 src1))
805 return FALSE;
806
807 /* replicate the original swizzle */
808 src1 = src(tmp);
809 src1.base.swizzle = src0.base.swizzle;
810
811 return submit_op1( emit, inst_token( SVGA3DOP_MOVA ),
812 dst, src1 );
813 }
814
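/* Translate TGSI IF into an IFC comparing the condition against the
 * zero immediate with a not-equal test.
 */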
815 static boolean emit_if(struct svga_shader_emitter *emit,
816 const struct tgsi_full_instruction *insn)
817 {
818 struct src_register src0 = translate_src_register(
819 emit, &insn->Src[0] );
820 struct src_register zero = get_zero_immediate( emit );
821 SVGA3dShaderInstToken if_token = inst_token( SVGA3DOP_IFC );
822
823 if_token.control = SVGA3DOPCOMPC_NE;
824 zero = scalar(zero, TGSI_SWIZZLE_X);
825
826 if (SVGA3dShaderGetRegType(src0.base.value) == SVGA3DREG_CONST) {
827 /*
828 * An IFC instruction may read at most one distinct constant register.
829 */
830
831 SVGA3dShaderDestToken tmp = get_temp( emit );
832
833 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
834 return FALSE;
835
836 src0 = scalar(src( tmp ), TGSI_SWIZZLE_X);
837 }
838
839 emit->dynamic_branching_level++;
840
841 return (emit_instruction( emit, if_token ) &&
842 emit_src( emit, src0 ) &&
843 emit_src( emit, zero ) );
844 }
845
846 static boolean emit_endif(struct svga_shader_emitter *emit,
847 const struct tgsi_full_instruction *insn)
848 {
849 emit->dynamic_branching_level--;
850
851 return (emit_instruction( emit,
852 inst_token( SVGA3DOP_ENDIF )));
853 }
854
855 static boolean emit_else(struct svga_shader_emitter *emit,
856 const struct tgsi_full_instruction *insn)
857 {
858 return (emit_instruction( emit,
859 inst_token( SVGA3DOP_ELSE )));
860 }
861
862 /* Translate the following TGSI FLR instruction.
863 * FLR DST, SRC
864 * To the following SVGA3D instruction sequence.
865 * FRC TMP, SRC
866 * SUB DST, SRC, TMP
867 */
868 static boolean emit_floor(struct svga_shader_emitter *emit,
869 const struct tgsi_full_instruction *insn )
870 {
871 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
872 const struct src_register src0 = translate_src_register(
873 emit, &insn->Src[0] );
874 SVGA3dShaderDestToken temp = get_temp( emit );
875
876 /* FRC TMP, SRC */
877 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ), temp, src0 ))
878 return FALSE;
879
880 /* SUB DST, SRC, TMP */
881 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src0,
882 negate( src( temp ) ) ))
883 return FALSE;
884
885 return TRUE;
886 }
887
888
889 /* Translate the following TGSI CMP instruction.
890 * CMP DST, SRC0, SRC1, SRC2
891 * To the following SVGA3D instruction sequence.
892 * CMP DST, SRC0, SRC2, SRC1
893 */
894 static boolean emit_cmp(struct svga_shader_emitter *emit,
895 const struct tgsi_full_instruction *insn )
896 {
897 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
898 const struct src_register src0 = translate_src_register(
899 emit, &insn->Src[0] );
900 const struct src_register src1 = translate_src_register(
901 emit, &insn->Src[1] );
902 const struct src_register src2 = translate_src_register(
903 emit, &insn->Src[2] );
904
905 if (emit->unit == PIPE_SHADER_VERTEX) {
906 SVGA3dShaderDestToken temp = get_temp(emit);
907 struct src_register zero = scalar(get_zero_immediate(emit), TGSI_SWIZZLE_X);
908
909 /* Since vertex shaders don't support the CMP instruction,
910 * simulate it with SLT and LRP instructions.
911 * SLT TMP, SRC0, 0.0
912 * LRP DST, TMP, SRC1, SRC2
913 */
914 if (!submit_op2(emit, inst_token(SVGA3DOP_SLT), temp, src0, zero))
915 return FALSE;
916 return submit_lrp(emit, dst, src(temp), src1, src2);
917 }
918
919 /* CMP DST, SRC0, SRC2, SRC1 */
920 return submit_op3( emit, inst_token( SVGA3DOP_CMP ), dst, src0, src2, src1);
921 }
922
923
924
925 /* Translate the following TGSI DIV instruction.
926 * DIV DST.xy, SRC0, SRC1
927 * To the following SVGA3D instruction sequence.
928 * RCP TMP.x, SRC1.xxxx
929 * RCP TMP.y, SRC1.yyyy
930 * MUL DST.xy, SRC0, TMP
931 */
932 static boolean emit_div(struct svga_shader_emitter *emit,
933 const struct tgsi_full_instruction *insn )
934 {
935 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
936 const struct src_register src0 = translate_src_register(
937 emit, &insn->Src[0] );
938 const struct src_register src1 = translate_src_register(
939 emit, &insn->Src[1] );
940 SVGA3dShaderDestToken temp = get_temp( emit );
941 int i;
942
943 /* For each enabled element, perform an RCP instruction. Note that
944 * RCP is scalar in SVGA3D:
945 */
946 for (i = 0; i < 4; i++) {
947 unsigned channel = 1 << i;
948 if (dst.mask & channel) {
949 /* RCP TMP.?, SRC1.???? */
950 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
951 writemask(temp, channel),
952 scalar(src1, i) ))
953 return FALSE;
954 }
955 }
956
957 /* Then multiply them out with a single mul:
958 *
959 * MUL DST, SRC0, TMP
960 */
961 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst, src0,
962 src( temp ) ))
963 return FALSE;
964
965 return TRUE;
966 }
967
968 /* Translate the following TGSI DP2 instruction.
969 * DP2 DST, SRC1, SRC2
970 * To the following SVGA3D instruction sequence.
971 * MUL TMP, SRC1, SRC2
972 * ADD DST, TMP.xxxx, TMP.yyyy
973 */
974 static boolean emit_dp2(struct svga_shader_emitter *emit,
975 const struct tgsi_full_instruction *insn )
976 {
977 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
978 const struct src_register src0 = translate_src_register(
979 emit, &insn->Src[0] );
980 const struct src_register src1 = translate_src_register(
981 emit, &insn->Src[1] );
982 SVGA3dShaderDestToken temp = get_temp( emit );
983 struct src_register temp_src0, temp_src1;
984
985 /* MUL TMP, SRC1, SRC2 */
986 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), temp, src0, src1 ))
987 return FALSE;
988
989 temp_src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
990 temp_src1 = scalar(src( temp ), TGSI_SWIZZLE_Y);
991
992 /* ADD DST, TMP.xxxx, TMP.yyyy */
993 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
994 temp_src0, temp_src1 ))
995 return FALSE;
996
997 return TRUE;
998 }
999
1000
1001 /* Translate the following TGSI DPH instruction.
1002 * DPH DST, SRC1, SRC2
1003 * To the following SVGA3D instruction sequence.
1004 * DP3 TMP, SRC1, SRC2
1005 * ADD DST, TMP, SRC2.wwww
1006 */
1007 static boolean emit_dph(struct svga_shader_emitter *emit,
1008 const struct tgsi_full_instruction *insn )
1009 {
1010 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1011 const struct src_register src0 = translate_src_register(
1012 emit, &insn->Src[0] );
1013 struct src_register src1 = translate_src_register(
1014 emit, &insn->Src[1] );
1015 SVGA3dShaderDestToken temp = get_temp( emit );
1016
1017 /* DP3 TMP, SRC1, SRC2 */
1018 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src1 ))
1019 return FALSE;
1020
1021 src1 = scalar(src1, TGSI_SWIZZLE_W);
1022
1023 /* ADD DST, TMP, SRC2.wwww */
1024 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1025 src( temp ), src1 ))
1026 return FALSE;
1027
1028 return TRUE;
1029 }
1030
1031 /* Translate the following TGSI NRM instruction.
1032 * NRM DST, SRC
1033 * To the following SVGA3D instruction sequence.
1034 * DP3 TMP, SRC, SRC
1035 * RSQ TMP, TMP
1036 * MUL DST, SRC, TMP
1037 */
1038 static boolean emit_nrm(struct svga_shader_emitter *emit,
1039 const struct tgsi_full_instruction *insn )
1040 {
1041 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1042 const struct src_register src0 = translate_src_register(
1043 emit, &insn->Src[0] );
1044 SVGA3dShaderDestToken temp = get_temp( emit );
1045
1046 /* DP3 TMP, SRC, SRC */
1047 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src0 ))
1048 return FALSE;
1049
1050 /* RSQ TMP, TMP */
1051 if (!submit_op1( emit, inst_token( SVGA3DOP_RSQ ), temp, src( temp )))
1052 return FALSE;
1053
1054 /* MUL DST, SRC, TMP */
1055 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst,
1056 src0, src( temp )))
1057 return FALSE;
1058
1059 return TRUE;
1060
1061 }
1062
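/* Emit a SINCOS of the x component of src0.  The pre-SM3.0 form of
 * SINCOS additionally requires the two helper constants set up by
 * create_sincos_consts().
 */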
1063 static boolean do_emit_sincos(struct svga_shader_emitter *emit,
1064 SVGA3dShaderDestToken dst,
1065 struct src_register src0)
1066 {
1067 src0 = scalar(src0, TGSI_SWIZZLE_X);
1068
1069 if (emit->use_sm30) {
1070 return submit_op1( emit, inst_token( SVGA3DOP_SINCOS ),
1071 dst, src0 );
1072 } else {
1073 struct src_register const1 = get_sincos_const( emit, 0 );
1074 struct src_register const2 = get_sincos_const( emit, 1 );
1075
1076 return submit_op3( emit, inst_token( SVGA3DOP_SINCOS ),
1077 dst, src0, const1, const2 );
1078 }
1079 }
1080
1081 static boolean emit_sincos(struct svga_shader_emitter *emit,
1082 const struct tgsi_full_instruction *insn)
1083 {
1084 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1085 struct src_register src0 = translate_src_register(
1086 emit, &insn->Src[0] );
1087 SVGA3dShaderDestToken temp = get_temp( emit );
1088
1089 /* SCS TMP SRC */
1090 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_XY), src0 ))
1091 return FALSE;
1092
1093 /* MOV DST TMP */
1094 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src( temp ) ))
1095 return FALSE;
1096
1097 return TRUE;
1098 }
1099
1100 /*
1101 * SCS TMP SRC
1102 * MOV DST TMP.yyyy
1103 */
1104 static boolean emit_sin(struct svga_shader_emitter *emit,
1105 const struct tgsi_full_instruction *insn )
1106 {
1107 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1108 struct src_register src0 = translate_src_register(
1109 emit, &insn->Src[0] );
1110 SVGA3dShaderDestToken temp = get_temp( emit );
1111
1112 /* SCS TMP SRC */
1113 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_Y), src0))
1114 return FALSE;
1115
1116 src0 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1117
1118 /* MOV DST TMP.yyyy */
1119 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1120 return FALSE;
1121
1122 return TRUE;
1123 }
1124
1125 /*
1126 * SCS TMP SRC
1127 * MOV DST TMP.xxxx
1128 */
1129 static boolean emit_cos(struct svga_shader_emitter *emit,
1130 const struct tgsi_full_instruction *insn )
1131 {
1132 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1133 struct src_register src0 = translate_src_register(
1134 emit, &insn->Src[0] );
1135 SVGA3dShaderDestToken temp = get_temp( emit );
1136
1137 /* SCS TMP SRC */
1138 if (!do_emit_sincos( emit, writemask(temp, TGSI_WRITEMASK_X), src0 ))
1139 return FALSE;
1140
1141 src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1142
1143 /* MOV DST TMP.xxxx */
1144 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1145 return FALSE;
1146
1147 return TRUE;
1148 }
1149
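/* Translate TGSI SSG (set sign).  Vertex shaders can use the SGN
 * instruction directly; fragment shaders build the result from two
 * CMPs against zero and add them.
 */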
1150 static boolean emit_ssg(struct svga_shader_emitter *emit,
1151 const struct tgsi_full_instruction *insn )
1152 {
1153 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1154 struct src_register src0 = translate_src_register(
1155 emit, &insn->Src[0] );
1156 SVGA3dShaderDestToken temp0 = get_temp( emit );
1157 SVGA3dShaderDestToken temp1 = get_temp( emit );
1158 struct src_register zero, one;
1159
1160 if (emit->unit == PIPE_SHADER_VERTEX) {
1161 /* SGN DST, SRC0, TMP0, TMP1 */
1162 return submit_op3( emit, inst_token( SVGA3DOP_SGN ), dst, src0,
1163 src( temp0 ), src( temp1 ) );
1164 }
1165
1166 zero = get_zero_immediate( emit );
1167 one = scalar( zero, TGSI_SWIZZLE_W );
1168 zero = scalar( zero, TGSI_SWIZZLE_X );
1169
1170 /* CMP TMP0, SRC0, one, zero */
1171 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1172 writemask( temp0, dst.mask ), src0, one, zero ))
1173 return FALSE;
1174
1175 /* CMP TMP1, negate(SRC0), negate(one), zero */
1176 if (!submit_op3( emit, inst_token( SVGA3DOP_CMP ),
1177 writemask( temp1, dst.mask ), negate( src0 ), negate( one ),
1178 zero ))
1179 return FALSE;
1180
1181 /* ADD DST, TMP0, TMP1 */
1182 return submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src( temp0 ),
1183 src( temp1 ) );
1184 }
1185
1186 /*
1187 * ADD DST, SRC0, negate(SRC1)
1188 */
1189 static boolean emit_sub(struct svga_shader_emitter *emit,
1190 const struct tgsi_full_instruction *insn)
1191 {
1192 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1193 struct src_register src0 = translate_src_register(
1194 emit, &insn->Src[0] );
1195 struct src_register src1 = translate_src_register(
1196 emit, &insn->Src[1] );
1197
1198 src1 = negate(src1);
1199
1200 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1201 src0, src1 ))
1202 return FALSE;
1203
1204 return TRUE;
1205 }
1206
1207
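/* Translate TGSI KIL to TEXKILL.  TEXKILL cannot take swizzles,
 * modifiers, indirect addressing or non-temporary registers, so copy
 * the operand through a temp when any of those are present.
 */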
1208 static boolean emit_kil(struct svga_shader_emitter *emit,
1209 const struct tgsi_full_instruction *insn )
1210 {
1211 SVGA3dShaderInstToken inst;
1212 const struct tgsi_full_src_register *reg = &insn->Src[0];
1213 struct src_register src0;
1214
1215 inst = inst_token( SVGA3DOP_TEXKILL );
1216 src0 = translate_src_register( emit, reg );
1217
1218 if (reg->Register.Absolute ||
1219 reg->Register.Negate ||
1220 reg->Register.Indirect ||
1221 reg->Register.SwizzleX != 0 ||
1222 reg->Register.SwizzleY != 1 ||
1223 reg->Register.SwizzleZ != 2 ||
1224 reg->Register.File != TGSI_FILE_TEMPORARY)
1225 {
1226 SVGA3dShaderDestToken temp = get_temp( emit );
1227
1228 submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp, src0 );
1229 src0 = src( temp );
1230 }
1231
1232 return submit_op0( emit, inst, dst(src0) );
1233 }
1234
1235
1236 /* The Mesa state tracker always emits KILP as an unconditional
1237 * KIL. */
1238 static boolean emit_kilp(struct svga_shader_emitter *emit,
1239 const struct tgsi_full_instruction *insn )
1240 {
1241 SVGA3dShaderInstToken inst;
1242 SVGA3dShaderDestToken temp;
1243 struct src_register one = scalar( get_zero_immediate( emit ),
1244 TGSI_SWIZZLE_W );
1245
1246 inst = inst_token( SVGA3DOP_TEXKILL );
1247
1248 /* texkill doesn't allow negation on the operand, so move the
1249 * negated {1} into a temp register */
1250 temp = get_temp( emit );
1251 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp,
1252 negate( one ) ))
1253 return FALSE;
1254
1255 return submit_op0( emit, inst, temp );
1256 }
1257
1258 /* Implement conditionals by initializing destination reg to 'fail',
1259 * then set predicate reg with UFOP_SETP, then move 'pass' to dest
1260 * based on predicate reg.
1261 *
1262 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1263 * MOV dst, fail
1264 * MOV dst, pass, p0
1265 */
1266 static boolean
1267 emit_conditional(struct svga_shader_emitter *emit,
1268 unsigned compare_func,
1269 SVGA3dShaderDestToken dst,
1270 struct src_register src0,
1271 struct src_register src1,
1272 struct src_register pass,
1273 struct src_register fail)
1274 {
1275 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1276 SVGA3dShaderInstToken setp_token, mov_token;
1277 setp_token = inst_token( SVGA3DOP_SETP );
1278
1279 switch (compare_func) {
1280 case PIPE_FUNC_NEVER:
1281 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1282 dst, fail );
1283 break;
1284 case PIPE_FUNC_LESS:
1285 setp_token.control = SVGA3DOPCOMP_LT;
1286 break;
1287 case PIPE_FUNC_EQUAL:
1288 setp_token.control = SVGA3DOPCOMP_EQ;
1289 break;
1290 case PIPE_FUNC_LEQUAL:
1291 setp_token.control = SVGA3DOPCOMP_LE;
1292 break;
1293 case PIPE_FUNC_GREATER:
1294 setp_token.control = SVGA3DOPCOMP_GT;
1295 break;
1296 case PIPE_FUNC_NOTEQUAL:
1297 setp_token.control = SVGA3DOPCOMPC_NE;
1298 break;
1299 case PIPE_FUNC_GEQUAL:
1300 setp_token.control = SVGA3DOPCOMP_GE;
1301 break;
1302 case PIPE_FUNC_ALWAYS:
1303 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1304 dst, pass );
1305 break;
1306 }
1307
1308 /* SETP src0, COMPOP, src1 */
1309 if (!submit_op2( emit, setp_token, pred_reg,
1310 src0, src1 ))
1311 return FALSE;
1312
1313 mov_token = inst_token( SVGA3DOP_MOV );
1314
1315 /* MOV dst, fail */
1316 if (!submit_op1( emit, mov_token, dst,
1317 fail ))
1318 return FALSE;
1319
1320 /* MOV dst, pass (predicated)
1321 *
1322 * Note that the predicate reg (and possible modifiers) is passed
1323 * as the first source argument.
1324 */
1325 mov_token.predicated = 1;
1326 if (!submit_op2( emit, mov_token, dst,
1327 src( pred_reg ), pass ))
1328 return FALSE;
1329
1330 return TRUE;
1331 }
1332
1333
1334 static boolean
1335 emit_select(struct svga_shader_emitter *emit,
1336 unsigned compare_func,
1337 SVGA3dShaderDestToken dst,
1338 struct src_register src0,
1339 struct src_register src1 )
1340 {
1341 /* There are some SVGA instructions which implement some selects
1342 * directly, but they are only available in the vertex shader.
1343 */
1344 if (emit->unit == PIPE_SHADER_VERTEX) {
1345 switch (compare_func) {
1346 case PIPE_FUNC_GEQUAL:
1347 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src0, src1 );
1348 case PIPE_FUNC_LEQUAL:
1349 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src1, src0 );
1350 case PIPE_FUNC_GREATER:
1351 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src1, src0 );
1352 case PIPE_FUNC_LESS:
1353 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src0, src1 );
1354 default:
1355 break;
1356 }
1357 }
1358
1359
1360 /* Otherwise, need to use the setp approach:
1361 */
1362 {
1363 struct src_register one, zero;
1364 /* zero immediate is 0,0,0,1 */
1365 zero = get_zero_immediate( emit );
1366 one = scalar( zero, TGSI_SWIZZLE_W );
1367 zero = scalar( zero, TGSI_SWIZZLE_X );
1368
1369 return emit_conditional(
1370 emit,
1371 compare_func,
1372 dst,
1373 src0,
1374 src1,
1375 one, zero);
1376 }
1377 }
1378
1379
1380 static boolean emit_select_op(struct svga_shader_emitter *emit,
1381 unsigned compare,
1382 const struct tgsi_full_instruction *insn)
1383 {
1384 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1385 struct src_register src0 = translate_src_register(
1386 emit, &insn->Src[0] );
1387 struct src_register src1 = translate_src_register(
1388 emit, &insn->Src[1] );
1389
1390 return emit_select( emit, compare, dst, src0, src1 );
1391 }
1392
1393
1394 /* Translate texture instructions to SVGA3D representation.
1395 */
1396 static boolean emit_tex2(struct svga_shader_emitter *emit,
1397 const struct tgsi_full_instruction *insn,
1398 SVGA3dShaderDestToken dst )
1399 {
1400 SVGA3dShaderInstToken inst;
1401 struct src_register texcoord;
1402 struct src_register sampler;
1403 SVGA3dShaderDestToken tmp;
1404
1405 inst.value = 0;
1406
1407 switch (insn->Instruction.Opcode) {
1408 case TGSI_OPCODE_TEX:
1409 inst.op = SVGA3DOP_TEX;
1410 break;
1411 case TGSI_OPCODE_TXP:
1412 inst.op = SVGA3DOP_TEX;
1413 inst.control = SVGA3DOPCONT_PROJECT;
1414 break;
1415 case TGSI_OPCODE_TXB:
1416 inst.op = SVGA3DOP_TEX;
1417 inst.control = SVGA3DOPCONT_BIAS;
1418 break;
1419 case TGSI_OPCODE_TXL:
1420 inst.op = SVGA3DOP_TEXLDL;
1421 break;
1422 default:
1423 assert(0);
1424 return FALSE;
1425 }
1426
1427 texcoord = translate_src_register( emit, &insn->Src[0] );
1428 sampler = translate_src_register( emit, &insn->Src[1] );
1429
1430 if (emit->key.fkey.tex[sampler.base.num].unnormalized ||
1431 emit->dynamic_branching_level > 0)
1432 tmp = get_temp( emit );
1433
1434 /* Can't do mipmapping inside dynamic branch constructs. Force LOD
1435 * zero in that case.
1436 */
1437 if (emit->dynamic_branching_level > 0 &&
1438 inst.op == SVGA3DOP_TEX &&
1439 SVGA3dShaderGetRegType(texcoord.base.value) == SVGA3DREG_TEMP) {
1440 struct src_register zero = get_zero_immediate( emit );
1441
1442 /* MOV tmp, texcoord */
1443 if (!submit_op1( emit,
1444 inst_token( SVGA3DOP_MOV ),
1445 tmp,
1446 texcoord ))
1447 return FALSE;
1448
1449 /* MOV tmp.w, zero */
1450 if (!submit_op1( emit,
1451 inst_token( SVGA3DOP_MOV ),
1452 writemask( tmp, TGSI_WRITEMASK_W ),
1453 scalar( zero, TGSI_SWIZZLE_X )))
1454 return FALSE;
1455
1456 texcoord = src( tmp );
1457 inst.op = SVGA3DOP_TEXLDL;
1458 }
1459
1460 /* Explicit normalization of texcoords:
1461 */
1462 if (emit->key.fkey.tex[sampler.base.num].unnormalized) {
1463 struct src_register wh = get_tex_dimensions( emit, sampler.base.num );
1464
1465 /* MUL tmp, SRC0, WH */
1466 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1467 tmp, texcoord, wh ))
1468 return FALSE;
1469
1470 texcoord = src( tmp );
1471 }
1472
1473 return submit_op2( emit, inst, dst, texcoord, sampler );
1474 }
1475
1476
1477
1478
1479 /* Translate texture instructions to SVGA3D representation.
1480 */
1481 static boolean emit_tex4(struct svga_shader_emitter *emit,
1482 const struct tgsi_full_instruction *insn,
1483 SVGA3dShaderDestToken dst )
1484 {
1485 SVGA3dShaderInstToken inst;
1486 struct src_register texcoord;
1487 struct src_register ddx;
1488 struct src_register ddy;
1489 struct src_register sampler;
1490
1491 texcoord = translate_src_register( emit, &insn->Src[0] );
1492 ddx = translate_src_register( emit, &insn->Src[1] );
1493 ddy = translate_src_register( emit, &insn->Src[2] );
1494 sampler = translate_src_register( emit, &insn->Src[3] );
1495
1496 inst.value = 0;
1497
1498 switch (insn->Instruction.Opcode) {
1499 case TGSI_OPCODE_TXD:
1500 inst.op = SVGA3DOP_TEXLDD; /* 4 args! */
1501 break;
1502 default:
1503 assert(0);
1504 return FALSE;
1505 }
1506
1507 return submit_op4( emit, inst, dst, texcoord, sampler, ddx, ddy );
1508 }
1509
1510
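/* Translate a TGSI texture sampling instruction.  If the sampler does
 * shadow comparison, or (pre-SM3.0) the destination uses a partial
 * write mask, the raw texture result goes through a temporary first.
 */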
1511 static boolean emit_tex(struct svga_shader_emitter *emit,
1512 const struct tgsi_full_instruction *insn )
1513 {
1514 SVGA3dShaderDestToken dst =
1515 translate_dst_register( emit, insn, 0 );
1516 struct src_register src0 =
1517 translate_src_register( emit, &insn->Src[0] );
1518 struct src_register src1 =
1519 translate_src_register( emit, &insn->Src[1] );
1520
1521 SVGA3dShaderDestToken tex_result;
1522
1523 /* check for shadow samplers */
1524 boolean compare = (emit->key.fkey.tex[src1.base.num].compare_mode ==
1525 PIPE_TEX_COMPARE_R_TO_TEXTURE);
1526
1527
1528 /* If doing compare processing, need to put this value into a
1529 * temporary so it can be used as a source later on.
1530 */
1531 if (compare ||
1532 (!emit->use_sm30 && dst.mask != TGSI_WRITEMASK_XYZW) ) {
1533 tex_result = get_temp( emit );
1534 }
1535 else {
1536 tex_result = dst;
1537 }
1538
1539 switch(insn->Instruction.Opcode) {
1540 case TGSI_OPCODE_TEX:
1541 case TGSI_OPCODE_TXB:
1542 case TGSI_OPCODE_TXP:
1543 case TGSI_OPCODE_TXL:
1544 if (!emit_tex2( emit, insn, tex_result ))
1545 return FALSE;
1546 break;
1547 case TGSI_OPCODE_TXD:
1548 if (!emit_tex4( emit, insn, tex_result ))
1549 return FALSE;
1550 break;
1551 default:
1552 assert(0);
1553 }
1554
1555
1556 if (compare) {
1557 if (dst.mask & TGSI_WRITEMASK_XYZ) {
1558 SVGA3dShaderDestToken src0_zdivw = get_temp( emit );
1559 struct src_register tex_src_x = scalar(src(tex_result), TGSI_SWIZZLE_Y);
1560
1561 /* Divide texcoord R by Q */
1562 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1563 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1564 scalar(src0, TGSI_SWIZZLE_W) ))
1565 return FALSE;
1566
1567 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1568 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1569 scalar(src0, TGSI_SWIZZLE_Z),
1570 scalar(src(src0_zdivw), TGSI_SWIZZLE_X) ))
1571 return FALSE;
1572
1573 if (!emit_select(
1574 emit,
1575 emit->key.fkey.tex[src1.base.num].compare_func,
1576 writemask( dst, TGSI_WRITEMASK_XYZ ),
1577 scalar(src(src0_zdivw), TGSI_SWIZZLE_X),
1578 tex_src_x))
1579 return FALSE;
1580 }
1581
1582 if (dst.mask & TGSI_WRITEMASK_W) {
1583 struct src_register one =
1584 scalar( get_zero_immediate( emit ), TGSI_SWIZZLE_W );
1585
1586 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1587 writemask( dst, TGSI_WRITEMASK_W ),
1588 one ))
1589 return FALSE;
1590 }
1591
1592 return TRUE;
1593 }
1594 else if (!emit->use_sm30 && dst.mask != TGSI_WRITEMASK_XYZW)
1595 {
1596 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src(tex_result) ))
1597 return FALSE;
1598 }
1599
1600 return TRUE;
1601 }
1602
1603 static boolean emit_bgnloop2( struct svga_shader_emitter *emit,
1604 const struct tgsi_full_instruction *insn )
1605 {
1606 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_LOOP );
1607 struct src_register loop_reg = src_register( SVGA3DREG_LOOP, 0 );
1608 struct src_register const_int = get_loop_const( emit );
1609
1610 emit->dynamic_branching_level++;
1611
1612 return (emit_instruction( emit, inst ) &&
1613 emit_src( emit, loop_reg ) &&
1614 emit_src( emit, const_int ) );
1615 }
1616
1617 static boolean emit_endloop2( struct svga_shader_emitter *emit,
1618 const struct tgsi_full_instruction *insn )
1619 {
1620 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_ENDLOOP );
1621
1622 emit->dynamic_branching_level--;
1623
1624 return emit_instruction( emit, inst );
1625 }
1626
1627 static boolean emit_brk( struct svga_shader_emitter *emit,
1628 const struct tgsi_full_instruction *insn )
1629 {
1630 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_BREAK );
1631 return emit_instruction( emit, inst );
1632 }
1633
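/* Emit an instruction that consumes a single scalar operand,
 * replicating the x component of the source across all channels.
 */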
1634 static boolean emit_scalar_op1( struct svga_shader_emitter *emit,
1635 unsigned opcode,
1636 const struct tgsi_full_instruction *insn )
1637 {
1638 SVGA3dShaderInstToken inst;
1639 SVGA3dShaderDestToken dst;
1640 struct src_register src;
1641
1642 inst = inst_token( opcode );
1643 dst = translate_dst_register( emit, insn, 0 );
1644 src = translate_src_register( emit, &insn->Src[0] );
1645 src = scalar( src, TGSI_SWIZZLE_X );
1646
1647 return submit_op1( emit, inst, dst, src );
1648 }
1649
1650
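/* Emit an instruction that translates 1:1 from TGSI, dispatching on
 * the number of TGSI source operands.
 */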
1651 static boolean emit_simple_instruction(struct svga_shader_emitter *emit,
1652 unsigned opcode,
1653 const struct tgsi_full_instruction *insn )
1654 {
1655 const struct tgsi_full_src_register *src = insn->Src;
1656 SVGA3dShaderInstToken inst;
1657 SVGA3dShaderDestToken dst;
1658
1659 inst = inst_token( opcode );
1660 dst = translate_dst_register( emit, insn, 0 );
1661
1662 switch (insn->Instruction.NumSrcRegs) {
1663 case 0:
1664 return submit_op0( emit, inst, dst );
1665 case 1:
1666 return submit_op1( emit, inst, dst,
1667 translate_src_register( emit, &src[0] ));
1668 case 2:
1669 return submit_op2( emit, inst, dst,
1670 translate_src_register( emit, &src[0] ),
1671 translate_src_register( emit, &src[1] ) );
1672 case 3:
1673 return submit_op3( emit, inst, dst,
1674 translate_src_register( emit, &src[0] ),
1675 translate_src_register( emit, &src[1] ),
1676 translate_src_register( emit, &src[2] ) );
1677 default:
1678 assert(0);
1679 return FALSE;
1680 }
1681 }
1682
1683
1684 static boolean emit_deriv(struct svga_shader_emitter *emit,
1685 const struct tgsi_full_instruction *insn )
1686 {
1687 if (emit->dynamic_branching_level > 0 &&
1688 insn->Src[0].Register.File == TGSI_FILE_TEMPORARY)
1689 {
1690 struct src_register zero = get_zero_immediate( emit );
1691 SVGA3dShaderDestToken dst =
1692 translate_dst_register( emit, insn, 0 );
1693
1694 /* Deriv opcodes are not valid inside dynamic branching; work around
1695 * this by zeroing out the destination.
1696 */
1697 if (!submit_op1(emit,
1698 inst_token( SVGA3DOP_MOV ),
1699 dst,
1700 scalar(zero, TGSI_SWIZZLE_X)))
1701 return FALSE;
1702
1703 return TRUE;
1704 }
1705 else {
1706 unsigned opcode;
1707 const struct tgsi_full_src_register *reg = &insn->Src[0];
1708 SVGA3dShaderInstToken inst;
1709 SVGA3dShaderDestToken dst;
1710 struct src_register src0;
1711
1712 switch (insn->Instruction.Opcode) {
1713 case TGSI_OPCODE_DDX:
1714 opcode = SVGA3DOP_DSX;
1715 break;
1716 case TGSI_OPCODE_DDY:
1717 opcode = SVGA3DOP_DSY;
1718 break;
1719 default:
1720 return FALSE;
1721 }
1722
1723 inst = inst_token( opcode );
1724 dst = translate_dst_register( emit, insn, 0 );
1725 src0 = translate_src_register( emit, reg );
1726
1727 /* We cannot use negate or abs on the source of a dsx/dsy instruction.
1728 */
1729 if (reg->Register.Absolute ||
1730 reg->Register.Negate) {
1731 SVGA3dShaderDestToken temp = get_temp( emit );
1732
1733 if (!emit_repl( emit, temp, &src0 ))
1734 return FALSE;
1735 }
1736
1737 return submit_op1( emit, inst, dst, src0 );
1738 }
1739 }
1740
1741 static boolean emit_arl(struct svga_shader_emitter *emit,
1742 const struct tgsi_full_instruction *insn)
1743 {
1744 ++emit->current_arl;
1745 if (emit->unit == PIPE_SHADER_FRAGMENT) {
1746 /* MOVA not present in pixel shader instruction set.
1747 * Ignore this instruction altogether since it is
1748 * only used for loop counters -- and for that
1749 * we reference aL directly.
1750 */
1751 return TRUE;
1752 }
1753 if (svga_arl_needs_adjustment( emit )) {
1754 return emit_fake_arl( emit, insn );
1755 } else {
1756 /* no need to adjust, just emit straight arl */
1757 return emit_simple_instruction(emit, SVGA3DOP_MOVA, insn);
1758 }
1759 }
1760
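/* Translate TGSI POW.  POW is a scalar operation, may only write to a
 * temporary, and its second operand must not alias the destination, so
 * go through a scratch temp when necessary.
 */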
1761 static boolean emit_pow(struct svga_shader_emitter *emit,
1762 const struct tgsi_full_instruction *insn)
1763 {
1764 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1765 struct src_register src0 = translate_src_register(
1766 emit, &insn->Src[0] );
1767 struct src_register src1 = translate_src_register(
1768 emit, &insn->Src[1] );
1769 boolean need_tmp = FALSE;
1770
1771 /* POW can only output to a temporary */
1772 if (insn->Dst[0].Register.File != TGSI_FILE_TEMPORARY)
1773 need_tmp = TRUE;
1774
1775 /* POW src1 must not be the same register as dst */
1776 if (alias_src_dst( src1, dst ))
1777 need_tmp = TRUE;
1778
1779 /* it's a scalar op */
1780 src0 = scalar( src0, TGSI_SWIZZLE_X );
1781 src1 = scalar( src1, TGSI_SWIZZLE_X );
1782
1783 if (need_tmp) {
1784 SVGA3dShaderDestToken tmp = writemask(get_temp( emit ), TGSI_WRITEMASK_X );
1785
1786 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ), tmp, src0, src1))
1787 return FALSE;
1788
1789 return submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, scalar(src(tmp), 0) );
1790 }
1791 else {
1792 return submit_op2(emit, inst_token( SVGA3DOP_POW ), dst, src0, src1);
1793 }
1794 }
1795
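/* Translate TGSI XPD (cross product) to CRS, respecting the DX9
 * restrictions on the destination register and write mask, and writing
 * 1.0 to dst.w when requested.
 */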
1796 static boolean emit_xpd(struct svga_shader_emitter *emit,
1797 const struct tgsi_full_instruction *insn)
1798 {
1799 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1800 const struct src_register src0 = translate_src_register(
1801 emit, &insn->Src[0] );
1802 const struct src_register src1 = translate_src_register(
1803 emit, &insn->Src[1] );
1804 boolean need_dst_tmp = FALSE;
1805
1806 /* XPD can only output to a temporary */
1807 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP)
1808 need_dst_tmp = TRUE;
1809
1810 /* The dst reg must not be the same as src0 or src1*/
1811 if (alias_src_dst(src0, dst) ||
1812 alias_src_dst(src1, dst))
1813 need_dst_tmp = TRUE;
1814
1815 if (need_dst_tmp) {
1816 SVGA3dShaderDestToken tmp = get_temp( emit );
1817
1818 /* Obey DX9 restrictions on mask:
1819 */
1820 tmp.mask = dst.mask & TGSI_WRITEMASK_XYZ;
1821
1822 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), tmp, src0, src1))
1823 return FALSE;
1824
1825 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
1826 return FALSE;
1827 }
1828 else {
1829 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), dst, src0, src1))
1830 return FALSE;
1831 }
1832
1833 /* Need to emit 1.0 to dst.w?
1834 */
1835 if (dst.mask & TGSI_WRITEMASK_W) {
1836 struct src_register zero = get_zero_immediate( emit );
1837
1838 if (!submit_op1(emit,
1839 inst_token( SVGA3DOP_MOV ),
1840 writemask(dst, TGSI_WRITEMASK_W),
1841 zero))
1842 return FALSE;
1843 }
1844
1845 return TRUE;
1846 }
1847
1848
1849 static boolean emit_lrp(struct svga_shader_emitter *emit,
1850 const struct tgsi_full_instruction *insn)
1851 {
1852 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1853 const struct src_register src0 = translate_src_register(
1854 emit, &insn->Src[0] );
1855 const struct src_register src1 = translate_src_register(
1856 emit, &insn->Src[1] );
1857 const struct src_register src2 = translate_src_register(
1858 emit, &insn->Src[2] );
1859
1860 return submit_lrp(emit, dst, src0, src1, src2);
1861 }
1862
1863
1864 static boolean emit_dst_insn(struct svga_shader_emitter *emit,
1865 const struct tgsi_full_instruction *insn )
1866 {
1867 if (emit->unit == PIPE_SHADER_VERTEX) {
1868 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
1869 */
1870 return emit_simple_instruction(emit, SVGA3DOP_DST, insn);
1871 }
1872 else {
1873
1874 /* result[0] = 1 * 1;
1875 * result[1] = a[1] * b[1];
1876 * result[2] = a[2] * 1;
1877 * result[3] = 1 * b[3];
1878 */
1879
1880 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1881 SVGA3dShaderDestToken tmp;
1882 const struct src_register src0 = translate_src_register(
1883 emit, &insn->Src[0] );
1884 const struct src_register src1 = translate_src_register(
1885 emit, &insn->Src[1] );
1886 struct src_register zero = get_zero_immediate( emit );
1887 boolean need_tmp = FALSE;
1888
1889 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
1890 alias_src_dst(src0, dst) ||
1891 alias_src_dst(src1, dst))
1892 need_tmp = TRUE;
1893
1894 if (need_tmp) {
1895 tmp = get_temp( emit );
1896 }
1897 else {
1898 tmp = dst;
1899 }
1900
1901 /* tmp.xw = 1.0
1902 */
1903 if (tmp.mask & TGSI_WRITEMASK_XW) {
1904 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1905 writemask(tmp, TGSI_WRITEMASK_XW ),
1906 scalar( zero, 3 )))
1907 return FALSE;
1908 }
1909
1910 /* tmp.yz = src0
1911 */
1912 if (tmp.mask & TGSI_WRITEMASK_YZ) {
1913 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1914 writemask(tmp, TGSI_WRITEMASK_YZ ),
1915 src0))
1916 return FALSE;
1917 }
1918
1919 /* tmp.yw = tmp * src1
1920 */
1921 if (tmp.mask & TGSI_WRITEMASK_YW) {
1922 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1923 writemask(tmp, TGSI_WRITEMASK_YW ),
1924 src(tmp),
1925 src1))
1926 return FALSE;
1927 }
1928
1929 /* dst = tmp
1930 */
1931 if (need_tmp) {
1932 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1933 dst,
1934 src(tmp)))
1935 return FALSE;
1936 }
1937 }
1938
1939 return TRUE;
1940 }
1941
1942
1943 static boolean emit_exp(struct svga_shader_emitter *emit,
1944 const struct tgsi_full_instruction *insn)
1945 {
1946 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1947 struct src_register src0 =
1948 translate_src_register( emit, &insn->Src[0] );
1949 struct src_register zero = get_zero_immediate( emit );
1950 SVGA3dShaderDestToken fraction;
1951
1952 if (dst.mask & TGSI_WRITEMASK_Y)
1953 fraction = dst;
1954 else if (dst.mask & TGSI_WRITEMASK_X)
1955 fraction = get_temp( emit );
1956 else
1957 fraction.value = 0;
1958
1959 /* If x or y is being written, fill the fraction reg with src0 - floor(src0).
1960 */
1961 if (dst.mask & TGSI_WRITEMASK_XY) {
1962 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
1963 writemask( fraction, TGSI_WRITEMASK_Y ),
1964 src0 ))
1965 return FALSE;
1966 }
1967
1968 /* If x is being written, fill it with 2 ^ floor(src0).
1969 */
1970 if (dst.mask & TGSI_WRITEMASK_X) {
1971 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
1972 writemask( dst, TGSI_WRITEMASK_X ),
1973 src0,
1974 scalar( negate( src( fraction ) ), TGSI_SWIZZLE_Y ) ) )
1975 return FALSE;
1976
1977 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
1978 writemask( dst, TGSI_WRITEMASK_X ),
1979 scalar( src( dst ), TGSI_SWIZZLE_X ) ) )
1980 return FALSE;
1981
1982 if (!(dst.mask & TGSI_WRITEMASK_Y))
1983 release_temp( emit, fraction );
1984 }
1985
1986 /* If z is being written, fill it with 2 ^ src0 (partial precision).
1987 */
1988 if (dst.mask & TGSI_WRITEMASK_Z) {
1989 if (!submit_op1( emit, inst_token( SVGA3DOP_EXPP ),
1990 writemask( dst, TGSI_WRITEMASK_Z ),
1991 src0 ) )
1992 return FALSE;
1993 }
1994
1995 /* If w is being written, fill it with one.
1996 */
1997 if (dst.mask & TGSI_WRITEMASK_W) {
1998 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1999 writemask(dst, TGSI_WRITEMASK_W),
2000 scalar( zero, TGSI_SWIZZLE_W ) ))
2001 return FALSE;
2002 }
2003
2004 return TRUE;
2005 }
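
/* Putting it together, EXP produces (sketch, with f = frc(src0.x)):
 *
 *   dst.y = f                 = src0.x - floor(src0.x)
 *   dst.x = exp2(src0.x - f)  = 2 ^ floor(src0.x)
 *   dst.z = 2 ^ src0.x        (partial precision, via EXPP)
 *   dst.w = 1.0
 *
 * with each step emitted only when the corresponding component is in
 * the writemask; the fraction is still computed when only x is written,
 * since x depends on it.
 */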
2006
2007 static boolean emit_lit(struct svga_shader_emitter *emit,
2008 const struct tgsi_full_instruction *insn )
2009 {
2010 if (emit->unit == PIPE_SHADER_VERTEX) {
2011 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
2012 */
2013 return emit_simple_instruction(emit, SVGA3DOP_LIT, insn);
2014 }
2015 else {
2016
2017       /* D3D vs. GL semantics can be fairly easily accommodated by
2018 * variations on this sequence.
2019 *
2020 * GL:
2021 * tmp.y = src.x
2022 * tmp.z = pow(src.y,src.w)
2023 * p0 = src0.xxxx > 0
2024 * result = zero.wxxw
2025 * (p0) result.yz = tmp
2026 *
2027 * D3D:
2028 * tmp.y = src.x
2029 * tmp.z = pow(src.y,src.w)
2030 * p0 = src0.xxyy > 0
2031 * result = zero.wxxw
2032 * (p0) result.yz = tmp
2033 *
2034 * Will implement the GL version for now.
2035 */
2036
2037 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2038 SVGA3dShaderDestToken tmp = get_temp( emit );
2039 const struct src_register src0 = translate_src_register(
2040 emit, &insn->Src[0] );
2041 struct src_register zero = get_zero_immediate( emit );
2042
2043 /* tmp = pow(src.y, src.w)
2044 */
2045 if (dst.mask & TGSI_WRITEMASK_Z) {
2046 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ),
2047 tmp,
2048 scalar(src0, 1),
2049 scalar(src0, 3)))
2050 return FALSE;
2051 }
2052
2053 /* tmp.y = src.x
2054 */
2055 if (dst.mask & TGSI_WRITEMASK_Y) {
2056 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2057 writemask(tmp, TGSI_WRITEMASK_Y ),
2058 scalar(src0, 0)))
2059 return FALSE;
2060 }
2061
2062       /* Can't quite do this with emit_conditional() because of the extra
2063        * writemask on the predicated mov:
2064 */
2065 {
2066 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
2067 SVGA3dShaderInstToken setp_token, mov_token;
2068 struct src_register predsrc;
2069
2070 setp_token = inst_token( SVGA3DOP_SETP );
2071 mov_token = inst_token( SVGA3DOP_MOV );
2072
2073 setp_token.control = SVGA3DOPCOMP_GT;
2074
2075 /* D3D vs GL semantics:
2076 */
2077 if (0)
2078 predsrc = swizzle(src0, 0, 0, 1, 1); /* D3D */
2079 else
2080 predsrc = swizzle(src0, 0, 0, 0, 0); /* GL */
2081
2082          /* SETP p0, predsrc, GT, {0}.xxxx */
2083 if (!submit_op2( emit, setp_token, pred_reg,
2084 predsrc,
2085 swizzle(zero, 0, 0, 0, 0) ))
2086 return FALSE;
2087
2088 /* MOV dst, fail */
2089 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst,
2090 swizzle(zero, 3, 0, 0, 3 )))
2091 return FALSE;
2092
2093 /* MOV dst.yz, tmp (predicated)
2094 *
2095 * Note that the predicate reg (and possible modifiers) is passed
2096 * as the first source argument.
2097 */
2098 if (dst.mask & TGSI_WRITEMASK_YZ) {
2099 mov_token.predicated = 1;
2100 if (!submit_op2( emit, mov_token,
2101 writemask(dst, TGSI_WRITEMASK_YZ),
2102 src( pred_reg ), src( tmp ) ))
2103 return FALSE;
2104 }
2105 }
2106 }
2107
2108 return TRUE;
2109 }
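
/* The fragment-shader LIT expansion above therefore emits roughly the
 * following (GL variant; a sketch, with tmp a scratch temp and {0} the
 * {0,0,0,1} immediate):
 *
 *   POW      tmp, src0.y, src0.w
 *   MOV      tmp.y, src0.x
 *   SETP_GT  p0, src0.xxxx, {0}.xxxx
 *   MOV      dst, {0}.wxxw            dst = (1, 0, 0, 1)
 *   (p0) MOV dst.yz, tmp
 */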
2110
2111
2112
2113
2114 static boolean emit_ex2( struct svga_shader_emitter *emit,
2115 const struct tgsi_full_instruction *insn )
2116 {
2117 SVGA3dShaderInstToken inst;
2118 SVGA3dShaderDestToken dst;
2119 struct src_register src0;
2120
2121 inst = inst_token( SVGA3DOP_EXP );
2122 dst = translate_dst_register( emit, insn, 0 );
2123 src0 = translate_src_register( emit, &insn->Src[0] );
2124 src0 = scalar( src0, TGSI_SWIZZLE_X );
2125
2126 if (dst.mask != TGSI_WRITEMASK_XYZW) {
2127 SVGA3dShaderDestToken tmp = get_temp( emit );
2128
2129 if (!submit_op1( emit, inst, tmp, src0 ))
2130 return FALSE;
2131
2132 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2133 dst,
2134 scalar( src( tmp ), TGSI_SWIZZLE_X ) );
2135 }
2136
2137 return submit_op1( emit, inst, dst, src0 );
2138 }
2139
2140
2141 static boolean emit_log(struct svga_shader_emitter *emit,
2142 const struct tgsi_full_instruction *insn)
2143 {
2144 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2145 struct src_register src0 =
2146 translate_src_register( emit, &insn->Src[0] );
2147 struct src_register zero = get_zero_immediate( emit );
2148 SVGA3dShaderDestToken abs_tmp;
2149 struct src_register abs_src0;
2150 SVGA3dShaderDestToken log2_abs;
2151
2152 abs_tmp.value = 0;
2153
2154 if (dst.mask & TGSI_WRITEMASK_Z)
2155 log2_abs = dst;
2156 else if (dst.mask & TGSI_WRITEMASK_XY)
2157 log2_abs = get_temp( emit );
2158 else
2159 log2_abs.value = 0;
2160
2161    /* If x, y or z is being written, compute log2( abs( src0 ) );
2162     * z wants it directly and x/y derive their values from it. */
2163 if (dst.mask & TGSI_WRITEMASK_XYZ) {
2164 if (!src0.base.srcMod || src0.base.srcMod == SVGA3DSRCMOD_ABS)
2165 abs_src0 = src0;
2166 else {
2167 abs_tmp = get_temp( emit );
2168
2169 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2170 abs_tmp,
2171 src0 ) )
2172 return FALSE;
2173
2174 abs_src0 = src( abs_tmp );
2175 }
2176
2177 abs_src0 = absolute( scalar( abs_src0, TGSI_SWIZZLE_X ) );
2178
2179 if (!submit_op1( emit, inst_token( SVGA3DOP_LOG ),
2180 writemask( log2_abs, TGSI_WRITEMASK_Z ),
2181 abs_src0 ) )
2182 return FALSE;
2183 }
2184
2185 if (dst.mask & TGSI_WRITEMASK_XY) {
2186 SVGA3dShaderDestToken floor_log2;
2187
2188 if (dst.mask & TGSI_WRITEMASK_X)
2189 floor_log2 = dst;
2190 else
2191 floor_log2 = get_temp( emit );
2192
2193       /* Compute floor( log2( abs( src0 ) ) ); it lands in x if x is
2194        * being written, otherwise in a temp for use by y below. */
2195 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2196 writemask( floor_log2, TGSI_WRITEMASK_X ),
2197 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ) ) )
2198 return FALSE;
2199
2200 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2201 writemask( floor_log2, TGSI_WRITEMASK_X ),
2202 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ),
2203 negate( src( floor_log2 ) ) ) )
2204 return FALSE;
2205
2206 /* If y is being written, fill it with
2207 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
2208 */
2209 if (dst.mask & TGSI_WRITEMASK_Y) {
2210 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2211 writemask( dst, TGSI_WRITEMASK_Y ),
2212 negate( scalar( src( floor_log2 ),
2213 TGSI_SWIZZLE_X ) ) ) )
2214 return FALSE;
2215
2216 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2217 writemask( dst, TGSI_WRITEMASK_Y ),
2218 src( dst ),
2219 abs_src0 ) )
2220 return FALSE;
2221 }
2222
2223 if (!(dst.mask & TGSI_WRITEMASK_X))
2224 release_temp( emit, floor_log2 );
2225
2226 if (!(dst.mask & TGSI_WRITEMASK_Z))
2227 release_temp( emit, log2_abs );
2228 }
2229
2230 if (dst.mask & TGSI_WRITEMASK_XYZ && src0.base.srcMod &&
2231 src0.base.srcMod != SVGA3DSRCMOD_ABS)
2232 release_temp( emit, abs_tmp );
2233
2234 /* If w is being written, fill it with one.
2235 */
2236 if (dst.mask & TGSI_WRITEMASK_W) {
2237 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2238 writemask(dst, TGSI_WRITEMASK_W),
2239 scalar( zero, TGSI_SWIZZLE_W ) ))
2240 return FALSE;
2241 }
2242
2243 return TRUE;
2244 }
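
/* Per component, the LOG expansion above computes (sketch, with
 * L = log2(|src0.x|)):
 *
 *   dst.z = L                              LOG
 *   dst.x = L - frc(L) = floor(L)          FRC + ADD
 *   dst.y = exp2(-floor(L)) * |src0.x|     = |src0.x| / 2^floor(L)
 *   dst.w = 1.0
 *
 * Temporaries stand in for whichever of x/y/z isn't actually written,
 * and src0 is routed through an extra temp only when it carries a
 * source modifier other than ABS, so that absolute() can be applied
 * cleanly.
 */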
2245
2246
2247 static boolean emit_bgnsub( struct svga_shader_emitter *emit,
2248 unsigned position,
2249 const struct tgsi_full_instruction *insn )
2250 {
2251 unsigned i;
2252
2253 /* Note that we've finished the main function and are now emitting
2254 * subroutines. This affects how we terminate the generated
2255 * shader.
2256 */
2257 emit->in_main_func = FALSE;
2258
2259 for (i = 0; i < emit->nr_labels; i++) {
2260 if (emit->label[i] == position) {
2261 return (emit_instruction( emit, inst_token( SVGA3DOP_RET ) ) &&
2262 emit_instruction( emit, inst_token( SVGA3DOP_LABEL ) ) &&
2263 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2264 }
2265 }
2266
2267 assert(0);
2268 return TRUE;
2269 }
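
/* With the above, the generated shader ends up laid out roughly as:
 *
 *   <main body>
 *   RET               emitted by the first BGNSUB
 *   LABEL l0
 *   <subroutine 0>
 *   RET               emitted by the next BGNSUB, or by the final fixup
 *   LABEL l1
 *   ...
 *   END
 *
 * using the label indices handed out by emit_call() below.
 */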
2270
2271 static boolean emit_call( struct svga_shader_emitter *emit,
2272 const struct tgsi_full_instruction *insn )
2273 {
2274 unsigned position = insn->Label.Label;
2275 unsigned i;
2276
2277 for (i = 0; i < emit->nr_labels; i++) {
2278 if (emit->label[i] == position)
2279 break;
2280 }
2281
2282    if (i == emit->nr_labels) {
2283       /* New label: fail cleanly if the label table is full. */
2284       if (emit->nr_labels == Elements(emit->label))
2285          return FALSE;
2286       emit->label[i] = position;
2287       emit->nr_labels++;
2288    }
2289
2290 return (emit_instruction( emit, inst_token( SVGA3DOP_CALL ) ) &&
2291 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2292 }
2293
2294
2295 static boolean emit_end( struct svga_shader_emitter *emit )
2296 {
2297 if (emit->unit == PIPE_SHADER_VERTEX) {
2298 return emit_vs_postamble( emit );
2299 }
2300 else {
2301 return emit_ps_postamble( emit );
2302 }
2303 }
2304
2305
2306
2307 static boolean svga_emit_instruction( struct svga_shader_emitter *emit,
2308 unsigned position,
2309 const struct tgsi_full_instruction *insn )
2310 {
2311 switch (insn->Instruction.Opcode) {
2312
2313 case TGSI_OPCODE_ARL:
2314 return emit_arl( emit, insn );
2315
2316 case TGSI_OPCODE_TEX:
2317 case TGSI_OPCODE_TXB:
2318 case TGSI_OPCODE_TXP:
2319 case TGSI_OPCODE_TXL:
2320 case TGSI_OPCODE_TXD:
2321 return emit_tex( emit, insn );
2322
2323 case TGSI_OPCODE_DDX:
2324 case TGSI_OPCODE_DDY:
2325 return emit_deriv( emit, insn );
2326
2327 case TGSI_OPCODE_BGNSUB:
2328 return emit_bgnsub( emit, position, insn );
2329
2330 case TGSI_OPCODE_ENDSUB:
2331 return TRUE;
2332
2333 case TGSI_OPCODE_CAL:
2334 return emit_call( emit, insn );
2335
2336 case TGSI_OPCODE_FLR:
2337 case TGSI_OPCODE_TRUNC: /* should be TRUNC, not FLR */
2338 return emit_floor( emit, insn );
2339
2340 case TGSI_OPCODE_CMP:
2341 return emit_cmp( emit, insn );
2342
2343 case TGSI_OPCODE_DIV:
2344 return emit_div( emit, insn );
2345
2346 case TGSI_OPCODE_DP2:
2347 return emit_dp2( emit, insn );
2348
2349 case TGSI_OPCODE_DPH:
2350 return emit_dph( emit, insn );
2351
2352 case TGSI_OPCODE_NRM:
2353 return emit_nrm( emit, insn );
2354
2355 case TGSI_OPCODE_COS:
2356 return emit_cos( emit, insn );
2357
2358 case TGSI_OPCODE_SIN:
2359 return emit_sin( emit, insn );
2360
2361 case TGSI_OPCODE_SCS:
2362 return emit_sincos( emit, insn );
2363
2364 case TGSI_OPCODE_END:
2365 /* TGSI always finishes the main func with an END */
2366 return emit_end( emit );
2367
2368 case TGSI_OPCODE_KIL:
2369 return emit_kil( emit, insn );
2370
2371 /* Selection opcodes. The underlying language is fairly
2372 * non-orthogonal about these.
2373 */
2374 case TGSI_OPCODE_SEQ:
2375 return emit_select_op( emit, PIPE_FUNC_EQUAL, insn );
2376
2377 case TGSI_OPCODE_SNE:
2378 return emit_select_op( emit, PIPE_FUNC_NOTEQUAL, insn );
2379
2380 case TGSI_OPCODE_SGT:
2381 return emit_select_op( emit, PIPE_FUNC_GREATER, insn );
2382
2383 case TGSI_OPCODE_SGE:
2384 return emit_select_op( emit, PIPE_FUNC_GEQUAL, insn );
2385
2386 case TGSI_OPCODE_SLT:
2387 return emit_select_op( emit, PIPE_FUNC_LESS, insn );
2388
2389 case TGSI_OPCODE_SLE:
2390 return emit_select_op( emit, PIPE_FUNC_LEQUAL, insn );
2391
2392 case TGSI_OPCODE_SUB:
2393 return emit_sub( emit, insn );
2394
2395 case TGSI_OPCODE_POW:
2396 return emit_pow( emit, insn );
2397
2398 case TGSI_OPCODE_EX2:
2399 return emit_ex2( emit, insn );
2400
2401 case TGSI_OPCODE_EXP:
2402 return emit_exp( emit, insn );
2403
2404 case TGSI_OPCODE_LOG:
2405 return emit_log( emit, insn );
2406
2407 case TGSI_OPCODE_LG2:
2408 return emit_scalar_op1( emit, SVGA3DOP_LOG, insn );
2409
2410 case TGSI_OPCODE_RSQ:
2411 return emit_scalar_op1( emit, SVGA3DOP_RSQ, insn );
2412
2413 case TGSI_OPCODE_RCP:
2414 return emit_scalar_op1( emit, SVGA3DOP_RCP, insn );
2415
2416 case TGSI_OPCODE_CONT:
2417 case TGSI_OPCODE_RET:
2418 /* This is a noop -- we tell mesa that we can't support RET
2419 * within a function (early return), so this will always be
2420 * followed by an ENDSUB.
2421 */
2422 return TRUE;
2423
2424 /* These aren't actually used by any of the frontends we care
2425 * about:
2426 */
2427 case TGSI_OPCODE_CLAMP:
2428 case TGSI_OPCODE_ROUND:
2429 case TGSI_OPCODE_AND:
2430 case TGSI_OPCODE_OR:
2431 case TGSI_OPCODE_I2F:
2432 case TGSI_OPCODE_NOT:
2433 case TGSI_OPCODE_SHL:
2434 case TGSI_OPCODE_ISHR:
2435 case TGSI_OPCODE_XOR:
2436 return FALSE;
2437
2438 case TGSI_OPCODE_IF:
2439 return emit_if( emit, insn );
2440 case TGSI_OPCODE_ELSE:
2441 return emit_else( emit, insn );
2442 case TGSI_OPCODE_ENDIF:
2443 return emit_endif( emit, insn );
2444
2445 case TGSI_OPCODE_BGNLOOP:
2446 return emit_bgnloop2( emit, insn );
2447 case TGSI_OPCODE_ENDLOOP:
2448 return emit_endloop2( emit, insn );
2449 case TGSI_OPCODE_BRK:
2450 return emit_brk( emit, insn );
2451
2452 case TGSI_OPCODE_XPD:
2453 return emit_xpd( emit, insn );
2454
2455 case TGSI_OPCODE_KILP:
2456 return emit_kilp( emit, insn );
2457
2458 case TGSI_OPCODE_DST:
2459 return emit_dst_insn( emit, insn );
2460
2461 case TGSI_OPCODE_LIT:
2462 return emit_lit( emit, insn );
2463
2464 case TGSI_OPCODE_LRP:
2465 return emit_lrp( emit, insn );
2466
2467 case TGSI_OPCODE_SSG:
2468 return emit_ssg( emit, insn );
2469
2470 default: {
2471 unsigned opcode = translate_opcode(insn->Instruction.Opcode);
2472
2473 if (opcode == SVGA3DOP_LAST_INST)
2474 return FALSE;
2475
2476 if (!emit_simple_instruction( emit, opcode, insn ))
2477 return FALSE;
2478 }
2479 }
2480
2481 return TRUE;
2482 }
2483
2484
2485 static boolean svga_emit_immediate( struct svga_shader_emitter *emit,
2486 struct tgsi_full_immediate *imm)
2487 {
2488 static const float id[4] = {0,0,0,1};
2489 float value[4];
2490 unsigned i;
2491
2492 assert(1 <= imm->Immediate.NrTokens && imm->Immediate.NrTokens <= 5);
2493 for (i = 0; i < imm->Immediate.NrTokens - 1; i++)
2494 value[i] = imm->u[i].Float;
2495
2496 for ( ; i < 4; i++ )
2497 value[i] = id[i];
2498
2499 return emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
2500 emit->imm_start + emit->internal_imm_count++,
2501 value[0], value[1], value[2], value[3]);
2502 }
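
/* E.g. a two-component TGSI immediate { 0.5, 2.0 } is emitted as the
 * float constant { 0.5, 2.0, 0.0, 1.0 }: missing components are padded
 * from the {0,0,0,1} identity.
 */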
2503
2504 static boolean make_immediate( struct svga_shader_emitter *emit,
2505 float a,
2506 float b,
2507 float c,
2508 float d,
2509 struct src_register *out )
2510 {
2511 unsigned idx = emit->nr_hw_float_const++;
2512
2513 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
2514 idx, a, b, c, d ))
2515 return FALSE;
2516
2517 *out = src_register( SVGA3DREG_CONST, idx );
2518
2519 return TRUE;
2520 }
2521
2522 static boolean emit_vs_preamble( struct svga_shader_emitter *emit )
2523 {
2524 if (!emit->key.vkey.need_prescale) {
2525 if (!make_immediate( emit, 0, 0, .5, .5,
2526 &emit->imm_0055))
2527 return FALSE;
2528 }
2529
2530 return TRUE;
2531 }
2532
2533 static boolean emit_ps_preamble( struct svga_shader_emitter *emit )
2534 {
2535 unsigned i;
2536
2537 /* For SM20, need to initialize the temporaries we're using to hold
2538 * color outputs to some value. Shaders which don't set all of
2539 * these values are likely to be rejected by the DX9 runtime.
2540 */
2541 if (!emit->use_sm30) {
2542 struct src_register zero = get_zero_immediate( emit );
2543 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
2544 if (SVGA3dShaderGetRegType(emit->true_col[i].value) != 0) {
2545
2546 if (!submit_op1( emit,
2547 inst_token(SVGA3DOP_MOV),
2548 emit->temp_col[i],
2549 zero ))
2550 return FALSE;
2551 }
2552 }
2553 } else if (emit->ps_reads_pos) {
2554 /*
2555 * Assemble the position from various bits of inputs. Depth and W are
2556        * passed in a texcoord because D3D's vPos does not hold Z or W.
2557        * We also fix up the perspective interpolation here.
2558 *
2559 * temp_pos.xy = vPos.xy
2560 * temp_pos.w = rcp(texcoord1.w);
2561 * temp_pos.z = texcoord1.z * temp_pos.w;
2562 */
2563 if (!submit_op1( emit,
2564 inst_token(SVGA3DOP_MOV),
2565 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_XY ),
2566 emit->ps_true_pos ))
2567 return FALSE;
2568
2569 if (!submit_op1( emit,
2570 inst_token(SVGA3DOP_RCP),
2571 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_W ),
2572 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_W ) ))
2573 return FALSE;
2574
2575 if (!submit_op2( emit,
2576 inst_token(SVGA3DOP_MUL),
2577 writemask( emit->ps_temp_pos, TGSI_WRITEMASK_Z ),
2578 scalar( emit->ps_depth_pos, TGSI_SWIZZLE_Z ),
2579 scalar( src(emit->ps_temp_pos), TGSI_SWIZZLE_W ) ))
2580 return FALSE;
2581 }
2582
2583 return TRUE;
2584 }
2585
2586 static boolean emit_ps_postamble( struct svga_shader_emitter *emit )
2587 {
2588 unsigned i;
2589
2590 /* PS oDepth is incredibly fragile and it's very hard to catch the
2591 * types of usage that break it during shader emit. Easier just to
2592 * redirect the main program to a temporary and then only touch
2593 * oDepth with a hand-crafted MOV below.
2594 */
2595 if (SVGA3dShaderGetRegType(emit->true_pos.value) != 0) {
2596
2597 if (!submit_op1( emit,
2598 inst_token(SVGA3DOP_MOV),
2599 emit->true_pos,
2600 scalar(src(emit->temp_pos), TGSI_SWIZZLE_Z) ))
2601 return FALSE;
2602 }
2603
2604 /* Similarly for SM20 color outputs... Luckily SM30 isn't so
2605 * fragile.
2606 */
2607 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
2608 if (SVGA3dShaderGetRegType(emit->true_col[i].value) != 0) {
2609
2610 /* Potentially override output colors with white for XOR
2611 * logicop workaround.
2612 */
2613 if (emit->unit == PIPE_SHADER_FRAGMENT &&
2614 emit->key.fkey.white_fragments) {
2615
2616 struct src_register one = scalar( get_zero_immediate( emit ),
2617 TGSI_SWIZZLE_W );
2618
2619 if (!submit_op1( emit,
2620 inst_token(SVGA3DOP_MOV),
2621 emit->true_col[i],
2622 one ))
2623 return FALSE;
2624 }
2625 else {
2626 if (!submit_op1( emit,
2627 inst_token(SVGA3DOP_MOV),
2628 emit->true_col[i],
2629 src(emit->temp_col[i]) ))
2630 return FALSE;
2631 }
2632 }
2633 }
2634
2635 return TRUE;
2636 }
2637
2638 static boolean emit_vs_postamble( struct svga_shader_emitter *emit )
2639 {
2640 /* PSIZ output is incredibly fragile and it's very hard to catch
2641 * the types of usage that break it during shader emit. Easier
2642 * just to redirect the main program to a temporary and then only
2643 * touch PSIZ with a hand-crafted MOV below.
2644 */
2645 if (SVGA3dShaderGetRegType(emit->true_psiz.value) != 0) {
2646
2647 if (!submit_op1( emit,
2648 inst_token(SVGA3DOP_MOV),
2649 emit->true_psiz,
2650 scalar(src(emit->temp_psiz), TGSI_SWIZZLE_X) ))
2651 return FALSE;
2652 }
2653
2654 /* Need to perform various manipulations on vertex position to cope
2655 * with the different GL and D3D clip spaces.
2656 */
2657 if (emit->key.vkey.need_prescale) {
2658 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
2659 SVGA3dShaderDestToken depth = emit->depth_pos;
2660 SVGA3dShaderDestToken pos = emit->true_pos;
2661 unsigned offset = emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
2662 struct src_register prescale_scale = src_register( SVGA3DREG_CONST,
2663 offset + 0 );
2664 struct src_register prescale_trans = src_register( SVGA3DREG_CONST,
2665 offset + 1 );
2666
2667 if (!submit_op1( emit,
2668 inst_token(SVGA3DOP_MOV),
2669 writemask(depth, TGSI_WRITEMASK_W),
2670 scalar(src(temp_pos), TGSI_SWIZZLE_W) ))
2671 return FALSE;
2672
2673 /* MUL temp_pos.xyz, temp_pos, prescale.scale
2674 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
2675 * --> Note that prescale.trans.w == 0
2676 */
2677 if (!submit_op2( emit,
2678 inst_token(SVGA3DOP_MUL),
2679 writemask(temp_pos, TGSI_WRITEMASK_XYZ),
2680 src(temp_pos),
2681 prescale_scale ))
2682 return FALSE;
2683
2684 if (!submit_op3( emit,
2685 inst_token(SVGA3DOP_MAD),
2686 pos,
2687 swizzle(src(temp_pos), 3, 3, 3, 3),
2688 prescale_trans,
2689 src(temp_pos)))
2690 return FALSE;
2691
2692       /* Also write the transformed position to the depth texcoord reg */
2693 if (!submit_op3( emit,
2694 inst_token(SVGA3DOP_MAD),
2695 writemask(depth, TGSI_WRITEMASK_XYZ),
2696 swizzle(src(temp_pos), 3, 3, 3, 3),
2697 prescale_trans,
2698 src(temp_pos) ))
2699 return FALSE;
2700 }
2701 else {
2702 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
2703 SVGA3dShaderDestToken depth = emit->depth_pos;
2704 SVGA3dShaderDestToken pos = emit->true_pos;
2705 struct src_register imm_0055 = emit->imm_0055;
2706
2707 /* Adjust GL clipping coordinate space to hardware (D3D-style):
2708 *
2709 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
2710 * MOV result.position, temp_pos
2711 */
2712 if (!submit_op2( emit,
2713 inst_token(SVGA3DOP_DP4),
2714 writemask(temp_pos, TGSI_WRITEMASK_Z),
2715 imm_0055,
2716 src(temp_pos) ))
2717 return FALSE;
2718
2719 if (!submit_op1( emit,
2720 inst_token(SVGA3DOP_MOV),
2721 pos,
2722 src(temp_pos) ))
2723 return FALSE;
2724
2725 /* Move the manipulated depth into the extra texcoord reg */
2726 if (!submit_op1( emit,
2727 inst_token(SVGA3DOP_MOV),
2728 depth,
2729 src(temp_pos) ))
2730 return FALSE;
2731 }
2732
2733 return TRUE;
2734 }
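
/* Numerically, the two position paths above amount to:
 *
 *   prescale:     pos.xyz = temp_pos.xyz * prescale.scale.xyz
 *                             + temp_pos.w * prescale.trans.xyz
 *                 pos.w   = temp_pos.w        (prescale.trans.w == 0)
 *
 *   no prescale:  pos     = temp_pos, except
 *                 pos.z   = 0.5 * temp_pos.z + 0.5 * temp_pos.w
 *
 * i.e. GL's clip-space z in [-w, w] is remapped to D3D's [0, w].  In
 * both cases the transformed position is also copied into the depth
 * texcoord register for the fragment shader to read back.
 */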
2735
2736 /*
2737 0: IF VFACE :4
2738 1: COLOR = FrontColor;
2739 2: ELSE
2740 3: COLOR = BackColor;
2741 4: ENDIF
2742 */
2743 static boolean emit_light_twoside( struct svga_shader_emitter *emit )
2744 {
2745 struct src_register vface, zero;
2746 struct src_register front[2];
2747 struct src_register back[2];
2748 SVGA3dShaderDestToken color[2];
2749 int count = emit->internal_color_count;
2750 int i;
2751 SVGA3dShaderInstToken if_token;
2752
2753 if (count == 0)
2754 return TRUE;
2755
2756 vface = get_vface( emit );
2757 zero = get_zero_immediate( emit );
2758
2759 /* Can't use get_temp() to allocate the color reg as such
2760 * temporaries will be reclaimed after each instruction by the call
2761 * to reset_temp_regs().
2762 */
2763 for (i = 0; i < count; i++) {
2764 color[i] = dst_register( SVGA3DREG_TEMP,
2765 emit->nr_hw_temp++ );
2766
2767 front[i] = emit->input_map[emit->internal_color_idx[i]];
2768
2769 /* Back is always the next input:
2770 */
2771 back[i] = front[i];
2772 back[i].base.num = front[i].base.num + 1;
2773
2774 /* Reassign the input_map to the actual front-face color:
2775 */
2776 emit->input_map[emit->internal_color_idx[i]] = src(color[i]);
2777 }
2778
2779 if_token = inst_token( SVGA3DOP_IFC );
2780
2781 if (emit->key.fkey.front_ccw)
2782 if_token.control = SVGA3DOPCOMP_LT;
2783 else
2784 if_token.control = SVGA3DOPCOMP_GT;
2785
2786 zero = scalar(zero, TGSI_SWIZZLE_X);
2787
2788 if (!(emit_instruction( emit, if_token ) &&
2789 emit_src( emit, vface ) &&
2790 emit_src( emit, zero ) ))
2791 return FALSE;
2792
2793 for (i = 0; i < count; i++) {
2794 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], front[i] ))
2795 return FALSE;
2796 }
2797
2798 if (!(emit_instruction( emit, inst_token( SVGA3DOP_ELSE))))
2799 return FALSE;
2800
2801 for (i = 0; i < count; i++) {
2802 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], back[i] ))
2803 return FALSE;
2804 }
2805
2806 if (!emit_instruction( emit, inst_token( SVGA3DOP_ENDIF ) ))
2807 return FALSE;
2808
2809 return TRUE;
2810 }
2811
2812 /*
2813 0: SETP_GT TEMP, VFACE, 0
2814 where TEMP is a fake frontface register
2815 */
2816 static boolean emit_frontface( struct svga_shader_emitter *emit )
2817 {
2818 struct src_register vface, zero;
2819 SVGA3dShaderDestToken temp;
2820 struct src_register pass, fail;
2821
2822 vface = get_vface( emit );
2823 zero = get_zero_immediate( emit );
2824
2825 /* Can't use get_temp() to allocate the fake frontface reg as such
2826 * temporaries will be reclaimed after each instruction by the call
2827 * to reset_temp_regs().
2828 */
2829 temp = dst_register( SVGA3DREG_TEMP,
2830 emit->nr_hw_temp++ );
2831
2832 if (emit->key.fkey.front_ccw) {
2833 pass = scalar( zero, TGSI_SWIZZLE_X );
2834 fail = scalar( zero, TGSI_SWIZZLE_W );
2835 } else {
2836 pass = scalar( zero, TGSI_SWIZZLE_W );
2837 fail = scalar( zero, TGSI_SWIZZLE_X );
2838 }
2839
2840 if (!emit_conditional(emit, PIPE_FUNC_GREATER,
2841 temp, vface, scalar( zero, TGSI_SWIZZLE_X ),
2842 pass, fail))
2843 return FALSE;
2844
2845    /* Reassign the input_map to the fake front-face register:
2846 */
2847 emit->input_map[emit->internal_frontface_idx] = src(temp);
2848
2849 return TRUE;
2850 }
2851
2852 static INLINE boolean
2853 needs_to_create_zero( struct svga_shader_emitter *emit )
2854 {
2855 int i;
2856
2857 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2858 if (!emit->use_sm30)
2859 return TRUE;
2860
2861 if (emit->key.fkey.light_twoside)
2862 return TRUE;
2863
2864 if (emit->key.fkey.white_fragments)
2865 return TRUE;
2866
2867 if (emit->emit_frontface)
2868 return TRUE;
2869
2870 if (emit->info.opcode_count[TGSI_OPCODE_DST] >= 1 ||
2871 emit->info.opcode_count[TGSI_OPCODE_SSG] >= 1 ||
2872 emit->info.opcode_count[TGSI_OPCODE_LIT] >= 1)
2873 return TRUE;
2874 }
2875
2876 if (emit->unit == PIPE_SHADER_VERTEX) {
2877 if (emit->info.opcode_count[TGSI_OPCODE_CMP] >= 1)
2878 return TRUE;
2879 }
2880
2881 if (emit->info.opcode_count[TGSI_OPCODE_IF] >= 1 ||
2882 emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1 ||
2883 emit->info.opcode_count[TGSI_OPCODE_DDX] >= 1 ||
2884 emit->info.opcode_count[TGSI_OPCODE_DDY] >= 1 ||
2885 emit->info.opcode_count[TGSI_OPCODE_SGE] >= 1 ||
2886 emit->info.opcode_count[TGSI_OPCODE_SGT] >= 1 ||
2887 emit->info.opcode_count[TGSI_OPCODE_SLE] >= 1 ||
2888 emit->info.opcode_count[TGSI_OPCODE_SLT] >= 1 ||
2889 emit->info.opcode_count[TGSI_OPCODE_SNE] >= 1 ||
2890 emit->info.opcode_count[TGSI_OPCODE_SEQ] >= 1 ||
2891 emit->info.opcode_count[TGSI_OPCODE_EXP] >= 1 ||
2892 emit->info.opcode_count[TGSI_OPCODE_LOG] >= 1 ||
2893 emit->info.opcode_count[TGSI_OPCODE_XPD] >= 1 ||
2894 emit->info.opcode_count[TGSI_OPCODE_KILP] >= 1)
2895 return TRUE;
2896
2897 for (i = 0; i < emit->key.fkey.num_textures; i++) {
2898 if (emit->key.fkey.tex[i].compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
2899 return TRUE;
2900 }
2901
2902 return FALSE;
2903 }
2904
2905 static INLINE boolean
2906 needs_to_create_loop_const( struct svga_shader_emitter *emit )
2907 {
2908 return (emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1);
2909 }
2910
2911 static INLINE boolean
2912 needs_to_create_sincos_consts( struct svga_shader_emitter *emit )
2913 {
2914 return !emit->use_sm30 && (emit->info.opcode_count[TGSI_OPCODE_SIN] >= 1 ||
2915 emit->info.opcode_count[TGSI_OPCODE_COS] >= 1 ||
2916 emit->info.opcode_count[TGSI_OPCODE_SCS] >= 1);
2917 }
2918
2919 static INLINE boolean
2920 needs_to_create_arl_consts( struct svga_shader_emitter *emit )
2921 {
2922 return (emit->num_arl_consts > 0);
2923 }
2924
2925 static INLINE boolean
2926 pre_parse_add_indirect( struct svga_shader_emitter *emit,
2927 int num, int current_arl)
2928 {
2929 int i;
2930 assert(num < 0);
2931
2932 for (i = 0; i < emit->num_arl_consts; ++i) {
2933 if (emit->arl_consts[i].arl_num == current_arl)
2934 break;
2935 }
2936 /* new entry */
2937 if (emit->num_arl_consts == i) {
2938 ++emit->num_arl_consts;
2939 }
2940 emit->arl_consts[i].number = (emit->arl_consts[i].number > num) ?
2941 num :
2942 emit->arl_consts[i].number;
2943 emit->arl_consts[i].arl_num = current_arl;
2944 return TRUE;
2945 }
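
/* For example, given
 *
 *   ARL  ADDR[0].x, ...
 *   MOV  dst, CONST[ADDR[0].x - 3]
 *
 * the pre-parse records { arl_num = 1, number = -3 }, keeping the most
 * negative offset seen for each ARL, for later use when the ARL-related
 * constants are created (see create_arl_consts()).
 */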
2946
2947 static boolean
2948 pre_parse_instruction( struct svga_shader_emitter *emit,
2949 const struct tgsi_full_instruction *insn,
2950 int current_arl)
2951 {
2952 if (insn->Src[0].Register.Indirect &&
2953 insn->Src[0].Indirect.File == TGSI_FILE_ADDRESS) {
2954 const struct tgsi_full_src_register *reg = &insn->Src[0];
2955 if (reg->Register.Index < 0) {
2956 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
2957 }
2958 }
2959
2960 if (insn->Src[1].Register.Indirect &&
2961 insn->Src[1].Indirect.File == TGSI_FILE_ADDRESS) {
2962 const struct tgsi_full_src_register *reg = &insn->Src[1];
2963 if (reg->Register.Index < 0) {
2964 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
2965 }
2966 }
2967
2968 if (insn->Src[2].Register.Indirect &&
2969 insn->Src[2].Indirect.File == TGSI_FILE_ADDRESS) {
2970 const struct tgsi_full_src_register *reg = &insn->Src[2];
2971 if (reg->Register.Index < 0) {
2972 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
2973 }
2974 }
2975
2976 return TRUE;
2977 }
2978
2979 static boolean
2980 pre_parse_tokens( struct svga_shader_emitter *emit,
2981 const struct tgsi_token *tokens )
2982 {
2983 struct tgsi_parse_context parse;
2984 int current_arl = 0;
2985
2986 tgsi_parse_init( &parse, tokens );
2987
2988 while (!tgsi_parse_end_of_tokens( &parse )) {
2989 tgsi_parse_token( &parse );
2990 switch (parse.FullToken.Token.Type) {
2991 case TGSI_TOKEN_TYPE_IMMEDIATE:
2992 case TGSI_TOKEN_TYPE_DECLARATION:
2993 break;
2994 case TGSI_TOKEN_TYPE_INSTRUCTION:
2995 if (parse.FullToken.FullInstruction.Instruction.Opcode ==
2996 TGSI_OPCODE_ARL) {
2997 ++current_arl;
2998 }
2999 if (!pre_parse_instruction( emit, &parse.FullToken.FullInstruction,
3000 current_arl ))
3001 return FALSE;
3002 break;
3003 default:
3004 break;
3005 }
3006
3007 }
3008 return TRUE;
3009 }
3010
3011 static boolean svga_shader_emit_helpers( struct svga_shader_emitter *emit )
3012
3013 {
3014 if (needs_to_create_zero( emit )) {
3015 create_zero_immediate( emit );
3016 }
3017 if (needs_to_create_loop_const( emit )) {
3018 create_loop_const( emit );
3019 }
3020 if (needs_to_create_sincos_consts( emit )) {
3021 create_sincos_consts( emit );
3022 }
3023 if (needs_to_create_arl_consts( emit )) {
3024 create_arl_consts( emit );
3025 }
3026
3027 if (emit->unit == PIPE_SHADER_FRAGMENT) {
3028 if (!emit_ps_preamble( emit ))
3029 return FALSE;
3030
3031 if (emit->key.fkey.light_twoside) {
3032 if (!emit_light_twoside( emit ))
3033 return FALSE;
3034 }
3035 if (emit->emit_frontface) {
3036 if (!emit_frontface( emit ))
3037 return FALSE;
3038 }
3039 }
3040
3041 return TRUE;
3042 }
3043
3044 boolean svga_shader_emit_instructions( struct svga_shader_emitter *emit,
3045 const struct tgsi_token *tokens )
3046 {
3047 struct tgsi_parse_context parse;
3048 boolean ret = TRUE;
3049 boolean helpers_emitted = FALSE;
3050 unsigned line_nr = 0;
3051
3052 tgsi_parse_init( &parse, tokens );
3053 emit->internal_imm_count = 0;
3054
3055 if (emit->unit == PIPE_SHADER_VERTEX) {
3056 ret = emit_vs_preamble( emit );
3057 if (!ret)
3058 goto done;
3059 }
3060
3061 pre_parse_tokens(emit, tokens);
3062
3063 while (!tgsi_parse_end_of_tokens( &parse )) {
3064 tgsi_parse_token( &parse );
3065
3066 switch (parse.FullToken.Token.Type) {
3067 case TGSI_TOKEN_TYPE_IMMEDIATE:
3068 ret = svga_emit_immediate( emit, &parse.FullToken.FullImmediate );
3069 if (!ret)
3070 goto done;
3071 break;
3072
3073 case TGSI_TOKEN_TYPE_DECLARATION:
3074 if (emit->use_sm30)
3075 ret = svga_translate_decl_sm30( emit, &parse.FullToken.FullDeclaration );
3076 else
3077 ret = svga_translate_decl_sm20( emit, &parse.FullToken.FullDeclaration );
3078 if (!ret)
3079 goto done;
3080 break;
3081
3082 case TGSI_TOKEN_TYPE_INSTRUCTION:
3083 if (!helpers_emitted) {
3084 if (!svga_shader_emit_helpers( emit ))
3085 goto done;
3086 helpers_emitted = TRUE;
3087 }
3088 ret = svga_emit_instruction( emit,
3089 line_nr++,
3090 &parse.FullToken.FullInstruction );
3091 if (!ret)
3092 goto done;
3093 break;
3094 default:
3095 break;
3096 }
3097
3098 reset_temp_regs( emit );
3099 }
3100
3101 /* Need to terminate the current subroutine. Note that the
3102     * hardware doesn't tolerate a trailing subroutine that isn't
3103     * terminated with RET before the final END.
3104 */
3105 if (!emit->in_main_func) {
3106 ret = emit_instruction( emit, inst_token( SVGA3DOP_RET ) );
3107 if (!ret)
3108 goto done;
3109 }
3110
3111 assert(emit->dynamic_branching_level == 0);
3112
3113 /* Need to terminate the whole shader:
3114 */
3115 ret = emit_instruction( emit, inst_token( SVGA3DOP_END ) );
3116 if (!ret)
3117 goto done;
3118
3119 done:
3120 assert(ret);
3121 tgsi_parse_free( &parse );
3122 return ret;
3123 }
3124