[mesa.git] / src / gallium / drivers / svga / svga_tgsi_insn.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_parse.h"
29 #include "util/u_memory.h"
30
31 #include "svga_tgsi_emit.h"
32 #include "svga_context.h"
33
34
35 static boolean emit_vs_postamble( struct svga_shader_emitter *emit );
36 static boolean emit_ps_postamble( struct svga_shader_emitter *emit );
37
38
39
40
41 static unsigned
42 translate_opcode(
43 uint opcode )
44 {
45 switch (opcode) {
46 case TGSI_OPCODE_ABS: return SVGA3DOP_ABS;
47 case TGSI_OPCODE_ADD: return SVGA3DOP_ADD;
48 case TGSI_OPCODE_BREAKC: return SVGA3DOP_BREAKC;
49 case TGSI_OPCODE_DP2A: return SVGA3DOP_DP2ADD;
50 case TGSI_OPCODE_DP3: return SVGA3DOP_DP3;
51 case TGSI_OPCODE_DP4: return SVGA3DOP_DP4;
52 case TGSI_OPCODE_FRC: return SVGA3DOP_FRC;
53 case TGSI_OPCODE_MAD: return SVGA3DOP_MAD;
54 case TGSI_OPCODE_MAX: return SVGA3DOP_MAX;
55 case TGSI_OPCODE_MIN: return SVGA3DOP_MIN;
56 case TGSI_OPCODE_MOV: return SVGA3DOP_MOV;
57 case TGSI_OPCODE_MUL: return SVGA3DOP_MUL;
58 case TGSI_OPCODE_NOP: return SVGA3DOP_NOP;
59 case TGSI_OPCODE_NRM4: return SVGA3DOP_NRM;
60 case TGSI_OPCODE_SSG: return SVGA3DOP_SGN;
61 default:
62       debug_printf("Unknown opcode %u\n", opcode);
63 assert( 0 );
64 return SVGA3DOP_LAST_INST;
65 }
66 }
67
68
69 static unsigned translate_file( unsigned file )
70 {
71 switch (file) {
72 case TGSI_FILE_TEMPORARY: return SVGA3DREG_TEMP;
73 case TGSI_FILE_INPUT: return SVGA3DREG_INPUT;
74 case TGSI_FILE_OUTPUT: return SVGA3DREG_OUTPUT; /* VS3.0+ only */
75 case TGSI_FILE_IMMEDIATE: return SVGA3DREG_CONST;
76 case TGSI_FILE_CONSTANT: return SVGA3DREG_CONST;
77 case TGSI_FILE_SAMPLER: return SVGA3DREG_SAMPLER;
78 case TGSI_FILE_ADDRESS: return SVGA3DREG_ADDR;
79 default:
80 assert( 0 );
81 return SVGA3DREG_TEMP;
82 }
83 }
84
85
86
87
88
89
90 static SVGA3dShaderDestToken
91 translate_dst_register( struct svga_shader_emitter *emit,
92 const struct tgsi_full_instruction *insn,
93 unsigned idx )
94 {
95 const struct tgsi_full_dst_register *reg = &insn->Dst[idx];
96 SVGA3dShaderDestToken dest;
97
98 switch (reg->Register.File) {
99 case TGSI_FILE_OUTPUT:
100 /* Output registers encode semantic information in their name.
101        * Need to look up a table built at decl time:
102 */
103 dest = emit->output_map[reg->Register.Index];
104 break;
105
106 default:
107 dest = dst_register( translate_file( reg->Register.File ),
108 reg->Register.Index );
109 break;
110 }
111
112 dest.mask = reg->Register.WriteMask;
113 assert(dest.mask);
114
115 if (insn->Instruction.Saturate)
116 dest.dstMod = SVGA3DDSTMOD_SATURATE;
117
118 return dest;
119 }
120
121
122 static struct src_register
123 swizzle( struct src_register src,
124 int x,
125 int y,
126 int z,
127 int w )
128 {
129 x = (src.base.swizzle >> (x * 2)) & 0x3;
130 y = (src.base.swizzle >> (y * 2)) & 0x3;
131 z = (src.base.swizzle >> (z * 2)) & 0x3;
132 w = (src.base.swizzle >> (w * 2)) & 0x3;
133
134 src.base.swizzle = TRANSLATE_SWIZZLE(x,y,z,w);
135
136 return src;
137 }
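
/* Note (illustrative, not from the original source): swizzle() composes
 * with whatever swizzle the source already carries rather than replacing
 * it.  For example, if src already reads .wzyx, then
 *
 *    scalar( src, TGSI_SWIZZLE_X )
 *
 * ends up selecting the W component of the underlying register, because
 * the new selection indexes into the existing src.base.swizzle.
 */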
138
139 static struct src_register
140 scalar( struct src_register src,
141 int comp )
142 {
143 return swizzle( src, comp, comp, comp, comp );
144 }
145
146 static INLINE boolean
147 svga_arl_needs_adjustment( const struct svga_shader_emitter *emit )
148 {
149 int i;
150
151 for (i = 0; i < emit->num_arl_consts; ++i) {
152 if (emit->arl_consts[i].arl_num == emit->current_arl)
153 return TRUE;
154 }
155 return FALSE;
156 }
157
158 static INLINE int
159 svga_arl_adjustment( const struct svga_shader_emitter *emit )
160 {
161 int i;
162
163 for (i = 0; i < emit->num_arl_consts; ++i) {
164 if (emit->arl_consts[i].arl_num == emit->current_arl)
165 return emit->arl_consts[i].number;
166 }
167 return 0;
168 }
169
170 static struct src_register
171 translate_src_register( const struct svga_shader_emitter *emit,
172 const struct tgsi_full_src_register *reg )
173 {
174 struct src_register src;
175
176 switch (reg->Register.File) {
177 case TGSI_FILE_INPUT:
178 /* Input registers are referred to by their semantic name rather
179      * than by index. Use the mapping built up from the decls:
180 */
181 src = emit->input_map[reg->Register.Index];
182 break;
183
184 case TGSI_FILE_IMMEDIATE:
185 /* Immediates are appended after TGSI constants in the D3D
186 * constant buffer.
187 */
188 src = src_register( translate_file( reg->Register.File ),
189 reg->Register.Index +
190 emit->imm_start );
191 break;
192
193 default:
194 src = src_register( translate_file( reg->Register.File ),
195 reg->Register.Index );
196
197 break;
198 }
199
200    /* Indirect addressing (for constant buffer lookups only)
201 */
202 if (reg->Register.Indirect)
203 {
204 /* we shift the offset towards the minimum */
205 if (svga_arl_needs_adjustment( emit )) {
206 src.base.num -= svga_arl_adjustment( emit );
207 }
208 src.base.relAddr = 1;
209
210 /* Not really sure what should go in the second token:
211 */
212 src.indirect = src_token( SVGA3DREG_ADDR,
213 reg->Indirect.Index );
214
215 src.indirect.swizzle = SWIZZLE_XXXX;
216 }
217
218 src = swizzle( src,
219 reg->Register.SwizzleX,
220 reg->Register.SwizzleY,
221 reg->Register.SwizzleZ,
222 reg->Register.SwizzleW );
223
224 /* src.mod isn't a bitfield, unfortunately:
225 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
226 */
227 if (reg->Register.Absolute) {
228 if (reg->Register.Negate)
229 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
230 else
231 src.base.srcMod = SVGA3DSRCMOD_ABS;
232 }
233 else {
234 if (reg->Register.Negate)
235 src.base.srcMod = SVGA3DSRCMOD_NEG;
236 else
237 src.base.srcMod = SVGA3DSRCMOD_NONE;
238 }
239
240 return src;
241 }
242
243
244 /*
245  * Get an internal temporary register, allocated after the hardware temps.
246 */
247 static INLINE SVGA3dShaderDestToken
248 get_temp( struct svga_shader_emitter *emit )
249 {
250 int i = emit->nr_hw_temp + emit->internal_temp_count++;
251
252 return dst_register( SVGA3DREG_TEMP, i );
253 }
254
255 /* Release a single temp. Currently only effective if it was the last
256 * allocated temp, otherwise release will be delayed until the next
257 * call to reset_temp_regs().
258 */
259 static INLINE void
260 release_temp( struct svga_shader_emitter *emit,
261 SVGA3dShaderDestToken temp )
262 {
263 if (temp.num == emit->internal_temp_count - 1)
264 emit->internal_temp_count--;
265 }
266
267 static void reset_temp_regs( struct svga_shader_emitter *emit )
268 {
269 emit->internal_temp_count = 0;
270 }
271
272
273 static boolean submit_op0( struct svga_shader_emitter *emit,
274 SVGA3dShaderInstToken inst,
275 SVGA3dShaderDestToken dest )
276 {
277 return (emit_instruction( emit, inst ) &&
278 emit_dst( emit, dest ));
279 }
280
281 static boolean submit_op1( struct svga_shader_emitter *emit,
282 SVGA3dShaderInstToken inst,
283 SVGA3dShaderDestToken dest,
284 struct src_register src0 )
285 {
286 return emit_op1( emit, inst, dest, src0 );
287 }
288
289
290 /* SVGA shaders may not refer to >1 constant register in a single
291 * instruction. This function checks for that usage and inserts a
292 * move to temporary if detected.
293 *
294 * The same applies to input registers -- at most a single input
295 * register may be read by any instruction.
296 */
297 static boolean submit_op2( struct svga_shader_emitter *emit,
298 SVGA3dShaderInstToken inst,
299 SVGA3dShaderDestToken dest,
300 struct src_register src0,
301 struct src_register src1 )
302 {
303 SVGA3dShaderDestToken temp;
304 SVGA3dShaderRegType type0, type1;
305 boolean need_temp = FALSE;
306
307 temp.value = 0;
308 type0 = SVGA3dShaderGetRegType( src0.base.value );
309 type1 = SVGA3dShaderGetRegType( src1.base.value );
310
311 if (type0 == SVGA3DREG_CONST &&
312 type1 == SVGA3DREG_CONST &&
313 src0.base.num != src1.base.num)
314 need_temp = TRUE;
315
316 if (type0 == SVGA3DREG_INPUT &&
317 type1 == SVGA3DREG_INPUT &&
318 src0.base.num != src1.base.num)
319 need_temp = TRUE;
320
321 if (need_temp)
322 {
323 temp = get_temp( emit );
324
325 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), temp, src0 ))
326 return FALSE;
327
328 src0 = src( temp );
329 }
330
331 if (!emit_op2( emit, inst, dest, src0, src1 ))
332 return FALSE;
333
334 if (need_temp)
335 release_temp( emit, temp );
336
337 return TRUE;
338 }
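
/* Worked example (illustrative only): a TGSI instruction such as
 *
 *    ADD TEMP[0], CONST[3], CONST[7]
 *
 * would read two different constant registers in a single SVGA3D
 * instruction, so submit_op2() rewrites it as
 *
 *    MOV  t,  c3
 *    ADD  r0, t, c7
 *
 * where t is an internal temporary obtained from get_temp().  The same
 * fixup is applied when two different input registers are read.
 */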
339
340
341 /* SVGA shaders may not refer to >1 constant register in a single
342 * instruction. This function checks for that usage and inserts a
343 * move to temporary if detected.
344 */
345 static boolean submit_op3( struct svga_shader_emitter *emit,
346 SVGA3dShaderInstToken inst,
347 SVGA3dShaderDestToken dest,
348 struct src_register src0,
349 struct src_register src1,
350 struct src_register src2 )
351 {
352 SVGA3dShaderDestToken temp0;
353 SVGA3dShaderDestToken temp1;
354 boolean need_temp0 = FALSE;
355 boolean need_temp1 = FALSE;
356 SVGA3dShaderRegType type0, type1, type2;
357
358 temp0.value = 0;
359 temp1.value = 0;
360 type0 = SVGA3dShaderGetRegType( src0.base.value );
361 type1 = SVGA3dShaderGetRegType( src1.base.value );
362 type2 = SVGA3dShaderGetRegType( src2.base.value );
363
364 if (inst.op != SVGA3DOP_SINCOS) {
365 if (type0 == SVGA3DREG_CONST &&
366 ((type1 == SVGA3DREG_CONST && src0.base.num != src1.base.num) ||
367 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
368 need_temp0 = TRUE;
369
370 if (type1 == SVGA3DREG_CONST &&
371 (type2 == SVGA3DREG_CONST && src1.base.num != src2.base.num))
372 need_temp1 = TRUE;
373 }
374
375 if (type0 == SVGA3DREG_INPUT &&
376 ((type1 == SVGA3DREG_INPUT && src0.base.num != src1.base.num) ||
377 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
378 need_temp0 = TRUE;
379
380 if (type1 == SVGA3DREG_INPUT &&
381 (type2 == SVGA3DREG_INPUT && src1.base.num != src2.base.num))
382 need_temp1 = TRUE;
383
384 if (need_temp0)
385 {
386 temp0 = get_temp( emit );
387
388 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), temp0, src0 ))
389 return FALSE;
390
391 src0 = src( temp0 );
392 }
393
394 if (need_temp1)
395 {
396 temp1 = get_temp( emit );
397
398 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), temp1, src1 ))
399 return FALSE;
400
401 src1 = src( temp1 );
402 }
403
404 if (!emit_op3( emit, inst, dest, src0, src1, src2 ))
405 return FALSE;
406
407 if (need_temp1)
408 release_temp( emit, temp1 );
409 if (need_temp0)
410 release_temp( emit, temp0 );
411 return TRUE;
412 }
413
414
415
416
417 /* SVGA shaders may not refer to >1 constant register in a single
418 * instruction. This function checks for that usage and inserts a
419 * move to temporary if detected.
420 */
421 static boolean submit_op4( struct svga_shader_emitter *emit,
422 SVGA3dShaderInstToken inst,
423 SVGA3dShaderDestToken dest,
424 struct src_register src0,
425 struct src_register src1,
426 struct src_register src2,
427 struct src_register src3)
428 {
429 SVGA3dShaderDestToken temp0;
430 SVGA3dShaderDestToken temp3;
431 boolean need_temp0 = FALSE;
432 boolean need_temp3 = FALSE;
433 SVGA3dShaderRegType type0, type1, type2, type3;
434
435 temp0.value = 0;
436 temp3.value = 0;
437 type0 = SVGA3dShaderGetRegType( src0.base.value );
438 type1 = SVGA3dShaderGetRegType( src1.base.value );
439 type2 = SVGA3dShaderGetRegType( src2.base.value );
440    type3 = SVGA3dShaderGetRegType( src3.base.value );
441
442    /* Make life a little easier - this is only used by the TXD
443     * instruction, which always takes its sampler in one slot, so at
444     * least that operand can never be a constant or input reg:
445     */
446 assert(type1 == SVGA3DREG_SAMPLER);
447
448 if (type0 == SVGA3DREG_CONST &&
449 ((type3 == SVGA3DREG_CONST && src0.base.num != src3.base.num) ||
450 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
451 need_temp0 = TRUE;
452
453 if (type3 == SVGA3DREG_CONST &&
454 (type2 == SVGA3DREG_CONST && src3.base.num != src2.base.num))
455 need_temp3 = TRUE;
456
457 if (type0 == SVGA3DREG_INPUT &&
458 ((type3 == SVGA3DREG_INPUT && src0.base.num != src3.base.num) ||
459 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
460 need_temp0 = TRUE;
461
462 if (type3 == SVGA3DREG_INPUT &&
463 (type2 == SVGA3DREG_INPUT && src3.base.num != src2.base.num))
464 need_temp3 = TRUE;
465
466 if (need_temp0)
467 {
468 temp0 = get_temp( emit );
469
470 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), temp0, src0 ))
471 return FALSE;
472
473 src0 = src( temp0 );
474 }
475
476 if (need_temp3)
477 {
478 temp3 = get_temp( emit );
479
480 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), temp3, src3 ))
481 return FALSE;
482
483 src3 = src( temp3 );
484 }
485
486 if (!emit_op4( emit, inst, dest, src0, src1, src2, src3 ))
487 return FALSE;
488
489 if (need_temp3)
490 release_temp( emit, temp3 );
491 if (need_temp0)
492 release_temp( emit, temp0 );
493 return TRUE;
494 }
495
496
497 static boolean emit_def_const( struct svga_shader_emitter *emit,
498 SVGA3dShaderConstType type,
499 unsigned idx,
500 float a,
501 float b,
502 float c,
503 float d )
504 {
505 SVGA3DOpDefArgs def;
506 SVGA3dShaderInstToken opcode;
507
508 switch (type) {
509 case SVGA3D_CONST_TYPE_FLOAT:
510 opcode = inst_token( SVGA3DOP_DEF );
511 def.dst = dst_register( SVGA3DREG_CONST, idx );
512 def.constValues[0] = a;
513 def.constValues[1] = b;
514 def.constValues[2] = c;
515 def.constValues[3] = d;
516 break;
517 case SVGA3D_CONST_TYPE_INT:
518 opcode = inst_token( SVGA3DOP_DEFI );
519 def.dst = dst_register( SVGA3DREG_CONSTINT, idx );
520 def.constIValues[0] = (int)a;
521 def.constIValues[1] = (int)b;
522 def.constIValues[2] = (int)c;
523 def.constIValues[3] = (int)d;
524 break;
525 default:
526 assert(0);
527 opcode = inst_token( SVGA3DOP_NOP );
528 break;
529 }
530
531 if (!emit_instruction(emit, opcode) ||
532 !svga_shader_emit_dwords( emit, def.values, Elements(def.values)))
533 return FALSE;
534
535 return TRUE;
536 }
537
538 static INLINE boolean
539 create_zero_immediate( struct svga_shader_emitter *emit )
540 {
541 unsigned idx = emit->nr_hw_const++;
542
543 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
544 idx, 0, 0, 0, 1 ))
545 return FALSE;
546
547 emit->zero_immediate_idx = idx;
548 emit->created_zero_immediate = TRUE;
549
550 return TRUE;
551 }
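
/* Usage note (illustrative): the immediate defined above is
 *
 *    def c<idx>, 0.0, 0.0, 0.0, 1.0
 *
 * so later code pulls 0.0 out of it with
 * scalar( get_zero_immediate( emit ), TGSI_SWIZZLE_X ) and 1.0 with
 * scalar( ..., TGSI_SWIZZLE_W ).
 */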
552
553 static INLINE boolean
554 create_loop_const( struct svga_shader_emitter *emit )
555 {
556 unsigned idx = emit->nr_hw_const++;
557
558 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_INT, idx,
559 255, /* iteration count */
560 0, /* initial value */
561 1, /* step size */
562 0 /* not used, must be 0 */))
563 return FALSE;
564
565 emit->loop_const_idx = idx;
566 emit->created_loop_const = TRUE;
567
568 return TRUE;
569 }
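
/* Usage note (illustrative): the integer constant defined above is
 *
 *    defi i<idx>, 255, 0, 1, 0
 *
 * and is consumed by emit_bgnloop2() as the second operand of the LOOP
 * instruction, i.e. up to 255 iterations starting at 0 with a step of 1.
 */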
570
571 static INLINE boolean
572 create_sincos_consts( struct svga_shader_emitter *emit )
573 {
574 unsigned idx = emit->nr_hw_const++;
575
576 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
577 -1.5500992e-006f,
578 -2.1701389e-005f,
579 0.0026041667f,
580 0.00026041668f ))
581 return FALSE;
582
583 emit->sincos_consts_idx = idx;
584 idx = emit->nr_hw_const++;
585
586 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
587 -0.020833334f,
588 -0.12500000f,
589 1.0f,
590 0.50000000f ))
591 return FALSE;
592
593 emit->created_sincos_consts = TRUE;
594
595 return TRUE;
596 }
597
598 static INLINE boolean
599 create_arl_consts( struct svga_shader_emitter *emit )
600 {
601 int i;
602
603 for (i = 0; i < emit->num_arl_consts; i += 4) {
604 int j;
605 unsigned idx = emit->nr_hw_const++;
606 float vals[4];
607 for (j = 0; j < 4 && (j + i) < emit->num_arl_consts; ++j) {
608 vals[j] = emit->arl_consts[i + j].number;
609 emit->arl_consts[i + j].idx = idx;
610 switch (j) {
611 case 0:
612 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_X;
613 break;
614 case 1:
615 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Y;
616 break;
617 case 2:
618 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_Z;
619 break;
620 case 3:
621 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_W;
622 break;
623 }
624 }
625 while (j < 4)
626 vals[j++] = 0;
627
628 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
629 vals[0], vals[1],
630 vals[2], vals[3]))
631 return FALSE;
632 }
633
634 return TRUE;
635 }
636
637 static INLINE struct src_register
638 get_vface( struct svga_shader_emitter *emit )
639 {
640 assert(emit->emitted_vface);
641 return src_register(SVGA3DREG_MISCTYPE,
642 SVGA3DMISCREG_FACE);
643 }
644
645 /* returns {0, 0, 0, 1} immediate */
646 static INLINE struct src_register
647 get_zero_immediate( struct svga_shader_emitter *emit )
648 {
649 assert(emit->created_zero_immediate);
650 assert(emit->zero_immediate_idx >= 0);
651 return src_register( SVGA3DREG_CONST,
652 emit->zero_immediate_idx );
653 }
654
655 /* returns the loop const */
656 static INLINE struct src_register
657 get_loop_const( struct svga_shader_emitter *emit )
658 {
659 assert(emit->created_loop_const);
660 assert(emit->loop_const_idx >= 0);
661 return src_register( SVGA3DREG_CONSTINT,
662 emit->loop_const_idx );
663 }
664
665 /* returns a sincos const */
666 static INLINE struct src_register
667 get_sincos_const( struct svga_shader_emitter *emit,
668 unsigned index )
669 {
670 assert(emit->created_sincos_consts);
671 assert(emit->sincos_consts_idx >= 0);
672 assert(index == 0 || index == 1);
673 return src_register( SVGA3DREG_CONST,
674 emit->sincos_consts_idx + index );
675 }
676
677 static INLINE struct src_register
678 get_fake_arl_const( struct svga_shader_emitter *emit )
679 {
680 struct src_register reg;
681 int idx = 0, swizzle = 0, i;
682
683 for (i = 0; i < emit->num_arl_consts; ++ i) {
684 if (emit->arl_consts[i].arl_num == emit->current_arl) {
685 idx = emit->arl_consts[i].idx;
686 swizzle = emit->arl_consts[i].swizzle;
687 }
688 }
689
690 reg = src_register( SVGA3DREG_CONST, idx );
691 return scalar(reg, swizzle);
692 }
693
694 static INLINE struct src_register
695 get_tex_dimensions( struct svga_shader_emitter *emit, int sampler_num )
696 {
697 int idx;
698 struct src_register reg;
699
700 /* the width/height indexes start right after constants */
701 idx = emit->key.fkey.tex[sampler_num].width_height_idx +
702 emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
703
704 reg = src_register( SVGA3DREG_CONST, idx );
705 return reg;
706 }
707
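/* Emulated ARL (illustrative summary): the constant recorded for this ARL
 * is added back to the source before the MOVA, mirroring the offset that
 * translate_src_register() subtracts from relatively-addressed constants.
 * The emitted sequence is roughly:
 *
 *    MOV  TMP, SRC0
 *    ADD  TMP, TMP, ARL_CONST
 *    MOVA DST, TMP           (reusing SRC0's original swizzle)
 */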
708 static boolean emit_fake_arl(struct svga_shader_emitter *emit,
709 const struct tgsi_full_instruction *insn)
710 {
711 const struct src_register src0 = translate_src_register(
712 emit, &insn->Src[0] );
713 struct src_register src1 = get_fake_arl_const( emit );
714 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
715 SVGA3dShaderDestToken tmp = get_temp( emit );
716
717 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
718 return FALSE;
719
720 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), tmp, src( tmp ),
721 src1))
722 return FALSE;
723
724 /* replicate the original swizzle */
725 src1 = src(tmp);
726 src1.base.swizzle = src0.base.swizzle;
727
728 return submit_op1( emit, inst_token( SVGA3DOP_MOVA ),
729 dst, src1 );
730 }
731
732 static boolean emit_if(struct svga_shader_emitter *emit,
733 const struct tgsi_full_instruction *insn)
734 {
735 const struct src_register src = translate_src_register(
736 emit, &insn->Src[0] );
737 struct src_register zero = get_zero_immediate( emit );
738 SVGA3dShaderInstToken if_token = inst_token( SVGA3DOP_IFC );
739
740 if_token.control = SVGA3DOPCOMPC_NE;
741 zero = scalar(zero, TGSI_SWIZZLE_X);
742
743 emit->dynamic_branching_level++;
744
745 return (emit_instruction( emit, if_token ) &&
746 emit_src( emit, src ) &&
747 emit_src( emit, zero ) );
748 }
749
750 static boolean emit_endif(struct svga_shader_emitter *emit,
751 const struct tgsi_full_instruction *insn)
752 {
753 emit->dynamic_branching_level--;
754
755 return (emit_instruction( emit,
756 inst_token( SVGA3DOP_ENDIF )));
757 }
758
759 static boolean emit_else(struct svga_shader_emitter *emit,
760 const struct tgsi_full_instruction *insn)
761 {
762 return (emit_instruction( emit,
763 inst_token( SVGA3DOP_ELSE )));
764 }
765
766 /* Translate the following TGSI FLR instruction.
767 * FLR DST, SRC
768 * To the following SVGA3D instruction sequence.
769 * FRC TMP, SRC
770 * SUB DST, SRC, TMP
771 */
772 static boolean emit_floor(struct svga_shader_emitter *emit,
773 const struct tgsi_full_instruction *insn )
774 {
775 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
776 const struct src_register src0 = translate_src_register(
777 emit, &insn->Src[0] );
778 SVGA3dShaderDestToken temp = get_temp( emit );
779
780 /* FRC TMP, SRC */
781 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ), temp, src0 ))
782 return FALSE;
783
784 /* SUB DST, SRC, TMP */
785 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src0,
786 negate( src( temp ) ) ))
787 return FALSE;
788
789 return TRUE;
790 }
791
792
793 /* Translate the following TGSI CMP instruction.
794 * CMP DST, SRC0, SRC1, SRC2
795 * To the following SVGA3D instruction sequence.
796 * CMP DST, SRC0, SRC2, SRC1
797 */
798 static boolean emit_cmp(struct svga_shader_emitter *emit,
799 const struct tgsi_full_instruction *insn )
800 {
801 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
802 const struct src_register src0 = translate_src_register(
803 emit, &insn->Src[0] );
804 const struct src_register src1 = translate_src_register(
805 emit, &insn->Src[1] );
806 const struct src_register src2 = translate_src_register(
807 emit, &insn->Src[2] );
808
809 /* CMP DST, SRC0, SRC2, SRC1 */
810 return submit_op3( emit, inst_token( SVGA3DOP_CMP ), dst, src0, src2, src1);
811 }
812
813
814
815 /* Translate the following TGSI DIV instruction.
816 * DIV DST.xy, SRC0, SRC1
817 * To the following SVGA3D instruction sequence.
818 * RCP TMP.x, SRC1.xxxx
819 * RCP TMP.y, SRC1.yyyy
820 * MUL DST.xy, SRC0, TMP
821 */
822 static boolean emit_div(struct svga_shader_emitter *emit,
823 const struct tgsi_full_instruction *insn )
824 {
825 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
826 const struct src_register src0 = translate_src_register(
827 emit, &insn->Src[0] );
828 const struct src_register src1 = translate_src_register(
829 emit, &insn->Src[1] );
830 SVGA3dShaderDestToken temp = get_temp( emit );
831 int i;
832
833    /* For each enabled element, perform an RCP instruction.  Note that
834 * RCP is scalar in SVGA3D:
835 */
836 for (i = 0; i < 4; i++) {
837 unsigned channel = 1 << i;
838 if (dst.mask & channel) {
839 /* RCP TMP.?, SRC1.???? */
840 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
841 writemask(temp, channel),
842 scalar(src1, i) ))
843 return FALSE;
844 }
845 }
846
847 /* Then multiply them out with a single mul:
848 *
849 * MUL DST, SRC0, TMP
850 */
851 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst, src0,
852 src( temp ) ))
853 return FALSE;
854
855 return TRUE;
856 }
857
858 /* Translate the following TGSI DP2 instruction.
859 * DP2 DST, SRC1, SRC2
860 * To the following SVGA3D instruction sequence.
861 * MUL TMP, SRC1, SRC2
862 * ADD DST, TMP.xxxx, TMP.yyyy
863 */
864 static boolean emit_dp2(struct svga_shader_emitter *emit,
865 const struct tgsi_full_instruction *insn )
866 {
867 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
868 const struct src_register src0 = translate_src_register(
869 emit, &insn->Src[0] );
870 const struct src_register src1 = translate_src_register(
871 emit, &insn->Src[1] );
872 SVGA3dShaderDestToken temp = get_temp( emit );
873 struct src_register temp_src0, temp_src1;
874
875 /* MUL TMP, SRC1, SRC2 */
876 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), temp, src0, src1 ))
877 return FALSE;
878
879 temp_src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
880 temp_src1 = scalar(src( temp ), TGSI_SWIZZLE_Y);
881
882 /* ADD DST, TMP.xxxx, TMP.yyyy */
883 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
884 temp_src0, temp_src1 ))
885 return FALSE;
886
887 return TRUE;
888 }
889
890
891 /* Translate the following TGSI DPH instruction.
892 * DPH DST, SRC1, SRC2
893 * To the following SVGA3D instruction sequence.
894 * DP3 TMP, SRC1, SRC2
895 * ADD DST, TMP, SRC2.wwww
896 */
897 static boolean emit_dph(struct svga_shader_emitter *emit,
898 const struct tgsi_full_instruction *insn )
899 {
900 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
901 const struct src_register src0 = translate_src_register(
902 emit, &insn->Src[0] );
903 struct src_register src1 = translate_src_register(
904 emit, &insn->Src[1] );
905 SVGA3dShaderDestToken temp = get_temp( emit );
906
907 /* DP3 TMP, SRC1, SRC2 */
908 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src1 ))
909 return FALSE;
910
911 src1 = scalar(src1, TGSI_SWIZZLE_W);
912
913 /* ADD DST, TMP, SRC2.wwww */
914 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
915 src( temp ), src1 ))
916 return FALSE;
917
918 return TRUE;
919 }
920
921 /* Translate the following TGSI NRM instruction.
922 * NRM DST, SRC
923 * To the following SVGA3D instruction sequence.
924 * DP3 TMP, SRC, SRC
925 * RSQ TMP, TMP
926 * MUL DST, SRC, TMP
927 */
928 static boolean emit_nrm(struct svga_shader_emitter *emit,
929 const struct tgsi_full_instruction *insn )
930 {
931 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
932 const struct src_register src0 = translate_src_register(
933 emit, &insn->Src[0] );
934 SVGA3dShaderDestToken temp = get_temp( emit );
935
936 /* DP3 TMP, SRC, SRC */
937 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src0 ))
938 return FALSE;
939
940 /* RSQ TMP, TMP */
941 if (!submit_op1( emit, inst_token( SVGA3DOP_RSQ ), temp, src( temp )))
942 return FALSE;
943
944 /* MUL DST, SRC, TMP */
945 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst,
946 src0, src( temp )))
947 return FALSE;
948
949 return TRUE;
950
951 }
952
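/* Emit a SINCOS (illustrative summary): on SM3.0 this is the plain
 * one-operand form; on SM2.0 the macro-op additionally takes the two
 * coefficient constants set up by create_sincos_consts().  Only the
 * x (cos) and y (sin) components of dst are defined afterwards, which is
 * why the callers below write through a temporary with an .xy or
 * narrower writemask.
 */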
953 static boolean do_emit_sincos(struct svga_shader_emitter *emit,
954 SVGA3dShaderDestToken dst,
955 struct src_register src0)
956 {
957 src0 = scalar(src0, TGSI_SWIZZLE_X);
958
959 if (emit->use_sm30) {
960 return submit_op1( emit, inst_token( SVGA3DOP_SINCOS ),
961 dst, src0 );
962 } else {
963 struct src_register const1 = get_sincos_const( emit, 0 );
964 struct src_register const2 = get_sincos_const( emit, 1 );
965
966 return submit_op3( emit, inst_token( SVGA3DOP_SINCOS ),
967 dst, src0, const1, const2 );
968 }
969 }
970
971 static boolean emit_sincos(struct svga_shader_emitter *emit,
972 const struct tgsi_full_instruction *insn)
973 {
974 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
975 struct src_register src0 = translate_src_register(
976 emit, &insn->Src[0] );
977 SVGA3dShaderDestToken temp = get_temp( emit );
978
979 /* SCS TMP SRC */
980 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_XY), src0 ))
981 return FALSE;
982
983 /* MOV DST TMP */
984 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src( temp ) ))
985 return FALSE;
986
987 return TRUE;
988 }
989
990 /*
991 * SCS TMP SRC
992 * MOV DST TMP.yyyy
993 */
994 static boolean emit_sin(struct svga_shader_emitter *emit,
995 const struct tgsi_full_instruction *insn )
996 {
997 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
998 struct src_register src0 = translate_src_register(
999 emit, &insn->Src[0] );
1000 SVGA3dShaderDestToken temp = get_temp( emit );
1001
1002 /* SCS TMP SRC */
1003 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_Y), src0))
1004 return FALSE;
1005
1006 src0 = scalar(src( temp ), TGSI_SWIZZLE_Y);
1007
1008 /* MOV DST TMP.yyyy */
1009 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1010 return FALSE;
1011
1012 return TRUE;
1013 }
1014
1015 /*
1016 * SCS TMP SRC
1017 * MOV DST TMP.xxxx
1018 */
1019 static boolean emit_cos(struct svga_shader_emitter *emit,
1020 const struct tgsi_full_instruction *insn )
1021 {
1022 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1023 struct src_register src0 = translate_src_register(
1024 emit, &insn->Src[0] );
1025 SVGA3dShaderDestToken temp = get_temp( emit );
1026
1027 /* SCS TMP SRC */
1028 if (!do_emit_sincos( emit, writemask(temp, TGSI_WRITEMASK_X), src0 ))
1029 return FALSE;
1030
1031 src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
1032
1033 /* MOV DST TMP.xxxx */
1034 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
1035 return FALSE;
1036
1037 return TRUE;
1038 }
1039
1040
1041 /*
1042  * ADD DST, SRC0, negate(SRC1)
1043 */
1044 static boolean emit_sub(struct svga_shader_emitter *emit,
1045 const struct tgsi_full_instruction *insn)
1046 {
1047 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1048 struct src_register src0 = translate_src_register(
1049 emit, &insn->Src[0] );
1050 struct src_register src1 = translate_src_register(
1051 emit, &insn->Src[1] );
1052
1053 src1 = negate(src1);
1054
1055 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
1056 src0, src1 ))
1057 return FALSE;
1058
1059 return TRUE;
1060 }
1061
1062
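/* TGSI KIL maps to TEXKILL, which discards the fragment if any of the
 * x, y or z components of its register operand are negative.  TEXKILL
 * takes a dest-style operand with no swizzle or source modifiers, so
 * anything other than a plain, unmodified temporary is copied to a temp
 * first.
 */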
1063 static boolean emit_kil(struct svga_shader_emitter *emit,
1064 const struct tgsi_full_instruction *insn )
1065 {
1066 SVGA3dShaderInstToken inst;
1067 const struct tgsi_full_src_register *reg = &insn->Src[0];
1068 struct src_register src0;
1069
1070 inst = inst_token( SVGA3DOP_TEXKILL );
1071 src0 = translate_src_register( emit, reg );
1072
1073 if (reg->Register.Absolute ||
1074 reg->Register.Negate ||
1075 reg->Register.Indirect ||
1076 reg->Register.SwizzleX != 0 ||
1077 reg->Register.SwizzleY != 1 ||
1078 reg->Register.SwizzleZ != 2 ||
1079 reg->Register.File != TGSI_FILE_TEMPORARY)
1080 {
1081 SVGA3dShaderDestToken temp = get_temp( emit );
1082
1083 submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp, src0 );
1084 src0 = src( temp );
1085 }
1086
1087 return submit_op0( emit, inst, dst(src0) );
1088 }
1089
1090
1091 /* The Mesa state tracker always emits KILP as an unconditional
1092  * KIL. */
1093 static boolean emit_kilp(struct svga_shader_emitter *emit,
1094 const struct tgsi_full_instruction *insn )
1095 {
1096 SVGA3dShaderInstToken inst;
1097 SVGA3dShaderDestToken temp;
1098 struct src_register one = scalar( get_zero_immediate( emit ),
1099 TGSI_SWIZZLE_W );
1100
1101 inst = inst_token( SVGA3DOP_TEXKILL );
1102
1103    /* texkill doesn't allow negation on the operand, so move the
1104     * negated {1} into a temp register first */
1105 temp = get_temp( emit );
1106 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp,
1107 negate( one ) ))
1108 return FALSE;
1109
1110 return submit_op0( emit, inst, temp );
1111 }
1112
1113 /* Implement conditionals by initializing destination reg to 'fail',
1114 * then set predicate reg with UFOP_SETP, then move 'pass' to dest
1115 * based on predicate reg.
1116 *
1117 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1118 * MOV dst, fail
1119 * MOV dst, pass, p0
1120 */
1121 static boolean
1122 emit_conditional(struct svga_shader_emitter *emit,
1123 unsigned compare_func,
1124 SVGA3dShaderDestToken dst,
1125 struct src_register src0,
1126 struct src_register src1,
1127 struct src_register pass,
1128 struct src_register fail)
1129 {
1130 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1131 SVGA3dShaderInstToken setp_token, mov_token;
1132 setp_token = inst_token( SVGA3DOP_SETP );
1133
1134 switch (compare_func) {
1135 case PIPE_FUNC_NEVER:
1136 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1137 dst, fail );
1138 break;
1139 case PIPE_FUNC_LESS:
1140 setp_token.control = SVGA3DOPCOMP_LT;
1141 break;
1142 case PIPE_FUNC_EQUAL:
1143 setp_token.control = SVGA3DOPCOMP_EQ;
1144 break;
1145 case PIPE_FUNC_LEQUAL:
1146 setp_token.control = SVGA3DOPCOMP_LE;
1147 break;
1148 case PIPE_FUNC_GREATER:
1149 setp_token.control = SVGA3DOPCOMP_GT;
1150 break;
1151 case PIPE_FUNC_NOTEQUAL:
1152 setp_token.control = SVGA3DOPCOMPC_NE;
1153 break;
1154 case PIPE_FUNC_GEQUAL:
1155 setp_token.control = SVGA3DOPCOMP_GE;
1156 break;
1157 case PIPE_FUNC_ALWAYS:
1158 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1159 dst, pass );
1160 break;
1161 }
1162
1163 /* SETP src0, COMPOP, src1 */
1164 if (!submit_op2( emit, setp_token, pred_reg,
1165 src0, src1 ))
1166 return FALSE;
1167
1168 mov_token = inst_token( SVGA3DOP_MOV );
1169
1170 /* MOV dst, fail */
1171 if (!submit_op1( emit, mov_token, dst,
1172 fail ))
1173 return FALSE;
1174
1175 /* MOV dst, pass (predicated)
1176 *
1177 * Note that the predicate reg (and possible modifiers) is passed
1178 * as the first source argument.
1179 */
1180 mov_token.predicated = 1;
1181 if (!submit_op2( emit, mov_token, dst,
1182 src( pred_reg ), pass ))
1183 return FALSE;
1184
1185 return TRUE;
1186 }
1187
1188
1189 static boolean
1190 emit_select(struct svga_shader_emitter *emit,
1191 unsigned compare_func,
1192 SVGA3dShaderDestToken dst,
1193 struct src_register src0,
1194 struct src_register src1 )
1195 {
1196 /* There are some SVGA instructions which implement some selects
1197 * directly, but they are only available in the vertex shader.
1198 */
1199 if (emit->unit == PIPE_SHADER_VERTEX) {
1200 switch (compare_func) {
1201 case PIPE_FUNC_GEQUAL:
1202 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src0, src1 );
1203 case PIPE_FUNC_LEQUAL:
1204 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src1, src0 );
1205 case PIPE_FUNC_GREATER:
1206 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src1, src0 );
1207 case PIPE_FUNC_LESS:
1208 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src0, src1 );
1209 default:
1210 break;
1211 }
1212 }
1213
1214
1215 /* Otherwise, need to use the setp approach:
1216 */
1217 {
1218 struct src_register one, zero;
1219 /* zero immediate is 0,0,0,1 */
1220 zero = get_zero_immediate( emit );
1221 one = scalar( zero, TGSI_SWIZZLE_W );
1222 zero = scalar( zero, TGSI_SWIZZLE_X );
1223
1224 return emit_conditional(
1225 emit,
1226 compare_func,
1227 dst,
1228 src0,
1229 src1,
1230 one, zero);
1231 }
1232 }
1233
1234
1235 static boolean emit_select_op(struct svga_shader_emitter *emit,
1236 unsigned compare,
1237 const struct tgsi_full_instruction *insn)
1238 {
1239 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1240 struct src_register src0 = translate_src_register(
1241 emit, &insn->Src[0] );
1242 struct src_register src1 = translate_src_register(
1243 emit, &insn->Src[1] );
1244
1245 return emit_select( emit, compare, dst, src0, src1 );
1246 }
1247
1248
1249 /* Translate texture instructions to SVGA3D representation.
1250 */
1251 static boolean emit_tex2(struct svga_shader_emitter *emit,
1252 const struct tgsi_full_instruction *insn,
1253 SVGA3dShaderDestToken dst )
1254 {
1255 SVGA3dShaderInstToken inst;
1256 struct src_register texcoord;
1257 struct src_register sampler;
1258 SVGA3dShaderDestToken tmp;
1259
1260 inst.value = 0;
1261
1262 switch (insn->Instruction.Opcode) {
1263 case TGSI_OPCODE_TEX:
1264 inst.op = SVGA3DOP_TEX;
1265 break;
1266 case TGSI_OPCODE_TXP:
1267 inst.op = SVGA3DOP_TEX;
1268 inst.control = SVGA3DOPCONT_PROJECT;
1269 break;
1270 case TGSI_OPCODE_TXB:
1271 inst.op = SVGA3DOP_TEX;
1272 inst.control = SVGA3DOPCONT_BIAS;
1273 break;
1274 case TGSI_OPCODE_TXL:
1275 inst.op = SVGA3DOP_TEXLDL;
1276 break;
1277 default:
1278 assert(0);
1279 return FALSE;
1280 }
1281
1282 texcoord = translate_src_register( emit, &insn->Src[0] );
1283 sampler = translate_src_register( emit, &insn->Src[1] );
1284
1285 if (emit->key.fkey.tex[sampler.base.num].unnormalized ||
1286 emit->dynamic_branching_level > 0)
1287 tmp = get_temp( emit );
1288
1289 /* Can't do mipmapping inside dynamic branch constructs. Force LOD
1290 * zero in that case.
1291 */
1292 if (emit->dynamic_branching_level > 0 &&
1293 inst.op == SVGA3DOP_TEX &&
1294 SVGA3dShaderGetRegType(texcoord.base.value) == SVGA3DREG_TEMP) {
1295 struct src_register zero = get_zero_immediate( emit );
1296
1297 /* MOV tmp, texcoord */
1298 if (!submit_op1( emit,
1299 inst_token( SVGA3DOP_MOV ),
1300 tmp,
1301 texcoord ))
1302 return FALSE;
1303
1304 /* MOV tmp.w, zero */
1305 if (!submit_op1( emit,
1306 inst_token( SVGA3DOP_MOV ),
1307 writemask( tmp, TGSI_WRITEMASK_W ),
1308 scalar( zero, TGSI_SWIZZLE_X )))
1309 return FALSE;
1310
1311 texcoord = src( tmp );
1312 inst.op = SVGA3DOP_TEXLDL;
1313 }
1314
1315 /* Explicit normalization of texcoords:
1316 */
1317 if (emit->key.fkey.tex[sampler.base.num].unnormalized) {
1318 struct src_register wh = get_tex_dimensions( emit, sampler.base.num );
1319
1320 /* MUL tmp, SRC0, WH */
1321 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1322 tmp, texcoord, wh ))
1323 return FALSE;
1324
1325 texcoord = src( tmp );
1326 }
1327
1328 return submit_op2( emit, inst, dst, texcoord, sampler );
1329 }
1330
1331
1332
1333
1334 /* Translate texture instructions to SVGA3D representation.
1335 */
1336 static boolean emit_tex4(struct svga_shader_emitter *emit,
1337 const struct tgsi_full_instruction *insn,
1338 SVGA3dShaderDestToken dst )
1339 {
1340 SVGA3dShaderInstToken inst;
1341 struct src_register texcoord;
1342 struct src_register ddx;
1343 struct src_register ddy;
1344 struct src_register sampler;
1345
1346 texcoord = translate_src_register( emit, &insn->Src[0] );
1347 ddx = translate_src_register( emit, &insn->Src[1] );
1348 ddy = translate_src_register( emit, &insn->Src[2] );
1349 sampler = translate_src_register( emit, &insn->Src[3] );
1350
1351 inst.value = 0;
1352
1353 switch (insn->Instruction.Opcode) {
1354 case TGSI_OPCODE_TXD:
1355 inst.op = SVGA3DOP_TEXLDD; /* 4 args! */
1356 break;
1357 default:
1358 assert(0);
1359 return FALSE;
1360 }
1361
1362 return submit_op4( emit, inst, dst, texcoord, sampler, ddx, ddy );
1363 }
1364
1365
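/* Top-level texture instruction handler (illustrative summary): shadow
 * samplers (PIPE_TEX_COMPARE_R_TO_TEXTURE) sample into a temporary and
 * then compare r/q against the fetched value with emit_select(); on
 * non-SM3.0 a partial destination writemask also forces the sample
 * through a temporary so only the requested components are written back.
 */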
1366 static boolean emit_tex(struct svga_shader_emitter *emit,
1367 const struct tgsi_full_instruction *insn )
1368 {
1369 SVGA3dShaderDestToken dst =
1370 translate_dst_register( emit, insn, 0 );
1371 struct src_register src0 =
1372 translate_src_register( emit, &insn->Src[0] );
1373 struct src_register src1 =
1374 translate_src_register( emit, &insn->Src[1] );
1375
1376 SVGA3dShaderDestToken tex_result;
1377
1378 /* check for shadow samplers */
1379 boolean compare = (emit->key.fkey.tex[src1.base.num].compare_mode ==
1380 PIPE_TEX_COMPARE_R_TO_TEXTURE);
1381
1382
1383 /* If doing compare processing, need to put this value into a
1384 * temporary so it can be used as a source later on.
1385 */
1386 if (compare ||
1387 (!emit->use_sm30 && dst.mask != TGSI_WRITEMASK_XYZW) ) {
1388 tex_result = get_temp( emit );
1389 }
1390 else {
1391 tex_result = dst;
1392 }
1393
1394 switch(insn->Instruction.Opcode) {
1395 case TGSI_OPCODE_TEX:
1396 case TGSI_OPCODE_TXB:
1397 case TGSI_OPCODE_TXP:
1398 case TGSI_OPCODE_TXL:
1399 if (!emit_tex2( emit, insn, tex_result ))
1400 return FALSE;
1401 break;
1402 case TGSI_OPCODE_TXD:
1403 if (!emit_tex4( emit, insn, tex_result ))
1404 return FALSE;
1405 break;
1406 default:
1407 assert(0);
1408 }
1409
1410
1411 if (compare) {
1412 if (dst.mask & TGSI_WRITEMASK_XYZ) {
1413 SVGA3dShaderDestToken src0_zdivw = get_temp( emit );
1414 struct src_register tex_src_x = scalar(src(tex_result), TGSI_SWIZZLE_Y);
1415
1416 /* Divide texcoord R by Q */
1417 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1418 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1419 scalar(src0, TGSI_SWIZZLE_W) ))
1420 return FALSE;
1421
1422 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1423 writemask(src0_zdivw, TGSI_WRITEMASK_X),
1424 scalar(src0, TGSI_SWIZZLE_Z),
1425 scalar(src(src0_zdivw), TGSI_SWIZZLE_X) ))
1426 return FALSE;
1427
1428 if (!emit_select(
1429 emit,
1430 emit->key.fkey.tex[src1.base.num].compare_func,
1431 writemask( dst, TGSI_WRITEMASK_XYZ ),
1432 scalar(src(src0_zdivw), TGSI_SWIZZLE_X),
1433 tex_src_x))
1434 return FALSE;
1435 }
1436
1437 if (dst.mask & TGSI_WRITEMASK_W) {
1438 struct src_register one =
1439 scalar( get_zero_immediate( emit ), TGSI_SWIZZLE_W );
1440
1441 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1442 writemask( dst, TGSI_WRITEMASK_W ),
1443 one ))
1444 return FALSE;
1445 }
1446
1447 return TRUE;
1448 }
1449 else if (!emit->use_sm30 && dst.mask != TGSI_WRITEMASK_XYZW)
1450 {
1451 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src(tex_result) ))
1452 return FALSE;
1453 }
1454
1455 return TRUE;
1456 }
1457
1458 static boolean emit_bgnloop2( struct svga_shader_emitter *emit,
1459 const struct tgsi_full_instruction *insn )
1460 {
1461 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_LOOP );
1462 struct src_register loop_reg = src_register( SVGA3DREG_LOOP, 0 );
1463 struct src_register const_int = get_loop_const( emit );
1464
1465 emit->dynamic_branching_level++;
1466
1467 return (emit_instruction( emit, inst ) &&
1468 emit_src( emit, loop_reg ) &&
1469 emit_src( emit, const_int ) );
1470 }
1471
1472 static boolean emit_endloop2( struct svga_shader_emitter *emit,
1473 const struct tgsi_full_instruction *insn )
1474 {
1475 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_ENDLOOP );
1476
1477 emit->dynamic_branching_level--;
1478
1479 return emit_instruction( emit, inst );
1480 }
1481
1482 static boolean emit_brk( struct svga_shader_emitter *emit,
1483 const struct tgsi_full_instruction *insn )
1484 {
1485 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_BREAK );
1486 return emit_instruction( emit, inst );
1487 }
1488
1489 static boolean emit_scalar_op1( struct svga_shader_emitter *emit,
1490 unsigned opcode,
1491 const struct tgsi_full_instruction *insn )
1492 {
1493 SVGA3dShaderInstToken inst;
1494 SVGA3dShaderDestToken dst;
1495 struct src_register src;
1496
1497 inst = inst_token( opcode );
1498 dst = translate_dst_register( emit, insn, 0 );
1499 src = translate_src_register( emit, &insn->Src[0] );
1500 src = scalar( src, TGSI_SWIZZLE_X );
1501
1502 return submit_op1( emit, inst, dst, src );
1503 }
1504
1505
1506 static boolean emit_simple_instruction(struct svga_shader_emitter *emit,
1507 unsigned opcode,
1508 const struct tgsi_full_instruction *insn )
1509 {
1510 const struct tgsi_full_src_register *src = insn->Src;
1511 SVGA3dShaderInstToken inst;
1512 SVGA3dShaderDestToken dst;
1513
1514 inst = inst_token( opcode );
1515 dst = translate_dst_register( emit, insn, 0 );
1516
1517 switch (insn->Instruction.NumSrcRegs) {
1518 case 0:
1519 return submit_op0( emit, inst, dst );
1520 case 1:
1521 return submit_op1( emit, inst, dst,
1522 translate_src_register( emit, &src[0] ));
1523 case 2:
1524 return submit_op2( emit, inst, dst,
1525 translate_src_register( emit, &src[0] ),
1526 translate_src_register( emit, &src[1] ) );
1527 case 3:
1528 return submit_op3( emit, inst, dst,
1529 translate_src_register( emit, &src[0] ),
1530 translate_src_register( emit, &src[1] ),
1531 translate_src_register( emit, &src[2] ) );
1532 default:
1533 assert(0);
1534 return FALSE;
1535 }
1536 }
1537
1538
1539 static boolean emit_deriv(struct svga_shader_emitter *emit,
1540 const struct tgsi_full_instruction *insn )
1541 {
1542 if (emit->dynamic_branching_level > 0 &&
1543 insn->Src[0].Register.File == TGSI_FILE_TEMPORARY)
1544 {
1545 struct src_register zero = get_zero_immediate( emit );
1546 SVGA3dShaderDestToken dst =
1547 translate_dst_register( emit, insn, 0 );
1548
1549 /* Deriv opcodes not valid inside dynamic branching, workaround
1550 * by zeroing out the destination.
1551 */
1552 if (!submit_op1(emit,
1553 inst_token( SVGA3DOP_MOV ),
1554 dst,
1555 scalar(zero, TGSI_SWIZZLE_X)))
1556 return FALSE;
1557
1558 return TRUE;
1559 }
1560 else {
1561 unsigned opcode;
1562
1563 switch (insn->Instruction.Opcode) {
1564 case TGSI_OPCODE_DDX:
1565 opcode = SVGA3DOP_DSX;
1566 break;
1567 case TGSI_OPCODE_DDY:
1568 opcode = SVGA3DOP_DSY;
1569 break;
1570 default:
1571 return FALSE;
1572 }
1573
1574 return emit_simple_instruction( emit, opcode, insn );
1575 }
1576 }
1577
1578 static boolean emit_arl(struct svga_shader_emitter *emit,
1579 const struct tgsi_full_instruction *insn)
1580 {
1581 ++emit->current_arl;
1582 if (svga_arl_needs_adjustment( emit )) {
1583 return emit_fake_arl( emit, insn );
1584 } else {
1585 /* no need to adjust, just emit straight arl */
1586 return emit_simple_instruction(emit, SVGA3DOP_MOVA, insn);
1587 }
1588 }
1589
1590 static boolean alias_src_dst( struct src_register src,
1591 SVGA3dShaderDestToken dst )
1592 {
1593 if (src.base.num != dst.num)
1594 return FALSE;
1595
1596 if (SVGA3dShaderGetRegType(dst.value) !=
1597 SVGA3dShaderGetRegType(src.base.value))
1598 return FALSE;
1599
1600 return TRUE;
1601 }
1602
1603 static boolean emit_pow(struct svga_shader_emitter *emit,
1604 const struct tgsi_full_instruction *insn)
1605 {
1606 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1607 struct src_register src0 = translate_src_register(
1608 emit, &insn->Src[0] );
1609 struct src_register src1 = translate_src_register(
1610 emit, &insn->Src[1] );
1611 boolean need_tmp = FALSE;
1612
1613 /* POW can only output to a temporary */
1614 if (insn->Dst[0].Register.File != TGSI_FILE_TEMPORARY)
1615 need_tmp = TRUE;
1616
1617 /* POW src1 must not be the same register as dst */
1618 if (alias_src_dst( src1, dst ))
1619 need_tmp = TRUE;
1620
1621 /* it's a scalar op */
1622 src0 = scalar( src0, TGSI_SWIZZLE_X );
1623 src1 = scalar( src1, TGSI_SWIZZLE_X );
1624
1625 if (need_tmp) {
1626 SVGA3dShaderDestToken tmp = writemask(get_temp( emit ), TGSI_WRITEMASK_X );
1627
1628 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ), tmp, src0, src1))
1629 return FALSE;
1630
1631 return submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, scalar(src(tmp), 0) );
1632 }
1633 else {
1634 return submit_op2(emit, inst_token( SVGA3DOP_POW ), dst, src0, src1);
1635 }
1636 }
1637
1638 static boolean emit_xpd(struct svga_shader_emitter *emit,
1639 const struct tgsi_full_instruction *insn)
1640 {
1641 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1642 const struct src_register src0 = translate_src_register(
1643 emit, &insn->Src[0] );
1644 const struct src_register src1 = translate_src_register(
1645 emit, &insn->Src[1] );
1646 boolean need_dst_tmp = FALSE;
1647
1648 /* XPD can only output to a temporary */
1649 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP)
1650 need_dst_tmp = TRUE;
1651
1652 /* The dst reg must not be the same as src0 or src1*/
1653 if (alias_src_dst(src0, dst) ||
1654 alias_src_dst(src1, dst))
1655 need_dst_tmp = TRUE;
1656
1657 if (need_dst_tmp) {
1658 SVGA3dShaderDestToken tmp = get_temp( emit );
1659
1660 /* Obey DX9 restrictions on mask:
1661 */
1662 tmp.mask = dst.mask & TGSI_WRITEMASK_XYZ;
1663
1664 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), tmp, src0, src1))
1665 return FALSE;
1666
1667 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
1668 return FALSE;
1669 }
1670 else {
1671 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), dst, src0, src1))
1672 return FALSE;
1673 }
1674
1675 /* Need to emit 1.0 to dst.w?
1676 */
1677 if (dst.mask & TGSI_WRITEMASK_W) {
1678 struct src_register zero = get_zero_immediate( emit );
1679
1680 if (!submit_op1(emit,
1681 inst_token( SVGA3DOP_MOV ),
1682 writemask(dst, TGSI_WRITEMASK_W),
1683 zero))
1684 return FALSE;
1685 }
1686
1687 return TRUE;
1688 }
1689
1690
1691 static boolean emit_lrp(struct svga_shader_emitter *emit,
1692 const struct tgsi_full_instruction *insn)
1693 {
1694 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1695 SVGA3dShaderDestToken tmp;
1696 const struct src_register src0 = translate_src_register(
1697 emit, &insn->Src[0] );
1698 const struct src_register src1 = translate_src_register(
1699 emit, &insn->Src[1] );
1700 const struct src_register src2 = translate_src_register(
1701 emit, &insn->Src[2] );
1702 boolean need_dst_tmp = FALSE;
1703
1704 /* The dst reg must not be the same as src0 or src2 */
1705 if (alias_src_dst(src0, dst) ||
1706 alias_src_dst(src2, dst))
1707 need_dst_tmp = TRUE;
1708
1709 if (need_dst_tmp) {
1710 tmp = get_temp( emit );
1711 tmp.mask = dst.mask;
1712 }
1713 else {
1714 tmp = dst;
1715 }
1716
1717 if (!submit_op3(emit, inst_token( SVGA3DOP_LRP ), tmp, src0, src1, src2))
1718 return FALSE;
1719
1720 if (need_dst_tmp) {
1721 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
1722 return FALSE;
1723 }
1724
1725 return TRUE;
1726 }
1727
1728
1729 static boolean emit_dst_insn(struct svga_shader_emitter *emit,
1730 const struct tgsi_full_instruction *insn )
1731 {
1732 if (emit->unit == PIPE_SHADER_VERTEX) {
1733 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
1734 */
1735 return emit_simple_instruction(emit, SVGA3DOP_DST, insn);
1736 }
1737 else {
1738
1739 /* result[0] = 1 * 1;
1740 * result[1] = a[1] * b[1];
1741 * result[2] = a[2] * 1;
1742 * result[3] = 1 * b[3];
1743 */
1744
1745 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1746 SVGA3dShaderDestToken tmp;
1747 const struct src_register src0 = translate_src_register(
1748 emit, &insn->Src[0] );
1749 const struct src_register src1 = translate_src_register(
1750 emit, &insn->Src[1] );
1751 struct src_register zero = get_zero_immediate( emit );
1752 boolean need_tmp = FALSE;
1753
1754 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
1755 alias_src_dst(src0, dst) ||
1756 alias_src_dst(src1, dst))
1757 need_tmp = TRUE;
1758
1759 if (need_tmp) {
1760 tmp = get_temp( emit );
1761 }
1762 else {
1763 tmp = dst;
1764 }
1765
1766 /* tmp.xw = 1.0
1767 */
1768 if (tmp.mask & TGSI_WRITEMASK_XW) {
1769 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1770 writemask(tmp, TGSI_WRITEMASK_XW ),
1771 scalar( zero, 3 )))
1772 return FALSE;
1773 }
1774
1775 /* tmp.yz = src0
1776 */
1777 if (tmp.mask & TGSI_WRITEMASK_YZ) {
1778 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1779 writemask(tmp, TGSI_WRITEMASK_YZ ),
1780 src0))
1781 return FALSE;
1782 }
1783
1784 /* tmp.yw = tmp * src1
1785 */
1786 if (tmp.mask & TGSI_WRITEMASK_YW) {
1787 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1788 writemask(tmp, TGSI_WRITEMASK_YW ),
1789 src(tmp),
1790 src1))
1791 return FALSE;
1792 }
1793
1794 /* dst = tmp
1795 */
1796 if (need_tmp) {
1797 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1798 dst,
1799 src(tmp)))
1800 return FALSE;
1801 }
1802 }
1803
1804 return TRUE;
1805 }
1806
1807
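/* TGSI EXP writes up to four results:
 *    x = 2^floor(src.x),  y = src.x - floor(src.x),
 *    z = 2^src.x (partial precision),  w = 1.0
 * The expansion below is roughly (illustrative only):
 *    FRC  frac.y, SRC
 *    ADD  dst.x,  SRC, -frac.y        (floor of src)
 *    EXP  dst.x,  dst.x
 *    EXPP dst.z,  SRC
 *    MOV  dst.w,  1.0
 */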
1808 static boolean emit_exp(struct svga_shader_emitter *emit,
1809 const struct tgsi_full_instruction *insn)
1810 {
1811 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1812 struct src_register src0 =
1813 translate_src_register( emit, &insn->Src[0] );
1814 struct src_register zero = get_zero_immediate( emit );
1815 SVGA3dShaderDestToken fraction;
1816
1817 if (dst.mask & TGSI_WRITEMASK_Y)
1818 fraction = dst;
1819 else if (dst.mask & TGSI_WRITEMASK_X)
1820 fraction = get_temp( emit );
1821 else
1822 fraction.value = 0;
1823
1824    /* If x or y is being written, compute src0 - floor(src0):
1825 */
1826 if (dst.mask & TGSI_WRITEMASK_XY) {
1827 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
1828 writemask( fraction, TGSI_WRITEMASK_Y ),
1829 src0 ))
1830 return FALSE;
1831 }
1832
1833 /* If x is being written, fill it with 2 ^ floor(src0).
1834 */
1835 if (dst.mask & TGSI_WRITEMASK_X) {
1836 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
1837 writemask( dst, TGSI_WRITEMASK_X ),
1838 src0,
1839 scalar( negate( src( fraction ) ), TGSI_SWIZZLE_Y ) ) )
1840 return FALSE;
1841
1842 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
1843 writemask( dst, TGSI_WRITEMASK_X ),
1844 scalar( src( dst ), TGSI_SWIZZLE_X ) ) )
1845 return FALSE;
1846
1847 if (!(dst.mask & TGSI_WRITEMASK_Y))
1848 release_temp( emit, fraction );
1849 }
1850
1851 /* If z is being written, fill it with 2 ^ src0 (partial precision).
1852 */
1853 if (dst.mask & TGSI_WRITEMASK_Z) {
1854 if (!submit_op1( emit, inst_token( SVGA3DOP_EXPP ),
1855 writemask( dst, TGSI_WRITEMASK_Z ),
1856 src0 ) )
1857 return FALSE;
1858 }
1859
1860 /* If w is being written, fill it with one.
1861 */
1862 if (dst.mask & TGSI_WRITEMASK_W) {
1863 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1864 writemask(dst, TGSI_WRITEMASK_W),
1865 scalar( zero, TGSI_SWIZZLE_W ) ))
1866 return FALSE;
1867 }
1868
1869 return TRUE;
1870 }
1871
1872 static boolean emit_lit(struct svga_shader_emitter *emit,
1873 const struct tgsi_full_instruction *insn )
1874 {
1875 if (emit->unit == PIPE_SHADER_VERTEX) {
1876 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
1877 */
1878 return emit_simple_instruction(emit, SVGA3DOP_LIT, insn);
1879 }
1880 else {
1881
1882       /* D3D vs. GL semantics can be fairly easily accommodated by
1883 * variations on this sequence.
1884 *
1885 * GL:
1886 * tmp.y = src.x
1887 * tmp.z = pow(src.y,src.w)
1888 * p0 = src0.xxxx > 0
1889 * result = zero.wxxw
1890 * (p0) result.yz = tmp
1891 *
1892 * D3D:
1893 * tmp.y = src.x
1894 * tmp.z = pow(src.y,src.w)
1895 * p0 = src0.xxyy > 0
1896 * result = zero.wxxw
1897 * (p0) result.yz = tmp
1898 *
1899 * Will implement the GL version for now.
1900 */
1901
1902 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1903 SVGA3dShaderDestToken tmp = get_temp( emit );
1904 const struct src_register src0 = translate_src_register(
1905 emit, &insn->Src[0] );
1906 struct src_register zero = get_zero_immediate( emit );
1907
1908 /* tmp = pow(src.y, src.w)
1909 */
1910 if (dst.mask & TGSI_WRITEMASK_Z) {
1911 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ),
1912 tmp,
1913 scalar(src0, 1),
1914 scalar(src0, 3)))
1915 return FALSE;
1916 }
1917
1918 /* tmp.y = src.x
1919 */
1920 if (dst.mask & TGSI_WRITEMASK_Y) {
1921 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1922 writemask(tmp, TGSI_WRITEMASK_Y ),
1923 scalar(src0, 0)))
1924 return FALSE;
1925 }
1926
1927       /* Can't quite do this with emit_conditional() due to the extra
1928 * writemask on the predicated mov:
1929 */
1930 {
1931 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1932 SVGA3dShaderInstToken setp_token, mov_token;
1933 struct src_register predsrc;
1934
1935 setp_token = inst_token( SVGA3DOP_SETP );
1936 mov_token = inst_token( SVGA3DOP_MOV );
1937
1938 setp_token.control = SVGA3DOPCOMP_GT;
1939
1940 /* D3D vs GL semantics:
1941 */
1942 if (0)
1943 predsrc = swizzle(src0, 0, 0, 1, 1); /* D3D */
1944 else
1945 predsrc = swizzle(src0, 0, 0, 0, 0); /* GL */
1946
1947 /* SETP src0.xxyy, GT, {0}.x */
1948 if (!submit_op2( emit, setp_token, pred_reg,
1949 predsrc,
1950 swizzle(zero, 0, 0, 0, 0) ))
1951 return FALSE;
1952
1953 /* MOV dst, fail */
1954 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst,
1955 swizzle(zero, 3, 0, 0, 3 )))
1956 return FALSE;
1957
1958 /* MOV dst.yz, tmp (predicated)
1959 *
1960 * Note that the predicate reg (and possible modifiers) is passed
1961 * as the first source argument.
1962 */
1963 if (dst.mask & TGSI_WRITEMASK_YZ) {
1964 mov_token.predicated = 1;
1965 if (!submit_op2( emit, mov_token,
1966 writemask(dst, TGSI_WRITEMASK_YZ),
1967 src( pred_reg ), src( tmp ) ))
1968 return FALSE;
1969 }
1970 }
1971 }
1972
1973 return TRUE;
1974 }
1975
1976
1977
1978
1979 static boolean emit_ex2( struct svga_shader_emitter *emit,
1980 const struct tgsi_full_instruction *insn )
1981 {
1982 SVGA3dShaderInstToken inst;
1983 SVGA3dShaderDestToken dst;
1984 struct src_register src0;
1985
1986 inst = inst_token( SVGA3DOP_EXP );
1987 dst = translate_dst_register( emit, insn, 0 );
1988 src0 = translate_src_register( emit, &insn->Src[0] );
1989 src0 = scalar( src0, TGSI_SWIZZLE_X );
1990
1991 if (dst.mask != TGSI_WRITEMASK_XYZW) {
1992 SVGA3dShaderDestToken tmp = get_temp( emit );
1993
1994 if (!submit_op1( emit, inst, tmp, src0 ))
1995 return FALSE;
1996
1997 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1998 dst,
1999 scalar( src( tmp ), TGSI_SWIZZLE_X ) );
2000 }
2001
2002 return submit_op1( emit, inst, dst, src0 );
2003 }
2004
2005
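/* Expand TGSI LOG:
 *   dst.x = floor(log2(|src.x|))
 *   dst.y = |src.x| / 2^floor(log2(|src.x|))
 *   dst.z = log2(|src.x|)
 *   dst.w = 1.0
 * computing only the components selected by the writemask.
 */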
2006 static boolean emit_log(struct svga_shader_emitter *emit,
2007 const struct tgsi_full_instruction *insn)
2008 {
2009 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
2010 struct src_register src0 =
2011 translate_src_register( emit, &insn->Src[0] );
2012 struct src_register zero = get_zero_immediate( emit );
2013 SVGA3dShaderDestToken abs_tmp;
2014 struct src_register abs_src0;
2015 SVGA3dShaderDestToken log2_abs;
2016
2017 abs_tmp.value = 0;
2018
2019 if (dst.mask & TGSI_WRITEMASK_Z)
2020 log2_abs = dst;
2021 else if (dst.mask & TGSI_WRITEMASK_XY)
2022 log2_abs = get_temp( emit );
2023 else
2024 log2_abs.value = 0;
2025
2026 /* If x, y or z is being written, compute log2( abs( src0 ) ) into log2_abs.
2027 */
2028 if (dst.mask & TGSI_WRITEMASK_XYZ) {
2029 if (!src0.base.srcMod || src0.base.srcMod == SVGA3DSRCMOD_ABS)
2030 abs_src0 = src0;
2031 else {
2032 abs_tmp = get_temp( emit );
2033
2034 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2035 abs_tmp,
2036 src0 ) )
2037 return FALSE;
2038
2039 abs_src0 = src( abs_tmp );
2040 }
2041
2042 abs_src0 = absolute( scalar( abs_src0, TGSI_SWIZZLE_X ) );
2043
2044 if (!submit_op1( emit, inst_token( SVGA3DOP_LOG ),
2045 writemask( log2_abs, TGSI_WRITEMASK_Z ),
2046 abs_src0 ) )
2047 return FALSE;
2048 }
2049
2050 if (dst.mask & TGSI_WRITEMASK_XY) {
2051 SVGA3dShaderDestToken floor_log2;
2052
2053 if (dst.mask & TGSI_WRITEMASK_X)
2054 floor_log2 = dst;
2055 else
2056 floor_log2 = get_temp( emit );
2057
2058 /* Compute floor( log2( abs( src0 ) ) ) into floor_log2.x; floor_log2 aliases dst when x is written.
2059 */
2060 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
2061 writemask( floor_log2, TGSI_WRITEMASK_X ),
2062 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ) ) )
2063 return FALSE;
2064
2065 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
2066 writemask( floor_log2, TGSI_WRITEMASK_X ),
2067 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ),
2068 negate( src( floor_log2 ) ) ) )
2069 return FALSE;
2070
2071 /* If y is being written, fill it with
2072 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
2073 */
2074 if (dst.mask & TGSI_WRITEMASK_Y) {
2075 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
2076 writemask( dst, TGSI_WRITEMASK_Y ),
2077 negate( scalar( src( floor_log2 ),
2078 TGSI_SWIZZLE_X ) ) ) )
2079 return FALSE;
2080
2081 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
2082 writemask( dst, TGSI_WRITEMASK_Y ),
2083 src( dst ),
2084 abs_src0 ) )
2085 return FALSE;
2086 }
2087
2088 if (!(dst.mask & TGSI_WRITEMASK_X))
2089 release_temp( emit, floor_log2 );
2090
2091 if (!(dst.mask & TGSI_WRITEMASK_Z))
2092 release_temp( emit, log2_abs );
2093 }
2094
2095 if ((dst.mask & TGSI_WRITEMASK_XYZ) && src0.base.srcMod &&
2096 src0.base.srcMod != SVGA3DSRCMOD_ABS)
2097 release_temp( emit, abs_tmp );
2098
2099 /* If w is being written, fill it with one.
2100 */
2101 if (dst.mask & TGSI_WRITEMASK_W) {
2102 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
2103 writemask(dst, TGSI_WRITEMASK_W),
2104 scalar( zero, TGSI_SWIZZLE_W ) ))
2105 return FALSE;
2106 }
2107
2108 return TRUE;
2109 }
2110
2111
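/* Begin a subroutine: terminate the preceding code with RET, then emit
 * the LABEL whose index was recorded when the corresponding CAL was
 * translated.
 */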
2112 static boolean emit_bgnsub( struct svga_shader_emitter *emit,
2113 unsigned position,
2114 const struct tgsi_full_instruction *insn )
2115 {
2116 unsigned i;
2117
2118 /* Note that we've finished the main function and are now emitting
2119 * subroutines. This affects how we terminate the generated
2120 * shader.
2121 */
2122 emit->in_main_func = FALSE;
2123
2124 for (i = 0; i < emit->nr_labels; i++) {
2125 if (emit->label[i] == position) {
2126 return (emit_instruction( emit, inst_token( SVGA3DOP_RET ) ) &&
2127 emit_instruction( emit, inst_token( SVGA3DOP_LABEL ) ) &&
2128 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2129 }
2130 }
2131
2132 assert(0);
2133 return TRUE;
2134 }
2135
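/* Translate TGSI CAL: look up (or allocate) the label slot for the call
 * target and emit a CALL referencing it.
 */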
2136 static boolean emit_call( struct svga_shader_emitter *emit,
2137 const struct tgsi_full_instruction *insn )
2138 {
2139 unsigned position = insn->Label.Label;
2140 unsigned i;
2141
2142 for (i = 0; i < emit->nr_labels; i++) {
2143 if (emit->label[i] == position)
2144 break;
2145 }
2146
2147 if (i == emit->nr_labels) {
2148 if (emit->nr_labels == Elements(emit->label))
2149 return FALSE;
2150
2151 emit->label[i] = position;
2152 emit->nr_labels++;
2153 }
2154
2155 return (emit_instruction( emit, inst_token( SVGA3DOP_CALL ) ) &&
2156 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
2157 }
2158
2159
2160 static boolean emit_end( struct svga_shader_emitter *emit )
2161 {
2162 if (emit->unit == PIPE_SHADER_VERTEX) {
2163 return emit_vs_postamble( emit );
2164 }
2165 else {
2166 return emit_ps_postamble( emit );
2167 }
2168 }
2169
2170
2171
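/* Translate a single TGSI instruction to SVGA3D shader code.  Opcodes
 * with no direct hardware equivalent are expanded by the emit_* helpers
 * above; anything else falls through to translate_opcode() and
 * emit_simple_instruction().
 */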
2172 static boolean svga_emit_instruction( struct svga_shader_emitter *emit,
2173 unsigned position,
2174 const struct tgsi_full_instruction *insn )
2175 {
2176 switch (insn->Instruction.Opcode) {
2177
2178 case TGSI_OPCODE_ARL:
2179 return emit_arl( emit, insn );
2180
2181 case TGSI_OPCODE_TEX:
2182 case TGSI_OPCODE_TXB:
2183 case TGSI_OPCODE_TXP:
2184 case TGSI_OPCODE_TXL:
2185 case TGSI_OPCODE_TXD:
2186 return emit_tex( emit, insn );
2187
2188 case TGSI_OPCODE_DDX:
2189 case TGSI_OPCODE_DDY:
2190 return emit_deriv( emit, insn );
2191
2192 case TGSI_OPCODE_BGNSUB:
2193 return emit_bgnsub( emit, position, insn );
2194
2195 case TGSI_OPCODE_ENDSUB:
2196 return TRUE;
2197
2198 case TGSI_OPCODE_CAL:
2199 return emit_call( emit, insn );
2200
2201 case TGSI_OPCODE_FLR:
2202 case TGSI_OPCODE_TRUNC: /* should truncate toward zero, not floor */
2203 return emit_floor( emit, insn );
2204
2205 case TGSI_OPCODE_CMP:
2206 return emit_cmp( emit, insn );
2207
2208 case TGSI_OPCODE_DIV:
2209 return emit_div( emit, insn );
2210
2211 case TGSI_OPCODE_DP2:
2212 return emit_dp2( emit, insn );
2213
2214 case TGSI_OPCODE_DPH:
2215 return emit_dph( emit, insn );
2216
2217 case TGSI_OPCODE_NRM:
2218 return emit_nrm( emit, insn );
2219
2220 case TGSI_OPCODE_COS:
2221 return emit_cos( emit, insn );
2222
2223 case TGSI_OPCODE_SIN:
2224 return emit_sin( emit, insn );
2225
2226 case TGSI_OPCODE_SCS:
2227 return emit_sincos( emit, insn );
2228
2229 case TGSI_OPCODE_END:
2230 /* TGSI always finishes the main func with an END */
2231 return emit_end( emit );
2232
2233 case TGSI_OPCODE_KIL:
2234 return emit_kil( emit, insn );
2235
2236 /* Selection opcodes. The underlying language is fairly
2237 * non-orthogonal about these.
2238 */
2239 case TGSI_OPCODE_SEQ:
2240 return emit_select_op( emit, PIPE_FUNC_EQUAL, insn );
2241
2242 case TGSI_OPCODE_SNE:
2243 return emit_select_op( emit, PIPE_FUNC_NOTEQUAL, insn );
2244
2245 case TGSI_OPCODE_SGT:
2246 return emit_select_op( emit, PIPE_FUNC_GREATER, insn );
2247
2248 case TGSI_OPCODE_SGE:
2249 return emit_select_op( emit, PIPE_FUNC_GEQUAL, insn );
2250
2251 case TGSI_OPCODE_SLT:
2252 return emit_select_op( emit, PIPE_FUNC_LESS, insn );
2253
2254 case TGSI_OPCODE_SLE:
2255 return emit_select_op( emit, PIPE_FUNC_LEQUAL, insn );
2256
2257 case TGSI_OPCODE_SUB:
2258 return emit_sub( emit, insn );
2259
2260 case TGSI_OPCODE_POW:
2261 return emit_pow( emit, insn );
2262
2263 case TGSI_OPCODE_EX2:
2264 return emit_ex2( emit, insn );
2265
2266 case TGSI_OPCODE_EXP:
2267 return emit_exp( emit, insn );
2268
2269 case TGSI_OPCODE_LOG:
2270 return emit_log( emit, insn );
2271
2272 case TGSI_OPCODE_LG2:
2273 return emit_scalar_op1( emit, SVGA3DOP_LOG, insn );
2274
2275 case TGSI_OPCODE_RSQ:
2276 return emit_scalar_op1( emit, SVGA3DOP_RSQ, insn );
2277
2278 case TGSI_OPCODE_RCP:
2279 return emit_scalar_op1( emit, SVGA3DOP_RCP, insn );
2280
2281 case TGSI_OPCODE_CONT:
2282 case TGSI_OPCODE_RET:
2283 /* This is a noop -- we tell mesa that we can't support RET
2284 * within a function (early return), so this will always be
2285 * followed by an ENDSUB.
2286 */
2287 return TRUE;
2288
2289 /* These aren't actually used by any of the frontends we care
2290 * about:
2291 */
2292 case TGSI_OPCODE_CLAMP:
2293 case TGSI_OPCODE_ROUND:
2294 case TGSI_OPCODE_AND:
2295 case TGSI_OPCODE_OR:
2296 case TGSI_OPCODE_I2F:
2297 case TGSI_OPCODE_NOT:
2298 case TGSI_OPCODE_SHL:
2299 case TGSI_OPCODE_ISHR:
2300 case TGSI_OPCODE_XOR:
2301 return FALSE;
2302
2303 case TGSI_OPCODE_IF:
2304 return emit_if( emit, insn );
2305 case TGSI_OPCODE_ELSE:
2306 return emit_else( emit, insn );
2307 case TGSI_OPCODE_ENDIF:
2308 return emit_endif( emit, insn );
2309
2310 case TGSI_OPCODE_BGNLOOP:
2311 return emit_bgnloop2( emit, insn );
2312 case TGSI_OPCODE_ENDLOOP:
2313 return emit_endloop2( emit, insn );
2314 case TGSI_OPCODE_BRK:
2315 return emit_brk( emit, insn );
2316
2317 case TGSI_OPCODE_XPD:
2318 return emit_xpd( emit, insn );
2319
2320 case TGSI_OPCODE_KILP:
2321 return emit_kilp( emit, insn );
2322
2323 case TGSI_OPCODE_DST:
2324 return emit_dst_insn( emit, insn );
2325
2326 case TGSI_OPCODE_LIT:
2327 return emit_lit( emit, insn );
2328
2329 case TGSI_OPCODE_LRP:
2330 return emit_lrp( emit, insn );
2331
2332 default: {
2333 unsigned opcode = translate_opcode(insn->Instruction.Opcode);
2334
2335 if (opcode == SVGA3DOP_LAST_INST)
2336 return FALSE;
2337
2338 if (!emit_simple_instruction( emit, opcode, insn ))
2339 return FALSE;
2340 }
2341 }
2342
2343 return TRUE;
2344 }
2345
2346
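/* Emit a TGSI immediate as a float4 DEF constant, padding any missing
 * components with (0, 0, 0, 1).
 */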
2347 static boolean svga_emit_immediate( struct svga_shader_emitter *emit,
2348 struct tgsi_full_immediate *imm)
2349 {
2350 static const float id[4] = {0,0,0,1};
2351 float value[4];
2352 unsigned i;
2353
2354 assert(1 <= imm->Immediate.NrTokens && imm->Immediate.NrTokens <= 5);
2355 for (i = 0; i < imm->Immediate.NrTokens - 1; i++)
2356 value[i] = imm->u[i].Float;
2357
2358 for ( ; i < 4; i++ )
2359 value[i] = id[i];
2360
2361 return emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
2362 emit->imm_start + emit->internal_imm_count++,
2363 value[0], value[1], value[2], value[3]);
2364 }
2365
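/* Allocate the next hardware constant register, emit a DEF for the given
 * float4 value and return it as a source register.
 */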
2366 static boolean make_immediate( struct svga_shader_emitter *emit,
2367 float a,
2368 float b,
2369 float c,
2370 float d,
2371 struct src_register *out )
2372 {
2373 unsigned idx = emit->nr_hw_const++;
2374
2375 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
2376 idx, a, b, c, d ))
2377 return FALSE;
2378
2379 *out = src_register( SVGA3DREG_CONST, idx );
2380
2381 return TRUE;
2382 }
2383
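/* When prescale isn't used, the VS postamble needs the {0, 0, 0.5, 0.5}
 * constant to remap GL clip-space z to the D3D depth range; define it
 * up front.
 */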
2384 static boolean emit_vs_preamble( struct svga_shader_emitter *emit )
2385 {
2386 if (!emit->key.vkey.need_prescale) {
2387 if (!make_immediate( emit, 0, 0, .5, .5,
2388 &emit->imm_0055))
2389 return FALSE;
2390 }
2391
2392 return TRUE;
2393 }
2394
2395 static boolean emit_ps_preamble( struct svga_shader_emitter *emit )
2396 {
2397 unsigned i;
2398
2399 /* For SM20, need to initialize the temporaries we're using to hold
2400 * color outputs to some value. Shaders which don't set all of
2401 * these values are likely to be rejected by the DX9 runtime.
2402 */
2403 if (!emit->use_sm30) {
2404 struct src_register zero = get_zero_immediate( emit );
2405 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
2406 if (SVGA3dShaderGetRegType(emit->true_col[i].value) != 0) {
2407
2408 if (!submit_op1( emit,
2409 inst_token(SVGA3DOP_MOV),
2410 emit->temp_col[i],
2411 zero ))
2412 return FALSE;
2413 }
2414 }
2415 }
2416
2417 return TRUE;
2418 }
2419
2420 static boolean emit_ps_postamble( struct svga_shader_emitter *emit )
2421 {
2422 unsigned i;
2423
2424 /* PS oDepth is incredibly fragile and it's very hard to catch the
2425 * types of usage that break it during shader emit. Easier just to
2426 * redirect the main program to a temporary and then only touch
2427 * oDepth with a hand-crafted MOV below.
2428 */
2429 if (SVGA3dShaderGetRegType(emit->true_pos.value) != 0) {
2430
2431 if (!submit_op1( emit,
2432 inst_token(SVGA3DOP_MOV),
2433 emit->true_pos,
2434 scalar(src(emit->temp_pos), TGSI_SWIZZLE_Z) ))
2435 return FALSE;
2436 }
2437
2438 /* Similarly for SM20 color outputs... Luckily SM30 isn't so
2439 * fragile.
2440 */
2441 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
2442 if (SVGA3dShaderGetRegType(emit->true_col[i].value) != 0) {
2443
2444 /* Potentially override output colors with white for XOR
2445 * logicop workaround.
2446 */
2447 if (emit->unit == PIPE_SHADER_FRAGMENT &&
2448 emit->key.fkey.white_fragments) {
2449
2450 struct src_register one = scalar( get_zero_immediate( emit ),
2451 TGSI_SWIZZLE_W );
2452
2453 if (!submit_op1( emit,
2454 inst_token(SVGA3DOP_MOV),
2455 emit->true_col[i],
2456 one ))
2457 return FALSE;
2458 }
2459 else {
2460 if (!submit_op1( emit,
2461 inst_token(SVGA3DOP_MOV),
2462 emit->true_col[i],
2463 src(emit->temp_col[i]) ))
2464 return FALSE;
2465 }
2466 }
2467 }
2468
2469 return TRUE;
2470 }
2471
2472 static boolean emit_vs_postamble( struct svga_shader_emitter *emit )
2473 {
2474 /* PSIZ output is incredibly fragile and it's very hard to catch
2475 * the types of usage that break it during shader emit. Easier
2476 * just to redirect the main program to a temporary and then only
2477 * touch PSIZ with a hand-crafted MOV below.
2478 */
2479 if (SVGA3dShaderGetRegType(emit->true_psiz.value) != 0) {
2480
2481 if (!submit_op1( emit,
2482 inst_token(SVGA3DOP_MOV),
2483 emit->true_psiz,
2484 scalar(src(emit->temp_psiz), TGSI_SWIZZLE_X) ))
2485 return FALSE;
2486 }
2487
2488 /* Need to perform various manipulations on vertex position to cope
2489 * with the different GL and D3D clip spaces.
2490 */
2491 if (emit->key.vkey.need_prescale) {
2492 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
2493 SVGA3dShaderDestToken pos = emit->true_pos;
2494 unsigned offset = emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
2495 struct src_register prescale_scale = src_register( SVGA3DREG_CONST,
2496 offset + 0 );
2497 struct src_register prescale_trans = src_register( SVGA3DREG_CONST,
2498 offset + 1 );
2499
2500 /* MUL temp_pos.xyz, temp_pos, prescale.scale
2501 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
2502 * --> Note that prescale.trans.w == 0
2503 */
2504 if (!submit_op2( emit,
2505 inst_token(SVGA3DOP_MUL),
2506 writemask(temp_pos, TGSI_WRITEMASK_XYZ),
2507 src(temp_pos),
2508 prescale_scale ))
2509 return FALSE;
2510
2511 if (!submit_op3( emit,
2512 inst_token(SVGA3DOP_MAD),
2513 pos,
2514 swizzle(src(temp_pos), 3, 3, 3, 3),
2515 prescale_trans,
2516 src(temp_pos)))
2517 return FALSE;
2518 }
2519 else {
2520 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
2521 SVGA3dShaderDestToken pos = emit->true_pos;
2522 struct src_register imm_0055 = emit->imm_0055;
2523
2524 /* Adjust GL clipping coordinate space to hardware (D3D-style):
2525 *
2526 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
2527 * MOV result.position, temp_pos
2528 */
2529 if (!submit_op2( emit,
2530 inst_token(SVGA3DOP_DP4),
2531 writemask(temp_pos, TGSI_WRITEMASK_Z),
2532 imm_0055,
2533 src(temp_pos) ))
2534 return FALSE;
2535
2536 if (!submit_op1( emit,
2537 inst_token(SVGA3DOP_MOV),
2538 pos,
2539 src(temp_pos) ))
2540 return FALSE;
2541 }
2542
2543 return TRUE;
2544 }
2545
2546 /*
2547 0: IF VFACE :4
2548 1: COLOR = FrontColor;
2549 2: ELSE
2550 3: COLOR = BackColor;
2551 4: ENDIF
2552 */
2553 static boolean emit_light_twoside( struct svga_shader_emitter *emit )
2554 {
2555 struct src_register vface, zero;
2556 struct src_register front[2];
2557 struct src_register back[2];
2558 SVGA3dShaderDestToken color[2];
2559 int count = emit->internal_color_count;
2560 int i;
2561 SVGA3dShaderInstToken if_token;
2562
2563 if (count == 0)
2564 return TRUE;
2565
2566 vface = get_vface( emit );
2567 zero = get_zero_immediate( emit );
2568
2569 /* Can't use get_temp() to allocate the color reg as such
2570 * temporaries will be reclaimed after each instruction by the call
2571 * to reset_temp_regs().
2572 */
2573 for (i = 0; i < count; i++) {
2574 color[i] = dst_register( SVGA3DREG_TEMP,
2575 emit->nr_hw_temp++ );
2576
2577 front[i] = emit->input_map[emit->internal_color_idx[i]];
2578
2579 /* Back is always the next input:
2580 */
2581 back[i] = front[i];
2582 back[i].base.num = front[i].base.num + 1;
2583
2584 /* Reassign the input_map to the actual front-face color:
2585 */
2586 emit->input_map[emit->internal_color_idx[i]] = src(color[i]);
2587 }
2588
2589 if_token = inst_token( SVGA3DOP_IFC );
2590
2591 if (emit->key.fkey.front_cw)
2592 if_token.control = SVGA3DOPCOMP_GT;
2593 else
2594 if_token.control = SVGA3DOPCOMP_LT;
2595
2596 zero = scalar(zero, TGSI_SWIZZLE_X);
2597
2598 if (!(emit_instruction( emit, if_token ) &&
2599 emit_src( emit, vface ) &&
2600 emit_src( emit, zero ) ))
2601 return FALSE;
2602
2603 for (i = 0; i < count; i++) {
2604 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], front[i] ))
2605 return FALSE;
2606 }
2607
2608 if (!(emit_instruction( emit, inst_token( SVGA3DOP_ELSE))))
2609 return FALSE;
2610
2611 for (i = 0; i < count; i++) {
2612 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], back[i] ))
2613 return FALSE;
2614 }
2615
2616 if (!emit_instruction( emit, inst_token( SVGA3DOP_ENDIF ) ))
2617 return FALSE;
2618
2619 return TRUE;
2620 }
2621
2622 /*
2623 0: SETP_GT TEMP, VFACE, 0
2624 where TEMP is a fake frontface register
2625 */
2626 static boolean emit_frontface( struct svga_shader_emitter *emit )
2627 {
2628 struct src_register vface, zero;
2629 SVGA3dShaderDestToken temp;
2630 struct src_register pass, fail;
2631
2632 vface = get_vface( emit );
2633 zero = get_zero_immediate( emit );
2634
2635 /* Can't use get_temp() to allocate the fake frontface reg as such
2636 * temporaries will be reclaimed after each instruction by the call
2637 * to reset_temp_regs().
2638 */
2639 temp = dst_register( SVGA3DREG_TEMP,
2640 emit->nr_hw_temp++ );
2641
2642 if (emit->key.fkey.front_cw) {
2643 pass = scalar( zero, TGSI_SWIZZLE_W );
2644 fail = scalar( zero, TGSI_SWIZZLE_X );
2645 } else {
2646 pass = scalar( zero, TGSI_SWIZZLE_X );
2647 fail = scalar( zero, TGSI_SWIZZLE_W );
2648 }
2649
2650 if (!emit_conditional(emit, PIPE_FUNC_GREATER,
2651 temp, vface, scalar( zero, TGSI_SWIZZLE_X ),
2652 pass, fail))
2653 return FALSE;
2654
2655 /* Reassign the input_map to the actual front-face color:
2656 */
2657 emit->input_map[emit->internal_frontface_idx] = src(temp);
2658
2659 return TRUE;
2660 }
2661
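/* Determine whether any of the helper code or opcode expansions will
 * need the zero/one immediate created by create_zero_immediate().
 */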
2662 static INLINE boolean
2663 needs_to_create_zero( struct svga_shader_emitter *emit )
2664 {
2665 int i;
2666
2667 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2668 if (!emit->use_sm30)
2669 return TRUE;
2670
2671 if (emit->key.fkey.light_twoside)
2672 return TRUE;
2673
2674 if (emit->key.fkey.white_fragments)
2675 return TRUE;
2676
2677 if (emit->emit_frontface)
2678 return TRUE;
2679
2680 if (emit->info.opcode_count[TGSI_OPCODE_DST] >= 1 ||
2681 emit->info.opcode_count[TGSI_OPCODE_LIT] >= 1)
2682 return TRUE;
2683 }
2684
2685 if (emit->info.opcode_count[TGSI_OPCODE_IF] >= 1 ||
2686 emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1 ||
2687 emit->info.opcode_count[TGSI_OPCODE_DDX] >= 1 ||
2688 emit->info.opcode_count[TGSI_OPCODE_DDY] >= 1 ||
2689 emit->info.opcode_count[TGSI_OPCODE_SGE] >= 1 ||
2690 emit->info.opcode_count[TGSI_OPCODE_SGT] >= 1 ||
2691 emit->info.opcode_count[TGSI_OPCODE_SLE] >= 1 ||
2692 emit->info.opcode_count[TGSI_OPCODE_SLT] >= 1 ||
2693 emit->info.opcode_count[TGSI_OPCODE_SNE] >= 1 ||
2694 emit->info.opcode_count[TGSI_OPCODE_SEQ] >= 1 ||
2695 emit->info.opcode_count[TGSI_OPCODE_EXP] >= 1 ||
2696 emit->info.opcode_count[TGSI_OPCODE_LOG] >= 1 ||
2697 emit->info.opcode_count[TGSI_OPCODE_XPD] >= 1 ||
2698 emit->info.opcode_count[TGSI_OPCODE_KILP] >= 1)
2699 return TRUE;
2700
2701 for (i = 0; i < emit->key.fkey.num_textures; i++) {
2702 if (emit->key.fkey.tex[i].compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
2703 return TRUE;
2704 }
2705
2706 return FALSE;
2707 }
2708
2709 static INLINE boolean
2710 needs_to_create_loop_const( struct svga_shader_emitter *emit )
2711 {
2712 return (emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1);
2713 }
2714
2715 static INLINE boolean
2716 needs_to_create_sincos_consts( struct svga_shader_emitter *emit )
2717 {
2718 return !emit->use_sm30 && (emit->info.opcode_count[TGSI_OPCODE_SIN] >= 1 ||
2719 emit->info.opcode_count[TGSI_OPCODE_COS] >= 1 ||
2720 emit->info.opcode_count[TGSI_OPCODE_SCS] >= 1);
2721 }
2722
2723 static INLINE boolean
2724 needs_to_create_arl_consts( struct svga_shader_emitter *emit )
2725 {
2726 return (emit->num_arl_consts > 0);
2727 }
2728
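/* Record the most negative constant index seen for an ARL-relative
 * source access, tracked per ARL instruction; this feeds
 * create_arl_consts() later.
 */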
2729 static INLINE boolean
2730 pre_parse_add_indirect( struct svga_shader_emitter *emit,
2731 int num, int current_arl)
2732 {
2733 int i;
2734 assert(num < 0);
2735
2736 for (i = 0; i < emit->num_arl_consts; ++i) {
2737 if (emit->arl_consts[i].arl_num == current_arl)
2738 break;
2739 }
2740 /* new entry */
2741 if (emit->num_arl_consts == i) {
2742 ++emit->num_arl_consts;
2743 }
2744 emit->arl_consts[i].number = (emit->arl_consts[i].number > num) ?
2745 num :
2746 emit->arl_consts[i].number;
2747 emit->arl_consts[i].arl_num = current_arl;
2748 return TRUE;
2749 }
2750
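/* Check each source operand for an address-register-relative access
 * with a negative constant index and record it.
 */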
2751 static boolean
2752 pre_parse_instruction( struct svga_shader_emitter *emit,
2753 const struct tgsi_full_instruction *insn,
2754 int current_arl)
2755 {
2756 if (insn->Src[0].Register.Indirect &&
2757 insn->Src[0].Indirect.File == TGSI_FILE_ADDRESS) {
2758 const struct tgsi_full_src_register *reg = &insn->Src[0];
2759 if (reg->Register.Index < 0) {
2760 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
2761 }
2762 }
2763
2764 if (insn->Src[1].Register.Indirect &&
2765 insn->Src[1].Indirect.File == TGSI_FILE_ADDRESS) {
2766 const struct tgsi_full_src_register *reg = &insn->Src[1];
2767 if (reg->Register.Index < 0) {
2768 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
2769 }
2770 }
2771
2772 if (insn->Src[2].Register.Indirect &&
2773 insn->Src[2].Indirect.File == TGSI_FILE_ADDRESS) {
2774 const struct tgsi_full_src_register *reg = &insn->Src[2];
2775 if (reg->Register.Index < 0) {
2776 pre_parse_add_indirect(emit, reg->Register.Index, current_arl);
2777 }
2778 }
2779
2780 return TRUE;
2781 }
2782
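/* Pre-pass over the token stream: count ARL instructions and record any
 * negative indirect constant accesses before real emission starts.
 */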
2783 static boolean
2784 pre_parse_tokens( struct svga_shader_emitter *emit,
2785 const struct tgsi_token *tokens )
2786 {
2787 struct tgsi_parse_context parse;
2788 int current_arl = 0;
2789
2790 tgsi_parse_init( &parse, tokens );
2791
2792 while (!tgsi_parse_end_of_tokens( &parse )) {
2793 tgsi_parse_token( &parse );
2794 switch (parse.FullToken.Token.Type) {
2795 case TGSI_TOKEN_TYPE_IMMEDIATE:
2796 case TGSI_TOKEN_TYPE_DECLARATION:
2797 break;
2798 case TGSI_TOKEN_TYPE_INSTRUCTION:
2799 if (parse.FullToken.FullInstruction.Instruction.Opcode ==
2800 TGSI_OPCODE_ARL) {
2801 ++current_arl;
2802 }
2803 if (!pre_parse_instruction( emit, &parse.FullToken.FullInstruction,
2804 current_arl ))
2805 return FALSE;
2806 break;
2807 default:
2808 break;
2809 }
2810
2811 }
2812 return TRUE;
2813 }
2814
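/* Emit any immediates and constants plus the fragment shader preamble,
 * two-sided lighting and front-face helper code needed before the first
 * real instruction.
 */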
2815 static boolean svga_shader_emit_helpers( struct svga_shader_emitter *emit )
2816
2817 {
2818 if (needs_to_create_zero( emit )) {
2819 create_zero_immediate( emit );
2820 }
2821 if (needs_to_create_loop_const( emit )) {
2822 create_loop_const( emit );
2823 }
2824 if (needs_to_create_sincos_consts( emit )) {
2825 create_sincos_consts( emit );
2826 }
2827 if (needs_to_create_arl_consts( emit )) {
2828 create_arl_consts( emit );
2829 }
2830
2831 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2832 if (!emit_ps_preamble( emit ))
2833 return FALSE;
2834
2835 if (emit->key.fkey.light_twoside) {
2836 if (!emit_light_twoside( emit ))
2837 return FALSE;
2838 }
2839 if (emit->emit_frontface) {
2840 if (!emit_frontface( emit ))
2841 return FALSE;
2842 }
2843 }
2844
2845 return TRUE;
2846 }
2847
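/* Main entry point for instruction translation: walk the TGSI token
 * stream, emitting declarations, immediates, helper code and
 * instructions, and terminate the shader with RET/END as required.
 */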
2848 boolean svga_shader_emit_instructions( struct svga_shader_emitter *emit,
2849 const struct tgsi_token *tokens )
2850 {
2851 struct tgsi_parse_context parse;
2852 boolean ret = TRUE;
2853 boolean helpers_emitted = FALSE;
2854 unsigned line_nr = 0;
2855
2856 tgsi_parse_init( &parse, tokens );
2857 emit->internal_imm_count = 0;
2858
2859 if (emit->unit == PIPE_SHADER_VERTEX) {
2860 ret = emit_vs_preamble( emit );
2861 if (!ret)
2862 goto done;
2863 }
2864
2865 pre_parse_tokens(emit, tokens);
2866
2867 while (!tgsi_parse_end_of_tokens( &parse )) {
2868 tgsi_parse_token( &parse );
2869
2870 switch (parse.FullToken.Token.Type) {
2871 case TGSI_TOKEN_TYPE_IMMEDIATE:
2872 ret = svga_emit_immediate( emit, &parse.FullToken.FullImmediate );
2873 if (!ret)
2874 goto done;
2875 break;
2876
2877 case TGSI_TOKEN_TYPE_DECLARATION:
2878 if (emit->use_sm30)
2879 ret = svga_translate_decl_sm30( emit, &parse.FullToken.FullDeclaration );
2880 else
2881 ret = svga_translate_decl_sm20( emit, &parse.FullToken.FullDeclaration );
2882 if (!ret)
2883 goto done;
2884 break;
2885
2886 case TGSI_TOKEN_TYPE_INSTRUCTION:
2887 if (!helpers_emitted) {
2888 if (!svga_shader_emit_helpers( emit ))
2889 goto done;
2890 helpers_emitted = TRUE;
2891 }
2892 ret = svga_emit_instruction( emit,
2893 line_nr++,
2894 &parse.FullToken.FullInstruction );
2895 if (!ret)
2896 goto done;
2897 break;
2898 default:
2899 break;
2900 }
2901
2902 reset_temp_regs( emit );
2903 }
2904
2905 /* Need to terminate the current subroutine. Note that the
2906 * hardware doesn't tolerate subroutines which don't terminate
2907 * with RET before the final END.
2908 */
2909 if (!emit->in_main_func) {
2910 ret = emit_instruction( emit, inst_token( SVGA3DOP_RET ) );
2911 if (!ret)
2912 goto done;
2913 }
2914
2915 assert(emit->dynamic_branching_level == 0);
2916
2917 /* Need to terminate the whole shader:
2918 */
2919 ret = emit_instruction( emit, inst_token( SVGA3DOP_END ) );
2920 if (!ret)
2921 goto done;
2922
2923 done:
2924 assert(ret);
2925 tgsi_parse_free( &parse );
2926 return ret;
2927 }
2928