src/gallium/drivers/svga/svga_tgsi_insn.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_parse.h"
29 #include "util/u_memory.h"
30
31 #include "svga_tgsi_emit.h"
32 #include "svga_context.h"
33
34
35 static boolean emit_vs_postamble( struct svga_shader_emitter *emit );
36 static boolean emit_ps_postamble( struct svga_shader_emitter *emit );
37
38
39
40
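/* Map the TGSI opcodes that translate 1:1 onto SVGA3D opcodes. Anything
 * not listed here is either unsupported or handled by a dedicated emit_*
 * helper further down in this file.
 */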
41 static unsigned
42 translate_opcode(
43 uint opcode )
44 {
45 switch (opcode) {
46 case TGSI_OPCODE_ABS: return SVGA3DOP_ABS;
47 case TGSI_OPCODE_ADD: return SVGA3DOP_ADD;
48 case TGSI_OPCODE_BREAKC: return SVGA3DOP_BREAKC;
49 case TGSI_OPCODE_DDX: return SVGA3DOP_DSX;
50 case TGSI_OPCODE_DDY: return SVGA3DOP_DSY;
51 case TGSI_OPCODE_DP2A: return SVGA3DOP_DP2ADD;
52 case TGSI_OPCODE_DP3: return SVGA3DOP_DP3;
53 case TGSI_OPCODE_DP4: return SVGA3DOP_DP4;
54 case TGSI_OPCODE_ENDFOR: return SVGA3DOP_ENDLOOP;
55 case TGSI_OPCODE_FRC: return SVGA3DOP_FRC;
56 case TGSI_OPCODE_BGNFOR: return SVGA3DOP_LOOP;
57 case TGSI_OPCODE_MAD: return SVGA3DOP_MAD;
58 case TGSI_OPCODE_MAX: return SVGA3DOP_MAX;
59 case TGSI_OPCODE_MIN: return SVGA3DOP_MIN;
60 case TGSI_OPCODE_MOV: return SVGA3DOP_MOV;
61 case TGSI_OPCODE_MUL: return SVGA3DOP_MUL;
62 case TGSI_OPCODE_NOP: return SVGA3DOP_NOP;
63 case TGSI_OPCODE_NRM4: return SVGA3DOP_NRM;
64 case TGSI_OPCODE_SSG: return SVGA3DOP_SGN;
65 default:
66 debug_printf("Unknown opcode %u\n", opcode);
67 assert( 0 );
68 return SVGA3DOP_LAST_INST;
69 }
70 }
71
72
73 static unsigned translate_file( unsigned file )
74 {
75 switch (file) {
76 case TGSI_FILE_TEMPORARY: return SVGA3DREG_TEMP;
77 case TGSI_FILE_INPUT: return SVGA3DREG_INPUT;
78 case TGSI_FILE_OUTPUT: return SVGA3DREG_OUTPUT; /* VS3.0+ only */
79 case TGSI_FILE_IMMEDIATE: return SVGA3DREG_CONST;
80 case TGSI_FILE_CONSTANT: return SVGA3DREG_CONST;
81 case TGSI_FILE_SAMPLER: return SVGA3DREG_SAMPLER;
82 case TGSI_FILE_ADDRESS: return SVGA3DREG_ADDR;
83 default:
84 assert( 0 );
85 return SVGA3DREG_TEMP;
86 }
87 }
88
89
90
91
92
93
94 static SVGA3dShaderDestToken
95 translate_dst_register( struct svga_shader_emitter *emit,
96 const struct tgsi_full_instruction *insn,
97 unsigned idx )
98 {
99 const struct tgsi_full_dst_register *reg = &insn->FullDstRegisters[idx];
100 SVGA3dShaderDestToken dest;
101
102 switch (reg->DstRegister.File) {
103 case TGSI_FILE_OUTPUT:
104 /* Output registers encode semantic information in their names.
105 * Look the register up in the table built at declaration time:
106 */
107 dest = emit->output_map[reg->DstRegister.Index];
108 break;
109
110 default:
111 dest = dst_register( translate_file( reg->DstRegister.File ),
112 reg->DstRegister.Index );
113 break;
114 }
115
116 dest.mask = reg->DstRegister.WriteMask;
117
118 if (insn->Instruction.Saturate)
119 dest.dstMod = SVGA3DDSTMOD_SATURATE;
120
121 return dest;
122 }
123
124
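/* Compose a swizzle on top of the swizzle already present in src: each of
 * x/y/z/w picks one of the components that src currently selects. For
 * example, if src already swizzles .yzwx, then swizzle(src, 0, 0, 0, 0)
 * (i.e. scalar(src, 0)) yields .yyyy of the underlying register.
 */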
125 static struct src_register
126 swizzle( struct src_register src,
127 int x,
128 int y,
129 int z,
130 int w )
131 {
132 x = (src.base.swizzle >> (x * 2)) & 0x3;
133 y = (src.base.swizzle >> (y * 2)) & 0x3;
134 z = (src.base.swizzle >> (z * 2)) & 0x3;
135 w = (src.base.swizzle >> (w * 2)) & 0x3;
136
137 src.base.swizzle = TRANSLATE_SWIZZLE(x,y,z,w);
138
139 return src;
140 }
141
142 static struct src_register
143 scalar( struct src_register src,
144 int comp )
145 {
146 return swizzle( src, comp, comp, comp, comp );
147 }
148
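/* ARL bookkeeping. The arl_consts[] table (built up elsewhere from the
 * shader's declarations/instructions) records, per ARL, a constant bias
 * that is subtracted from relatively-addressed constant indices in
 * translate_src_register() and added back to the address itself in
 * emit_fake_arl(). The helpers below query that table for the ARL
 * currently in effect.
 */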
149 static INLINE boolean
150 svga_arl_needs_adjustment( const struct svga_shader_emitter *emit )
151 {
152 int i;
153
154 for (i = 0; i < emit->num_arl_consts; ++i) {
155 if (emit->arl_consts[i].arl_num == emit->current_arl)
156 return TRUE;
157 }
158 return FALSE;
159 }
160
161 static INLINE int
162 svga_arl_adjustment( const struct svga_shader_emitter *emit )
163 {
164 int i;
165
166 for (i = 0; i < emit->num_arl_consts; ++i) {
167 if (emit->arl_consts[i].arl_num == emit->current_arl)
168 return emit->arl_consts[i].number;
169 }
170 return 0;
171 }
172
173 static struct src_register
174 translate_src_register( const struct svga_shader_emitter *emit,
175 const struct tgsi_full_src_register *reg )
176 {
177 struct src_register src;
178
179 switch (reg->SrcRegister.File) {
180 case TGSI_FILE_INPUT:
181 /* Input registers are referred to by their semantic name rather
182 * than by index. Use the mapping built up from the decls:
183 */
184 src = emit->input_map[reg->SrcRegister.Index];
185 break;
186
187 case TGSI_FILE_IMMEDIATE:
188 /* Immediates are appended after TGSI constants in the D3D
189 * constant buffer.
190 */
191 src = src_register( translate_file( reg->SrcRegister.File ),
192 reg->SrcRegister.Index +
193 emit->imm_start );
194 break;
195
196 default:
197 src = src_register( translate_file( reg->SrcRegister.File ),
198 reg->SrcRegister.Index );
199
200 break;
201 }
202
203 /* Indirect addressing (for constant buffer lookups only)
204 */
205 if (reg->SrcRegister.Indirect)
206 {
207 /* we shift the offset towards the minimum */
208 if (svga_arl_needs_adjustment( emit )) {
209 src.base.num -= svga_arl_adjustment( emit );
210 }
211 src.base.relAddr = 1;
212
213 /* Not really sure what should go in the second token:
214 */
215 src.indirect = src_token( SVGA3DREG_ADDR,
216 reg->SrcRegisterInd.Index );
217
218 src.indirect.swizzle = SWIZZLE_XXXX;
219 }
220
221 src = swizzle( src,
222 reg->SrcRegister.SwizzleX,
223 reg->SrcRegister.SwizzleY,
224 reg->SrcRegister.SwizzleZ,
225 reg->SrcRegister.SwizzleW );
226
227 /* src.mod isn't a bitfield, unfortunately:
228 * See tgsi_util_get_full_src_register_sign_mode for implementation details.
229 */
230 if (reg->SrcRegisterExtMod.Absolute) {
231 if (reg->SrcRegisterExtMod.Negate)
232 src.base.srcMod = SVGA3DSRCMOD_ABSNEG;
233 else
234 src.base.srcMod = SVGA3DSRCMOD_ABS;
235 }
236 else {
237 if (reg->SrcRegister.Negate != reg->SrcRegisterExtMod.Negate)
238 src.base.srcMod = SVGA3DSRCMOD_NEG;
239 else
240 src.base.srcMod = SVGA3DSRCMOD_NONE;
241 }
242
243 return src;
244 }
245
246
247 /*
248 * Allocate a new temporary register.
249 */
250 static INLINE SVGA3dShaderDestToken
251 get_temp( struct svga_shader_emitter *emit )
252 {
253 int i = emit->nr_hw_temp + emit->internal_temp_count++;
254
255 return dst_register( SVGA3DREG_TEMP, i );
256 }
257
258 /* Release a single temp. Currently only effective if it was the last
259 * allocated temp, otherwise release will be delayed until the next
260 * call to reset_temp_regs().
261 */
262 static INLINE void
263 release_temp( struct svga_shader_emitter *emit,
264 SVGA3dShaderDestToken temp )
265 {
266 if (temp.num == emit->internal_temp_count - 1)
267 emit->internal_temp_count--;
268 }
269
270 static void reset_temp_regs( struct svga_shader_emitter *emit )
271 {
272 emit->internal_temp_count = 0;
273 }
274
275
276 static boolean submit_op0( struct svga_shader_emitter *emit,
277 SVGA3dShaderInstToken inst,
278 SVGA3dShaderDestToken dest )
279 {
280 return (emit_instruction( emit, inst ) &&
281 emit_dst( emit, dest ));
282 }
283
284 static boolean submit_op1( struct svga_shader_emitter *emit,
285 SVGA3dShaderInstToken inst,
286 SVGA3dShaderDestToken dest,
287 struct src_register src0 )
288 {
289 return emit_op1( emit, inst, dest, src0 );
290 }
291
292
293 /* SVGA shaders may not refer to >1 constant register in a single
294 * instruction. This function checks for that usage and inserts a
295 * move to temporary if detected.
296 *
297 * The same applies to input registers -- at most a single input
298 * register may be read by any instruction.
299 */
300 static boolean submit_op2( struct svga_shader_emitter *emit,
301 SVGA3dShaderInstToken inst,
302 SVGA3dShaderDestToken dest,
303 struct src_register src0,
304 struct src_register src1 )
305 {
306 SVGA3dShaderDestToken temp;
307 SVGA3dShaderRegType type0, type1;
308 boolean need_temp = FALSE;
309
310 temp.value = 0;
311 type0 = SVGA3dShaderGetRegType( src0.base.value );
312 type1 = SVGA3dShaderGetRegType( src1.base.value );
313
314 if (type0 == SVGA3DREG_CONST &&
315 type1 == SVGA3DREG_CONST &&
316 src0.base.num != src1.base.num)
317 need_temp = TRUE;
318
319 if (type0 == SVGA3DREG_INPUT &&
320 type1 == SVGA3DREG_INPUT &&
321 src0.base.num != src1.base.num)
322 need_temp = TRUE;
323
324 if (need_temp)
325 {
326 temp = get_temp( emit );
327
328 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), temp, src0 ))
329 return FALSE;
330
331 src0 = src( temp );
332 }
333
334 if (!emit_op2( emit, inst, dest, src0, src1 ))
335 return FALSE;
336
337 if (need_temp)
338 release_temp( emit, temp );
339
340 return TRUE;
341 }
342
343
344 /* SVGA shaders may not refer to >1 constant register in a single
345 * instruction. This function checks for that usage and inserts a
346 * move to temporary if detected.
347 */
348 static boolean submit_op3( struct svga_shader_emitter *emit,
349 SVGA3dShaderInstToken inst,
350 SVGA3dShaderDestToken dest,
351 struct src_register src0,
352 struct src_register src1,
353 struct src_register src2 )
354 {
355 SVGA3dShaderDestToken temp0;
356 SVGA3dShaderDestToken temp1;
357 boolean need_temp0 = FALSE;
358 boolean need_temp1 = FALSE;
359 SVGA3dShaderRegType type0, type1, type2;
360
361 temp0.value = 0;
362 temp1.value = 0;
363 type0 = SVGA3dShaderGetRegType( src0.base.value );
364 type1 = SVGA3dShaderGetRegType( src1.base.value );
365 type2 = SVGA3dShaderGetRegType( src2.base.value );
366
367 if (inst.op != SVGA3DOP_SINCOS) {
368 if (type0 == SVGA3DREG_CONST &&
369 ((type1 == SVGA3DREG_CONST && src0.base.num != src1.base.num) ||
370 (type2 == SVGA3DREG_CONST && src0.base.num != src2.base.num)))
371 need_temp0 = TRUE;
372
373 if (type1 == SVGA3DREG_CONST &&
374 (type2 == SVGA3DREG_CONST && src1.base.num != src2.base.num))
375 need_temp1 = TRUE;
376 }
377
378 if (type0 == SVGA3DREG_INPUT &&
379 ((type1 == SVGA3DREG_INPUT && src0.base.num != src1.base.num) ||
380 (type2 == SVGA3DREG_INPUT && src0.base.num != src2.base.num)))
381 need_temp0 = TRUE;
382
383 if (type1 == SVGA3DREG_INPUT &&
384 (type2 == SVGA3DREG_INPUT && src1.base.num != src2.base.num))
385 need_temp1 = TRUE;
386
387 if (need_temp0)
388 {
389 temp0 = get_temp( emit );
390
391 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), temp0, src0 ))
392 return FALSE;
393
394 src0 = src( temp0 );
395 }
396
397 if (need_temp1)
398 {
399 temp1 = get_temp( emit );
400
401 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), temp1, src1 ))
402 return FALSE;
403
404 src1 = src( temp1 );
405 }
406
407 if (!emit_op3( emit, inst, dest, src0, src1, src2 ))
408 return FALSE;
409
410 if (need_temp1)
411 release_temp( emit, temp1 );
412 if (need_temp0)
413 release_temp( emit, temp0 );
414 return TRUE;
415 }
416
417
418 static boolean emit_def_const( struct svga_shader_emitter *emit,
419 SVGA3dShaderConstType type,
420 unsigned idx,
421 float a,
422 float b,
423 float c,
424 float d )
425 {
426 SVGA3DOpDefArgs def;
427 SVGA3dShaderInstToken opcode;
428
429 switch (type) {
430 case SVGA3D_CONST_TYPE_FLOAT:
431 opcode = inst_token( SVGA3DOP_DEF );
432 def.dst = dst_register( SVGA3DREG_CONST, idx );
433 def.constValues[0] = a;
434 def.constValues[1] = b;
435 def.constValues[2] = c;
436 def.constValues[3] = d;
437 break;
438 case SVGA3D_CONST_TYPE_INT:
439 opcode = inst_token( SVGA3DOP_DEFI );
440 def.dst = dst_register( SVGA3DREG_CONSTINT, idx );
441 def.constIValues[0] = (int)a;
442 def.constIValues[1] = (int)b;
443 def.constIValues[2] = (int)c;
444 def.constIValues[3] = (int)d;
445 break;
446 default:
447 assert(0);
448 break;
449 }
450
451 if (!emit_instruction(emit, opcode) ||
452 !svga_shader_emit_dwords( emit, def.values, Elements(def.values)))
453 return FALSE;
454
455 return TRUE;
456 }
457
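/* Define a float constant holding {0, 0, 0, 1}. Later code uses its .x
 * as a ready-made 0.0 and its .w as 1.0 (see get_zero_immediate()).
 */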
458 static INLINE boolean
459 create_zero_immediate( struct svga_shader_emitter *emit )
460 {
461 unsigned idx = emit->nr_hw_const++;
462
463 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
464 idx, 0, 0, 0, 1 ))
465 return FALSE;
466
467 emit->zero_immediate_idx = idx;
468 emit->created_zero_immediate = TRUE;
469
470 return TRUE;
471 }
472
473 static INLINE boolean
474 create_loop_const( struct svga_shader_emitter *emit )
475 {
476 unsigned idx = emit->nr_hw_const++;
477
478 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_INT, idx,
479 255, /* iteration count */
480 0, /* initial value */
481 1, /* step size */
482 0 /* not used, must be 0 */))
483 return FALSE;
484
485 emit->loop_const_idx = idx;
486 emit->created_loop_const = TRUE;
487
488 return TRUE;
489 }
490
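/* Define the two helper constant vectors that the SM2.0 SINCOS macro-op
 * consumes as extra operands (these look like the usual D3D9 sin/cos
 * series coefficients); see do_emit_sincos() for the non-SM3.0 path.
 */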
491 static INLINE boolean
492 create_sincos_consts( struct svga_shader_emitter *emit )
493 {
494 unsigned idx = emit->nr_hw_const++;
495
496 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
497 -1.5500992e-006f,
498 -2.1701389e-005f,
499 0.0026041667f,
500 0.00026041668f ))
501 return FALSE;
502
503 emit->sincos_consts_idx = idx;
504 idx = emit->nr_hw_const++;
505
506 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
507 -0.020833334f,
508 -0.12500000f,
509 1.0f,
510 0.50000000f ))
511 return FALSE;
512
513 emit->created_sincos_consts = TRUE;
514
515 return TRUE;
516 }
517
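/* Pack the recorded ARL bias values four at a time into float constants,
 * remembering for each entry which constant index and which component
 * (swizzle) it ended up in, so get_fake_arl_const() can find it again.
 */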
518 static INLINE boolean
519 create_arl_consts( struct svga_shader_emitter *emit )
520 {
521 int i;
522
523 for (i = 0; i < emit->num_arl_consts; i += 4) {
524 int j;
525 unsigned idx = emit->nr_hw_const++;
526 float vals[4];
527 for (j = 0; j < 4 && (j + i) < emit->num_arl_consts; ++j) {
528 vals[j] = emit->arl_consts[i + j].number;
529 emit->arl_consts[i + j].idx = idx;
530 switch (j) {
531 case 0:
532 emit->arl_consts[i + 0].swizzle = TGSI_SWIZZLE_X;
533 break;
534 case 1:
535 emit->arl_consts[i + 1].swizzle = TGSI_SWIZZLE_Y;
536 break;
537 case 2:
538 emit->arl_consts[i + 2].swizzle = TGSI_SWIZZLE_Z;
539 break;
540 case 3:
541 emit->arl_consts[i + 3].swizzle = TGSI_SWIZZLE_W;
542 break;
543 }
544 }
545 while (j < 4)
546 vals[j++] = 0;
547
548 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT, idx,
549 vals[0], vals[1],
550 vals[2], vals[3]))
551 return FALSE;
552 }
553
554 return TRUE;
555 }
556
557 static INLINE struct src_register
558 get_vface( struct svga_shader_emitter *emit )
559 {
560 assert(emit->emitted_vface);
561 return src_register(SVGA3DREG_MISCTYPE,
562 SVGA3DMISCREG_FACE);
563 }
564
565 /* returns {0, 0, 0, 1} immediate */
566 static INLINE struct src_register
567 get_zero_immediate( struct svga_shader_emitter *emit )
568 {
569 assert(emit->created_zero_immediate);
570 assert(emit->zero_immediate_idx >= 0);
571 return src_register( SVGA3DREG_CONST,
572 emit->zero_immediate_idx );
573 }
574
575 /* returns the loop const */
576 static INLINE struct src_register
577 get_loop_const( struct svga_shader_emitter *emit )
578 {
579 assert(emit->created_loop_const);
580 assert(emit->loop_const_idx >= 0);
581 return src_register( SVGA3DREG_CONSTINT,
582 emit->loop_const_idx );
583 }
584
585 /* returns a sincos const */
586 static INLINE struct src_register
587 get_sincos_const( struct svga_shader_emitter *emit,
588 unsigned index )
589 {
590 assert(emit->created_sincos_consts);
591 assert(emit->sincos_consts_idx >= 0);
592 assert(index == 0 || index == 1);
593 return src_register( SVGA3DREG_CONST,
594 emit->sincos_consts_idx + index );
595 }
596
597 static INLINE struct src_register
598 get_fake_arl_const( struct svga_shader_emitter *emit )
599 {
600 struct src_register reg;
601 int idx = 0, swizzle = 0, i;
602
603 for (i = 0; i < emit->num_arl_consts; ++ i) {
604 if (emit->arl_consts[i].arl_num == emit->current_arl) {
605 idx = emit->arl_consts[i].idx;
606 swizzle = emit->arl_consts[i].swizzle;
607 }
608 }
609
610 reg = src_register( SVGA3DREG_CONST, idx );
611 return scalar(reg, swizzle);
612 }
613
614 static INLINE struct src_register
615 get_tex_dimensions( struct svga_shader_emitter *emit, int sampler_num )
616 {
617 int idx;
618 struct src_register reg;
619
620 /* the width/height indexes start right after constants */
621 idx = emit->key.fkey.tex[sampler_num].width_height_idx +
622 emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
623
624 reg = src_register( SVGA3DREG_CONST, idx );
625 return reg;
626 }
627
628 static boolean emit_fake_arl(struct svga_shader_emitter *emit,
629 const struct tgsi_full_instruction *insn)
630 {
631 const struct src_register src0 = translate_src_register(
632 emit, &insn->FullSrcRegisters[0] );
633 struct src_register src1 = get_fake_arl_const( emit );
634 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
635 SVGA3dShaderDestToken tmp = get_temp( emit );
636
637 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), tmp, src0))
638 return FALSE;
639
640 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), tmp, src( tmp ),
641 src1))
642 return FALSE;
643
644 /* replicate the original swizzle */
645 src1 = src(tmp);
646 src1.base.swizzle = src0.base.swizzle;
647
648 return submit_op1( emit, inst_token( SVGA3DOP_MOVA ),
649 dst, src1 );
650 }
651
652 static boolean emit_if(struct svga_shader_emitter *emit,
653 const struct tgsi_full_instruction *insn)
654 {
655 const struct src_register src = translate_src_register(
656 emit, &insn->FullSrcRegisters[0] );
657 struct src_register zero = get_zero_immediate( emit );
658 SVGA3dShaderInstToken if_token = inst_token( SVGA3DOP_IFC );
659
660 if_token.control = SVGA3DOPCOMPC_NE;
661 zero = scalar(zero, TGSI_SWIZZLE_X);
662
663 return (emit_instruction( emit, if_token ) &&
664 emit_src( emit, src ) &&
665 emit_src( emit, zero ) );
666 }
667
668 static boolean emit_endif(struct svga_shader_emitter *emit,
669 const struct tgsi_full_instruction *insn)
670 {
671 return (emit_instruction( emit,
672 inst_token( SVGA3DOP_ENDIF )));
673 }
674
675 static boolean emit_else(struct svga_shader_emitter *emit,
676 const struct tgsi_full_instruction *insn)
677 {
678 return (emit_instruction( emit,
679 inst_token( SVGA3DOP_ELSE )));
680 }
681
682 /* Translate the following TGSI FLR instruction.
683 * FLR DST, SRC
684 * To the following SVGA3D instruction sequence.
685 * FRC TMP, SRC
686 * SUB DST, SRC, TMP
687 */
688 static boolean emit_floor(struct svga_shader_emitter *emit,
689 const struct tgsi_full_instruction *insn )
690 {
691 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
692 const struct src_register src0 = translate_src_register(
693 emit, &insn->FullSrcRegisters[0] );
694 SVGA3dShaderDestToken temp = get_temp( emit );
695
696 /* FRC TMP, SRC */
697 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ), temp, src0 ))
698 return FALSE;
699
700 /* SUB DST, SRC, TMP */
701 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst, src0,
702 negate( src( temp ) ) ))
703 return FALSE;
704
705 return TRUE;
706 }
707
708
709 /* Translate the following TGSI CMP instruction.
710 * CMP DST, SRC0, SRC1, SRC2
711 * To the following SVGA3D instruction sequence.
712 * CMP DST, SRC0, SRC2, SRC1
713 */
714 static boolean emit_cmp(struct svga_shader_emitter *emit,
715 const struct tgsi_full_instruction *insn )
716 {
717 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
718 const struct src_register src0 = translate_src_register(
719 emit, &insn->FullSrcRegisters[0] );
720 const struct src_register src1 = translate_src_register(
721 emit, &insn->FullSrcRegisters[1] );
722 const struct src_register src2 = translate_src_register(
723 emit, &insn->FullSrcRegisters[2] );
724
725 /* CMP DST, SRC0, SRC2, SRC1 */
726 return submit_op3( emit, inst_token( SVGA3DOP_CMP ), dst, src0, src2, src1);
727 }
728
729
730
731 /* Translate the following TGSI DIV instruction.
732 * DIV DST.xy, SRC0, SRC1
733 * To the following SVGA3D instruction sequence.
734 * RCP TMP.x, SRC1.xxxx
735 * RCP TMP.y, SRC1.yyyy
736 * MUL DST.xy, SRC0, TMP
737 */
738 static boolean emit_div(struct svga_shader_emitter *emit,
739 const struct tgsi_full_instruction *insn )
740 {
741 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
742 const struct src_register src0 = translate_src_register(
743 emit, &insn->FullSrcRegisters[0] );
744 const struct src_register src1 = translate_src_register(
745 emit, &insn->FullSrcRegisters[1] );
746 SVGA3dShaderDestToken temp = get_temp( emit );
747 int i;
748
749 /* For each enabled element, perform a RCP instruction. Note that
750 * RCP is scalar in SVGA3D:
751 */
752 for (i = 0; i < 4; i++) {
753 unsigned channel = 1 << i;
754 if (dst.mask & channel) {
755 /* RCP TMP.?, SRC1.???? */
756 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
757 writemask(temp, channel),
758 scalar(src1, i) ))
759 return FALSE;
760 }
761 }
762
763 /* Then multiply them out with a single mul:
764 *
765 * MUL DST, SRC0, TMP
766 */
767 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst, src0,
768 src( temp ) ))
769 return FALSE;
770
771 return TRUE;
772 }
773
774 /* Translate the following TGSI DP2 instruction.
775 * DP2 DST, SRC1, SRC2
776 * To the following SVGA3D instruction sequence.
777 * MUL TMP, SRC1, SRC2
778 * ADD DST, TMP.xxxx, TMP.yyyy
779 */
780 static boolean emit_dp2(struct svga_shader_emitter *emit,
781 const struct tgsi_full_instruction *insn )
782 {
783 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
784 const struct src_register src0 = translate_src_register(
785 emit, &insn->FullSrcRegisters[0] );
786 const struct src_register src1 = translate_src_register(
787 emit, &insn->FullSrcRegisters[1] );
788 SVGA3dShaderDestToken temp = get_temp( emit );
789 struct src_register temp_src0, temp_src1;
790
791 /* MUL TMP, SRC1, SRC2 */
792 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), temp, src0, src1 ))
793 return FALSE;
794
795 temp_src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
796 temp_src1 = scalar(src( temp ), TGSI_SWIZZLE_Y);
797
798 /* ADD DST, TMP.xxxx, TMP.yyyy */
799 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
800 temp_src0, temp_src1 ))
801 return FALSE;
802
803 return TRUE;
804 }
805
806
807 /* Translate the following TGSI DPH instruction.
808 * DPH DST, SRC1, SRC2
809 * To the following SVGA3D instruction sequence.
810 * DP3 TMP, SRC1, SRC2
811 * ADD DST, TMP, SRC2.wwww
812 */
813 static boolean emit_dph(struct svga_shader_emitter *emit,
814 const struct tgsi_full_instruction *insn )
815 {
816 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
817 const struct src_register src0 = translate_src_register(
818 emit, &insn->FullSrcRegisters[0] );
819 struct src_register src1 = translate_src_register(
820 emit, &insn->FullSrcRegisters[1] );
821 SVGA3dShaderDestToken temp = get_temp( emit );
822
823 /* DP3 TMP, SRC1, SRC2 */
824 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src1 ))
825 return FALSE;
826
827 src1 = scalar(src1, TGSI_SWIZZLE_W);
828
829 /* ADD DST, TMP, SRC2.wwww */
830 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
831 src( temp ), src1 ))
832 return FALSE;
833
834 return TRUE;
835 }
836
837 /* Translate the following TGSI NRM instruction.
838 * NRM DST, SRC
839 * To the following SVGA3D instruction sequence.
840 * DP3 TMP, SRC, SRC
841 * RSQ TMP, TMP
842 * MUL DST, SRC, TMP
843 */
844 static boolean emit_nrm(struct svga_shader_emitter *emit,
845 const struct tgsi_full_instruction *insn )
846 {
847 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
848 const struct src_register src0 = translate_src_register(
849 emit, &insn->FullSrcRegisters[0] );
850 SVGA3dShaderDestToken temp = get_temp( emit );
851
852 /* DP3 TMP, SRC, SRC */
853 if (!submit_op2( emit, inst_token( SVGA3DOP_DP3 ), temp, src0, src0 ))
854 return FALSE;
855
856 /* RSQ TMP, TMP */
857 if (!submit_op1( emit, inst_token( SVGA3DOP_RSQ ), temp, src( temp )))
858 return FALSE;
859
860 /* MUL DST, SRC, TMP */
861 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ), dst,
862 src0, src( temp )))
863 return FALSE;
864
865 return TRUE;
866
867 }
868
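/* Emit SINCOS on the .x component of src0. On SM3.0 this is a plain
 * one-operand instruction; on SM2.0 the macro expansion additionally
 * requires the two constants defined in create_sincos_consts().
 */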
869 static boolean do_emit_sincos(struct svga_shader_emitter *emit,
870 SVGA3dShaderDestToken dst,
871 struct src_register src0)
872 {
873 src0 = scalar(src0, TGSI_SWIZZLE_X);
874
875 if (emit->use_sm30) {
876 return submit_op1( emit, inst_token( SVGA3DOP_SINCOS ),
877 dst, src0 );
878 } else {
879 struct src_register const1 = get_sincos_const( emit, 0 );
880 struct src_register const2 = get_sincos_const( emit, 1 );
881
882 return submit_op3( emit, inst_token( SVGA3DOP_SINCOS ),
883 dst, src0, const1, const2 );
884 }
885 }
886
887 static boolean emit_sincos(struct svga_shader_emitter *emit,
888 const struct tgsi_full_instruction *insn)
889 {
890 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
891 struct src_register src0 = translate_src_register(
892 emit, &insn->FullSrcRegisters[0] );
893 SVGA3dShaderDestToken temp = get_temp( emit );
894
895 /* SCS TMP SRC */
896 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_XY), src0 ))
897 return FALSE;
898
899 /* MOV DST TMP */
900 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src( temp ) ))
901 return FALSE;
902
903 return TRUE;
904 }
905
906 /*
907 * SCS TMP SRC
908 * MOV DST TMP.yyyy
909 */
910 static boolean emit_sin(struct svga_shader_emitter *emit,
911 const struct tgsi_full_instruction *insn )
912 {
913 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
914 struct src_register src0 = translate_src_register(
915 emit, &insn->FullSrcRegisters[0] );
916 SVGA3dShaderDestToken temp = get_temp( emit );
917
918 /* SCS TMP SRC */
919 if (!do_emit_sincos(emit, writemask(temp, TGSI_WRITEMASK_Y), src0))
920 return FALSE;
921
922 src0 = scalar(src( temp ), TGSI_SWIZZLE_Y);
923
924 /* MOV DST TMP.yyyy */
925 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
926 return FALSE;
927
928 return TRUE;
929 }
930
931 /*
932 * SCS TMP SRC
933 * MOV DST TMP.xxxx
934 */
935 static boolean emit_cos(struct svga_shader_emitter *emit,
936 const struct tgsi_full_instruction *insn )
937 {
938 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
939 struct src_register src0 = translate_src_register(
940 emit, &insn->FullSrcRegisters[0] );
941 SVGA3dShaderDestToken temp = get_temp( emit );
942
943 /* SCS TMP SRC */
944 if (!do_emit_sincos( emit, writemask(temp, TGSI_WRITEMASK_X), src0 ))
945 return FALSE;
946
947 src0 = scalar(src( temp ), TGSI_SWIZZLE_X);
948
949 /* MOV DST TMP.xxxx */
950 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src0 ))
951 return FALSE;
952
953 return TRUE;
954 }
955
956
957 /*
958 * SUB is implemented as: ADD DST, SRC0, negate(SRC1)
959 */
960 static boolean emit_sub(struct svga_shader_emitter *emit,
961 const struct tgsi_full_instruction *insn)
962 {
963 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
964 struct src_register src0 = translate_src_register(
965 emit, &insn->FullSrcRegisters[0] );
966 struct src_register src1 = translate_src_register(
967 emit, &insn->FullSrcRegisters[1] );
968
969 src1 = negate(src1);
970
971 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ), dst,
972 src0, src1 ))
973 return FALSE;
974
975 return TRUE;
976 }
977
978
979 static boolean emit_kil(struct svga_shader_emitter *emit,
980 const struct tgsi_full_instruction *insn )
981 {
982 SVGA3dShaderInstToken inst;
983 const struct tgsi_full_src_register *reg = &insn->FullSrcRegisters[0];
984 struct src_register src0;
985
986 inst = inst_token( SVGA3DOP_TEXKILL );
987 src0 = translate_src_register( emit, reg );
988
989 if (reg->SrcRegisterExtMod.Absolute ||
990 reg->SrcRegister.Negate != reg->SrcRegisterExtMod.Negate ||
991 reg->SrcRegister.Indirect ||
992 reg->SrcRegister.SwizzleX != 0 ||
993 reg->SrcRegister.SwizzleY != 1 ||
994 reg->SrcRegister.SwizzleZ != 2 ||
995 reg->SrcRegister.File != TGSI_FILE_TEMPORARY)
996 {
997 SVGA3dShaderDestToken temp = get_temp( emit );
998
999 submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp, src0 );
1000 src0 = src( temp );
1001 }
1002
1003 return submit_op0( emit, inst, dst(src0) );
1004 }
1005
1006
1007 /* The Mesa state tracker always emits KILP as an unconditional
1008 * KIL. */
1009 static boolean emit_kilp(struct svga_shader_emitter *emit,
1010 const struct tgsi_full_instruction *insn )
1011 {
1012 SVGA3dShaderInstToken inst;
1013 SVGA3dShaderDestToken temp;
1014 struct src_register one = get_zero_immediate( emit );
1015
1016 inst = inst_token( SVGA3DOP_TEXKILL );
1017 one = scalar( one, TGSI_SWIZZLE_W );
1018
1019 /* TEXKILL doesn't allow negation on its operand, so first move
1020 * -1 (the negated .w of the {0,0,0,1} immediate) into a temp register */
1021 temp = get_temp( emit );
1022 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), temp,
1023 negate( one ) ))
1024 return FALSE;
1025
1026 return submit_op0( emit, inst, temp );
1027 }
1028
1029 /* Implement conditionals by setting the predicate register with SETP,
1030 * initializing the destination reg to 'fail', then doing a predicated
1031 * move of 'pass' into it:
1032 *
1033 * SETP src0, cmp, src1 -- do this first to avoid aliasing problems.
1034 * MOV dst, fail
1035 * MOV dst, pass, p0
1036 */
1037 static boolean
1038 emit_conditional(struct svga_shader_emitter *emit,
1039 unsigned compare_func,
1040 SVGA3dShaderDestToken dst,
1041 struct src_register src0,
1042 struct src_register src1,
1043 struct src_register pass,
1044 struct src_register fail)
1045 {
1046 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1047 SVGA3dShaderInstToken setp_token, mov_token;
1048 setp_token = inst_token( SVGA3DOP_SETP );
1049
1050 switch (compare_func) {
1051 case PIPE_FUNC_NEVER:
1052 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1053 dst, fail );
1054 break;
1055 case PIPE_FUNC_LESS:
1056 setp_token.control = SVGA3DOPCOMP_LT;
1057 break;
1058 case PIPE_FUNC_EQUAL:
1059 setp_token.control = SVGA3DOPCOMP_EQ;
1060 break;
1061 case PIPE_FUNC_LEQUAL:
1062 setp_token.control = SVGA3DOPCOMP_LE;
1063 break;
1064 case PIPE_FUNC_GREATER:
1065 setp_token.control = SVGA3DOPCOMP_GT;
1066 break;
1067 case PIPE_FUNC_NOTEQUAL:
1068 setp_token.control = SVGA3DOPCOMPC_NE;
1069 break;
1070 case PIPE_FUNC_GEQUAL:
1071 setp_token.control = SVGA3DOPCOMP_GE;
1072 break;
1073 case PIPE_FUNC_ALWAYS:
1074 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1075 dst, pass );
1076 break;
1077 }
1078
1079 /* SETP src0, COMPOP, src1 */
1080 if (!submit_op2( emit, setp_token, pred_reg,
1081 src0, src1 ))
1082 return FALSE;
1083
1084 mov_token = inst_token( SVGA3DOP_MOV );
1085
1086 /* MOV dst, fail */
1087 if (!submit_op1( emit, mov_token, dst,
1088 fail ))
1089 return FALSE;
1090
1091 /* MOV dst, pass (predicated)
1092 *
1093 * Note that the predicate reg (and possible modifiers) is passed
1094 * as the first source argument.
1095 */
1096 mov_token.predicated = 1;
1097 if (!submit_op2( emit, mov_token, dst,
1098 src( pred_reg ), pass ))
1099 return FALSE;
1100
1101 return TRUE;
1102 }
1103
1104
1105 static boolean
1106 emit_select(struct svga_shader_emitter *emit,
1107 unsigned compare_func,
1108 SVGA3dShaderDestToken dst,
1109 struct src_register src0,
1110 struct src_register src1 )
1111 {
1112 /* There are some SVGA instructions which implement some selects
1113 * directly, but they are only available in the vertex shader.
1114 */
1115 if (emit->unit == PIPE_SHADER_VERTEX) {
1116 switch (compare_func) {
1117 case PIPE_FUNC_GEQUAL:
1118 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src0, src1 );
1119 case PIPE_FUNC_LEQUAL:
1120 return submit_op2( emit, inst_token( SVGA3DOP_SGE ), dst, src1, src0 );
1121 case PIPE_FUNC_GREATER:
1122 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src1, src0 );
1123 case PIPE_FUNC_LESS:
1124 return submit_op2( emit, inst_token( SVGA3DOP_SLT ), dst, src0, src1 );
1125 default:
1126 break;
1127 }
1128 }
1129
1130
1131 /* Otherwise, need to use the setp approach:
1132 */
1133 {
1134 struct src_register one, zero;
1135 /* zero immediate is 0,0,0,1 */
1136 zero = get_zero_immediate( emit );
1137 one = scalar( zero, TGSI_SWIZZLE_W );
1138 zero = scalar( zero, TGSI_SWIZZLE_X );
1139
1140 return emit_conditional(
1141 emit,
1142 compare_func,
1143 dst,
1144 src0,
1145 src1,
1146 one, zero);
1147 }
1148 }
1149
1150
1151 static boolean emit_select_op(struct svga_shader_emitter *emit,
1152 unsigned compare,
1153 const struct tgsi_full_instruction *insn)
1154 {
1155 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1156 struct src_register src0 = translate_src_register(
1157 emit, &insn->FullSrcRegisters[0] );
1158 struct src_register src1 = translate_src_register(
1159 emit, &insn->FullSrcRegisters[1] );
1160
1161 return emit_select( emit, compare, dst, src0, src1 );
1162 }
1163
1164
1165 /* Translate texture instructions to SVGA3D representation.
1166 */
1167 static boolean emit_tex2(struct svga_shader_emitter *emit,
1168 const struct tgsi_full_instruction *insn,
1169 SVGA3dShaderDestToken dst )
1170 {
1171 SVGA3dShaderInstToken inst;
1172 struct src_register src0;
1173 struct src_register src1;
1174
1175 inst.value = 0;
1176 inst.op = SVGA3DOP_TEX;
1177
1178 switch (insn->Instruction.Opcode) {
1179 case TGSI_OPCODE_TEX:
1180 break;
1181 case TGSI_OPCODE_TXP:
1182 inst.control = SVGA3DOPCONT_PROJECT;
1183 break;
1184 case TGSI_OPCODE_TXB:
1185 inst.control = SVGA3DOPCONT_BIAS;
1186 break;
1187 default:
1188 assert(0);
1189 return FALSE;
1190 }
1191
1192 src0 = translate_src_register( emit, &insn->FullSrcRegisters[0] );
1193 src1 = translate_src_register( emit, &insn->FullSrcRegisters[1] );
1194
1195 if (emit->key.fkey.tex[src1.base.num].unnormalized) {
1196 struct src_register wh = get_tex_dimensions( emit, src1.base.num );
1197 SVGA3dShaderDestToken tmp = get_temp( emit );
1198
1199 /* MUL tmp, SRC0, WH */
1200 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1201 tmp, src0, wh ))
1202 return FALSE;
1203 src0 = src( tmp );
1204 }
1205
1206 return submit_op2( emit, inst, dst, src0, src1 );
1207 }
1208
1209
1210
1211
1212 /* Translate texture instructions to SVGA3D representation.
1213 */
1214 static boolean emit_tex3(struct svga_shader_emitter *emit,
1215 const struct tgsi_full_instruction *insn,
1216 SVGA3dShaderDestToken dst )
1217 {
1218 SVGA3dShaderInstToken inst;
1219 struct src_register src0;
1220 struct src_register src1;
1221 struct src_register src2;
1222
1223 inst.value = 0;
1224
1225 switch (insn->Instruction.Opcode) {
1226 case TGSI_OPCODE_TXD:
1227 inst.op = SVGA3DOP_TEXLDD;
1228 break;
1229 case TGSI_OPCODE_TXL:
1230 inst.op = SVGA3DOP_TEXLDL;
1231 break;
1232 }
1233
1234 src0 = translate_src_register( emit, &insn->FullSrcRegisters[0] );
1235 src1 = translate_src_register( emit, &insn->FullSrcRegisters[1] );
1236 src2 = translate_src_register( emit, &insn->FullSrcRegisters[2] );
1237
1238 return submit_op3( emit, inst, dst, src0, src1, src2 );
1239 }
1240
1241
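/* Top-level texture sampling. Dispatches to emit_tex2()/emit_tex3() for
 * the actual sample, then applies shadow-compare post-processing and, on
 * the non-SM3.0 path, a final MOV when the destination writemask is
 * partial (that path samples into a full temporary first).
 */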
1242 static boolean emit_tex(struct svga_shader_emitter *emit,
1243 const struct tgsi_full_instruction *insn )
1244 {
1245 SVGA3dShaderDestToken dst =
1246 translate_dst_register( emit, insn, 0 );
1247 struct src_register src0 =
1248 translate_src_register( emit, &insn->FullSrcRegisters[0] );
1249 struct src_register src1 =
1250 translate_src_register( emit, &insn->FullSrcRegisters[1] );
1251
1252 SVGA3dShaderDestToken tex_result;
1253
1254 /* check for shadow samplers */
1255 boolean compare = (emit->key.fkey.tex[src1.base.num].compare_mode ==
1256 PIPE_TEX_COMPARE_R_TO_TEXTURE);
1257
1258
1259 /* If doing compare processing, need to put this value into a
1260 * temporary so it can be used as a source later on.
1261 */
1262 if (compare ||
1263 (!emit->use_sm30 && dst.mask != TGSI_WRITEMASK_XYZW) ) {
1264 tex_result = get_temp( emit );
1265 }
1266 else {
1267 tex_result = dst;
1268 }
1269
1270 switch(insn->Instruction.Opcode) {
1271 case TGSI_OPCODE_TEX:
1272 case TGSI_OPCODE_TXB:
1273 case TGSI_OPCODE_TXP:
1274 if (!emit_tex2( emit, insn, tex_result ))
1275 return FALSE;
1276 break;
1277 case TGSI_OPCODE_TXL:
1278 case TGSI_OPCODE_TXD:
1279 if (!emit_tex3( emit, insn, tex_result ))
1280 return FALSE;
1281 break;
1282 default:
1283 assert(0);
1284 }
1285
1286
1287 if (compare) {
1288 SVGA3dShaderDestToken src0_zdivw = get_temp( emit );
1289 struct src_register tex_src_x = scalar(src(tex_result), TGSI_SWIZZLE_Y);
1290 struct src_register one =
1291 scalar( get_zero_immediate( emit ), TGSI_SWIZZLE_W );
1292
1293 /* Divide texcoord R by Q */
1294 if (!submit_op1( emit, inst_token( SVGA3DOP_RCP ),
1295 src0_zdivw,
1296 scalar(src0, TGSI_SWIZZLE_W) ))
1297 return FALSE;
1298
1299 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1300 src0_zdivw,
1301 scalar(src0, TGSI_SWIZZLE_Z),
1302 src(src0_zdivw) ))
1303 return FALSE;
1304
1305 if (!emit_select(
1306 emit,
1307 emit->key.fkey.tex[src1.base.num].compare_func,
1308 dst,
1309 src(src0_zdivw),
1310 tex_src_x))
1311 return FALSE;
1312
1313 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1314 writemask( dst, TGSI_WRITEMASK_W),
1315 one );
1316 }
1317 else if (!emit->use_sm30 && dst.mask != TGSI_WRITEMASK_XYZW)
1318 {
1319 if (!emit_op1( emit, inst_token( SVGA3DOP_MOV ), dst, src(tex_result) ))
1320 return FALSE;
1321 }
1322
1323 return TRUE;
1324 }
1325
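/* Loops: TGSI's structured loop begin/end are emitted as D3D LOOP/ENDLOOP
 * using the aL counter register and the integer loop constant defined in
 * create_loop_const(); BRK becomes a plain BREAK.
 */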
1326 static boolean emit_bgnloop2( struct svga_shader_emitter *emit,
1327 const struct tgsi_full_instruction *insn )
1328 {
1329 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_LOOP );
1330 struct src_register loop_reg = src_register( SVGA3DREG_LOOP, 0 );
1331 struct src_register const_int = get_loop_const( emit );
1332
1333 return (emit_instruction( emit, inst ) &&
1334 emit_src( emit, loop_reg ) &&
1335 emit_src( emit, const_int ) );
1336 }
1337
1338 static boolean emit_endloop2( struct svga_shader_emitter *emit,
1339 const struct tgsi_full_instruction *insn )
1340 {
1341 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_ENDLOOP );
1342 return emit_instruction( emit, inst );
1343 }
1344
1345 static boolean emit_brk( struct svga_shader_emitter *emit,
1346 const struct tgsi_full_instruction *insn )
1347 {
1348 SVGA3dShaderInstToken inst = inst_token( SVGA3DOP_BREAK );
1349 return emit_instruction( emit, inst );
1350 }
1351
1352 static boolean emit_scalar_op1( struct svga_shader_emitter *emit,
1353 unsigned opcode,
1354 const struct tgsi_full_instruction *insn )
1355 {
1356 SVGA3dShaderInstToken inst;
1357 SVGA3dShaderDestToken dst;
1358 struct src_register src;
1359
1360 inst = inst_token( opcode );
1361 dst = translate_dst_register( emit, insn, 0 );
1362 src = translate_src_register( emit, &insn->FullSrcRegisters[0] );
1363 src = scalar( src, TGSI_SWIZZLE_X );
1364
1365 return submit_op1( emit, inst, dst, src );
1366 }
1367
1368
1369 static boolean emit_simple_instruction(struct svga_shader_emitter *emit,
1370 unsigned opcode,
1371 const struct tgsi_full_instruction *insn )
1372 {
1373 const struct tgsi_full_src_register *src = insn->FullSrcRegisters;
1374 SVGA3dShaderInstToken inst;
1375 SVGA3dShaderDestToken dst;
1376
1377 inst = inst_token( opcode );
1378 dst = translate_dst_register( emit, insn, 0 );
1379
1380 switch (insn->Instruction.NumSrcRegs) {
1381 case 0:
1382 return submit_op0( emit, inst, dst );
1383 case 1:
1384 return submit_op1( emit, inst, dst,
1385 translate_src_register( emit, &src[0] ));
1386 case 2:
1387 return submit_op2( emit, inst, dst,
1388 translate_src_register( emit, &src[0] ),
1389 translate_src_register( emit, &src[1] ) );
1390 case 3:
1391 return submit_op3( emit, inst, dst,
1392 translate_src_register( emit, &src[0] ),
1393 translate_src_register( emit, &src[1] ),
1394 translate_src_register( emit, &src[2] ) );
1395 default:
1396 assert(0);
1397 return FALSE;
1398 }
1399 }
1400
1401 static boolean emit_arl(struct svga_shader_emitter *emit,
1402 const struct tgsi_full_instruction *insn)
1403 {
1404 ++emit->current_arl;
1405 if (svga_arl_needs_adjustment( emit )) {
1406 return emit_fake_arl( emit, insn );
1407 } else {
1408 /* no need to adjust, just emit straight arl */
1409 return emit_simple_instruction(emit, SVGA3DOP_MOVA, insn);
1410 }
1411 }
1412
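/* Return TRUE if src and dst refer to the same register (same register
 * file and same index); several SVGA3D opcodes disallow such aliasing.
 */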
1413 static boolean alias_src_dst( struct src_register src,
1414 SVGA3dShaderDestToken dst )
1415 {
1416 if (src.base.num != dst.num)
1417 return FALSE;
1418
1419 if (SVGA3dShaderGetRegType(dst.value) !=
1420 SVGA3dShaderGetRegType(src.base.value))
1421 return FALSE;
1422
1423 return TRUE;
1424 }
1425
1426 static boolean emit_pow(struct svga_shader_emitter *emit,
1427 const struct tgsi_full_instruction *insn)
1428 {
1429 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1430 struct src_register src0 = translate_src_register(
1431 emit, &insn->FullSrcRegisters[0] );
1432 struct src_register src1 = translate_src_register(
1433 emit, &insn->FullSrcRegisters[1] );
1434 boolean need_tmp = FALSE;
1435
1436 /* POW can only output to a temporary */
1437 if (insn->FullDstRegisters[0].DstRegister.File != TGSI_FILE_TEMPORARY)
1438 need_tmp = TRUE;
1439
1440 /* POW src1 must not be the same register as dst */
1441 if (alias_src_dst( src1, dst ))
1442 need_tmp = TRUE;
1443
1444 /* it's a scalar op */
1445 src0 = scalar( src0, TGSI_SWIZZLE_X );
1446 src1 = scalar( src1, TGSI_SWIZZLE_X );
1447
1448 if (need_tmp) {
1449 SVGA3dShaderDestToken tmp = writemask(get_temp( emit ), TGSI_WRITEMASK_X );
1450
1451 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ), tmp, src0, src1))
1452 return FALSE;
1453
1454 return submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, scalar(src(tmp), 0) );
1455 }
1456 else {
1457 return submit_op2(emit, inst_token( SVGA3DOP_POW ), dst, src0, src1);
1458 }
1459 }
1460
1461 static boolean emit_xpd(struct svga_shader_emitter *emit,
1462 const struct tgsi_full_instruction *insn)
1463 {
1464 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1465 const struct src_register src0 = translate_src_register(
1466 emit, &insn->FullSrcRegisters[0] );
1467 const struct src_register src1 = translate_src_register(
1468 emit, &insn->FullSrcRegisters[1] );
1469 boolean need_dst_tmp = FALSE;
1470
1471 /* XPD can only output to a temporary */
1472 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP)
1473 need_dst_tmp = TRUE;
1474
1475 /* The dst reg must not be the same as src0 or src1 */
1476 if (alias_src_dst(src0, dst) ||
1477 alias_src_dst(src1, dst))
1478 need_dst_tmp = TRUE;
1479
1480 if (need_dst_tmp) {
1481 SVGA3dShaderDestToken tmp = get_temp( emit );
1482
1483 /* Obey DX9 restrictions on mask:
1484 */
1485 tmp.mask = dst.mask & TGSI_WRITEMASK_XYZ;
1486
1487 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), tmp, src0, src1))
1488 return FALSE;
1489
1490 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
1491 return FALSE;
1492 }
1493 else {
1494 if (!submit_op2(emit, inst_token( SVGA3DOP_CRS ), dst, src0, src1))
1495 return FALSE;
1496 }
1497
1498 /* Need to emit 1.0 to dst.w?
1499 */
1500 if (dst.mask & TGSI_WRITEMASK_W) {
1501 struct src_register zero = get_zero_immediate( emit );
1502
1503 if (!submit_op1(emit,
1504 inst_token( SVGA3DOP_MOV ),
1505 writemask(dst, TGSI_WRITEMASK_W),
1506 zero))
1507 return FALSE;
1508 }
1509
1510 return TRUE;
1511 }
1512
1513
1514 static boolean emit_lrp(struct svga_shader_emitter *emit,
1515 const struct tgsi_full_instruction *insn)
1516 {
1517 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1518 SVGA3dShaderDestToken tmp;
1519 const struct src_register src0 = translate_src_register(
1520 emit, &insn->FullSrcRegisters[0] );
1521 const struct src_register src1 = translate_src_register(
1522 emit, &insn->FullSrcRegisters[1] );
1523 const struct src_register src2 = translate_src_register(
1524 emit, &insn->FullSrcRegisters[2] );
1525 boolean need_dst_tmp = FALSE;
1526
1527 /* The dst reg must not be the same as src0 or src2 */
1528 if (alias_src_dst(src0, dst) ||
1529 alias_src_dst(src2, dst))
1530 need_dst_tmp = TRUE;
1531
1532 if (need_dst_tmp) {
1533 tmp = get_temp( emit );
1534 tmp.mask = dst.mask;
1535 }
1536 else {
1537 tmp = dst;
1538 }
1539
1540 if (!submit_op3(emit, inst_token( SVGA3DOP_LRP ), tmp, src0, src1, src2))
1541 return FALSE;
1542
1543 if (need_dst_tmp) {
1544 if (!submit_op1(emit, inst_token( SVGA3DOP_MOV ), dst, src( tmp )))
1545 return FALSE;
1546 }
1547
1548 return TRUE;
1549 }
1550
1551
1552 static boolean emit_dst_insn(struct svga_shader_emitter *emit,
1553 const struct tgsi_full_instruction *insn )
1554 {
1555 if (emit->unit == PIPE_SHADER_VERTEX) {
1556 /* SVGA/DX9 has a DST instruction, but only for vertex shaders:
1557 */
1558 return emit_simple_instruction(emit, SVGA3DOP_DST, insn);
1559 }
1560 else {
1561
1562 /* result[0] = 1 * 1;
1563 * result[1] = a[1] * b[1];
1564 * result[2] = a[2] * 1;
1565 * result[3] = 1 * b[3];
1566 */
1567
1568 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1569 SVGA3dShaderDestToken tmp;
1570 const struct src_register src0 = translate_src_register(
1571 emit, &insn->FullSrcRegisters[0] );
1572 const struct src_register src1 = translate_src_register(
1573 emit, &insn->FullSrcRegisters[1] );
1574 struct src_register zero = get_zero_immediate( emit );
1575 boolean need_tmp = FALSE;
1576
1577 if (SVGA3dShaderGetRegType(dst.value) != SVGA3DREG_TEMP ||
1578 alias_src_dst(src0, dst) ||
1579 alias_src_dst(src1, dst))
1580 need_tmp = TRUE;
1581
1582 if (need_tmp) {
1583 tmp = get_temp( emit );
1584 }
1585 else {
1586 tmp = dst;
1587 }
1588
1589 /* tmp.xw = 1.0
1590 */
1591 if (tmp.mask & TGSI_WRITEMASK_XW) {
1592 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1593 writemask(tmp, TGSI_WRITEMASK_XW ),
1594 scalar( zero, 3 )))
1595 return FALSE;
1596 }
1597
1598 /* tmp.yz = src0
1599 */
1600 if (tmp.mask & TGSI_WRITEMASK_YZ) {
1601 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1602 writemask(tmp, TGSI_WRITEMASK_YZ ),
1603 src0))
1604 return FALSE;
1605 }
1606
1607 /* tmp.yw = tmp * src1
1608 */
1609 if (tmp.mask & TGSI_WRITEMASK_YW) {
1610 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1611 writemask(tmp, TGSI_WRITEMASK_YW ),
1612 src(tmp),
1613 src1))
1614 return FALSE;
1615 }
1616
1617 /* dst = tmp
1618 */
1619 if (need_tmp) {
1620 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1621 dst,
1622 src(tmp)))
1623 return FALSE;
1624 }
1625 }
1626
1627 return TRUE;
1628 }
1629
1630
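/* Translate TGSI EXP. Per component of the writemask:
 * dst.x = 2 ^ floor(src.x), dst.y = src.x - floor(src.x),
 * dst.z = 2 ^ src.x (partial precision), dst.w = 1.0.
 */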
1631 static boolean emit_exp(struct svga_shader_emitter *emit,
1632 const struct tgsi_full_instruction *insn)
1633 {
1634 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1635 struct src_register src0 =
1636 translate_src_register( emit, &insn->FullSrcRegisters[0] );
1637 struct src_register zero = get_zero_immediate( emit );
1638 SVGA3dShaderDestToken fraction;
1639
1640 if (dst.mask & TGSI_WRITEMASK_Y)
1641 fraction = dst;
1642 else if (dst.mask & TGSI_WRITEMASK_X)
1643 fraction = get_temp( emit );
1644
1645 /* If x or y is being written, compute src0 - floor(src0); y takes this
1646 * value directly and x needs it below to form floor(src0). */
1647 if (dst.mask & TGSI_WRITEMASK_XY) {
1648 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
1649 writemask( fraction, TGSI_WRITEMASK_Y ),
1650 src0 ))
1651 return FALSE;
1652 }
1653
1654 /* If x is being written, fill it with 2 ^ floor(src0).
1655 */
1656 if (dst.mask & TGSI_WRITEMASK_X) {
1657 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
1658 writemask( dst, dst.mask & TGSI_WRITEMASK_X ),
1659 src0,
1660 scalar( negate( src( fraction ) ), TGSI_SWIZZLE_Y ) ) )
1661 return FALSE;
1662
1663 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
1664 writemask( dst, dst.mask & TGSI_WRITEMASK_X ),
1665 scalar( src( dst ), TGSI_SWIZZLE_X ) ) )
1666 return FALSE;
1667
1668 if (!(dst.mask & TGSI_WRITEMASK_Y))
1669 release_temp( emit, fraction );
1670 }
1671
1672 /* If z is being written, fill it with 2 ^ src0 (partial precision).
1673 */
1674 if (dst.mask & TGSI_WRITEMASK_Z) {
1675 if (!submit_op1( emit, inst_token( SVGA3DOP_EXPP ),
1676 writemask( dst, dst.mask & TGSI_WRITEMASK_Z ),
1677 src0 ) )
1678 return FALSE;
1679 }
1680
1681 /* If w is being written, fill it with one.
1682 */
1683 if (dst.mask & TGSI_WRITEMASK_W) {
1684 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1685 writemask(dst, TGSI_WRITEMASK_W),
1686 scalar( zero, TGSI_SWIZZLE_W ) ))
1687 return FALSE;
1688 }
1689
1690 return TRUE;
1691 }
1692
1693 static boolean emit_lit(struct svga_shader_emitter *emit,
1694 const struct tgsi_full_instruction *insn )
1695 {
1696 if (emit->unit == PIPE_SHADER_VERTEX) {
1697 /* SVGA/DX9 has a LIT instruction, but only for vertex shaders:
1698 */
1699 return emit_simple_instruction(emit, SVGA3DOP_LIT, insn);
1700 }
1701 else {
1702
1703 /* D3D vs. GL semantics can be fairly easily accommodated by
1704 * variations on this sequence.
1705 *
1706 * GL:
1707 * tmp.y = src.x
1708 * tmp.z = pow(src.y,src.w)
1709 * p0 = src0.xxxx > 0
1710 * result = zero.wxxw
1711 * (p0) result.yz = tmp
1712 *
1713 * D3D:
1714 * tmp.y = src.x
1715 * tmp.z = pow(src.y,src.w)
1716 * p0 = src0.xxyy > 0
1717 * result = zero.wxxw
1718 * (p0) result.yz = tmp
1719 *
1720 * Will implement the GL version for now.
1721 */
1722
1723 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1724 SVGA3dShaderDestToken tmp = get_temp( emit );
1725 const struct src_register src0 = translate_src_register(
1726 emit, &insn->FullSrcRegisters[0] );
1727 struct src_register zero = get_zero_immediate( emit );
1728
1729 /* tmp = pow(src.y, src.w)
1730 */
1731 if (dst.mask & TGSI_WRITEMASK_Z) {
1732 if (!submit_op2(emit, inst_token( SVGA3DOP_POW ),
1733 tmp,
1734 scalar(src0, 1),
1735 scalar(src0, 3)))
1736 return FALSE;
1737 }
1738
1739 /* tmp.y = src.x
1740 */
1741 if (dst.mask & TGSI_WRITEMASK_Y) {
1742 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1743 writemask(tmp, TGSI_WRITEMASK_Y ),
1744 scalar(src0, 0)))
1745 return FALSE;
1746 }
1747
1748 /* Can't quite do this with emit_conditional() due to the extra
1749 * writemask on the predicated mov:
1750 */
1751 {
1752 SVGA3dShaderDestToken pred_reg = dst_register( SVGA3DREG_PREDICATE, 0 );
1753 SVGA3dShaderInstToken setp_token, mov_token;
1754 struct src_register predsrc;
1755
1756 setp_token = inst_token( SVGA3DOP_SETP );
1757 mov_token = inst_token( SVGA3DOP_MOV );
1758
1759 setp_token.control = SVGA3DOPCOMP_GT;
1760
1761 /* D3D vs GL semantics:
1762 */
1763 if (0)
1764 predsrc = swizzle(src0, 0, 0, 1, 1); /* D3D */
1765 else
1766 predsrc = swizzle(src0, 0, 0, 0, 0); /* GL */
1767
1768 /* SETP src0.xxyy, GT, {0}.x */
1769 if (!submit_op2( emit, setp_token, pred_reg,
1770 predsrc,
1771 swizzle(zero, 0, 0, 0, 0) ))
1772 return FALSE;
1773
1774 /* MOV dst, fail */
1775 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), dst,
1776 swizzle(zero, 3, 0, 0, 3 )))
1777 return FALSE;
1778
1779 /* MOV dst.yz, tmp (predicated)
1780 *
1781 * Note that the predicate reg (and possible modifiers) is passed
1782 * as the first source argument.
1783 */
1784 if (dst.mask & TGSI_WRITEMASK_YZ) {
1785 mov_token.predicated = 1;
1786 if (!submit_op2( emit, mov_token,
1787 writemask(dst, TGSI_WRITEMASK_YZ),
1788 src( pred_reg ), src( tmp ) ))
1789 return FALSE;
1790 }
1791 }
1792 }
1793
1794 return TRUE;
1795 }
1796
1797
1798
1799
1800 static boolean emit_ex2( struct svga_shader_emitter *emit,
1801 const struct tgsi_full_instruction *insn )
1802 {
1803 SVGA3dShaderInstToken inst;
1804 SVGA3dShaderDestToken dst;
1805 struct src_register src0;
1806
1807 inst = inst_token( SVGA3DOP_EXP );
1808 dst = translate_dst_register( emit, insn, 0 );
1809 src0 = translate_src_register( emit, &insn->FullSrcRegisters[0] );
1810 src0 = scalar( src0, TGSI_SWIZZLE_X );
1811
1812 if (dst.mask != TGSI_WRITEMASK_XYZW) {
1813 SVGA3dShaderDestToken tmp = get_temp( emit );
1814
1815 if (!submit_op1( emit, inst, tmp, src0 ))
1816 return FALSE;
1817
1818 return submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1819 dst,
1820 scalar( src( tmp ), TGSI_SWIZZLE_X ) );
1821 }
1822
1823 return submit_op1( emit, inst, dst, src0 );
1824 }
1825
1826
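/* Translate TGSI LOG. Per component of the writemask:
 * dst.x = floor(log2(|src.x|)), dst.y = |src.x| / 2 ^ floor(log2(|src.x|)),
 * dst.z = log2(|src.x|), dst.w = 1.0.
 */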
1827 static boolean emit_log(struct svga_shader_emitter *emit,
1828 const struct tgsi_full_instruction *insn)
1829 {
1830 SVGA3dShaderDestToken dst = translate_dst_register( emit, insn, 0 );
1831 struct src_register src0 =
1832 translate_src_register( emit, &insn->FullSrcRegisters[0] );
1833 struct src_register zero = get_zero_immediate( emit );
1834 SVGA3dShaderDestToken abs_tmp;
1835 struct src_register abs_src0;
1836 SVGA3dShaderDestToken log2_abs;
1837
1838 if (dst.mask & TGSI_WRITEMASK_Z)
1839 log2_abs = dst;
1840 else if (dst.mask & TGSI_WRITEMASK_XY)
1841 log2_abs = get_temp( emit );
1842
1843 /* If x, y or z is being written, compute log2( abs( src0 ) ); z takes it
1844 * directly and x/y derive their values from it below. */
1845 if (dst.mask & TGSI_WRITEMASK_XYZ) {
1846 if (!src0.base.srcMod || src0.base.srcMod == SVGA3DSRCMOD_ABS)
1847 abs_src0 = src0;
1848 else {
1849 abs_tmp = get_temp( emit );
1850
1851 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1852 abs_tmp,
1853 src0 ) )
1854 return FALSE;
1855
1856 abs_src0 = src( abs_tmp );
1857 }
1858
1859 abs_src0 = absolute( scalar( abs_src0, TGSI_SWIZZLE_X ) );
1860
1861 if (!submit_op1( emit, inst_token( SVGA3DOP_LOG ),
1862 writemask( log2_abs, TGSI_WRITEMASK_Z ),
1863 abs_src0 ) )
1864 return FALSE;
1865 }
1866
1867 if (dst.mask & TGSI_WRITEMASK_XY) {
1868 SVGA3dShaderDestToken floor_log2;
1869
1870 if (dst.mask & TGSI_WRITEMASK_X)
1871 floor_log2 = dst;
1872 else
1873 floor_log2 = get_temp( emit );
1874
1875 /* If x is being written, fill it with floor( log2( abs( src0 ) ) ).
1876 */
1877 if (!submit_op1( emit, inst_token( SVGA3DOP_FRC ),
1878 writemask( floor_log2, TGSI_WRITEMASK_X ),
1879 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ) ) )
1880 return FALSE;
1881
1882 if (!submit_op2( emit, inst_token( SVGA3DOP_ADD ),
1883 writemask( floor_log2, TGSI_WRITEMASK_X ),
1884 scalar( src( log2_abs ), TGSI_SWIZZLE_Z ),
1885 negate( src( floor_log2 ) ) ) )
1886 return FALSE;
1887
1888 /* If y is being written, fill it with
1889 * abs ( src0 ) / ( 2 ^ floor( log2( abs( src0 ) ) ) ).
1890 */
1891 if (dst.mask & TGSI_WRITEMASK_Y) {
1892 if (!submit_op1( emit, inst_token( SVGA3DOP_EXP ),
1893 writemask( dst, TGSI_WRITEMASK_Y ),
1894 negate( scalar( src( floor_log2 ),
1895 TGSI_SWIZZLE_X ) ) ) )
1896 return FALSE;
1897
1898 if (!submit_op2( emit, inst_token( SVGA3DOP_MUL ),
1899 writemask( dst, TGSI_WRITEMASK_Y ),
1900 src( dst ),
1901 abs_src0 ) )
1902 return FALSE;
1903 }
1904
1905 if (!(dst.mask & TGSI_WRITEMASK_X))
1906 release_temp( emit, floor_log2 );
1907
1908 if (!(dst.mask & TGSI_WRITEMASK_Z))
1909 release_temp( emit, log2_abs );
1910 }
1911
1912 if (dst.mask & TGSI_WRITEMASK_XYZ && src0.base.srcMod &&
1913 src0.base.srcMod != SVGA3DSRCMOD_ABS)
1914 release_temp( emit, abs_tmp );
1915
1916 /* If w is being written, fill it with one.
1917 */
1918 if (dst.mask & TGSI_WRITEMASK_W) {
1919 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ),
1920 writemask(dst, TGSI_WRITEMASK_W),
1921 scalar( zero, TGSI_SWIZZLE_W ) ))
1922 return FALSE;
1923 }
1924
1925 return TRUE;
1926 }
1927
1928
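/* BGNSUB: close the preceding code with RET, then emit LABEL #i for this
 * subroutine, where i is the label slot that emit_call() registered for
 * this instruction position.
 */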
1929 static boolean emit_bgnsub( struct svga_shader_emitter *emit,
1930 unsigned position,
1931 const struct tgsi_full_instruction *insn )
1932 {
1933 unsigned i;
1934
1935 /* Note that we've finished the main function and are now emitting
1936 * subroutines. This affects how we terminate the generated
1937 * shader.
1938 */
1939 emit->in_main_func = FALSE;
1940
1941 for (i = 0; i < emit->nr_labels; i++) {
1942 if (emit->label[i] == position) {
1943 return (emit_instruction( emit, inst_token( SVGA3DOP_RET ) ) &&
1944 emit_instruction( emit, inst_token( SVGA3DOP_LABEL ) ) &&
1945 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
1946 }
1947 }
1948
1949 assert(0);
1950 return TRUE;
1951 }
1952
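/* CAL: look up (or allocate) a label slot for the callee's instruction
 * position and emit CALL with that label.
 */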
1953 static boolean emit_call( struct svga_shader_emitter *emit,
1954 const struct tgsi_full_instruction *insn )
1955 {
1956 unsigned position = insn->InstructionExtLabel.Label;
1957 unsigned i;
1958
1959 for (i = 0; i < emit->nr_labels; i++) {
1960 if (emit->label[i] == position)
1961 break;
1962 }
1963
1964 if (emit->nr_labels == Elements(emit->label))
1965 return FALSE;
1966
1967 if (i == emit->nr_labels) {
1968 emit->label[i] = position;
1969 emit->nr_labels++;
1970 }
1971
1972 return (emit_instruction( emit, inst_token( SVGA3DOP_CALL ) ) &&
1973 emit_src( emit, src_register( SVGA3DREG_LABEL, i )));
1974 }
1975
1976
1977 static boolean emit_end( struct svga_shader_emitter *emit )
1978 {
1979 if (emit->unit == PIPE_SHADER_VERTEX) {
1980 return emit_vs_postamble( emit );
1981 }
1982 else {
1983 return emit_ps_postamble( emit );
1984 }
1985 }
1986
1987
1988
1989 static boolean svga_emit_instruction( struct svga_shader_emitter *emit,
1990 unsigned position,
1991 const struct tgsi_full_instruction *insn )
1992 {
1993 switch (insn->Instruction.Opcode) {
1994
1995 case TGSI_OPCODE_ARL:
1996 return emit_arl( emit, insn );
1997
1998 case TGSI_OPCODE_TEX:
1999 case TGSI_OPCODE_TXB:
2000 case TGSI_OPCODE_TXP:
2001 case TGSI_OPCODE_TXL:
2002 case TGSI_OPCODE_TXD:
2003 return emit_tex( emit, insn );
2004
2005 case TGSI_OPCODE_BGNSUB:
2006 return emit_bgnsub( emit, position, insn );
2007
2008 case TGSI_OPCODE_ENDSUB:
2009 return TRUE;
2010
2011 case TGSI_OPCODE_CAL:
2012 return emit_call( emit, insn );
2013
2014 case TGSI_OPCODE_FLR:
2015    case TGSI_OPCODE_TRUNC:        /* should truncate toward zero, but is emitted as FLR for now */
2016 return emit_floor( emit, insn );
2017
2018 case TGSI_OPCODE_CMP:
2019 return emit_cmp( emit, insn );
2020
2021 case TGSI_OPCODE_DIV:
2022 return emit_div( emit, insn );
2023
2024 case TGSI_OPCODE_DP2:
2025 return emit_dp2( emit, insn );
2026
2027 case TGSI_OPCODE_DPH:
2028 return emit_dph( emit, insn );
2029
2030 case TGSI_OPCODE_NRM:
2031 return emit_nrm( emit, insn );
2032
2033 case TGSI_OPCODE_COS:
2034 return emit_cos( emit, insn );
2035
2036 case TGSI_OPCODE_SIN:
2037 return emit_sin( emit, insn );
2038
2039 case TGSI_OPCODE_SCS:
2040 return emit_sincos( emit, insn );
2041
2042 case TGSI_OPCODE_END:
2043 /* TGSI always finishes the main func with an END */
2044 return emit_end( emit );
2045
2046 case TGSI_OPCODE_KIL:
2047 return emit_kil( emit, insn );
2048
2049 /* Selection opcodes. The underlying language is fairly
2050 * non-orthogonal about these.
2051 */
2052 case TGSI_OPCODE_SEQ:
2053 return emit_select_op( emit, PIPE_FUNC_EQUAL, insn );
2054
2055 case TGSI_OPCODE_SNE:
2056 return emit_select_op( emit, PIPE_FUNC_NOTEQUAL, insn );
2057
2058 case TGSI_OPCODE_SGT:
2059 return emit_select_op( emit, PIPE_FUNC_GREATER, insn );
2060
2061 case TGSI_OPCODE_SGE:
2062 return emit_select_op( emit, PIPE_FUNC_GEQUAL, insn );
2063
2064 case TGSI_OPCODE_SLT:
2065 return emit_select_op( emit, PIPE_FUNC_LESS, insn );
2066
2067 case TGSI_OPCODE_SLE:
2068 return emit_select_op( emit, PIPE_FUNC_LEQUAL, insn );
2069
2070 case TGSI_OPCODE_SUB:
2071 return emit_sub( emit, insn );
2072
2073 case TGSI_OPCODE_POW:
2074 return emit_pow( emit, insn );
2075
2076 case TGSI_OPCODE_EX2:
2077 return emit_ex2( emit, insn );
2078
2079 case TGSI_OPCODE_EXP:
2080 return emit_exp( emit, insn );
2081
2082 case TGSI_OPCODE_LOG:
2083 return emit_log( emit, insn );
2084
2085 case TGSI_OPCODE_LG2:
2086 return emit_scalar_op1( emit, SVGA3DOP_LOG, insn );
2087
2088 case TGSI_OPCODE_RSQ:
2089 return emit_scalar_op1( emit, SVGA3DOP_RSQ, insn );
2090
2091 case TGSI_OPCODE_RCP:
2092 return emit_scalar_op1( emit, SVGA3DOP_RCP, insn );
2093
2094 case TGSI_OPCODE_CONT:
2095 case TGSI_OPCODE_RET:
2096       /* These are handled as no-ops.  We tell Mesa that early RET
2097        * within a function isn't supported, so any RET seen here is
2098        * always immediately followed by an ENDSUB.
2099        */
2100 return TRUE;
2101
2102 /* These aren't actually used by any of the frontends we care
2103 * about:
2104 */
2105 case TGSI_OPCODE_CLAMP:
2106 case TGSI_OPCODE_ROUND:
2107 case TGSI_OPCODE_AND:
2108 case TGSI_OPCODE_OR:
2109 case TGSI_OPCODE_I2F:
2110 case TGSI_OPCODE_NOT:
2111 case TGSI_OPCODE_SHL:
2112 case TGSI_OPCODE_SHR:
2113 case TGSI_OPCODE_XOR:
2114 return FALSE;
2115
2116 case TGSI_OPCODE_IF:
2117 return emit_if( emit, insn );
2118 case TGSI_OPCODE_ELSE:
2119 return emit_else( emit, insn );
2120 case TGSI_OPCODE_ENDIF:
2121 return emit_endif( emit, insn );
2122
2123 case TGSI_OPCODE_BGNLOOP:
2124 return emit_bgnloop2( emit, insn );
2125 case TGSI_OPCODE_ENDLOOP:
2126 return emit_endloop2( emit, insn );
2127 case TGSI_OPCODE_BRK:
2128 return emit_brk( emit, insn );
2129
2130 case TGSI_OPCODE_XPD:
2131 return emit_xpd( emit, insn );
2132
2133 case TGSI_OPCODE_KILP:
2134 return emit_kilp( emit, insn );
2135
2136 case TGSI_OPCODE_DST:
2137 return emit_dst_insn( emit, insn );
2138
2139 case TGSI_OPCODE_LIT:
2140 return emit_lit( emit, insn );
2141
2142 case TGSI_OPCODE_LRP:
2143 return emit_lrp( emit, insn );
2144
2145 default: {
2146 unsigned opcode = translate_opcode(insn->Instruction.Opcode);
2147
2148 if (opcode == SVGA3DOP_LAST_INST)
2149 return FALSE;
2150
2151 if (!emit_simple_instruction( emit, opcode, insn ))
2152 return FALSE;
2153 }
2154 }
2155
2156 return TRUE;
2157 }
2158
2159
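/* Emit a TGSI immediate as a float constant.  Immediates carry one to four
 * components; any components not supplied are padded below from the
 * identity vector { 0, 0, 0, 1 }.
 */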
2160 static boolean svga_emit_immediate( struct svga_shader_emitter *emit,
2161 struct tgsi_full_immediate *imm)
2162 {
2163 static const float id[4] = {0,0,0,1};
2164 float value[4];
2165 unsigned i;
2166
2167 assert(1 <= imm->Immediate.NrTokens && imm->Immediate.NrTokens <= 5);
2168 for (i = 0; i < imm->Immediate.NrTokens - 1; i++)
2169 value[i] = imm->u[i].Float;
2170
2171 for ( ; i < 4; i++ )
2172 value[i] = id[i];
2173
2174 return emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
2175 emit->imm_start + emit->internal_imm_count++,
2176 value[0], value[1], value[2], value[3]);
2177 }
2178
2179 static boolean make_immediate( struct svga_shader_emitter *emit,
2180 float a,
2181 float b,
2182 float c,
2183 float d,
2184 struct src_register *out )
2185 {
2186 unsigned idx = emit->nr_hw_const++;
2187
2188 if (!emit_def_const( emit, SVGA3D_CONST_TYPE_FLOAT,
2189 idx, a, b, c, d ))
2190 return FALSE;
2191
2192 *out = src_register( SVGA3DREG_CONST, idx );
2193
2194 return TRUE;
2195 }
2196
2197 static boolean emit_vs_preamble( struct svga_shader_emitter *emit )
2198 {
2199 if (!emit->key.vkey.need_prescale) {
2200 if (!make_immediate( emit, 0, 0, .5, .5,
2201 &emit->imm_0055))
2202 return FALSE;
2203 }
2204
2205 return TRUE;
2206 }
2207
2208 static boolean emit_ps_preamble( struct svga_shader_emitter *emit )
2209 {
2210 unsigned i;
2211
2212 /* For SM20, need to initialize the temporaries we're using to hold
2213 * color outputs to some value. Shaders which don't set all of
2214 * these values are likely to be rejected by the DX9 runtime.
2215 */
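   /* These temp_col registers are copied into the real color outputs by
    * emit_ps_postamble() further down.
    */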
2216 if (!emit->use_sm30) {
2217 struct src_register zero = get_zero_immediate( emit );
2218 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
2219 if (SVGA3dShaderGetRegType(emit->true_col[i].value) != 0) {
2220
2221 if (!submit_op1( emit,
2222 inst_token(SVGA3DOP_MOV),
2223 emit->temp_col[i],
2224 zero ))
2225 return FALSE;
2226 }
2227 }
2228 }
2229
2230 return TRUE;
2231 }
2232
2233 static boolean emit_ps_postamble( struct svga_shader_emitter *emit )
2234 {
2235 unsigned i;
2236
2237 /* PS oDepth is incredibly fragile and it's very hard to catch the
2238 * types of usage that break it during shader emit. Easier just to
2239 * redirect the main program to a temporary and then only touch
2240 * oDepth with a hand-crafted MOV below.
2241 */
2242 if (SVGA3dShaderGetRegType(emit->true_pos.value) != 0) {
2243
2244 if (!submit_op1( emit,
2245 inst_token(SVGA3DOP_MOV),
2246 emit->true_pos,
2247 scalar(src(emit->temp_pos), TGSI_SWIZZLE_Z) ))
2248 return FALSE;
2249 }
2250
2251 /* Similarly for SM20 color outputs... Luckily SM30 isn't so
2252 * fragile.
2253 */
2254 for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
2255 if (SVGA3dShaderGetRegType(emit->true_col[i].value) != 0) {
2256
2257 if (!submit_op1( emit,
2258 inst_token(SVGA3DOP_MOV),
2259 emit->true_col[i],
2260 src(emit->temp_col[i]) ))
2261 return FALSE;
2262 }
2263 }
2264
2265 return TRUE;
2266 }
2267
2268 static boolean emit_vs_postamble( struct svga_shader_emitter *emit )
2269 {
2270 /* PSIZ output is incredibly fragile and it's very hard to catch
2271 * the types of usage that break it during shader emit. Easier
2272 * just to redirect the main program to a temporary and then only
2273 * touch PSIZ with a hand-crafted MOV below.
2274 */
2275 if (SVGA3dShaderGetRegType(emit->true_psiz.value) != 0) {
2276
2277 if (!submit_op1( emit,
2278 inst_token(SVGA3DOP_MOV),
2279 emit->true_psiz,
2280 scalar(src(emit->temp_psiz), TGSI_SWIZZLE_X) ))
2281 return FALSE;
2282 }
2283
2284 /* Need to perform various manipulations on vertex position to cope
2285 * with the different GL and D3D clip spaces.
2286 */
2287 if (emit->key.vkey.need_prescale) {
2288 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
2289 SVGA3dShaderDestToken pos = emit->true_pos;
2290 unsigned offset = emit->info.file_max[TGSI_FILE_CONSTANT] + 1;
2291 struct src_register prescale_scale = src_register( SVGA3DREG_CONST,
2292 offset + 0 );
2293 struct src_register prescale_trans = src_register( SVGA3DREG_CONST,
2294 offset + 1 );
2295
2296 /* MUL temp_pos.xyz, temp_pos, prescale.scale
2297 * MAD result.position, temp_pos.wwww, prescale.trans, temp_pos
2298 * --> Note that prescale.trans.w == 0
2299 */
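      /* With prescale.trans.w == 0 this works out to (illustrative):
       *   pos.xyz = temp_pos.xyz * prescale.scale.xyz + temp_pos.w * prescale.trans.xyz
       *   pos.w   = temp_pos.w
       */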
2300 if (!submit_op2( emit,
2301 inst_token(SVGA3DOP_MUL),
2302 writemask(temp_pos, TGSI_WRITEMASK_XYZ),
2303 src(temp_pos),
2304 prescale_scale ))
2305 return FALSE;
2306
2307 if (!submit_op3( emit,
2308 inst_token(SVGA3DOP_MAD),
2309 pos,
2310 swizzle(src(temp_pos), 3, 3, 3, 3),
2311 prescale_trans,
2312 src(temp_pos)))
2313 return FALSE;
2314 }
2315 else {
2316 SVGA3dShaderDestToken temp_pos = emit->temp_pos;
2317 SVGA3dShaderDestToken pos = emit->true_pos;
2318 struct src_register imm_0055 = emit->imm_0055;
2319
2320 /* Adjust GL clipping coordinate space to hardware (D3D-style):
2321 *
2322 * DP4 temp_pos.z, {0,0,.5,.5}, temp_pos
2323 * MOV result.position, temp_pos
2324 */
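      /* The DP4 against { 0, 0, .5, .5 } evaluates to
       *   temp_pos.z = 0.5 * temp_pos.z + 0.5 * temp_pos.w
       * remapping the GL-style [-w, w] depth range onto the D3D-style
       * [0, w] range expected by the hardware.
       */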
2325 if (!submit_op2( emit,
2326 inst_token(SVGA3DOP_DP4),
2327 writemask(temp_pos, TGSI_WRITEMASK_Z),
2328 imm_0055,
2329 src(temp_pos) ))
2330 return FALSE;
2331
2332 if (!submit_op1( emit,
2333 inst_token(SVGA3DOP_MOV),
2334 pos,
2335 src(temp_pos) ))
2336 return FALSE;
2337 }
2338
2339 return TRUE;
2340 }
2341
2342 /*
2343 0: IF VFACE :4
2344 1: COLOR = FrontColor;
2345 2: ELSE
2346 3: COLOR = BackColor;
2347 4: ENDIF
2348 */
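/* Implemented below with a hardware IFC on the VFACE register: the compare
 * direction (GT vs LT) follows key.fkey.front_cw, each color is copied into
 * a freshly allocated hardware temporary, and input_map is repointed at that
 * temporary.
 */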
2349 static boolean emit_light_twoside( struct svga_shader_emitter *emit )
2350 {
2351 struct src_register vface, zero;
2352 struct src_register front[2];
2353 struct src_register back[2];
2354 SVGA3dShaderDestToken color[2];
2355 int count = emit->internal_color_count;
2356 int i;
2357 SVGA3dShaderInstToken if_token;
2358
2359 if (count == 0)
2360 return TRUE;
2361
2362 vface = get_vface( emit );
2363 zero = get_zero_immediate( emit );
2364
2365 /* Can't use get_temp() to allocate the color reg as such
2366 * temporaries will be reclaimed after each instruction by the call
2367 * to reset_temp_regs().
2368 */
2369 for (i = 0; i < count; i++) {
2370 color[i] = dst_register( SVGA3DREG_TEMP,
2371 emit->nr_hw_temp++ );
2372
2373 front[i] = emit->input_map[emit->internal_color_idx[i]];
2374
2375 /* Back is always the next input:
2376 */
2377 back[i] = front[i];
2378 back[i].base.num = front[i].base.num + 1;
2379
2380 /* Reassign the input_map to the actual front-face color:
2381 */
2382 emit->input_map[emit->internal_color_idx[i]] = src(color[i]);
2383 }
2384
2385 if_token = inst_token( SVGA3DOP_IFC );
2386
2387 if (emit->key.fkey.front_cw)
2388 if_token.control = SVGA3DOPCOMP_GT;
2389 else
2390 if_token.control = SVGA3DOPCOMP_LT;
2391
2392 zero = scalar(zero, TGSI_SWIZZLE_X);
2393
2394 if (!(emit_instruction( emit, if_token ) &&
2395 emit_src( emit, vface ) &&
2396 emit_src( emit, zero ) ))
2397 return FALSE;
2398
2399 for (i = 0; i < count; i++) {
2400 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], front[i] ))
2401 return FALSE;
2402 }
2403
2404 if (!(emit_instruction( emit, inst_token( SVGA3DOP_ELSE))))
2405 return FALSE;
2406
2407 for (i = 0; i < count; i++) {
2408 if (!submit_op1( emit, inst_token( SVGA3DOP_MOV ), color[i], back[i] ))
2409 return FALSE;
2410 }
2411
2412 if (!emit_instruction( emit, inst_token( SVGA3DOP_ENDIF ) ))
2413 return FALSE;
2414
2415 return TRUE;
2416 }
2417
2418 /*
2419 0: SETP_GT TEMP, VFACE, 0
2420 where TEMP is a fake frontface register
2421 */
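/* Implemented below with emit_conditional( PIPE_FUNC_GREATER, ... ): the
 * fake frontface temporary gets the w or x component of the zero immediate
 * (assumed { 0, 0, 0, 1 }, i.e. 1.0 or 0.0), with the sense selected by
 * key.fkey.front_cw.
 */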
2422 static boolean emit_frontface( struct svga_shader_emitter *emit )
2423 {
2424 struct src_register vface, zero;
2425 SVGA3dShaderDestToken temp;
2426 struct src_register pass, fail;
2427
2428 vface = get_vface( emit );
2429 zero = get_zero_immediate( emit );
2430
2431 /* Can't use get_temp() to allocate the fake frontface reg as such
2432 * temporaries will be reclaimed after each instruction by the call
2433 * to reset_temp_regs().
2434 */
2435 temp = dst_register( SVGA3DREG_TEMP,
2436 emit->nr_hw_temp++ );
2437
2438 if (emit->key.fkey.front_cw) {
2439 pass = scalar( zero, TGSI_SWIZZLE_W );
2440 fail = scalar( zero, TGSI_SWIZZLE_X );
2441 } else {
2442 pass = scalar( zero, TGSI_SWIZZLE_X );
2443 fail = scalar( zero, TGSI_SWIZZLE_W );
2444 }
2445
2446 if (!emit_conditional(emit, PIPE_FUNC_GREATER,
2447 temp, vface, scalar( zero, TGSI_SWIZZLE_X ),
2448 pass, fail))
2449 return FALSE;
2450
2451 /* Reassign the input_map to the actual front-face color:
2452 */
2453 emit->input_map[emit->internal_frontface_idx] = src(temp);
2454
2455 return TRUE;
2456 }
2457
2458 static INLINE boolean
2459 needs_to_create_zero( struct svga_shader_emitter *emit )
2460 {
2461 int i;
2462
2463 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2464 if (!emit->use_sm30)
2465 return TRUE;
2466
2467 if (emit->key.fkey.light_twoside)
2468 return TRUE;
2469
2470 if (emit->emit_frontface)
2471 return TRUE;
2472
2473 if (emit->info.opcode_count[TGSI_OPCODE_DST] >= 1 ||
2474 emit->info.opcode_count[TGSI_OPCODE_LIT] >= 1)
2475 return TRUE;
2476 }
2477
2478 if (emit->info.opcode_count[TGSI_OPCODE_IF] >= 1 ||
2479 emit->info.opcode_count[TGSI_OPCODE_SGE] >= 1 ||
2480 emit->info.opcode_count[TGSI_OPCODE_SGT] >= 1 ||
2481 emit->info.opcode_count[TGSI_OPCODE_SLE] >= 1 ||
2482 emit->info.opcode_count[TGSI_OPCODE_SLT] >= 1 ||
2483 emit->info.opcode_count[TGSI_OPCODE_SNE] >= 1 ||
2484 emit->info.opcode_count[TGSI_OPCODE_SEQ] >= 1 ||
2485 emit->info.opcode_count[TGSI_OPCODE_EXP] >= 1 ||
2486 emit->info.opcode_count[TGSI_OPCODE_LOG] >= 1 ||
2487 emit->info.opcode_count[TGSI_OPCODE_XPD] >= 1 ||
2488 emit->info.opcode_count[TGSI_OPCODE_KILP] >= 1)
2489 return TRUE;
2490
2491 for (i = 0; i < emit->key.fkey.num_textures; i++) {
2492 if (emit->key.fkey.tex[i].compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
2493 return TRUE;
2494 }
2495
2496 return FALSE;
2497 }
2498
2499 static INLINE boolean
2500 needs_to_create_loop_const( struct svga_shader_emitter *emit )
2501 {
2502 return (emit->info.opcode_count[TGSI_OPCODE_BGNLOOP] >= 1);
2503 }
2504
2505 static INLINE boolean
2506 needs_to_create_sincos_consts( struct svga_shader_emitter *emit )
2507 {
2508 return !emit->use_sm30 && (emit->info.opcode_count[TGSI_OPCODE_SIN] >= 1 ||
2509 emit->info.opcode_count[TGSI_OPCODE_COS] >= 1 ||
2510 emit->info.opcode_count[TGSI_OPCODE_SCS] >= 1);
2511 }
2512
2513 static INLINE boolean
2514 needs_to_create_arl_consts( struct svga_shader_emitter *emit )
2515 {
2516 return (emit->num_arl_consts > 0);
2517 }
2518
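/* Pre-parse pass: record, for each ARL, the most negative relative-
 * addressing offset used with it, so that (it is assumed) matching bias
 * constants can later be emitted by create_arl_consts().
 */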
2519 static INLINE boolean
2520 pre_parse_add_indirect( struct svga_shader_emitter *emit,
2521 int num, int current_arl)
2522 {
2523 int i;
2524 assert(num < 0);
2525
2526 for (i = 0; i < emit->num_arl_consts; ++i) {
2527 if (emit->arl_consts[i].arl_num == current_arl)
2528 break;
2529 }
2530 /* new entry */
2531 if (emit->num_arl_consts == i) {
2532 ++emit->num_arl_consts;
2533 }
2534 emit->arl_consts[i].number = (emit->arl_consts[i].number > num) ?
2535 num :
2536 emit->arl_consts[i].number;
2537 emit->arl_consts[i].arl_num = current_arl;
2538 return TRUE;
2539 }
2540
2541 static boolean
2542 pre_parse_instruction( struct svga_shader_emitter *emit,
2543 const struct tgsi_full_instruction *insn,
2544 int current_arl)
2545 {
2546 if (insn->FullSrcRegisters[0].SrcRegister.Indirect &&
2547 insn->FullSrcRegisters[0].SrcRegisterInd.File == TGSI_FILE_ADDRESS) {
2548 const struct tgsi_full_src_register *reg = &insn->FullSrcRegisters[0];
2549 if (reg->SrcRegister.Index < 0) {
2550 pre_parse_add_indirect(emit, reg->SrcRegister.Index, current_arl);
2551 }
2552 }
2553
2554 if (insn->FullSrcRegisters[1].SrcRegister.Indirect &&
2555 insn->FullSrcRegisters[1].SrcRegisterInd.File == TGSI_FILE_ADDRESS) {
2556 const struct tgsi_full_src_register *reg = &insn->FullSrcRegisters[1];
2557 if (reg->SrcRegister.Index < 0) {
2558 pre_parse_add_indirect(emit, reg->SrcRegister.Index, current_arl);
2559 }
2560 }
2561
2562 if (insn->FullSrcRegisters[2].SrcRegister.Indirect &&
2563 insn->FullSrcRegisters[2].SrcRegisterInd.File == TGSI_FILE_ADDRESS) {
2564 const struct tgsi_full_src_register *reg = &insn->FullSrcRegisters[2];
2565 if (reg->SrcRegister.Index < 0) {
2566 pre_parse_add_indirect(emit, reg->SrcRegister.Index, current_arl);
2567 }
2568 }
2569
2570 return TRUE;
2571 }
2572
2573 static boolean
2574 pre_parse_tokens( struct svga_shader_emitter *emit,
2575 const struct tgsi_token *tokens )
2576 {
2577 struct tgsi_parse_context parse;
2578 int current_arl = 0;
2579
2580 tgsi_parse_init( &parse, tokens );
2581
2582 while (!tgsi_parse_end_of_tokens( &parse )) {
2583 tgsi_parse_token( &parse );
2584 switch (parse.FullToken.Token.Type) {
2585 case TGSI_TOKEN_TYPE_IMMEDIATE:
2586 case TGSI_TOKEN_TYPE_DECLARATION:
2587 break;
2588 case TGSI_TOKEN_TYPE_INSTRUCTION:
2589 if (parse.FullToken.FullInstruction.Instruction.Opcode ==
2590 TGSI_OPCODE_ARL) {
2591 ++current_arl;
2592 }
2593 if (!pre_parse_instruction( emit, &parse.FullToken.FullInstruction,
2594 current_arl ))
2595 return FALSE;
2596 break;
2597 default:
2598 break;
2599 }
2600
2601 }
2602 return TRUE;
2603 }
2604
2605 static boolean svga_shader_emit_helpers( struct svga_shader_emitter *emit )
2606 {
2607
2608 if (needs_to_create_zero( emit )) {
2609 create_zero_immediate( emit );
2610 }
2611 if (needs_to_create_loop_const( emit )) {
2612 create_loop_const( emit );
2613 }
2614 if (needs_to_create_sincos_consts( emit )) {
2615 create_sincos_consts( emit );
2616 }
2617 if (needs_to_create_arl_consts( emit )) {
2618 create_arl_consts( emit );
2619 }
2620
2621 if (emit->unit == PIPE_SHADER_FRAGMENT) {
2622 if (!emit_ps_preamble( emit ))
2623 return FALSE;
2624
2625 if (emit->key.fkey.light_twoside) {
2626 if (!emit_light_twoside( emit ))
2627 return FALSE;
2628 }
2629 if (emit->emit_frontface) {
2630 if (!emit_frontface( emit ))
2631 return FALSE;
2632 }
2633 }
2634
2635 return TRUE;
2636 }
2637
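/* Entry point: translate a TGSI token stream into SVGA3D shader code.  The
 * tokens are pre-parsed once for ARL bookkeeping, then walked a second time
 * to emit immediates, declarations and instructions; helper constants are
 * emitted lazily just before the first instruction.
 */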
2638 boolean svga_shader_emit_instructions( struct svga_shader_emitter *emit,
2639 const struct tgsi_token *tokens )
2640 {
2641 struct tgsi_parse_context parse;
2642 boolean ret = TRUE;
2643 boolean helpers_emitted = FALSE;
2644 unsigned line_nr = 0;
2645
2646 tgsi_parse_init( &parse, tokens );
2647 emit->internal_imm_count = 0;
2648
2649 if (emit->unit == PIPE_SHADER_VERTEX) {
2650 ret = emit_vs_preamble( emit );
2651 if (!ret)
2652 goto done;
2653 }
2654
2655 pre_parse_tokens(emit, tokens);
2656
2657 while (!tgsi_parse_end_of_tokens( &parse )) {
2658 tgsi_parse_token( &parse );
2659
2660 switch (parse.FullToken.Token.Type) {
2661 case TGSI_TOKEN_TYPE_IMMEDIATE:
2662 ret = svga_emit_immediate( emit, &parse.FullToken.FullImmediate );
2663 if (!ret)
2664 goto done;
2665 break;
2666
2667 case TGSI_TOKEN_TYPE_DECLARATION:
2668 if (emit->use_sm30)
2669 ret = svga_translate_decl_sm30( emit, &parse.FullToken.FullDeclaration );
2670 else
2671 ret = svga_translate_decl_sm20( emit, &parse.FullToken.FullDeclaration );
2672 if (!ret)
2673 goto done;
2674 break;
2675
2676 case TGSI_TOKEN_TYPE_INSTRUCTION:
2677 if (!helpers_emitted) {
2678 if (!svga_shader_emit_helpers( emit ))
2679 goto done;
2680 helpers_emitted = TRUE;
2681 }
2682 ret = svga_emit_instruction( emit,
2683 line_nr++,
2684 &parse.FullToken.FullInstruction );
2685 if (!ret)
2686 goto done;
2687 break;
2688 default:
2689 break;
2690 }
2691
2692 reset_temp_regs( emit );
2693 }
2694
2695    /* Need to terminate the current subroutine.  Note that the
2696     * hardware doesn't tolerate a shader without subroutines being
2697     * terminated with RET+END, hence the in_main_func check.
2698     */
2699 if (!emit->in_main_func) {
2700 ret = emit_instruction( emit, inst_token( SVGA3DOP_RET ) );
2701 if (!ret)
2702 goto done;
2703 }
2704
2705 /* Need to terminate the whole shader:
2706 */
2707 ret = emit_instruction( emit, inst_token( SVGA3DOP_END ) );
2708 if (!ret)
2709 goto done;
2710
2711 done:
2712 assert(ret);
2713 tgsi_parse_free( &parse );
2714 return ret;
2715 }
2716