nvc0: fix branching ops
mesa.git: src/gallium/drivers/nvc0/nvc0_tgsi_to_nc.c
1 /*
2 * Copyright 2010 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
23 #include <unistd.h>
24
25 #define NOUVEAU_DEBUG 1
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_parse.h"
29 #include "tgsi/tgsi_util.h"
30 #include "tgsi/tgsi_dump.h"
31 #include "util/u_dynarray.h"
32
33 #include "nvc0_pc.h"
34 #include "nvc0_program.h"
35
36 /* Arbitrary internal limits. */
37 #define BLD_MAX_TEMPS 64
38 #define BLD_MAX_ADDRS 4
39 #define BLD_MAX_PREDS 4
40 #define BLD_MAX_IMMDS 128
41 #define BLD_MAX_OUTPS PIPE_MAX_SHADER_OUTPUTS
42
43 #define BLD_MAX_COND_NESTING 8
44 #define BLD_MAX_LOOP_NESTING 4
45 #define BLD_MAX_CALL_NESTING 2
46
47 /* This structure represents a TGSI register. */
48 struct bld_register {
49 struct nv_value *current;
50 /* collect all SSA values assigned to it */
51 struct util_dynarray vals;
52 /* 1 bit per loop level, indicates if used/def'd, reset when the loop ends */
53 uint16_t loop_use;
54 uint16_t loop_def;
55 };
56
57 static INLINE struct nv_value **
58 bld_register_access(struct bld_register *reg, unsigned i)
59 {
60 return util_dynarray_element(&reg->vals, struct nv_value *, i);
61 }
62
63 static INLINE void
64 bld_register_add_val(struct bld_register *reg, struct nv_value *val)
65 {
66 util_dynarray_append(&reg->vals, struct nv_value *, val);
67 }
68
69 static INLINE boolean
70 bld_register_del_val(struct bld_register *reg, struct nv_value *val)
71 {
72 unsigned i;
73
74 for (i = reg->vals.size / sizeof(struct nv_value *); i > 0; --i)
75 if (*bld_register_access(reg, i - 1) == val)
76 break;
77 if (!i)
78 return FALSE;
79
80 if (i != reg->vals.size / sizeof(struct nv_value *))
81 *bld_register_access(reg, i - 1) = util_dynarray_pop(&reg->vals,
82 struct nv_value *);
83 else
84 reg->vals.size -= sizeof(struct nv_value *);
85
86 return TRUE;
87 }
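
/* Illustrative sketch (not part of the original file): deletion swaps the
 * last element into the freed slot, so the order of reg->vals is not
 * preserved -- which is fine, since find_by_bb scans every entry anyway.
 */
#if 0
bld_register_add_val(reg, a);  /* vals = { a }       */
bld_register_add_val(reg, b);  /* vals = { a, b }    */
bld_register_add_val(reg, c);  /* vals = { a, b, c } */
bld_register_del_val(reg, a);  /* vals = { c, b }: c is swapped into slot 0 */
#endif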
88
89 struct bld_context {
90 struct nvc0_translation_info *ti;
91
92 struct nv_pc *pc;
93 struct nv_basic_block *b;
94
95 struct tgsi_parse_context parse[BLD_MAX_CALL_NESTING];
96 int call_lvl;
97
98 struct nv_basic_block *cond_bb[BLD_MAX_COND_NESTING];
99 struct nv_basic_block *join_bb[BLD_MAX_COND_NESTING];
100 struct nv_basic_block *else_bb[BLD_MAX_COND_NESTING];
101 int cond_lvl;
102 struct nv_basic_block *loop_bb[BLD_MAX_LOOP_NESTING];
103 struct nv_basic_block *brkt_bb[BLD_MAX_LOOP_NESTING];
104 int loop_lvl;
105
106 ubyte out_kind; /* CFG_EDGE_FORWARD, or FAKE in case of BREAK/CONT */
107
108 struct bld_register tvs[BLD_MAX_TEMPS][4]; /* TGSI_FILE_TEMPORARY */
109 struct bld_register avs[BLD_MAX_ADDRS][4]; /* TGSI_FILE_ADDRESS */
110 struct bld_register pvs[BLD_MAX_PREDS][4]; /* TGSI_FILE_PREDICATE */
111 struct bld_register ovs[BLD_MAX_OUTPS][4]; /* TGSI_FILE_OUTPUT, FP only */
112
113 uint32_t outputs_written[(PIPE_MAX_SHADER_OUTPUTS + 7) / 8];
114
115 struct nv_value *zero;
116 struct nv_value *frag_coord[4];
117
118 /* wipe on new BB */
119 struct nv_value *saved_sysvals[4];
120 struct nv_value *saved_addr[4][2];
121 struct nv_value *saved_inputs[PIPE_MAX_SHADER_INPUTS][4];
122 struct nv_value *saved_immd[BLD_MAX_IMMDS];
123 uint num_immds;
124 };
125
126 static INLINE ubyte
127 bld_register_file(struct bld_context *bld, struct bld_register *reg)
128 {
129 if (reg < &bld->avs[0][0]) return NV_FILE_GPR;
130 else
131 if (reg < &bld->pvs[0][0]) return NV_FILE_GPR;
132 else
133 if (reg < &bld->ovs[0][0]) return NV_FILE_PRED;
134 else
135 return NV_FILE_MEM_V;
136 }
137
138 static INLINE struct nv_value *
139 bld_fetch(struct bld_context *bld, struct bld_register *regs, int i, int c)
140 {
141 regs[i * 4 + c].loop_use |= 1 << bld->loop_lvl;
142 return regs[i * 4 + c].current;
143 }
144
145 static struct nv_value *
146 bld_loop_phi(struct bld_context *, struct bld_register *, struct nv_value *);
147
148 /* If a variable is defined in a loop without prior use, we don't need
149 * a phi in the loop header to account for backwards flow.
150 *
151 * However, if this variable is then also used outside the loop, we do
152 * need a phi after all. But we must not use this phi's def inside the
153 * loop, so we can eliminate the phi if it is unused later.
154 */
155 static INLINE void
156 bld_store(struct bld_context *bld,
157 struct bld_register *regs, int i, int c, struct nv_value *val)
158 {
159 const uint16_t m = 1 << bld->loop_lvl;
160 struct bld_register *reg = &regs[i * 4 + c];
161
162 if (bld->loop_lvl && !(m & (reg->loop_def | reg->loop_use)))
163 bld_loop_phi(bld, reg, val);
164
165 reg->current = val;
166 bld_register_add_val(reg, reg->current);
167
168 reg->loop_def |= 1 << bld->loop_lvl;
169 }
170
171 #define FETCH_TEMP(i, c) bld_fetch(bld, &bld->tvs[0][0], i, c)
172 #define STORE_TEMP(i, c, v) bld_store(bld, &bld->tvs[0][0], i, c, (v))
173 #define FETCH_ADDR(i, c) bld_fetch(bld, &bld->avs[0][0], i, c)
174 #define STORE_ADDR(i, c, v) bld_store(bld, &bld->avs[0][0], i, c, (v))
175 #define FETCH_PRED(i, c) bld_fetch(bld, &bld->pvs[0][0], i, c)
176 #define STORE_PRED(i, c, v) bld_store(bld, &bld->pvs[0][0], i, c, (v))
177 #define STORE_OUTP(i, c, v) \
178 do { \
179 bld_store(bld, &bld->ovs[0][0], i, c, (v)); \
180 bld->outputs_written[(i) / 8] |= 1 << (((i) * 4 + (c)) % 32); \
181 } while (0)
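
/* Sketch of the loop bookkeeping above (assuming loop_lvl is the current
 * loop nesting depth, with bit 0 covering code outside any loop):
 *
 *   STORE_TEMP(0, 0, v) at loop_lvl == 2  =>  loop_def |= 1 << 2, and, if
 *     neither bit was set yet at this level, a phi is built in the loop
 *     header first (see bld_loop_phi);
 *   FETCH_TEMP(0, 0)    at loop_lvl == 2  =>  loop_use |= 1 << 2.
 */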
182
183 static INLINE void
184 bld_clear_def_use(struct bld_register *regs, int n, int lvl)
185 {
186 int i;
187 const uint16_t mask = ~(1 << lvl);
188
189 for (i = 0; i < n * 4; ++i) {
190 regs[i].loop_def &= mask;
191 regs[i].loop_use &= mask;
192 }
193 }
194
195 static INLINE void
196 bld_warn_uninitialized(struct bld_context *bld, int kind,
197 struct bld_register *reg, struct nv_basic_block *b)
198 {
199 #ifdef NOUVEAU_DEBUG
200 long i = (reg - &bld->tvs[0][0]) / 4;
201 long c = (reg - &bld->tvs[0][0]) & 3;
202
203 if (c == 3)
204 c = -1;
205 debug_printf("WARNING: TEMP[%li].%c %s used uninitialized in BB:%i\n",
206 i, (int)('x' + c), kind ? "may be" : "is", b->id);
207 #endif
208 }
209
210 static INLINE struct nv_value *
211 bld_def(struct nv_instruction *i, int c, struct nv_value *value)
212 {
213 i->def[c] = value;
214 value->insn = i;
215 return value;
216 }
217
218 static INLINE struct nv_value *
219 find_by_bb(struct bld_register *reg, struct nv_basic_block *b)
220 {
221 int i;
222
223 if (reg->current && reg->current->insn->bb == b)
224 return reg->current;
225
226 for (i = 0; i < reg->vals.size / sizeof(struct nv_value *); ++i)
227 if ((*bld_register_access(reg, i))->insn->bb == b)
228 return *bld_register_access(reg, i);
229 return NULL;
230 }
231
232 /* Fetch value from register that was defined in the specified BB,
233 * or search for first definitions in all of its predecessors.
234 */
235 static void
236 fetch_by_bb(struct bld_register *reg,
237 struct nv_value **vals, int *n,
238 struct nv_basic_block *b)
239 {
240 int i;
241 struct nv_value *val;
242
243 assert(*n < 16); /* MAX_COND_NESTING */
244
245 val = find_by_bb(reg, b);
246 if (val) {
247 for (i = 0; i < *n; ++i)
248 if (vals[i] == val)
249 return;
250 vals[(*n)++] = val;
251 return;
252 }
253 for (i = 0; i < b->num_in; ++i)
254 if (!IS_WALL_EDGE(b->in_kind[i]))
255 fetch_by_bb(reg, vals, n, b->in[i]);
256 }
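
/* Example (sketch): in a diamond CFG where BB1 branches to BB2 and BB3 which
 * rejoin in BB4, fetch_by_bb(reg, vals, &n, BB4) finds no definition in BB4
 * itself and recurses into BB2 and BB3. A value defined before the branch is
 * reached through both arms but deduplicated (n == 1); distinct definitions
 * in the two arms yield n == 2, which makes bld_phi emit a phi at BB4.
 */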
257
258 static INLINE struct nv_value *
259 bld_load_imm_u32(struct bld_context *bld, uint32_t u);
260
261 static INLINE struct nv_value *
262 bld_undef(struct bld_context *bld, ubyte file)
263 {
264 struct nv_instruction *nvi = new_instruction(bld->pc, NV_OP_UNDEF);
265
266 return bld_def(nvi, 0, new_value(bld->pc, file, 4));
267 }
268
269 static struct nv_value *
270 bld_phi(struct bld_context *bld, struct nv_basic_block *b,
271 struct bld_register *reg)
272 {
273 struct nv_basic_block *in;
274 struct nv_value *vals[16] = { NULL };
275 struct nv_value *val;
276 struct nv_instruction *phi;
277 int i, j, n;
278
279 do {
280 i = n = 0;
281 fetch_by_bb(reg, vals, &n, b);
282
283 if (!n) {
284 bld_warn_uninitialized(bld, 0, reg, b);
285 return NULL;
286 }
287
288 if (n == 1) {
289 if (nvc0_bblock_dominated_by(b, vals[0]->insn->bb))
290 break;
291
292 bld_warn_uninitialized(bld, 1, reg, b);
293
294          /* back-track to insert the missing value from the other path */
295 in = b;
296 while (in->in[0]) {
297 if (in->num_in == 1) {
298 in = in->in[0];
299 } else {
300 if (!nvc0_bblock_reachable_by(in->in[0], vals[0]->insn->bb, b))
301 in = in->in[0];
302 else
303 if (!nvc0_bblock_reachable_by(in->in[1], vals[0]->insn->bb, b))
304 in = in->in[1];
305 else
306 in = in->in[0];
307 }
308 }
309 bld->pc->current_block = in;
310
311 /* should make this a no-op */
312 bld_register_add_val(reg, bld_undef(bld, vals[0]->reg.file));
313 continue;
314 }
315
316 for (i = 0; i < n; ++i) {
317 /* if value dominates b, continue to the redefinitions */
318 if (nvc0_bblock_dominated_by(b, vals[i]->insn->bb))
319 continue;
320
321 /* if value dominates any in-block, b should be the dom frontier */
322 for (j = 0; j < b->num_in; ++j)
323 if (nvc0_bblock_dominated_by(b->in[j], vals[i]->insn->bb))
324 break;
325 /* otherwise, find the dominance frontier and put the phi there */
326 if (j == b->num_in) {
327 in = nvc0_bblock_dom_frontier(vals[i]->insn->bb);
328 val = bld_phi(bld, in, reg);
329 bld_register_add_val(reg, val);
330 break;
331 }
332 }
333    } while (i < n);
334
335 bld->pc->current_block = b;
336
337 if (n == 1)
338 return vals[0];
339
340 phi = new_instruction(bld->pc, NV_OP_PHI);
341
342 bld_def(phi, 0, new_value(bld->pc, vals[0]->reg.file, vals[0]->reg.size));
343 for (i = 0; i < n; ++i)
344 nv_reference(bld->pc, phi, i, vals[i]);
345
346 return phi->def[0];
347 }
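
/* Worked example (sketch) for the single-definition case above: if TEMP[0].x
 * is written only in the "then" arm of an IF, then at the join block n == 1
 * but the definition does not dominate the join. bld_phi warns, back-tracks
 * into the path lacking a definition, emits an undef there, and retries;
 * the next iteration sees n == 2 and builds "%2 = phi %def, %undef".
 */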
348
349 /* Insert a phi function in the loop header.
350 * For nested loops, we need to insert phi functions in all the outer
351 * loop headers if they don't have one yet.
352 *
353 * @def: redefinition from inside loop, or NULL if to be replaced later
354 */
355 static struct nv_value *
356 bld_loop_phi(struct bld_context *bld, struct bld_register *reg,
357 struct nv_value *def)
358 {
359 struct nv_instruction *phi;
360 struct nv_basic_block *bb = bld->pc->current_block;
361 struct nv_value *val = NULL;
362
363 if (bld->loop_lvl > 1) {
364 --bld->loop_lvl;
365 if (!((reg->loop_def | reg->loop_use) & (1 << bld->loop_lvl)))
366 val = bld_loop_phi(bld, reg, NULL);
367 ++bld->loop_lvl;
368 }
369
370 if (!val)
371 val = bld_phi(bld, bld->pc->current_block, reg); /* old definition */
372 if (!val) {
373 bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1]->in[0];
374 val = bld_undef(bld, bld_register_file(bld, reg));
375 }
376
377 bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1];
378
379 phi = new_instruction(bld->pc, NV_OP_PHI);
380
381 bld_def(phi, 0, new_value_like(bld->pc, val));
382 if (!def)
383 def = phi->def[0];
384
385 bld_register_add_val(reg, phi->def[0]);
386
387 phi->target = (struct nv_basic_block *)reg; /* cheat */
388
389 nv_reference(bld->pc, phi, 0, val);
390 nv_reference(bld->pc, phi, 1, def);
391
392 bld->pc->current_block = bb;
393
394 return phi->def[0];
395 }
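
/* Sketch: on entry to a loop where TEMP[0].x currently is %0, the header gets
 *
 *   %1 = phi %0, %1
 *
 * when @def is NULL, i.e. the phi temporarily references its own def. The
 * register pointer smuggled through phi->target lets bld_loop_end look the
 * register up again and patch the back-edge source(s) with the real
 * loop-side definition once the loop body has been translated.
 */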
396
397 static INLINE struct nv_value *
398 bld_fetch_global(struct bld_context *bld, struct bld_register *reg)
399 {
400 const uint16_t m = 1 << bld->loop_lvl;
401 const uint16_t use = reg->loop_use;
402
403 reg->loop_use |= m;
404
405    /* If neither used nor def'd inside the loop yet, build the phi in advance,
406     * so we don't have to go back and replace uses later (which requires tracking).
407     */
408 if (bld->loop_lvl && !((use | reg->loop_def) & m))
409 return bld_loop_phi(bld, reg, NULL);
410
411 return bld_phi(bld, bld->pc->current_block, reg);
412 }
413
414 static INLINE struct nv_value *
415 bld_imm_u32(struct bld_context *bld, uint32_t u)
416 {
417 int i;
418 unsigned n = bld->num_immds;
419
420 for (i = 0; i < n; ++i)
421 if (bld->saved_immd[i]->reg.imm.u32 == u)
422 return bld->saved_immd[i];
423
424 assert(n < BLD_MAX_IMMDS);
425 bld->num_immds++;
426
427 bld->saved_immd[n] = new_value(bld->pc, NV_FILE_IMM, 4);
428 bld->saved_immd[n]->reg.imm.u32 = u;
429 return bld->saved_immd[n];
430 }
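
/* Usage sketch (hypothetical values): immediates are pooled per shader, so
 * requests for the same bit pattern return the same nv_value. Note that
 * bld_imm_f32() below is just bld_imm_u32() on the float's bits.
 */
#if 0
struct nv_value *a = bld_imm_f32(bld, 1.0f);
struct nv_value *b = bld_imm_u32(bld, 0x3f800000); /* bit pattern of 1.0f */
assert(a == b);
#endif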
431
432 static void
433 bld_replace_value(struct nv_pc *, struct nv_basic_block *, struct nv_value *,
434 struct nv_value *);
435
436 /* Replace the source of the phi in the loop header by the last assignment,
437 * or eliminate the phi function if there is no assignment inside the loop.
438 *
439  * Redundancy situation 1 - value (used) but (not redefined) inside the loop:
440  *  %3 = phi %0, %3   <- the phi references its own def; %3 is used
441  *  %3 = phi %0, %4   <- %4 is a new definition from inside the loop
442  *
443  * Redundancy situation 2 - value (not used) but (redefined) inside the loop:
444  *  %3 = phi %0, %2   <- %2 is used; %3 may be used outside, deleted by DCE
445 */
446 static void
447 bld_loop_end(struct bld_context *bld, struct nv_basic_block *bb)
448 {
449 struct nv_basic_block *save = bld->pc->current_block;
450 struct nv_instruction *phi, *next;
451 struct nv_value *val;
452 struct bld_register *reg;
453 int i, s, n;
454
455 for (phi = bb->phi; phi && phi->opcode == NV_OP_PHI; phi = next) {
456 next = phi->next;
457
458 reg = (struct bld_register *)phi->target;
459 phi->target = NULL;
460
461 for (s = 1, n = 0; n < bb->num_in; ++n) {
462 if (bb->in_kind[n] != CFG_EDGE_BACK)
463 continue;
464
465 assert(s < 4);
466 bld->pc->current_block = bb->in[n];
467 val = bld_fetch_global(bld, reg);
468
469 for (i = 0; i < 4; ++i)
470 if (phi->src[i] && phi->src[i]->value == val)
471 break;
472 if (i == 4)
473 nv_reference(bld->pc, phi, s++, val);
474 }
475 bld->pc->current_block = save;
476
477 if (phi->src[0]->value == phi->def[0] ||
478 phi->src[0]->value == phi->src[1]->value)
479 s = 1;
480 else
481 if (phi->src[1]->value == phi->def[0])
482 s = 0;
483 else
484 continue;
485
486 if (s >= 0) {
487 /* eliminate the phi */
488 bld_register_del_val(reg, phi->def[0]);
489
490 ++bld->pc->pass_seq;
491 bld_replace_value(bld->pc, bb, phi->def[0], phi->src[s]->value);
492
493 nvc0_insn_delete(phi);
494 }
495 }
496 }
497
498 static INLINE struct nv_value *
499 bld_imm_f32(struct bld_context *bld, float f)
500 {
501 return bld_imm_u32(bld, fui(f));
502 }
503
504 static struct nv_value *
505 bld_insn_1(struct bld_context *bld, uint opcode, struct nv_value *src0)
506 {
507 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
508
509 nv_reference(bld->pc, insn, 0, src0);
510
511 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
512 }
513
514 static struct nv_value *
515 bld_insn_2(struct bld_context *bld, uint opcode,
516 struct nv_value *src0, struct nv_value *src1)
517 {
518 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
519
520 nv_reference(bld->pc, insn, 0, src0);
521 nv_reference(bld->pc, insn, 1, src1);
522
523 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
524 }
525
526 static struct nv_value *
527 bld_insn_3(struct bld_context *bld, uint opcode,
528 struct nv_value *src0, struct nv_value *src1,
529 struct nv_value *src2)
530 {
531 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
532
533 nv_reference(bld->pc, insn, 0, src0);
534 nv_reference(bld->pc, insn, 1, src1);
535 nv_reference(bld->pc, insn, 2, src2);
536
537 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
538 }
539
540 static INLINE void
541 bld_src_predicate(struct bld_context *bld,
542 struct nv_instruction *nvi, int s, struct nv_value *val)
543 {
544 nvi->predicate = s;
545 nv_reference(bld->pc, nvi, s, val);
546 }
547
548 static INLINE void
549 bld_src_pointer(struct bld_context *bld,
550 struct nv_instruction *nvi, int s, struct nv_value *val)
551 {
552 nvi->indirect = s;
553 nv_reference(bld->pc, nvi, s, val);
554 }
555
556 static void
557 bld_lmem_store(struct bld_context *bld, struct nv_value *ptr, int ofst,
558 struct nv_value *val)
559 {
560 struct nv_instruction *insn = new_instruction(bld->pc, NV_OP_ST);
561 struct nv_value *loc;
562
563 loc = new_value(bld->pc, NV_FILE_MEM_L, nv_type_sizeof(NV_TYPE_U32));
564
565    loc->reg.address = ofst * 4; /* match bld_lmem_load below */
566
567 nv_reference(bld->pc, insn, 0, loc);
568 nv_reference(bld->pc, insn, 1, ptr);
569 nv_reference(bld->pc, insn, 2, val);
570 }
571
572 static struct nv_value *
573 bld_lmem_load(struct bld_context *bld, struct nv_value *ptr, int ofst)
574 {
575 struct nv_value *loc, *val;
576
577 loc = new_value(bld->pc, NV_FILE_MEM_L, nv_type_sizeof(NV_TYPE_U32));
578
579 loc->reg.address = ofst * 4;
580
581 val = bld_insn_2(bld, NV_OP_LD, loc, ptr);
582
583 return val;
584 }
585
586 static struct nv_value *
587 bld_pow(struct bld_context *bld, struct nv_value *x, struct nv_value *e)
588 {
589 struct nv_value *val;
590
591 val = bld_insn_1(bld, NV_OP_LG2, x);
592 val = bld_insn_2(bld, NV_OP_MUL_F32, e, val);
593
594 val = bld_insn_1(bld, NV_OP_PREEX2, val);
595 val = bld_insn_1(bld, NV_OP_EX2, val);
596
597 return val;
598 }
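
/* bld_pow implements pow(x, e) = 2^(e * log2(x)); PREEX2 is the argument
 * reduction the hardware wants before EX2. Sanity check (sketch):
 * x = 8.0, e = 2.0  ->  lg2(8) = 3,  2 * 3 = 6,  2^6 = 64 = 8^2.
 */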
599
600 static INLINE struct nv_value *
601 bld_load_imm_f32(struct bld_context *bld, float f)
602 {
603 if (f == 0.0f)
604 return bld->zero;
605 return bld_insn_1(bld, NV_OP_MOV, bld_imm_f32(bld, f));
606 }
607
608 static INLINE struct nv_value *
609 bld_load_imm_u32(struct bld_context *bld, uint32_t u)
610 {
611 if (u == 0)
612 return bld->zero;
613 return bld_insn_1(bld, NV_OP_MOV, bld_imm_u32(bld, u));
614 }
615
616 static INLINE struct nv_value *
617 bld_setp(struct bld_context *bld, uint op, uint8_t cc,
618 struct nv_value *src0, struct nv_value *src1)
619 {
620 struct nv_value *val = bld_insn_2(bld, op, src0, src1);
621
622 val->reg.file = NV_FILE_PRED;
623 val->reg.size = 1;
624 val->insn->set_cond = cc & 0xf;
625 return val;
626 }
627
628 static INLINE struct nv_value *
629 bld_cvt(struct bld_context *bld, uint8_t dt, uint8_t st, struct nv_value *src)
630 {
631 struct nv_value *val = bld_insn_1(bld, NV_OP_CVT, src);
632 val->insn->ext.cvt.d = dt;
633 val->insn->ext.cvt.s = st;
634 return val;
635 }
636
637 static void
638 bld_kil(struct bld_context *bld, struct nv_value *src)
639 {
640 struct nv_instruction *nvi;
641
642 src = bld_setp(bld, NV_OP_SET_F32, NV_CC_LT, src, bld->zero);
643
644 nvi = new_instruction(bld->pc, NV_OP_KIL);
645 nvi->fixed = 1;
646
647 bld_src_predicate(bld, nvi, 0, src);
648 }
649
650 static void
651 bld_flow(struct bld_context *bld, uint opcode,
652 struct nv_value *src, struct nv_basic_block *target,
653 boolean reconverge)
654 {
655 struct nv_instruction *nvi;
656
657 if (reconverge)
658 new_instruction(bld->pc, NV_OP_JOINAT)->fixed = 1;
659
660 nvi = new_instruction(bld->pc, opcode);
661 nvi->target = target;
662 nvi->terminator = 1;
663 if (src)
664 bld_src_predicate(bld, nvi, 0, src);
665 }
666
667 static ubyte
668 translate_setcc(unsigned opcode)
669 {
670 switch (opcode) {
671 case TGSI_OPCODE_SLT: return NV_CC_LT;
672 case TGSI_OPCODE_SGE: return NV_CC_GE;
673 case TGSI_OPCODE_SEQ: return NV_CC_EQ;
674 case TGSI_OPCODE_SGT: return NV_CC_GT;
675 case TGSI_OPCODE_SLE: return NV_CC_LE;
676 case TGSI_OPCODE_SNE: return NV_CC_NE | NV_CC_U;
677 case TGSI_OPCODE_STR: return NV_CC_TR;
678 case TGSI_OPCODE_SFL: return NV_CC_FL;
679
680 case TGSI_OPCODE_ISLT: return NV_CC_LT;
681 case TGSI_OPCODE_ISGE: return NV_CC_GE;
682 case TGSI_OPCODE_USEQ: return NV_CC_EQ;
683 case TGSI_OPCODE_USGE: return NV_CC_GE;
684 case TGSI_OPCODE_USLT: return NV_CC_LT;
685 case TGSI_OPCODE_USNE: return NV_CC_NE;
686 default:
687 assert(0);
688 return NV_CC_FL;
689 }
690 }
691
692 static uint
693 translate_opcode(uint opcode)
694 {
695 switch (opcode) {
696 case TGSI_OPCODE_ABS: return NV_OP_ABS_F32;
697 case TGSI_OPCODE_ADD: return NV_OP_ADD_F32;
698 case TGSI_OPCODE_SUB: return NV_OP_SUB_F32;
699 case TGSI_OPCODE_UADD: return NV_OP_ADD_B32;
700 case TGSI_OPCODE_AND: return NV_OP_AND;
701 case TGSI_OPCODE_EX2: return NV_OP_EX2;
702 case TGSI_OPCODE_CEIL: return NV_OP_CEIL;
703 case TGSI_OPCODE_FLR: return NV_OP_FLOOR;
704 case TGSI_OPCODE_TRUNC: return NV_OP_TRUNC;
705 case TGSI_OPCODE_COS: return NV_OP_COS;
706 case TGSI_OPCODE_SIN: return NV_OP_SIN;
707 case TGSI_OPCODE_DDX: return NV_OP_DFDX;
708 case TGSI_OPCODE_DDY: return NV_OP_DFDY;
709 case TGSI_OPCODE_F2I:
710 case TGSI_OPCODE_F2U:
711 case TGSI_OPCODE_I2F:
712 case TGSI_OPCODE_U2F: return NV_OP_CVT;
713 case TGSI_OPCODE_INEG: return NV_OP_NEG_S32;
714 case TGSI_OPCODE_LG2: return NV_OP_LG2;
715 case TGSI_OPCODE_ISHR: return NV_OP_SAR;
716 case TGSI_OPCODE_USHR: return NV_OP_SHR;
717 case TGSI_OPCODE_MAD: return NV_OP_MAD_F32;
718 case TGSI_OPCODE_MAX: return NV_OP_MAX_F32;
719 case TGSI_OPCODE_IMAX: return NV_OP_MAX_S32;
720 case TGSI_OPCODE_UMAX: return NV_OP_MAX_U32;
721 case TGSI_OPCODE_MIN: return NV_OP_MIN_F32;
722 case TGSI_OPCODE_IMIN: return NV_OP_MIN_S32;
723 case TGSI_OPCODE_UMIN: return NV_OP_MIN_U32;
724 case TGSI_OPCODE_MUL: return NV_OP_MUL_F32;
725 case TGSI_OPCODE_UMUL: return NV_OP_MUL_B32;
726 case TGSI_OPCODE_OR: return NV_OP_OR;
727 case TGSI_OPCODE_RCP: return NV_OP_RCP;
728 case TGSI_OPCODE_RSQ: return NV_OP_RSQ;
729 case TGSI_OPCODE_SAD: return NV_OP_SAD;
730 case TGSI_OPCODE_SHL: return NV_OP_SHL;
731 case TGSI_OPCODE_SLT:
732 case TGSI_OPCODE_SGE:
733 case TGSI_OPCODE_SEQ:
734 case TGSI_OPCODE_SGT:
735 case TGSI_OPCODE_SLE:
736 case TGSI_OPCODE_SNE: return NV_OP_FSET_F32;
737 case TGSI_OPCODE_ISLT:
738 case TGSI_OPCODE_ISGE: return NV_OP_SET_S32;
739 case TGSI_OPCODE_USEQ:
740 case TGSI_OPCODE_USGE:
741 case TGSI_OPCODE_USLT:
742 case TGSI_OPCODE_USNE: return NV_OP_SET_U32;
743 case TGSI_OPCODE_TEX: return NV_OP_TEX;
744 case TGSI_OPCODE_TXP: return NV_OP_TEX;
745 case TGSI_OPCODE_TXB: return NV_OP_TXB;
746 case TGSI_OPCODE_TXL: return NV_OP_TXL;
747 case TGSI_OPCODE_XOR: return NV_OP_XOR;
748 default:
749 return NV_OP_NOP;
750 }
751 }
752
753 #if 0
754 static ubyte
755 infer_src_type(unsigned opcode)
756 {
757 switch (opcode) {
758 case TGSI_OPCODE_MOV:
759 case TGSI_OPCODE_AND:
760 case TGSI_OPCODE_OR:
761 case TGSI_OPCODE_XOR:
762 case TGSI_OPCODE_SAD:
763 case TGSI_OPCODE_U2F:
764 case TGSI_OPCODE_UADD:
765 case TGSI_OPCODE_UDIV:
766 case TGSI_OPCODE_UMOD:
767 case TGSI_OPCODE_UMAD:
768 case TGSI_OPCODE_UMUL:
769 case TGSI_OPCODE_UMAX:
770 case TGSI_OPCODE_UMIN:
771 case TGSI_OPCODE_USEQ:
772 case TGSI_OPCODE_USGE:
773 case TGSI_OPCODE_USLT:
774 case TGSI_OPCODE_USNE:
775 case TGSI_OPCODE_USHR:
776 return NV_TYPE_U32;
777 case TGSI_OPCODE_I2F:
778 case TGSI_OPCODE_IDIV:
779 case TGSI_OPCODE_IMAX:
780 case TGSI_OPCODE_IMIN:
781 case TGSI_OPCODE_INEG:
782 case TGSI_OPCODE_ISGE:
783 case TGSI_OPCODE_ISHR:
784 case TGSI_OPCODE_ISLT:
785 return NV_TYPE_S32;
786 default:
787 return NV_TYPE_F32;
788 }
789 }
790
791 static ubyte
792 infer_dst_type(unsigned opcode)
793 {
794 switch (opcode) {
795 case TGSI_OPCODE_MOV:
796 case TGSI_OPCODE_F2U:
797 case TGSI_OPCODE_AND:
798 case TGSI_OPCODE_OR:
799 case TGSI_OPCODE_XOR:
800 case TGSI_OPCODE_SAD:
801 case TGSI_OPCODE_UADD:
802 case TGSI_OPCODE_UDIV:
803 case TGSI_OPCODE_UMOD:
804 case TGSI_OPCODE_UMAD:
805 case TGSI_OPCODE_UMUL:
806 case TGSI_OPCODE_UMAX:
807 case TGSI_OPCODE_UMIN:
808 case TGSI_OPCODE_USEQ:
809 case TGSI_OPCODE_USGE:
810 case TGSI_OPCODE_USLT:
811 case TGSI_OPCODE_USNE:
812 case TGSI_OPCODE_USHR:
813 return NV_TYPE_U32;
814 case TGSI_OPCODE_F2I:
815 case TGSI_OPCODE_IDIV:
816 case TGSI_OPCODE_IMAX:
817 case TGSI_OPCODE_IMIN:
818 case TGSI_OPCODE_INEG:
819 case TGSI_OPCODE_ISGE:
820 case TGSI_OPCODE_ISHR:
821 case TGSI_OPCODE_ISLT:
822 return NV_TYPE_S32;
823 default:
824 return NV_TYPE_F32;
825 }
826 }
827 #endif
828
829 static void
830 emit_store(struct bld_context *bld, const struct tgsi_full_instruction *inst,
831 unsigned chan, struct nv_value *res)
832 {
833 const struct tgsi_full_dst_register *reg = &inst->Dst[0];
834 struct nv_instruction *nvi;
835 struct nv_value *mem;
836 struct nv_value *ptr = NULL;
837 int idx;
838
839 idx = reg->Register.Index;
840 assert(chan < 4);
841
842 if (reg->Register.Indirect)
843 ptr = FETCH_ADDR(reg->Indirect.Index,
844 tgsi_util_get_src_register_swizzle(&reg->Indirect, 0));
845
846 switch (inst->Instruction.Saturate) {
847 case TGSI_SAT_NONE:
848 break;
849 case TGSI_SAT_ZERO_ONE:
850 res = bld_insn_1(bld, NV_OP_SAT, res);
851 break;
852 case TGSI_SAT_MINUS_PLUS_ONE:
853 res = bld_insn_2(bld, NV_OP_MAX_F32, res, bld_load_imm_f32(bld, -1.0f));
854 res = bld_insn_2(bld, NV_OP_MIN_F32, res, bld_load_imm_f32(bld, 1.0f));
855 break;
856 }
857
858 switch (reg->Register.File) {
859 case TGSI_FILE_OUTPUT:
860 if (!res->insn)
861 res = bld_insn_1(bld, NV_OP_MOV, res);
862
863 if (bld->pc->is_fragprog) {
864 assert(!ptr);
865 STORE_OUTP(idx, chan, res);
866 } else {
867 nvi = new_instruction(bld->pc, NV_OP_EXPORT);
868 mem = new_value(bld->pc, bld->ti->output_file, res->reg.size);
869 nv_reference(bld->pc, nvi, 0, mem);
870 nv_reference(bld->pc, nvi, 1, res);
871 if (!ptr)
872 mem->reg.address = bld->ti->output_loc[idx][chan];
873 else
874 mem->reg.address = 0x80 + idx * 16 + chan * 4;
875 nvi->fixed = 1;
876 }
877 break;
878 case TGSI_FILE_TEMPORARY:
879 assert(idx < BLD_MAX_TEMPS);
880 if (!res->insn)
881 res = bld_insn_1(bld, NV_OP_MOV, res);
882
883 assert(res->reg.file == NV_FILE_GPR);
884       assert(res->insn->bb == bld->pc->current_block);
885
886 if (bld->ti->require_stores)
887 bld_lmem_store(bld, ptr, idx * 4 + chan, res);
888 else
889 STORE_TEMP(idx, chan, res);
890 break;
891 case TGSI_FILE_ADDRESS:
892 assert(idx < BLD_MAX_ADDRS);
893 STORE_ADDR(idx, chan, res);
894 break;
895 }
896 }
897
898 static INLINE uint32_t
899 bld_is_output_written(struct bld_context *bld, int i, int c)
900 {
901 if (c < 0)
902 return bld->outputs_written[i / 8] & (0xf << ((i * 4) % 32));
903 return bld->outputs_written[i / 8] & (1 << ((i * 4 + c) % 32));
904 }
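
/* Bit layout sketch: 4 bits per output, i.e. 8 outputs per 32-bit word.
 * For output i = 9, component c = 2: word 9 / 8 = 1, bit (9 * 4 + 2) % 32 = 6;
 * the whole-output mask for i = 9 is 0xf << ((9 * 4) % 32) = 0xf << 4.
 */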
905
906 static void
907 bld_export_fp_outputs(struct bld_context *bld)
908 {
909 struct nv_value *vals[4];
910 struct nv_instruction *nvi;
911 int i, c, n;
912
913 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; ++i) {
914 if (!bld_is_output_written(bld, i, -1))
915 continue;
916 for (n = 0, c = 0; c < 4; ++c) {
917 if (!bld_is_output_written(bld, i, c))
918 continue;
919 vals[n] = bld_fetch_global(bld, &bld->ovs[i][c]);
920 assert(vals[n]);
921 vals[n] = bld_insn_1(bld, NV_OP_MOV, vals[n]);
922 vals[n++]->reg.id = bld->ti->output_loc[i][c];
923 }
924 assert(n);
925
926 (nvi = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;
927 for (c = 0; c < n; ++c)
928 nv_reference(bld->pc, nvi, c, vals[c]);
929 }
930 }
931
932 static void
933 bld_new_block(struct bld_context *bld, struct nv_basic_block *b)
934 {
935 int i, c;
936
937 bld->pc->current_block = b;
938
939 for (i = 0; i < 4; ++i)
940 bld->saved_addr[i][0] = NULL;
941 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; ++i)
942 for (c = 0; c < 4; ++c)
943 bld->saved_inputs[i][c] = NULL;
944
945 bld->out_kind = CFG_EDGE_FORWARD;
946 }
947
948 static struct nv_value *
949 bld_get_saved_input(struct bld_context *bld, unsigned i, unsigned c)
950 {
951 if (bld->saved_inputs[i][c])
952 return bld->saved_inputs[i][c];
953 return NULL;
954 }
955
956 static struct nv_value *
957 bld_interp(struct bld_context *bld, unsigned mode, struct nv_value *val)
958 {
959 unsigned cent = mode & NVC0_INTERP_CENTROID;
960
961 mode &= ~NVC0_INTERP_CENTROID;
962
963 if (val->reg.address == 0x3fc) {
964 /* gl_FrontFacing: 0/~0 to -1.0/+1.0 */
965 val = bld_insn_1(bld, NV_OP_LINTERP, val);
966 val = bld_insn_2(bld, NV_OP_SHL, val, bld_imm_u32(bld, 31));
967 val = bld_insn_2(bld, NV_OP_XOR, val, bld_imm_f32(bld, -1.0f));
968 } else
969 if (mode == NVC0_INTERP_PERSPECTIVE) {
970 val = bld_insn_2(bld, NV_OP_PINTERP, val, bld->frag_coord[3]);
971 } else {
972 val = bld_insn_1(bld, NV_OP_LINTERP, val);
973 }
974
975 val->insn->flat = mode == NVC0_INTERP_FLAT ? 1 : 0;
976 val->insn->centroid = cent ? 1 : 0;
977 return val;
978 }
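
/* The gl_FrontFacing conversion above, step by step (sketch); the raw input
 * is 0 (back facing) or ~0 (front facing):
 *
 *   ~0 << 31 = 0x80000000;  0x80000000 ^ fui(-1.0f) = 0x80000000 ^ 0xbf800000
 *            = 0x3f800000 = +1.0f
 *    0 << 31 = 0x00000000;  0x00000000 ^ 0xbf800000 = 0xbf800000 = -1.0f
 */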
979
980 static struct nv_value *
981 emit_fetch(struct bld_context *bld, const struct tgsi_full_instruction *insn,
982 const unsigned s, const unsigned chan)
983 {
984 const struct tgsi_full_src_register *src = &insn->Src[s];
985 struct nv_value *res = NULL;
986 struct nv_value *ptr = NULL;
987 int idx, ind_idx, dim_idx;
988 unsigned swz, ind_swz, sgn;
989
990 idx = src->Register.Index;
991 swz = tgsi_util_get_full_src_register_swizzle(src, chan);
992
993 if (src->Register.Indirect) {
994 ind_idx = src->Indirect.Index;
995 ind_swz = tgsi_util_get_src_register_swizzle(&src->Indirect, 0);
996
997 ptr = FETCH_ADDR(ind_idx, ind_swz);
998 }
999
1000 if (src->Register.Dimension)
1001 dim_idx = src->Dimension.Index;
1002 else
1003 dim_idx = 0;
1004
1005 switch (src->Register.File) {
1006 case TGSI_FILE_CONSTANT:
1007 assert(dim_idx < 14);
1008 res = new_value(bld->pc, NV_FILE_MEM_C(dim_idx), 4);
1009 res->reg.address = idx * 16 + swz * 4;
1010 res = bld_insn_1(bld, NV_OP_LD, res);
1011 if (ptr)
1012 bld_src_pointer(bld, res->insn, 1, ptr);
1013 break;
1014 case TGSI_FILE_IMMEDIATE: /* XXX: type for MOV TEMP[0], -IMM[0] */
1015 assert(idx < bld->ti->immd32_nr);
1016 res = bld_load_imm_u32(bld, bld->ti->immd32[idx * 4 + swz]);
1017 break;
1018 case TGSI_FILE_INPUT:
1019 assert(!src->Register.Dimension);
1020 if (!ptr) {
1021 res = bld_get_saved_input(bld, idx, swz);
1022 if (res)
1023 return res;
1024 }
1025 res = new_value(bld->pc, bld->ti->input_file, 4);
1026 if (ptr)
1027 res->reg.address = 0x80 + idx * 16 + swz * 4;
1028 else
1029 res->reg.address = bld->ti->input_loc[idx][swz];
1030
1031 if (bld->pc->is_fragprog)
1032 res = bld_interp(bld, bld->ti->interp_mode[idx], res);
1033 else
1034 res = bld_insn_1(bld, NV_OP_VFETCH, res);
1035
1036 if (ptr)
1037 bld_src_pointer(bld, res->insn, res->insn->src[1] ? 2 : 1, ptr);
1038 else
1039 bld->saved_inputs[idx][swz] = res;
1040 break;
1041 case TGSI_FILE_TEMPORARY:
1042 if (bld->ti->require_stores)
1043 res = bld_lmem_load(bld, ptr, idx * 4 + swz);
1044 else
1045 res = bld_fetch_global(bld, &bld->tvs[idx][swz]);
1046 break;
1047 case TGSI_FILE_ADDRESS:
1048 res = bld_fetch_global(bld, &bld->avs[idx][swz]);
1049 break;
1050 case TGSI_FILE_PREDICATE:
1051 res = bld_fetch_global(bld, &bld->pvs[idx][swz]);
1052 break;
1053 default:
1054 NOUVEAU_ERR("illegal/unhandled src reg file: %d\n", src->Register.File);
1055 abort();
1056 break;
1057 }
1058 if (!res)
1059 return bld_undef(bld, NV_FILE_GPR);
1060
1061 sgn = tgsi_util_get_full_src_register_sign_mode(src, chan);
1062
1063 switch (sgn) {
1064 case TGSI_UTIL_SIGN_KEEP:
1065 break;
1066 case TGSI_UTIL_SIGN_CLEAR:
1067 res = bld_insn_1(bld, NV_OP_ABS_F32, res);
1068 break;
1069 case TGSI_UTIL_SIGN_TOGGLE:
1070 res = bld_insn_1(bld, NV_OP_NEG_F32, res);
1071 break;
1072 case TGSI_UTIL_SIGN_SET:
1073 res = bld_insn_1(bld, NV_OP_ABS_F32, res);
1074 res = bld_insn_1(bld, NV_OP_NEG_F32, res);
1075 break;
1076 default:
1077 NOUVEAU_ERR("illegal/unhandled src reg sign mode\n");
1078 abort();
1079 break;
1080 }
1081
1082 return res;
1083 }
1084
1085 static void
1086 bld_lit(struct bld_context *bld, struct nv_value *dst0[4],
1087 const struct tgsi_full_instruction *insn)
1088 {
1089 struct nv_value *val0 = NULL;
1090 unsigned mask = insn->Dst[0].Register.WriteMask;
1091
1092 if (mask & ((1 << 0) | (1 << 3)))
1093 dst0[3] = dst0[0] = bld_load_imm_f32(bld, 1.0f);
1094
1095 if (mask & (3 << 1)) {
1096       val0 = bld_insn_2(bld, NV_OP_MAX_F32, emit_fetch(bld, insn, 0, 0), bld->zero);
1097 if (mask & (1 << 1))
1098 dst0[1] = val0;
1099 }
1100
1101 if (mask & (1 << 2)) {
1102 struct nv_value *val1, *val3, *src1, *src3, *pred;
1103 struct nv_value *pos128 = bld_load_imm_f32(bld, 127.999999f);
1104 struct nv_value *neg128 = bld_load_imm_f32(bld, -127.999999f);
1105
1106 src1 = emit_fetch(bld, insn, 0, 1);
1107 src3 = emit_fetch(bld, insn, 0, 3);
1108
1109 pred = bld_setp(bld, NV_OP_SET_F32, NV_CC_LE, val0, bld->zero);
1110
1111 val1 = bld_insn_2(bld, NV_OP_MAX_F32, src1, bld->zero);
1112 val3 = bld_insn_2(bld, NV_OP_MAX_F32, src3, neg128);
1113 val3 = bld_insn_2(bld, NV_OP_MIN_F32, val3, pos128);
1114 val3 = bld_pow(bld, val1, val3);
1115
1116 dst0[2] = bld_insn_1(bld, NV_OP_MOV, bld->zero);
1117 bld_src_predicate(bld, dst0[2]->insn, 1, pred);
1118
1119 dst0[2] = bld_insn_2(bld, NV_OP_SELECT, val3, dst0[2]);
1120 }
1121 }
1122
1123 static INLINE void
1124 get_tex_dim(const struct tgsi_full_instruction *insn, int *dim, int *arg)
1125 {
1126 switch (insn->Texture.Texture) {
1127 case TGSI_TEXTURE_1D:
1128 *arg = *dim = 1;
1129 break;
1130 case TGSI_TEXTURE_SHADOW1D:
1131 *dim = 1;
1132 *arg = 2;
1133 break;
1134 case TGSI_TEXTURE_UNKNOWN:
1135 case TGSI_TEXTURE_2D:
1136 case TGSI_TEXTURE_RECT:
1137 *arg = *dim = 2;
1138 break;
1139 case TGSI_TEXTURE_SHADOW2D:
1140 case TGSI_TEXTURE_SHADOWRECT:
1141 *dim = 2;
1142 *arg = 3;
1143 break;
1144 case TGSI_TEXTURE_3D:
1145 case TGSI_TEXTURE_CUBE:
1146 *dim = *arg = 3;
1147 break;
1148 default:
1149 assert(0);
1150 break;
1151 }
1152 }
1153
1154 static struct nv_value *
1155 bld_clone(struct bld_context *bld, struct nv_instruction *nvi)
1156 {
1157 struct nv_instruction *dupi = new_instruction(bld->pc, nvi->opcode);
1158 struct nv_instruction *next, *prev;
1159 int c;
1160
1161 next = dupi->next;
1162 prev = dupi->prev;
1163
1164 *dupi = *nvi;
1165
1166 dupi->next = next;
1167 dupi->prev = prev;
1168
1169 for (c = 0; c < 5 && nvi->def[c]; ++c)
1170 bld_def(dupi, c, new_value_like(bld->pc, nvi->def[c]));
1171
1172 for (c = 0; c < 6 && nvi->src[c]; ++c) {
1173 dupi->src[c] = NULL;
1174 nv_reference(bld->pc, dupi, c, nvi->src[c]->value);
1175 }
1176
1177 return dupi->def[0];
1178 }
1179
1180 /* NOTE: proj(tc0) = (tc0 / w) / (tc3 / w) = tc0 / tc3, handled by optimizer */
1181 static void
1182 load_proj_tex_coords(struct bld_context *bld,
1183 struct nv_value *t[4], int dim, int arg,
1184 const struct tgsi_full_instruction *insn)
1185 {
1186 int c;
1187 unsigned mask = (1 << dim) - 1;
1188
1189 if (arg != dim)
1190 mask |= 4; /* depth comparison value */
1191
1192 t[3] = emit_fetch(bld, insn, 0, 3);
1193 if (t[3]->insn->opcode == NV_OP_PINTERP) {
1194 t[3] = bld_clone(bld, t[3]->insn);
1195 t[3]->insn->opcode = NV_OP_LINTERP;
1196 nv_reference(bld->pc, t[3]->insn, 1, NULL);
1197 }
1198 t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);
1199
1200 for (c = 0; c < 4; ++c) {
1201 if (!(mask & (1 << c)))
1202 continue;
1203 t[c] = emit_fetch(bld, insn, 0, c);
1204
1205 if (t[c]->insn->opcode != NV_OP_PINTERP)
1206 continue;
1207 mask &= ~(1 << c);
1208
1209 t[c] = bld_clone(bld, t[c]->insn);
1210 nv_reference(bld->pc, t[c]->insn, 1, t[3]);
1211 }
1212 if (mask == 0)
1213 return;
1214
1215 t[3] = emit_fetch(bld, insn, 0, 3);
1216 t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);
1217
1218 for (c = 0; c < 4; ++c)
1219 if (mask & (1 << c))
1220 t[c] = bld_insn_2(bld, NV_OP_MUL_F32, t[c], t[3]);
1221 }
1222
1223 /* For a quad of threads (top left, top right, bottom left, bottom right
1224  * pixels), perform a per-lane operation, taking src0 from a specific thread.
1225  */
1226 #define QOP_ADD 0
1227 #define QOP_SUBR 1
1228 #define QOP_SUB 2
1229 #define QOP_MOV1 3
1230
1231 #define QOP(a, b, c, d) \
1232 ((QOP_##a << 0) | (QOP_##b << 2) | (QOP_##c << 4) | (QOP_##d << 6))
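
/* Encoding sketch: two bits per lane, lane 0 (top left) in the lowest bits.
 * Assuming SUBR means reversed subtraction (src1 - src0):
 *
 *   QOP(SUBR, SUB, SUBR, SUB) = 1 | (2 << 2) | (1 << 4) | (2 << 6) = 0x99
 *
 * would make lanes 0 and 2 compute src1 - src0, and lanes 1 and 3 src0 - src1.
 */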
1233
1234 static INLINE struct nv_value *
1235 bld_quadop(struct bld_context *bld, ubyte qop, struct nv_value *src0, int lane,
1236 struct nv_value *src1, boolean wp)
1237 {
1238 struct nv_value *val = bld_insn_2(bld, NV_OP_QUADOP, src0, src1);
1239 val->insn->lanes = lane;
1240 val->insn->quadop = qop;
1241 if (wp) {
1242 assert(!"quadop predicate write");
1243 }
1244 return val;
1245 }
1246
1247 static struct nv_instruction *
1248 emit_tex(struct bld_context *bld, uint opcode,
1249 struct nv_value *dst[4], struct nv_value *t_in[4],
1250 int argc, int tic, int tsc, int cube)
1251 {
1252 struct nv_value *t[4];
1253 struct nv_instruction *nvi;
1254 int c;
1255
1256 /* the inputs to a tex instruction must be separate values */
1257 for (c = 0; c < argc; ++c) {
1258 t[c] = bld_insn_1(bld, NV_OP_MOV, t_in[c]);
1259 t[c]->insn->fixed = 1;
1260 }
1261
1262 nvi = new_instruction(bld->pc, opcode);
1263 for (c = 0; c < 4; ++c)
1264 dst[c] = bld_def(nvi, c, new_value(bld->pc, NV_FILE_GPR, 4));
1265 for (c = 0; c < argc; ++c)
1266 nv_reference(bld->pc, nvi, c, t[c]);
1267
1268 nvi->ext.tex.t = tic;
1269 nvi->ext.tex.s = tsc;
1270 nvi->tex_mask = 0xf;
1271 nvi->tex_cube = cube;
1272 nvi->tex_live = 0;
1273 nvi->tex_argc = argc;
1274
1275 return nvi;
1276 }
1277
1278 /*
1279 static boolean
1280 bld_is_constant(struct nv_value *val)
1281 {
1282 if (val->reg.file == NV_FILE_IMM)
1283 return TRUE;
1284 return val->insn && nvCG_find_constant(val->insn->src[0]);
1285 }
1286 */
1287
1288 static void
1289 bld_tex(struct bld_context *bld, struct nv_value *dst0[4],
1290 const struct tgsi_full_instruction *insn)
1291 {
1292 struct nv_value *t[4], *s[3];
1293 uint opcode = translate_opcode(insn->Instruction.Opcode);
1294 int arg, dim, c;
1295 const int tic = insn->Src[1].Register.Index;
1296 const int tsc = tic;
1297 const int cube = (insn->Texture.Texture == TGSI_TEXTURE_CUBE) ? 1 : 0;
1298
1299 get_tex_dim(insn, &dim, &arg);
1300
1301 if (!cube && insn->Instruction.Opcode == TGSI_OPCODE_TXP)
1302 load_proj_tex_coords(bld, t, dim, arg, insn);
1303 else {
1304 for (c = 0; c < dim; ++c)
1305 t[c] = emit_fetch(bld, insn, 0, c);
1306 if (arg != dim)
1307 t[dim] = emit_fetch(bld, insn, 0, 2);
1308 }
1309
1310 if (cube) {
1311 assert(dim >= 3);
1312 for (c = 0; c < 3; ++c)
1313 s[c] = bld_insn_1(bld, NV_OP_ABS_F32, t[c]);
1314
1315 s[0] = bld_insn_2(bld, NV_OP_MAX_F32, s[0], s[1]);
1316 s[0] = bld_insn_2(bld, NV_OP_MAX_F32, s[0], s[2]);
1317 s[0] = bld_insn_1(bld, NV_OP_RCP, s[0]);
1318
1319 for (c = 0; c < 3; ++c)
1320 t[c] = bld_insn_2(bld, NV_OP_MUL_F32, t[c], s[0]);
1321 }
1322
1323 if (opcode == NV_OP_TXB || opcode == NV_OP_TXL)
1324 t[arg++] = emit_fetch(bld, insn, 0, 3);
1325 emit_tex(bld, opcode, dst0, t, arg, tic, tsc, cube);
1326 }
1327
1328 static INLINE struct nv_value *
1329 bld_dot(struct bld_context *bld, const struct tgsi_full_instruction *insn,
1330 int n)
1331 {
1332 struct nv_value *dotp, *src0, *src1;
1333 int c;
1334
1335 src0 = emit_fetch(bld, insn, 0, 0);
1336 src1 = emit_fetch(bld, insn, 1, 0);
1337 dotp = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
1338
1339 for (c = 1; c < n; ++c) {
1340 src0 = emit_fetch(bld, insn, 0, c);
1341 src1 = emit_fetch(bld, insn, 1, c);
1342 dotp = bld_insn_3(bld, NV_OP_MAD_F32, src0, src1, dotp);
1343 }
1344 return dotp;
1345 }
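
/* Expansion sketch: bld_dot(bld, insn, 3), as used for DP3, emits
 *
 *   %d0 = mul_f32 src0.x, src1.x
 *   %d1 = mad_f32 src0.y, src1.y, %d0
 *   %d2 = mad_f32 src0.z, src1.z, %d1
 *
 * i.e. one MUL followed by n-1 MADs accumulating the running sum.
 */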
1346
1347 #define FOR_EACH_DST0_ENABLED_CHANNEL(chan, inst) \
1348 for (chan = 0; chan < 4; ++chan) \
1349 if ((inst)->Dst[0].Register.WriteMask & (1 << chan))
1350
1351 static void
1352 bld_instruction(struct bld_context *bld,
1353 const struct tgsi_full_instruction *insn)
1354 {
1355 struct nv_value *src0;
1356 struct nv_value *src1;
1357 struct nv_value *src2;
1358 struct nv_value *dst0[4] = { NULL };
1359 struct nv_value *temp;
1360 int c;
1361 uint opcode = translate_opcode(insn->Instruction.Opcode);
1362 uint8_t mask = insn->Dst[0].Register.WriteMask;
1363
1364 #ifdef NOUVEAU_DEBUG
1365 debug_printf("bld_instruction:"); tgsi_dump_instruction(insn, 1);
1366 #endif
1367
1368 switch (insn->Instruction.Opcode) {
1369 case TGSI_OPCODE_ADD:
1370 case TGSI_OPCODE_MAX:
1371 case TGSI_OPCODE_MIN:
1372 case TGSI_OPCODE_MUL:
1373 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1374 src0 = emit_fetch(bld, insn, 0, c);
1375 src1 = emit_fetch(bld, insn, 1, c);
1376 dst0[c] = bld_insn_2(bld, opcode, src0, src1);
1377 }
1378 break;
1379 case TGSI_OPCODE_ARL:
1380 src1 = bld_imm_u32(bld, 4);
1381 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1382 src0 = emit_fetch(bld, insn, 0, c);
1383 src0 = bld_insn_1(bld, NV_OP_FLOOR, src0);
1384 src0->insn->ext.cvt.d = NV_TYPE_S32;
1385 src0->insn->ext.cvt.s = NV_TYPE_F32;
1386 dst0[c] = bld_insn_2(bld, NV_OP_SHL, src0, src1);
1387 }
1388 break;
1389 case TGSI_OPCODE_CMP:
1390 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1391 src0 = emit_fetch(bld, insn, 0, c);
1392 src0 = bld_setp(bld, NV_OP_SET_F32, NV_CC_LT, src0, bld->zero);
1393 src1 = emit_fetch(bld, insn, 1, c);
1394 src2 = emit_fetch(bld, insn, 2, c);
1395 dst0[c] = bld_insn_3(bld, NV_OP_SELP, src1, src2, src0);
1396 }
1397 break;
1398 case TGSI_OPCODE_COS:
1399 case TGSI_OPCODE_SIN:
1400 src0 = emit_fetch(bld, insn, 0, 0);
1401 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1402 if (insn->Dst[0].Register.WriteMask & 7)
1403 temp = bld_insn_1(bld, opcode, temp);
1404 for (c = 0; c < 3; ++c)
1405 if (insn->Dst[0].Register.WriteMask & (1 << c))
1406 dst0[c] = temp;
1407 if (!(insn->Dst[0].Register.WriteMask & (1 << 3)))
1408 break;
1409 src0 = emit_fetch(bld, insn, 0, 3);
1410 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1411 dst0[3] = bld_insn_1(bld, opcode, temp);
1412 break;
1413 case TGSI_OPCODE_DP2:
1414 temp = bld_dot(bld, insn, 2);
1415 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1416 dst0[c] = temp;
1417 break;
1418 case TGSI_OPCODE_DP3:
1419 temp = bld_dot(bld, insn, 3);
1420 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1421 dst0[c] = temp;
1422 break;
1423 case TGSI_OPCODE_DP4:
1424 temp = bld_dot(bld, insn, 4);
1425 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1426 dst0[c] = temp;
1427 break;
1428 case TGSI_OPCODE_DPH:
1429 src0 = bld_dot(bld, insn, 3);
1430 src1 = emit_fetch(bld, insn, 1, 3);
1431 temp = bld_insn_2(bld, NV_OP_ADD_F32, src0, src1);
1432 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1433 dst0[c] = temp;
1434 break;
1435 case TGSI_OPCODE_DST:
1436 if (insn->Dst[0].Register.WriteMask & 1)
1437 dst0[0] = bld_imm_f32(bld, 1.0f);
1438 if (insn->Dst[0].Register.WriteMask & 2) {
1439 src0 = emit_fetch(bld, insn, 0, 1);
1440 src1 = emit_fetch(bld, insn, 1, 1);
1441 dst0[1] = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
1442 }
1443 if (insn->Dst[0].Register.WriteMask & 4)
1444 dst0[2] = emit_fetch(bld, insn, 0, 2);
1445 if (insn->Dst[0].Register.WriteMask & 8)
1446 dst0[3] = emit_fetch(bld, insn, 1, 3);
1447 break;
1448 case TGSI_OPCODE_EXP:
1449 src0 = emit_fetch(bld, insn, 0, 0);
1450 temp = bld_insn_1(bld, NV_OP_FLOOR, src0);
1451
1452 if (insn->Dst[0].Register.WriteMask & 2)
1453 dst0[1] = bld_insn_2(bld, NV_OP_SUB_F32, src0, temp);
1454 if (insn->Dst[0].Register.WriteMask & 1) {
1455 temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
1456 dst0[0] = bld_insn_1(bld, NV_OP_EX2, temp);
1457 }
1458 if (insn->Dst[0].Register.WriteMask & 4) {
1459 temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
1460 dst0[2] = bld_insn_1(bld, NV_OP_EX2, temp);
1461 }
1462 if (insn->Dst[0].Register.WriteMask & 8)
1463 dst0[3] = bld_imm_f32(bld, 1.0f);
1464 break;
1465 case TGSI_OPCODE_EX2:
1466 src0 = emit_fetch(bld, insn, 0, 0);
1467 temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
1468 temp = bld_insn_1(bld, NV_OP_EX2, temp);
1469 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1470 dst0[c] = temp;
1471 break;
1472 case TGSI_OPCODE_FRC:
1473 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1474 src0 = emit_fetch(bld, insn, 0, c);
1475 dst0[c] = bld_insn_1(bld, NV_OP_FLOOR, src0);
1476 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src0, dst0[c]);
1477 }
1478 break;
1479 case TGSI_OPCODE_KIL:
1480 for (c = 0; c < 4; ++c)
1481 bld_kil(bld, emit_fetch(bld, insn, 0, c));
1482 break;
1483 case TGSI_OPCODE_KILP:
1484 (new_instruction(bld->pc, NV_OP_KIL))->fixed = 1;
1485 break;
1486 case TGSI_OPCODE_IF:
1487 {
1488 struct nv_basic_block *b = new_basic_block(bld->pc);
1489
1490 assert(bld->cond_lvl < BLD_MAX_COND_NESTING);
1491
1492 nvc0_bblock_attach(bld->pc->current_block, b, CFG_EDGE_FORWARD);
1493
1494 bld->join_bb[bld->cond_lvl] = bld->pc->current_block;
1495 bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;
1496
1497 src1 = bld_setp(bld, NV_OP_SET_U32, NV_CC_EQ,
1498 emit_fetch(bld, insn, 0, 0), bld->zero);
1499
1500 bld_flow(bld, NV_OP_BRA, src1, NULL, (bld->cond_lvl == 0));
1501
1502 ++bld->cond_lvl;
1503 bld_new_block(bld, b);
1504 }
1505 break;
1506 case TGSI_OPCODE_ELSE:
1507 {
1508 struct nv_basic_block *b = new_basic_block(bld->pc);
1509
1510 --bld->cond_lvl;
1511 nvc0_bblock_attach(bld->join_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);
1512
1513 bld->cond_bb[bld->cond_lvl]->exit->target = b;
1514 bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;
1515
1516 new_instruction(bld->pc, NV_OP_BRA)->terminator = 1;
1517
1518 ++bld->cond_lvl;
1519 bld_new_block(bld, b);
1520 }
1521 break;
1522 case TGSI_OPCODE_ENDIF:
1523 {
1524 struct nv_basic_block *b = new_basic_block(bld->pc);
1525
1526 --bld->cond_lvl;
1527 nvc0_bblock_attach(bld->pc->current_block, b, bld->out_kind);
1528 nvc0_bblock_attach(bld->cond_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);
1529
1530 bld->cond_bb[bld->cond_lvl]->exit->target = b;
1531
1532 bld_new_block(bld, b);
1533
1534 if (!bld->cond_lvl && bld->join_bb[bld->cond_lvl]) {
1535 bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
1536 new_instruction(bld->pc, NV_OP_JOIN)->join = 1;
1537 }
1538 }
1539 break;
1540 case TGSI_OPCODE_BGNLOOP:
1541 {
1542 struct nv_basic_block *bl = new_basic_block(bld->pc);
1543 struct nv_basic_block *bb = new_basic_block(bld->pc);
1544
1545 assert(bld->loop_lvl < BLD_MAX_LOOP_NESTING);
1546
1547 bld->loop_bb[bld->loop_lvl] = bl;
1548 bld->brkt_bb[bld->loop_lvl] = bb;
1549
1550 nvc0_bblock_attach(bld->pc->current_block, bl, CFG_EDGE_LOOP_ENTER);
1551
1552 bld_new_block(bld, bld->loop_bb[bld->loop_lvl++]);
1553
1554 if (bld->loop_lvl == bld->pc->loop_nesting_bound)
1555 bld->pc->loop_nesting_bound++;
1556
1557 bld_clear_def_use(&bld->tvs[0][0], BLD_MAX_TEMPS, bld->loop_lvl);
1558 bld_clear_def_use(&bld->avs[0][0], BLD_MAX_ADDRS, bld->loop_lvl);
1559 bld_clear_def_use(&bld->pvs[0][0], BLD_MAX_PREDS, bld->loop_lvl);
1560 }
1561 break;
1562 case TGSI_OPCODE_BRK:
1563 {
1564 struct nv_basic_block *bb = bld->brkt_bb[bld->loop_lvl - 1];
1565
1566 bld_flow(bld, NV_OP_BRA, NULL, bb, FALSE);
1567
1568 if (bld->out_kind == CFG_EDGE_FORWARD) /* else we already had BRK/CONT */
1569 nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);
1570
1571 bld->out_kind = CFG_EDGE_FAKE;
1572 }
1573 break;
1574 case TGSI_OPCODE_CONT:
1575 {
1576 struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];
1577
1578 bld_flow(bld, NV_OP_BRA, NULL, bb, FALSE);
1579
1580 nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_BACK);
1581
1582 if ((bb = bld->join_bb[bld->cond_lvl - 1])) {
1583 bld->join_bb[bld->cond_lvl - 1] = NULL;
1584 nvc0_insn_delete(bb->exit->prev);
1585 }
1586 bld->out_kind = CFG_EDGE_FAKE;
1587 }
1588 break;
1589 case TGSI_OPCODE_ENDLOOP:
1590 {
1591 struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];
1592
1593 bld_flow(bld, NV_OP_BRA, NULL, bb, FALSE);
1594
1595 nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_BACK);
1596
1597 bld_loop_end(bld, bb); /* replace loop-side operand of the phis */
1598
1599 bld_new_block(bld, bld->brkt_bb[--bld->loop_lvl]);
1600 }
1601 break;
1602 case TGSI_OPCODE_ABS:
1603 case TGSI_OPCODE_CEIL:
1604 case TGSI_OPCODE_FLR:
1605 case TGSI_OPCODE_TRUNC:
1606 case TGSI_OPCODE_DDX:
1607 case TGSI_OPCODE_DDY:
1608 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1609 src0 = emit_fetch(bld, insn, 0, c);
1610 dst0[c] = bld_insn_1(bld, opcode, src0);
1611 }
1612 break;
1613 case TGSI_OPCODE_LIT:
1614 bld_lit(bld, dst0, insn);
1615 break;
1616 case TGSI_OPCODE_LRP:
1617 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1618 src0 = emit_fetch(bld, insn, 0, c);
1619 src1 = emit_fetch(bld, insn, 1, c);
1620 src2 = emit_fetch(bld, insn, 2, c);
1621 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src1, src2);
1622 dst0[c] = bld_insn_3(bld, NV_OP_MAD_F32, dst0[c], src0, src2);
1623 }
1624 break;
1625 case TGSI_OPCODE_MOV:
1626 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1627 dst0[c] = emit_fetch(bld, insn, 0, c);
1628 break;
1629 case TGSI_OPCODE_MAD:
1630 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1631 src0 = emit_fetch(bld, insn, 0, c);
1632 src1 = emit_fetch(bld, insn, 1, c);
1633 src2 = emit_fetch(bld, insn, 2, c);
1634 dst0[c] = bld_insn_3(bld, opcode, src0, src1, src2);
1635 }
1636 break;
1637 case TGSI_OPCODE_POW:
1638 src0 = emit_fetch(bld, insn, 0, 0);
1639 src1 = emit_fetch(bld, insn, 1, 0);
1640 temp = bld_pow(bld, src0, src1);
1641 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1642 dst0[c] = temp;
1643 break;
1644 case TGSI_OPCODE_LOG:
1645 src0 = emit_fetch(bld, insn, 0, 0);
1646 src0 = bld_insn_1(bld, NV_OP_ABS_F32, src0);
1647 temp = bld_insn_1(bld, NV_OP_LG2, src0);
1648 dst0[2] = temp;
1649 if (insn->Dst[0].Register.WriteMask & 3) {
1650 temp = bld_insn_1(bld, NV_OP_FLOOR, temp);
1651 dst0[0] = temp;
1652 }
1653 if (insn->Dst[0].Register.WriteMask & 2) {
1654 temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
1655 temp = bld_insn_1(bld, NV_OP_EX2, temp);
1656 temp = bld_insn_1(bld, NV_OP_RCP, temp);
1657 dst0[1] = bld_insn_2(bld, NV_OP_MUL_F32, src0, temp);
1658 }
1659 if (insn->Dst[0].Register.WriteMask & 8)
1660 dst0[3] = bld_imm_f32(bld, 1.0f);
1661 break;
1662 case TGSI_OPCODE_RCP:
1663 case TGSI_OPCODE_LG2:
1664 src0 = emit_fetch(bld, insn, 0, 0);
1665 temp = bld_insn_1(bld, opcode, src0);
1666 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1667 dst0[c] = temp;
1668 break;
1669 case TGSI_OPCODE_RSQ:
1670 src0 = emit_fetch(bld, insn, 0, 0);
1671 temp = bld_insn_1(bld, NV_OP_ABS_F32, src0);
1672 temp = bld_insn_1(bld, NV_OP_RSQ, temp);
1673 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1674 dst0[c] = temp;
1675 break;
1676 case TGSI_OPCODE_SLT:
1677 case TGSI_OPCODE_SGE:
1678 case TGSI_OPCODE_SEQ:
1679 case TGSI_OPCODE_SGT:
1680 case TGSI_OPCODE_SLE:
1681 case TGSI_OPCODE_SNE:
1682 case TGSI_OPCODE_ISLT:
1683 case TGSI_OPCODE_ISGE:
1684 case TGSI_OPCODE_USEQ:
1685 case TGSI_OPCODE_USGE:
1686 case TGSI_OPCODE_USLT:
1687 case TGSI_OPCODE_USNE:
1688 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1689 src0 = emit_fetch(bld, insn, 0, c);
1690 src1 = emit_fetch(bld, insn, 1, c);
1691 dst0[c] = bld_insn_2(bld, opcode, src0, src1);
1692 dst0[c]->insn->set_cond = translate_setcc(insn->Instruction.Opcode);
1693 }
1694 break;
1695 case TGSI_OPCODE_SCS:
1696 if (insn->Dst[0].Register.WriteMask & 0x3) {
1697 src0 = emit_fetch(bld, insn, 0, 0);
1698 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1699 if (insn->Dst[0].Register.WriteMask & 0x1)
1700 dst0[0] = bld_insn_1(bld, NV_OP_COS, temp);
1701 if (insn->Dst[0].Register.WriteMask & 0x2)
1702 dst0[1] = bld_insn_1(bld, NV_OP_SIN, temp);
1703 }
1704 if (insn->Dst[0].Register.WriteMask & 0x4)
1705 dst0[2] = bld_imm_f32(bld, 0.0f);
1706 if (insn->Dst[0].Register.WriteMask & 0x8)
1707 dst0[3] = bld_imm_f32(bld, 1.0f);
1708 break;
1709 case TGSI_OPCODE_SSG:
1710 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) { /* XXX: set lt, set gt, sub */
1711 src0 = emit_fetch(bld, insn, 0, c);
1712 src1 = bld_setp(bld, NV_OP_SET_F32, NV_CC_EQ, src0, bld->zero);
1713 temp = bld_insn_2(bld, NV_OP_AND, src0, bld_imm_u32(bld, 0x80000000));
1714 temp = bld_insn_2(bld, NV_OP_OR, temp, bld_imm_f32(bld, 1.0f));
1715 dst0[c] = bld_insn_1(bld, NV_OP_MOV, temp);
1716 bld_src_predicate(bld, dst0[c]->insn, 1, src1);
1717 }
1718 break;
1719 case TGSI_OPCODE_SUB:
1720 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1721 src0 = emit_fetch(bld, insn, 0, c);
1722 src1 = emit_fetch(bld, insn, 1, c);
1723 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src0, src1);
1724 }
1725 break;
1726 case TGSI_OPCODE_TEX:
1727 case TGSI_OPCODE_TXB:
1728 case TGSI_OPCODE_TXL:
1729 case TGSI_OPCODE_TXP:
1730 bld_tex(bld, dst0, insn);
1731 break;
1732 case TGSI_OPCODE_XPD:
1733 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1734 if (c == 3) {
1735 dst0[3] = bld_imm_f32(bld, 1.0f);
1736 break;
1737 }
1738 src0 = emit_fetch(bld, insn, 1, (c + 1) % 3);
1739 src1 = emit_fetch(bld, insn, 0, (c + 2) % 3);
1740 dst0[c] = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
1741
1742 src0 = emit_fetch(bld, insn, 0, (c + 1) % 3);
1743 src1 = emit_fetch(bld, insn, 1, (c + 2) % 3);
1744 dst0[c] = bld_insn_3(bld, NV_OP_MAD_F32, src0, src1, dst0[c]);
1745
1746 dst0[c]->insn->src[2]->mod ^= NV_MOD_NEG;
1747 }
1748 break;
1749 case TGSI_OPCODE_RET:
1750 (new_instruction(bld->pc, NV_OP_RET))->fixed = 1;
1751 break;
1752 case TGSI_OPCODE_END:
1753       /* VP outputs are exported in-place as scalars; optimized later */
1754 if (bld->pc->is_fragprog)
1755 bld_export_fp_outputs(bld);
1756 break;
1757 default:
1758 NOUVEAU_ERR("unhandled opcode %u\n", insn->Instruction.Opcode);
1759 abort();
1760 break;
1761 }
1762
1763 if (insn->Dst[0].Register.File == TGSI_FILE_OUTPUT &&
1764 !bld->pc->is_fragprog) {
1765 struct nv_instruction *mi = NULL;
1766 uint size;
1767
1768 for (c = 0; c < 4; ++c)
1769 if ((mask & (1 << c)) &&
1770 ((dst0[c]->reg.file == NV_FILE_IMM) ||
1771 (dst0[c]->reg.id == 63 && dst0[c]->reg.file == NV_FILE_GPR)))
1772 dst0[c] = bld_insn_1(bld, NV_OP_MOV, dst0[c]);
1773
1774 c = 0;
1775 if ((mask & 0x3) == 0x3) {
1776 mask &= ~0x3;
1777 size = 8;
1778 mi = bld_insn_2(bld, NV_OP_BIND, dst0[0], dst0[1])->insn;
1779 }
1780 if ((mask & 0xc) == 0xc) {
1781 mask &= ~0xc;
1782 if (mi) {
1783 size = 16;
1784 nv_reference(bld->pc, mi, 2, dst0[2]);
1785 nv_reference(bld->pc, mi, 3, dst0[3]);
1786 } else {
1787 c = 2;
1788 size = 8;
1789 mi = bld_insn_2(bld, NV_OP_BIND, dst0[2], dst0[3])->insn;
1790 }
1791 } else
1792 if (mi && (mask & 0x4)) {
1793 size = 12;
1794 mask &= ~0x4;
1795 nv_reference(bld->pc, mi, 2, dst0[2]);
1796 }
1797
1798 if (mi) {
1799 struct nv_instruction *ex = new_instruction(bld->pc, NV_OP_EXPORT);
1800 int s;
1801
1802 nv_reference(bld->pc, ex, 0, new_value(bld->pc, NV_FILE_MEM_V, 4));
1803 nv_reference(bld->pc, ex, 1, mi->def[0]);
1804
1805 for (s = 1; s < size / 4; ++s) {
1806 bld_def(mi, s, new_value(bld->pc, NV_FILE_GPR, 4));
1807 nv_reference(bld->pc, ex, s + 1, mi->def[s]);
1808 }
1809
1810 ex->fixed = 1;
1811 ex->src[0]->value->reg.size = size;
1812 ex->src[0]->value->reg.address =
1813 bld->ti->output_loc[insn->Dst[0].Register.Index][c];
1814 }
1815 }
1816
1817 for (c = 0; c < 4; ++c)
1818 if (mask & (1 << c))
1819 emit_store(bld, insn, c, dst0[c]);
1820 }
1821
1822 static INLINE void
1823 bld_free_registers(struct bld_register *base, int n)
1824 {
1825 int i, c;
1826
1827 for (i = 0; i < n; ++i)
1828 for (c = 0; c < 4; ++c)
1829 util_dynarray_fini(&base[i * 4 + c].vals);
1830 }
1831
1832 int
1833 nvc0_tgsi_to_nc(struct nv_pc *pc, struct nvc0_translation_info *ti)
1834 {
1835 struct bld_context *bld = CALLOC_STRUCT(bld_context);
1836 unsigned ip;
1837
1838 pc->root[0] = pc->current_block = new_basic_block(pc);
1839
1840 bld->pc = pc;
1841 bld->ti = ti;
1842
1843 pc->loop_nesting_bound = 1;
1844
1845 bld->zero = new_value(pc, NV_FILE_GPR, 4);
1846 bld->zero->reg.id = 63;
1847
1848 if (pc->is_fragprog) {
1849 struct nv_value *mem = new_value(pc, NV_FILE_MEM_V, 4);
1850 mem->reg.address = 0x7c;
1851
1852 bld->frag_coord[3] = bld_insn_1(bld, NV_OP_LINTERP, mem);
1853 bld->frag_coord[3] = bld_insn_1(bld, NV_OP_RCP, bld->frag_coord[3]);
1854 }
1855
1856 for (ip = 0; ip < ti->num_insns; ++ip)
1857 bld_instruction(bld, &ti->insns[ip]);
1858
1859 bld_free_registers(&bld->tvs[0][0], BLD_MAX_TEMPS);
1860 bld_free_registers(&bld->avs[0][0], BLD_MAX_ADDRS);
1861 bld_free_registers(&bld->pvs[0][0], BLD_MAX_PREDS);
1862 bld_free_registers(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);
1863
1864 FREE(bld);
1865 return 0;
1866 }
1867
1868 /* If a variable is assigned in a loop, replace all references to the value
1869 * from outside the loop with a phi value.
1870 */
1871 static void
1872 bld_replace_value(struct nv_pc *pc, struct nv_basic_block *b,
1873 struct nv_value *old_val,
1874 struct nv_value *new_val)
1875 {
1876 struct nv_instruction *nvi;
1877
1878 for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = nvi->next) {
1879 int s;
1880 for (s = 0; s < 6 && nvi->src[s]; ++s)
1881 if (nvi->src[s]->value == old_val)
1882 nv_reference(pc, nvi, s, new_val);
1883 }
1884
1885 b->pass_seq = pc->pass_seq;
1886
1887 if (b->out[0] && b->out[0]->pass_seq < pc->pass_seq)
1888 bld_replace_value(pc, b->out[0], old_val, new_val);
1889
1890 if (b->out[1] && b->out[1]->pass_seq < pc->pass_seq)
1891 bld_replace_value(pc, b->out[1], old_val, new_val);
1892 }