nvc0: import nvc0 gallium driver
[mesa.git] / src / gallium / drivers / nvc0 / nvc0_tgsi_to_nc.c
/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <unistd.h>

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_dump.h"
#include "util/u_dynarray.h"

#include "nvc0_pc.h"
#include "nvc0_program.h"

/* Arbitrary internal limits. */
#define BLD_MAX_TEMPS 64
#define BLD_MAX_ADDRS 4
#define BLD_MAX_PREDS 4
#define BLD_MAX_IMMDS 128
#define BLD_MAX_OUTPS PIPE_MAX_SHADER_OUTPUTS

#define BLD_MAX_COND_NESTING 8
#define BLD_MAX_LOOP_NESTING 4
#define BLD_MAX_CALL_NESTING 2

/* This structure represents a TGSI register. */
struct bld_register {
   struct nv_value *current;
   /* collect all SSA values assigned to it */
   struct util_dynarray vals;
   /* 1 bit per loop level, indicates if used/defd, reset when loop ends */
   uint16_t loop_use;
   uint16_t loop_def;
};

static INLINE struct nv_value **
bld_register_access(struct bld_register *reg, unsigned i)
{
   return util_dynarray_element(&reg->vals, struct nv_value *, i);
}

static INLINE void
bld_register_add_val(struct bld_register *reg, struct nv_value *val)
{
   util_dynarray_append(&reg->vals, struct nv_value *, val);
}

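/* Remove @val from the register's value list by swapping it with the last
 * element and shrinking the array (order is not preserved).
 */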
static INLINE boolean
bld_register_del_val(struct bld_register *reg, struct nv_value *val)
{
   unsigned i;

   for (i = reg->vals.size / sizeof(struct nv_value *); i > 0; --i)
      if (*bld_register_access(reg, i - 1) == val)
         break;
   if (!i)
      return FALSE;

   if (i != reg->vals.size / sizeof(struct nv_value *))
      *bld_register_access(reg, i - 1) =
         util_dynarray_pop(&reg->vals, struct nv_value *);
   else
      reg->vals.size -= sizeof(struct nv_value *);

   return TRUE;
}

struct bld_context {
   struct nvc0_translation_info *ti;

   struct nv_pc *pc;
   struct nv_basic_block *b;

   struct tgsi_parse_context parse[BLD_MAX_CALL_NESTING];
   int call_lvl;

   struct nv_basic_block *cond_bb[BLD_MAX_COND_NESTING];
   struct nv_basic_block *join_bb[BLD_MAX_COND_NESTING];
   struct nv_basic_block *else_bb[BLD_MAX_COND_NESTING];
   int cond_lvl;
   struct nv_basic_block *loop_bb[BLD_MAX_LOOP_NESTING];
   struct nv_basic_block *brkt_bb[BLD_MAX_LOOP_NESTING];
   int loop_lvl;

   ubyte out_kind; /* CFG_EDGE_FORWARD, or FAKE in case of BREAK/CONT */

   struct bld_register tvs[BLD_MAX_TEMPS][4]; /* TGSI_FILE_TEMPORARY */
   struct bld_register avs[BLD_MAX_ADDRS][4]; /* TGSI_FILE_ADDRESS */
   struct bld_register pvs[BLD_MAX_PREDS][4]; /* TGSI_FILE_PREDICATE */
   struct bld_register ovs[BLD_MAX_OUTPS][4]; /* TGSI_FILE_OUTPUT, FP only */

   uint32_t outputs_written[(PIPE_MAX_SHADER_OUTPUTS + 7) / 8];

   struct nv_value *zero;
   struct nv_value *frag_coord[4];

   /* wipe on new BB */
   struct nv_value *saved_sysvals[4];
   struct nv_value *saved_addr[4][2];
   struct nv_value *saved_inputs[PIPE_MAX_SHADER_INPUTS][4];
   struct nv_value *saved_immd[BLD_MAX_IMMDS];
   uint num_immds;
};

static INLINE ubyte
bld_register_file(struct bld_context *bld, struct bld_register *reg)
{
   if (reg < &bld->avs[0][0]) return NV_FILE_GPR;
   else
   if (reg < &bld->pvs[0][0]) return NV_FILE_GPR;
   else
   if (reg < &bld->ovs[0][0]) return NV_FILE_PRED;
   else
      return NV_FILE_MEM_V;
}

static INLINE struct nv_value *
bld_fetch(struct bld_context *bld, struct bld_register *regs, int i, int c)
{
   regs[i * 4 + c].loop_use |= 1 << bld->loop_lvl;
   return regs[i * 4 + c].current;
}

static struct nv_value *
bld_loop_phi(struct bld_context *, struct bld_register *, struct nv_value *);

/* If a variable is defined in a loop without prior use, we don't need
 * a phi in the loop header to account for backwards flow.
 *
 * However, if this variable is then also used outside the loop, we do
 * need a phi after all. But we must not use this phi's def inside the
 * loop, so we can eliminate the phi if it is unused later.
 */
static INLINE void
bld_store(struct bld_context *bld,
          struct bld_register *regs, int i, int c, struct nv_value *val)
{
   const uint16_t m = 1 << bld->loop_lvl;
   struct bld_register *reg = &regs[i * 4 + c];

   if (bld->loop_lvl && !(m & (reg->loop_def | reg->loop_use)))
      bld_loop_phi(bld, reg, val);

   reg->current = val;
   bld_register_add_val(reg, reg->current);

   reg->loop_def |= 1 << bld->loop_lvl;
}

#define FETCH_TEMP(i, c)    bld_fetch(bld, &bld->tvs[0][0], i, c)
#define STORE_TEMP(i, c, v) bld_store(bld, &bld->tvs[0][0], i, c, (v))
#define FETCH_ADDR(i, c)    bld_fetch(bld, &bld->avs[0][0], i, c)
#define STORE_ADDR(i, c, v) bld_store(bld, &bld->avs[0][0], i, c, (v))
#define FETCH_PRED(i, c)    bld_fetch(bld, &bld->pvs[0][0], i, c)
#define STORE_PRED(i, c, v) bld_store(bld, &bld->pvs[0][0], i, c, (v))

#define STORE_OUTP(i, c, v)                                         \
   do {                                                             \
      bld_store(bld, &bld->ovs[0][0], i, c, (v));                   \
      bld->outputs_written[(i) / 8] |= 1 << (((i) * 4 + (c)) % 32); \
   } while (0)

static INLINE void
bld_clear_def_use(struct bld_register *regs, int n, int lvl)
{
   int i;
   const uint16_t mask = ~(1 << lvl);

   for (i = 0; i < n * 4; ++i) {
      regs[i].loop_def &= mask;
      regs[i].loop_use &= mask;
   }
}

static INLINE void
bld_warn_uninitialized(struct bld_context *bld, int kind,
                       struct bld_register *reg, struct nv_basic_block *b)
{
#ifdef NOUVEAU_DEBUG_BITS
   long i = (reg - &bld->tvs[0][0]) / 4;
   long c = (reg - &bld->tvs[0][0]) & 3;

   if (c == 3)
      c = -1;
   debug_printf("WARNING: TEMP[%li].%c %s used uninitialized in BB:%i\n",
                i, (int)('x' + c), kind ? "may be" : "is", b->id);
#endif
}

static INLINE struct nv_value *
bld_def(struct nv_instruction *i, int c, struct nv_value *value)
{
   i->def[c] = value;
   value->insn = i;
   return value;
}

static INLINE struct nv_value *
find_by_bb(struct bld_register *reg, struct nv_basic_block *b)
{
   int i;

   if (reg->current && reg->current->insn->bb == b)
      return reg->current;

   for (i = 0; i < reg->vals.size / sizeof(struct nv_value *); ++i)
      if ((*bld_register_access(reg, i))->insn->bb == b)
         return *bld_register_access(reg, i);
   return NULL;
}

/* Fetch value from register that was defined in the specified BB,
 * or search for first definitions in all of its predecessors.
 */
static void
fetch_by_bb(struct bld_register *reg,
            struct nv_value **vals, int *n,
            struct nv_basic_block *b)
{
   int i;
   struct nv_value *val;

   assert(*n < 16); /* MAX_COND_NESTING */

   val = find_by_bb(reg, b);
   if (val) {
      for (i = 0; i < *n; ++i)
         if (vals[i] == val)
            return;
      vals[(*n)++] = val;
      return;
   }
   for (i = 0; i < b->num_in; ++i)
      if (!IS_WALL_EDGE(b->in_kind[i]))
         fetch_by_bb(reg, vals, n, b->in[i]);
}

static INLINE struct nv_value *
bld_load_imm_u32(struct bld_context *bld, uint32_t u);

static INLINE struct nv_value *
bld_undef(struct bld_context *bld, ubyte file)
{
   struct nv_instruction *nvi = new_instruction(bld->pc, NV_OP_UNDEF);

   return bld_def(nvi, 0, new_value(bld->pc, file, 4));
}

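/* Get the SSA value of @reg valid at the end of block @b. Definitions are
 * collected from @b and its predecessor chains; if a single definition
 * dominates @b it is used directly, otherwise phi functions are created
 * where distinct definitions merge (at the dominance frontiers).
 */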
static struct nv_value *
bld_phi(struct bld_context *bld, struct nv_basic_block *b,
        struct bld_register *reg)
{
   struct nv_basic_block *in;
   struct nv_value *vals[16] = { NULL };
   struct nv_value *val;
   struct nv_instruction *phi;
   int i, j, n;

   do {
      i = n = 0;
      fetch_by_bb(reg, vals, &n, b);

      if (!n) {
         bld_warn_uninitialized(bld, 0, reg, b);
         return NULL;
      }

      if (n == 1) {
         if (nvc0_bblock_dominated_by(b, vals[0]->insn->bb))
            break;

         bld_warn_uninitialized(bld, 1, reg, b);

         /* back-tracking to insert missing value of other path */
         in = b;
         while (in->in[0]) {
            if (in->num_in == 1) {
               in = in->in[0];
            } else {
               if (!nvc0_bblock_reachable_by(in->in[0], vals[0]->insn->bb, b))
                  in = in->in[0];
               else
               if (!nvc0_bblock_reachable_by(in->in[1], vals[0]->insn->bb, b))
                  in = in->in[1];
               else
                  in = in->in[0];
            }
         }
         bld->pc->current_block = in;

         /* should make this a no-op */
         bld_register_add_val(reg, bld_undef(bld, vals[0]->reg.file));
         continue;
      }

      for (i = 0; i < n; ++i) {
         /* if value dominates b, continue to the redefinitions */
         if (nvc0_bblock_dominated_by(b, vals[i]->insn->bb))
            continue;

         /* if value dominates any in-block, b should be the dom frontier */
         for (j = 0; j < b->num_in; ++j)
            if (nvc0_bblock_dominated_by(b->in[j], vals[i]->insn->bb))
               break;
         /* otherwise, find the dominance frontier and put the phi there */
         if (j == b->num_in) {
            in = nvc0_bblock_dom_frontier(vals[i]->insn->bb);
            val = bld_phi(bld, in, reg);
            bld_register_add_val(reg, val);
            break;
         }
      }
   } while (i < n);

   bld->pc->current_block = b;

   if (n == 1)
      return vals[0];

   phi = new_instruction(bld->pc, NV_OP_PHI);

   bld_def(phi, 0, new_value(bld->pc, vals[0]->reg.file, vals[0]->reg.size));
   for (i = 0; i < n; ++i)
      nv_reference(bld->pc, phi, i, vals[i]);

   return phi->def[0];
}

/* Insert a phi function in the loop header.
 * For nested loops, we need to insert phi functions in all the outer
 * loop headers if they don't have one yet.
 *
 * @def: redefinition from inside loop, or NULL if to be replaced later
 */
static struct nv_value *
bld_loop_phi(struct bld_context *bld, struct bld_register *reg,
             struct nv_value *def)
{
   struct nv_instruction *phi;
   struct nv_basic_block *bb = bld->pc->current_block;
   struct nv_value *val = NULL;

   if (bld->loop_lvl > 1) {
      --bld->loop_lvl;
      if (!((reg->loop_def | reg->loop_use) & (1 << bld->loop_lvl)))
         val = bld_loop_phi(bld, reg, NULL);
      ++bld->loop_lvl;
   }

   if (!val)
      val = bld_phi(bld, bld->pc->current_block, reg); /* old definition */
   if (!val) {
      bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1]->in[0];
      val = bld_undef(bld, bld_register_file(bld, reg));
   }

   bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1];

   phi = new_instruction(bld->pc, NV_OP_PHI);

   bld_def(phi, 0, new_value_like(bld->pc, val));
   if (!def)
      def = phi->def[0];

   bld_register_add_val(reg, phi->def[0]);

   phi->target = (struct nv_basic_block *)reg; /* cheat */

   nv_reference(bld->pc, phi, 0, val);
   nv_reference(bld->pc, phi, 1, def);

   bld->pc->current_block = bb;

   return phi->def[0];
}

static INLINE struct nv_value *
bld_fetch_global(struct bld_context *bld, struct bld_register *reg)
{
   const uint16_t m = 1 << bld->loop_lvl;
   const uint16_t use = reg->loop_use;

   reg->loop_use |= m;

   /* If neither used nor def'd inside the loop, build a phi in foresight,
    * so we don't have to replace stuff later on, which requires tracking.
    */
   if (bld->loop_lvl && !((use | reg->loop_def) & m))
      return bld_loop_phi(bld, reg, NULL);

   return bld_phi(bld, bld->pc->current_block, reg);
}

static INLINE struct nv_value *
bld_imm_u32(struct bld_context *bld, uint32_t u)
{
   int i;
   unsigned n = bld->num_immds;

   for (i = 0; i < n; ++i)
      if (bld->saved_immd[i]->reg.imm.u32 == u)
         return bld->saved_immd[i];

   assert(n < BLD_MAX_IMMDS);
   bld->num_immds++;

   bld->saved_immd[n] = new_value(bld->pc, NV_FILE_IMM, 4);
   bld->saved_immd[n]->reg.imm.u32 = u;
   return bld->saved_immd[n];
}

static void
bld_replace_value(struct nv_pc *, struct nv_basic_block *, struct nv_value *,
                  struct nv_value *);

/* Replace the source of the phi in the loop header by the last assignment,
 * or eliminate the phi function if there is no assignment inside the loop.
 *
 * Redundancy situation 1 - (used) but (not redefined) value:
 *   %3 = phi %0, %3 = %3 is used
 *   %3 = phi %0, %4 = is new definition
 *
 * Redundancy situation 2 - (not used) but (redefined) value:
 *   %3 = phi %0, %2 = %2 is used, %3 could be used outside, deleted by DCE
 */
static void
bld_loop_end(struct bld_context *bld, struct nv_basic_block *bb)
{
   struct nv_basic_block *save = bld->pc->current_block;
   struct nv_instruction *phi, *next;
   struct nv_value *val;
   struct bld_register *reg;
   int i, s, n;

   for (phi = bb->phi; phi && phi->opcode == NV_OP_PHI; phi = next) {
      next = phi->next;

      reg = (struct bld_register *)phi->target;
      phi->target = NULL;

      for (s = 1, n = 0; n < bb->num_in; ++n) {
         if (bb->in_kind[n] != CFG_EDGE_BACK)
            continue;

         assert(s < 4);
         bld->pc->current_block = bb->in[n];
         val = bld_fetch_global(bld, reg);

         for (i = 0; i < 4; ++i)
            if (phi->src[i] && phi->src[i]->value == val)
               break;
         if (i == 4)
            nv_reference(bld->pc, phi, s++, val);
      }
      bld->pc->current_block = save;

      if (phi->src[0]->value == phi->def[0] ||
          phi->src[0]->value == phi->src[1]->value)
         s = 1;
      else
      if (phi->src[1]->value == phi->def[0])
         s = 0;
      else
         continue;

      if (s >= 0) {
         /* eliminate the phi */
         bld_register_del_val(reg, phi->def[0]);

         ++bld->pc->pass_seq;
         bld_replace_value(bld->pc, bb, phi->def[0], phi->src[s]->value);

         nvc0_insn_delete(phi);
      }
   }
}

static INLINE struct nv_value *
bld_imm_f32(struct bld_context *bld, float f)
{
   return bld_imm_u32(bld, fui(f));
}

static struct nv_value *
bld_insn_1(struct bld_context *bld, uint opcode, struct nv_value *src0)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, insn, 0, src0);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
}

static struct nv_value *
bld_insn_2(struct bld_context *bld, uint opcode,
           struct nv_value *src0, struct nv_value *src1)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, insn, 0, src0);
   nv_reference(bld->pc, insn, 1, src1);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
}

static struct nv_value *
bld_insn_3(struct bld_context *bld, uint opcode,
           struct nv_value *src0, struct nv_value *src1,
           struct nv_value *src2)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, insn, 0, src0);
   nv_reference(bld->pc, insn, 1, src1);
   nv_reference(bld->pc, insn, 2, src2);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
}

static INLINE void
bld_src_predicate(struct bld_context *bld,
                  struct nv_instruction *nvi, int s, struct nv_value *val)
{
   nvi->predicate = s;
   nv_reference(bld->pc, nvi, s, val);
}

static INLINE void
bld_src_pointer(struct bld_context *bld,
                struct nv_instruction *nvi, int s, struct nv_value *val)
{
   nvi->indirect = s;
   nv_reference(bld->pc, nvi, s, val);
}

static void
bld_lmem_store(struct bld_context *bld, struct nv_value *ptr, int ofst,
               struct nv_value *val)
{
   struct nv_instruction *insn = new_instruction(bld->pc, NV_OP_ST);
   struct nv_value *loc;

   loc = new_value(bld->pc, NV_FILE_MEM_L, nv_type_sizeof(NV_TYPE_U32));

   loc->reg.address = ofst * 4;

   nv_reference(bld->pc, insn, 0, loc);
   nv_reference(bld->pc, insn, 1, ptr);
   nv_reference(bld->pc, insn, 2, val);
}

static struct nv_value *
bld_lmem_load(struct bld_context *bld, struct nv_value *ptr, int ofst)
{
   struct nv_value *loc, *val;

   loc = new_value(bld->pc, NV_FILE_MEM_L, nv_type_sizeof(NV_TYPE_U32));

   loc->reg.address = ofst * 4;

   val = bld_insn_2(bld, NV_OP_LD, loc, ptr);

   return val;
}

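/* pow(x, e) is expanded to exp2(e * log2(x)); PREEX2 pre-processes the
 * operand for the hardware EX2 unit.
 */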
static struct nv_value *
bld_pow(struct bld_context *bld, struct nv_value *x, struct nv_value *e)
{
   struct nv_value *val;

   val = bld_insn_1(bld, NV_OP_LG2, x);
   val = bld_insn_2(bld, NV_OP_MUL_F32, e, val);

   val = bld_insn_1(bld, NV_OP_PREEX2, val);
   val = bld_insn_1(bld, NV_OP_EX2, val);

   return val;
}

static INLINE struct nv_value *
bld_load_imm_f32(struct bld_context *bld, float f)
{
   if (f == 0.0f)
      return bld->zero;
   return bld_insn_1(bld, NV_OP_MOV, bld_imm_f32(bld, f));
}

static INLINE struct nv_value *
bld_load_imm_u32(struct bld_context *bld, uint32_t u)
{
   if (u == 0)
      return bld->zero;
   return bld_insn_1(bld, NV_OP_MOV, bld_imm_u32(bld, u));
}

static INLINE struct nv_value *
bld_setp(struct bld_context *bld, uint op, uint8_t cc,
         struct nv_value *src0, struct nv_value *src1)
{
   struct nv_value *val = bld_insn_2(bld, op, src0, src1);

   val->reg.file = NV_FILE_PRED;
   val->reg.size = 1;
   val->insn->set_cond = cc & 0xf;
   return val;
}

static INLINE struct nv_value *
bld_cvt(struct bld_context *bld, uint8_t dt, uint8_t st, struct nv_value *src)
{
   struct nv_value *val = bld_insn_1(bld, NV_OP_CVT, src);
   val->insn->ext.cvt.d = dt;
   val->insn->ext.cvt.s = st;
   return val;
}

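/* KIL: discard the fragment if the value is negative; build a predicate
 * from (src < 0) and make the KIL conditional on it.
 */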
static void
bld_kil(struct bld_context *bld, struct nv_value *src)
{
   struct nv_instruction *nvi;

   src = bld_setp(bld, NV_OP_SET_F32, NV_CC_LT, src, bld->zero);

   nvi = new_instruction(bld->pc, NV_OP_KIL);
   nvi->fixed = 1;

   bld_src_predicate(bld, nvi, 0, src);
}

static void
bld_flow(struct bld_context *bld, uint opcode,
         struct nv_value *src, struct nv_basic_block *target,
         boolean reconverge)
{
   struct nv_instruction *nvi;

   if (reconverge)
      new_instruction(bld->pc, NV_OP_JOINAT)->fixed = 1;

   nvi = new_instruction(bld->pc, opcode);
   nvi->target = target;
   nvi->terminator = 1;
   if (src)
      bld_src_predicate(bld, nvi, 0, src);
}

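/* Map TGSI comparison opcodes to hardware condition codes. SNE also sets
 * the unordered bit so that comparisons involving NaN evaluate to true.
 */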
static ubyte
translate_setcc(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_SLT: return NV_CC_LT;
   case TGSI_OPCODE_SGE: return NV_CC_GE;
   case TGSI_OPCODE_SEQ: return NV_CC_EQ;
   case TGSI_OPCODE_SGT: return NV_CC_GT;
   case TGSI_OPCODE_SLE: return NV_CC_LE;
   case TGSI_OPCODE_SNE: return NV_CC_NE | NV_CC_U;
   case TGSI_OPCODE_STR: return NV_CC_TR;
   case TGSI_OPCODE_SFL: return NV_CC_FL;

   case TGSI_OPCODE_ISLT: return NV_CC_LT;
   case TGSI_OPCODE_ISGE: return NV_CC_GE;
   case TGSI_OPCODE_USEQ: return NV_CC_EQ;
   case TGSI_OPCODE_USGE: return NV_CC_GE;
   case TGSI_OPCODE_USLT: return NV_CC_LT;
   case TGSI_OPCODE_USNE: return NV_CC_NE;
   default:
      assert(0);
      return NV_CC_FL;
   }
}

static uint
translate_opcode(uint opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_ABS: return NV_OP_ABS_F32;
   case TGSI_OPCODE_ADD: return NV_OP_ADD_F32;
   case TGSI_OPCODE_SUB: return NV_OP_SUB_F32;
   case TGSI_OPCODE_UADD: return NV_OP_ADD_B32;
   case TGSI_OPCODE_AND: return NV_OP_AND;
   case TGSI_OPCODE_EX2: return NV_OP_EX2;
   case TGSI_OPCODE_CEIL: return NV_OP_CEIL;
   case TGSI_OPCODE_FLR: return NV_OP_FLOOR;
   case TGSI_OPCODE_TRUNC: return NV_OP_TRUNC;
   case TGSI_OPCODE_COS: return NV_OP_COS;
   case TGSI_OPCODE_SIN: return NV_OP_SIN;
   case TGSI_OPCODE_DDX: return NV_OP_DFDX;
   case TGSI_OPCODE_DDY: return NV_OP_DFDY;
   case TGSI_OPCODE_F2I:
   case TGSI_OPCODE_F2U:
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_U2F: return NV_OP_CVT;
   case TGSI_OPCODE_INEG: return NV_OP_NEG_S32;
   case TGSI_OPCODE_LG2: return NV_OP_LG2;
   case TGSI_OPCODE_ISHR: return NV_OP_SAR;
   case TGSI_OPCODE_USHR: return NV_OP_SHR;
   case TGSI_OPCODE_MAD: return NV_OP_MAD_F32;
   case TGSI_OPCODE_MAX: return NV_OP_MAX_F32;
   case TGSI_OPCODE_IMAX: return NV_OP_MAX_S32;
   case TGSI_OPCODE_UMAX: return NV_OP_MAX_U32;
   case TGSI_OPCODE_MIN: return NV_OP_MIN_F32;
   case TGSI_OPCODE_IMIN: return NV_OP_MIN_S32;
   case TGSI_OPCODE_UMIN: return NV_OP_MIN_U32;
   case TGSI_OPCODE_MUL: return NV_OP_MUL_F32;
   case TGSI_OPCODE_UMUL: return NV_OP_MUL_B32;
   case TGSI_OPCODE_OR: return NV_OP_OR;
   case TGSI_OPCODE_RCP: return NV_OP_RCP;
   case TGSI_OPCODE_RSQ: return NV_OP_RSQ;
   case TGSI_OPCODE_SAD: return NV_OP_SAD;
   case TGSI_OPCODE_SHL: return NV_OP_SHL;
   case TGSI_OPCODE_SLT:
   case TGSI_OPCODE_SGE:
   case TGSI_OPCODE_SEQ:
   case TGSI_OPCODE_SGT:
   case TGSI_OPCODE_SLE:
   case TGSI_OPCODE_SNE: return NV_OP_FSET_F32;
   case TGSI_OPCODE_ISLT:
   case TGSI_OPCODE_ISGE: return NV_OP_SET_S32;
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE: return NV_OP_SET_U32;
   case TGSI_OPCODE_TEX: return NV_OP_TEX;
   case TGSI_OPCODE_TXP: return NV_OP_TEX;
   case TGSI_OPCODE_TXB: return NV_OP_TXB;
   case TGSI_OPCODE_TXL: return NV_OP_TXL;
   case TGSI_OPCODE_XOR: return NV_OP_XOR;
   default:
      return NV_OP_NOP;
   }
}

#if 0
static ubyte
infer_src_type(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_MOV:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_XOR:
   case TGSI_OPCODE_SAD:
   case TGSI_OPCODE_U2F:
   case TGSI_OPCODE_UADD:
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_UMAD:
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMAX:
   case TGSI_OPCODE_UMIN:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
   case TGSI_OPCODE_USHR:
      return NV_TYPE_U32;
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_IDIV:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_INEG:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_ISLT:
      return NV_TYPE_S32;
   default:
      return NV_TYPE_F32;
   }
}

static ubyte
infer_dst_type(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_MOV:
   case TGSI_OPCODE_F2U:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_XOR:
   case TGSI_OPCODE_SAD:
   case TGSI_OPCODE_UADD:
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_UMAD:
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMAX:
   case TGSI_OPCODE_UMIN:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
   case TGSI_OPCODE_USHR:
      return NV_TYPE_U32;
   case TGSI_OPCODE_F2I:
   case TGSI_OPCODE_IDIV:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_INEG:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_ISLT:
      return NV_TYPE_S32;
   default:
      return NV_TYPE_F32;
   }
}
#endif

static void
emit_store(struct bld_context *bld, const struct tgsi_full_instruction *inst,
           unsigned chan, struct nv_value *res)
{
   const struct tgsi_full_dst_register *reg = &inst->Dst[0];
   struct nv_instruction *nvi;
   struct nv_value *mem;
   struct nv_value *ptr = NULL;
   int idx;

   idx = reg->Register.Index;
   assert(chan < 4);

   if (reg->Register.Indirect)
      ptr = FETCH_ADDR(reg->Indirect.Index,
                       tgsi_util_get_src_register_swizzle(&reg->Indirect, 0));

   switch (inst->Instruction.Saturate) {
   case TGSI_SAT_NONE:
      break;
   case TGSI_SAT_ZERO_ONE:
      res = bld_insn_1(bld, NV_OP_SAT, res);
      break;
   case TGSI_SAT_MINUS_PLUS_ONE:
      res = bld_insn_2(bld, NV_OP_MAX_F32, res, bld_load_imm_f32(bld, -1.0f));
      res = bld_insn_2(bld, NV_OP_MIN_F32, res, bld_load_imm_f32(bld, 1.0f));
      break;
   }

   switch (reg->Register.File) {
   case TGSI_FILE_OUTPUT:
      if (!res->insn)
         res = bld_insn_1(bld, NV_OP_MOV, res);

      if (bld->pc->is_fragprog) {
         assert(!ptr);
         STORE_OUTP(idx, chan, res);
      } else {
         nvi = new_instruction(bld->pc, NV_OP_EXPORT);
         mem = new_value(bld->pc, bld->ti->output_file, res->reg.size);
         nv_reference(bld->pc, nvi, 0, mem);
         nv_reference(bld->pc, nvi, 1, res);
         if (!ptr)
            mem->reg.address = bld->ti->output_loc[idx][chan];
         else
            mem->reg.address = 0x80 + idx * 16 + chan * 4;
         nvi->fixed = 1;
      }
      break;
   case TGSI_FILE_TEMPORARY:
      assert(idx < BLD_MAX_TEMPS);
      if (!res->insn)
         res = bld_insn_1(bld, NV_OP_MOV, res);

      assert(res->reg.file == NV_FILE_GPR);
      assert(res->insn->bb == bld->pc->current_block);

      if (bld->ti->require_stores)
         bld_lmem_store(bld, ptr, idx * 4 + chan, res);
      else
         STORE_TEMP(idx, chan, res);
      break;
   case TGSI_FILE_ADDRESS:
      assert(idx < BLD_MAX_ADDRS);
      STORE_ADDR(idx, chan, res);
      break;
   }
}

static INLINE uint32_t
bld_is_output_written(struct bld_context *bld, int i, int c)
{
   if (c < 0)
      return bld->outputs_written[i / 8] & (0xf << ((i * 4) % 32));
   return bld->outputs_written[i / 8] & (1 << ((i * 4 + c) % 32));
}

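/* Fragment program outputs are collected in registers during translation
 * and exported together here at the end of the shader: each written
 * component is moved into its result register and fed to a single EXPORT.
 */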
static void
bld_export_fp_outputs(struct bld_context *bld)
{
   struct nv_value *vals[4];
   struct nv_instruction *nvi;
   int i, c, n;

   for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; ++i) {
      if (!bld_is_output_written(bld, i, -1))
         continue;
      for (n = 0, c = 0; c < 4; ++c) {
         if (!bld_is_output_written(bld, i, c))
            continue;
         vals[n] = bld_fetch_global(bld, &bld->ovs[i][c]);
         assert(vals[n]);
         vals[n] = bld_insn_1(bld, NV_OP_MOV, vals[n]);
         vals[n++]->reg.id = bld->ti->output_loc[i][c];
      }
      assert(n);

      (nvi = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;
      for (c = 0; c < n; ++c)
         nv_reference(bld->pc, nvi, c, vals[c]);
   }
}

static void
bld_new_block(struct bld_context *bld, struct nv_basic_block *b)
{
   int i, c;

   bld->pc->current_block = b;

   for (i = 0; i < 4; ++i)
      bld->saved_addr[i][0] = NULL;
   for (i = 0; i < PIPE_MAX_SHADER_INPUTS; ++i)
      for (c = 0; c < 4; ++c)
         bld->saved_inputs[i][c] = NULL;

   bld->out_kind = CFG_EDGE_FORWARD;
}

static struct nv_value *
bld_get_saved_input(struct bld_context *bld, unsigned i, unsigned c)
{
   if (bld->saved_inputs[i][c])
      return bld->saved_inputs[i][c];
   return NULL;
}

static struct nv_value *
bld_interp(struct bld_context *bld, unsigned mode, struct nv_value *val)
{
   unsigned cent = mode & NVC0_INTERP_CENTROID;

   mode &= ~NVC0_INTERP_CENTROID;

   if (val->reg.address == 0x3fc) {
      /* gl_FrontFacing: 0/~0 to -1.0/+1.0 */
      val = bld_insn_1(bld, NV_OP_LINTERP, val);
      val = bld_insn_2(bld, NV_OP_SHL, val, bld_imm_u32(bld, 31));
      val = bld_insn_2(bld, NV_OP_XOR, val, bld_imm_f32(bld, -1.0f));
   } else
   if (mode == NVC0_INTERP_PERSPECTIVE) {
      val = bld_insn_2(bld, NV_OP_PINTERP, val, bld->frag_coord[3]);
   } else {
      val = bld_insn_1(bld, NV_OP_LINTERP, val);
   }

   val->insn->flat = mode == NVC0_INTERP_FLAT ? 1 : 0;
   val->insn->centroid = cent ? 1 : 0;
   return val;
}

static struct nv_value *
emit_fetch(struct bld_context *bld, const struct tgsi_full_instruction *insn,
           const unsigned s, const unsigned chan)
{
   const struct tgsi_full_src_register *src = &insn->Src[s];
   struct nv_value *res = NULL;
   struct nv_value *ptr = NULL;
   int idx, ind_idx, dim_idx;
   unsigned swz, ind_swz, sgn;

   idx = src->Register.Index;
   swz = tgsi_util_get_full_src_register_swizzle(src, chan);

   if (src->Register.Indirect) {
      ind_idx = src->Indirect.Index;
      ind_swz = tgsi_util_get_src_register_swizzle(&src->Indirect, 0);

      ptr = FETCH_ADDR(ind_idx, ind_swz);
   }

   if (src->Register.Dimension)
      dim_idx = src->Dimension.Index;
   else
      dim_idx = 0;

   switch (src->Register.File) {
   case TGSI_FILE_CONSTANT:
      assert(dim_idx < 14);
      res = new_value(bld->pc, NV_FILE_MEM_C(dim_idx), 4);
      res->reg.address = idx * 16 + swz * 4;
      res = bld_insn_1(bld, NV_OP_LD, res);
      if (ptr)
         bld_src_pointer(bld, res->insn, 1, ptr);
      break;
   case TGSI_FILE_IMMEDIATE: /* XXX: type for MOV TEMP[0], -IMM[0] */
      assert(idx < bld->ti->immd32_nr);
      res = bld_load_imm_u32(bld, bld->ti->immd32[idx * 4 + swz]);
      break;
   case TGSI_FILE_INPUT:
      assert(!src->Register.Dimension);
      if (!ptr) {
         res = bld_get_saved_input(bld, idx, swz);
         if (res)
            return res;
      }
      res = new_value(bld->pc, bld->ti->input_file, 4);
      if (ptr)
         res->reg.address = 0x80 + idx * 16 + swz * 4;
      else
         res->reg.address = bld->ti->input_loc[idx][swz];

      if (bld->pc->is_fragprog)
         res = bld_interp(bld, bld->ti->interp_mode[idx], res);
      else
         res = bld_insn_1(bld, NV_OP_VFETCH, res);

      if (ptr)
         bld_src_pointer(bld, res->insn, res->insn->src[1] ? 2 : 1, ptr);
      else
         bld->saved_inputs[idx][swz] = res;
      break;
   case TGSI_FILE_TEMPORARY:
      if (bld->ti->require_stores)
         res = bld_lmem_load(bld, ptr, idx * 4 + swz);
      else
         res = bld_fetch_global(bld, &bld->tvs[idx][swz]);
      break;
   case TGSI_FILE_ADDRESS:
      res = bld_fetch_global(bld, &bld->avs[idx][swz]);
      break;
   case TGSI_FILE_PREDICATE:
      res = bld_fetch_global(bld, &bld->pvs[idx][swz]);
      break;
   default:
      NOUVEAU_ERR("illegal/unhandled src reg file: %d\n", src->Register.File);
      abort();
      break;
   }
   if (!res)
      return bld_undef(bld, NV_FILE_GPR);

   sgn = tgsi_util_get_full_src_register_sign_mode(src, chan);

   switch (sgn) {
   case TGSI_UTIL_SIGN_KEEP:
      break;
   case TGSI_UTIL_SIGN_CLEAR:
      res = bld_insn_1(bld, NV_OP_ABS_F32, res);
      break;
   case TGSI_UTIL_SIGN_TOGGLE:
      res = bld_insn_1(bld, NV_OP_NEG_F32, res);
      break;
   case TGSI_UTIL_SIGN_SET:
      res = bld_insn_1(bld, NV_OP_ABS_F32, res);
      res = bld_insn_1(bld, NV_OP_NEG_F32, res);
      break;
   default:
      NOUVEAU_ERR("illegal/unhandled src reg sign mode\n");
      abort();
      break;
   }

   return res;
}

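/* TGSI LIT: dst.x = dst.w = 1.0, dst.y = max(src.x, 0), and
 * dst.z = (src.x > 0) ? max(src.y, 0)^clamp(src.w, -128, 128) : 0.
 * The power is computed with bld_pow; a predicate on (src.x <= 0)
 * selects zero for the z component.
 */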
static void
bld_lit(struct bld_context *bld, struct nv_value *dst0[4],
        const struct tgsi_full_instruction *insn)
{
   struct nv_value *val0 = NULL;
   unsigned mask = insn->Dst[0].Register.WriteMask;

   if (mask & ((1 << 0) | (1 << 3)))
      dst0[3] = dst0[0] = bld_load_imm_f32(bld, 1.0f);

   if (mask & (3 << 1)) {
      val0 = bld_insn_2(bld, NV_OP_MAX_F32,
                        emit_fetch(bld, insn, 0, 0), bld->zero);
      if (mask & (1 << 1))
         dst0[1] = val0;
   }

   if (mask & (1 << 2)) {
      struct nv_value *val1, *val3, *src1, *src3, *pred;
      struct nv_value *pos128 = bld_load_imm_f32(bld, 127.999999f);
      struct nv_value *neg128 = bld_load_imm_f32(bld, -127.999999f);

      src1 = emit_fetch(bld, insn, 0, 1);
      src3 = emit_fetch(bld, insn, 0, 3);

      pred = bld_setp(bld, NV_OP_SET_F32, NV_CC_LE, val0, bld->zero);

      val1 = bld_insn_2(bld, NV_OP_MAX_F32, src1, bld->zero);
      val3 = bld_insn_2(bld, NV_OP_MAX_F32, src3, neg128);
      val3 = bld_insn_2(bld, NV_OP_MIN_F32, val3, pos128);
      val3 = bld_pow(bld, val1, val3);

      dst0[2] = bld_insn_1(bld, NV_OP_MOV, bld->zero);
      bld_src_predicate(bld, dst0[2]->insn, 1, pred);

      dst0[2] = bld_insn_2(bld, NV_OP_SELECT, val3, dst0[2]);
   }
}

static INLINE void
get_tex_dim(const struct tgsi_full_instruction *insn, int *dim, int *arg)
{
   switch (insn->Texture.Texture) {
   case TGSI_TEXTURE_1D:
      *arg = *dim = 1;
      break;
   case TGSI_TEXTURE_SHADOW1D:
      *dim = 1;
      *arg = 2;
      break;
   case TGSI_TEXTURE_UNKNOWN:
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
      *arg = *dim = 2;
      break;
   case TGSI_TEXTURE_SHADOW2D:
   case TGSI_TEXTURE_SHADOWRECT:
      *dim = 2;
      *arg = 3;
      break;
   case TGSI_TEXTURE_3D:
   case TGSI_TEXTURE_CUBE:
      *dim = *arg = 3;
      break;
   default:
      assert(0);
      break;
   }
}

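/* Duplicate an instruction, giving the copy fresh result values but
 * referencing the same sources.
 */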
static struct nv_value *
bld_clone(struct bld_context *bld, struct nv_instruction *nvi)
{
   struct nv_instruction *dupi = new_instruction(bld->pc, nvi->opcode);
   struct nv_instruction *next, *prev;
   int c;

   next = dupi->next;
   prev = dupi->prev;

   *dupi = *nvi;

   dupi->next = next;
   dupi->prev = prev;

   for (c = 0; c < 5 && nvi->def[c]; ++c)
      bld_def(dupi, c, new_value_like(bld->pc, nvi->def[c]));

   for (c = 0; c < 6 && nvi->src[c]; ++c) {
      dupi->src[c] = NULL;
      nv_reference(bld->pc, dupi, c, nvi->src[c]->value);
   }

   return dupi->def[0];
}

/* NOTE: proj(t0) = (t0 / w) / (tc3 / w) = tc0 / tc2 handled by optimizer */
static void
load_proj_tex_coords(struct bld_context *bld,
                     struct nv_value *t[4], int dim, int arg,
                     const struct tgsi_full_instruction *insn)
{
   int c;
   unsigned mask = (1 << dim) - 1;

   if (arg != dim)
      mask |= 4; /* depth comparison value */

   t[3] = emit_fetch(bld, insn, 0, 3);
   if (t[3]->insn->opcode == NV_OP_PINTERP) {
      t[3] = bld_clone(bld, t[3]->insn);
      t[3]->insn->opcode = NV_OP_LINTERP;
      nv_reference(bld->pc, t[3]->insn, 1, NULL);
   }
   t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);

   for (c = 0; c < 4; ++c) {
      if (!(mask & (1 << c)))
         continue;
      t[c] = emit_fetch(bld, insn, 0, c);

      if (t[c]->insn->opcode != NV_OP_PINTERP)
         continue;
      mask &= ~(1 << c);

      t[c] = bld_clone(bld, t[c]->insn);
      nv_reference(bld->pc, t[c]->insn, 1, t[3]);
   }
   if (mask == 0)
      return;

   t[3] = emit_fetch(bld, insn, 0, 3);
   t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);

   for (c = 0; c < 4; ++c)
      if (mask & (1 << c))
         t[c] = bld_insn_2(bld, NV_OP_MUL_F32, t[c], t[3]);
}

/* For a quad of threads / top left, top right, bottom left, bottom right
 * pixels, do a different operation, and take src0 from a specific thread.
 */
#define QOP_ADD  0
#define QOP_SUBR 1
#define QOP_SUB  2
#define QOP_MOV1 3

#define QOP(a, b, c, d) \
   ((QOP_##a << 0) | (QOP_##b << 2) | (QOP_##c << 4) | (QOP_##d << 6))

static INLINE struct nv_value *
bld_quadop(struct bld_context *bld, ubyte qop, struct nv_value *src0, int lane,
           struct nv_value *src1, boolean wp)
{
   struct nv_value *val = bld_insn_2(bld, NV_OP_QUADOP, src0, src1);
   val->insn->lanes = lane;
   val->insn->quadop = qop;
   if (wp) {
      assert(!"quadop predicate write");
   }
   return val;
}

static struct nv_instruction *
emit_tex(struct bld_context *bld, uint opcode,
         struct nv_value *dst[4], struct nv_value *t_in[4],
         int argc, int tic, int tsc, int cube)
{
   struct nv_value *t[4];
   struct nv_instruction *nvi;
   int c;

   /* the inputs to a tex instruction must be separate values */
   for (c = 0; c < argc; ++c) {
      t[c] = bld_insn_1(bld, NV_OP_MOV, t_in[c]);
      t[c]->insn->fixed = 1;
   }

   nvi = new_instruction(bld->pc, opcode);
   for (c = 0; c < 4; ++c)
      dst[c] = bld_def(nvi, c, new_value(bld->pc, NV_FILE_GPR, 4));
   for (c = 0; c < argc; ++c)
      nv_reference(bld->pc, nvi, c, t[c]);

   nvi->ext.tex.t = tic;
   nvi->ext.tex.s = tsc;
   nvi->tex_mask = 0xf;
   nvi->tex_cube = cube;
   nvi->tex_live = 0;
   nvi->tex_argc = argc;

   return nvi;
}

/*
static boolean
bld_is_constant(struct nv_value *val)
{
   if (val->reg.file == NV_FILE_IMM)
      return TRUE;
   return val->insn && nvCG_find_constant(val->insn->src[0]);
}
*/

static void
bld_tex(struct bld_context *bld, struct nv_value *dst0[4],
        const struct tgsi_full_instruction *insn)
{
   struct nv_value *t[4], *s[3];
   uint opcode = translate_opcode(insn->Instruction.Opcode);
   int arg, dim, c;
   const int tic = insn->Src[1].Register.Index;
   const int tsc = tic;
   const int cube = (insn->Texture.Texture == TGSI_TEXTURE_CUBE) ? 1 : 0;

   get_tex_dim(insn, &dim, &arg);

   if (!cube && insn->Instruction.Opcode == TGSI_OPCODE_TXP)
      load_proj_tex_coords(bld, t, dim, arg, insn);
   else {
      for (c = 0; c < dim; ++c)
         t[c] = emit_fetch(bld, insn, 0, c);
      if (arg != dim)
         t[dim] = emit_fetch(bld, insn, 0, 2);
   }

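   /* Normalize cube map coordinates by the largest absolute component
    * so the coordinates are projected onto the selected face.
    */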
   if (cube) {
      assert(dim >= 3);
      for (c = 0; c < 3; ++c)
         s[c] = bld_insn_1(bld, NV_OP_ABS_F32, t[c]);

      s[0] = bld_insn_2(bld, NV_OP_MAX_F32, s[0], s[1]);
      s[0] = bld_insn_2(bld, NV_OP_MAX_F32, s[0], s[2]);
      s[0] = bld_insn_1(bld, NV_OP_RCP, s[0]);

      for (c = 0; c < 3; ++c)
         t[c] = bld_insn_2(bld, NV_OP_MUL_F32, t[c], s[0]);
   }

   if (opcode == NV_OP_TXB || opcode == NV_OP_TXL)
      t[arg++] = emit_fetch(bld, insn, 0, 3);
   emit_tex(bld, opcode, dst0, t, arg, tic, tsc, cube);
}

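/* Expand an n-component dot product into one MUL followed by n - 1 MADs. */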
static INLINE struct nv_value *
bld_dot(struct bld_context *bld, const struct tgsi_full_instruction *insn,
        int n)
{
   struct nv_value *dotp, *src0, *src1;
   int c;

   src0 = emit_fetch(bld, insn, 0, 0);
   src1 = emit_fetch(bld, insn, 1, 0);
   dotp = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);

   for (c = 1; c < n; ++c) {
      src0 = emit_fetch(bld, insn, 0, c);
      src1 = emit_fetch(bld, insn, 1, c);
      dotp = bld_insn_3(bld, NV_OP_MAD_F32, src0, src1, dotp);
   }
   return dotp;
}

#define FOR_EACH_DST0_ENABLED_CHANNEL(chan, inst) \
   for (chan = 0; chan < 4; ++chan)               \
      if ((inst)->Dst[0].Register.WriteMask & (1 << chan))

static void
bld_instruction(struct bld_context *bld,
                const struct tgsi_full_instruction *insn)
{
   struct nv_value *src0;
   struct nv_value *src1;
   struct nv_value *src2;
   struct nv_value *dst0[4] = { NULL };
   struct nv_value *temp;
   int c;
   uint opcode = translate_opcode(insn->Instruction.Opcode);
   uint8_t mask = insn->Dst[0].Register.WriteMask;

#ifdef NOUVEAU_DEBUG_BITS
   debug_printf("bld_instruction:"); tgsi_dump_instruction(insn, 1);
#endif

   switch (insn->Instruction.Opcode) {
   case TGSI_OPCODE_ADD:
   case TGSI_OPCODE_MAX:
   case TGSI_OPCODE_MIN:
   case TGSI_OPCODE_MUL:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, opcode, src0, src1);
      }
      break;
   case TGSI_OPCODE_ARL:
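      /* Address registers hold byte offsets: floor the float index and
       * shift left by 4 (one vec4 element is 16 bytes).
       */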
      src1 = bld_imm_u32(bld, 4);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src0 = bld_insn_1(bld, NV_OP_FLOOR, src0);
         src0->insn->ext.cvt.d = NV_TYPE_S32;
         src0->insn->ext.cvt.s = NV_TYPE_F32;
         dst0[c] = bld_insn_2(bld, NV_OP_SHL, src0, src1);
      }
      break;
   case TGSI_OPCODE_CMP:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src0 = bld_setp(bld, NV_OP_SET_F32, NV_CC_LT, src0, bld->zero);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         dst0[c] = bld_insn_3(bld, NV_OP_SELP, src1, src2, src0);
      }
      break;
   case TGSI_OPCODE_COS:
   case TGSI_OPCODE_SIN:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
      if (insn->Dst[0].Register.WriteMask & 7)
         temp = bld_insn_1(bld, opcode, temp);
      for (c = 0; c < 3; ++c)
         if (insn->Dst[0].Register.WriteMask & (1 << c))
            dst0[c] = temp;
      if (!(insn->Dst[0].Register.WriteMask & (1 << 3)))
         break;
      src0 = emit_fetch(bld, insn, 0, 3);
      temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
      dst0[3] = bld_insn_1(bld, opcode, temp);
      break;
   case TGSI_OPCODE_DP2:
      temp = bld_dot(bld, insn, 2);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DP3:
      temp = bld_dot(bld, insn, 3);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DP4:
      temp = bld_dot(bld, insn, 4);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DPH:
      src0 = bld_dot(bld, insn, 3);
      src1 = emit_fetch(bld, insn, 1, 3);
      temp = bld_insn_2(bld, NV_OP_ADD_F32, src0, src1);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DST:
      if (insn->Dst[0].Register.WriteMask & 1)
         dst0[0] = bld_imm_f32(bld, 1.0f);
      if (insn->Dst[0].Register.WriteMask & 2) {
         src0 = emit_fetch(bld, insn, 0, 1);
         src1 = emit_fetch(bld, insn, 1, 1);
         dst0[1] = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
      }
      if (insn->Dst[0].Register.WriteMask & 4)
         dst0[2] = emit_fetch(bld, insn, 0, 2);
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = emit_fetch(bld, insn, 1, 3);
      break;
   case TGSI_OPCODE_EXP:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_FLOOR, src0);

      if (insn->Dst[0].Register.WriteMask & 2)
         dst0[1] = bld_insn_2(bld, NV_OP_SUB_F32, src0, temp);
      if (insn->Dst[0].Register.WriteMask & 1) {
         temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
         dst0[0] = bld_insn_1(bld, NV_OP_EX2, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 4) {
         temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
         dst0[2] = bld_insn_1(bld, NV_OP_EX2, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_EX2:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
      temp = bld_insn_1(bld, NV_OP_EX2, temp);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_FRC:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         dst0[c] = bld_insn_1(bld, NV_OP_FLOOR, src0);
         dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src0, dst0[c]);
      }
      break;
   case TGSI_OPCODE_KIL:
      for (c = 0; c < 4; ++c)
         bld_kil(bld, emit_fetch(bld, insn, 0, c));
      break;
   case TGSI_OPCODE_KILP:
      (new_instruction(bld->pc, NV_OP_KIL))->fixed = 1;
      break;
   case TGSI_OPCODE_IF:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      assert(bld->cond_lvl < BLD_MAX_COND_NESTING);

      nvc0_bblock_attach(bld->pc->current_block, b, CFG_EDGE_FORWARD);

      bld->join_bb[bld->cond_lvl] = bld->pc->current_block;
      bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;

      src1 = bld_setp(bld, NV_OP_SET_U32, NV_CC_NE,
                      emit_fetch(bld, insn, 0, 0), bld->zero);

      bld_flow(bld, NV_OP_BRA, src1, NULL, (bld->cond_lvl == 0));

      ++bld->cond_lvl;
      bld_new_block(bld, b);
   }
      break;
   case TGSI_OPCODE_ELSE:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      --bld->cond_lvl;
      nvc0_bblock_attach(bld->join_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);

      bld->cond_bb[bld->cond_lvl]->exit->target = b;
      bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;

      new_instruction(bld->pc, NV_OP_BRA)->terminator = 1;

      ++bld->cond_lvl;
      bld_new_block(bld, b);
   }
      break;
   case TGSI_OPCODE_ENDIF:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      --bld->cond_lvl;
      nvc0_bblock_attach(bld->pc->current_block, b, bld->out_kind);
      nvc0_bblock_attach(bld->cond_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);

      bld->cond_bb[bld->cond_lvl]->exit->target = b;

      bld_new_block(bld, b);

      if (!bld->cond_lvl && bld->join_bb[bld->cond_lvl]) {
         bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
         new_instruction(bld->pc, NV_OP_JOIN)->join = 1;
      }
   }
      break;
   case TGSI_OPCODE_BGNLOOP:
   {
      struct nv_basic_block *bl = new_basic_block(bld->pc);
      struct nv_basic_block *bb = new_basic_block(bld->pc);

      assert(bld->loop_lvl < BLD_MAX_LOOP_NESTING);

      bld->loop_bb[bld->loop_lvl] = bl;
      bld->brkt_bb[bld->loop_lvl] = bb;

      nvc0_bblock_attach(bld->pc->current_block, bl, CFG_EDGE_LOOP_ENTER);

      bld_new_block(bld, bld->loop_bb[bld->loop_lvl++]);

      if (bld->loop_lvl == bld->pc->loop_nesting_bound)
         bld->pc->loop_nesting_bound++;

      bld_clear_def_use(&bld->tvs[0][0], BLD_MAX_TEMPS, bld->loop_lvl);
      bld_clear_def_use(&bld->avs[0][0], BLD_MAX_ADDRS, bld->loop_lvl);
      bld_clear_def_use(&bld->pvs[0][0], BLD_MAX_PREDS, bld->loop_lvl);
   }
      break;
   case TGSI_OPCODE_BRK:
   {
      struct nv_basic_block *bb = bld->brkt_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BRA, NULL, bb, FALSE);

      if (bld->out_kind == CFG_EDGE_FORWARD) /* else we already had BRK/CONT */
         nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);

      bld->out_kind = CFG_EDGE_FAKE;
   }
      break;
   case TGSI_OPCODE_CONT:
   {
      struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BRA, NULL, bb, FALSE);

      nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_BACK);

      if ((bb = bld->join_bb[bld->cond_lvl - 1])) {
         bld->join_bb[bld->cond_lvl - 1] = NULL;
         nvc0_insn_delete(bb->exit->prev);
      }
      bld->out_kind = CFG_EDGE_FAKE;
   }
      break;
   case TGSI_OPCODE_ENDLOOP:
   {
      struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BRA, NULL, bb, FALSE);

      nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_BACK);

      bld_loop_end(bld, bb); /* replace loop-side operand of the phis */

      bld_new_block(bld, bld->brkt_bb[--bld->loop_lvl]);
   }
      break;
   case TGSI_OPCODE_ABS:
   case TGSI_OPCODE_CEIL:
   case TGSI_OPCODE_FLR:
   case TGSI_OPCODE_TRUNC:
   case TGSI_OPCODE_DDX:
   case TGSI_OPCODE_DDY:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         dst0[c] = bld_insn_1(bld, opcode, src0);
      }
      break;
   case TGSI_OPCODE_LIT:
      bld_lit(bld, dst0, insn);
      break;
   case TGSI_OPCODE_LRP:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src1, src2);
         dst0[c] = bld_insn_3(bld, NV_OP_MAD_F32, dst0[c], src0, src2);
      }
      break;
   case TGSI_OPCODE_MOV:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = emit_fetch(bld, insn, 0, c);
      break;
   case TGSI_OPCODE_MAD:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         dst0[c] = bld_insn_3(bld, opcode, src0, src1, src2);
      }
      break;
   case TGSI_OPCODE_POW:
      src0 = emit_fetch(bld, insn, 0, 0);
      src1 = emit_fetch(bld, insn, 1, 0);
      temp = bld_pow(bld, src0, src1);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_LOG:
      src0 = emit_fetch(bld, insn, 0, 0);
      src0 = bld_insn_1(bld, NV_OP_ABS_F32, src0);
      temp = bld_insn_1(bld, NV_OP_LG2, src0);
      dst0[2] = temp;
      if (insn->Dst[0].Register.WriteMask & 3) {
         temp = bld_insn_1(bld, NV_OP_FLOOR, temp);
         dst0[0] = temp;
      }
      if (insn->Dst[0].Register.WriteMask & 2) {
         temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
         temp = bld_insn_1(bld, NV_OP_EX2, temp);
         temp = bld_insn_1(bld, NV_OP_RCP, temp);
         dst0[1] = bld_insn_2(bld, NV_OP_MUL_F32, src0, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_RCP:
   case TGSI_OPCODE_LG2:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, opcode, src0);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_RSQ:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_ABS_F32, src0);
      temp = bld_insn_1(bld, NV_OP_RSQ, temp);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_SLT:
   case TGSI_OPCODE_SGE:
   case TGSI_OPCODE_SEQ:
   case TGSI_OPCODE_SGT:
   case TGSI_OPCODE_SLE:
   case TGSI_OPCODE_SNE:
   case TGSI_OPCODE_ISLT:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, opcode, src0, src1);
         dst0[c]->insn->set_cond = translate_setcc(insn->Instruction.Opcode);
      }
      break;
   case TGSI_OPCODE_SCS:
      if (insn->Dst[0].Register.WriteMask & 0x3) {
         src0 = emit_fetch(bld, insn, 0, 0);
         temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
         if (insn->Dst[0].Register.WriteMask & 0x1)
            dst0[0] = bld_insn_1(bld, NV_OP_COS, temp);
         if (insn->Dst[0].Register.WriteMask & 0x2)
            dst0[1] = bld_insn_1(bld, NV_OP_SIN, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 0x4)
         dst0[2] = bld_imm_f32(bld, 0.0f);
      if (insn->Dst[0].Register.WriteMask & 0x8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_SSG:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) { /* XXX: set lt, set gt, sub */
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = bld_setp(bld, NV_OP_SET_F32, NV_CC_EQ, src0, bld->zero);
         temp = bld_insn_2(bld, NV_OP_AND, src0, bld_imm_u32(bld, 0x80000000));
         temp = bld_insn_2(bld, NV_OP_OR, temp, bld_imm_f32(bld, 1.0f));
         dst0[c] = bld_insn_1(bld, NV_OP_MOV, temp);
         bld_src_predicate(bld, dst0[c]->insn, 1, src1);
      }
      break;
   case TGSI_OPCODE_SUB:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src0, src1);
      }
      break;
   case TGSI_OPCODE_TEX:
   case TGSI_OPCODE_TXB:
   case TGSI_OPCODE_TXL:
   case TGSI_OPCODE_TXP:
      bld_tex(bld, dst0, insn);
      break;
   case TGSI_OPCODE_XPD:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         if (c == 3) {
            dst0[3] = bld_imm_f32(bld, 1.0f);
            break;
         }
         src0 = emit_fetch(bld, insn, 1, (c + 1) % 3);
         src1 = emit_fetch(bld, insn, 0, (c + 2) % 3);
         dst0[c] = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);

         src0 = emit_fetch(bld, insn, 0, (c + 1) % 3);
         src1 = emit_fetch(bld, insn, 1, (c + 2) % 3);
         dst0[c] = bld_insn_3(bld, NV_OP_MAD_F32, src0, src1, dst0[c]);

         dst0[c]->insn->src[2]->mod ^= NV_MOD_NEG;
      }
      break;
   case TGSI_OPCODE_RET:
      (new_instruction(bld->pc, NV_OP_RET))->fixed = 1;
      break;
   case TGSI_OPCODE_END:
      /* VP outputs are exported in-place as scalars, optimization later */
      if (bld->pc->is_fragprog)
         bld_export_fp_outputs(bld);
      break;
   default:
      NOUVEAU_ERR("unhandled opcode %u\n", insn->Instruction.Opcode);
      abort();
      break;
   }

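   /* For vertex programs, merge the scalar results of an output write with
    * NV_OP_BIND into a contiguous 8/12/16 byte vector so a single wide
    * EXPORT can store them; immediates and the zero register are first
    * copied into GPRs.
    */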
   if (insn->Dst[0].Register.File == TGSI_FILE_OUTPUT &&
       !bld->pc->is_fragprog) {
      struct nv_instruction *mi = NULL;
      uint size;

      for (c = 0; c < 4; ++c)
         if ((mask & (1 << c)) &&
             ((dst0[c]->reg.file == NV_FILE_IMM) ||
              (dst0[c]->reg.id == 63 && dst0[c]->reg.file == NV_FILE_GPR)))
            dst0[c] = bld_insn_1(bld, NV_OP_MOV, dst0[c]);

      c = 0;
      if ((mask & 0x3) == 0x3) {
         mask &= ~0x3;
         size = 8;
         mi = bld_insn_2(bld, NV_OP_BIND, dst0[0], dst0[1])->insn;
      }
      if ((mask & 0xc) == 0xc) {
         mask &= ~0xc;
         if (mi) {
            size = 16;
            nv_reference(bld->pc, mi, 2, dst0[2]);
            nv_reference(bld->pc, mi, 3, dst0[3]);
         } else {
            c = 2;
            size = 8;
            mi = bld_insn_2(bld, NV_OP_BIND, dst0[2], dst0[3])->insn;
         }
      } else
      if (mi && (mask & 0x4)) {
         size = 12;
         mask &= ~0x4;
         nv_reference(bld->pc, mi, 2, dst0[2]);
      }

      if (mi) {
         struct nv_instruction *ex = new_instruction(bld->pc, NV_OP_EXPORT);
         int s;

         nv_reference(bld->pc, ex, 0, new_value(bld->pc, NV_FILE_MEM_V, 4));
         nv_reference(bld->pc, ex, 1, mi->def[0]);

         for (s = 1; s < size / 4; ++s) {
            bld_def(mi, s, new_value(bld->pc, NV_FILE_GPR, 4));
            nv_reference(bld->pc, ex, s + 1, mi->def[s]);
         }

         ex->fixed = 1;
         ex->src[0]->value->reg.size = size;
         ex->src[0]->value->reg.address =
            bld->ti->output_loc[insn->Dst[0].Register.Index][c];
      }
   }

   for (c = 0; c < 4; ++c)
      if (mask & (1 << c))
         emit_store(bld, insn, c, dst0[c]);
}

static INLINE void
bld_free_registers(struct bld_register *base, int n)
{
   int i, c;

   for (i = 0; i < n; ++i)
      for (c = 0; c < 4; ++c)
         util_dynarray_fini(&base[i * 4 + c].vals);
}

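/* Entry point: translate the parsed TGSI program in @ti into the nv_pc
 * intermediate representation, building the CFG and SSA form on the fly.
 */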
int
nvc0_tgsi_to_nc(struct nv_pc *pc, struct nvc0_translation_info *ti)
{
   struct bld_context *bld = CALLOC_STRUCT(bld_context);
   unsigned ip;

   pc->root[0] = pc->current_block = new_basic_block(pc);

   bld->pc = pc;
   bld->ti = ti;

   pc->loop_nesting_bound = 1;

   bld->zero = new_value(pc, NV_FILE_GPR, 4);
   bld->zero->reg.id = 63;

   if (pc->is_fragprog) {
      struct nv_value *mem = new_value(pc, NV_FILE_MEM_V, 4);
      mem->reg.address = 0x7c;

      bld->frag_coord[3] = bld_insn_1(bld, NV_OP_LINTERP, mem);
      bld->frag_coord[3] = bld_insn_1(bld, NV_OP_RCP, bld->frag_coord[3]);
   }

   for (ip = 0; ip < ti->num_insns; ++ip)
      bld_instruction(bld, &ti->insns[ip]);

   bld_free_registers(&bld->tvs[0][0], BLD_MAX_TEMPS);
   bld_free_registers(&bld->avs[0][0], BLD_MAX_ADDRS);
   bld_free_registers(&bld->pvs[0][0], BLD_MAX_PREDS);
   bld_free_registers(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   FREE(bld);
   return 0;
}

/* If a variable is assigned in a loop, replace all references to the value
 * from outside the loop with a phi value.
 */
static void
bld_replace_value(struct nv_pc *pc, struct nv_basic_block *b,
                  struct nv_value *old_val,
                  struct nv_value *new_val)
{
   struct nv_instruction *nvi;

   for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = nvi->next) {
      int s;
      for (s = 0; s < 6 && nvi->src[s]; ++s)
         if (nvi->src[s]->value == old_val)
            nv_reference(pc, nvi, s, new_val);
   }

   b->pass_seq = pc->pass_seq;

   if (b->out[0] && b->out[0]->pass_seq < pc->pass_seq)
      bld_replace_value(pc, b->out[0], old_val, new_val);

   if (b->out[1] && b->out[1]->pass_seq < pc->pass_seq)
      bld_replace_value(pc, b->out[1], old_val, new_val);
}