nvc0: try to fix register conflicts for vector instructions
[mesa.git] / src/gallium/drivers/nvc0/nvc0_tgsi_to_nc.c
1 /*
2 * Copyright 2010 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
23 #include <unistd.h>
24
25 #define NOUVEAU_DEBUG 1
26
27 #include "pipe/p_shader_tokens.h"
28 #include "tgsi/tgsi_parse.h"
29 #include "tgsi/tgsi_util.h"
30 #include "tgsi/tgsi_dump.h"
31 #include "util/u_dynarray.h"
32
33 #include "nvc0_pc.h"
34 #include "nvc0_program.h"
35
36 /* Arbitrary internal limits. */
37 #define BLD_MAX_TEMPS 64
38 #define BLD_MAX_ADDRS 4
39 #define BLD_MAX_PREDS 4
40 #define BLD_MAX_IMMDS 128
41 #define BLD_MAX_OUTPS PIPE_MAX_SHADER_OUTPUTS
42
43 #define BLD_MAX_COND_NESTING 8
44 #define BLD_MAX_LOOP_NESTING 4
45 #define BLD_MAX_CALL_NESTING 2
46
47 /* This structure represents a TGSI register. */
48 struct bld_register {
49 struct nv_value *current;
50 /* collect all SSA values assigned to it */
51 struct util_dynarray vals;
52 /* 1 bit per loop level, indicates if used/def'd, reset when the loop ends */
53 uint16_t loop_use;
54 uint16_t loop_def;
55 };
56
57 static INLINE struct nv_value **
58 bld_register_access(struct bld_register *reg, unsigned i)
59 {
60 return util_dynarray_element(&reg->vals, struct nv_value *, i);
61 }
62
63 static INLINE void
64 bld_register_add_val(struct bld_register *reg, struct nv_value *val)
65 {
66 util_dynarray_append(&reg->vals, struct nv_value *, val);
67 }
68
69 static INLINE boolean
70 bld_register_del_val(struct bld_register *reg, struct nv_value *val)
71 {
72 unsigned i;
73
74 for (i = reg->vals.size / sizeof(struct nv_value *); i > 0; --i)
75 if (*bld_register_access(reg, i - 1) == val)
76 break;
77 if (!i)
78 return FALSE;
79
80 if (i != reg->vals.size / sizeof(struct nv_value *))
81 *bld_register_access(reg, i - 1) = util_dynarray_pop(&reg->vals,
82 struct nv_value *);
83 else
84 reg->vals.size -= sizeof(struct nv_value *);
85
86 return TRUE;
87 }
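/* Note the swap-with-last removal above: deleting the middle value of
 * { a, b, c } pops c into b's slot, so order is not preserved - only
 * membership matters here.
 */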
88
89 struct bld_context {
90 struct nvc0_translation_info *ti;
91
92 struct nv_pc *pc;
93 struct nv_basic_block *b;
94
95 struct tgsi_parse_context parse[BLD_MAX_CALL_NESTING];
96 int call_lvl;
97
98 struct nv_basic_block *cond_bb[BLD_MAX_COND_NESTING];
99 struct nv_basic_block *join_bb[BLD_MAX_COND_NESTING];
100 struct nv_basic_block *else_bb[BLD_MAX_COND_NESTING];
101 int cond_lvl;
102 struct nv_basic_block *loop_bb[BLD_MAX_LOOP_NESTING];
103 struct nv_basic_block *brkt_bb[BLD_MAX_LOOP_NESTING];
104 int loop_lvl;
105
106 ubyte out_kind; /* CFG_EDGE_FORWARD, or FAKE in case of BREAK/CONT */
107
108 struct bld_register tvs[BLD_MAX_TEMPS][4]; /* TGSI_FILE_TEMPORARY */
109 struct bld_register avs[BLD_MAX_ADDRS][4]; /* TGSI_FILE_ADDRESS */
110 struct bld_register pvs[BLD_MAX_PREDS][4]; /* TGSI_FILE_PREDICATE */
111 struct bld_register ovs[BLD_MAX_OUTPS][4]; /* TGSI_FILE_OUTPUT, FP only */
112
113 uint32_t outputs_written[(PIPE_MAX_SHADER_OUTPUTS + 7) / 8];
114 int hpos_index;
115
116 struct nv_value *zero;
117 struct nv_value *frag_coord[4];
118
119 /* wipe on new BB */
120 struct nv_value *saved_sysvals[4];
121 struct nv_value *saved_addr[4][2];
122 struct nv_value *saved_inputs[PIPE_MAX_SHADER_INPUTS][4];
123 struct nv_value *saved_immd[BLD_MAX_IMMDS];
124 uint num_immds;
125 };
126
127 static INLINE ubyte
128 bld_register_file(struct bld_context *bld, struct bld_register *reg)
129 {
130 if (reg < &bld->avs[0][0]) return NV_FILE_GPR;
131 else
132 if (reg < &bld->pvs[0][0]) return NV_FILE_GPR;
133 else
134 if (reg < &bld->ovs[0][0]) return NV_FILE_PRED;
135 else
136 return NV_FILE_MEM_V;
137 }
138
139 static INLINE struct nv_value *
140 bld_fetch(struct bld_context *bld, struct bld_register *regs, int i, int c)
141 {
142 regs[i * 4 + c].loop_use |= 1 << bld->loop_lvl;
143 return regs[i * 4 + c].current;
144 }
145
146 static struct nv_value *
147 bld_loop_phi(struct bld_context *, struct bld_register *, struct nv_value *);
148
149 /* If a variable is defined in a loop without prior use, we don't need
150 * a phi in the loop header to account for backwards flow.
151 *
152 * However, if this variable is then also used outside the loop, we do
153 * need a phi after all. But we must not use this phi's def inside the
154 * loop, so we can eliminate the phi if it is unused later.
155 */
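/* Illustration (not from the original sources):
 *
 *    BGNLOOP
 *      MOV TEMP[0].x, CONST[0]   # def'd before any use in this loop
 *    ENDLOOP
 *    MOV OUT[0].x, TEMP[0].x     # first use is outside the loop
 *
 * bld_store below still builds the loop phi eagerly, but its def is never
 * referenced inside the loop, so the phi can be deleted if TEMP[0].x is in
 * fact not used after the loop.
 */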
156 static INLINE void
157 bld_store(struct bld_context *bld,
158 struct bld_register *regs, int i, int c, struct nv_value *val)
159 {
160 const uint16_t m = 1 << bld->loop_lvl;
161 struct bld_register *reg = &regs[i * 4 + c];
162
163 if (bld->loop_lvl && !(m & (reg->loop_def | reg->loop_use)))
164 bld_loop_phi(bld, reg, val);
165
166 reg->current = val;
167 bld_register_add_val(reg, reg->current);
168
169 reg->loop_def |= 1 << bld->loop_lvl;
170 }
171
172 #define FETCH_TEMP(i, c) bld_fetch(bld, &bld->tvs[0][0], i, c)
173 #define STORE_TEMP(i, c, v) bld_store(bld, &bld->tvs[0][0], i, c, (v))
174 #define FETCH_ADDR(i, c) bld_fetch(bld, &bld->avs[0][0], i, c)
175 #define STORE_ADDR(i, c, v) bld_store(bld, &bld->avs[0][0], i, c, (v))
176 #define FETCH_PRED(i, c) bld_fetch(bld, &bld->pvs[0][0], i, c)
177 #define STORE_PRED(i, c, v) bld_store(bld, &bld->pvs[0][0], i, c, (v))
178 #define STORE_OUTP(i, c, v) \
179 do { \
180 bld_store(bld, &bld->ovs[0][0], i, c, (v)); \
181 bld->outputs_written[(i) / 8] |= 1 << (((i) * 4 + (c)) % 32); \
182 } while (0)
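/* Example of the outputs_written layout (one nibble per output, 8 outputs
 * per 32-bit word): STORE_OUTP(3, 2, v) sets bit (3 * 4 + 2) % 32 = 14 of
 * word 0, and STORE_OUTP(9, 1, v) sets bit 5 of word 1.
 */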
183
184 static INLINE void
185 bld_clear_def_use(struct bld_register *regs, int n, int lvl)
186 {
187 int i;
188 const uint16_t mask = ~(1 << lvl);
189
190 for (i = 0; i < n * 4; ++i) {
191 regs[i].loop_def &= mask;
192 regs[i].loop_use &= mask;
193 }
194 }
195
196 static INLINE void
197 bld_warn_uninitialized(struct bld_context *bld, int kind,
198 struct bld_register *reg, struct nv_basic_block *b)
199 {
200 #ifdef NOUVEAU_DEBUG
201 long i = (reg - &bld->tvs[0][0]) / 4;
202 long c = (reg - &bld->tvs[0][0]) & 3;
203
204 if (c == 3)
205 c = -1;
206 debug_printf("WARNING: TEMP[%li].%c %s used uninitialized in BB:%i\n",
207 i, (int)('x' + c), kind ? "may be" : "is", b->id);
208 #endif
209 }
210
211 static INLINE struct nv_value *
212 bld_def(struct nv_instruction *i, int c, struct nv_value *value)
213 {
214 i->def[c] = value;
215 value->insn = i;
216 return value;
217 }
218
219 static INLINE struct nv_value *
220 find_by_bb(struct bld_register *reg, struct nv_basic_block *b)
221 {
222 int i;
223
224 if (reg->current && reg->current->insn->bb == b)
225 return reg->current;
226
227 for (i = 0; i < reg->vals.size / sizeof(struct nv_value *); ++i)
228 if ((*bld_register_access(reg, i))->insn->bb == b)
229 return *bld_register_access(reg, i);
230 return NULL;
231 }
232
233 /* Fetch value from register that was defined in the specified BB,
234 * or search for the first definition in each of its predecessors.
235 */
236 static void
237 fetch_by_bb(struct bld_register *reg,
238 struct nv_value **vals, int *n,
239 struct nv_basic_block *b)
240 {
241 int i;
242 struct nv_value *val;
243
244 assert(*n < 16); /* MAX_COND_NESTING */
245
246 val = find_by_bb(reg, b);
247 if (val) {
248 for (i = 0; i < *n; ++i)
249 if (vals[i] == val)
250 return;
251 vals[(*n)++] = val;
252 return;
253 }
254 for (i = 0; i < b->num_in; ++i)
255 if (!IS_WALL_EDGE(b->in_kind[i]))
256 fetch_by_bb(reg, vals, n, b->in[i]);
257 }
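/* E.g. in a diamond CFG where both branches of an IF redefine the register,
 * this collects one value per branch and bld_phi() below joins them with a
 * two-source phi at the merge block.
 */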
258
259 static INLINE struct nv_value *
260 bld_load_imm_u32(struct bld_context *bld, uint32_t u);
261
262 static INLINE struct nv_value *
263 bld_undef(struct bld_context *bld, ubyte file)
264 {
265 struct nv_instruction *nvi = new_instruction(bld->pc, NV_OP_UNDEF);
266
267 return bld_def(nvi, 0, new_value(bld->pc, file, 4));
268 }
269
270 static struct nv_value *
271 bld_phi(struct bld_context *bld, struct nv_basic_block *b,
272 struct bld_register *reg)
273 {
274 struct nv_basic_block *in;
275 struct nv_value *vals[16] = { NULL };
276 struct nv_value *val;
277 struct nv_instruction *phi;
278 int i, j, n;
279
280 do {
281 i = n = 0;
282 fetch_by_bb(reg, vals, &n, b);
283
284 if (!n) {
285 bld_warn_uninitialized(bld, 0, reg, b);
286 return NULL;
287 }
288
289 if (n == 1) {
290 if (nvc0_bblock_dominated_by(b, vals[0]->insn->bb))
291 break;
292
293 bld_warn_uninitialized(bld, 1, reg, b);
294
295 /* back-track to insert the missing value of the other path */
296 in = b;
297 while (in->in[0]) {
298 if (in->num_in == 1) {
299 in = in->in[0];
300 } else {
301 if (!nvc0_bblock_reachable_by(in->in[0], vals[0]->insn->bb, b))
302 in = in->in[0];
303 else
304 if (!nvc0_bblock_reachable_by(in->in[1], vals[0]->insn->bb, b))
305 in = in->in[1];
306 else
307 in = in->in[0];
308 }
309 }
310 bld->pc->current_block = in;
311
312 /* record an undef so the retry sees a definition on this path */
313 bld_register_add_val(reg, bld_undef(bld, vals[0]->reg.file));
314 continue;
315 }
316
317 for (i = 0; i < n; ++i) {
318 /* if value dominates b, continue to the redefinitions */
319 if (nvc0_bblock_dominated_by(b, vals[i]->insn->bb))
320 continue;
321
322 /* if value dominates any in-block, b should be the dom frontier */
323 for (j = 0; j < b->num_in; ++j)
324 if (nvc0_bblock_dominated_by(b->in[j], vals[i]->insn->bb))
325 break;
326 /* otherwise, find the dominance frontier and put the phi there */
327 if (j == b->num_in) {
328 in = nvc0_bblock_dom_frontier(vals[i]->insn->bb);
329 val = bld_phi(bld, in, reg);
330 bld_register_add_val(reg, val);
331 break;
332 }
333 }
334 } while(i < n);
335
336 bld->pc->current_block = b;
337
338 if (n == 1)
339 return vals[0];
340
341 phi = new_instruction(bld->pc, NV_OP_PHI);
342
343 bld_def(phi, 0, new_value(bld->pc, vals[0]->reg.file, vals[0]->reg.size));
344 for (i = 0; i < n; ++i)
345 nv_reference(bld->pc, phi, i, vals[i]);
346
347 return phi->def[0];
348 }
349
350 /* Insert a phi function in the loop header.
351 * For nested loops, we need to insert phi functions in all the outer
352 * loop headers if they don't have one yet.
353 *
354 * @def: redefinition from inside loop, or NULL if to be replaced later
355 */
356 static struct nv_value *
357 bld_loop_phi(struct bld_context *bld, struct bld_register *reg,
358 struct nv_value *def)
359 {
360 struct nv_instruction *phi;
361 struct nv_basic_block *bb = bld->pc->current_block;
362 struct nv_value *val = NULL;
363
364 if (bld->loop_lvl > 1) {
365 --bld->loop_lvl;
366 if (!((reg->loop_def | reg->loop_use) & (1 << bld->loop_lvl)))
367 val = bld_loop_phi(bld, reg, NULL);
368 ++bld->loop_lvl;
369 }
370
371 if (!val)
372 val = bld_phi(bld, bld->pc->current_block, reg); /* old definition */
373 if (!val) {
374 bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1]->in[0];
375 val = bld_undef(bld, bld_register_file(bld, reg));
376 }
377
378 bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1];
379
380 phi = new_instruction(bld->pc, NV_OP_PHI);
381
382 bld_def(phi, 0, new_value_like(bld->pc, val));
383 if (!def)
384 def = phi->def[0];
385
386 bld_register_add_val(reg, phi->def[0]);
387
388 phi->target = (struct nv_basic_block *)reg; /* cheat: recorded for bld_loop_end */
389
390 nv_reference(bld->pc, phi, 0, val);
391 nv_reference(bld->pc, phi, 1, def);
392
393 bld->pc->current_block = bb;
394
395 return phi->def[0];
396 }
397
398 static INLINE struct nv_value *
399 bld_fetch_global(struct bld_context *bld, struct bld_register *reg)
400 {
401 const uint16_t m = 1 << bld->loop_lvl;
402 const uint16_t use = reg->loop_use;
403
404 reg->loop_use |= m;
405
406 /* If neither used nor def'd inside the loop, build a phi in foresight,
407 * so we don't have to replace stuff later on, which requires tracking.
408 */
409 if (bld->loop_lvl && !((use | reg->loop_def) & m))
410 return bld_loop_phi(bld, reg, NULL);
411
412 return bld_phi(bld, bld->pc->current_block, reg);
413 }
414
415 static INLINE struct nv_value *
416 bld_imm_u32(struct bld_context *bld, uint32_t u)
417 {
418 int i;
419 unsigned n = bld->num_immds;
420
421 for (i = 0; i < n; ++i)
422 if (bld->saved_immd[i]->reg.imm.u32 == u)
423 return bld->saved_immd[i];
424
425 assert(n < BLD_MAX_IMMDS);
426 bld->num_immds++;
427
428 bld->saved_immd[n] = new_value(bld->pc, NV_FILE_IMM, 4);
429 bld->saved_immd[n]->reg.imm.u32 = u;
430 return bld->saved_immd[n];
431 }
432
433 static void
434 bld_replace_value(struct nv_pc *, struct nv_basic_block *, struct nv_value *,
435 struct nv_value *);
436
437 /* Replace the source of the phi in the loop header by the last assignment,
438 * or eliminate the phi function if there is no assignment inside the loop.
439 *
440 * Redundancy situation 1 - (used) but (not redefined) value:
441 * %3 = phi %0, %3   (%3 is used inside the loop)
442 * %3 = phi %0, %4   (%4 is the new definition)
443 *
444 * Redundancy situation 2 - (not used) but (redefined) value:
445 * %3 = phi %0, %2   (%2 is used inside; %3 may be used outside, or is deleted by DCE)
446 */
447 static void
448 bld_loop_end(struct bld_context *bld, struct nv_basic_block *bb)
449 {
450 struct nv_basic_block *save = bld->pc->current_block;
451 struct nv_instruction *phi, *next;
452 struct nv_value *val;
453 struct bld_register *reg;
454 int i, s, n;
455
456 for (phi = bb->phi; phi && phi->opcode == NV_OP_PHI; phi = next) {
457 next = phi->next;
458
459 reg = (struct bld_register *)phi->target;
460 phi->target = NULL;
461
462 for (s = 1, n = 0; n < bb->num_in; ++n) {
463 if (bb->in_kind[n] != CFG_EDGE_BACK)
464 continue;
465
466 assert(s < 4);
467 bld->pc->current_block = bb->in[n];
468 val = bld_fetch_global(bld, reg);
469
470 for (i = 0; i < 4; ++i)
471 if (phi->src[i] && phi->src[i]->value == val)
472 break;
473 if (i == 4)
474 nv_reference(bld->pc, phi, s++, val);
475 }
476 bld->pc->current_block = save;
477
478 if (phi->src[0]->value == phi->def[0] ||
479 phi->src[0]->value == phi->src[1]->value)
480 s = 1;
481 else
482 if (phi->src[1]->value == phi->def[0])
483 s = 0;
484 else
485 continue;
486
487 if (s >= 0) {
488 /* eliminate the phi */
489 bld_register_del_val(reg, phi->def[0]);
490
491 ++bld->pc->pass_seq;
492 bld_replace_value(bld->pc, bb, phi->def[0], phi->src[s]->value);
493
494 nvc0_insn_delete(phi);
495 }
496 }
497 }
498
499 static INLINE struct nv_value *
500 bld_imm_f32(struct bld_context *bld, float f)
501 {
502 return bld_imm_u32(bld, fui(f));
503 }
504
505 static struct nv_value *
506 bld_insn_1(struct bld_context *bld, uint opcode, struct nv_value *src0)
507 {
508 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
509
510 nv_reference(bld->pc, insn, 0, src0);
511
512 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
513 }
514
515 static struct nv_value *
516 bld_insn_2(struct bld_context *bld, uint opcode,
517 struct nv_value *src0, struct nv_value *src1)
518 {
519 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
520
521 nv_reference(bld->pc, insn, 0, src0);
522 nv_reference(bld->pc, insn, 1, src1);
523
524 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
525 }
526
527 static struct nv_value *
528 bld_insn_3(struct bld_context *bld, uint opcode,
529 struct nv_value *src0, struct nv_value *src1,
530 struct nv_value *src2)
531 {
532 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
533
534 nv_reference(bld->pc, insn, 0, src0);
535 nv_reference(bld->pc, insn, 1, src1);
536 nv_reference(bld->pc, insn, 2, src2);
537
538 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.size));
539 }
540
541 static INLINE void
542 bld_src_predicate(struct bld_context *bld,
543 struct nv_instruction *nvi, int s, struct nv_value *val)
544 {
545 nvi->predicate = s;
546 nv_reference(bld->pc, nvi, s, val);
547 }
548
549 static INLINE void
550 bld_src_pointer(struct bld_context *bld,
551 struct nv_instruction *nvi, int s, struct nv_value *val)
552 {
553 nvi->indirect = s;
554 nv_reference(bld->pc, nvi, s, val);
555 }
556
557 static void
558 bld_lmem_store(struct bld_context *bld, struct nv_value *ptr, int ofst,
559 struct nv_value *val)
560 {
561 struct nv_instruction *insn = new_instruction(bld->pc, NV_OP_ST);
562 struct nv_value *loc;
563
564 loc = new_value(bld->pc, NV_FILE_MEM_L, nv_type_sizeof(NV_TYPE_U32));
565
566 loc->reg.address = ofst * 4;
567
568 nv_reference(bld->pc, insn, 0, loc);
569 nv_reference(bld->pc, insn, 1, ptr);
570 nv_reference(bld->pc, insn, 2, val);
571 }
572
573 static struct nv_value *
574 bld_lmem_load(struct bld_context *bld, struct nv_value *ptr, int ofst)
575 {
576 struct nv_value *loc, *val;
577
578 loc = new_value(bld->pc, NV_FILE_MEM_L, nv_type_sizeof(NV_TYPE_U32));
579
580 loc->reg.address = ofst * 4;
581
582 val = bld_insn_2(bld, NV_OP_LD, loc, ptr);
583
584 return val;
585 }
586
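/* Computes x^e as 2^(e * log2(x)); as everywhere else in this file, EX2 is
 * preceded by the PREEX2 argument fixup.
 */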
587 static struct nv_value *
588 bld_pow(struct bld_context *bld, struct nv_value *x, struct nv_value *e)
589 {
590 struct nv_value *val;
591
592 val = bld_insn_1(bld, NV_OP_LG2, x);
593 val = bld_insn_2(bld, NV_OP_MUL_F32, e, val);
594
595 val = bld_insn_1(bld, NV_OP_PREEX2, val);
596 val = bld_insn_1(bld, NV_OP_EX2, val);
597
598 return val;
599 }
600
601 static INLINE struct nv_value *
602 bld_load_imm_f32(struct bld_context *bld, float f)
603 {
604 if (f == 0.0f)
605 return bld->zero;
606 return bld_insn_1(bld, NV_OP_MOV, bld_imm_f32(bld, f));
607 }
608
609 static INLINE struct nv_value *
610 bld_load_imm_u32(struct bld_context *bld, uint32_t u)
611 {
612 if (u == 0)
613 return bld->zero;
614 return bld_insn_1(bld, NV_OP_MOV, bld_imm_u32(bld, u));
615 }
616
617 static INLINE struct nv_value *
618 bld_setp(struct bld_context *bld, uint op, uint8_t cc,
619 struct nv_value *src0, struct nv_value *src1)
620 {
621 struct nv_value *val = bld_insn_2(bld, op, src0, src1);
622
623 val->reg.file = NV_FILE_PRED;
624 val->reg.size = 1;
625 val->insn->set_cond = cc & 0xf;
626 return val;
627 }
628
629 static INLINE struct nv_value *
630 bld_cvt(struct bld_context *bld, uint8_t dt, uint8_t st, struct nv_value *src)
631 {
632 struct nv_value *val = bld_insn_1(bld, NV_OP_CVT, src);
633 val->insn->ext.cvt.d = dt;
634 val->insn->ext.cvt.s = st;
635 return val;
636 }
637
638 static void
639 bld_kil(struct bld_context *bld, struct nv_value *src)
640 {
641 struct nv_instruction *nvi;
642
643 src = bld_setp(bld, NV_OP_SET_F32, NV_CC_LT, src, bld->zero);
644
645 nvi = new_instruction(bld->pc, NV_OP_KIL);
646 nvi->fixed = 1;
647
648 bld_src_predicate(bld, nvi, 0, src);
649 }
650
651 static void
652 bld_flow(struct bld_context *bld, uint opcode,
653 struct nv_value *src, struct nv_basic_block *target,
654 boolean reconverge)
655 {
656 struct nv_instruction *nvi;
657
658 if (reconverge)
659 new_instruction(bld->pc, NV_OP_JOINAT)->fixed = 1;
660
661 nvi = new_instruction(bld->pc, opcode);
662 nvi->target = target;
663 nvi->terminator = 1;
664 if (src)
665 bld_src_predicate(bld, nvi, 0, src);
666 }
667
668 static ubyte
669 translate_setcc(unsigned opcode)
670 {
671 switch (opcode) {
672 case TGSI_OPCODE_SLT: return NV_CC_LT;
673 case TGSI_OPCODE_SGE: return NV_CC_GE;
674 case TGSI_OPCODE_SEQ: return NV_CC_EQ;
675 case TGSI_OPCODE_SGT: return NV_CC_GT;
676 case TGSI_OPCODE_SLE: return NV_CC_LE;
677 case TGSI_OPCODE_SNE: return NV_CC_NE | NV_CC_U;
678 case TGSI_OPCODE_STR: return NV_CC_TR;
679 case TGSI_OPCODE_SFL: return NV_CC_FL;
680
681 case TGSI_OPCODE_ISLT: return NV_CC_LT;
682 case TGSI_OPCODE_ISGE: return NV_CC_GE;
683 case TGSI_OPCODE_USEQ: return NV_CC_EQ;
684 case TGSI_OPCODE_USGE: return NV_CC_GE;
685 case TGSI_OPCODE_USLT: return NV_CC_LT;
686 case TGSI_OPCODE_USNE: return NV_CC_NE;
687 default:
688 assert(0);
689 return NV_CC_FL;
690 }
691 }
692
693 static uint
694 translate_opcode(uint opcode)
695 {
696 switch (opcode) {
697 case TGSI_OPCODE_ABS: return NV_OP_ABS_F32;
698 case TGSI_OPCODE_ADD: return NV_OP_ADD_F32;
699 case TGSI_OPCODE_SUB: return NV_OP_SUB_F32;
700 case TGSI_OPCODE_UADD: return NV_OP_ADD_B32;
701 case TGSI_OPCODE_AND: return NV_OP_AND;
702 case TGSI_OPCODE_EX2: return NV_OP_EX2;
703 case TGSI_OPCODE_CEIL: return NV_OP_CEIL;
704 case TGSI_OPCODE_FLR: return NV_OP_FLOOR;
705 case TGSI_OPCODE_TRUNC: return NV_OP_TRUNC;
706 case TGSI_OPCODE_COS: return NV_OP_COS;
707 case TGSI_OPCODE_SIN: return NV_OP_SIN;
708 case TGSI_OPCODE_DDX: return NV_OP_DFDX;
709 case TGSI_OPCODE_DDY: return NV_OP_DFDY;
710 case TGSI_OPCODE_F2I:
711 case TGSI_OPCODE_F2U:
712 case TGSI_OPCODE_I2F:
713 case TGSI_OPCODE_U2F: return NV_OP_CVT;
714 case TGSI_OPCODE_INEG: return NV_OP_NEG_S32;
715 case TGSI_OPCODE_LG2: return NV_OP_LG2;
716 case TGSI_OPCODE_ISHR: return NV_OP_SAR;
717 case TGSI_OPCODE_USHR: return NV_OP_SHR;
718 case TGSI_OPCODE_MAD: return NV_OP_MAD_F32;
719 case TGSI_OPCODE_MAX: return NV_OP_MAX_F32;
720 case TGSI_OPCODE_IMAX: return NV_OP_MAX_S32;
721 case TGSI_OPCODE_UMAX: return NV_OP_MAX_U32;
722 case TGSI_OPCODE_MIN: return NV_OP_MIN_F32;
723 case TGSI_OPCODE_IMIN: return NV_OP_MIN_S32;
724 case TGSI_OPCODE_UMIN: return NV_OP_MIN_U32;
725 case TGSI_OPCODE_MUL: return NV_OP_MUL_F32;
726 case TGSI_OPCODE_UMUL: return NV_OP_MUL_B32;
727 case TGSI_OPCODE_OR: return NV_OP_OR;
728 case TGSI_OPCODE_RCP: return NV_OP_RCP;
729 case TGSI_OPCODE_RSQ: return NV_OP_RSQ;
730 case TGSI_OPCODE_SAD: return NV_OP_SAD;
731 case TGSI_OPCODE_SHL: return NV_OP_SHL;
732 case TGSI_OPCODE_SLT:
733 case TGSI_OPCODE_SGE:
734 case TGSI_OPCODE_SEQ:
735 case TGSI_OPCODE_SGT:
736 case TGSI_OPCODE_SLE:
737 case TGSI_OPCODE_SNE: return NV_OP_FSET_F32;
738 case TGSI_OPCODE_ISLT:
739 case TGSI_OPCODE_ISGE: return NV_OP_SET_S32;
740 case TGSI_OPCODE_USEQ:
741 case TGSI_OPCODE_USGE:
742 case TGSI_OPCODE_USLT:
743 case TGSI_OPCODE_USNE: return NV_OP_SET_U32;
744 case TGSI_OPCODE_TEX: return NV_OP_TEX;
745 case TGSI_OPCODE_TXP: return NV_OP_TEX;
746 case TGSI_OPCODE_TXB: return NV_OP_TXB;
747 case TGSI_OPCODE_TXL: return NV_OP_TXL;
748 case TGSI_OPCODE_XOR: return NV_OP_XOR;
749 default:
750 return NV_OP_NOP;
751 }
752 }
753
754 #if 0
755 static ubyte
756 infer_src_type(unsigned opcode)
757 {
758 switch (opcode) {
759 case TGSI_OPCODE_MOV:
760 case TGSI_OPCODE_AND:
761 case TGSI_OPCODE_OR:
762 case TGSI_OPCODE_XOR:
763 case TGSI_OPCODE_SAD:
764 case TGSI_OPCODE_U2F:
765 case TGSI_OPCODE_UADD:
766 case TGSI_OPCODE_UDIV:
767 case TGSI_OPCODE_UMOD:
768 case TGSI_OPCODE_UMAD:
769 case TGSI_OPCODE_UMUL:
770 case TGSI_OPCODE_UMAX:
771 case TGSI_OPCODE_UMIN:
772 case TGSI_OPCODE_USEQ:
773 case TGSI_OPCODE_USGE:
774 case TGSI_OPCODE_USLT:
775 case TGSI_OPCODE_USNE:
776 case TGSI_OPCODE_USHR:
777 return NV_TYPE_U32;
778 case TGSI_OPCODE_I2F:
779 case TGSI_OPCODE_IDIV:
780 case TGSI_OPCODE_IMAX:
781 case TGSI_OPCODE_IMIN:
782 case TGSI_OPCODE_INEG:
783 case TGSI_OPCODE_ISGE:
784 case TGSI_OPCODE_ISHR:
785 case TGSI_OPCODE_ISLT:
786 return NV_TYPE_S32;
787 default:
788 return NV_TYPE_F32;
789 }
790 }
791
792 static ubyte
793 infer_dst_type(unsigned opcode)
794 {
795 switch (opcode) {
796 case TGSI_OPCODE_MOV:
797 case TGSI_OPCODE_F2U:
798 case TGSI_OPCODE_AND:
799 case TGSI_OPCODE_OR:
800 case TGSI_OPCODE_XOR:
801 case TGSI_OPCODE_SAD:
802 case TGSI_OPCODE_UADD:
803 case TGSI_OPCODE_UDIV:
804 case TGSI_OPCODE_UMOD:
805 case TGSI_OPCODE_UMAD:
806 case TGSI_OPCODE_UMUL:
807 case TGSI_OPCODE_UMAX:
808 case TGSI_OPCODE_UMIN:
809 case TGSI_OPCODE_USEQ:
810 case TGSI_OPCODE_USGE:
811 case TGSI_OPCODE_USLT:
812 case TGSI_OPCODE_USNE:
813 case TGSI_OPCODE_USHR:
814 return NV_TYPE_U32;
815 case TGSI_OPCODE_F2I:
816 case TGSI_OPCODE_IDIV:
817 case TGSI_OPCODE_IMAX:
818 case TGSI_OPCODE_IMIN:
819 case TGSI_OPCODE_INEG:
820 case TGSI_OPCODE_ISGE:
821 case TGSI_OPCODE_ISHR:
822 case TGSI_OPCODE_ISLT:
823 return NV_TYPE_S32;
824 default:
825 return NV_TYPE_F32;
826 }
827 }
828 #endif
829
830 static void
831 emit_store(struct bld_context *bld, const struct tgsi_full_instruction *inst,
832 unsigned chan, struct nv_value *res)
833 {
834 const struct tgsi_full_dst_register *reg = &inst->Dst[0];
835 struct nv_instruction *nvi;
836 struct nv_value *mem;
837 struct nv_value *ptr = NULL;
838 int idx;
839
840 idx = reg->Register.Index;
841 assert(chan < 4);
842
843 if (reg->Register.Indirect)
844 ptr = FETCH_ADDR(reg->Indirect.Index,
845 tgsi_util_get_src_register_swizzle(&reg->Indirect, 0));
846
847 switch (inst->Instruction.Saturate) {
848 case TGSI_SAT_NONE:
849 break;
850 case TGSI_SAT_ZERO_ONE:
851 res = bld_insn_1(bld, NV_OP_SAT, res);
852 break;
853 case TGSI_SAT_MINUS_PLUS_ONE:
854 res = bld_insn_2(bld, NV_OP_MAX_F32, res, bld_load_imm_f32(bld, -1.0f));
855 res = bld_insn_2(bld, NV_OP_MIN_F32, res, bld_load_imm_f32(bld, 1.0f));
856 break;
857 }
858
859 switch (reg->Register.File) {
860 case TGSI_FILE_OUTPUT:
861 if (!res->insn)
862 res = bld_insn_1(bld, NV_OP_MOV, res);
863
864 if (bld->pc->is_fragprog) {
865 assert(!ptr);
866 STORE_OUTP(idx, chan, res);
867 } else {
868 nvi = new_instruction(bld->pc, NV_OP_EXPORT);
869 mem = new_value(bld->pc, bld->ti->output_file, res->reg.size);
870 nv_reference(bld->pc, nvi, 0, mem);
871 nv_reference(bld->pc, nvi, 1, res);
872 if (!ptr)
873 mem->reg.address = bld->ti->output_loc[idx][chan];
874 else
875 mem->reg.address = 0x80 + idx * 16 + chan * 4;
876 nvi->fixed = 1;
877 }
878 break;
879 case TGSI_FILE_TEMPORARY:
880 assert(idx < BLD_MAX_TEMPS);
881 if (!res->insn || res->insn->bb != bld->pc->current_block)
882 res = bld_insn_1(bld, NV_OP_MOV, res);
883
884 assert(res->reg.file == NV_FILE_GPR);
885
886 if (bld->ti->require_stores)
887 bld_lmem_store(bld, ptr, idx * 4 + chan, res);
888 else
889 STORE_TEMP(idx, chan, res);
890 break;
891 case TGSI_FILE_ADDRESS:
892 assert(idx < BLD_MAX_ADDRS);
893 STORE_ADDR(idx, chan, res);
894 break;
895 }
896 }
897
898 static INLINE uint32_t
899 bld_is_output_written(struct bld_context *bld, int i, int c)
900 {
901 if (c < 0)
902 return bld->outputs_written[i / 8] & (0xf << ((i * 4) % 32));
903 return bld->outputs_written[i / 8] & (1 << ((i * 4 + c) % 32));
904 }
905
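/* Emit the user clip plane distances, dist[i] = dot(HPOS, ucp[i]), with
 * ucp[i] read from const buffer 15; the results are exported to
 * o[0x2c0 + i * 4].
 */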
906 static void
907 bld_append_vp_ucp(struct bld_context *bld)
908 {
909 struct nv_value *res[6];
910 struct nv_value *ucp, *vtx, *out;
911 struct nv_instruction *insn;
912 int i, c;
913
914 assert(bld->ti->prog->vp.num_ucps <= 6);
915
916 for (c = 0; c < 4; ++c) {
917 vtx = bld_fetch_global(bld, &bld->ovs[bld->hpos_index][c]);
918
919 for (i = 0; i < bld->ti->prog->vp.num_ucps; ++i) {
920 ucp = new_value(bld->pc, NV_FILE_MEM_C(15), 4);
921 ucp->reg.address = i * 16 + c * 4;
922
923 if (c == 0)
924 res[i] = bld_insn_2(bld, NV_OP_MUL_F32, vtx, ucp);
925 else
926 res[i] = bld_insn_3(bld, NV_OP_MAD_F32, vtx, ucp, res[i]);
927 }
928 }
929
930 for (i = 0; i < bld->ti->prog->vp.num_ucps; ++i) {
931 (out = new_value(bld->pc, NV_FILE_MEM_V, 4))->reg.address = 0x2c0 + i * 4;
932 (insn = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;
933 nv_reference(bld->pc, insn, 0, out);
934 nv_reference(bld->pc, insn, 1, res[i]);
935 }
936 }
937
938 static void
939 bld_export_fp_outputs(struct bld_context *bld)
940 {
941 struct nv_value *vals[4];
942 struct nv_instruction *nvi;
943 int i, c, n;
944
945 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; ++i) {
946 if (!bld_is_output_written(bld, i, -1))
947 continue;
948 for (n = 0, c = 0; c < 4; ++c) {
949 if (!bld_is_output_written(bld, i, c))
950 continue;
951 vals[n] = bld_fetch_global(bld, &bld->ovs[i][c]);
952 assert(vals[n]);
953 vals[n] = bld_insn_1(bld, NV_OP_MOV, vals[n]);
954 vals[n++]->reg.id = bld->ti->output_loc[i][c];
955 }
956 assert(n);
957
958 (nvi = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;
959 for (c = 0; c < n; ++c)
960 nv_reference(bld->pc, nvi, c, vals[c]);
961 }
962 }
963
964 static void
965 bld_new_block(struct bld_context *bld, struct nv_basic_block *b)
966 {
967 int i, c;
968
969 bld->pc->current_block = b;
970
971 for (i = 0; i < 4; ++i)
972 bld->saved_addr[i][0] = NULL;
973 for (i = 0; i < PIPE_MAX_SHADER_INPUTS; ++i)
974 for (c = 0; c < 4; ++c)
975 bld->saved_inputs[i][c] = NULL;
976
977 bld->out_kind = CFG_EDGE_FORWARD;
978 }
979
980 static struct nv_value *
981 bld_get_saved_input(struct bld_context *bld, unsigned i, unsigned c)
982 {
983 if (bld->saved_inputs[i][c])
984 return bld->saved_inputs[i][c];
985 return NULL;
986 }
987
988 static struct nv_value *
989 bld_interp(struct bld_context *bld, unsigned mode, struct nv_value *val)
990 {
991 unsigned cent = mode & NVC0_INTERP_CENTROID;
992
993 mode &= ~NVC0_INTERP_CENTROID;
994
995 if (val->reg.address == 0x3fc) {
996 /* gl_FrontFacing: 0/~0 to -1.0/+1.0 */
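/* e.g. ~0 << 31 = 0x80000000, 0x80000000 ^ fui(-1.0f) = 0x3f800000 = +1.0f,
 * while 0 << 31 = 0 and 0 ^ fui(-1.0f) = -1.0f
 */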
997 val = bld_insn_1(bld, NV_OP_LINTERP, val);
998 val->insn->flat = 1;
999 val = bld_insn_2(bld, NV_OP_SHL, val, bld_imm_u32(bld, 31));
1000 val = bld_insn_2(bld, NV_OP_XOR, val, bld_imm_f32(bld, -1.0f));
1001 return val;
1002 } else
1003 if (mode == NVC0_INTERP_PERSPECTIVE) {
1004 val = bld_insn_2(bld, NV_OP_PINTERP, val, bld->frag_coord[3]);
1005 } else {
1006 val = bld_insn_1(bld, NV_OP_LINTERP, val);
1007 }
1008
1009 val->insn->flat = mode == NVC0_INTERP_FLAT ? 1 : 0;
1010 val->insn->centroid = cent ? 1 : 0;
1011 return val;
1012 }
1013
1014 static struct nv_value *
1015 emit_fetch(struct bld_context *bld, const struct tgsi_full_instruction *insn,
1016 const unsigned s, const unsigned chan)
1017 {
1018 const struct tgsi_full_src_register *src = &insn->Src[s];
1019 struct nv_value *res = NULL;
1020 struct nv_value *ptr = NULL;
1021 int idx, ind_idx, dim_idx;
1022 unsigned swz, ind_swz, sgn;
1023
1024 idx = src->Register.Index;
1025 swz = tgsi_util_get_full_src_register_swizzle(src, chan);
1026
1027 if (src->Register.Indirect) {
1028 ind_idx = src->Indirect.Index;
1029 ind_swz = tgsi_util_get_src_register_swizzle(&src->Indirect, 0);
1030
1031 ptr = FETCH_ADDR(ind_idx, ind_swz);
1032 }
1033
1034 if (src->Register.Dimension)
1035 dim_idx = src->Dimension.Index;
1036 else
1037 dim_idx = 0;
1038
1039 switch (src->Register.File) {
1040 case TGSI_FILE_CONSTANT:
1041 assert(dim_idx < 14);
1042 res = new_value(bld->pc, NV_FILE_MEM_C(dim_idx), 4);
1043 res->reg.address = idx * 16 + swz * 4;
1044 res = bld_insn_1(bld, NV_OP_LD, res);
1045 if (ptr)
1046 bld_src_pointer(bld, res->insn, 1, ptr);
1047 break;
1048 case TGSI_FILE_IMMEDIATE: /* XXX: type for MOV TEMP[0], -IMM[0] */
1049 assert(idx < bld->ti->immd32_nr);
1050 res = bld_load_imm_u32(bld, bld->ti->immd32[idx * 4 + swz]);
1051 break;
1052 case TGSI_FILE_INPUT:
1053 assert(!src->Register.Dimension);
1054 if (!ptr) {
1055 res = bld_get_saved_input(bld, idx, swz);
1056 if (res)
1057 return res;
1058 }
1059 res = new_value(bld->pc, bld->ti->input_file, 4);
1060 if (ptr)
1061 res->reg.address = 0x80 + idx * 16 + swz * 4;
1062 else
1063 res->reg.address = bld->ti->input_loc[idx][swz];
1064
1065 if (bld->pc->is_fragprog)
1066 res = bld_interp(bld, bld->ti->interp_mode[idx], res);
1067 else
1068 res = bld_insn_1(bld, NV_OP_VFETCH, res);
1069
1070 if (ptr)
1071 bld_src_pointer(bld, res->insn, res->insn->src[1] ? 2 : 1, ptr);
1072 else
1073 bld->saved_inputs[idx][swz] = res;
1074 break;
1075 case TGSI_FILE_TEMPORARY:
1076 if (bld->ti->require_stores)
1077 res = bld_lmem_load(bld, ptr, idx * 4 + swz);
1078 else
1079 res = bld_fetch_global(bld, &bld->tvs[idx][swz]);
1080 break;
1081 case TGSI_FILE_ADDRESS:
1082 res = bld_fetch_global(bld, &bld->avs[idx][swz]);
1083 break;
1084 case TGSI_FILE_PREDICATE:
1085 res = bld_fetch_global(bld, &bld->pvs[idx][swz]);
1086 break;
1087 case TGSI_FILE_SYSTEM_VALUE:
1088 assert(bld->ti->sysval_loc[idx] < 0xf00); /* >= would mean special reg */
1089 res = new_value(bld->pc,
1090 bld->pc->is_fragprog ? NV_FILE_MEM_V : NV_FILE_MEM_A, 4);
1091 res->reg.address = bld->ti->sysval_loc[idx];
1092
1093 if (res->reg.file == NV_FILE_MEM_A)
1094 res = bld_insn_1(bld, NV_OP_VFETCH, res);
1095 else
1096 res = bld_interp(bld, NVC0_INTERP_FLAT, res);
1097
1098 /* mesa doesn't do real integers yet :-( (and in GL this should be S32) */
1099 res = bld_cvt(bld, NV_TYPE_F32, NV_TYPE_U32, res);
1100 break;
1101 default:
1102 NOUVEAU_ERR("illegal/unhandled src reg file: %d\n", src->Register.File);
1103 abort();
1104 break;
1105 }
1106 if (!res)
1107 return bld_undef(bld, NV_FILE_GPR);
1108
1109 sgn = tgsi_util_get_full_src_register_sign_mode(src, chan);
1110
1111 switch (sgn) {
1112 case TGSI_UTIL_SIGN_KEEP:
1113 break;
1114 case TGSI_UTIL_SIGN_CLEAR:
1115 res = bld_insn_1(bld, NV_OP_ABS_F32, res);
1116 break;
1117 case TGSI_UTIL_SIGN_TOGGLE:
1118 res = bld_insn_1(bld, NV_OP_NEG_F32, res);
1119 break;
1120 case TGSI_UTIL_SIGN_SET:
1121 res = bld_insn_1(bld, NV_OP_ABS_F32, res);
1122 res = bld_insn_1(bld, NV_OP_NEG_F32, res);
1123 break;
1124 default:
1125 NOUVEAU_ERR("illegal/unhandled src reg sign mode\n");
1126 abort();
1127 break;
1128 }
1129
1130 return res;
1131 }
1132
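/* TGSI LIT: dst.x = dst.w = 1.0, dst.y = max(src.x, 0), and
 * dst.z = (src.x > 0) ? max(src.y, 0)^src.w : 0, with the exponent
 * clamped to +-127.999999 below.
 */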
1133 static void
1134 bld_lit(struct bld_context *bld, struct nv_value *dst0[4],
1135 const struct tgsi_full_instruction *insn)
1136 {
1137 struct nv_value *val0 = NULL;
1138 unsigned mask = insn->Dst[0].Register.WriteMask;
1139
1140 if (mask & ((1 << 0) | (1 << 3)))
1141 dst0[3] = dst0[0] = bld_load_imm_f32(bld, 1.0f);
1142
1143 if (mask & (3 << 1)) {
1144 val0 = bld_insn_2(bld, NV_OP_MAX_F32, emit_fetch(bld, insn, 0, 0), bld->zero);
1145 if (mask & (1 << 1))
1146 dst0[1] = val0;
1147 }
1148
1149 if (mask & (1 << 2)) {
1150 struct nv_value *val1, *val3, *src1, *src3, *pred;
1151 struct nv_value *pos128 = bld_load_imm_f32(bld, 127.999999f);
1152 struct nv_value *neg128 = bld_load_imm_f32(bld, -127.999999f);
1153
1154 src1 = emit_fetch(bld, insn, 0, 1);
1155 src3 = emit_fetch(bld, insn, 0, 3);
1156
1157 pred = bld_setp(bld, NV_OP_SET_F32, NV_CC_LE, val0, bld->zero);
1158
1159 val1 = bld_insn_2(bld, NV_OP_MAX_F32, src1, bld->zero);
1160 val3 = bld_insn_2(bld, NV_OP_MAX_F32, src3, neg128);
1161 val3 = bld_insn_2(bld, NV_OP_MIN_F32, val3, pos128);
1162 val3 = bld_pow(bld, val1, val3);
1163
1164 dst0[2] = bld_insn_1(bld, NV_OP_MOV, bld->zero);
1165 bld_src_predicate(bld, dst0[2]->insn, 1, pred);
1166
1167 dst0[2] = bld_insn_2(bld, NV_OP_SELECT, val3, dst0[2]);
1168 }
1169 }
1170
1171 static INLINE void
1172 describe_texture_target(unsigned target, int *dim,
1173 int *array, int *cube, int *shadow)
1174 {
1175 *array = *cube = *shadow = 0;
1176
1177 switch (target) {
1178 case TGSI_TEXTURE_1D:
1179 *dim = 1;
1180 break;
1181 case TGSI_TEXTURE_SHADOW1D:
1182 *dim = *shadow = 1;
1183 break;
1184 case TGSI_TEXTURE_UNKNOWN:
1185 case TGSI_TEXTURE_2D:
1186 case TGSI_TEXTURE_RECT:
1187 *dim = 2;
1188 break;
1189 case TGSI_TEXTURE_SHADOW2D:
1190 case TGSI_TEXTURE_SHADOWRECT:
1191 *dim = 2;
1192 *shadow = 1;
1193 break;
1194 case TGSI_TEXTURE_3D:
1195 *dim = 3;
1196 break;
1197 case TGSI_TEXTURE_CUBE:
1198 *dim = 2;
1199 *cube = 1;
1200 break;
1201 case TGSI_TEXTURE_1D_ARRAY:
1202 *dim = *array = 1;
1203 break;
1204 case TGSI_TEXTURE_2D_ARRAY:
1205 *dim = 2;
1206 *array = 1;
1207 break;
1208 /*
1209 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1210 *dim = *array = *shadow = 1;
1211 break;
1212 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1213 *dim = 2;
1214 *array = *shadow = 1;
1215 break;
1216 case TGSI_TEXTURE_CUBE_ARRAY:
1217 *dim = 2;
1218 *cube = *array = 1;
1219 break;
1220 */
1221 default:
1222 assert(0);
1223 break;
1224 }
1225 }
1226
1227 static struct nv_value *
1228 bld_clone(struct bld_context *bld, struct nv_instruction *nvi)
1229 {
1230 struct nv_instruction *dupi = new_instruction(bld->pc, nvi->opcode);
1231 struct nv_instruction *next, *prev;
1232 int c;
1233
1234 next = dupi->next;
1235 prev = dupi->prev;
1236
1237 *dupi = *nvi;
1238
1239 dupi->next = next;
1240 dupi->prev = prev;
1241
1242 for (c = 0; c < 5 && nvi->def[c]; ++c)
1243 bld_def(dupi, c, new_value_like(bld->pc, nvi->def[c]));
1244
1245 for (c = 0; c < 6 && nvi->src[c]; ++c) {
1246 dupi->src[c] = NULL;
1247 nv_reference(bld->pc, dupi, c, nvi->src[c]->value);
1248 }
1249
1250 return dupi->def[0];
1251 }
1252
1253 /* NOTE: proj(tc0) = (tc0 / w) / (tc3 / w) = tc0 / tc3, i.e. the 1/w factors cancel; the code below exploits this for PINTERP'd coords */
1254 static void
1255 load_proj_tex_coords(struct bld_context *bld,
1256 struct nv_value *t[4], int dim, int shadow,
1257 const struct tgsi_full_instruction *insn)
1258 {
1259 int c;
1260 unsigned mask = (1 << dim) - 1;
1261
1262 if (shadow)
1263 mask |= 4; /* depth comparison value */
1264
1265 t[3] = emit_fetch(bld, insn, 0, 3);
1266 if (t[3]->insn->opcode == NV_OP_PINTERP) {
1267 t[3] = bld_clone(bld, t[3]->insn);
1268 t[3]->insn->opcode = NV_OP_LINTERP;
1269 nv_reference(bld->pc, t[3]->insn, 1, NULL);
1270 }
1271 t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);
1272
1273 for (c = 0; c < 4; ++c) {
1274 if (!(mask & (1 << c)))
1275 continue;
1276 t[c] = emit_fetch(bld, insn, 0, c);
1277
1278 if (t[c]->insn->opcode != NV_OP_PINTERP)
1279 continue;
1280 mask &= ~(1 << c);
1281
1282 t[c] = bld_clone(bld, t[c]->insn);
1283 nv_reference(bld->pc, t[c]->insn, 1, t[3]);
1284 }
1285 if (mask == 0)
1286 return;
1287
1288 t[3] = emit_fetch(bld, insn, 0, 3);
1289 t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);
1290
1291 for (c = 0; c < 4; ++c)
1292 if (mask & (1 << c))
1293 t[c] = bld_insn_2(bld, NV_OP_MUL_F32, t[c], t[3]);
1294 }
1295
1296 /* For a quad of threads / top left, top right, bottom left, bottom right
1297 * pixels, do a different operation, and take src0 from a specific thread.
1298 */
1299 #define QOP_ADD 0
1300 #define QOP_SUBR 1
1301 #define QOP_SUB 2
1302 #define QOP_MOV1 3
1303
1304 #define QOP(a, b, c, d) \
1305 ((QOP_##a << 0) | (QOP_##b << 2) | (QOP_##c << 4) | (QOP_##d << 6))
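/* E.g. QOP(SUB, SUBR, SUB, SUBR) packs to 0x66: SUB in the left column of
 * the quad and reversed SUB in the right, the pattern a horizontal
 * difference (DDX-like) would use. Illustration only; bld_quadop is not
 * used in this file since DDX/DDY map to NV_OP_DFDX/DFDY directly.
 */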
1306
1307 static INLINE struct nv_value *
1308 bld_quadop(struct bld_context *bld, ubyte qop, struct nv_value *src0, int lane,
1309 struct nv_value *src1, boolean wp)
1310 {
1311 struct nv_value *val = bld_insn_2(bld, NV_OP_QUADOP, src0, src1);
1312 val->insn->lanes = lane;
1313 val->insn->quadop = qop;
1314 if (wp) {
1315 assert(!"quadop predicate write");
1316 }
1317 return val;
1318 }
1319
1320 /* order of TGSI operands: x y z layer shadow lod/bias */
1321 /* order of native operands: layer x y z | lod/bias shadow */
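/* e.g. a 2D array lookup binds { layer x y } into one register tuple, and a
 * shadow and/or biased lookup binds { lod/bias shadow } as a second tuple
 * (see the two NV_OP_BIND groups below)
 */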
1322 static struct nv_instruction *
1323 emit_tex(struct bld_context *bld, uint opcode, int tic, int tsc,
1324 struct nv_value *dst[4], struct nv_value *arg[4],
1325 int dim, int array, int cube, int shadow)
1326 {
1327 struct nv_value *src[4];
1328 struct nv_instruction *nvi, *bnd;
1329 int c;
1330 int s = 0;
1331 boolean lodbias = opcode == NV_OP_TXB || opcode == NV_OP_TXL;
1332
1333 if (array)
1334 arg[dim] = bld_cvt(bld, NV_TYPE_U32, NV_TYPE_F32, arg[dim]);
1335
1336 /* bind { layer x y z } and { lod/bias shadow } to adjacent regs */
1337
1338 bnd = new_instruction(bld->pc, NV_OP_BIND);
1339 if (array) {
1340 src[s] = new_value(bld->pc, NV_FILE_GPR, 4);
1341 bld_def(bnd, s, src[s]);
1342 nv_reference(bld->pc, bnd, s++, arg[dim + cube]);
1343 }
1344 for (c = 0; c < dim + cube; ++c, ++s) {
1345 src[s] = bld_def(bnd, s, new_value(bld->pc, NV_FILE_GPR, 4));
1346 nv_reference(bld->pc, bnd, s, arg[c]);
1347 }
1348
1349 if (shadow || lodbias) {
1350 bnd = new_instruction(bld->pc, NV_OP_BIND);
1351
1352 if (lodbias) {
1353 src[s] = new_value(bld->pc, NV_FILE_GPR, 4);
1354 bld_def(bnd, 0, src[s++]);
1355 nv_reference(bld->pc, bnd, 0, arg[dim + cube + array + shadow]);
1356 }
1357 if (shadow) {
1358 src[s] = new_value(bld->pc, NV_FILE_GPR, 4);
1359 bld_def(bnd, lodbias, src[s++]);
1360 nv_reference(bld->pc, bnd, lodbias, arg[dim + cube + array]);
1361 }
1362 }
1363
1364 nvi = new_instruction(bld->pc, opcode);
1365 for (c = 0; c < 4; ++c)
1366 dst[c] = bld_def(nvi, c, new_value(bld->pc, NV_FILE_GPR, 4));
1367 for (c = 0; c < s; ++c)
1368 nv_reference(bld->pc, nvi, c, src[c]);
1369
1370 nvi->ext.tex.t = tic;
1371 nvi->ext.tex.s = tsc;
1372 nvi->tex_mask = 0xf;
1373 nvi->tex_cube = cube;
1374 nvi->tex_dim = dim;
1376 nvi->tex_shadow = shadow;
1377 nvi->tex_array = array;
1378 nvi->tex_live = 0;
1379
1380 return nvi;
1381 }
1382
1383 static void
1384 bld_tex(struct bld_context *bld, struct nv_value *dst0[4],
1385 const struct tgsi_full_instruction *insn)
1386 {
1387 struct nv_value *t[4], *s[3];
1388 uint opcode = translate_opcode(insn->Instruction.Opcode);
1389 int c, dim, array, cube, shadow;
1390 const int lodbias = opcode == NV_OP_TXB || opcode == NV_OP_TXL;
1391 const int tic = insn->Src[1].Register.Index;
1392 const int tsc = tic;
1393
1394 describe_texture_target(insn->Texture.Texture, &dim, &array, &cube, &shadow);
1395
1396 assert(dim + array + shadow + lodbias <= 5);
1397
1398 if (!cube && !array && insn->Instruction.Opcode == TGSI_OPCODE_TXP)
1399 load_proj_tex_coords(bld, t, dim, shadow, insn);
1400 else {
1401 for (c = 0; c < dim + cube + array; ++c)
1402 t[c] = emit_fetch(bld, insn, 0, c);
1403 if (shadow)
1404 t[c] = emit_fetch(bld, insn, 0, MAX2(c, 2));
1405 }
1406
1407 if (cube) {
1408 for (c = 0; c < 3; ++c)
1409 s[c] = bld_insn_1(bld, NV_OP_ABS_F32, t[c]);
1410
1411 s[0] = bld_insn_2(bld, NV_OP_MAX_F32, s[0], s[1]);
1412 s[0] = bld_insn_2(bld, NV_OP_MAX_F32, s[0], s[2]);
1413 s[0] = bld_insn_1(bld, NV_OP_RCP, s[0]);
1414
1415 for (c = 0; c < 3; ++c)
1416 t[c] = bld_insn_2(bld, NV_OP_MUL_F32, t[c], s[0]);
1417 }
1418
1419 if (lodbias)
1420 t[dim + cube + array + shadow] = emit_fetch(bld, insn, 0, 3);
1421
1422 emit_tex(bld, opcode, tic, tsc, dst0, t, dim, array, cube, shadow);
1423 }
1424
1425 static INLINE struct nv_value *
1426 bld_dot(struct bld_context *bld, const struct tgsi_full_instruction *insn,
1427 int n)
1428 {
1429 struct nv_value *dotp, *src0, *src1;
1430 int c;
1431
1432 src0 = emit_fetch(bld, insn, 0, 0);
1433 src1 = emit_fetch(bld, insn, 1, 0);
1434 dotp = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
1435
1436 for (c = 1; c < n; ++c) {
1437 src0 = emit_fetch(bld, insn, 0, c);
1438 src1 = emit_fetch(bld, insn, 1, c);
1439 dotp = bld_insn_3(bld, NV_OP_MAD_F32, src0, src1, dotp);
1440 }
1441 return dotp;
1442 }
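/* E.g. DP3 expands to one MUL_F32 followed by two MAD_F32:
 *   dotp = x0 * x1; dotp = y0 * y1 + dotp; dotp = z0 * z1 + dotp
 */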
1443
1444 #define FOR_EACH_DST0_ENABLED_CHANNEL(chan, inst) \
1445 for (chan = 0; chan < 4; ++chan) \
1446 if ((inst)->Dst[0].Register.WriteMask & (1 << chan))
1447
1448 static void
1449 bld_instruction(struct bld_context *bld,
1450 const struct tgsi_full_instruction *insn)
1451 {
1452 struct nv_value *src0;
1453 struct nv_value *src1;
1454 struct nv_value *src2;
1455 struct nv_value *dst0[4] = { NULL };
1456 struct nv_value *temp;
1457 int c;
1458 uint opcode = translate_opcode(insn->Instruction.Opcode);
1459 uint8_t mask = insn->Dst[0].Register.WriteMask;
1460
1461 #ifdef NOUVEAU_DEBUG
1462 debug_printf("bld_instruction:"); tgsi_dump_instruction(insn, 1);
1463 #endif
1464
1465 switch (insn->Instruction.Opcode) {
1466 case TGSI_OPCODE_ADD:
1467 case TGSI_OPCODE_MAX:
1468 case TGSI_OPCODE_MIN:
1469 case TGSI_OPCODE_MUL:
1470 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1471 src0 = emit_fetch(bld, insn, 0, c);
1472 src1 = emit_fetch(bld, insn, 1, c);
1473 dst0[c] = bld_insn_2(bld, opcode, src0, src1);
1474 }
1475 break;
1476 case TGSI_OPCODE_ARL:
1477 src1 = bld_imm_u32(bld, 4);
1478 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1479 src0 = emit_fetch(bld, insn, 0, c);
1480 src0 = bld_insn_1(bld, NV_OP_FLOOR, src0);
1481 src0->insn->ext.cvt.d = NV_TYPE_S32;
1482 src0->insn->ext.cvt.s = NV_TYPE_F32;
1483 dst0[c] = bld_insn_2(bld, NV_OP_SHL, src0, src1);
1484 }
1485 break;
1486 case TGSI_OPCODE_CMP:
1487 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1488 src0 = emit_fetch(bld, insn, 0, c);
1489 src0 = bld_setp(bld, NV_OP_SET_F32, NV_CC_LT, src0, bld->zero);
1490 src1 = emit_fetch(bld, insn, 1, c);
1491 src2 = emit_fetch(bld, insn, 2, c);
1492 dst0[c] = bld_insn_3(bld, NV_OP_SELP, src1, src2, src0);
1493 }
1494 break;
1495 case TGSI_OPCODE_COS:
1496 case TGSI_OPCODE_SIN:
1497 src0 = emit_fetch(bld, insn, 0, 0);
1498 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1499 if (insn->Dst[0].Register.WriteMask & 7)
1500 temp = bld_insn_1(bld, opcode, temp);
1501 for (c = 0; c < 3; ++c)
1502 if (insn->Dst[0].Register.WriteMask & (1 << c))
1503 dst0[c] = temp;
1504 if (!(insn->Dst[0].Register.WriteMask & (1 << 3)))
1505 break;
1506 src0 = emit_fetch(bld, insn, 0, 3);
1507 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1508 dst0[3] = bld_insn_1(bld, opcode, temp);
1509 break;
1510 case TGSI_OPCODE_DP2:
1511 temp = bld_dot(bld, insn, 2);
1512 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1513 dst0[c] = temp;
1514 break;
1515 case TGSI_OPCODE_DP3:
1516 temp = bld_dot(bld, insn, 3);
1517 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1518 dst0[c] = temp;
1519 break;
1520 case TGSI_OPCODE_DP4:
1521 temp = bld_dot(bld, insn, 4);
1522 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1523 dst0[c] = temp;
1524 break;
1525 case TGSI_OPCODE_DPH:
1526 src0 = bld_dot(bld, insn, 3);
1527 src1 = emit_fetch(bld, insn, 1, 3);
1528 temp = bld_insn_2(bld, NV_OP_ADD_F32, src0, src1);
1529 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1530 dst0[c] = temp;
1531 break;
1532 case TGSI_OPCODE_DST:
1533 if (insn->Dst[0].Register.WriteMask & 1)
1534 dst0[0] = bld_imm_f32(bld, 1.0f);
1535 if (insn->Dst[0].Register.WriteMask & 2) {
1536 src0 = emit_fetch(bld, insn, 0, 1);
1537 src1 = emit_fetch(bld, insn, 1, 1);
1538 dst0[1] = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
1539 }
1540 if (insn->Dst[0].Register.WriteMask & 4)
1541 dst0[2] = emit_fetch(bld, insn, 0, 2);
1542 if (insn->Dst[0].Register.WriteMask & 8)
1543 dst0[3] = emit_fetch(bld, insn, 1, 3);
1544 break;
1545 case TGSI_OPCODE_EXP:
1546 src0 = emit_fetch(bld, insn, 0, 0);
1547 temp = bld_insn_1(bld, NV_OP_FLOOR, src0);
1548
1549 if (insn->Dst[0].Register.WriteMask & 2)
1550 dst0[1] = bld_insn_2(bld, NV_OP_SUB_F32, src0, temp);
1551 if (insn->Dst[0].Register.WriteMask & 1) {
1552 temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
1553 dst0[0] = bld_insn_1(bld, NV_OP_EX2, temp);
1554 }
1555 if (insn->Dst[0].Register.WriteMask & 4) {
1556 temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
1557 dst0[2] = bld_insn_1(bld, NV_OP_EX2, temp);
1558 }
1559 if (insn->Dst[0].Register.WriteMask & 8)
1560 dst0[3] = bld_imm_f32(bld, 1.0f);
1561 break;
1562 case TGSI_OPCODE_EX2:
1563 src0 = emit_fetch(bld, insn, 0, 0);
1564 temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
1565 temp = bld_insn_1(bld, NV_OP_EX2, temp);
1566 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1567 dst0[c] = temp;
1568 break;
1569 case TGSI_OPCODE_FRC:
1570 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1571 src0 = emit_fetch(bld, insn, 0, c);
1572 dst0[c] = bld_insn_1(bld, NV_OP_FLOOR, src0);
1573 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src0, dst0[c]);
1574 }
1575 break;
1576 case TGSI_OPCODE_KIL:
1577 for (c = 0; c < 4; ++c)
1578 bld_kil(bld, emit_fetch(bld, insn, 0, c));
1579 break;
1580 case TGSI_OPCODE_KILP:
1581 (new_instruction(bld->pc, NV_OP_KIL))->fixed = 1;
1582 break;
1583 case TGSI_OPCODE_IF:
1584 {
1585 struct nv_basic_block *b = new_basic_block(bld->pc);
1586
1587 assert(bld->cond_lvl < BLD_MAX_COND_NESTING);
1588
1589 nvc0_bblock_attach(bld->pc->current_block, b, CFG_EDGE_FORWARD);
1590
1591 bld->join_bb[bld->cond_lvl] = bld->pc->current_block;
1592 bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;
1593
1594 src1 = bld_setp(bld, NV_OP_SET_U32, NV_CC_EQ,
1595 emit_fetch(bld, insn, 0, 0), bld->zero);
1596
1597 bld_flow(bld, NV_OP_BRA, src1, NULL, (bld->cond_lvl == 0));
1598
1599 ++bld->cond_lvl;
1600 bld_new_block(bld, b);
1601 }
1602 break;
1603 case TGSI_OPCODE_ELSE:
1604 {
1605 struct nv_basic_block *b = new_basic_block(bld->pc);
1606
1607 --bld->cond_lvl;
1608 nvc0_bblock_attach(bld->join_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);
1609
1610 bld->cond_bb[bld->cond_lvl]->exit->target = b;
1611 bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;
1612
1613 new_instruction(bld->pc, NV_OP_BRA)->terminator = 1;
1614
1615 ++bld->cond_lvl;
1616 bld_new_block(bld, b);
1617 }
1618 break;
1619 case TGSI_OPCODE_ENDIF:
1620 {
1621 struct nv_basic_block *b = new_basic_block(bld->pc);
1622
1623 --bld->cond_lvl;
1624 nvc0_bblock_attach(bld->pc->current_block, b, bld->out_kind);
1625 nvc0_bblock_attach(bld->cond_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);
1626
1627 bld->cond_bb[bld->cond_lvl]->exit->target = b;
1628
1629 bld_new_block(bld, b);
1630
1631 if (!bld->cond_lvl && bld->join_bb[bld->cond_lvl]) {
1632 bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
1633 new_instruction(bld->pc, NV_OP_JOIN)->join = 1;
1634 }
1635 }
1636 break;
1637 case TGSI_OPCODE_BGNLOOP:
1638 {
1639 struct nv_basic_block *bl = new_basic_block(bld->pc);
1640 struct nv_basic_block *bb = new_basic_block(bld->pc);
1641
1642 assert(bld->loop_lvl < BLD_MAX_LOOP_NESTING);
1643
1644 bld->loop_bb[bld->loop_lvl] = bl;
1645 bld->brkt_bb[bld->loop_lvl] = bb;
1646
1647 nvc0_bblock_attach(bld->pc->current_block, bl, CFG_EDGE_LOOP_ENTER);
1648
1649 bld_new_block(bld, bld->loop_bb[bld->loop_lvl++]);
1650
1651 if (bld->loop_lvl == bld->pc->loop_nesting_bound)
1652 bld->pc->loop_nesting_bound++;
1653
1654 bld_clear_def_use(&bld->tvs[0][0], BLD_MAX_TEMPS, bld->loop_lvl);
1655 bld_clear_def_use(&bld->avs[0][0], BLD_MAX_ADDRS, bld->loop_lvl);
1656 bld_clear_def_use(&bld->pvs[0][0], BLD_MAX_PREDS, bld->loop_lvl);
1657 }
1658 break;
1659 case TGSI_OPCODE_BRK:
1660 {
1661 struct nv_basic_block *bb = bld->brkt_bb[bld->loop_lvl - 1];
1662
1663 bld_flow(bld, NV_OP_BRA, NULL, bb, FALSE);
1664
1665 if (bld->out_kind == CFG_EDGE_FORWARD) /* else we already had BRK/CONT */
1666 nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);
1667
1668 bld->out_kind = CFG_EDGE_FAKE;
1669 }
1670 break;
1671 case TGSI_OPCODE_CONT:
1672 {
1673 struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];
1674
1675 bld_flow(bld, NV_OP_BRA, NULL, bb, FALSE);
1676
1677 nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_BACK);
1678
1679 if ((bb = bld->join_bb[bld->cond_lvl - 1])) {
1680 bld->join_bb[bld->cond_lvl - 1] = NULL;
1681 nvc0_insn_delete(bb->exit->prev);
1682 }
1683 bld->out_kind = CFG_EDGE_FAKE;
1684 }
1685 break;
1686 case TGSI_OPCODE_ENDLOOP:
1687 {
1688 struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];
1689
1690 bld_flow(bld, NV_OP_BRA, NULL, bb, FALSE);
1691
1692 nvc0_bblock_attach(bld->pc->current_block, bb, CFG_EDGE_BACK);
1693
1694 bld_loop_end(bld, bb); /* replace loop-side operand of the phis */
1695
1696 bld_new_block(bld, bld->brkt_bb[--bld->loop_lvl]);
1697 }
1698 break;
1699 case TGSI_OPCODE_ABS:
1700 case TGSI_OPCODE_CEIL:
1701 case TGSI_OPCODE_FLR:
1702 case TGSI_OPCODE_TRUNC:
1703 case TGSI_OPCODE_DDX:
1704 case TGSI_OPCODE_DDY:
1705 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1706 src0 = emit_fetch(bld, insn, 0, c);
1707 dst0[c] = bld_insn_1(bld, opcode, src0);
1708 }
1709 break;
1710 case TGSI_OPCODE_LIT:
1711 bld_lit(bld, dst0, insn);
1712 break;
1713 case TGSI_OPCODE_LRP:
1714 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1715 src0 = emit_fetch(bld, insn, 0, c);
1716 src1 = emit_fetch(bld, insn, 1, c);
1717 src2 = emit_fetch(bld, insn, 2, c);
1718 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src1, src2);
1719 dst0[c] = bld_insn_3(bld, NV_OP_MAD_F32, dst0[c], src0, src2);
1720 }
1721 break;
1722 case TGSI_OPCODE_MOV:
1723 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1724 dst0[c] = emit_fetch(bld, insn, 0, c);
1725 break;
1726 case TGSI_OPCODE_MAD:
1727 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1728 src0 = emit_fetch(bld, insn, 0, c);
1729 src1 = emit_fetch(bld, insn, 1, c);
1730 src2 = emit_fetch(bld, insn, 2, c);
1731 dst0[c] = bld_insn_3(bld, opcode, src0, src1, src2);
1732 }
1733 break;
1734 case TGSI_OPCODE_POW:
1735 src0 = emit_fetch(bld, insn, 0, 0);
1736 src1 = emit_fetch(bld, insn, 1, 0);
1737 temp = bld_pow(bld, src0, src1);
1738 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1739 dst0[c] = temp;
1740 break;
1741 case TGSI_OPCODE_LOG:
1742 src0 = emit_fetch(bld, insn, 0, 0);
1743 src0 = bld_insn_1(bld, NV_OP_ABS_F32, src0);
1744 temp = bld_insn_1(bld, NV_OP_LG2, src0);
1745 dst0[2] = temp;
1746 if (insn->Dst[0].Register.WriteMask & 3) {
1747 temp = bld_insn_1(bld, NV_OP_FLOOR, temp);
1748 dst0[0] = temp;
1749 }
1750 if (insn->Dst[0].Register.WriteMask & 2) {
1751 temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
1752 temp = bld_insn_1(bld, NV_OP_EX2, temp);
1753 temp = bld_insn_1(bld, NV_OP_RCP, temp);
1754 dst0[1] = bld_insn_2(bld, NV_OP_MUL_F32, src0, temp);
1755 }
1756 if (insn->Dst[0].Register.WriteMask & 8)
1757 dst0[3] = bld_imm_f32(bld, 1.0f);
1758 break;
1759 case TGSI_OPCODE_RCP:
1760 case TGSI_OPCODE_LG2:
1761 src0 = emit_fetch(bld, insn, 0, 0);
1762 temp = bld_insn_1(bld, opcode, src0);
1763 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1764 dst0[c] = temp;
1765 break;
1766 case TGSI_OPCODE_RSQ:
1767 src0 = emit_fetch(bld, insn, 0, 0);
1768 temp = bld_insn_1(bld, NV_OP_ABS_F32, src0);
1769 temp = bld_insn_1(bld, NV_OP_RSQ, temp);
1770 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1771 dst0[c] = temp;
1772 break;
1773 case TGSI_OPCODE_SLT:
1774 case TGSI_OPCODE_SGE:
1775 case TGSI_OPCODE_SEQ:
1776 case TGSI_OPCODE_SGT:
1777 case TGSI_OPCODE_SLE:
1778 case TGSI_OPCODE_SNE:
1779 case TGSI_OPCODE_ISLT:
1780 case TGSI_OPCODE_ISGE:
1781 case TGSI_OPCODE_USEQ:
1782 case TGSI_OPCODE_USGE:
1783 case TGSI_OPCODE_USLT:
1784 case TGSI_OPCODE_USNE:
1785 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1786 src0 = emit_fetch(bld, insn, 0, c);
1787 src1 = emit_fetch(bld, insn, 1, c);
1788 dst0[c] = bld_insn_2(bld, opcode, src0, src1);
1789 dst0[c]->insn->set_cond = translate_setcc(insn->Instruction.Opcode);
1790 }
1791 break;
1792 case TGSI_OPCODE_SCS:
1793 if (insn->Dst[0].Register.WriteMask & 0x3) {
1794 src0 = emit_fetch(bld, insn, 0, 0);
1795 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1796 if (insn->Dst[0].Register.WriteMask & 0x1)
1797 dst0[0] = bld_insn_1(bld, NV_OP_COS, temp);
1798 if (insn->Dst[0].Register.WriteMask & 0x2)
1799 dst0[1] = bld_insn_1(bld, NV_OP_SIN, temp);
1800 }
1801 if (insn->Dst[0].Register.WriteMask & 0x4)
1802 dst0[2] = bld_imm_f32(bld, 0.0f);
1803 if (insn->Dst[0].Register.WriteMask & 0x8)
1804 dst0[3] = bld_imm_f32(bld, 1.0f);
1805 break;
1806 case TGSI_OPCODE_SSG:
1807 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) { /* XXX: set lt, set gt, sub */
1808 src0 = emit_fetch(bld, insn, 0, c);
1809 src1 = bld_setp(bld, NV_OP_SET_F32, NV_CC_EQ, src0, bld->zero);
1810 temp = bld_insn_2(bld, NV_OP_AND, src0, bld_imm_u32(bld, 0x80000000));
1811 temp = bld_insn_2(bld, NV_OP_OR, temp, bld_imm_f32(bld, 1.0f));
1812 dst0[c] = bld_insn_1(bld, NV_OP_MOV, temp);
1813 bld_src_predicate(bld, dst0[c]->insn, 1, src1);
1814 }
1815 break;
1816 case TGSI_OPCODE_SUB:
1817 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1818 src0 = emit_fetch(bld, insn, 0, c);
1819 src1 = emit_fetch(bld, insn, 1, c);
1820 dst0[c] = bld_insn_2(bld, NV_OP_SUB_F32, src0, src1);
1821 }
1822 break;
1823 case TGSI_OPCODE_TEX:
1824 case TGSI_OPCODE_TXB:
1825 case TGSI_OPCODE_TXL:
1826 case TGSI_OPCODE_TXP:
1827 bld_tex(bld, dst0, insn);
1828 break;
1829 case TGSI_OPCODE_XPD:
1830 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1831 if (c == 3) {
1832 dst0[3] = bld_imm_f32(bld, 1.0f);
1833 break;
1834 }
1835 src0 = emit_fetch(bld, insn, 1, (c + 1) % 3);
1836 src1 = emit_fetch(bld, insn, 0, (c + 2) % 3);
1837 dst0[c] = bld_insn_2(bld, NV_OP_MUL_F32, src0, src1);
1838
1839 src0 = emit_fetch(bld, insn, 0, (c + 1) % 3);
1840 src1 = emit_fetch(bld, insn, 1, (c + 2) % 3);
1841 dst0[c] = bld_insn_3(bld, NV_OP_MAD_F32, src0, src1, dst0[c]);
1842
1843 dst0[c]->insn->src[2]->mod ^= NV_MOD_NEG;
1844 }
1845 break;
1846 case TGSI_OPCODE_RET:
1847 (new_instruction(bld->pc, NV_OP_RET))->fixed = 1;
1848 break;
1849 case TGSI_OPCODE_END:
1850 /* VP outputs are exported in-place as scalars, to be optimized later */
1851 if (bld->pc->is_fragprog)
1852 bld_export_fp_outputs(bld);
1853 if (bld->ti->append_ucp)
1854 bld_append_vp_ucp(bld);
1855 return;
1856 default:
1857 NOUVEAU_ERR("unhandled opcode %u\n", insn->Instruction.Opcode);
1858 abort();
1859 return;
1860 }
1861
1862 if (insn->Dst[0].Register.File == TGSI_FILE_OUTPUT &&
1863 !bld->pc->is_fragprog) {
1864 struct nv_instruction *mi = NULL;
1865 uint size;
1866
1867 if (bld->ti->append_ucp) {
1868 if (bld->ti->output_loc[insn->Dst[0].Register.Index][0] == 0x70) {
1869 bld->hpos_index = insn->Dst[0].Register.Index;
1870 for (c = 0; c < 4; ++c)
1871 if (mask & (1 << c))
1872 STORE_OUTP(insn->Dst[0].Register.Index, c, dst0[c]);
1873 }
1874 }
1875
1876 for (c = 0; c < 4; ++c)
1877 if (mask & (1 << c))
1878 if ((dst0[c]->reg.file == NV_FILE_IMM) ||
1879 (dst0[c]->reg.file == NV_FILE_GPR && dst0[c]->reg.id == 63))
1880 dst0[c] = bld_insn_1(bld, NV_OP_MOV, dst0[c]);
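/* Group the written channels through NV_OP_BIND so a single vectored
 * EXPORT can read them from adjacent registers; e.g. a full xyzw mask
 * becomes one 16-byte export.
 */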
1881
1882 c = 0;
1883 if ((mask & 0x3) == 0x3) {
1884 mask &= ~0x3;
1885 size = 8;
1886 mi = bld_insn_2(bld, NV_OP_BIND, dst0[0], dst0[1])->insn;
1887 }
1888 if ((mask & 0xc) == 0xc) {
1889 mask &= ~0xc;
1890 if (mi) {
1891 size = 16;
1892 nv_reference(bld->pc, mi, 2, dst0[2]);
1893 nv_reference(bld->pc, mi, 3, dst0[3]);
1894 } else {
1895 c = 2;
1896 size = 8;
1897 mi = bld_insn_2(bld, NV_OP_BIND, dst0[2], dst0[3])->insn;
1898 }
1899 } else
1900 if (mi && (mask & 0x4)) {
1901 size = 12;
1902 mask &= ~0x4;
1903 nv_reference(bld->pc, mi, 2, dst0[2]);
1904 }
1905
1906 if (mi) {
1907 struct nv_instruction *ex = new_instruction(bld->pc, NV_OP_EXPORT);
1908 int s;
1909
1910 nv_reference(bld->pc, ex, 0, new_value(bld->pc, NV_FILE_MEM_V, 4));
1911 nv_reference(bld->pc, ex, 1, mi->def[0]);
1912
1913 for (s = 1; s < size / 4; ++s) {
1914 bld_def(mi, s, new_value(bld->pc, NV_FILE_GPR, 4));
1915 nv_reference(bld->pc, ex, s + 1, mi->def[s]);
1916 }
1917
1918 ex->fixed = 1;
1919 ex->src[0]->value->reg.size = size;
1920 ex->src[0]->value->reg.address =
1921 bld->ti->output_loc[insn->Dst[0].Register.Index][c];
1922 }
1923 }
1924
1925 for (c = 0; c < 4; ++c)
1926 if (mask & (1 << c))
1927 emit_store(bld, insn, c, dst0[c]);
1928 }
1929
1930 static INLINE void
1931 bld_free_registers(struct bld_register *base, int n)
1932 {
1933 int i, c;
1934
1935 for (i = 0; i < n; ++i)
1936 for (c = 0; c < 4; ++c)
1937 util_dynarray_fini(&base[i * 4 + c].vals);
1938 }
1939
1940 int
1941 nvc0_tgsi_to_nc(struct nv_pc *pc, struct nvc0_translation_info *ti)
1942 {
1943 struct bld_context *bld = CALLOC_STRUCT(bld_context);
1944 unsigned ip;
1945
1946 pc->root[0] = pc->current_block = new_basic_block(pc);
1947
1948 bld->pc = pc;
1949 bld->ti = ti;
1950
1951 pc->loop_nesting_bound = 1;
1952
1953 bld->zero = new_value(pc, NV_FILE_GPR, 4);
1954 bld->zero->reg.id = 63;
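/* GPR 63 always reads as zero; see the reg.id == 63 special case in
 * bld_instruction().
 */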
1955
1956 if (pc->is_fragprog) {
1957 struct nv_value *mem = new_value(pc, NV_FILE_MEM_V, 4);
1958 mem->reg.address = 0x7c;
1959
1960 bld->frag_coord[3] = bld_insn_1(bld, NV_OP_LINTERP, mem);
1961 bld->frag_coord[3] = bld_insn_1(bld, NV_OP_RCP, bld->frag_coord[3]);
1962 }
1963
1964 for (ip = 0; ip < ti->num_insns; ++ip)
1965 bld_instruction(bld, &ti->insns[ip]);
1966
1967 bld_free_registers(&bld->tvs[0][0], BLD_MAX_TEMPS);
1968 bld_free_registers(&bld->avs[0][0], BLD_MAX_ADDRS);
1969 bld_free_registers(&bld->pvs[0][0], BLD_MAX_PREDS);
1970 bld_free_registers(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);
1971
1972 FREE(bld);
1973 return 0;
1974 }
1975
1976 /* If a variable is assigned in a loop, replace all references to the value
1977 * from outside the loop with a phi value.
1978 */
1979 static void
1980 bld_replace_value(struct nv_pc *pc, struct nv_basic_block *b,
1981 struct nv_value *old_val,
1982 struct nv_value *new_val)
1983 {
1984 struct nv_instruction *nvi;
1985
1986 for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = nvi->next) {
1987 int s;
1988 for (s = 0; s < 6 && nvi->src[s]; ++s)
1989 if (nvi->src[s]->value == old_val)
1990 nv_reference(pc, nvi, s, new_val);
1991 }
1992
1993 b->pass_seq = pc->pass_seq;
1994
1995 if (b->out[0] && b->out[0]->pass_seq < pc->pass_seq)
1996 bld_replace_value(pc, b->out[0], old_val, new_val);
1997
1998 if (b->out[1] && b->out[1]->pass_seq < pc->pass_seq)
1999 bld_replace_value(pc, b->out[1], old_val, new_val);
2000 }