nv50: Silence uninitialized variable warning.
[mesa.git] src/gallium/drivers/nv50/nv50_tgsi_to_nc.c
1 /*
2 * Copyright 2010 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
23 /* #define NV50_TGSI2NC_DEBUG */
24
25 #include <unistd.h>
26
27 #include "nv50_context.h"
28 #include "nv50_pc.h"
29
30 #include "pipe/p_shader_tokens.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "tgsi/tgsi_util.h"
33
34 #include "tgsi/tgsi_dump.h"
35
36 #define BLD_MAX_TEMPS 64
37 #define BLD_MAX_ADDRS 4
38 #define BLD_MAX_PREDS 4
39 #define BLD_MAX_IMMDS 128
40
41 #define BLD_MAX_COND_NESTING 8
42 #define BLD_MAX_LOOP_NESTING 4
43 #define BLD_MAX_CALL_NESTING 2
44
45 /* collects all values assigned to the same TGSI register */
46 struct bld_value_stack {
47 struct nv_value *top;
48 struct nv_value **body;
49 unsigned size;
50 uint16_t loop_use; /* 1 bit per loop level, set if used at that level */
51 uint16_t loop_def; /* 1 bit per loop level, set if defined at that level */
52 };
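/* A sketch of how these stacks drive SSA construction (value names are
 * illustrative): bld_store() sets ->top for the current block,
 * bld_push_values() moves live tops into ->body on block changes, and
 * bld_phi()/bld_fetch_global() later merge the per-block definitions:
 *
 *    MOV TEMP[0].x, a      <- BB0: top = %a
 *    IF ...
 *      MOV TEMP[0].x, b    <- BB1: top = %b
 *    ENDIF
 *    MUL ..., TEMP[0].x    <- BB2: fetch collects {%a, %b} from the
 *                             bodies and emits %c = phi %a, %b
 */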
53
54 static INLINE void
55 bld_vals_push_val(struct bld_value_stack *stk, struct nv_value *val)
56 {
57 assert(!stk->size || (stk->body[stk->size - 1] != val));
58
59 if (!(stk->size % 8)) {
60 unsigned old_sz = (stk->size + 0) * sizeof(struct nv_value *);
61 unsigned new_sz = (stk->size + 8) * sizeof(struct nv_value *);
62 stk->body = (struct nv_value **)REALLOC(stk->body, old_sz, new_sz);
63 }
64 stk->body[stk->size++] = val;
65 }
66
67 static INLINE boolean
68 bld_vals_del_val(struct bld_value_stack *stk, struct nv_value *val)
69 {
70 unsigned i;
71
72 for (i = stk->size; i > 0; --i)
73 if (stk->body[i - 1] == val)
74 break;
75 if (!i)
76 return FALSE;
77
78 if (i != stk->size)
79 stk->body[i - 1] = stk->body[stk->size - 1];
80
81 --stk->size; /* XXX: allocation is not shrunk, so the old size passed to REALLOC on the next push may be stale */
82 return TRUE;
83 }
84
85 static INLINE void
86 bld_vals_push(struct bld_value_stack *stk)
87 {
88 bld_vals_push_val(stk, stk->top);
89 stk->top = NULL;
90 }
91
92 static INLINE void
93 bld_push_values(struct bld_value_stack *stacks, int n)
94 {
95 int i, c;
96
97 for (i = 0; i < n; ++i)
98 for (c = 0; c < 4; ++c)
99 if (stacks[i * 4 + c].top)
100 bld_vals_push(&stacks[i * 4 + c]);
101 }
102
103 struct bld_context {
104 struct nv50_translation_info *ti;
105
106 struct nv_pc *pc;
107 struct nv_basic_block *b;
108
109 struct tgsi_parse_context parse[BLD_MAX_CALL_NESTING];
110 int call_lvl;
111
112 struct nv_basic_block *cond_bb[BLD_MAX_COND_NESTING];
113 struct nv_basic_block *join_bb[BLD_MAX_COND_NESTING];
114 struct nv_basic_block *else_bb[BLD_MAX_COND_NESTING];
115 int cond_lvl;
116 struct nv_basic_block *loop_bb[BLD_MAX_LOOP_NESTING];
117 struct nv_basic_block *brkt_bb[BLD_MAX_LOOP_NESTING];
118 int loop_lvl;
119
120 ubyte out_kind; /* CFG_EDGE_FORWARD, or FAKE in case of BREAK/CONT */
121
122 struct bld_value_stack tvs[BLD_MAX_TEMPS][4]; /* TGSI_FILE_TEMPORARY */
123 struct bld_value_stack avs[BLD_MAX_ADDRS][4]; /* TGSI_FILE_ADDRESS */
124 struct bld_value_stack pvs[BLD_MAX_PREDS][4]; /* TGSI_FILE_PREDICATE */
125 struct bld_value_stack ovs[PIPE_MAX_SHADER_OUTPUTS][4];
126
127 uint32_t outputs_written[(PIPE_MAX_SHADER_OUTPUTS + 7) / 8];
128
129 struct nv_value *frgcrd[4];
130 struct nv_value *sysval[4];
131
132 /* wipe on new BB */
133 struct nv_value *saved_addr[4][2];
134 struct nv_value *saved_inputs[128];
135 struct nv_value *saved_immd[BLD_MAX_IMMDS];
136 uint num_immds;
137 };
138
139 static INLINE ubyte
140 bld_stack_file(struct bld_context *bld, struct bld_value_stack *stk)
141 {
142 if (stk < &bld->avs[0][0])
143 return NV_FILE_GPR;
144 else
145 if (stk < &bld->pvs[0][0])
146 return NV_FILE_ADDR;
147 else
148 if (stk < &bld->ovs[0][0])
149 return NV_FILE_FLAGS;
150 else
151 return NV_FILE_OUT;
152 }
153
154 static INLINE struct nv_value *
155 bld_fetch(struct bld_context *bld, struct bld_value_stack *stk, int i, int c)
156 {
157 stk[i * 4 + c].loop_use |= 1 << bld->loop_lvl;
158
159 return stk[i * 4 + c].top;
160 }
161
162 static struct nv_value *
163 bld_loop_phi(struct bld_context *, struct bld_value_stack *, struct nv_value *);
164
165 /* If a variable is defined in a loop without prior use, we don't need
166 * a phi in the loop header to account for backwards flow.
167 *
168 * However, if this variable is then also used outside the loop, we do
169 * need a phi after all. But we must not use this phi's def inside the
170 * loop, so we can eliminate the phi if it is unused later.
171 */
172 static INLINE void
173 bld_store(struct bld_context *bld, struct bld_value_stack *stk, int i, int c,
174 struct nv_value *val)
175 {
176 const uint16_t m = 1 << bld->loop_lvl;
177
178 stk = &stk[i * 4 + c];
179
180 if (bld->loop_lvl && !(m & (stk->loop_def | stk->loop_use)))
181 bld_loop_phi(bld, stk, val);
182
183 stk->top = val;
184 stk->loop_def |= 1 << bld->loop_lvl;
185 }
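/* Sketch of the logic above, at loop level 1 (names illustrative):
 *
 *    BGNLOOP
 *      MOV TEMP[0].x, a    <- first def with no prior use: a loop phi
 *                             is still created (for a potential use
 *                             after the loop), but %a, not the phi's
 *                             def, becomes the top, so reads inside
 *                             the loop keep using %a
 *    ENDLOOP
 *
 * If the phi's def then goes unused outside the loop, it can be
 * eliminated again (see the redundancy notes above bld_loop_end).
 */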
186
187 static INLINE void
188 bld_clear_def_use(struct bld_value_stack *stk, int n, int lvl)
189 {
190 int i;
191 const uint16_t mask = ~(1 << lvl);
192
193 for (i = 0; i < n * 4; ++i) {
194 stk[i].loop_def &= mask;
195 stk[i].loop_use &= mask;
196 }
197 }
198
199 #define FETCH_TEMP(i, c) bld_fetch(bld, &bld->tvs[0][0], i, c)
200 #define STORE_TEMP(i, c, v) bld_store(bld, &bld->tvs[0][0], i, c, (v))
201 #define FETCH_ADDR(i, c) bld_fetch(bld, &bld->avs[0][0], i, c)
202 #define STORE_ADDR(i, c, v) bld_store(bld, &bld->avs[0][0], i, c, (v))
203 #define FETCH_PRED(i, c) bld_fetch(bld, &bld->pvs[0][0], i, c)
204 #define STORE_PRED(i, c, v) bld_store(bld, &bld->pvs[0][0], i, c, (v))
205
206 #define STORE_OUTR(i, c, v) \
207 do { \
208 bld->ovs[i][c].top = (v); \
209 bld->outputs_written[(i) / 8] |= 1 << (((i) * 4 + (c)) % 32); \
210 } while (0)
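/* outputs_written packs one bit per output channel: 4 bits per output,
 * 8 outputs per 32-bit word.  E.g. OUT[9].z sets word 9 / 8 = 1,
 * bit (9 * 4 + 2) % 32 = 6.
 */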
211
212 static INLINE void
213 bld_warn_uninitialized(struct bld_context *bld, int kind,
214 struct bld_value_stack *stk, struct nv_basic_block *b)
215 {
216 #ifdef NV50_TGSI2NC_DEBUG
217 long i = (stk - &bld->tvs[0][0]) / 4;
218 long c = (stk - &bld->tvs[0][0]) & 3;
219
220 if (c == 3)
221 c = -1; /* 'x' + 3 would be '{'; with -1, 'x' + c yields 'w' */
222
223 debug_printf("WARNING: TEMP[%li].%c %s used uninitialized in BB:%i\n",
224 i, (int)('x' + c), kind ? "may be" : "is", b->id);
225 #endif
226 }
227
228 static INLINE struct nv_value *
229 bld_def(struct nv_instruction *i, int c, struct nv_value *value)
230 {
231 i->def[c] = value;
232 value->insn = i;
233 return value;
234 }
235
236 static INLINE struct nv_value *
237 find_by_bb(struct bld_value_stack *stack, struct nv_basic_block *b)
238 {
239 int i;
240
241 if (stack->top && stack->top->insn->bb == b)
242 return stack->top;
243
244 for (i = stack->size - 1; i >= 0; --i)
245 if (stack->body[i]->insn->bb == b)
246 return stack->body[i];
247 return NULL;
248 }
249
250 /* Fetch the value from the stack that was defined in the specified basic
251  * block, or search all of its predecessors for their nearest definitions.
252  */
253 static void
254 fetch_by_bb(struct bld_value_stack *stack,
255 struct nv_value **vals, int *n,
256 struct nv_basic_block *b)
257 {
258 int i;
259 struct nv_value *val;
260
261 assert(*n < 16); /* size of vals[] in bld_phi (2 * BLD_MAX_COND_NESTING) */
262
263 val = find_by_bb(stack, b);
264 if (val) {
265 for (i = 0; i < *n; ++i)
266 if (vals[i] == val)
267 return;
268 vals[(*n)++] = val;
269 return;
270 }
271 for (i = 0; i < b->num_in; ++i)
272 if (!IS_WALL_EDGE(b->in_kind[i]))
273 fetch_by_bb(stack, vals, n, b->in[i]);
274 }
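/* E.g. in a diamond CFG BB0 -> {BB1, BB2} -> BB3, a fetch in BB3 with
 * no local definition recurses into BB1 and BB2 (and further into BB0
 * if necessary), collecting one reaching definition per path; wall
 * edges (presumably the fake edges created for BRK/CONT) are skipped
 * so values can't be fetched along paths that are never taken.
 */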
275
276 static INLINE struct nv_value *
277 bld_load_imm_u32(struct bld_context *bld, uint32_t u);
278
279 static INLINE struct nv_value *
280 bld_undef(struct bld_context *bld, ubyte file)
281 {
282 struct nv_instruction *nvi = new_instruction(bld->pc, NV_OP_UNDEF);
283
284 return bld_def(nvi, 0, new_value(bld->pc, file, NV_TYPE_U32));
285 }
286
287 static struct nv_value *
288 bld_phi(struct bld_context *bld, struct nv_basic_block *b,
289 struct bld_value_stack *stack)
290 {
291 struct nv_basic_block *in;
292 struct nv_value *vals[16] = { 0 };
293 struct nv_value *val;
294 struct nv_instruction *phi;
295 int i, j, n;
296
297 do {
298 i = n = 0;
299 fetch_by_bb(stack, vals, &n, b);
300
301 if (!n) {
302 bld_warn_uninitialized(bld, 0, stack, b);
303 return NULL;
304 }
305
306 if (n == 1) {
307 if (nvbb_dominated_by(b, vals[0]->insn->bb))
308 break;
309
310 bld_warn_uninitialized(bld, 1, stack, b);
311
312 /* back-track to insert the missing value of the other path */
313 in = b;
314 while (in->in[0]) {
315 if (in->num_in == 1) {
316 in = in->in[0];
317 } else {
318 if (!nvbb_reachable_by(in->in[0], vals[0]->insn->bb, b))
319 in = in->in[0];
320 else
321 if (!nvbb_reachable_by(in->in[1], vals[0]->insn->bb, b))
322 in = in->in[1];
323 else
324 in = in->in[0];
325 }
326 }
327 bld->pc->current_block = in;
328
329 /* the undef should make the phi for this path a no-op */
330 bld_vals_push_val(stack, bld_undef(bld, vals[0]->reg.file));
331 continue;
332 }
333
334 for (i = 0; i < n; ++i) {
335 /* if value dominates b, continue to the redefinitions */
336 if (nvbb_dominated_by(b, vals[i]->insn->bb))
337 continue;
338
339 /* if value dominates any in-block, b should be the dom frontier */
340 for (j = 0; j < b->num_in; ++j)
341 if (nvbb_dominated_by(b->in[j], vals[i]->insn->bb))
342 break;
343 /* otherwise, find the dominance frontier and put the phi there */
344 if (j == b->num_in) {
345 in = nvbb_dom_frontier(vals[i]->insn->bb);
346 val = bld_phi(bld, in, stack);
347 bld_vals_push_val(stack, val);
348 break;
349 }
350 }
351 } while(i < n);
352
353 bld->pc->current_block = b;
354
355 if (n == 1)
356 return vals[0];
357
358 phi = new_instruction(bld->pc, NV_OP_PHI);
359
360 bld_def(phi, 0, new_value(bld->pc, vals[0]->reg.file, vals[0]->reg.type));
361 for (i = 0; i < n; ++i)
362 phi->src[i] = new_ref(bld->pc, vals[i]);
363
364 return phi->def[0];
365 }
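/* Walkthrough of the placement loop above (illustrative): with
 * BB0: %a = ... and BB1: %b = ..., both branching to BB2, a fetch in
 * BB2 collects n = 2 values; neither %a nor %b dominates BB2, but each
 * dominates one of its in-blocks, so BB2 is the dominance frontier and
 * "%c = phi %a, %b" is emitted right here.  A value dominating neither
 * BB2 nor any in-block first gets a phi at its own dominance frontier,
 * and the fetch is restarted with that phi's def pushed on the stack.
 */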
366
367 /* Insert a phi function in the loop header.
368 * For nested loops, we need to insert phi functions in all the outer
369 * loop headers if they don't have one yet.
370 *
371 * @def: redefinition from inside loop, or NULL if to be replaced later
372 */
373 static struct nv_value *
374 bld_loop_phi(struct bld_context *bld, struct bld_value_stack *stack,
375 struct nv_value *def)
376 {
377 struct nv_instruction *phi;
378 struct nv_basic_block *bb = bld->pc->current_block;
379 struct nv_value *val = NULL;
380
381 if (bld->loop_lvl > 1) {
382 --bld->loop_lvl;
383 if (!((stack->loop_def | stack->loop_use) & (1 << bld->loop_lvl)))
384 val = bld_loop_phi(bld, stack, NULL);
385 ++bld->loop_lvl;
386 }
387
388 if (!val)
389 val = bld_phi(bld, bld->pc->current_block, stack); /* old definition */
390 if (!val) {
391 bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1]->in[0];
392 val = bld_undef(bld, bld_stack_file(bld, stack));
393 }
394
395 bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1];
396
397 phi = new_instruction(bld->pc, NV_OP_PHI);
398
399 bld_def(phi, 0, new_value_like(bld->pc, val));
400 if (!def)
401 def = phi->def[0];
402
403 bld_vals_push_val(stack, phi->def[0]);
404
405 phi->target = (struct nv_basic_block *)stack; /* cheat: stash the stack so bld_loop_end() can find it */
406
407 nv_reference(bld->pc, &phi->src[0], val);
408 nv_reference(bld->pc, &phi->src[1], def);
409
410 bld->pc->current_block = bb;
411
412 return phi->def[0];
413 }
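/* The loop phi always has exactly two operands: src[0] is the value
 * from before the loop (an UNDEF created in the pre-header if there was
 * no definition at all), src[1] the value from the back edge.  When the
 * latter isn't known yet (def == NULL), src[1] temporarily references
 * the phi's own def and bld_loop_end() patches in the real value.
 */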
414
415 static INLINE struct nv_value *
416 bld_fetch_global(struct bld_context *bld, struct bld_value_stack *stack)
417 {
418 const uint16_t m = 1 << bld->loop_lvl;
419 const uint16_t use = stack->loop_use;
420
421 stack->loop_use |= m;
422
423 /* If neither used nor def'd inside the loop, build a phi in foresight,
424 * so we don't have to replace stuff later on, which requires tracking.
425 */
426 if (bld->loop_lvl && !((use | stack->loop_def) & m))
427 return bld_loop_phi(bld, stack, NULL);
428
429 return bld_phi(bld, bld->pc->current_block, stack);
430 }
431
432 static INLINE struct nv_value *
433 bld_imm_u32(struct bld_context *bld, uint32_t u)
434 {
435 int i;
436 unsigned n = bld->num_immds;
437
438 for (i = 0; i < n; ++i)
439 if (bld->saved_immd[i]->reg.imm.u32 == u)
440 return bld->saved_immd[i];
441 assert(n < BLD_MAX_IMMDS);
442
443 bld->num_immds++;
444
445 bld->saved_immd[n] = new_value(bld->pc, NV_FILE_IMM, NV_TYPE_U32);
446 bld->saved_immd[n]->reg.imm.u32 = u;
447 return bld->saved_immd[n];
448 }
449
450 static void
451 bld_replace_value(struct nv_pc *, struct nv_basic_block *, struct nv_value *,
452 struct nv_value *);
453
454 /* Replace the source of the phi in the loop header by the last assignment,
455 * or eliminate the phi function if there is no assignment inside the loop.
456 *
457 * Redundancy situation 1 - (used) but (not redefined) value:
458 *   %3 = phi %0, %3   <- %3 is used
459 *   %3 = phi %0, %4   <- %4 is the new definition
460 *
461 * Redundancy situation 2 - (not used) but (redefined) value:
462 *   %3 = phi %0, %2   <- %2 is used; %3 could be used outside, deleted by DCE
463 */
464 static void
465 bld_loop_end(struct bld_context *bld, struct nv_basic_block *bb)
466 {
467 struct nv_basic_block *save = bld->pc->current_block;
468 struct nv_instruction *phi, *next;
469 struct nv_value *val;
470 struct bld_value_stack *stk;
471 int i, s, n;
472
473 for (phi = bb->phi; phi && phi->opcode == NV_OP_PHI; phi = next) {
474 next = phi->next;
475
476 stk = (struct bld_value_stack *)phi->target;
477 phi->target = NULL;
478
479 for (s = 1, n = 0; n < bb->num_in; ++n) {
480 if (bb->in_kind[n] != CFG_EDGE_BACK)
481 continue;
482
483 assert(s < 4);
484 bld->pc->current_block = bb->in[n];
485 val = bld_fetch_global(bld, stk);
486
487 for (i = 0; i < 4; ++i)
488 if (phi->src[i] && phi->src[i]->value == val)
489 break;
490 if (i == 4)
491 nv_reference(bld->pc, &phi->src[s++], val);
492 }
493 bld->pc->current_block = save;
494
495 if (phi->src[0]->value == phi->def[0] ||
496 phi->src[0]->value == phi->src[1]->value)
497 s = 1;
498 else
499 if (phi->src[1]->value == phi->def[0])
500 s = 0;
501 else
502 continue;
503
504 if (s >= 0) {
505 /* eliminate the phi */
506 bld_vals_del_val(stk, phi->def[0]);
507
508 ++bld->pc->pass_seq;
509 bld_replace_value(bld->pc, bb, phi->def[0], phi->src[s]->value);
510
511 nv_nvi_delete(phi);
512 }
513 }
514 }
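/* E.g. if the loop never redefines the variable, the phi stays
 * "%3 = phi %0, %3" with src[1] still the self-reference from
 * bld_loop_phi(), so s ends up 0 and every use of %3 is rewritten to
 * the pre-loop value %0 before the phi is deleted.
 */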
515
516 static INLINE struct nv_value *
517 bld_imm_f32(struct bld_context *bld, float f)
518 {
519 return bld_imm_u32(bld, fui(f));
520 }
521
522 #define SET_TYPE(v, t) ((v)->reg.type = (v)->reg.as_type = (t))
523
524 static struct nv_value *
525 bld_insn_1(struct bld_context *bld, uint opcode, struct nv_value *src0)
526 {
527 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
528
529 nv_reference(bld->pc, &insn->src[0], src0);
530
531 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.as_type));
532 }
533
534 static struct nv_value *
535 bld_insn_2(struct bld_context *bld, uint opcode,
536 struct nv_value *src0, struct nv_value *src1)
537 {
538 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
539
540 nv_reference(bld->pc, &insn->src[0], src0);
541 nv_reference(bld->pc, &insn->src[1], src1);
542
543 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.as_type));
544 }
545
546 static struct nv_value *
547 bld_insn_3(struct bld_context *bld, uint opcode,
548 struct nv_value *src0, struct nv_value *src1,
549 struct nv_value *src2)
550 {
551 struct nv_instruction *insn = new_instruction(bld->pc, opcode);
552
553 nv_reference(bld->pc, &insn->src[0], src0);
554 nv_reference(bld->pc, &insn->src[1], src1);
555 nv_reference(bld->pc, &insn->src[2], src2);
556
557 return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.as_type));
558 }
559
560 static struct nv_value *
561 bld_duplicate_insn(struct bld_context *bld, struct nv_instruction *nvi)
562 {
563 struct nv_instruction *dupi = new_instruction(bld->pc, nvi->opcode);
564 int c;
565
566 if (nvi->def[0])
567 bld_def(dupi, 0, new_value_like(bld->pc, nvi->def[0]));
568
569 if (nvi->flags_def) {
570 dupi->flags_def = new_value_like(bld->pc, nvi->flags_def);
571 dupi->flags_def->insn = dupi;
572 }
573
574 for (c = 0; c < 5; ++c)
575 if (nvi->src[c])
576 nv_reference(bld->pc, &dupi->src[c], nvi->src[c]->value);
577 if (nvi->flags_src)
578 nv_reference(bld->pc, &dupi->flags_src, nvi->flags_src->value);
579
580 dupi->cc = nvi->cc;
581 dupi->saturate = nvi->saturate;
582 dupi->centroid = nvi->centroid;
583 dupi->flat = nvi->flat;
584
585 return dupi->def[0];
586 }
587
588 static void
589 bld_lmem_store(struct bld_context *bld, struct nv_value *ptr, int ofst,
590 struct nv_value *val)
591 {
592 struct nv_instruction *insn = new_instruction(bld->pc, NV_OP_STA);
593 struct nv_value *loc;
594
595 loc = new_value(bld->pc, NV_FILE_MEM_L, NV_TYPE_U32);
596
597 loc->reg.id = ofst * 4;
598
599 nv_reference(bld->pc, &insn->src[0], loc);
600 nv_reference(bld->pc, &insn->src[1], val);
601 nv_reference(bld->pc, &insn->src[4], ptr);
602 }
603
604 static struct nv_value *
605 bld_lmem_load(struct bld_context *bld, struct nv_value *ptr, int ofst)
606 {
607 struct nv_value *loc, *val;
608
609 loc = new_value(bld->pc, NV_FILE_MEM_L, NV_TYPE_U32);
610
611 loc->reg.id = ofst * 4;
612
613 val = bld_insn_1(bld, NV_OP_LDA, loc);
614
615 nv_reference(bld->pc, &val->insn->src[4], ptr);
616
617 return val;
618 }
619
620 #define BLD_INSN_1_EX(d, op, dt, s0, s0t) \
621 do { \
622 (d) = bld_insn_1(bld, (NV_OP_##op), (s0)); \
623 SET_TYPE(d, NV_TYPE_##dt); \
624 (d)->insn->src[0]->typecast = NV_TYPE_##s0t; \
625 } while(0)
626
627 #define BLD_INSN_2_EX(d, op, dt, s0, s0t, s1, s1t) \
628 do { \
629 (d) = bld_insn_2(bld, (NV_OP_##op), (s0), (s1)); \
630 SET_TYPE(d, NV_TYPE_##dt); \
631 (d)->insn->src[0]->typecast = NV_TYPE_##s0t; \
632 (d)->insn->src[1]->typecast = NV_TYPE_##s1t; \
633 } while(0)
634
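/* bld_pow() below uses the identity pow(x, e) = 2^(e * log2(x));
 * NV_OP_PREEX2 presumably performs the range reduction the hardware
 * EX2 unit needs, as it precedes every EX2 emitted in this file.
 */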
635 static struct nv_value *
636 bld_pow(struct bld_context *bld, struct nv_value *x, struct nv_value *e)
637 {
638 struct nv_value *val;
639
640 BLD_INSN_1_EX(val, LG2, F32, x, F32);
641 BLD_INSN_2_EX(val, MUL, F32, e, F32, val, F32);
642 val = bld_insn_1(bld, NV_OP_PREEX2, val);
643 val = bld_insn_1(bld, NV_OP_EX2, val);
644
645 return val;
646 }
647
648 static INLINE struct nv_value *
649 bld_load_imm_f32(struct bld_context *bld, float f)
650 {
651 struct nv_value *imm = bld_insn_1(bld, NV_OP_MOV, bld_imm_f32(bld, f));
652
653 SET_TYPE(imm, NV_TYPE_F32);
654 return imm;
655 }
656
657 static INLINE struct nv_value *
658 bld_load_imm_u32(struct bld_context *bld, uint32_t u)
659 {
660 return bld_insn_1(bld, NV_OP_MOV, bld_imm_u32(bld, u));
661 }
662
663 static struct nv_value *
664 bld_get_address(struct bld_context *bld, int id, struct nv_value *indirect)
665 {
666 int i;
667 struct nv_instruction *nvi;
668 struct nv_value *val;
669
670 for (i = 0; i < 4; ++i) {
671 if (!bld->saved_addr[i][0])
672 break;
673 if (bld->saved_addr[i][1] == indirect) {
674 nvi = bld->saved_addr[i][0]->insn;
675 if (nvi->src[0]->value->reg.imm.u32 == id)
676 return bld->saved_addr[i][0];
677 }
678 }
679 i &= 3;
680
681 val = bld_imm_u32(bld, id);
682 if (indirect)
683 val = bld_insn_2(bld, NV_OP_ADD, indirect, val);
684 else
685 val = bld_insn_1(bld, NV_OP_MOV, val);
686
687 bld->saved_addr[i][0] = val;
688 bld->saved_addr[i][0]->reg.file = NV_FILE_ADDR;
689 bld->saved_addr[i][0]->reg.type = NV_TYPE_U16;
690 bld->saved_addr[i][1] = indirect;
691 return bld->saved_addr[i][0];
692 }
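/* saved_addr is a small cache of up to 4 live address values, keyed by
 * (immediate offset, indirect source); "i &= 3" simply overwrites the
 * last slot when all four are taken, and bld_new_block() wipes the
 * cache whenever a new basic block starts.
 */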
693
694
695 static struct nv_value *
696 bld_predicate(struct bld_context *bld, struct nv_value *src, boolean bool_only)
697 {
698 struct nv_instruction *s0i, *nvi = src->insn;
699
700 if (!nvi) {
701 nvi = bld_insn_1(bld,
702 (src->reg.file == NV_FILE_IMM) ? NV_OP_MOV : NV_OP_LDA,
703 src)->insn;
704 src = nvi->def[0];
705 } else
706 if (bool_only) {
707 while (nvi->opcode == NV_OP_ABS || nvi->opcode == NV_OP_NEG ||
708 nvi->opcode == NV_OP_CVT) {
709 s0i = nvi->src[0]->value->insn;
710 if (!s0i || !nv50_op_can_write_flags(s0i->opcode))
711 break;
712 nvi = s0i;
713 assert(!nvi->flags_src);
714 }
715 }
716
717 if (!nv50_op_can_write_flags(nvi->opcode) ||
718 nvi->bb != bld->pc->current_block) {
719 nvi = new_instruction(bld->pc, NV_OP_CVT);
720 nv_reference(bld->pc, &nvi->src[0], src);
721 }
722
723 if (!nvi->flags_def) {
724 nvi->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
725 nvi->flags_def->insn = nvi;
726 }
727 return nvi->flags_def;
728 }
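/* In short: reuse the condition flags of the instruction that computed
 * src if it can write flags and lives in the current block (for
 * bool_only sources, first peeking through ABS/NEG/CVT wrappers);
 * otherwise emit a CVT solely to set the flags.
 */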
729
730 static void
731 bld_kil(struct bld_context *bld, struct nv_value *src)
732 {
733 struct nv_instruction *nvi;
734
735 src = bld_predicate(bld, src, FALSE);
736 nvi = new_instruction(bld->pc, NV_OP_KIL);
737 nvi->fixed = 1;
738 nvi->flags_src = new_ref(bld->pc, src);
739 nvi->cc = NV_CC_LT;
740 }
741
742 static void
743 bld_flow(struct bld_context *bld, uint opcode, ubyte cc,
744 struct nv_value *src, struct nv_basic_block *target,
745 boolean plan_reconverge)
746 {
747 struct nv_instruction *nvi;
748
749 if (plan_reconverge)
750 new_instruction(bld->pc, NV_OP_JOINAT)->fixed = 1;
751
752 nvi = new_instruction(bld->pc, opcode);
753 nvi->is_terminator = 1;
754 nvi->cc = cc;
755 nvi->target = target;
756 if (src)
757 nvi->flags_src = new_ref(bld->pc, src);
758 }
759
760 static ubyte
761 translate_setcc(unsigned opcode)
762 {
763 switch (opcode) {
764 case TGSI_OPCODE_SLT: return NV_CC_LT;
765 case TGSI_OPCODE_SGE: return NV_CC_GE;
766 case TGSI_OPCODE_SEQ: return NV_CC_EQ;
767 case TGSI_OPCODE_SGT: return NV_CC_GT;
768 case TGSI_OPCODE_SLE: return NV_CC_LE;
769 case TGSI_OPCODE_SNE: return NV_CC_NE | NV_CC_U;
770 case TGSI_OPCODE_STR: return NV_CC_TR;
771 case TGSI_OPCODE_SFL: return NV_CC_FL;
772
773 case TGSI_OPCODE_ISLT: return NV_CC_LT;
774 case TGSI_OPCODE_ISGE: return NV_CC_GE;
775 case TGSI_OPCODE_USEQ: return NV_CC_EQ;
776 case TGSI_OPCODE_USGE: return NV_CC_GE;
777 case TGSI_OPCODE_USLT: return NV_CC_LT;
778 case TGSI_OPCODE_USNE: return NV_CC_NE;
779 default:
780 assert(0);
781 return NV_CC_FL;
782 }
783 }
784
785 static uint
786 translate_opcode(uint opcode)
787 {
788 switch (opcode) {
789 case TGSI_OPCODE_ABS: return NV_OP_ABS;
790 case TGSI_OPCODE_ADD:
791 case TGSI_OPCODE_SUB:
792 case TGSI_OPCODE_UADD: return NV_OP_ADD;
793 case TGSI_OPCODE_AND: return NV_OP_AND;
794 case TGSI_OPCODE_EX2: return NV_OP_EX2;
795 case TGSI_OPCODE_CEIL: return NV_OP_CEIL;
796 case TGSI_OPCODE_FLR: return NV_OP_FLOOR;
797 case TGSI_OPCODE_TRUNC: return NV_OP_TRUNC;
798 case TGSI_OPCODE_COS: return NV_OP_COS;
799 case TGSI_OPCODE_SIN: return NV_OP_SIN;
800 case TGSI_OPCODE_DDX: return NV_OP_DFDX;
801 case TGSI_OPCODE_DDY: return NV_OP_DFDY;
802 case TGSI_OPCODE_F2I:
803 case TGSI_OPCODE_F2U:
804 case TGSI_OPCODE_I2F:
805 case TGSI_OPCODE_U2F: return NV_OP_CVT;
806 case TGSI_OPCODE_INEG: return NV_OP_NEG;
807 case TGSI_OPCODE_LG2: return NV_OP_LG2;
808 case TGSI_OPCODE_ISHR:
809 case TGSI_OPCODE_USHR: return NV_OP_SHR;
810 case TGSI_OPCODE_MAD:
811 case TGSI_OPCODE_UMAD: return NV_OP_MAD;
812 case TGSI_OPCODE_MAX:
813 case TGSI_OPCODE_IMAX:
814 case TGSI_OPCODE_UMAX: return NV_OP_MAX;
815 case TGSI_OPCODE_MIN:
816 case TGSI_OPCODE_IMIN:
817 case TGSI_OPCODE_UMIN: return NV_OP_MIN;
818 case TGSI_OPCODE_MUL:
819 case TGSI_OPCODE_UMUL: return NV_OP_MUL;
820 case TGSI_OPCODE_OR: return NV_OP_OR;
821 case TGSI_OPCODE_RCP: return NV_OP_RCP;
822 case TGSI_OPCODE_RSQ: return NV_OP_RSQ;
823 case TGSI_OPCODE_SAD: return NV_OP_SAD;
824 case TGSI_OPCODE_SHL: return NV_OP_SHL;
825 case TGSI_OPCODE_SLT:
826 case TGSI_OPCODE_SGE:
827 case TGSI_OPCODE_SEQ:
828 case TGSI_OPCODE_SGT:
829 case TGSI_OPCODE_SLE:
830 case TGSI_OPCODE_SNE:
831 case TGSI_OPCODE_ISLT:
832 case TGSI_OPCODE_ISGE:
833 case TGSI_OPCODE_USEQ:
834 case TGSI_OPCODE_USGE:
835 case TGSI_OPCODE_USLT:
836 case TGSI_OPCODE_USNE: return NV_OP_SET;
837 case TGSI_OPCODE_TEX: return NV_OP_TEX;
838 case TGSI_OPCODE_TXP: return NV_OP_TEX;
839 case TGSI_OPCODE_TXB: return NV_OP_TXB;
840 case TGSI_OPCODE_TXL: return NV_OP_TXL;
841 case TGSI_OPCODE_XOR: return NV_OP_XOR;
842 default:
843 return NV_OP_NOP;
844 }
845 }
846
847 static ubyte
848 infer_src_type(unsigned opcode)
849 {
850 switch (opcode) {
851 case TGSI_OPCODE_MOV:
852 case TGSI_OPCODE_AND:
853 case TGSI_OPCODE_OR:
854 case TGSI_OPCODE_XOR:
855 case TGSI_OPCODE_SAD:
856 case TGSI_OPCODE_U2F:
857 case TGSI_OPCODE_UADD:
858 case TGSI_OPCODE_UDIV:
859 case TGSI_OPCODE_UMOD:
860 case TGSI_OPCODE_UMAD:
861 case TGSI_OPCODE_UMUL:
862 case TGSI_OPCODE_UMAX:
863 case TGSI_OPCODE_UMIN:
864 case TGSI_OPCODE_USEQ:
865 case TGSI_OPCODE_USGE:
866 case TGSI_OPCODE_USLT:
867 case TGSI_OPCODE_USNE:
868 case TGSI_OPCODE_USHR:
869 return NV_TYPE_U32;
870 case TGSI_OPCODE_I2F:
871 case TGSI_OPCODE_IDIV:
872 case TGSI_OPCODE_IMAX:
873 case TGSI_OPCODE_IMIN:
874 case TGSI_OPCODE_INEG:
875 case TGSI_OPCODE_ISGE:
876 case TGSI_OPCODE_ISHR:
877 case TGSI_OPCODE_ISLT:
878 return NV_TYPE_S32;
879 default:
880 return NV_TYPE_F32;
881 }
882 }
883
884 static ubyte
885 infer_dst_type(unsigned opcode)
886 {
887 switch (opcode) {
888 case TGSI_OPCODE_MOV:
889 case TGSI_OPCODE_F2U:
890 case TGSI_OPCODE_AND:
891 case TGSI_OPCODE_OR:
892 case TGSI_OPCODE_XOR:
893 case TGSI_OPCODE_SAD:
894 case TGSI_OPCODE_UADD:
895 case TGSI_OPCODE_UDIV:
896 case TGSI_OPCODE_UMOD:
897 case TGSI_OPCODE_UMAD:
898 case TGSI_OPCODE_UMUL:
899 case TGSI_OPCODE_UMAX:
900 case TGSI_OPCODE_UMIN:
901 case TGSI_OPCODE_USEQ:
902 case TGSI_OPCODE_USGE:
903 case TGSI_OPCODE_USLT:
904 case TGSI_OPCODE_USNE:
905 case TGSI_OPCODE_USHR:
906 return NV_TYPE_U32;
907 case TGSI_OPCODE_F2I:
908 case TGSI_OPCODE_IDIV:
909 case TGSI_OPCODE_IMAX:
910 case TGSI_OPCODE_IMIN:
911 case TGSI_OPCODE_INEG:
912 case TGSI_OPCODE_ISGE:
913 case TGSI_OPCODE_ISHR:
914 case TGSI_OPCODE_ISLT:
915 return NV_TYPE_S32;
916 default:
917 return NV_TYPE_F32;
918 }
919 }
920
921 static void
922 emit_store(struct bld_context *bld, const struct tgsi_full_instruction *inst,
923 unsigned chan, struct nv_value *value)
924 {
925 struct nv_value *ptr;
926 const struct tgsi_full_dst_register *reg = &inst->Dst[0];
927
928 if (reg->Register.Indirect) {
929 ptr = FETCH_ADDR(reg->Indirect.Index,
930 tgsi_util_get_src_register_swizzle(&reg->Indirect, 0));
931 } else {
932 ptr = NULL;
933 }
934
935 assert(chan < 4);
936
937 if (inst->Instruction.Opcode != TGSI_OPCODE_MOV)
938 value->reg.type = infer_dst_type(inst->Instruction.Opcode);
939
940 switch (inst->Instruction.Saturate) {
941 case TGSI_SAT_NONE:
942 break;
943 case TGSI_SAT_ZERO_ONE:
944 BLD_INSN_1_EX(value, SAT, F32, value, F32);
945 break;
946 case TGSI_SAT_MINUS_PLUS_ONE:
947 value->reg.as_type = NV_TYPE_F32;
948 value = bld_insn_2(bld, NV_OP_MAX, value, bld_load_imm_f32(bld, -1.0f));
949 value = bld_insn_2(bld, NV_OP_MIN, value, bld_load_imm_f32(bld, 1.0f));
950 break;
951 }
952
953 switch (reg->Register.File) {
954 case TGSI_FILE_OUTPUT:
955 if (!value->insn && (bld->ti->output_file == NV_FILE_OUT))
956 value = bld_insn_1(bld, NV_OP_MOV, value);
957 value = bld_insn_1(bld, NV_OP_MOV, value);
958 value->reg.file = bld->ti->output_file;
959
960 if (bld->ti->p->type == PIPE_SHADER_FRAGMENT) {
961 STORE_OUTR(reg->Register.Index, chan, value);
962 } else {
963 value->insn->fixed = 1;
964 value->reg.id = bld->ti->output_map[reg->Register.Index][chan];
965 }
966 break;
967 case TGSI_FILE_TEMPORARY:
968 assert(reg->Register.Index < BLD_MAX_TEMPS);
969 if (!value->insn || (value->insn->bb != bld->pc->current_block))
970 value = bld_insn_1(bld, NV_OP_MOV, value);
971 value->reg.file = NV_FILE_GPR;
972
973 if (bld->ti->store_to_memory)
974 bld_lmem_store(bld, ptr, reg->Register.Index * 4 + chan, value);
975 else
976 STORE_TEMP(reg->Register.Index, chan, value);
977 break;
978 case TGSI_FILE_ADDRESS:
979 assert(reg->Register.Index < BLD_MAX_ADDRS);
980 value->reg.file = NV_FILE_ADDR;
981 value->reg.type = NV_TYPE_U16;
982 STORE_ADDR(reg->Register.Index, chan, value);
983 break;
984 }
985 }
986
987 static INLINE uint32_t
988 bld_is_output_written(struct bld_context *bld, int i, int c)
989 {
990 if (c < 0)
991 return bld->outputs_written[i / 8] & (0xf << ((i * 4) % 32));
992 return bld->outputs_written[i / 8] & (1 << ((i * 4 + c) % 32));
993 }
994
995 static void
996 bld_export_outputs(struct bld_context *bld)
997 {
998 struct nv_value *vals[4];
999 struct nv_instruction *nvi;
1000 int i, c, n;
1001
1002 bld_push_values(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);
1003
1004 for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; ++i) {
1005 if (!bld_is_output_written(bld, i, -1))
1006 continue;
1007 for (n = 0, c = 0; c < 4; ++c) {
1008 if (!bld_is_output_written(bld, i, c))
1009 continue;
1010 vals[n] = bld_fetch_global(bld, &bld->ovs[i][c]);
1011 assert(vals[n]);
1012 vals[n] = bld_insn_1(bld, NV_OP_MOV, vals[n]);
1013 vals[n++]->reg.id = bld->ti->output_map[i][c];
1014 }
1015 assert(n);
1016
1017 (nvi = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;
1018
1019 for (c = 0; c < n; ++c)
1020 nvi->src[c] = new_ref(bld->pc, vals[c]);
1021 }
1022 }
1023
1024 static void
1025 bld_new_block(struct bld_context *bld, struct nv_basic_block *b)
1026 {
1027 int i;
1028
1029 bld_push_values(&bld->tvs[0][0], BLD_MAX_TEMPS);
1030 bld_push_values(&bld->avs[0][0], BLD_MAX_ADDRS);
1031 bld_push_values(&bld->pvs[0][0], BLD_MAX_PREDS);
1032 bld_push_values(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);
1033
1034 bld->pc->current_block = b;
1035
1036 for (i = 0; i < 4; ++i)
1037 bld->saved_addr[i][0] = NULL;
1038
1039 for (i = 0; i < 128; ++i)
1040 bld->saved_inputs[i] = NULL;
1041
1042 bld->out_kind = CFG_EDGE_FORWARD;
1043 }
1044
1045 static struct nv_value *
1046 bld_saved_input(struct bld_context *bld, unsigned i, unsigned c)
1047 {
1048 unsigned idx = bld->ti->input_map[i][c];
1049
1050 if (bld->ti->p->type != PIPE_SHADER_FRAGMENT)
1051 return NULL;
1052 if (bld->saved_inputs[idx])
1053 return bld->saved_inputs[idx];
1054 return NULL;
1055 }
1056
1057 static struct nv_value *
1058 bld_interpolate(struct bld_context *bld, unsigned mode, struct nv_value *val)
1059 {
1060 if (val->reg.id == 255) {
1061 /* gl_FrontFacing: 0/~0 to -1.0/+1.0 */
1062 val = bld_insn_1(bld, NV_OP_LINTERP, val);
1063 val = bld_insn_2(bld, NV_OP_SHL, val, bld_imm_u32(bld, 31));
1064 val->insn->src[0]->typecast = NV_TYPE_U32;
1065 val = bld_insn_2(bld, NV_OP_XOR, val, bld_imm_f32(bld, -1.0f));
1066 val->insn->src[0]->typecast = NV_TYPE_U32;
1067 } else
1068 if (mode & (NV50_INTERP_LINEAR | NV50_INTERP_FLAT))
1069 val = bld_insn_1(bld, NV_OP_LINTERP, val);
1070 else
1071 val = bld_insn_2(bld, NV_OP_PINTERP, val, bld->frgcrd[3]);
1072
1073 val->insn->flat = (mode & NV50_INTERP_FLAT) ? 1 : 0;
1074 val->insn->centroid = (mode & NV50_INTERP_CENTROID) ? 1 : 0;
1075 return val;
1076 }
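/* Worked example of the gl_FrontFacing conversion above: LINTERP yields
 * 0x00000000 or 0xffffffff; SHL by 31 makes that 0x00000000 or
 * 0x80000000; XOR with -1.0f (0xbf800000) then yields 0xbf800000 (-1.0)
 * resp. 0x3f800000 (+1.0).
 */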
1077
1078 static struct nv_value *
1079 emit_fetch(struct bld_context *bld, const struct tgsi_full_instruction *insn,
1080 const unsigned s, const unsigned chan)
1081 {
1082 const struct tgsi_full_src_register *src = &insn->Src[s];
1083 struct nv_value *res;
1084 struct nv_value *ptr = NULL;
1085 unsigned idx, swz, dim_idx, ind_idx, ind_swz, sgn;
1086 ubyte type = infer_src_type(insn->Instruction.Opcode);
1087
1088 idx = src->Register.Index;
1089 swz = tgsi_util_get_full_src_register_swizzle(src, chan);
1090 dim_idx = -1;
1091 ind_idx = -1;
1092 ind_swz = 0;
1093
1094 if (src->Register.Indirect) {
1095 ind_idx = src->Indirect.Index;
1096 ind_swz = tgsi_util_get_src_register_swizzle(&src->Indirect, 0);
1097
1098 ptr = FETCH_ADDR(ind_idx, ind_swz);
1099 }
1100 if (idx >= (128 / 4) && src->Register.File == TGSI_FILE_CONSTANT)
1101 ptr = bld_get_address(bld, (idx * 16) & ~0x1ff, ptr);
1102
1103 switch (src->Register.File) {
1104 case TGSI_FILE_CONSTANT:
1105 dim_idx = src->Dimension.Index ? src->Dimension.Index + 2 : 1;
1106 assert(dim_idx < 14);
1107 assert(dim_idx == 1); /* for now */
1108
1109 res = new_value(bld->pc, NV_FILE_MEM_C(dim_idx), type);
1110 SET_TYPE(res, type);
1111 res->reg.id = (idx * 4 + swz) & 127;
1112 res = bld_insn_1(bld, NV_OP_LDA, res);
1113
1114 if (ptr)
1115 res->insn->src[4] = new_ref(bld->pc, ptr);
1116 break;
1117 case TGSI_FILE_IMMEDIATE:
1118 assert(idx < bld->ti->immd32_nr);
1119 res = bld_load_imm_u32(bld, bld->ti->immd32[idx * 4 + swz]);
1120
1121 switch (bld->ti->immd32_ty[idx]) {
1122 case TGSI_IMM_FLOAT32: SET_TYPE(res, NV_TYPE_F32); break;
1123 case TGSI_IMM_UINT32: SET_TYPE(res, NV_TYPE_U32); break;
1124 case TGSI_IMM_INT32: SET_TYPE(res, NV_TYPE_S32); break;
1125 default:
1126 SET_TYPE(res, type);
1127 break;
1128 }
1129 break;
1130 case TGSI_FILE_INPUT:
1131 res = bld_saved_input(bld, idx, swz);
1132 if (res && (insn->Instruction.Opcode != TGSI_OPCODE_TXP))
1133 return res;
1134
1135 res = new_value(bld->pc, bld->ti->input_file, type);
1136 res->reg.id = bld->ti->input_map[idx][swz];
1137
1138 if (res->reg.file == NV_FILE_MEM_V) {
1139 res = bld_interpolate(bld, bld->ti->interp_mode[idx], res);
1140 } else {
1141 assert(src->Dimension.Dimension == 0);
1142 res = bld_insn_1(bld, NV_OP_LDA, res);
1143 assert(res->reg.type == type);
1144 }
1145 bld->saved_inputs[bld->ti->input_map[idx][swz]] = res;
1146 break;
1147 case TGSI_FILE_TEMPORARY:
1148 if (bld->ti->store_to_memory)
1149 res = bld_lmem_load(bld, ptr, idx * 4 + swz);
1150 else
1151 res = bld_fetch_global(bld, &bld->tvs[idx][swz]);
1152 break;
1153 case TGSI_FILE_ADDRESS:
1154 res = bld_fetch_global(bld, &bld->avs[idx][swz]);
1155 break;
1156 case TGSI_FILE_PREDICATE:
1157 res = bld_fetch_global(bld, &bld->pvs[idx][swz]);
1158 break;
1159 default:
1160 NOUVEAU_ERR("illegal/unhandled src reg file: %d\n", src->Register.File);
1161 abort();
1162 break;
1163 }
1164 if (!res)
1165 return bld_undef(bld, NV_FILE_GPR);
1166
1167 sgn = tgsi_util_get_full_src_register_sign_mode(src, chan);
1168
1169 if (insn->Instruction.Opcode != TGSI_OPCODE_MOV)
1170 res->reg.as_type = type;
1171 else
1172 if (sgn != TGSI_UTIL_SIGN_KEEP) /* apparently "MOV A, -B" assumes float */
1173 res->reg.as_type = NV_TYPE_F32;
1174
1175 switch (sgn) {
1176 case TGSI_UTIL_SIGN_KEEP:
1177 break;
1178 case TGSI_UTIL_SIGN_CLEAR:
1179 res = bld_insn_1(bld, NV_OP_ABS, res);
1180 break;
1181 case TGSI_UTIL_SIGN_TOGGLE:
1182 res = bld_insn_1(bld, NV_OP_NEG, res);
1183 break;
1184 case TGSI_UTIL_SIGN_SET:
1185 res = bld_insn_1(bld, NV_OP_ABS, res);
1186 res = bld_insn_1(bld, NV_OP_NEG, res);
1187 break;
1188 default:
1189 NOUVEAU_ERR("illegal/unhandled src reg sign mode\n");
1190 abort();
1191 break;
1192 }
1193
1194 return res;
1195 }
1196
1197 static void
1198 bld_lit(struct bld_context *bld, struct nv_value *dst0[4],
1199 const struct tgsi_full_instruction *insn)
1200 {
1201 struct nv_value *val0 = NULL, *zero = NULL; /* written iff the y or z write mask bits are set; NULL silences 'may be used uninitialized' */
1202 unsigned mask = insn->Dst[0].Register.WriteMask;
1203
1204 if (mask & ((1 << 0) | (1 << 3)))
1205 dst0[3] = dst0[0] = bld_load_imm_f32(bld, 1.0f);
1206
1207 if (mask & (3 << 1)) {
1208 zero = bld_load_imm_f32(bld, 0.0f);
1209 val0 = bld_insn_2(bld, NV_OP_MAX, emit_fetch(bld, insn, 0, 0), zero);
1210
1211 if (mask & (1 << 1))
1212 dst0[1] = val0;
1213 }
1214
1215 if (mask & (1 << 2)) {
1216 struct nv_value *val1, *val3, *src1, *src3;
1217 struct nv_value *pos128 = bld_load_imm_f32(bld, 127.999999f);
1218 struct nv_value *neg128 = bld_load_imm_f32(bld, -127.999999f);
1219
1220 src1 = emit_fetch(bld, insn, 0, 1);
1221 src3 = emit_fetch(bld, insn, 0, 3);
1222
1223 val0->insn->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
1224 val0->insn->flags_def->insn = val0->insn;
1225
1226 val1 = bld_insn_2(bld, NV_OP_MAX, src1, zero);
1227 val3 = bld_insn_2(bld, NV_OP_MAX, src3, neg128);
1228 val3 = bld_insn_2(bld, NV_OP_MIN, val3, pos128);
1229 val3 = bld_pow(bld, val1, val3);
1230
1231 dst0[2] = bld_insn_1(bld, NV_OP_MOV, zero);
1232 dst0[2]->insn->cc = NV_CC_LE;
1233 dst0[2]->insn->flags_src = new_ref(bld->pc, val0->insn->flags_def);
1234
1235 dst0[2] = bld_insn_2(bld, NV_OP_SELECT, val3, dst0[2]);
1236 }
1237 }
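/* For reference, TGSI LIT is roughly dst = (1, max(x, 0),
 * x > 0 ? max(y, 0)^clamp(w, -128, 128) : 0, 1); the flags_def /
 * NV_CC_LE select above implements the "x > 0" condition without a
 * branch.
 */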
1238
1239 static INLINE void
1240 get_tex_dim(const struct tgsi_full_instruction *insn, int *dim, int *arg)
1241 {
1242 switch (insn->Texture.Texture) {
1243 case TGSI_TEXTURE_1D:
1244 *arg = *dim = 1;
1245 break;
1246 case TGSI_TEXTURE_SHADOW1D:
1247 *dim = 1;
1248 *arg = 2;
1249 break;
1250 case TGSI_TEXTURE_UNKNOWN:
1251 case TGSI_TEXTURE_2D:
1252 case TGSI_TEXTURE_RECT:
1253 *arg = *dim = 2;
1254 break;
1255 case TGSI_TEXTURE_SHADOW2D:
1256 case TGSI_TEXTURE_SHADOWRECT:
1257 *dim = 2;
1258 *arg = 3;
1259 break;
1260 case TGSI_TEXTURE_3D:
1261 case TGSI_TEXTURE_CUBE:
1262 *dim = *arg = 3;
1263 break;
1264 default:
1265 assert(0);
1266 break;
1267 }
1268 }
1269
1270 static void
1271 load_proj_tex_coords(struct bld_context *bld,
1272 struct nv_value *t[4], int dim, int arg,
1273 const struct tgsi_full_instruction *insn)
1274 {
1275 int c, mask;
1276
1277 mask = (1 << dim) - 1;
1278 if (arg != dim)
1279 mask |= 4; /* depth comparison value */
1280
1281 t[3] = emit_fetch(bld, insn, 0, 3);
1282
1283 if (t[3]->insn->opcode == NV_OP_PINTERP) {
1284 t[3] = bld_duplicate_insn(bld, t[3]->insn);
1285 t[3]->insn->opcode = NV_OP_LINTERP;
1286 nv_reference(bld->pc, &t[3]->insn->src[1], NULL);
1287 }
1288
1289 t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);
1290
1291 for (c = 0; c < 4; ++c) {
1292 if (!(mask & (1 << c)))
1293 continue;
1294 t[c] = emit_fetch(bld, insn, 0, c);
1295
1296 if (t[c]->insn->opcode != NV_OP_LINTERP &&
1297 t[c]->insn->opcode != NV_OP_PINTERP)
1298 continue;
1299 t[c] = bld_duplicate_insn(bld, t[c]->insn);
1300 t[c]->insn->opcode = NV_OP_PINTERP;
1301 nv_reference(bld->pc, &t[c]->insn->src[1], t[3]);
1302
1303 mask &= ~(1 << c);
1304 }
1305
1306 for (c = 0; mask; ++c, mask >>= 1) {
1307 if (!(mask & 1))
1308 continue;
1309 t[c] = bld_insn_2(bld, NV_OP_MUL, t[c], t[3]);
1310 }
1311 }
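/* Projective texturing divides the coordinates by q (src.w): q is
 * re-fetched as a linear interpolant, inverted with RCP, and then
 * either folded into PINTERP (which presumably takes 1/w-style scale
 * factors in src[1]) or applied as an explicit MUL for coordinates
 * that aren't interpolants.
 */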
1312
1313 /* For a quad of threads (top-left, top-right, bottom-left, bottom-right
1314  * pixels), do a possibly different operation per lane, and take src0
1315  * from one specific lane of the quad. */
1316 #define QOP_ADD 0
1317 #define QOP_SUBR 1
1318 #define QOP_SUB 2
1319 #define QOP_MOV1 3
1320
1321 #define QOP(a, b, c, d) \
1322 ((QOP_##a << 0) | (QOP_##b << 2) | (QOP_##c << 4) | (QOP_##d << 6))
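/* E.g. QOP(SUBR, SUBR, SUBR, SUBR) = 0x55: every lane computes the
 * reversed subtraction src1 - src0 (presumably), with src0 broadcast
 * from the lane selected via nv_instruction::lanes.
 */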
1323
1324 static INLINE struct nv_value *
1325 bld_quadop(struct bld_context *bld, ubyte qop, struct nv_value *src0, int lane,
1326 struct nv_value *src1, boolean wp)
1327 {
1328 struct nv_value *val = bld_insn_2(bld, NV_OP_QUADOP, src0, src1);
1329 val->insn->lanes = lane;
1330 val->insn->quadop = qop;
1331 if (wp) {
1332 val->insn->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
1333 val->insn->flags_def->insn = val->insn;
1334 }
1335 return val;
1336 }
1337
1338 static INLINE struct nv_value *
1339 bld_cmov(struct bld_context *bld,
1340 struct nv_value *src, ubyte cc, struct nv_value *cr)
1341 {
1342 src = bld_insn_1(bld, NV_OP_MOV, src);
1343
1344 src->insn->cc = cc;
1345 src->insn->flags_src = new_ref(bld->pc, cr);
1346
1347 return src;
1348 }
1349
1350 static struct nv_instruction *
1351 emit_tex(struct bld_context *bld, uint opcode,
1352 struct nv_value *dst[4], struct nv_value *t_in[4],
1353 int argc, int tic, int tsc, int cube)
1354 {
1355 struct nv_value *t[4];
1356 struct nv_instruction *nvi;
1357 int c;
1358
1359 /* the inputs to a tex instruction must be separate values */
1360 for (c = 0; c < argc; ++c) {
1361 t[c] = bld_insn_1(bld, NV_OP_MOV, t_in[c]);
1362 SET_TYPE(t[c], NV_TYPE_F32);
1363 t[c]->insn->fixed = 1;
1364 }
1365
1366 nvi = new_instruction(bld->pc, opcode);
1367
1368 for (c = 0; c < 4; ++c)
1369 dst[c] = bld_def(nvi, c, new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32));
1370
1371 for (c = 0; c < argc; ++c)
1372 nvi->src[c] = new_ref(bld->pc, t[c]);
1373
1374 nvi->tex_t = tic;
1375 nvi->tex_s = tsc;
1376 nvi->tex_mask = 0xf;
1377 nvi->tex_cube = cube;
1378 nvi->tex_live = 0;
1379 nvi->tex_argc = argc;
1380
1381 return nvi;
1382 }
1383
1384 static void
1385 bld_texlod_sequence(struct bld_context *bld,
1386 struct nv_value *dst[4], struct nv_value *t[4], int arg,
1387 int tic, int tsc, int cube)
1388 {
1389 emit_tex(bld, NV_OP_TXL, dst, t, arg, tic, tsc, cube); /* TODO */
1390 }
1391
1392
1393 /* The lanes of a quad are grouped by the bit in the condition register
1394 * they have set, which is selected by differing bias values.
1395 * Move the input values for TEX into a new register set for each group
1396 * and execute TEX only for a specific group.
1397 * We always need to use 4 new registers for the inputs/outputs because
1398 * the implicitly calculated derivatives must be correct.
1399 */
1400 static void
1401 bld_texbias_sequence(struct bld_context *bld,
1402 struct nv_value *dst[4], struct nv_value *t[4], int arg,
1403 int tic, int tsc, int cube)
1404 {
1405 struct nv_instruction *sel, *tex;
1406 struct nv_value *bit[4], *cr[4], *res[4][4], *val;
1407 int l, c;
1408
1409 const ubyte cc[4] = { NV_CC_EQ, NV_CC_S, NV_CC_C, NV_CC_O };
1410
1411 for (l = 0; l < 4; ++l) {
1412 bit[l] = bld_load_imm_u32(bld, 1 << l);
1413
1414 val = bld_quadop(bld, QOP(SUBR, SUBR, SUBR, SUBR),
1415 t[arg - 1], l, t[arg - 1], TRUE);
1416
1417 cr[l] = bld_cmov(bld, bit[l], NV_CC_EQ, val->insn->flags_def);
1418
1419 cr[l]->reg.file = NV_FILE_FLAGS;
1420 SET_TYPE(cr[l], NV_TYPE_U16);
1421 }
1422
1423 sel = new_instruction(bld->pc, NV_OP_SELECT);
1424
1425 for (l = 0; l < 4; ++l)
1426 sel->src[l] = new_ref(bld->pc, cr[l]);
1427
1428 bld_def(sel, 0, new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16));
1429
1430 for (l = 0; l < 4; ++l) {
1431 tex = emit_tex(bld, NV_OP_TXB, dst, t, arg, tic, tsc, cube);
1432
1433 tex->cc = cc[l];
1434 tex->flags_src = new_ref(bld->pc, sel->def[0]);
1435
1436 for (c = 0; c < 4; ++c)
1437 res[l][c] = tex->def[c];
1438 }
1439
1440 for (l = 0; l < 4; ++l)
1441 for (c = 0; c < 4; ++c)
1442 res[l][c] = bld_cmov(bld, res[l][c], cc[l], sel->def[0]);
1443
1444 for (c = 0; c < 4; ++c) {
1445 sel = new_instruction(bld->pc, NV_OP_SELECT);
1446
1447 for (l = 0; l < 4; ++l)
1448 sel->src[l] = new_ref(bld->pc, res[l][c]);
1449
1450 bld_def(sel, 0, (dst[c] = new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32)));
1451 }
1452 }
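/* The cc[] table above presumably matches the flag bits set by
 * selecting one of the 1 << l values (zero/sign/carry/overflow), so
 * each of the four predicated TXBs, and each per-component SELECT,
 * fires for exactly one group of lanes sharing a bias value.
 */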
1453
1454 static boolean
1455 bld_is_constant(struct nv_value *val)
1456 {
1457 if (val->reg.file == NV_FILE_IMM)
1458 return TRUE;
1459 return val->insn && nvcg_find_constant(val->insn->src[0]);
1460 }
1461
1462 static void
1463 bld_tex(struct bld_context *bld, struct nv_value *dst0[4],
1464 const struct tgsi_full_instruction *insn)
1465 {
1466 struct nv_value *t[4], *s[3];
1467 uint opcode = translate_opcode(insn->Instruction.Opcode);
1468 int arg, dim, c;
1469 const int tic = insn->Src[1].Register.Index;
1470 const int tsc = 0;
1471 const int cube = (insn->Texture.Texture == TGSI_TEXTURE_CUBE) ? 1 : 0;
1472
1473 get_tex_dim(insn, &dim, &arg);
1474
1475 if (!cube && insn->Instruction.Opcode == TGSI_OPCODE_TXP)
1476 load_proj_tex_coords(bld, t, dim, arg, insn);
1477 else {
1478 for (c = 0; c < dim; ++c)
1479 t[c] = emit_fetch(bld, insn, 0, c);
1480 if (arg != dim)
1481 t[dim] = emit_fetch(bld, insn, 0, 2);
1482 }
1483
1484 if (cube) {
1485 assert(dim >= 3);
1486 for (c = 0; c < 3; ++c)
1487 s[c] = bld_insn_1(bld, NV_OP_ABS, t[c]);
1488
1489 s[0] = bld_insn_2(bld, NV_OP_MAX, s[0], s[1]);
1490 s[0] = bld_insn_2(bld, NV_OP_MAX, s[0], s[2]);
1491 s[0] = bld_insn_1(bld, NV_OP_RCP, s[0]);
1492
1493 for (c = 0; c < 3; ++c)
1494 t[c] = bld_insn_2(bld, NV_OP_MUL, t[c], s[0]);
1495 }
1496
1497 if (opcode == NV_OP_TXB || opcode == NV_OP_TXL) {
1498 t[arg++] = emit_fetch(bld, insn, 0, 3);
1499
1500 if ((bld->ti->p->type == PIPE_SHADER_FRAGMENT) &&
1501 !bld_is_constant(t[arg - 1])) {
1502 if (opcode == NV_OP_TXB)
1503 bld_texbias_sequence(bld, dst0, t, arg, tic, tsc, cube);
1504 else
1505 bld_texlod_sequence(bld, dst0, t, arg, tic, tsc, cube);
1506 return;
1507 }
1508 }
1509
1510 emit_tex(bld, opcode, dst0, t, arg, tic, tsc, cube);
1511 }
1512
1513 static INLINE struct nv_value *
1514 bld_dot(struct bld_context *bld, const struct tgsi_full_instruction *insn,
1515 int n)
1516 {
1517 struct nv_value *dotp, *src0, *src1;
1518 int c;
1519
1520 src0 = emit_fetch(bld, insn, 0, 0);
1521 src1 = emit_fetch(bld, insn, 1, 0);
1522 dotp = bld_insn_2(bld, NV_OP_MUL, src0, src1);
1523
1524 for (c = 1; c < n; ++c) {
1525 src0 = emit_fetch(bld, insn, 0, c);
1526 src1 = emit_fetch(bld, insn, 1, c);
1527 dotp = bld_insn_3(bld, NV_OP_MAD, src0, src1, dotp);
1528 }
1529 return dotp;
1530 }
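/* A DPn thus expands to one MUL and n - 1 MADs, e.g. for DP3:
 * dotp = x0 * x1; dotp = y0 * y1 + dotp; dotp = z0 * z1 + dotp.
 */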
1531
1532 #define FOR_EACH_DST0_ENABLED_CHANNEL(chan, inst) \
1533 for (chan = 0; chan < 4; ++chan) \
1534 if ((inst)->Dst[0].Register.WriteMask & (1 << chan))
1535
1536 static void
1537 bld_instruction(struct bld_context *bld,
1538 const struct tgsi_full_instruction *insn)
1539 {
1540 struct nv_value *src0;
1541 struct nv_value *src1;
1542 struct nv_value *src2;
1543 struct nv_value *dst0[4] = { 0 };
1544 struct nv_value *temp;
1545 int c;
1546 uint opcode = translate_opcode(insn->Instruction.Opcode);
1547
1548 #ifdef NV50_TGSI2NC_DEBUG
1549 debug_printf("bld_instruction:"); tgsi_dump_instruction(insn, 1);
1550 #endif
1551
1552 switch (insn->Instruction.Opcode) {
1553 case TGSI_OPCODE_ADD:
1554 case TGSI_OPCODE_MAX:
1555 case TGSI_OPCODE_MIN:
1556 case TGSI_OPCODE_MUL:
1557 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1558 src0 = emit_fetch(bld, insn, 0, c);
1559 src1 = emit_fetch(bld, insn, 1, c);
1560 dst0[c] = bld_insn_2(bld, opcode, src0, src1);
1561 }
1562 break;
1563 case TGSI_OPCODE_ARL:
1564 src1 = bld_imm_u32(bld, 4);
1565 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1566 src0 = emit_fetch(bld, insn, 0, c);
1567 temp = bld_insn_1(bld, NV_OP_FLOOR, src0);
1568 SET_TYPE(temp, NV_TYPE_S32);
1569 dst0[c] = bld_insn_2(bld, NV_OP_SHL, temp, src1);
1570 }
1571 break;
1572 case TGSI_OPCODE_CMP:
1573 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1574 src0 = emit_fetch(bld, insn, 0, c);
1575 src1 = emit_fetch(bld, insn, 1, c);
1576 src2 = emit_fetch(bld, insn, 2, c);
1577 src0 = bld_predicate(bld, src0, FALSE);
1578
1579 src1 = bld_insn_1(bld, NV_OP_MOV, src1);
1580 src1->insn->flags_src = new_ref(bld->pc, src0);
1581 src1->insn->cc = NV_CC_LT;
1582
1583 src2 = bld_insn_1(bld, NV_OP_MOV, src2);
1584 src2->insn->flags_src = new_ref(bld->pc, src0);
1585 src2->insn->cc = NV_CC_GE;
1586
1587 dst0[c] = bld_insn_2(bld, NV_OP_SELECT, src1, src2);
1588 }
1589 break;
1590 case TGSI_OPCODE_COS:
1591 case TGSI_OPCODE_SIN:
1592 src0 = emit_fetch(bld, insn, 0, 0);
1593 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1594 if (insn->Dst[0].Register.WriteMask & 7)
1595 temp = bld_insn_1(bld, opcode, temp);
1596 for (c = 0; c < 3; ++c)
1597 if (insn->Dst[0].Register.WriteMask & (1 << c))
1598 dst0[c] = temp;
1599 if (!(insn->Dst[0].Register.WriteMask & (1 << 3)))
1600 break;
1601 src0 = emit_fetch(bld, insn, 0, 3);
1602 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1603 dst0[3] = bld_insn_1(bld, opcode, temp);
1604 break;
1605 case TGSI_OPCODE_DP2:
1606 temp = bld_dot(bld, insn, 2);
1607 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1608 dst0[c] = temp;
1609 break;
1610 case TGSI_OPCODE_DP3:
1611 temp = bld_dot(bld, insn, 3);
1612 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1613 dst0[c] = temp;
1614 break;
1615 case TGSI_OPCODE_DP4:
1616 temp = bld_dot(bld, insn, 4);
1617 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1618 dst0[c] = temp;
1619 break;
1620 case TGSI_OPCODE_DPH:
1621 src0 = bld_dot(bld, insn, 3);
1622 src1 = emit_fetch(bld, insn, 1, 3);
1623 temp = bld_insn_2(bld, NV_OP_ADD, src0, src1);
1624 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1625 dst0[c] = temp;
1626 break;
1627 case TGSI_OPCODE_DST:
1628 if (insn->Dst[0].Register.WriteMask & 1)
1629 dst0[0] = bld_imm_f32(bld, 1.0f);
1630 if (insn->Dst[0].Register.WriteMask & 2) {
1631 src0 = emit_fetch(bld, insn, 0, 1);
1632 src1 = emit_fetch(bld, insn, 1, 1);
1633 dst0[1] = bld_insn_2(bld, NV_OP_MUL, src0, src1);
1634 }
1635 if (insn->Dst[0].Register.WriteMask & 4)
1636 dst0[2] = emit_fetch(bld, insn, 0, 2);
1637 if (insn->Dst[0].Register.WriteMask & 8)
1638 dst0[3] = emit_fetch(bld, insn, 1, 3);
1639 break;
1640 case TGSI_OPCODE_EXP:
1641 src0 = emit_fetch(bld, insn, 0, 0);
1642 temp = bld_insn_1(bld, NV_OP_FLOOR, src0);
1643
1644 if (insn->Dst[0].Register.WriteMask & 2)
1645 dst0[1] = bld_insn_2(bld, NV_OP_SUB, src0, temp);
1646 if (insn->Dst[0].Register.WriteMask & 1) {
1647 temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
1648 dst0[0] = bld_insn_1(bld, NV_OP_EX2, temp);
1649 }
1650 if (insn->Dst[0].Register.WriteMask & 4) {
1651 temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
1652 dst0[2] = bld_insn_1(bld, NV_OP_EX2, temp);
1653 }
1654 if (insn->Dst[0].Register.WriteMask & 8)
1655 dst0[3] = bld_imm_f32(bld, 1.0f);
1656 break;
1657 case TGSI_OPCODE_EX2:
1658 src0 = emit_fetch(bld, insn, 0, 0);
1659 temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
1660 temp = bld_insn_1(bld, NV_OP_EX2, temp);
1661 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1662 dst0[c] = temp;
1663 break;
1664 case TGSI_OPCODE_FRC:
1665 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1666 src0 = emit_fetch(bld, insn, 0, c);
1667 dst0[c] = bld_insn_1(bld, NV_OP_FLOOR, src0);
1668 dst0[c] = bld_insn_2(bld, NV_OP_SUB, src0, dst0[c]);
1669 }
1670 break;
1671 case TGSI_OPCODE_KIL:
1672 for (c = 0; c < 4; ++c) {
1673 src0 = emit_fetch(bld, insn, 0, c);
1674 bld_kil(bld, src0);
1675 }
1676 break;
1677 case TGSI_OPCODE_KILP:
1678 (new_instruction(bld->pc, NV_OP_KIL))->fixed = 1;
1679 break;
1680 case TGSI_OPCODE_IF:
1681 {
1682 struct nv_basic_block *b = new_basic_block(bld->pc);
1683
1684 assert(bld->cond_lvl < BLD_MAX_COND_NESTING);
1685
1686 nvbb_attach_block(bld->pc->current_block, b, CFG_EDGE_FORWARD);
1687
1688 bld->join_bb[bld->cond_lvl] = bld->pc->current_block;
1689 bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;
1690
1691 src1 = bld_predicate(bld, emit_fetch(bld, insn, 0, 0), TRUE);
1692
1693 bld_flow(bld, NV_OP_BRA, NV_CC_EQ, src1, NULL, (bld->cond_lvl == 0));
1694
1695 ++bld->cond_lvl;
1696 bld_new_block(bld, b);
1697 }
1698 break;
1699 case TGSI_OPCODE_ELSE:
1700 {
1701 struct nv_basic_block *b = new_basic_block(bld->pc);
1702
1703 --bld->cond_lvl;
1704 nvbb_attach_block(bld->join_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);
1705
1706 bld->cond_bb[bld->cond_lvl]->exit->target = b;
1707 bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;
1708
1709 new_instruction(bld->pc, NV_OP_BRA)->is_terminator = 1;
1710
1711 ++bld->cond_lvl;
1712 bld_new_block(bld, b);
1713 }
1714 break;
1715 case TGSI_OPCODE_ENDIF:
1716 {
1717 struct nv_basic_block *b = new_basic_block(bld->pc);
1718
1719 --bld->cond_lvl;
1720 nvbb_attach_block(bld->pc->current_block, b, bld->out_kind);
1721 nvbb_attach_block(bld->cond_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);
1722
1723 bld->cond_bb[bld->cond_lvl]->exit->target = b;
1724
1725 bld_new_block(bld, b);
1726
1727 if (!bld->cond_lvl && bld->join_bb[bld->cond_lvl]) {
1728 bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
1729 new_instruction(bld->pc, NV_OP_JOIN)->is_join = TRUE;
1730 }
1731 }
1732 break;
1733 case TGSI_OPCODE_BGNLOOP:
1734 {
1735 struct nv_basic_block *bl = new_basic_block(bld->pc);
1736 struct nv_basic_block *bb = new_basic_block(bld->pc);
1737
1738 assert(bld->loop_lvl < BLD_MAX_LOOP_NESTING);
1739
1740 bld->loop_bb[bld->loop_lvl] = bl;
1741 bld->brkt_bb[bld->loop_lvl] = bb;
1742
1743 bld_flow(bld, NV_OP_BREAKADDR, NV_CC_TR, NULL, bb, FALSE);
1744
1745 nvbb_attach_block(bld->pc->current_block, bl, CFG_EDGE_LOOP_ENTER);
1746
1747 bld_new_block(bld, bld->loop_bb[bld->loop_lvl++]);
1748
1749 if (bld->loop_lvl == bld->pc->loop_nesting_bound)
1750 bld->pc->loop_nesting_bound++;
1751
1752 bld_clear_def_use(&bld->tvs[0][0], BLD_MAX_TEMPS, bld->loop_lvl);
1753 bld_clear_def_use(&bld->avs[0][0], BLD_MAX_ADDRS, bld->loop_lvl);
1754 bld_clear_def_use(&bld->pvs[0][0], BLD_MAX_PREDS, bld->loop_lvl);
1755 }
1756 break;
1757 case TGSI_OPCODE_BRK:
1758 {
1759 struct nv_basic_block *bb = bld->brkt_bb[bld->loop_lvl - 1];
1760
1761 bld_flow(bld, NV_OP_BREAK, NV_CC_TR, NULL, bb, FALSE);
1762
1763 if (bld->out_kind == CFG_EDGE_FORWARD) /* else we already had BRK/CONT */
1764 nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);
1765
1766 bld->out_kind = CFG_EDGE_FAKE;
1767 }
1768 break;
1769 case TGSI_OPCODE_CONT:
1770 {
1771 struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];
1772
1773 bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);
1774
1775 nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_BACK);
1776
1777 if ((bb = bld->join_bb[bld->cond_lvl - 1])) {
1778 bld->join_bb[bld->cond_lvl - 1] = NULL;
1779 nv_nvi_delete(bb->exit->prev);
1780 }
1781 bld->out_kind = CFG_EDGE_FAKE;
1782 }
1783 break;
1784 case TGSI_OPCODE_ENDLOOP:
1785 {
1786 struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];
1787
1788 bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);
1789
1790 nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_BACK);
1791
1792 bld_loop_end(bld, bb); /* replace loop-side operand of the phis */
1793
1794 bld_new_block(bld, bld->brkt_bb[--bld->loop_lvl]);
1795 }
1796 break;
1797 case TGSI_OPCODE_ABS:
1798 case TGSI_OPCODE_CEIL:
1799 case TGSI_OPCODE_FLR:
1800 case TGSI_OPCODE_TRUNC:
1801 case TGSI_OPCODE_DDX:
1802 case TGSI_OPCODE_DDY:
1803 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1804 src0 = emit_fetch(bld, insn, 0, c);
1805 dst0[c] = bld_insn_1(bld, opcode, src0);
1806 }
1807 break;
1808 case TGSI_OPCODE_LIT:
1809 bld_lit(bld, dst0, insn);
1810 break;
1811 case TGSI_OPCODE_LRP:
1812 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1813 src0 = emit_fetch(bld, insn, 0, c);
1814 src1 = emit_fetch(bld, insn, 1, c);
1815 src2 = emit_fetch(bld, insn, 2, c);
1816 dst0[c] = bld_insn_2(bld, NV_OP_SUB, src1, src2);
1817 dst0[c] = bld_insn_3(bld, NV_OP_MAD, dst0[c], src0, src2);
1818 }
1819 break;
1820 case TGSI_OPCODE_MOV:
1821 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1822 dst0[c] = emit_fetch(bld, insn, 0, c);
1823 break;
1824 case TGSI_OPCODE_MAD:
1825 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1826 src0 = emit_fetch(bld, insn, 0, c);
1827 src1 = emit_fetch(bld, insn, 1, c);
1828 src2 = emit_fetch(bld, insn, 2, c);
1829 dst0[c] = bld_insn_3(bld, opcode, src0, src1, src2);
1830 }
1831 break;
1832 case TGSI_OPCODE_POW:
1833 src0 = emit_fetch(bld, insn, 0, 0);
1834 src1 = emit_fetch(bld, insn, 1, 0);
1835 temp = bld_pow(bld, src0, src1);
1836 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1837 dst0[c] = temp;
1838 break;
1839 case TGSI_OPCODE_LOG:
1840 src0 = emit_fetch(bld, insn, 0, 0);
1841 src0 = bld_insn_1(bld, NV_OP_ABS, src0);
1842 temp = bld_insn_1(bld, NV_OP_LG2, src0);
1843 dst0[2] = temp;
1844 if (insn->Dst[0].Register.WriteMask & 3) {
1845 temp = bld_insn_1(bld, NV_OP_FLOOR, temp);
1846 dst0[0] = temp;
1847 }
1848 if (insn->Dst[0].Register.WriteMask & 2) {
1849 temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
1850 temp = bld_insn_1(bld, NV_OP_EX2, temp);
1851 temp = bld_insn_1(bld, NV_OP_RCP, temp);
1852 dst0[1] = bld_insn_2(bld, NV_OP_MUL, src0, temp);
1853 }
1854 if (insn->Dst[0].Register.WriteMask & 8)
1855 dst0[3] = bld_imm_f32(bld, 1.0f);
1856 break;
1857 case TGSI_OPCODE_RCP:
1858 case TGSI_OPCODE_LG2:
1859 src0 = emit_fetch(bld, insn, 0, 0);
1860 temp = bld_insn_1(bld, opcode, src0);
1861 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1862 dst0[c] = temp;
1863 break;
1864 case TGSI_OPCODE_RSQ:
1865 src0 = emit_fetch(bld, insn, 0, 0);
1866 temp = bld_insn_1(bld, NV_OP_ABS, src0);
1867 temp = bld_insn_1(bld, NV_OP_RSQ, temp);
1868 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1869 dst0[c] = temp;
1870 break;
1871 case TGSI_OPCODE_SLT:
1872 case TGSI_OPCODE_SGE:
1873 case TGSI_OPCODE_SEQ:
1874 case TGSI_OPCODE_SGT:
1875 case TGSI_OPCODE_SLE:
1876 case TGSI_OPCODE_SNE:
1877 case TGSI_OPCODE_ISLT:
1878 case TGSI_OPCODE_ISGE:
1879 case TGSI_OPCODE_USEQ:
1880 case TGSI_OPCODE_USGE:
1881 case TGSI_OPCODE_USLT:
1882 case TGSI_OPCODE_USNE:
1883 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1884 src0 = emit_fetch(bld, insn, 0, c);
1885 src1 = emit_fetch(bld, insn, 1, c);
1886 dst0[c] = bld_insn_2(bld, NV_OP_SET, src0, src1);
1887 dst0[c]->insn->set_cond = translate_setcc(insn->Instruction.Opcode);
1888 SET_TYPE(dst0[c], infer_dst_type(insn->Instruction.Opcode));
1889
1890 dst0[c]->insn->src[0]->typecast =
1891 dst0[c]->insn->src[1]->typecast =
1892 infer_src_type(insn->Instruction.Opcode);
1893
1894 if (dst0[c]->reg.type != NV_TYPE_F32)
1895 break;
1896 dst0[c]->reg.as_type = NV_TYPE_S32;
1897 dst0[c] = bld_insn_1(bld, NV_OP_ABS, dst0[c]);
1898 dst0[c] = bld_insn_1(bld, NV_OP_CVT, dst0[c]);
1899 SET_TYPE(dst0[c], NV_TYPE_F32);
1900 }
1901 break;
1902 case TGSI_OPCODE_SCS:
1903 if (insn->Dst[0].Register.WriteMask & 0x3) {
1904 src0 = emit_fetch(bld, insn, 0, 0);
1905 temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
1906 if (insn->Dst[0].Register.WriteMask & 0x1)
1907 dst0[0] = bld_insn_1(bld, NV_OP_COS, temp);
1908 if (insn->Dst[0].Register.WriteMask & 0x2)
1909 dst0[1] = bld_insn_1(bld, NV_OP_SIN, temp);
1910 }
1911 if (insn->Dst[0].Register.WriteMask & 0x4)
1912 dst0[2] = bld_imm_f32(bld, 0.0f);
1913 if (insn->Dst[0].Register.WriteMask & 0x8)
1914 dst0[3] = bld_imm_f32(bld, 1.0f);
1915 break;
1916 case TGSI_OPCODE_SSG:
1917 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1918 src0 = emit_fetch(bld, insn, 0, c);
1919 src1 = bld_predicate(bld, src0, FALSE);
1920 temp = bld_insn_2(bld, NV_OP_AND, src0, bld_imm_u32(bld, 0x80000000));
1921 temp = bld_insn_2(bld, NV_OP_OR, temp, bld_imm_f32(bld, 1.0f));
1922 dst0[c] = bld_insn_2(bld, NV_OP_XOR, temp, temp);
1923 dst0[c]->insn->cc = NV_CC_EQ;
1924 nv_reference(bld->pc, &dst0[c]->insn->flags_src, src1);
1925 }
1926 break;
1927 case TGSI_OPCODE_SUB:
1928 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1929 src0 = emit_fetch(bld, insn, 0, c);
1930 src1 = emit_fetch(bld, insn, 1, c);
1931 dst0[c] = bld_insn_2(bld, NV_OP_ADD, src0, src1);
1932 dst0[c]->insn->src[1]->mod ^= NV_MOD_NEG;
1933 }
1934 break;
1935 case TGSI_OPCODE_TEX:
1936 case TGSI_OPCODE_TXB:
1937 case TGSI_OPCODE_TXL:
1938 case TGSI_OPCODE_TXP:
1939 bld_tex(bld, dst0, insn);
1940 break;
1941 case TGSI_OPCODE_XPD:
1942 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
1943 if (c == 3) {
1944 dst0[3] = bld_imm_f32(bld, 1.0f);
1945 break;
1946 }
1947 src0 = emit_fetch(bld, insn, 1, (c + 1) % 3);
1948 src1 = emit_fetch(bld, insn, 0, (c + 2) % 3);
1949 dst0[c] = bld_insn_2(bld, NV_OP_MUL, src0, src1);
1950
1951 src0 = emit_fetch(bld, insn, 0, (c + 1) % 3);
1952 src1 = emit_fetch(bld, insn, 1, (c + 2) % 3);
1953 dst0[c] = bld_insn_3(bld, NV_OP_MAD, src0, src1, dst0[c]);
1954
1955 dst0[c]->insn->src[2]->mod ^= NV_MOD_NEG;
1956 }
1957 break;
1958 case TGSI_OPCODE_RET:
1959 (new_instruction(bld->pc, NV_OP_RET))->fixed = 1;
1960 break;
1961 case TGSI_OPCODE_END:
1962 if (bld->ti->p->type == PIPE_SHADER_FRAGMENT)
1963 bld_export_outputs(bld);
1964 break;
1965 default:
1966 NOUVEAU_ERR("unhandled opcode %u\n", insn->Instruction.Opcode);
1967 abort();
1968 break;
1969 }
1970
1971 FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
1972 emit_store(bld, insn, c, dst0[c]);
1973 }
1974
1975 static INLINE void
1976 bld_free_value_trackers(struct bld_value_stack *base, int n)
1977 {
1978 int i, c;
1979
1980 for (i = 0; i < n; ++i)
1981 for (c = 0; c < 4; ++c)
1982 if (base[i * 4 + c].body)
1983 FREE(base[i * 4 + c].body);
1984 }
1985
1986 int
1987 nv50_tgsi_to_nc(struct nv_pc *pc, struct nv50_translation_info *ti)
1988 {
1989 struct bld_context *bld = CALLOC_STRUCT(bld_context);
1990 int c;
1991 unsigned ip;
1992
1993 pc->root[0] = pc->current_block = new_basic_block(pc);
1994
1995 bld->pc = pc;
1996 bld->ti = ti;
1997
1998 pc->loop_nesting_bound = 1;
1999
2000 c = util_bitcount(bld->ti->p->fp.interp >> 24);
2001 if (c && ti->p->type == PIPE_SHADER_FRAGMENT) {
2002 bld->frgcrd[3] = new_value(pc, NV_FILE_MEM_V, NV_TYPE_F32);
2003 bld->frgcrd[3]->reg.id = c - 1;
2004 bld->frgcrd[3] = bld_insn_1(bld, NV_OP_LINTERP, bld->frgcrd[3]);
2005 bld->frgcrd[3] = bld_insn_1(bld, NV_OP_RCP, bld->frgcrd[3]);
2006 }
2007
2008 for (ip = 0; ip < ti->inst_nr; ++ip)
2009 bld_instruction(bld, &ti->insns[ip]);
2010
2011 bld_free_value_trackers(&bld->tvs[0][0], BLD_MAX_TEMPS);
2012 bld_free_value_trackers(&bld->avs[0][0], BLD_MAX_ADDRS);
2013 bld_free_value_trackers(&bld->pvs[0][0], BLD_MAX_PREDS);
2014
2015 bld_free_value_trackers(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);
2016
2017 FREE(bld);
2018 return 0;
2019 }
2020
2021 /* If a variable is assigned in a loop, replace all references to the value
2022 * from outside the loop with a phi value.
2023 */
2024 static void
2025 bld_replace_value(struct nv_pc *pc, struct nv_basic_block *b,
2026 struct nv_value *old_val,
2027 struct nv_value *new_val)
2028 {
2029 struct nv_instruction *nvi;
2030
2031 for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = nvi->next) {
2032 int s;
2033 for (s = 0; s < 5; ++s) {
2034 if (!nvi->src[s])
2035 continue;
2036 if (nvi->src[s]->value == old_val)
2037 nv_reference(pc, &nvi->src[s], new_val);
2038 }
2039 if (nvi->flags_src && nvi->flags_src->value == old_val)
2040 nv_reference(pc, &nvi->flags_src, new_val);
2041 }
2042
2043 b->pass_seq = pc->pass_seq;
2044
2045 if (b->out[0] && b->out[0]->pass_seq < pc->pass_seq)
2046 bld_replace_value(pc, b->out[0], old_val, new_val);
2047
2048 if (b->out[1] && b->out[1]->pass_seq < pc->pass_seq)
2049 bld_replace_value(pc, b->out[1], old_val, new_val);
2050 }
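/* The pass_seq stamps keep this depth-first walk from revisiting
 * blocks, so it terminates even on the cyclic CFGs that loops create;
 * callers bump pc->pass_seq before starting a replacement pass (see
 * bld_loop_end).
 */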