/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <unistd.h>

#include "nv50_context.h"
#include "nv50_pc.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"

#include "tgsi/tgsi_dump.h"

#define BLD_MAX_TEMPS 64
#define BLD_MAX_ADDRS 4
#define BLD_MAX_PREDS 4
#define BLD_MAX_IMMDS 128

#define BLD_MAX_COND_NESTING 8
#define BLD_MAX_LOOP_NESTING 4
#define BLD_MAX_CALL_NESTING 2

/* collects all values assigned to the same TGSI register */
struct bld_value_stack {
   struct nv_value *top;
   struct nv_value **body;
   unsigned size;
   uint16_t loop_use; /* 1 bit per loop level, indicates if used/defd */
   uint16_t loop_def;
};

static INLINE void
bld_vals_push_val(struct bld_value_stack *stk, struct nv_value *val)
{
   assert(!stk->size || (stk->body[stk->size - 1] != val));

   if (!(stk->size % 8)) {
      unsigned old_sz = (stk->size + 0) * sizeof(struct nv_value *);
      unsigned new_sz = (stk->size + 8) * sizeof(struct nv_value *);
      stk->body = (struct nv_value **)REALLOC(stk->body, old_sz, new_sz);
   }
   stk->body[stk->size++] = val;
}

static INLINE boolean
bld_vals_del_val(struct bld_value_stack *stk, struct nv_value *val)
{
   unsigned i;

   for (i = stk->size; i > 0; --i)
      if (stk->body[i - 1] == val)
         break;
   if (!i)
      return FALSE;

   if (i != stk->size)
      stk->body[i - 1] = stk->body[stk->size - 1];

   --stk->size; /* XXX: old size in REALLOC */
   return TRUE;
}

static INLINE void
bld_vals_push(struct bld_value_stack *stk)
{
   bld_vals_push_val(stk, stk->top);
   stk->top = NULL;
}

static INLINE void
bld_push_values(struct bld_value_stack *stacks, int n)
{
   int i, c;

   for (i = 0; i < n; ++i)
      for (c = 0; c < 4; ++c)
         if (stacks[i * 4 + c].top)
            bld_vals_push(&stacks[i * 4 + c]);
}

struct bld_context {
   struct nv50_translation_info *ti;

   struct nv_pc *pc;
   struct nv_basic_block *b;

   struct tgsi_parse_context parse[BLD_MAX_CALL_NESTING];
   int call_lvl;

   struct nv_basic_block *cond_bb[BLD_MAX_COND_NESTING];
   struct nv_basic_block *join_bb[BLD_MAX_COND_NESTING];
   struct nv_basic_block *else_bb[BLD_MAX_COND_NESTING];
   int cond_lvl;
   struct nv_basic_block *loop_bb[BLD_MAX_LOOP_NESTING];
   struct nv_basic_block *brkt_bb[BLD_MAX_LOOP_NESTING];
   int loop_lvl;

   ubyte out_kind; /* CFG_EDGE_FORWARD, or FAKE in case of BREAK/CONT */

   struct bld_value_stack tvs[BLD_MAX_TEMPS][4]; /* TGSI_FILE_TEMPORARY */
   struct bld_value_stack avs[BLD_MAX_ADDRS][4]; /* TGSI_FILE_ADDRESS */
   struct bld_value_stack pvs[BLD_MAX_PREDS][4]; /* TGSI_FILE_PREDICATE */
   struct bld_value_stack ovs[PIPE_MAX_SHADER_OUTPUTS][4];

   uint32_t outputs_written[(PIPE_MAX_SHADER_OUTPUTS + 7) / 8];

   struct nv_value *frgcrd[4];
   struct nv_value *sysval[4];

   /* wipe on new BB */
   struct nv_value *saved_addr[4][2];
   struct nv_value *saved_inputs[128];
   struct nv_value *saved_immd[BLD_MAX_IMMDS];
   uint num_immds;
};

static INLINE ubyte
bld_stack_file(struct bld_context *bld, struct bld_value_stack *stk)
{
   if (stk < &bld->avs[0][0])
      return NV_FILE_GPR;
   else
   if (stk < &bld->pvs[0][0])
      return NV_FILE_ADDR;
   else
   if (stk < &bld->ovs[0][0])
      return NV_FILE_FLAGS;
   else
      return NV_FILE_OUT;
}
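
/* Note: the pointer-range checks above infer the register file from where
 * @stk lies inside struct bld_context, i.e. they depend on tvs, avs, pvs
 * and ovs being declared in exactly that order.
 */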

static INLINE struct nv_value *
bld_fetch(struct bld_context *bld, struct bld_value_stack *stk, int i, int c)
{
   stk[i * 4 + c].loop_use |= 1 << bld->loop_lvl;

   return stk[i * 4 + c].top;
}

static struct nv_value *
bld_loop_phi(struct bld_context *, struct bld_value_stack *, struct nv_value *);

/* If a variable is defined in a loop without prior use, we don't need
 * a phi in the loop header to account for backwards flow.
 *
 * However, if this variable is then also used outside the loop, we do
 * need a phi after all. But we must not use this phi's def inside the
 * loop, so we can eliminate the phi if it is unused later.
 */
static INLINE void
bld_store(struct bld_context *bld, struct bld_value_stack *stk, int i, int c,
          struct nv_value *val)
{
   const uint16_t m = 1 << bld->loop_lvl;

   stk = &stk[i * 4 + c];

   if (bld->loop_lvl && !(m & (stk->loop_def | stk->loop_use)))
      bld_loop_phi(bld, stk, val);

   stk->top = val;
   stk->loop_def |= 1 << bld->loop_lvl;
}
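
/* Illustrative example of the loop bitmasks: inside a loop at level 1, the
 * first bld_store to a TEMP that was neither read nor written in this loop
 * yet sees (loop_def | loop_use) & (1 << 1) == 0 and pre-creates the loop
 * phi; subsequent stores at the same level find loop_def's bit set and
 * skip it.
 */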

static INLINE void
bld_clear_def_use(struct bld_value_stack *stk, int n, int lvl)
{
   int i;
   const uint16_t mask = ~(1 << lvl);

   for (i = 0; i < n * 4; ++i) {
      stk[i].loop_def &= mask;
      stk[i].loop_use &= mask;
   }
}

#define FETCH_TEMP(i, c)    bld_fetch(bld, &bld->tvs[0][0], i, c)
#define STORE_TEMP(i, c, v) bld_store(bld, &bld->tvs[0][0], i, c, (v))
#define FETCH_ADDR(i, c)    bld_fetch(bld, &bld->avs[0][0], i, c)
#define STORE_ADDR(i, c, v) bld_store(bld, &bld->avs[0][0], i, c, (v))
#define FETCH_PRED(i, c)    bld_fetch(bld, &bld->pvs[0][0], i, c)
#define STORE_PRED(i, c, v) bld_store(bld, &bld->pvs[0][0], i, c, (v))

#define STORE_OUTR(i, c, v)                                         \
   do {                                                             \
      bld->ovs[i][c].top = (v);                                     \
      bld->outputs_written[(i) / 8] |= 1 << (((i) * 4 + (c)) % 32); \
   } while (0)

static INLINE void
bld_warn_uninitialized(struct bld_context *bld, int kind,
                       struct bld_value_stack *stk, struct nv_basic_block *b)
{
#if NV50_DEBUG & NV50_DEBUG_PROG_IR
   long i = (stk - &bld->tvs[0][0]) / 4;
   long c = (stk - &bld->tvs[0][0]) & 3;

   if (c == 3)
      c = -1;

   debug_printf("WARNING: TEMP[%li].%c %s used uninitialized in BB:%i\n",
                i, (int)('x' + c), kind ? "may be" : "is", b->id);
#endif
}

static INLINE struct nv_value *
bld_def(struct nv_instruction *i, int c, struct nv_value *value)
{
   i->def[c] = value;
   value->insn = i;
   return value;
}

static INLINE struct nv_value *
find_by_bb(struct bld_value_stack *stack, struct nv_basic_block *b)
{
   int i;

   if (stack->top && stack->top->insn->bb == b)
      return stack->top;

   for (i = stack->size - 1; i >= 0; --i)
      if (stack->body[i]->insn->bb == b)
         return stack->body[i];
   return NULL;
}

/* fetch value from stack that was defined in the specified basic block,
 * or search for first definitions in all of its predecessors
 */
static void
fetch_by_bb(struct bld_value_stack *stack,
            struct nv_value **vals, int *n,
            struct nv_basic_block *b)
{
   int i;
   struct nv_value *val;

   assert(*n < 16); /* MAX_COND_NESTING */

   val = find_by_bb(stack, b);
   if (val) {
      for (i = 0; i < *n; ++i)
         if (vals[i] == val)
            return;
      vals[(*n)++] = val;
      return;
   }
   for (i = 0; i < b->num_in; ++i)
      if (!IS_WALL_EDGE(b->in_kind[i]))
         fetch_by_bb(stack, vals, n, b->in[i]);
}
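
/* This is a depth-first walk of the CFG: if the block itself contains no
 * definition, we recurse into all non-wall incoming edges and collect the
 * distinct reaching definitions that a phi in @b would have to merge.
 */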

static INLINE boolean
nvbb_is_terminated(struct nv_basic_block *bb)
{
   return bb->exit && bb->exit->is_terminator;
}

static INLINE struct nv_value *
bld_load_imm_u32(struct bld_context *bld, uint32_t u);

static INLINE struct nv_value *
bld_undef(struct bld_context *bld, ubyte file)
{
   struct nv_instruction *nvi = new_instruction(bld->pc, NV_OP_UNDEF);

   return bld_def(nvi, 0, new_value(bld->pc, file, NV_TYPE_U32));
}

static struct nv_value *
bld_phi(struct bld_context *bld, struct nv_basic_block *b,
        struct bld_value_stack *stack)
{
   struct nv_basic_block *in;
   struct nv_value *vals[16] = { 0 };
   struct nv_value *val;
   struct nv_instruction *phi;
   int i, j, n;

   do {
      i = n = 0;
      fetch_by_bb(stack, vals, &n, b);

      if (!n) {
         bld_warn_uninitialized(bld, 0, stack, b);
         return NULL;
      }

      if (n == 1) {
         if (nvbb_dominated_by(b, vals[0]->insn->bb))
            break;

         bld_warn_uninitialized(bld, 1, stack, b);

         /* back-tracking to insert missing value of other path */
         in = b;
         while (in->in[0]) {
            if (in->num_in == 1) {
               in = in->in[0];
            } else {
               if (!nvbb_reachable_by(in->in[0], vals[0]->insn->bb, b))
                  in = in->in[0];
               else
               if (!nvbb_reachable_by(in->in[1], vals[0]->insn->bb, b))
                  in = in->in[1];
               else
                  in = in->in[0];
            }
         }
         bld->pc->current_block = in;

         /* should make this a no-op */
         bld_vals_push_val(stack, bld_undef(bld, vals[0]->reg.file));
         continue;
      }

      for (i = 0; i < n; ++i) {
         /* if value dominates b, continue to the redefinitions */
         if (nvbb_dominated_by(b, vals[i]->insn->bb))
            continue;

         /* if value dominates any in-block, b should be the dom frontier */
         for (j = 0; j < b->num_in; ++j)
            if (nvbb_dominated_by(b->in[j], vals[i]->insn->bb))
               break;
         /* otherwise, find the dominance frontier and put the phi there */
         if (j == b->num_in) {
            in = nvbb_dom_frontier(vals[i]->insn->bb);
            val = bld_phi(bld, in, stack);
            bld_vals_push_val(stack, val);
            break;
         }
      }
   } while(i < n);

   bld->pc->current_block = b;

   if (n == 1)
      return vals[0];

   phi = new_instruction(bld->pc, NV_OP_PHI);

   bld_def(phi, 0, new_value(bld->pc, vals[0]->reg.file, vals[0]->reg.type));
   for (i = 0; i < n; ++i)
      phi->src[i] = new_ref(bld->pc, vals[i]);

   return phi->def[0];
}
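
/* In effect, bld_phi performs on-demand SSA construction: definitions are
 * looked up lazily per basic block, and phi functions are only
 * materialized where multiple reaching definitions actually meet (or at a
 * dominance frontier found while searching).
 */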

/* Insert a phi function in the loop header.
 * For nested loops, we need to insert phi functions in all the outer
 * loop headers if they don't have one yet.
 *
 * @def: redefinition from inside loop, or NULL if to be replaced later
 */
static struct nv_value *
bld_loop_phi(struct bld_context *bld, struct bld_value_stack *stack,
             struct nv_value *def)
{
   struct nv_instruction *phi;
   struct nv_basic_block *bb = bld->pc->current_block;
   struct nv_value *val = NULL;

   if (bld->loop_lvl > 1) {
      --bld->loop_lvl;
      if (!((stack->loop_def | stack->loop_use) & (1 << bld->loop_lvl)))
         val = bld_loop_phi(bld, stack, NULL);
      ++bld->loop_lvl;
   }

   if (!val)
      val = bld_phi(bld, bld->pc->current_block, stack); /* old definition */
   if (!val) {
      bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1]->in[0];
      val = bld_undef(bld, bld_stack_file(bld, stack));
   }

   bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1];

   phi = new_instruction(bld->pc, NV_OP_PHI);

   bld_def(phi, 0, new_value_like(bld->pc, val));
   if (!def)
      def = phi->def[0];

   bld_vals_push_val(stack, phi->def[0]);

   phi->target = (struct nv_basic_block *)stack; /* cheat */

   nv_reference(bld->pc, &phi->src[0], val);
   nv_reference(bld->pc, &phi->src[1], def);

   bld->pc->current_block = bb;

   return phi->def[0];
}
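
/* Illustrative shape of the result: for
 *    x = a; LOOP { use(x); x = b; } ENDLOOP
 * the loop header gets
 *    x' = phi(a, b)
 * where src[0] = a comes from the preheader and src[1] is either the
 * redefinition @def or, as a placeholder, the phi's own def[0], to be
 * patched (or the phi eliminated) by bld_loop_end().
 */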

static INLINE struct nv_value *
bld_fetch_global(struct bld_context *bld, struct bld_value_stack *stack)
{
   const uint16_t m = 1 << bld->loop_lvl;
   const uint16_t use = stack->loop_use;

   stack->loop_use |= m;

   /* If neither used nor def'd inside the loop, build a phi in foresight,
    * so we don't have to replace stuff later on, which requires tracking.
    */
   if (bld->loop_lvl && !((use | stack->loop_def) & m))
      return bld_loop_phi(bld, stack, NULL);

   return bld_phi(bld, bld->pc->current_block, stack);
}

static INLINE struct nv_value *
bld_imm_u32(struct bld_context *bld, uint32_t u)
{
   int i;
   unsigned n = bld->num_immds;

   for (i = 0; i < n; ++i)
      if (bld->saved_immd[i]->reg.imm.u32 == u)
         return bld->saved_immd[i];
   assert(n < BLD_MAX_IMMDS);

   bld->num_immds++;

   bld->saved_immd[n] = new_value(bld->pc, NV_FILE_IMM, NV_TYPE_U32);
   bld->saved_immd[n]->reg.imm.u32 = u;
   return bld->saved_immd[n];
}

static void
bld_replace_value(struct nv_pc *, struct nv_basic_block *, struct nv_value *,
                  struct nv_value *);

/* Replace the source of the phi in the loop header by the last assignment,
 * or eliminate the phi function if there is no assignment inside the loop.
 *
 * Redundancy situation 1 - (used) but (not redefined) value:
 *   %3 = phi %0, %3 = %3 is used
 *   %3 = phi %0, %4 = is new definition
 *
 * Redundancy situation 2 - (not used) but (redefined) value:
 *   %3 = phi %0, %2 = %2 is used, %3 could be used outside, deleted by DCE
 */
static void
bld_loop_end(struct bld_context *bld, struct nv_basic_block *bb)
{
   struct nv_basic_block *save = bld->pc->current_block;
   struct nv_instruction *phi, *next;
   struct nv_value *val;
   struct bld_value_stack *stk;
   int i, s, n;

   for (phi = bb->phi; phi && phi->opcode == NV_OP_PHI; phi = next) {
      next = phi->next;

      stk = (struct bld_value_stack *)phi->target;
      phi->target = NULL;

      /* start with s == 1, src[0] is from outside the loop */
      for (s = 1, n = 0; n < bb->num_in; ++n) {
         if (bb->in_kind[n] != CFG_EDGE_BACK)
            continue;

         assert(s < 4);
         bld->pc->current_block = bb->in[n];
         val = bld_fetch_global(bld, stk);

         for (i = 0; i < 4; ++i)
            if (phi->src[i] && phi->src[i]->value == val)
               break;
         if (i == 4) {
            /* skip values we do not want to replace */
            for (; phi->src[s] && phi->src[s]->value != phi->def[0]; ++s);
            nv_reference(bld->pc, &phi->src[s++], val);
         }
      }
      bld->pc->current_block = save;

      if (phi->src[0]->value == phi->def[0] ||
          phi->src[0]->value == phi->src[1]->value)
         s = 1;
      else
      if (phi->src[1]->value == phi->def[0])
         s = 0;
      else
         continue;

      if (s >= 0) {
         /* eliminate the phi */
         bld_vals_del_val(stk, phi->def[0]);

         ++bld->pc->pass_seq;
         bld_replace_value(bld->pc, bb, phi->def[0], phi->src[s]->value);

         nv_nvi_delete(phi);
      }
   }
}

static INLINE struct nv_value *
bld_imm_f32(struct bld_context *bld, float f)
{
   return bld_imm_u32(bld, fui(f));
}

#define SET_TYPE(v, t) ((v)->reg.type = (v)->reg.as_type = (t))

static struct nv_value *
bld_insn_1(struct bld_context *bld, uint opcode, struct nv_value *src0)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, &insn->src[0], src0);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.as_type));
}

static struct nv_value *
bld_insn_2(struct bld_context *bld, uint opcode,
           struct nv_value *src0, struct nv_value *src1)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, &insn->src[0], src0);
   nv_reference(bld->pc, &insn->src[1], src1);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.as_type));
}

static struct nv_value *
bld_insn_3(struct bld_context *bld, uint opcode,
           struct nv_value *src0, struct nv_value *src1,
           struct nv_value *src2)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, &insn->src[0], src0);
   nv_reference(bld->pc, &insn->src[1], src1);
   nv_reference(bld->pc, &insn->src[2], src2);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.as_type));
}

static struct nv_value *
bld_duplicate_insn(struct bld_context *bld, struct nv_instruction *nvi)
{
   struct nv_instruction *dupi = new_instruction(bld->pc, nvi->opcode);
   int c;

   if (nvi->def[0])
      bld_def(dupi, 0, new_value_like(bld->pc, nvi->def[0]));

   if (nvi->flags_def) {
      dupi->flags_def = new_value_like(bld->pc, nvi->flags_def);
      dupi->flags_def->insn = dupi;
   }

   for (c = 0; c < 5; ++c)
      if (nvi->src[c])
         nv_reference(bld->pc, &dupi->src[c], nvi->src[c]->value);
   if (nvi->flags_src)
      nv_reference(bld->pc, &dupi->flags_src, nvi->flags_src->value);

   dupi->cc = nvi->cc;
   dupi->saturate = nvi->saturate;
   dupi->centroid = nvi->centroid;
   dupi->flat = nvi->flat;

   return dupi->def[0];
}

static void
bld_lmem_store(struct bld_context *bld, struct nv_value *ptr, int ofst,
               struct nv_value *val)
{
   struct nv_instruction *insn = new_instruction(bld->pc, NV_OP_STA);
   struct nv_value *loc;

   loc = new_value(bld->pc, NV_FILE_MEM_L, NV_TYPE_U32);

   loc->reg.id = ofst * 4;

   nv_reference(bld->pc, &insn->src[0], loc);
   nv_reference(bld->pc, &insn->src[1], val);
   nv_reference(bld->pc, &insn->src[4], ptr);
}

static struct nv_value *
bld_lmem_load(struct bld_context *bld, struct nv_value *ptr, int ofst)
{
   struct nv_value *loc, *val;

   loc = new_value(bld->pc, NV_FILE_MEM_L, NV_TYPE_U32);

   loc->reg.id = ofst * 4;

   val = bld_insn_1(bld, NV_OP_LDA, loc);

   nv_reference(bld->pc, &val->insn->src[4], ptr);

   return val;
}

#define BLD_INSN_1_EX(d, op, dt, s0, s0t)          \
   do {                                            \
      (d) = bld_insn_1(bld, (NV_OP_##op), (s0));   \
      SET_TYPE(d, NV_TYPE_##dt);                   \
      (d)->insn->src[0]->typecast = NV_TYPE_##s0t; \
   } while(0)

#define BLD_INSN_2_EX(d, op, dt, s0, s0t, s1, s1t)     \
   do {                                                \
      (d) = bld_insn_2(bld, (NV_OP_##op), (s0), (s1)); \
      SET_TYPE(d, NV_TYPE_##dt);                       \
      (d)->insn->src[0]->typecast = NV_TYPE_##s0t;     \
      (d)->insn->src[1]->typecast = NV_TYPE_##s1t;     \
   } while(0)

static struct nv_value *
bld_pow(struct bld_context *bld, struct nv_value *x, struct nv_value *e)
{
   struct nv_value *val;

   BLD_INSN_1_EX(val, LG2, F32, x, F32);
   BLD_INSN_2_EX(val, MUL, F32, e, F32, val, F32);
   val = bld_insn_1(bld, NV_OP_PREEX2, val);
   val = bld_insn_1(bld, NV_OP_EX2, val);

   return val;
}
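
/* bld_pow computes x^e via the usual identity x^e = 2^(e * log2(x)),
 * i.e. LG2 then MUL, with the PREEX2 range-reduction fixup the hardware
 * wants before EX2.
 */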

static INLINE struct nv_value *
bld_load_imm_f32(struct bld_context *bld, float f)
{
   struct nv_value *imm = bld_insn_1(bld, NV_OP_MOV, bld_imm_f32(bld, f));

   SET_TYPE(imm, NV_TYPE_F32);
   return imm;
}

static INLINE struct nv_value *
bld_load_imm_u32(struct bld_context *bld, uint32_t u)
{
   return bld_insn_1(bld, NV_OP_MOV, bld_imm_u32(bld, u));
}

static struct nv_value *
bld_get_address(struct bld_context *bld, int id, struct nv_value *indirect)
{
   int i;
   struct nv_instruction *nvi;
   struct nv_value *val;

   for (i = 0; i < 4; ++i) {
      if (!bld->saved_addr[i][0])
         break;
      if (bld->saved_addr[i][1] == indirect) {
         nvi = bld->saved_addr[i][0]->insn;
         if (nvi->src[0]->value->reg.imm.u32 == id)
            return bld->saved_addr[i][0];
      }
   }
   i &= 3;

   val = bld_imm_u32(bld, id);
   if (indirect)
      val = bld_insn_2(bld, NV_OP_ADD, indirect, val);
   else
      val = bld_insn_1(bld, NV_OP_MOV, val);

   bld->saved_addr[i][0] = val;
   bld->saved_addr[i][0]->reg.file = NV_FILE_ADDR;
   bld->saved_addr[i][0]->reg.type = NV_TYPE_U16;
   bld->saved_addr[i][1] = indirect;
   return bld->saved_addr[i][0];
}


static struct nv_value *
bld_predicate(struct bld_context *bld, struct nv_value *src, boolean bool_only)
{
   struct nv_instruction *s0i, *nvi = src->insn;

   if (!nvi) {
      nvi = bld_insn_1(bld,
                       (src->reg.file == NV_FILE_IMM) ? NV_OP_MOV : NV_OP_LDA,
                       src)->insn;
      src = nvi->def[0];
   } else
   if (bool_only) {
      while (nvi->opcode == NV_OP_ABS || nvi->opcode == NV_OP_NEG ||
             nvi->opcode == NV_OP_CVT) {
         s0i = nvi->src[0]->value->insn;
         if (!s0i || !nv50_op_can_write_flags(s0i->opcode))
            break;
         nvi = s0i;
         assert(!nvi->flags_src);
      }
   }

   if (!nv50_op_can_write_flags(nvi->opcode) ||
       nvi->bb != bld->pc->current_block) {
      nvi = new_instruction(bld->pc, NV_OP_CVT);
      nv_reference(bld->pc, &nvi->src[0], src);
   }

   if (!nvi->flags_def) {
      nvi->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
      nvi->flags_def->insn = nvi;
   }
   return nvi->flags_def;
}

static void
bld_kil(struct bld_context *bld, struct nv_value *src)
{
   struct nv_instruction *nvi;

   src = bld_predicate(bld, src, FALSE);
   nvi = new_instruction(bld->pc, NV_OP_KIL);
   nvi->fixed = 1;
   nvi->flags_src = new_ref(bld->pc, src);
   nvi->cc = NV_CC_LT;
}

static void
bld_flow(struct bld_context *bld, uint opcode, ubyte cc,
         struct nv_value *src, struct nv_basic_block *target,
         boolean plan_reconverge)
{
   struct nv_instruction *nvi;

   if (plan_reconverge)
      new_instruction(bld->pc, NV_OP_JOINAT)->fixed = 1;

   nvi = new_instruction(bld->pc, opcode);
   nvi->is_terminator = 1;
   nvi->cc = cc;
   nvi->target = target;
   if (src)
      nvi->flags_src = new_ref(bld->pc, src);
}

static ubyte
translate_setcc(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_SLT: return NV_CC_LT;
   case TGSI_OPCODE_SGE: return NV_CC_GE;
   case TGSI_OPCODE_SEQ: return NV_CC_EQ;
   case TGSI_OPCODE_SGT: return NV_CC_GT;
   case TGSI_OPCODE_SLE: return NV_CC_LE;
   case TGSI_OPCODE_SNE: return NV_CC_NE | NV_CC_U;
   case TGSI_OPCODE_STR: return NV_CC_TR;
   case TGSI_OPCODE_SFL: return NV_CC_FL;

   case TGSI_OPCODE_ISLT: return NV_CC_LT;
   case TGSI_OPCODE_ISGE: return NV_CC_GE;
   case TGSI_OPCODE_USEQ: return NV_CC_EQ;
   case TGSI_OPCODE_USGE: return NV_CC_GE;
   case TGSI_OPCODE_USLT: return NV_CC_LT;
   case TGSI_OPCODE_USNE: return NV_CC_NE;
   default:
      assert(0);
      return NV_CC_FL;
   }
}

static uint
translate_opcode(uint opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_ABS: return NV_OP_ABS;
   case TGSI_OPCODE_ADD:
   case TGSI_OPCODE_SUB:
   case TGSI_OPCODE_UADD: return NV_OP_ADD;
   case TGSI_OPCODE_AND: return NV_OP_AND;
   case TGSI_OPCODE_EX2: return NV_OP_EX2;
   case TGSI_OPCODE_CEIL: return NV_OP_CEIL;
   case TGSI_OPCODE_FLR: return NV_OP_FLOOR;
   case TGSI_OPCODE_TRUNC: return NV_OP_TRUNC;
   case TGSI_OPCODE_ROUND: return NV_OP_ROUND;
   case TGSI_OPCODE_COS: return NV_OP_COS;
   case TGSI_OPCODE_SIN: return NV_OP_SIN;
   case TGSI_OPCODE_DDX: return NV_OP_DFDX;
   case TGSI_OPCODE_DDY: return NV_OP_DFDY;
   case TGSI_OPCODE_F2I:
   case TGSI_OPCODE_F2U:
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_U2F: return NV_OP_CVT;
   case TGSI_OPCODE_INEG: return NV_OP_NEG;
   case TGSI_OPCODE_LG2: return NV_OP_LG2;
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_USHR: return NV_OP_SHR;
   case TGSI_OPCODE_MAD:
   case TGSI_OPCODE_UMAD: return NV_OP_MAD;
   case TGSI_OPCODE_MAX:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_UMAX: return NV_OP_MAX;
   case TGSI_OPCODE_MIN:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_UMIN: return NV_OP_MIN;
   case TGSI_OPCODE_MUL:
   case TGSI_OPCODE_UMUL: return NV_OP_MUL;
   case TGSI_OPCODE_OR: return NV_OP_OR;
   case TGSI_OPCODE_RCP: return NV_OP_RCP;
   case TGSI_OPCODE_RSQ: return NV_OP_RSQ;
   case TGSI_OPCODE_SAD: return NV_OP_SAD;
   case TGSI_OPCODE_SHL: return NV_OP_SHL;
   case TGSI_OPCODE_SLT:
   case TGSI_OPCODE_SGE:
   case TGSI_OPCODE_SEQ:
   case TGSI_OPCODE_SGT:
   case TGSI_OPCODE_SLE:
   case TGSI_OPCODE_SNE:
   case TGSI_OPCODE_ISLT:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE: return NV_OP_SET;
   case TGSI_OPCODE_TEX: return NV_OP_TEX;
   case TGSI_OPCODE_TXP: return NV_OP_TEX;
   case TGSI_OPCODE_TXB: return NV_OP_TXB;
   case TGSI_OPCODE_TXL: return NV_OP_TXL;
   case TGSI_OPCODE_XOR: return NV_OP_XOR;
   default:
      return NV_OP_NOP;
   }
}

static ubyte
infer_src_type(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_MOV:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_XOR:
   case TGSI_OPCODE_SAD:
   case TGSI_OPCODE_U2F:
   case TGSI_OPCODE_UADD:
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_UMAD:
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMAX:
   case TGSI_OPCODE_UMIN:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
   case TGSI_OPCODE_USHR:
      return NV_TYPE_U32;
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_IDIV:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_INEG:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_ISLT:
      return NV_TYPE_S32;
   default:
      return NV_TYPE_F32;
   }
}

static ubyte
infer_dst_type(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_MOV:
   case TGSI_OPCODE_F2U:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_XOR:
   case TGSI_OPCODE_SAD:
   case TGSI_OPCODE_UADD:
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_UMAD:
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMAX:
   case TGSI_OPCODE_UMIN:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
   case TGSI_OPCODE_USHR:
      return NV_TYPE_U32;
   case TGSI_OPCODE_F2I:
   case TGSI_OPCODE_IDIV:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_INEG:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_ISLT:
      return NV_TYPE_S32;
   default:
      return NV_TYPE_F32;
   }
}

static void
emit_store(struct bld_context *bld, const struct tgsi_full_instruction *inst,
           unsigned chan, struct nv_value *value)
{
   struct nv_value *ptr;
   const struct tgsi_full_dst_register *reg = &inst->Dst[0];

   if (reg->Register.Indirect) {
      ptr = FETCH_ADDR(reg->Indirect.Index,
                       tgsi_util_get_src_register_swizzle(&reg->Indirect, 0));
   } else {
      ptr = NULL;
   }

   assert(chan < 4);

   if (inst->Instruction.Opcode != TGSI_OPCODE_MOV)
      value->reg.type = infer_dst_type(inst->Instruction.Opcode);

   switch (inst->Instruction.Saturate) {
   case TGSI_SAT_NONE:
      break;
   case TGSI_SAT_ZERO_ONE:
      BLD_INSN_1_EX(value, SAT, F32, value, F32);
      break;
   case TGSI_SAT_MINUS_PLUS_ONE:
      value->reg.as_type = NV_TYPE_F32;
      value = bld_insn_2(bld, NV_OP_MAX, value, bld_load_imm_f32(bld, -1.0f));
      value = bld_insn_2(bld, NV_OP_MIN, value, bld_load_imm_f32(bld, 1.0f));
      break;
   }

   switch (reg->Register.File) {
   case TGSI_FILE_OUTPUT:
      if (!value->insn && (bld->ti->output_file == NV_FILE_OUT))
         value = bld_insn_1(bld, NV_OP_MOV, value);
      value = bld_insn_1(bld, NV_OP_MOV, value);
      value->reg.file = bld->ti->output_file;

      if (bld->ti->p->type == PIPE_SHADER_FRAGMENT) {
         STORE_OUTR(reg->Register.Index, chan, value);
      } else {
         value->insn->fixed = 1;
         value->reg.id = bld->ti->output_map[reg->Register.Index][chan];
      }
      break;
   case TGSI_FILE_TEMPORARY:
      assert(reg->Register.Index < BLD_MAX_TEMPS);
      if (!value->insn || (value->insn->bb != bld->pc->current_block))
         value = bld_insn_1(bld, NV_OP_MOV, value);
      value->reg.file = NV_FILE_GPR;

      if (bld->ti->store_to_memory)
         bld_lmem_store(bld, ptr, reg->Register.Index * 4 + chan, value);
      else
         STORE_TEMP(reg->Register.Index, chan, value);
      break;
   case TGSI_FILE_ADDRESS:
      assert(reg->Register.Index < BLD_MAX_ADDRS);
      value->reg.file = NV_FILE_ADDR;
      value->reg.type = NV_TYPE_U16;
      STORE_ADDR(reg->Register.Index, chan, value);
      break;
   }
}

static INLINE uint32_t
bld_is_output_written(struct bld_context *bld, int i, int c)
{
   if (c < 0)
      return bld->outputs_written[i / 8] & (0xf << ((i * 4) % 32));
   return bld->outputs_written[i / 8] & (1 << ((i * 4 + c) % 32));
}

static void
bld_export_outputs(struct bld_context *bld)
{
   struct nv_value *vals[4];
   struct nv_instruction *nvi;
   int i, c, n;

   bld_push_values(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; ++i) {
      if (!bld_is_output_written(bld, i, -1))
         continue;
      for (n = 0, c = 0; c < 4; ++c) {
         if (!bld_is_output_written(bld, i, c))
            continue;
         vals[n] = bld_fetch_global(bld, &bld->ovs[i][c]);
         assert(vals[n]);
         vals[n] = bld_insn_1(bld, NV_OP_MOV, vals[n]);
         vals[n++]->reg.id = bld->ti->output_map[i][c];
      }
      assert(n);

      (nvi = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;

      for (c = 0; c < n; ++c)
         nvi->src[c] = new_ref(bld->pc, vals[c]);
   }
}

static void
bld_new_block(struct bld_context *bld, struct nv_basic_block *b)
{
   int i;

   bld_push_values(&bld->tvs[0][0], BLD_MAX_TEMPS);
   bld_push_values(&bld->avs[0][0], BLD_MAX_ADDRS);
   bld_push_values(&bld->pvs[0][0], BLD_MAX_PREDS);
   bld_push_values(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   bld->pc->current_block = b;

   for (i = 0; i < 4; ++i)
      bld->saved_addr[i][0] = NULL;

   for (i = 0; i < 128; ++i)
      bld->saved_inputs[i] = NULL;

   bld->out_kind = CFG_EDGE_FORWARD;
}

static struct nv_value *
bld_saved_input(struct bld_context *bld, unsigned i, unsigned c)
{
   unsigned idx = bld->ti->input_map[i][c];

   if (bld->ti->p->type != PIPE_SHADER_FRAGMENT)
      return NULL;
   if (bld->saved_inputs[idx])
      return bld->saved_inputs[idx];
   return NULL;
}

static struct nv_value *
bld_interpolate(struct bld_context *bld, unsigned mode, struct nv_value *val)
{
   if (val->reg.id == 255) {
      /* gl_FrontFacing: 0/~0 to -1.0/+1.0 */
      val = bld_insn_1(bld, NV_OP_LINTERP, val);
      val = bld_insn_2(bld, NV_OP_SHL, val, bld_imm_u32(bld, 31));
      val->insn->src[0]->typecast = NV_TYPE_U32;
      val = bld_insn_2(bld, NV_OP_XOR, val, bld_imm_f32(bld, -1.0f));
      val->insn->src[0]->typecast = NV_TYPE_U32;
   } else
   if (mode & (NV50_INTERP_LINEAR | NV50_INTERP_FLAT))
      val = bld_insn_1(bld, NV_OP_LINTERP, val);
   else
      val = bld_insn_2(bld, NV_OP_PINTERP, val, bld->frgcrd[3]);

   val->insn->flat = (mode & NV50_INTERP_FLAT) ? 1 : 0;
   val->insn->centroid = (mode & NV50_INTERP_CENTROID) ? 1 : 0;
   return val;
}

static struct nv_value *
emit_fetch(struct bld_context *bld, const struct tgsi_full_instruction *insn,
           const unsigned s, const unsigned chan)
{
   const struct tgsi_full_src_register *src = &insn->Src[s];
   struct nv_value *res;
   struct nv_value *ptr = NULL;
   unsigned idx, swz, dim_idx, ind_idx, ind_swz, sgn;
   ubyte type = infer_src_type(insn->Instruction.Opcode);

   idx = src->Register.Index;
   swz = tgsi_util_get_full_src_register_swizzle(src, chan);
   dim_idx = -1;
   ind_idx = -1;
   ind_swz = 0;

   if (src->Register.Indirect) {
      ind_idx = src->Indirect.Index;
      ind_swz = tgsi_util_get_src_register_swizzle(&src->Indirect, 0);

      ptr = FETCH_ADDR(ind_idx, ind_swz);
   }
   if (idx >= (128 / 4) && src->Register.File == TGSI_FILE_CONSTANT)
      ptr = bld_get_address(bld, (idx * 16) & ~0x1ff, ptr);

   switch (src->Register.File) {
   case TGSI_FILE_CONSTANT:
      dim_idx = src->Dimension.Index;
      assert(dim_idx < 15);

      res = new_value(bld->pc, NV_FILE_MEM_C(dim_idx), type);
      SET_TYPE(res, type);
      res->reg.id = (idx * 4 + swz) & 127;
      res = bld_insn_1(bld, NV_OP_LDA, res);

      if (ptr)
         res->insn->src[4] = new_ref(bld->pc, ptr);
      break;
   case TGSI_FILE_IMMEDIATE:
      assert(idx < bld->ti->immd32_nr);
      res = bld_load_imm_u32(bld, bld->ti->immd32[idx * 4 + swz]);

      switch (bld->ti->immd32_ty[idx]) {
      case TGSI_IMM_FLOAT32: SET_TYPE(res, NV_TYPE_F32); break;
      case TGSI_IMM_UINT32: SET_TYPE(res, NV_TYPE_U32); break;
      case TGSI_IMM_INT32: SET_TYPE(res, NV_TYPE_S32); break;
      default:
         SET_TYPE(res, type);
         break;
      }
      break;
   case TGSI_FILE_INPUT:
      res = bld_saved_input(bld, idx, swz);
      if (res && (insn->Instruction.Opcode != TGSI_OPCODE_TXP))
         break;

      res = new_value(bld->pc, bld->ti->input_file, type);
      res->reg.id = bld->ti->input_map[idx][swz];

      if (res->reg.file == NV_FILE_MEM_V) {
         res = bld_interpolate(bld, bld->ti->interp_mode[idx], res);
      } else {
         assert(src->Dimension.Dimension == 0);
         res = bld_insn_1(bld, NV_OP_LDA, res);
         assert(res->reg.type == type);
      }
      bld->saved_inputs[bld->ti->input_map[idx][swz]] = res;
      break;
   case TGSI_FILE_TEMPORARY:
      if (bld->ti->store_to_memory)
         res = bld_lmem_load(bld, ptr, idx * 4 + swz);
      else
         res = bld_fetch_global(bld, &bld->tvs[idx][swz]);
      break;
   case TGSI_FILE_ADDRESS:
      res = bld_fetch_global(bld, &bld->avs[idx][swz]);
      break;
   case TGSI_FILE_PREDICATE:
      res = bld_fetch_global(bld, &bld->pvs[idx][swz]);
      break;
   case TGSI_FILE_SYSTEM_VALUE:
      res = new_value(bld->pc, bld->ti->input_file, NV_TYPE_U32);
      res->reg.id = bld->ti->sysval_map[idx];
      res = bld_insn_1(bld, NV_OP_LDA, res);
      res = bld_insn_1(bld, NV_OP_CVT, res);
      res->reg.type = NV_TYPE_F32;
      break;
   default:
      NOUVEAU_ERR("illegal/unhandled src reg file: %d\n", src->Register.File);
      abort();
      break;
   }
   if (!res)
      return bld_undef(bld, NV_FILE_GPR);

   sgn = tgsi_util_get_full_src_register_sign_mode(src, chan);

   if (insn->Instruction.Opcode != TGSI_OPCODE_MOV)
      res->reg.as_type = type;
   else
   if (sgn != TGSI_UTIL_SIGN_KEEP) /* apparently "MOV A, -B" assumes float */
      res->reg.as_type = NV_TYPE_F32;

   switch (sgn) {
   case TGSI_UTIL_SIGN_KEEP:
      break;
   case TGSI_UTIL_SIGN_CLEAR:
      res = bld_insn_1(bld, NV_OP_ABS, res);
      break;
   case TGSI_UTIL_SIGN_TOGGLE:
      res = bld_insn_1(bld, NV_OP_NEG, res);
      break;
   case TGSI_UTIL_SIGN_SET:
      res = bld_insn_1(bld, NV_OP_ABS, res);
      res = bld_insn_1(bld, NV_OP_NEG, res);
      break;
   default:
      NOUVEAU_ERR("illegal/unhandled src reg sign mode\n");
      abort();
      break;
   }

   return res;
}

static void
bld_lit(struct bld_context *bld, struct nv_value *dst0[4],
        const struct tgsi_full_instruction *insn)
{
   struct nv_value *val0 = NULL;
   struct nv_value *zero = NULL;
   unsigned mask = insn->Dst[0].Register.WriteMask;

   if (mask & ((1 << 0) | (1 << 3)))
      dst0[3] = dst0[0] = bld_load_imm_f32(bld, 1.0f);

   if (mask & (3 << 1)) {
      zero = bld_load_imm_f32(bld, 0.0f);
      val0 = bld_insn_2(bld, NV_OP_MAX, emit_fetch(bld, insn, 0, 0), zero);

      if (mask & (1 << 1))
         dst0[1] = val0;
   }

   if (mask & (1 << 2)) {
      struct nv_value *val1, *val3, *src1, *src3;
      struct nv_value *pos128 = bld_load_imm_f32(bld, 127.999999f);
      struct nv_value *neg128 = bld_load_imm_f32(bld, -127.999999f);

      src1 = emit_fetch(bld, insn, 0, 1);
      src3 = emit_fetch(bld, insn, 0, 3);

      val0->insn->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
      val0->insn->flags_def->insn = val0->insn;

      val1 = bld_insn_2(bld, NV_OP_MAX, src1, zero);
      val3 = bld_insn_2(bld, NV_OP_MAX, src3, neg128);
      val3 = bld_insn_2(bld, NV_OP_MIN, val3, pos128);
      val3 = bld_pow(bld, val1, val3);

      dst0[2] = bld_insn_1(bld, NV_OP_MOV, zero);
      dst0[2]->insn->cc = NV_CC_LE;
      dst0[2]->insn->flags_src = new_ref(bld->pc, val0->insn->flags_def);

      dst0[2] = bld_insn_2(bld, NV_OP_SELECT, val3, dst0[2]);
   }
}
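
/* For reference, TGSI LIT computes roughly
 *    dst = (1, max(s.x, 0), s.x > 0 ? max(s.y, 0)^clamp(s.w, -128, 128) : 0, 1);
 * the flags_def/NV_CC_LE/SELECT combination above implements the s.x > 0
 * condition without a branch.
 */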

static INLINE void
get_tex_dim(const struct tgsi_full_instruction *insn, int *dim, int *arg)
{
   switch (insn->Texture.Texture) {
   case TGSI_TEXTURE_1D:
      *arg = *dim = 1;
      break;
   case TGSI_TEXTURE_SHADOW1D:
      *dim = 1;
      *arg = 2;
      break;
   case TGSI_TEXTURE_UNKNOWN:
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
      *arg = *dim = 2;
      break;
   case TGSI_TEXTURE_SHADOW2D:
   case TGSI_TEXTURE_SHADOWRECT:
      *dim = 2;
      *arg = 3;
      break;
   case TGSI_TEXTURE_3D:
   case TGSI_TEXTURE_CUBE:
      *dim = *arg = 3;
      break;
   default:
      assert(0);
      break;
   }
}

static void
load_proj_tex_coords(struct bld_context *bld,
                     struct nv_value *t[4], int dim, int arg,
                     const struct tgsi_full_instruction *insn)
{
   int c, mask;

   mask = (1 << dim) - 1;
   if (arg != dim)
      mask |= 4; /* depth comparison value */

   t[3] = emit_fetch(bld, insn, 0, 3);

   if (t[3]->insn->opcode == NV_OP_PINTERP) {
      t[3] = bld_duplicate_insn(bld, t[3]->insn);
      t[3]->insn->opcode = NV_OP_LINTERP;
      nv_reference(bld->pc, &t[3]->insn->src[1], NULL);
   }

   t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);

   for (c = 0; c < 4; ++c) {
      if (!(mask & (1 << c)))
         continue;
      t[c] = emit_fetch(bld, insn, 0, c);

      if (t[c]->insn->opcode != NV_OP_LINTERP &&
          t[c]->insn->opcode != NV_OP_PINTERP)
         continue;
      t[c] = bld_duplicate_insn(bld, t[c]->insn);
      t[c]->insn->opcode = NV_OP_PINTERP;
      nv_reference(bld->pc, &t[c]->insn->src[1], t[3]);

      mask &= ~(1 << c);
   }

   for (c = 0; mask; ++c, mask >>= 1) {
      if (!(mask & 1))
         continue;
      t[c] = bld_insn_2(bld, NV_OP_MUL, t[c], t[3]);
   }
}

/* For a quad of threads / top left, top right, bottom left, bottom right
 * pixels, do a different operation, and take src0 from a specific thread.
 */
#define QOP_ADD  0
#define QOP_SUBR 1
#define QOP_SUB  2
#define QOP_MOV1 3

#define QOP(a, b, c, d) \
   ((QOP_##a << 0) | (QOP_##b << 2) | (QOP_##c << 4) | (QOP_##d << 6))
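
/* Example: QOP(SUBR, SUBR, SUBR, SUBR) packs QOP_SUBR (= 1) into all four
 * 2-bit slots, i.e. 0x55, so every pixel of the quad performs the reversed
 * subtraction against the value taken from the selected lane.
 */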

static INLINE struct nv_value *
bld_quadop(struct bld_context *bld, ubyte qop, struct nv_value *src0, int lane,
           struct nv_value *src1, boolean wp)
{
   struct nv_value *val = bld_insn_2(bld, NV_OP_QUADOP, src0, src1);
   val->insn->lanes = lane;
   val->insn->quadop = qop;
   if (wp) {
      val->insn->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
      val->insn->flags_def->insn = val->insn;
   }
   return val;
}

static INLINE struct nv_value *
bld_cmov(struct bld_context *bld,
         struct nv_value *src, ubyte cc, struct nv_value *cr)
{
   src = bld_insn_1(bld, NV_OP_MOV, src);

   src->insn->cc = cc;
   src->insn->flags_src = new_ref(bld->pc, cr);

   return src;
}

static struct nv_instruction *
emit_tex(struct bld_context *bld, uint opcode,
         struct nv_value *dst[4], struct nv_value *t_in[4],
         int argc, int tic, int tsc, int cube)
{
   struct nv_value *t[4];
   struct nv_instruction *nvi;
   int c;

   /* the inputs to a tex instruction must be separate values */
   for (c = 0; c < argc; ++c) {
      t[c] = bld_insn_1(bld, NV_OP_MOV, t_in[c]);
      SET_TYPE(t[c], NV_TYPE_F32);
      t[c]->insn->fixed = 1;
   }

   nvi = new_instruction(bld->pc, opcode);

   for (c = 0; c < 4; ++c)
      dst[c] = bld_def(nvi, c, new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32));

   for (c = 0; c < argc; ++c)
      nvi->src[c] = new_ref(bld->pc, t[c]);

   nvi->tex_t = tic;
   nvi->tex_s = tsc;
   nvi->tex_mask = 0xf;
   nvi->tex_cube = cube;
   nvi->tex_live = 0;
   nvi->tex_argc = argc;

   return nvi;
}

static void
bld_texlod_sequence(struct bld_context *bld,
                    struct nv_value *dst[4], struct nv_value *t[4], int arg,
                    int tic, int tsc, int cube)
{
   emit_tex(bld, NV_OP_TXL, dst, t, arg, tic, tsc, cube); /* TODO */
}


/* The lanes of a quad are grouped by the bit in the condition register
 * they have set, which is selected by differing bias values.
 * Move the input values for TEX into a new register set for each group
 * and execute TEX only for a specific group.
 * We always need to use 4 new registers for the inputs/outputs because
 * the implicitly calculated derivatives must be correct.
 */
static void
bld_texbias_sequence(struct bld_context *bld,
                     struct nv_value *dst[4], struct nv_value *t[4], int arg,
                     int tic, int tsc, int cube)
{
   struct nv_instruction *sel, *tex;
   struct nv_value *bit[4], *cr[4], *res[4][4], *val;
   int l, c;

   const ubyte cc[4] = { NV_CC_EQ, NV_CC_S, NV_CC_C, NV_CC_O };

   for (l = 0; l < 4; ++l) {
      bit[l] = bld_load_imm_u32(bld, 1 << l);

      val = bld_quadop(bld, QOP(SUBR, SUBR, SUBR, SUBR),
                       t[arg - 1], l, t[arg - 1], TRUE);

      cr[l] = bld_cmov(bld, bit[l], NV_CC_EQ, val->insn->flags_def);

      cr[l]->reg.file = NV_FILE_FLAGS;
      SET_TYPE(cr[l], NV_TYPE_U16);
   }

   sel = new_instruction(bld->pc, NV_OP_SELECT);

   for (l = 0; l < 4; ++l)
      sel->src[l] = new_ref(bld->pc, cr[l]);

   bld_def(sel, 0, new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16));

   for (l = 0; l < 4; ++l) {
      tex = emit_tex(bld, NV_OP_TXB, dst, t, arg, tic, tsc, cube);

      tex->cc = cc[l];
      tex->flags_src = new_ref(bld->pc, sel->def[0]);

      for (c = 0; c < 4; ++c)
         res[l][c] = tex->def[c];
   }

   for (l = 0; l < 4; ++l)
      for (c = 0; c < 4; ++c)
         res[l][c] = bld_cmov(bld, res[l][c], cc[l], sel->def[0]);

   for (c = 0; c < 4; ++c) {
      sel = new_instruction(bld->pc, NV_OP_SELECT);

      for (l = 0; l < 4; ++l)
         sel->src[l] = new_ref(bld->pc, res[l][c]);

      bld_def(sel, 0, (dst[c] = new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32)));
   }
}
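
/* In short: each lane computes (own bias - bias of lane l); lanes whose
 * bias matches lane l's get condition value bit[l] (1 << l), the per-lane
 * condition values are merged into a single flags value via SELECT, and
 * TXB then runs once per group, predicated on the matching cc bit, with
 * the whole quad's coordinates intact so the implicit derivatives stay
 * valid.
 */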

static boolean
bld_is_constant(struct nv_value *val)
{
   if (val->reg.file == NV_FILE_IMM)
      return TRUE;
   return val->insn && nvcg_find_constant(val->insn->src[0]);
}

static void
bld_tex(struct bld_context *bld, struct nv_value *dst0[4],
        const struct tgsi_full_instruction *insn)
{
   struct nv_value *t[4], *s[3];
   uint opcode = translate_opcode(insn->Instruction.Opcode);
   int arg, dim, c;
   const int tic = insn->Src[1].Register.Index;
   const int tsc = tic;
   const int cube = (insn->Texture.Texture == TGSI_TEXTURE_CUBE) ? 1 : 0;

   get_tex_dim(insn, &dim, &arg);

   if (!cube && insn->Instruction.Opcode == TGSI_OPCODE_TXP)
      load_proj_tex_coords(bld, t, dim, arg, insn);
   else {
      for (c = 0; c < dim; ++c)
         t[c] = emit_fetch(bld, insn, 0, c);
      if (arg != dim)
         t[dim] = emit_fetch(bld, insn, 0, 2);
   }

   if (cube) {
      assert(dim >= 3);
      for (c = 0; c < 3; ++c)
         s[c] = bld_insn_1(bld, NV_OP_ABS, t[c]);

      s[0] = bld_insn_2(bld, NV_OP_MAX, s[0], s[1]);
      s[0] = bld_insn_2(bld, NV_OP_MAX, s[0], s[2]);
      s[0] = bld_insn_1(bld, NV_OP_RCP, s[0]);

      for (c = 0; c < 3; ++c)
         t[c] = bld_insn_2(bld, NV_OP_MUL, t[c], s[0]);
   }

   if (opcode == NV_OP_TXB || opcode == NV_OP_TXL) {
      t[arg++] = emit_fetch(bld, insn, 0, 3);

      if ((bld->ti->p->type == PIPE_SHADER_FRAGMENT) &&
          !bld_is_constant(t[arg - 1])) {
         if (opcode == NV_OP_TXB)
            bld_texbias_sequence(bld, dst0, t, arg, tic, tsc, cube);
         else
            bld_texlod_sequence(bld, dst0, t, arg, tic, tsc, cube);
         return;
      }
   }

   emit_tex(bld, opcode, dst0, t, arg, tic, tsc, cube);
}

static INLINE struct nv_value *
bld_dot(struct bld_context *bld, const struct tgsi_full_instruction *insn,
        int n)
{
   struct nv_value *dotp, *src0, *src1;
   int c;

   src0 = emit_fetch(bld, insn, 0, 0);
   src1 = emit_fetch(bld, insn, 1, 0);
   dotp = bld_insn_2(bld, NV_OP_MUL, src0, src1);

   for (c = 1; c < n; ++c) {
      src0 = emit_fetch(bld, insn, 0, c);
      src1 = emit_fetch(bld, insn, 1, c);
      dotp = bld_insn_3(bld, NV_OP_MAD, src0, src1, dotp);
   }
   return dotp;
}

#define FOR_EACH_DST0_ENABLED_CHANNEL(chan, inst) \
   for (chan = 0; chan < 4; ++chan)               \
      if ((inst)->Dst[0].Register.WriteMask & (1 << chan))

static void
bld_instruction(struct bld_context *bld,
                const struct tgsi_full_instruction *insn)
{
   struct nv50_program *prog = bld->ti->p;
   const struct tgsi_full_dst_register *dreg = &insn->Dst[0];
   struct nv_value *src0;
   struct nv_value *src1;
   struct nv_value *src2;
   struct nv_value *dst0[4] = { 0 };
   struct nv_value *temp;
   int c;
   uint opcode = translate_opcode(insn->Instruction.Opcode);

#if NV50_DEBUG & NV50_DEBUG_PROG_IR
   debug_printf("bld_instruction:"); tgsi_dump_instruction(insn, 1);
#endif

   switch (insn->Instruction.Opcode) {
   case TGSI_OPCODE_ADD:
   case TGSI_OPCODE_MAX:
   case TGSI_OPCODE_MIN:
   case TGSI_OPCODE_MUL:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, opcode, src0, src1);
      }
      break;
   case TGSI_OPCODE_ARL:
      src1 = bld_imm_u32(bld, 4);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         temp = bld_insn_1(bld, NV_OP_FLOOR, src0);
         SET_TYPE(temp, NV_TYPE_S32);
         dst0[c] = bld_insn_2(bld, NV_OP_SHL, temp, src1);
      }
      break;
   case TGSI_OPCODE_CMP:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         src0 = bld_predicate(bld, src0, FALSE);

         src1 = bld_insn_1(bld, NV_OP_MOV, src1);
         src1->insn->flags_src = new_ref(bld->pc, src0);
         src1->insn->cc = NV_CC_LT;

         src2 = bld_insn_1(bld, NV_OP_MOV, src2);
         src2->insn->flags_src = new_ref(bld->pc, src0);
         src2->insn->cc = NV_CC_GE;

         dst0[c] = bld_insn_2(bld, NV_OP_SELECT, src1, src2);
      }
      break;
   case TGSI_OPCODE_COS:
   case TGSI_OPCODE_SIN:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
      if (insn->Dst[0].Register.WriteMask & 7)
         temp = bld_insn_1(bld, opcode, temp);
      for (c = 0; c < 3; ++c)
         if (insn->Dst[0].Register.WriteMask & (1 << c))
            dst0[c] = temp;
      if (!(insn->Dst[0].Register.WriteMask & (1 << 3)))
         break;
      src0 = emit_fetch(bld, insn, 0, 3);
      temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
      dst0[3] = bld_insn_1(bld, opcode, temp);
      break;
   case TGSI_OPCODE_DP2:
      temp = bld_dot(bld, insn, 2);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DP3:
      temp = bld_dot(bld, insn, 3);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DP4:
      temp = bld_dot(bld, insn, 4);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DPH:
      src0 = bld_dot(bld, insn, 3);
      src1 = emit_fetch(bld, insn, 1, 3);
      temp = bld_insn_2(bld, NV_OP_ADD, src0, src1);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DST:
      if (insn->Dst[0].Register.WriteMask & 1)
         dst0[0] = bld_imm_f32(bld, 1.0f);
      if (insn->Dst[0].Register.WriteMask & 2) {
         src0 = emit_fetch(bld, insn, 0, 1);
         src1 = emit_fetch(bld, insn, 1, 1);
         dst0[1] = bld_insn_2(bld, NV_OP_MUL, src0, src1);
      }
      if (insn->Dst[0].Register.WriteMask & 4)
         dst0[2] = emit_fetch(bld, insn, 0, 2);
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = emit_fetch(bld, insn, 1, 3);
      break;
   case TGSI_OPCODE_EXP:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_FLOOR, src0);

      if (insn->Dst[0].Register.WriteMask & 2)
         dst0[1] = bld_insn_2(bld, NV_OP_SUB, src0, temp);
      if (insn->Dst[0].Register.WriteMask & 1) {
         temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
         dst0[0] = bld_insn_1(bld, NV_OP_EX2, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 4) {
         temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
         dst0[2] = bld_insn_1(bld, NV_OP_EX2, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_EX2:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
      temp = bld_insn_1(bld, NV_OP_EX2, temp);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_FRC:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         dst0[c] = bld_insn_1(bld, NV_OP_FLOOR, src0);
         dst0[c] = bld_insn_2(bld, NV_OP_SUB, src0, dst0[c]);
      }
      break;
   case TGSI_OPCODE_KIL:
      for (c = 0; c < 4; ++c) {
         src0 = emit_fetch(bld, insn, 0, c);
         bld_kil(bld, src0);
      }
      break;
   case TGSI_OPCODE_KILP:
      (new_instruction(bld->pc, NV_OP_KIL))->fixed = 1;
      break;
   case TGSI_OPCODE_IF:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      assert(bld->cond_lvl < BLD_MAX_COND_NESTING);

      nvbb_attach_block(bld->pc->current_block, b, CFG_EDGE_FORWARD);

      bld->join_bb[bld->cond_lvl] = bld->pc->current_block;
      bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;

      src1 = bld_predicate(bld, emit_fetch(bld, insn, 0, 0), TRUE);

      bld_flow(bld, NV_OP_BRA, NV_CC_EQ, src1, NULL, (bld->cond_lvl == 0));

      ++bld->cond_lvl;
      bld_new_block(bld, b);
   }
      break;
   case TGSI_OPCODE_ELSE:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      --bld->cond_lvl;
      nvbb_attach_block(bld->join_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);

      bld->cond_bb[bld->cond_lvl]->exit->target = b;
      bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;

      new_instruction(bld->pc, NV_OP_BRA)->is_terminator = 1;

      ++bld->cond_lvl;
      bld_new_block(bld, b);
   }
      break;
   case TGSI_OPCODE_ENDIF:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      if (!nvbb_is_terminated(bld->pc->current_block))
         bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, b, FALSE);

      --bld->cond_lvl;
      nvbb_attach_block(bld->pc->current_block, b, bld->out_kind);
      nvbb_attach_block(bld->cond_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);

      bld->cond_bb[bld->cond_lvl]->exit->target = b;

      bld_new_block(bld, b);

      if (!bld->cond_lvl && bld->join_bb[bld->cond_lvl]) {
         bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
         new_instruction(bld->pc, NV_OP_JOIN)->is_join = TRUE;
      }
   }
      break;
   case TGSI_OPCODE_BGNLOOP:
   {
      struct nv_basic_block *bl = new_basic_block(bld->pc);
      struct nv_basic_block *bb = new_basic_block(bld->pc);

      assert(bld->loop_lvl < BLD_MAX_LOOP_NESTING);

      bld->loop_bb[bld->loop_lvl] = bl;
      bld->brkt_bb[bld->loop_lvl] = bb;

      bld_flow(bld, NV_OP_BREAKADDR, NV_CC_TR, NULL, bb, FALSE);

      nvbb_attach_block(bld->pc->current_block, bl, CFG_EDGE_LOOP_ENTER);

      bld_new_block(bld, bld->loop_bb[bld->loop_lvl++]);

      if (bld->loop_lvl == bld->pc->loop_nesting_bound)
         bld->pc->loop_nesting_bound++;

      bld_clear_def_use(&bld->tvs[0][0], BLD_MAX_TEMPS, bld->loop_lvl);
      bld_clear_def_use(&bld->avs[0][0], BLD_MAX_ADDRS, bld->loop_lvl);
      bld_clear_def_use(&bld->pvs[0][0], BLD_MAX_PREDS, bld->loop_lvl);
   }
      break;
   case TGSI_OPCODE_BRK:
   {
      struct nv_basic_block *bb = bld->brkt_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BREAK, NV_CC_TR, NULL, bb, FALSE);

      if (bld->out_kind == CFG_EDGE_FORWARD) /* else we already had BRK/CONT */
         nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);

      bld->out_kind = CFG_EDGE_FAKE;
   }
      break;
   case TGSI_OPCODE_CONT:
   {
      struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);

      nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_BACK);

      if ((bb = bld->join_bb[bld->cond_lvl - 1])) {
         bld->join_bb[bld->cond_lvl - 1] = NULL;
         nv_nvi_delete(bb->exit->prev);
      }
      bld->out_kind = CFG_EDGE_FAKE;
   }
      break;
   case TGSI_OPCODE_ENDLOOP:
   {
      struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];

      if (!nvbb_is_terminated(bld->pc->current_block))
         bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);

      nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_BACK);

      bld_loop_end(bld, bb); /* replace loop-side operand of the phis */

      bld_new_block(bld, bld->brkt_bb[--bld->loop_lvl]);
   }
      break;
   case TGSI_OPCODE_ABS:
   case TGSI_OPCODE_CEIL:
   case TGSI_OPCODE_FLR:
   case TGSI_OPCODE_TRUNC:
   case TGSI_OPCODE_ROUND:
   case TGSI_OPCODE_DDX:
   case TGSI_OPCODE_DDY:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         dst0[c] = bld_insn_1(bld, opcode, src0);
      }
      break;
   case TGSI_OPCODE_LIT:
      bld_lit(bld, dst0, insn);
      break;
   case TGSI_OPCODE_LRP:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         dst0[c] = bld_insn_2(bld, NV_OP_SUB, src1, src2);
         dst0[c] = bld_insn_3(bld, NV_OP_MAD, dst0[c], src0, src2);
      }
      break;
   case TGSI_OPCODE_MOV:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = emit_fetch(bld, insn, 0, c);
      break;
   case TGSI_OPCODE_MAD:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         dst0[c] = bld_insn_3(bld, opcode, src0, src1, src2);
      }
      break;
   case TGSI_OPCODE_POW:
      src0 = emit_fetch(bld, insn, 0, 0);
      src1 = emit_fetch(bld, insn, 1, 0);
      temp = bld_pow(bld, src0, src1);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_LOG:
      src0 = emit_fetch(bld, insn, 0, 0);
      src0 = bld_insn_1(bld, NV_OP_ABS, src0);
      temp = bld_insn_1(bld, NV_OP_LG2, src0);
      dst0[2] = temp;
      if (insn->Dst[0].Register.WriteMask & 3) {
         temp = bld_insn_1(bld, NV_OP_FLOOR, temp);
         dst0[0] = temp;
      }
      if (insn->Dst[0].Register.WriteMask & 2) {
         temp = bld_insn_1(bld, NV_OP_PREEX2, temp);
         temp = bld_insn_1(bld, NV_OP_EX2, temp);
         temp = bld_insn_1(bld, NV_OP_RCP, temp);
         dst0[1] = bld_insn_2(bld, NV_OP_MUL, src0, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_RCP:
   case TGSI_OPCODE_LG2:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, opcode, src0);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_RSQ:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_ABS, src0);
      temp = bld_insn_1(bld, NV_OP_RSQ, temp);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_SLT:
   case TGSI_OPCODE_SGE:
   case TGSI_OPCODE_SEQ:
   case TGSI_OPCODE_SGT:
   case TGSI_OPCODE_SLE:
   case TGSI_OPCODE_SNE:
   case TGSI_OPCODE_ISLT:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, NV_OP_SET, src0, src1);
         dst0[c]->insn->set_cond = translate_setcc(insn->Instruction.Opcode);
         SET_TYPE(dst0[c], infer_dst_type(insn->Instruction.Opcode));

         dst0[c]->insn->src[0]->typecast =
         dst0[c]->insn->src[1]->typecast =
            infer_src_type(insn->Instruction.Opcode);

         if (dst0[c]->reg.type != NV_TYPE_F32)
            break;
         dst0[c]->reg.as_type = NV_TYPE_S32;
         dst0[c] = bld_insn_1(bld, NV_OP_ABS, dst0[c]);
         dst0[c] = bld_insn_1(bld, NV_OP_CVT, dst0[c]);
         SET_TYPE(dst0[c], NV_TYPE_F32);
      }
      break;
   case TGSI_OPCODE_SCS:
      if (insn->Dst[0].Register.WriteMask & 0x3) {
         src0 = emit_fetch(bld, insn, 0, 0);
         temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
         if (insn->Dst[0].Register.WriteMask & 0x1)
            dst0[0] = bld_insn_1(bld, NV_OP_COS, temp);
         if (insn->Dst[0].Register.WriteMask & 0x2)
            dst0[1] = bld_insn_1(bld, NV_OP_SIN, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 0x4)
         dst0[2] = bld_imm_f32(bld, 0.0f);
      if (insn->Dst[0].Register.WriteMask & 0x8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_SSG:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = bld_predicate(bld, src0, FALSE);
         temp = bld_insn_2(bld, NV_OP_AND, src0, bld_imm_u32(bld, 0x80000000));
         temp = bld_insn_2(bld, NV_OP_OR, temp, bld_imm_f32(bld, 1.0f));
         dst0[c] = bld_insn_2(bld, NV_OP_XOR, temp, temp);
         dst0[c]->insn->cc = NV_CC_EQ;
         nv_reference(bld->pc, &dst0[c]->insn->flags_src, src1);
         dst0[c] = bld_insn_2(bld, NV_OP_SELECT, dst0[c], temp);
      }
      break;
   case TGSI_OPCODE_SUB:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, NV_OP_ADD, src0, src1);
         dst0[c]->insn->src[1]->mod ^= NV_MOD_NEG;
      }
      break;
   case TGSI_OPCODE_TEX:
   case TGSI_OPCODE_TXB:
   case TGSI_OPCODE_TXL:
   case TGSI_OPCODE_TXP:
      bld_tex(bld, dst0, insn);
      break;
   case TGSI_OPCODE_XPD:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         if (c == 3) {
            dst0[3] = bld_imm_f32(bld, 1.0f);
            break;
         }
         src0 = emit_fetch(bld, insn, 1, (c + 1) % 3);
         src1 = emit_fetch(bld, insn, 0, (c + 2) % 3);
         dst0[c] = bld_insn_2(bld, NV_OP_MUL, src0, src1);

         src0 = emit_fetch(bld, insn, 0, (c + 1) % 3);
         src1 = emit_fetch(bld, insn, 1, (c + 2) % 3);
         dst0[c] = bld_insn_3(bld, NV_OP_MAD, src0, src1, dst0[c]);

         dst0[c]->insn->src[2]->mod ^= NV_MOD_NEG;
      }
      break;
   case TGSI_OPCODE_RET:
      (new_instruction(bld->pc, NV_OP_RET))->fixed = 1;
      break;
   case TGSI_OPCODE_END:
      if (bld->ti->p->type == PIPE_SHADER_FRAGMENT)
         bld_export_outputs(bld);
      break;
   default:
      NOUVEAU_ERR("unhandled opcode %u\n", insn->Instruction.Opcode);
      abort();
      break;
   }

   FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
      emit_store(bld, insn, c, dst0[c]);

   if (prog->type == PIPE_SHADER_VERTEX && prog->vp.clpd_nr &&
       dreg->Register.File == TGSI_FILE_OUTPUT && !dreg->Register.Indirect &&
       prog->out[dreg->Register.Index].sn == TGSI_SEMANTIC_POSITION) {

      int p;
      for (p = 0; p < prog->vp.clpd_nr; p++) {
         struct nv_value *clipd = NULL;

         for (c = 0; c < 4; c++) {
            temp = new_value(bld->pc, NV_FILE_MEM_C(15), NV_TYPE_F32);
            temp->reg.id = p * 4 + c;
            temp = bld_insn_1(bld, NV_OP_LDA, temp);

            clipd = clipd ?
                bld_insn_3(bld, NV_OP_MAD, dst0[c], temp, clipd) :
                bld_insn_2(bld, NV_OP_MUL, dst0[c], temp);
         }

         temp = bld_insn_1(bld, NV_OP_MOV, clipd);
         temp->reg.file = NV_FILE_OUT;
         temp->reg.id = bld->ti->p->vp.clpd + p;
         temp->insn->fixed = 1;
      }
   }
}

static INLINE void
bld_free_value_trackers(struct bld_value_stack *base, int n)
{
   int i, c;

   for (i = 0; i < n; ++i)
      for (c = 0; c < 4; ++c)
         if (base[i * 4 + c].body)
            FREE(base[i * 4 + c].body);
}

int
nv50_tgsi_to_nc(struct nv_pc *pc, struct nv50_translation_info *ti)
{
   struct bld_context *bld = CALLOC_STRUCT(bld_context);
   int c;
   unsigned ip;

   pc->root[0] = pc->current_block = new_basic_block(pc);

   bld->pc = pc;
   bld->ti = ti;

   pc->loop_nesting_bound = 1;

   c = util_bitcount(bld->ti->p->fp.interp >> 24);
   if (c && ti->p->type == PIPE_SHADER_FRAGMENT) {
      bld->frgcrd[3] = new_value(pc, NV_FILE_MEM_V, NV_TYPE_F32);
      bld->frgcrd[3]->reg.id = c - 1;
      bld->frgcrd[3] = bld_insn_1(bld, NV_OP_LINTERP, bld->frgcrd[3]);
      bld->frgcrd[3] = bld_insn_1(bld, NV_OP_RCP, bld->frgcrd[3]);
   }

   for (ip = 0; ip < ti->inst_nr; ++ip)
      bld_instruction(bld, &ti->insns[ip]);

   bld_free_value_trackers(&bld->tvs[0][0], BLD_MAX_TEMPS);
   bld_free_value_trackers(&bld->avs[0][0], BLD_MAX_ADDRS);
   bld_free_value_trackers(&bld->pvs[0][0], BLD_MAX_PREDS);

   bld_free_value_trackers(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   FREE(bld);
   return 0;
}

/* If a variable is assigned in a loop, replace all references to the value
 * from outside the loop with a phi value.
 */
static void
bld_replace_value(struct nv_pc *pc, struct nv_basic_block *b,
                  struct nv_value *old_val,
                  struct nv_value *new_val)
{
   struct nv_instruction *nvi;

   for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = nvi->next) {
      int s;
      for (s = 0; s < 5; ++s) {
         if (!nvi->src[s])
            continue;
         if (nvi->src[s]->value == old_val)
            nv_reference(pc, &nvi->src[s], new_val);
      }
      if (nvi->flags_src && nvi->flags_src->value == old_val)
         nv_reference(pc, &nvi->flags_src, new_val);
   }

   b->pass_seq = pc->pass_seq;

   if (b->out[0] && b->out[0]->pass_seq < pc->pass_seq)
      bld_replace_value(pc, b->out[0], old_val, new_val);

   if (b->out[1] && b->out[1]->pass_seq < pc->pass_seq)
      bld_replace_value(pc, b->out[1], old_val, new_val);
}