nv50: make FrontFacing -1 or +1
[mesa.git] src/gallium/drivers/nv50/nv50_tgsi_to_nc.c
/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* #define NV50_TGSI2NC_DEBUG */

/* XXX: need to clean this up so we get the typecasting right more naturally */

#include <unistd.h>

#include "nv50_context.h"
#include "nv50_pc.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_util.h"

#include "util/u_simple_list.h"
#include "tgsi/tgsi_dump.h"

#define BLD_MAX_TEMPS 64
#define BLD_MAX_ADDRS 4
#define BLD_MAX_PREDS 4
#define BLD_MAX_IMMDS 128

#define BLD_MAX_COND_NESTING 4
#define BLD_MAX_LOOP_NESTING 4
#define BLD_MAX_CALL_NESTING 2

/* collects all values assigned to the same TGSI register */
struct bld_value_stack {
   struct nv_value *top;
   struct nv_value **body;
   unsigned size;
   uint16_t loop_use; /* 1 bit per loop level, indicates if used/defined */
   uint16_t loop_def;
};

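/* Illustration (not in the original source): "top" holds the value of the
 * current basic block; bld_vals_push() below archives it in body[] when a
 * new block begins, so that bld_phi() can later find one definition per
 * predecessor block and merge them.
 */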
static INLINE void
bld_vals_push_val(struct bld_value_stack *stk, struct nv_value *val)
{
   assert(!stk->size || (stk->body[stk->size - 1] != val));

   if (!(stk->size % 8)) {
      unsigned old_sz = (stk->size + 0) * sizeof(struct nv_value *);
      unsigned new_sz = (stk->size + 8) * sizeof(struct nv_value *);
      stk->body = (struct nv_value **)REALLOC(stk->body, old_sz, new_sz);
   }
   stk->body[stk->size++] = val;
}

static INLINE boolean
bld_vals_del_val(struct bld_value_stack *stk, struct nv_value *val)
{
   int i; /* must be signed, or the not-found checks below can never fire */

   for (i = stk->size - 1; i >= 0; --i)
      if (stk->body[i] == val)
         break;
   if (i < 0)
      return FALSE;

   if (i != stk->size - 1)
      stk->body[i] = stk->body[stk->size - 1];

   --stk->size; /* XXX: old size in REALLOC */
   return TRUE;
}

static INLINE void
bld_vals_push(struct bld_value_stack *stk)
{
   bld_vals_push_val(stk, stk->top);
   stk->top = NULL;
}

static INLINE void
bld_push_values(struct bld_value_stack *stacks, int n)
{
   int i, c;

   for (i = 0; i < n; ++i)
      for (c = 0; c < 4; ++c)
         if (stacks[i * 4 + c].top)
            bld_vals_push(&stacks[i * 4 + c]);
}

struct bld_context {
   struct nv50_translation_info *ti;

   struct nv_pc *pc;
   struct nv_basic_block *b;

   struct tgsi_parse_context parse[BLD_MAX_CALL_NESTING];
   int call_lvl;

   struct nv_basic_block *cond_bb[BLD_MAX_COND_NESTING];
   struct nv_basic_block *join_bb[BLD_MAX_COND_NESTING];
   struct nv_basic_block *else_bb[BLD_MAX_COND_NESTING];
   int cond_lvl;
   struct nv_basic_block *loop_bb[BLD_MAX_LOOP_NESTING];
   struct nv_basic_block *brkt_bb[BLD_MAX_LOOP_NESTING];
   int loop_lvl;

   ubyte out_kind; /* CFG_EDGE_FORWARD, or FAKE in case of BREAK/CONT */

   struct bld_value_stack tvs[BLD_MAX_TEMPS][4]; /* TGSI_FILE_TEMPORARY */
   struct bld_value_stack avs[BLD_MAX_ADDRS][4]; /* TGSI_FILE_ADDRESS */
   struct bld_value_stack pvs[BLD_MAX_PREDS][4]; /* TGSI_FILE_PREDICATE */
   struct bld_value_stack ovs[PIPE_MAX_SHADER_OUTPUTS][4];

   uint32_t outputs_written[(PIPE_MAX_SHADER_OUTPUTS + 31) / 32];

   struct nv_value *frgcrd[4];
   struct nv_value *sysval[4];

   /* wipe on new BB */
   struct nv_value *saved_addr[4][2];
   struct nv_value *saved_inputs[128];
   struct nv_value *saved_immd[BLD_MAX_IMMDS];
   uint num_immds;
};

static INLINE ubyte
bld_stack_file(struct bld_context *bld, struct bld_value_stack *stk)
{
   if (stk < &bld->avs[0][0])
      return NV_FILE_GPR;
   else
   if (stk < &bld->pvs[0][0])
      return NV_FILE_ADDR;
   else
   if (stk < &bld->ovs[0][0])
      return NV_FILE_FLAGS;
   else
      return NV_FILE_OUT;
}

static INLINE struct nv_value *
bld_fetch(struct bld_context *bld, struct bld_value_stack *stk, int i, int c)
{
   stk[i * 4 + c].loop_use |= 1 << bld->loop_lvl;

   return stk[i * 4 + c].top;
}

static struct nv_value *
bld_loop_phi(struct bld_context *, struct bld_value_stack *, struct nv_value *);

/* If a variable is defined in a loop without prior use, we don't need
 * a phi in the loop header to account for backwards flow.
 *
 * However, if this variable is then also used outside the loop, we do
 * need a phi after all. But we must not use this phi's def inside the
 * loop, so we can eliminate the phi if it is unused later.
 */
static INLINE void
bld_store(struct bld_context *bld, struct bld_value_stack *stk, int i, int c,
          struct nv_value *val)
{
   const uint16_t m = 1 << bld->loop_lvl;

   stk = &stk[i * 4 + c];

   if (bld->loop_lvl && !(m & (stk->loop_def | stk->loop_use)))
      bld_loop_phi(bld, stk, val);

   stk->top = val;
   stk->loop_def |= 1 << bld->loop_lvl;
}

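/* Example (illustration only): in
 *    MOV TEMP[0], IMM[0]
 *    BGNLOOP
 *      ADD TEMP[0], TEMP[0], IMM[1]
 *    ENDLOOP
 * TEMP[0] carries a value across the back edge, so the loop header needs
 * "%t = phi %init, %redef"; a temporary first written inside the loop and
 * never read there would not.
 */
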
static INLINE void
bld_clear_def_use(struct bld_value_stack *stk, int n, int lvl)
{
   int i;
   const uint16_t mask = ~(1 << lvl);

   for (i = 0; i < n * 4; ++i) {
      stk[i].loop_def &= mask;
      stk[i].loop_use &= mask;
   }
}

#define FETCH_TEMP(i, c)    bld_fetch(bld, &bld->tvs[0][0], i, c)
#define STORE_TEMP(i, c, v) bld_store(bld, &bld->tvs[0][0], i, c, (v))
#define FETCH_ADDR(i, c)    bld_fetch(bld, &bld->avs[0][0], i, c)
#define STORE_ADDR(i, c, v) bld_store(bld, &bld->avs[0][0], i, c, (v))
#define FETCH_PRED(i, c)    bld_fetch(bld, &bld->pvs[0][0], i, c)
#define STORE_PRED(i, c, v) bld_store(bld, &bld->pvs[0][0], i, c, (v))

#define STORE_OUTR(i, c, v)                                         \
   do {                                                             \
      bld->ovs[i][c].top = (v);                                     \
      bld->outputs_written[(i) / 8] |= 1 << (((i) * 4 + (c)) % 32); \
   } while (0)

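/* Illustration (not in the original source): output index i, channel c maps
 * to bit (i * 4 + c) % 32 of word i / 8, i.e. each 32-bit word of
 * outputs_written packs the 4-bit channel masks of 8 outputs.
 */
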
static INLINE void
bld_warn_uninitialized(struct bld_context *bld, int kind,
                       struct bld_value_stack *stk, struct nv_basic_block *b)
{
   long i = (stk - &bld->tvs[0][0]) / 4;
   long c = (stk - &bld->tvs[0][0]) & 3;

   if (c == 3)
      c = -1;

   debug_printf("WARNING: TEMP[%li].%c %s used uninitialized in BB:%i\n",
                i, (int)('x' + c), kind ? "may be" : "is", b->id);
}

static INLINE struct nv_value *
bld_def(struct nv_instruction *i, int c, struct nv_value *value)
{
   i->def[c] = value;
   value->insn = i;
   return value;
}

static INLINE struct nv_value *
find_by_bb(struct bld_value_stack *stack, struct nv_basic_block *b)
{
   int i;

   if (stack->top && stack->top->insn->bb == b)
      return stack->top;

   for (i = stack->size - 1; i >= 0; --i)
      if (stack->body[i]->insn->bb == b)
         return stack->body[i];
   return NULL;
}

/* fetch value from stack that was defined in the specified basic block,
 * or search for first definitions in all of its predecessors
 */
static void
fetch_by_bb(struct bld_value_stack *stack,
            struct nv_value **vals, int *n,
            struct nv_basic_block *b)
{
   int i;
   struct nv_value *val;

   assert(*n < 16); /* MAX_COND_NESTING */

   val = find_by_bb(stack, b);
   if (val) {
      for (i = 0; i < *n; ++i)
         if (vals[i] == val)
            return;
      vals[(*n)++] = val;
      return;
   }
   for (i = 0; i < b->num_in; ++i)
      if (!IS_WALL_EDGE(b->in_kind[i]))
         fetch_by_bb(stack, vals, n, b->in[i]);
}

static INLINE struct nv_value *
bld_load_imm_u32(struct bld_context *bld, uint32_t u);

static INLINE struct nv_value *
bld_undef(struct bld_context *bld, ubyte file)
{
   struct nv_instruction *nvi = new_instruction(bld->pc, NV_OP_UNDEF);

   return bld_def(nvi, 0, new_value(bld->pc, file, NV_TYPE_U32));
}

static struct nv_value *
bld_phi(struct bld_context *bld, struct nv_basic_block *b,
        struct bld_value_stack *stack)
{
   struct nv_basic_block *in;
   struct nv_value *vals[16], *val;
   struct nv_instruction *phi;
   int i, j, n;

   do {
      i = n = 0;
      fetch_by_bb(stack, vals, &n, b);

      if (!n) {
         bld_warn_uninitialized(bld, 0, stack, b);
         return NULL;
      }

      if (n == 1) {
         if (nvbb_dominated_by(b, vals[0]->insn->bb))
            break;

         bld_warn_uninitialized(bld, 1, stack, b);

         /* back-tracking to insert missing value of other path */
         in = b;
         while (in->in[0]) {
            if (in->num_in == 1) {
               in = in->in[0];
            } else {
               if (!nvbb_reachable_by(in->in[0], vals[0]->insn->bb, b))
                  in = in->in[0];
               else
               if (!nvbb_reachable_by(in->in[1], vals[0]->insn->bb, b))
                  in = in->in[1];
               else
                  in = in->in[0];
            }
         }
         bld->pc->current_block = in;

         /* should make this a no-op */
         bld_vals_push_val(stack, bld_undef(bld, vals[0]->reg.file));
         continue;
      }

      for (i = 0; i < n; ++i) {
         /* if value dominates b, continue to the redefinitions */
         if (nvbb_dominated_by(b, vals[i]->insn->bb))
            continue;

         /* if value dominates any in-block, b should be the dom frontier */
         for (j = 0; j < b->num_in; ++j)
            if (nvbb_dominated_by(b->in[j], vals[i]->insn->bb))
               break;
         /* otherwise, find the dominance frontier and put the phi there */
         if (j == b->num_in) {
            in = nvbb_dom_frontier(vals[i]->insn->bb);
            val = bld_phi(bld, in, stack);
            bld_vals_push_val(stack, val);
            break;
         }
      }
   } while (i < n);

   bld->pc->current_block = b;

   if (n == 1)
      return vals[0];

   phi = new_instruction(bld->pc, NV_OP_PHI);

   bld_def(phi, 0, new_value(bld->pc, vals[0]->reg.file, vals[0]->reg.type));
   for (i = 0; i < n; ++i)
      phi->src[i] = new_ref(bld->pc, vals[i]);

   return phi->def[0];
}

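/* Illustration (not in the original source): in the diamond CFG
 *        A
 *       / \
 *      B   C    with TEMP[0] written in both B and C,
 *       \ /
 *        D
 * a fetch in D makes fetch_by_bb() collect the B and C definitions, and
 * bld_phi() emits "%d = phi %b, %c" in D, the dominance frontier of both.
 */
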
/* Insert a phi function in the loop header.
 * For nested loops, we need to insert phi functions in all the outer
 * loop headers if they don't have one yet.
 *
 * @def: redefinition from inside loop, or NULL if to be replaced later
 */
static struct nv_value *
bld_loop_phi(struct bld_context *bld, struct bld_value_stack *stack,
             struct nv_value *def)
{
   struct nv_instruction *phi;
   struct nv_basic_block *bb = bld->pc->current_block;
   struct nv_value *val = NULL;

   if (bld->loop_lvl > 1) {
      --bld->loop_lvl;
      if (!((stack->loop_def | stack->loop_use) & (1 << bld->loop_lvl)))
         val = bld_loop_phi(bld, stack, NULL);
      ++bld->loop_lvl;
   }

   if (!val)
      val = bld_phi(bld, bld->pc->current_block, stack); /* old definition */
   if (!val) {
      bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1]->in[0];
      val = bld_undef(bld, bld_stack_file(bld, stack));
   }

   bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1];

   phi = new_instruction(bld->pc, NV_OP_PHI);

   bld_def(phi, 0, new_value_like(bld->pc, val));
   if (!def)
      def = phi->def[0];

   bld_vals_push_val(stack, phi->def[0]);

   phi->target = (struct nv_basic_block *)stack; /* cheat */

   nv_reference(bld->pc, &phi->src[0], val);
   nv_reference(bld->pc, &phi->src[1], def);

   bld->pc->current_block = bb;

   return phi->def[0];
}

static INLINE struct nv_value *
bld_fetch_global(struct bld_context *bld, struct bld_value_stack *stack)
{
   const uint16_t m = 1 << bld->loop_lvl;
   const uint16_t use = stack->loop_use;

   stack->loop_use |= m;

   /* If neither used nor def'd inside the loop, build a phi in foresight,
    * so we don't have to replace stuff later on, which requires tracking.
    */
   if (bld->loop_lvl && !((use | stack->loop_def) & m))
      return bld_loop_phi(bld, stack, NULL);

   return bld_phi(bld, bld->pc->current_block, stack);
}

static INLINE struct nv_value *
bld_imm_u32(struct bld_context *bld, uint32_t u)
{
   int i;
   unsigned n = bld->num_immds;

   for (i = 0; i < n; ++i)
      if (bld->saved_immd[i]->reg.imm.u32 == u)
         return bld->saved_immd[i];
   assert(n < BLD_MAX_IMMDS);

   bld->num_immds++;

   bld->saved_immd[n] = new_value(bld->pc, NV_FILE_IMM, NV_TYPE_U32);
   bld->saved_immd[n]->reg.imm.u32 = u;
   return bld->saved_immd[n];
}

static void
bld_replace_value(struct nv_pc *, struct nv_basic_block *, struct nv_value *,
                  struct nv_value *);

/* Replace the source of the phi in the loop header by the last assignment,
 * or eliminate the phi function if there is no assignment inside the loop.
 *
 * Redundancy situation 1 - (used) but (not redefined) value:
 *   %3 = phi %0, %3 = %3 is used
 *   %3 = phi %0, %4 = is new definition
 *
 * Redundancy situation 2 - (not used) but (redefined) value:
 *   %3 = phi %0, %2 = %2 is used, %3 could be used outside, deleted by DCE
 */
static void
bld_loop_end(struct bld_context *bld, struct nv_basic_block *bb)
{
   struct nv_basic_block *save = bld->pc->current_block;
   struct nv_instruction *phi, *next;
   struct nv_value *val;
   struct bld_value_stack *stk;
   int i, s, n;

   for (phi = bb->phi; phi && phi->opcode == NV_OP_PHI; phi = next) {
      next = phi->next;

      stk = (struct bld_value_stack *)phi->target;
      phi->target = NULL;

      for (s = 1, n = 0; n < bb->num_in; ++n) {
         if (bb->in_kind[n] != CFG_EDGE_BACK)
            continue;

         assert(s < 4);
         bld->pc->current_block = bb->in[n];
         val = bld_fetch_global(bld, stk);

         for (i = 0; i < 4; ++i)
            if (phi->src[i] && phi->src[i]->value == val)
               break;
         if (i == 4)
            nv_reference(bld->pc, &phi->src[s++], val);
      }
      bld->pc->current_block = save;

      if (phi->src[0]->value == phi->def[0] ||
          phi->src[0]->value == phi->src[1]->value)
         s = 1;
      else
      if (phi->src[1]->value == phi->def[0])
         s = 0;
      else
         continue;

      if (s >= 0) {
         /* eliminate the phi */
         bld_vals_del_val(stk, phi->def[0]);

         ++bld->pc->pass_seq;
         bld_replace_value(bld->pc, bb, phi->def[0], phi->src[s]->value);

         nv_nvi_delete(phi);
      }
   }
}

static INLINE struct nv_value *
bld_imm_f32(struct bld_context *bld, float f)
{
   return bld_imm_u32(bld, fui(f));
}

#define SET_TYPE(v, t) ((v)->reg.type = NV_TYPE_##t)

static struct nv_value *
bld_insn_1(struct bld_context *bld, uint opcode, struct nv_value *src0)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);
   assert(insn);

   nv_reference(bld->pc, &insn->src[0], src0); /* NOTE: new_ref would suffice */

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.type));
}

static struct nv_value *
bld_insn_2(struct bld_context *bld, uint opcode,
           struct nv_value *src0, struct nv_value *src1)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, &insn->src[0], src0);
   nv_reference(bld->pc, &insn->src[1], src1);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.type));
}

static struct nv_value *
bld_insn_3(struct bld_context *bld, uint opcode,
           struct nv_value *src0, struct nv_value *src1,
           struct nv_value *src2)
{
   struct nv_instruction *insn = new_instruction(bld->pc, opcode);

   nv_reference(bld->pc, &insn->src[0], src0);
   nv_reference(bld->pc, &insn->src[1], src1);
   nv_reference(bld->pc, &insn->src[2], src2);

   return bld_def(insn, 0, new_value(bld->pc, NV_FILE_GPR, src0->reg.type));
}

#define BLD_INSN_1_EX(d, op, dt, s0, s0t)            \
   do {                                              \
      (d) = bld_insn_1(bld, (NV_OP_##op), (s0));     \
      (d)->reg.type = NV_TYPE_##dt;                  \
      (d)->insn->src[0]->typecast = NV_TYPE_##s0t;   \
   } while (0)

#define BLD_INSN_2_EX(d, op, dt, s0, s0t, s1, s1t)      \
   do {                                                 \
      (d) = bld_insn_2(bld, (NV_OP_##op), (s0), (s1));  \
      (d)->reg.type = NV_TYPE_##dt;                     \
      (d)->insn->src[0]->typecast = NV_TYPE_##s0t;      \
      (d)->insn->src[1]->typecast = NV_TYPE_##s1t;      \
   } while (0)

static struct nv_value *
bld_pow(struct bld_context *bld, struct nv_value *x, struct nv_value *e)
{
   struct nv_value *val;

   BLD_INSN_1_EX(val, LG2, F32, x, F32);
   BLD_INSN_2_EX(val, MUL, F32, e, F32, val, F32);
   val = bld_insn_1(bld, NV_OP_PREEX2, val);
   val = bld_insn_1(bld, NV_OP_EX2, val);

   return val;
}

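/* Worked example (illustration only): pow(x, e) is lowered to
 * 2^(e * log2(x)); for x = 2.0, e = 10.0: LG2 -> 1.0, MUL -> 10.0,
 * EX2 -> 1024.0. PREEX2 only prepares the argument for the EX2 unit.
 */
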
static INLINE struct nv_value *
bld_load_imm_f32(struct bld_context *bld, float f)
{
   return bld_insn_1(bld, NV_OP_MOV, bld_imm_f32(bld, f));
}

static INLINE struct nv_value *
bld_load_imm_u32(struct bld_context *bld, uint32_t u)
{
   return bld_insn_1(bld, NV_OP_MOV, bld_imm_u32(bld, u));
}

static struct nv_value *
bld_get_address(struct bld_context *bld, int id, struct nv_value *indirect)
{
   int i;
   struct nv_instruction *nvi;

   for (i = 0; i < 4; ++i) {
      if (!bld->saved_addr[i][0])
         break;
      if (bld->saved_addr[i][1] == indirect) {
         nvi = bld->saved_addr[i][0]->insn;
         if (nvi->src[0]->value->reg.imm.u32 == id)
            return bld->saved_addr[i][0];
      }
   }
   i &= 3;

   bld->saved_addr[i][0] = bld_load_imm_u32(bld, id);
   bld->saved_addr[i][0]->reg.file = NV_FILE_ADDR;
   bld->saved_addr[i][1] = indirect;
   return bld->saved_addr[i][0];
}
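
/* Note (not in the original source): saved_addr is a 4-entry cache of live
 * address values; when all slots are taken, i == 4 after the search loop
 * and "i &= 3" recycles slot 0.
 */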

static struct nv_value *
bld_predicate(struct bld_context *bld, struct nv_value *src, boolean bool_only)
{
   struct nv_instruction *nvi = src->insn;

   if (nvi->opcode == NV_OP_LDA ||
       nvi->opcode == NV_OP_PHI ||
       nvi->bb != bld->pc->current_block) {
      nvi = new_instruction(bld->pc, NV_OP_CVT);
      nv_reference(bld->pc, &nvi->src[0], src);
   } else
   if (bool_only) {
      while (nvi->opcode == NV_OP_ABS || nvi->opcode == NV_OP_CVT ||
             nvi->opcode == NV_OP_NEG) {
         /* TGSI SET gets conversion to f32, we only need source 0/~0 */
         if (nvi->def[0]->insn->flags_src)
            break;
         nvi = nvi->src[0]->value->insn;
      }
   }

   if (!nvi->flags_def) {
      nvi->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
      nvi->flags_def->insn = nvi;
   }
   return nvi->flags_def;
}

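/* Note (not in the original source): callers predicate instructions on the
 * returned flags value; bld_kil() below, for example, emits KIL with
 * cc = NV_CC_LT so the pixel is only killed where the source is negative.
 */
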
static void
bld_kil(struct bld_context *bld, struct nv_value *src)
{
   struct nv_instruction *nvi;

   src = bld_predicate(bld, src, FALSE);
   nvi = new_instruction(bld->pc, NV_OP_KIL);
   nvi->fixed = 1;
   nvi->flags_src = new_ref(bld->pc, src);
   nvi->cc = NV_CC_LT;
}

static void
bld_flow(struct bld_context *bld, uint opcode, ubyte cc,
         struct nv_value *src, struct nv_basic_block *target,
         boolean plan_reconverge)
{
   struct nv_instruction *nvi;

   if (plan_reconverge)
      new_instruction(bld->pc, NV_OP_JOINAT)->fixed = 1;

   nvi = new_instruction(bld->pc, opcode);
   nvi->is_terminator = 1;
   nvi->cc = cc;
   nvi->target = target;
   if (src)
      nvi->flags_src = new_ref(bld->pc, src);
}

static ubyte
translate_setcc(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_SLT: return NV_CC_LT;
   case TGSI_OPCODE_SGE: return NV_CC_GE;
   case TGSI_OPCODE_SEQ: return NV_CC_EQ;
   case TGSI_OPCODE_SGT: return NV_CC_GT;
   case TGSI_OPCODE_SLE: return NV_CC_LE;
   case TGSI_OPCODE_SNE: return NV_CC_NE | NV_CC_U;
   case TGSI_OPCODE_STR: return NV_CC_TR;
   case TGSI_OPCODE_SFL: return NV_CC_FL;

   case TGSI_OPCODE_ISLT: return NV_CC_LT;
   case TGSI_OPCODE_ISGE: return NV_CC_GE;
   case TGSI_OPCODE_USEQ: return NV_CC_EQ;
   case TGSI_OPCODE_USGE: return NV_CC_GE;
   case TGSI_OPCODE_USLT: return NV_CC_LT;
   case TGSI_OPCODE_USNE: return NV_CC_NE;
   default:
      assert(0);
      return NV_CC_FL;
   }
}

static uint
translate_opcode(uint opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_ABS: return NV_OP_ABS;
   case TGSI_OPCODE_ADD:
   case TGSI_OPCODE_SUB:
   case TGSI_OPCODE_UADD: return NV_OP_ADD;
   case TGSI_OPCODE_AND: return NV_OP_AND;
   case TGSI_OPCODE_EX2: return NV_OP_EX2;
   case TGSI_OPCODE_CEIL: return NV_OP_CEIL;
   case TGSI_OPCODE_FLR: return NV_OP_FLOOR;
   case TGSI_OPCODE_TRUNC: return NV_OP_TRUNC;
   case TGSI_OPCODE_COS: return NV_OP_COS;
   case TGSI_OPCODE_SIN: return NV_OP_SIN;
   case TGSI_OPCODE_DDX: return NV_OP_DFDX;
   case TGSI_OPCODE_DDY: return NV_OP_DFDY;
   case TGSI_OPCODE_F2I:
   case TGSI_OPCODE_F2U:
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_U2F: return NV_OP_CVT;
   case TGSI_OPCODE_INEG: return NV_OP_NEG;
   case TGSI_OPCODE_LG2: return NV_OP_LG2;
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_USHR: return NV_OP_SHR;
   case TGSI_OPCODE_MAD:
   case TGSI_OPCODE_UMAD: return NV_OP_MAD;
   case TGSI_OPCODE_MAX:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_UMAX: return NV_OP_MAX;
   case TGSI_OPCODE_MIN:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_UMIN: return NV_OP_MIN;
   case TGSI_OPCODE_MUL:
   case TGSI_OPCODE_UMUL: return NV_OP_MUL;
   case TGSI_OPCODE_OR: return NV_OP_OR;
   case TGSI_OPCODE_RCP: return NV_OP_RCP;
   case TGSI_OPCODE_RSQ: return NV_OP_RSQ;
   case TGSI_OPCODE_SAD: return NV_OP_SAD;
   case TGSI_OPCODE_SHL: return NV_OP_SHL;
   case TGSI_OPCODE_SLT:
   case TGSI_OPCODE_SGE:
   case TGSI_OPCODE_SEQ:
   case TGSI_OPCODE_SGT:
   case TGSI_OPCODE_SLE:
   case TGSI_OPCODE_SNE:
   case TGSI_OPCODE_ISLT:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE: return NV_OP_SET;
   case TGSI_OPCODE_TEX: return NV_OP_TEX;
   case TGSI_OPCODE_TXP: return NV_OP_TEX;
   case TGSI_OPCODE_TXB: return NV_OP_TXB;
   case TGSI_OPCODE_TXL: return NV_OP_TXL;
   case TGSI_OPCODE_XOR: return NV_OP_XOR;
   default:
      return NV_OP_NOP;
   }
}

static ubyte
infer_src_type(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_MOV:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_XOR:
   case TGSI_OPCODE_SAD:
   case TGSI_OPCODE_U2F:
   case TGSI_OPCODE_UADD:
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_UMAD:
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMAX:
   case TGSI_OPCODE_UMIN:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
   case TGSI_OPCODE_USHR:
      return NV_TYPE_U32;
   case TGSI_OPCODE_I2F:
   case TGSI_OPCODE_IDIV:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_INEG:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_ISLT:
      return NV_TYPE_S32;
   default:
      return NV_TYPE_F32;
   }
}

static ubyte
infer_dst_type(unsigned opcode)
{
   switch (opcode) {
   case TGSI_OPCODE_MOV:
   case TGSI_OPCODE_F2U:
   case TGSI_OPCODE_AND:
   case TGSI_OPCODE_OR:
   case TGSI_OPCODE_XOR:
   case TGSI_OPCODE_SAD:
   case TGSI_OPCODE_UADD:
   case TGSI_OPCODE_UDIV:
   case TGSI_OPCODE_UMOD:
   case TGSI_OPCODE_UMAD:
   case TGSI_OPCODE_UMUL:
   case TGSI_OPCODE_UMAX:
   case TGSI_OPCODE_UMIN:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
   case TGSI_OPCODE_USHR:
      return NV_TYPE_U32;
   case TGSI_OPCODE_F2I:
   case TGSI_OPCODE_IDIV:
   case TGSI_OPCODE_IMAX:
   case TGSI_OPCODE_IMIN:
   case TGSI_OPCODE_INEG:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_ISHR:
   case TGSI_OPCODE_ISLT:
      return NV_TYPE_S32;
   default:
      return NV_TYPE_F32;
   }
}

static void
emit_store(struct bld_context *bld, const struct tgsi_full_instruction *inst,
           unsigned chan, struct nv_value *value)
{
   const struct tgsi_full_dst_register *reg = &inst->Dst[0];

   assert(chan < 4);

   if (inst->Instruction.Opcode != TGSI_OPCODE_MOV)
      value->reg.type = infer_dst_type(inst->Instruction.Opcode);

   switch (inst->Instruction.Saturate) {
   case TGSI_SAT_NONE:
      break;
   case TGSI_SAT_ZERO_ONE:
      BLD_INSN_1_EX(value, SAT, F32, value, F32);
      break;
   case TGSI_SAT_MINUS_PLUS_ONE:
      value = bld_insn_2(bld, NV_OP_MAX, value, bld_load_imm_f32(bld, -1.0f));
      value = bld_insn_2(bld, NV_OP_MIN, value, bld_load_imm_f32(bld, 1.0f));
      value->reg.type = NV_TYPE_F32;
      break;
   }

   switch (reg->Register.File) {
   case TGSI_FILE_OUTPUT:
      value = bld_insn_1(bld, NV_OP_MOV, value);
      value->reg.file = bld->ti->output_file;

      if (bld->ti->p->type == PIPE_SHADER_FRAGMENT) {
         STORE_OUTR(reg->Register.Index, chan, value);
      } else {
         value->insn->fixed = 1;
         value->reg.id = bld->ti->output_map[reg->Register.Index][chan];
      }
      break;
   case TGSI_FILE_TEMPORARY:
      assert(reg->Register.Index < BLD_MAX_TEMPS);
      value->reg.file = NV_FILE_GPR;
      if (value->insn->bb != bld->pc->current_block)
         value = bld_insn_1(bld, NV_OP_MOV, value);
      STORE_TEMP(reg->Register.Index, chan, value);
      break;
   case TGSI_FILE_ADDRESS:
      assert(reg->Register.Index < BLD_MAX_ADDRS);
      value->reg.file = NV_FILE_ADDR;
      STORE_ADDR(reg->Register.Index, chan, value);
      break;
   }
}

static INLINE uint32_t
bld_is_output_written(struct bld_context *bld, int i, int c)
{
   if (c < 0)
      return bld->outputs_written[i / 8] & (0xf << ((i * 4) % 32));
   return bld->outputs_written[i / 8] & (1 << ((i * 4 + c) % 32));
}

static void
bld_export_outputs(struct bld_context *bld)
{
   struct nv_value *vals[4];
   struct nv_instruction *nvi;
   int i, c, n;

   bld_push_values(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   for (i = 0; i < PIPE_MAX_SHADER_OUTPUTS; ++i) {
      if (!bld_is_output_written(bld, i, -1))
         continue;
      for (n = 0, c = 0; c < 4; ++c) {
         if (!bld_is_output_written(bld, i, c))
            continue;
         vals[n] = bld_fetch_global(bld, &bld->ovs[i][c]);
         assert(vals[n]);
         vals[n] = bld_insn_1(bld, NV_OP_MOV, vals[n]);
         vals[n++]->reg.id = bld->ti->output_map[i][c];
      }
      assert(n);

      (nvi = new_instruction(bld->pc, NV_OP_EXPORT))->fixed = 1;

      for (c = 0; c < n; ++c)
         nvi->src[c] = new_ref(bld->pc, vals[c]);
   }
}

static void
bld_new_block(struct bld_context *bld, struct nv_basic_block *b)
{
   int i;

   bld_push_values(&bld->tvs[0][0], BLD_MAX_TEMPS);
   bld_push_values(&bld->avs[0][0], BLD_MAX_ADDRS);
   bld_push_values(&bld->pvs[0][0], BLD_MAX_PREDS);
   bld_push_values(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   bld->pc->current_block = b;

   for (i = 0; i < 4; ++i)
      bld->saved_addr[i][0] = NULL;

   for (i = 0; i < 128; ++i)
      bld->saved_inputs[i] = NULL;

   bld->out_kind = CFG_EDGE_FORWARD;
}

static struct nv_value *
bld_saved_input(struct bld_context *bld, unsigned i, unsigned c)
{
   unsigned idx = bld->ti->input_map[i][c];

   if (bld->ti->p->type != PIPE_SHADER_FRAGMENT)
      return NULL;
   if (bld->saved_inputs[idx])
      return bld->saved_inputs[idx];
   return NULL;
}

static struct nv_value *
bld_interpolate(struct bld_context *bld, unsigned mode, struct nv_value *val)
{
   if (val->reg.id == 255) {
      /* gl_FrontFacing: 0/~0 to -1.0/+1.0 */
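      /* Worked example (illustration only): front-facing gives ~0:
       * ~0 << 31 = 0x80000000, and 0x80000000 ^ bits(-1.0f = 0xbf800000)
       * = 0x3f800000 = +1.0f; back-facing gives 0: 0 ^ 0xbf800000 = -1.0f.
       */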
      val = bld_insn_1(bld, NV_OP_LINTERP, val);
      val = bld_insn_2(bld, NV_OP_SHL, val, bld_imm_u32(bld, 31));
      val->insn->src[0]->typecast = NV_TYPE_U32;
      val = bld_insn_2(bld, NV_OP_XOR, val, bld_imm_f32(bld, -1.0f));
      val->insn->src[0]->typecast = NV_TYPE_U32;
   } else
   if (mode & (NV50_INTERP_LINEAR | NV50_INTERP_FLAT))
      val = bld_insn_1(bld, NV_OP_LINTERP, val);
   else
      val = bld_insn_2(bld, NV_OP_PINTERP, val, bld->frgcrd[3]);

   val->insn->flat = (mode & NV50_INTERP_FLAT) ? 1 : 0;
   val->insn->centroid = (mode & NV50_INTERP_CENTROID) ? 1 : 0;
   return val;
}

static struct nv_value *
emit_fetch(struct bld_context *bld, const struct tgsi_full_instruction *insn,
           const unsigned s, const unsigned chan)
{
   const struct tgsi_full_src_register *src = &insn->Src[s];
   struct nv_value *res;
   unsigned idx, swz, dim_idx, ind_idx, ind_swz;
   ubyte type = infer_src_type(insn->Instruction.Opcode);

   idx = src->Register.Index;
   swz = tgsi_util_get_full_src_register_swizzle(src, chan);
   dim_idx = -1;
   ind_idx = -1;
   ind_swz = 0;

   if (src->Register.Indirect) {
      ind_idx = src->Indirect.Index;
      ind_swz = tgsi_util_get_src_register_swizzle(&src->Indirect, 0);
   }

   switch (src->Register.File) {
   case TGSI_FILE_CONSTANT:
      dim_idx = src->Dimension.Index ? src->Dimension.Index + 2 : 1;
      assert(dim_idx < 14);
      assert(dim_idx == 1); /* for now */

      res = new_value(bld->pc, NV_FILE_MEM_C(dim_idx), type);
      res->reg.type = type;
      res->reg.id = (idx * 4 + swz) & 127;
      res = bld_insn_1(bld, NV_OP_LDA, res);

      if (src->Register.Indirect)
         res->insn->src[4] = new_ref(bld->pc, FETCH_ADDR(ind_idx, ind_swz));
      if (idx >= (128 / 4))
         res->insn->src[4] =
            new_ref(bld->pc, bld_get_address(bld, (idx * 16) & ~0x1ff, NULL));
      break;
   case TGSI_FILE_IMMEDIATE:
      assert(idx < bld->ti->immd32_nr);
      res = bld_load_imm_u32(bld, bld->ti->immd32[idx * 4 + swz]);
      res->reg.type = type;
      break;
   case TGSI_FILE_INPUT:
      res = bld_saved_input(bld, idx, swz);
      if (res && (insn->Instruction.Opcode != TGSI_OPCODE_TXP))
         return res;

      res = new_value(bld->pc, bld->ti->input_file, type);
      res->reg.id = bld->ti->input_map[idx][swz];

      if (res->reg.file == NV_FILE_MEM_V) {
         res = bld_interpolate(bld, bld->ti->interp_mode[idx], res);
      } else {
         assert(src->Dimension.Dimension == 0);
         res = bld_insn_1(bld, NV_OP_LDA, res);
         assert(res->reg.type == type);
      }
      bld->saved_inputs[bld->ti->input_map[idx][swz]] = res;
      break;
   case TGSI_FILE_TEMPORARY:
      /* this should be load from l[], with reload elimination later on */
      res = bld_fetch_global(bld, &bld->tvs[idx][swz]);
      break;
   case TGSI_FILE_ADDRESS:
      res = bld_fetch_global(bld, &bld->avs[idx][swz]);
      break;
   case TGSI_FILE_PREDICATE:
      res = bld_fetch_global(bld, &bld->pvs[idx][swz]);
      break;
   default:
      NOUVEAU_ERR("illegal/unhandled src reg file: %d\n", src->Register.File);
      abort();
      break;
   }
   if (!res)
      return bld_undef(bld, NV_FILE_GPR);

   switch (tgsi_util_get_full_src_register_sign_mode(src, chan)) {
   case TGSI_UTIL_SIGN_KEEP:
      break;
   case TGSI_UTIL_SIGN_CLEAR:
      res = bld_insn_1(bld, NV_OP_ABS, res);
      break;
   case TGSI_UTIL_SIGN_TOGGLE:
      res = bld_insn_1(bld, NV_OP_NEG, res);
      break;
   case TGSI_UTIL_SIGN_SET:
      res = bld_insn_1(bld, NV_OP_ABS, res);
      res = bld_insn_1(bld, NV_OP_NEG, res);
      break;
   default:
      NOUVEAU_ERR("illegal/unhandled src reg sign mode\n");
      abort();
      break;
   }

   return res;
}

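/* LIT semantics (illustration, per the TGSI spec): dst.x = 1,
 * dst.y = max(src.x, 0),
 * dst.z = (src.x > 0) ? max(src.y, 0)^clamp(src.w) : 0, dst.w = 1,
 * with src.w clamped to (-128, 128). Below, the conditional for dst.z
 * reuses the flags of the MAX that computed dst.y.
 */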
static void
bld_lit(struct bld_context *bld, struct nv_value *dst0[4],
        const struct tgsi_full_instruction *insn)
{
   struct nv_value *val0, *zero;
   unsigned mask = insn->Dst[0].Register.WriteMask;

   if (mask & ((1 << 0) | (1 << 3)))
      dst0[3] = dst0[0] = bld_load_imm_f32(bld, 1.0f);

   if (mask & (3 << 1)) {
      zero = bld_load_imm_f32(bld, 0.0f);
      val0 = bld_insn_2(bld, NV_OP_MAX, emit_fetch(bld, insn, 0, 0), zero);

      if (mask & (1 << 1))
         dst0[1] = val0;
   }

   if (mask & (1 << 2)) {
      struct nv_value *val1, *val3, *src1, *src3;
      struct nv_value *pos128 = bld_load_imm_f32(bld,  127.999999f);
      struct nv_value *neg128 = bld_load_imm_f32(bld, -127.999999f);

      src1 = emit_fetch(bld, insn, 0, 1);
      src3 = emit_fetch(bld, insn, 0, 3);

      val0->insn->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
      val0->insn->flags_def->insn = val0->insn;

      val1 = bld_insn_2(bld, NV_OP_MAX, src1, zero);
      val3 = bld_insn_2(bld, NV_OP_MAX, src3, neg128);
      val3 = bld_insn_2(bld, NV_OP_MIN, val3, pos128);
      val3 = bld_pow(bld, val1, val3);

      dst0[2] = bld_insn_1(bld, NV_OP_MOV, zero);
      dst0[2]->insn->cc = NV_CC_LE;
      dst0[2]->insn->flags_src = new_ref(bld->pc, val0->insn->flags_def);

      dst0[2] = bld_insn_2(bld, NV_OP_SELECT, val3, dst0[2]);
   }
}

static INLINE void
get_tex_dim(const struct tgsi_full_instruction *insn, int *dim, int *arg)
{
   switch (insn->Texture.Texture) {
   case TGSI_TEXTURE_1D:
      *arg = *dim = 1;
      break;
   case TGSI_TEXTURE_SHADOW1D:
      *dim = 1;
      *arg = 2;
      break;
   case TGSI_TEXTURE_UNKNOWN:
   case TGSI_TEXTURE_2D:
   case TGSI_TEXTURE_RECT:
      *arg = *dim = 2;
      break;
   case TGSI_TEXTURE_SHADOW2D:
   case TGSI_TEXTURE_SHADOWRECT:
      *dim = 2;
      *arg = 3;
      break;
   case TGSI_TEXTURE_3D:
   case TGSI_TEXTURE_CUBE:
      *dim = *arg = 3;
      break;
   default:
      assert(0);
      break;
   }
}

static void
load_proj_tex_coords(struct bld_context *bld,
                     struct nv_value *t[4], int dim,
                     const struct tgsi_full_instruction *insn)
{
   int c, mask = 0;

   t[3] = emit_fetch(bld, insn, 0, 3);

   if (t[3]->insn->opcode == NV_OP_PINTERP) {
      t[3]->insn->opcode = NV_OP_LINTERP;
      nv_reference(bld->pc, &t[3]->insn->src[1], NULL);
   }

   t[3] = bld_insn_1(bld, NV_OP_RCP, t[3]);

   for (c = 0; c < dim; ++c) {
      t[c] = emit_fetch(bld, insn, 0, c);
      if (t[c]->insn->opcode == NV_OP_LINTERP)
         t[c]->insn->opcode = NV_OP_PINTERP;

      if (t[c]->insn->opcode == NV_OP_PINTERP)
         nv_reference(bld->pc, &t[c]->insn->src[1], t[3]);
      else
         mask |= 1 << c;
   }

   for (c = 0; mask; ++c, mask >>= 1) {
      if (!(mask & 1))
         continue;
      t[c] = bld_insn_2(bld, NV_OP_MUL, t[c], t[3]);
   }
}

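/* Illustration (not in the original source): for TXP on a 2D texture,
 * t[3] = RCP(q) and the coordinates become (s/q, t/q); coordinates produced
 * by PINTERP get the 1/q factor folded into their src[1], anything else is
 * multiplied explicitly in the final loop of load_proj_tex_coords() above.
 */
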
static void
bld_tex(struct bld_context *bld, struct nv_value *dst0[4],
        const struct tgsi_full_instruction *insn)
{
   struct nv_value *t[4];
   struct nv_instruction *nvi;
   uint opcode = translate_opcode(insn->Instruction.Opcode);
   int arg, dim, c;

   get_tex_dim(insn, &dim, &arg);

   if (insn->Texture.Texture == TGSI_TEXTURE_CUBE) {
   }
   // else
   if (insn->Instruction.Opcode == TGSI_OPCODE_TXP) {
      load_proj_tex_coords(bld, t, dim, insn);
   } else
      for (c = 0; c < dim; ++c)
         t[c] = emit_fetch(bld, insn, 0, c);

   if (arg != dim)
      t[dim] = emit_fetch(bld, insn, 0, 2);

   if (insn->Instruction.Opcode == TGSI_OPCODE_TXB ||
       insn->Instruction.Opcode == TGSI_OPCODE_TXL) {
      t[arg++] = emit_fetch(bld, insn, 0, 3);
   }

   for (c = 0; c < arg; ++c) {
      t[c] = bld_insn_1(bld, NV_OP_MOV, t[c]);
      t[c]->reg.type = NV_TYPE_F32;
   }

   nvi = new_instruction(bld->pc, opcode);

   for (c = 0; c < 4; ++c) {
      nvi->def[c] = dst0[c] = new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32);
      nvi->def[c]->insn = nvi;
   }
   for (c = 0; c < arg; ++c)
      nvi->src[c] = new_ref(bld->pc, t[c]);

   nvi->tex_t = insn->Src[1].Register.Index;
   nvi->tex_s = 0;
   nvi->tex_mask = 0xf;
   nvi->tex_cube = (insn->Texture.Texture == TGSI_TEXTURE_CUBE) ? 1 : 0;
   nvi->tex_live = 0;
   nvi->tex_argc = arg;
}

static INLINE struct nv_value *
bld_dot(struct bld_context *bld, const struct tgsi_full_instruction *insn,
        int n)
{
   struct nv_value *dotp, *src0, *src1;
   int c;

   src0 = emit_fetch(bld, insn, 0, 0);
   src1 = emit_fetch(bld, insn, 1, 0);
   dotp = bld_insn_2(bld, NV_OP_MUL, src0, src1);

   for (c = 1; c < n; ++c) {
      src0 = emit_fetch(bld, insn, 0, c);
      src1 = emit_fetch(bld, insn, 1, c);
      dotp = bld_insn_3(bld, NV_OP_MAD, src0, src1, dotp);
   }
   return dotp;
}

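/* Illustration (not in the original source): for n = 3, bld_dot() above
 * builds
 *    t = MUL(a.x, b.x); t = MAD(a.y, b.y, t); t = MAD(a.z, b.z, t);
 * i.e. a dot product as one MUL followed by n - 1 MADs.
 */
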
#define FOR_EACH_DST0_ENABLED_CHANNEL(chan, inst)           \
   for (chan = 0; chan < 4; ++chan)                         \
      if ((inst)->Dst[0].Register.WriteMask & (1 << chan))

static void
bld_instruction(struct bld_context *bld,
                const struct tgsi_full_instruction *insn)
{
   struct nv_value *src0;
   struct nv_value *src1;
   struct nv_value *src2;
   struct nv_value *dst0[4];
   struct nv_value *temp;
   int c;
   uint opcode = translate_opcode(insn->Instruction.Opcode);

#ifdef NV50_TGSI2NC_DEBUG
   debug_printf("bld_instruction:"); tgsi_dump_instruction(insn, 1);
#endif

   switch (insn->Instruction.Opcode) {
   case TGSI_OPCODE_ADD:
   case TGSI_OPCODE_MAX:
   case TGSI_OPCODE_MIN:
   case TGSI_OPCODE_MUL:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, opcode, src0, src1);
      }
      break;
   case TGSI_OPCODE_ARL:
      src1 = bld_imm_u32(bld, 4);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         (temp = bld_insn_1(bld, NV_OP_FLOOR, src0))->reg.type = NV_TYPE_S32;
         dst0[c] = bld_insn_2(bld, NV_OP_SHL, temp, src1);
      }
      break;
   case TGSI_OPCODE_CMP:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         src0 = bld_predicate(bld, src0, FALSE);

         src1 = bld_insn_1(bld, NV_OP_MOV, src1);
         src1->insn->flags_src = new_ref(bld->pc, src0);
         src1->insn->cc = NV_CC_LT;

         src2 = bld_insn_1(bld, NV_OP_MOV, src2);
         src2->insn->flags_src = new_ref(bld->pc, src0);
         src2->insn->cc = NV_CC_GE;

         dst0[c] = bld_insn_2(bld, NV_OP_SELECT, src1, src2);
      }
      break;
   case TGSI_OPCODE_COS:
   case TGSI_OPCODE_SIN:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
      if (insn->Dst[0].Register.WriteMask & 7)
         temp = bld_insn_1(bld, opcode, temp);
      for (c = 0; c < 3; ++c)
         if (insn->Dst[0].Register.WriteMask & (1 << c))
            dst0[c] = temp;
      if (!(insn->Dst[0].Register.WriteMask & (1 << 3)))
         break;
      src0 = emit_fetch(bld, insn, 0, 3);
      temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
      dst0[3] = bld_insn_1(bld, opcode, temp);
      break;
   case TGSI_OPCODE_DP2:
      temp = bld_dot(bld, insn, 2);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DP3:
      temp = bld_dot(bld, insn, 3);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DP4:
      temp = bld_dot(bld, insn, 4);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DPH:
      src0 = bld_dot(bld, insn, 3);
      src1 = emit_fetch(bld, insn, 1, 3);
      temp = bld_insn_2(bld, NV_OP_ADD, src0, src1);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_DST:
      if (insn->Dst[0].Register.WriteMask & 1)
         dst0[0] = bld_imm_f32(bld, 1.0f);
      if (insn->Dst[0].Register.WriteMask & 2) {
         src0 = emit_fetch(bld, insn, 0, 1);
         src1 = emit_fetch(bld, insn, 1, 1);
         dst0[1] = bld_insn_2(bld, NV_OP_MUL, src0, src1);
      }
      if (insn->Dst[0].Register.WriteMask & 4)
         dst0[2] = emit_fetch(bld, insn, 0, 2);
      if (insn->Dst[0].Register.WriteMask & 8)
         dst0[3] = emit_fetch(bld, insn, 1, 3);
      break;
   case TGSI_OPCODE_EX2:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
      temp = bld_insn_1(bld, NV_OP_EX2, temp);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_FRC:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         dst0[c] = bld_insn_1(bld, NV_OP_FLOOR, src0);
         dst0[c] = bld_insn_2(bld, NV_OP_SUB, src0, dst0[c]);
      }
      break;
   case TGSI_OPCODE_KIL:
      for (c = 0; c < 4; ++c) {
         src0 = emit_fetch(bld, insn, 0, c);
         bld_kil(bld, src0);
      }
      break;
   case TGSI_OPCODE_KILP:
      (new_instruction(bld->pc, NV_OP_KIL))->fixed = 1;
      break;
   case TGSI_OPCODE_IF:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      nvbb_attach_block(bld->pc->current_block, b, CFG_EDGE_FORWARD);

      bld->join_bb[bld->cond_lvl] = bld->pc->current_block;
      bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;

      src1 = bld_predicate(bld, emit_fetch(bld, insn, 0, 0), TRUE);

      bld_flow(bld, NV_OP_BRA, NV_CC_EQ, src1, NULL, (bld->cond_lvl == 0));

      ++bld->cond_lvl;
      bld_new_block(bld, b);
   }
      break;
   case TGSI_OPCODE_ELSE:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      --bld->cond_lvl;
      nvbb_attach_block(bld->join_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);

      bld->cond_bb[bld->cond_lvl]->exit->target = b;
      bld->cond_bb[bld->cond_lvl] = bld->pc->current_block;

      new_instruction(bld->pc, NV_OP_BRA)->is_terminator = 1;

      ++bld->cond_lvl;
      bld_new_block(bld, b);
   }
      break;
   case TGSI_OPCODE_ENDIF:
   {
      struct nv_basic_block *b = new_basic_block(bld->pc);

      --bld->cond_lvl;
      nvbb_attach_block(bld->pc->current_block, b, bld->out_kind);
      nvbb_attach_block(bld->cond_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);

      bld->cond_bb[bld->cond_lvl]->exit->target = b;

      bld_new_block(bld, b);

      if (!bld->cond_lvl && bld->join_bb[bld->cond_lvl]) {
         bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
         new_instruction(bld->pc, NV_OP_JOIN)->is_join = TRUE;
      }
   }
      break;
   case TGSI_OPCODE_BGNLOOP:
   {
      struct nv_basic_block *bl = new_basic_block(bld->pc);
      struct nv_basic_block *bb = new_basic_block(bld->pc);

      bld->loop_bb[bld->loop_lvl] = bl;
      bld->brkt_bb[bld->loop_lvl] = bb;

      bld_flow(bld, NV_OP_BREAKADDR, NV_CC_TR, NULL, bb, FALSE);

      nvbb_attach_block(bld->pc->current_block, bl, CFG_EDGE_LOOP_ENTER);

      bld_new_block(bld, bld->loop_bb[bld->loop_lvl++]);

      if (bld->loop_lvl == bld->pc->loop_nesting_bound)
         bld->pc->loop_nesting_bound++;

      bld_clear_def_use(&bld->tvs[0][0], BLD_MAX_TEMPS, bld->loop_lvl);
      bld_clear_def_use(&bld->avs[0][0], BLD_MAX_ADDRS, bld->loop_lvl);
      bld_clear_def_use(&bld->pvs[0][0], BLD_MAX_PREDS, bld->loop_lvl);
   }
      break;
   case TGSI_OPCODE_BRK:
   {
      struct nv_basic_block *bb = bld->brkt_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BREAK, NV_CC_TR, NULL, bb, FALSE);

      if (bld->out_kind == CFG_EDGE_FORWARD) /* else we already had BRK/CONT */
         nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);

      bld->out_kind = CFG_EDGE_FAKE;
   }
      break;
   case TGSI_OPCODE_CONT:
   {
      struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);

      nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_BACK);

      if ((bb = bld->join_bb[bld->cond_lvl - 1])) {
         bld->join_bb[bld->cond_lvl - 1] = NULL;
         nv_nvi_delete(bb->exit->prev);
      }
      bld->out_kind = CFG_EDGE_FAKE;
   }
      break;
   case TGSI_OPCODE_ENDLOOP:
   {
      struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];

      bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);

      nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_BACK);

      bld_loop_end(bld, bb); /* replace loop-side operand of the phis */

      bld_new_block(bld, bld->brkt_bb[--bld->loop_lvl]);
   }
      break;
   case TGSI_OPCODE_ABS:
   case TGSI_OPCODE_CEIL:
   case TGSI_OPCODE_FLR:
   case TGSI_OPCODE_TRUNC:
   case TGSI_OPCODE_DDX:
   case TGSI_OPCODE_DDY:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         dst0[c] = bld_insn_1(bld, opcode, src0);
      }
      break;
   case TGSI_OPCODE_LIT:
      bld_lit(bld, dst0, insn);
      break;
   case TGSI_OPCODE_LRP:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         dst0[c] = bld_insn_2(bld, NV_OP_SUB, src1, src2);
         dst0[c] = bld_insn_3(bld, NV_OP_MAD, dst0[c], src0, src2);
      }
      break;
   case TGSI_OPCODE_MOV:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = emit_fetch(bld, insn, 0, c);
      break;
   case TGSI_OPCODE_MAD:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         src2 = emit_fetch(bld, insn, 2, c);
         dst0[c] = bld_insn_3(bld, opcode, src0, src1, src2);
      }
      break;
   case TGSI_OPCODE_POW:
      src0 = emit_fetch(bld, insn, 0, 0);
      src1 = emit_fetch(bld, insn, 1, 0);
      temp = bld_pow(bld, src0, src1);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_RCP:
   case TGSI_OPCODE_LG2:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, opcode, src0);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_RSQ:
      src0 = emit_fetch(bld, insn, 0, 0);
      temp = bld_insn_1(bld, NV_OP_ABS, src0);
      temp = bld_insn_1(bld, NV_OP_RSQ, temp);
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
         dst0[c] = temp;
      break;
   case TGSI_OPCODE_SLT:
   case TGSI_OPCODE_SGE:
   case TGSI_OPCODE_SEQ:
   case TGSI_OPCODE_SGT:
   case TGSI_OPCODE_SLE:
   case TGSI_OPCODE_SNE:
   case TGSI_OPCODE_ISLT:
   case TGSI_OPCODE_ISGE:
   case TGSI_OPCODE_USEQ:
   case TGSI_OPCODE_USGE:
   case TGSI_OPCODE_USLT:
   case TGSI_OPCODE_USNE:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, NV_OP_SET, src0, src1);
         dst0[c]->insn->set_cond = translate_setcc(insn->Instruction.Opcode);
         dst0[c]->reg.type = infer_dst_type(insn->Instruction.Opcode);

         dst0[c]->insn->src[0]->typecast =
         dst0[c]->insn->src[1]->typecast =
            infer_src_type(insn->Instruction.Opcode);

         if (dst0[c]->reg.type != NV_TYPE_F32)
            break;
         dst0[c] = bld_insn_1(bld, NV_OP_ABS, dst0[c]);
         dst0[c]->insn->src[0]->typecast = NV_TYPE_S32;
         dst0[c]->reg.type = NV_TYPE_S32;
         dst0[c] = bld_insn_1(bld, NV_OP_CVT, dst0[c]);
         dst0[c]->reg.type = NV_TYPE_F32;
      }
      break;
   case TGSI_OPCODE_SCS:
      if (insn->Dst[0].Register.WriteMask & 0x3) {
         src0 = emit_fetch(bld, insn, 0, 0);
         temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
         if (insn->Dst[0].Register.WriteMask & 0x1)
            dst0[0] = bld_insn_1(bld, NV_OP_COS, temp);
         if (insn->Dst[0].Register.WriteMask & 0x2)
            dst0[1] = bld_insn_1(bld, NV_OP_SIN, temp);
      }
      if (insn->Dst[0].Register.WriteMask & 0x4)
         dst0[2] = bld_imm_f32(bld, 0.0f);
      if (insn->Dst[0].Register.WriteMask & 0x8)
         dst0[3] = bld_imm_f32(bld, 1.0f);
      break;
   case TGSI_OPCODE_SSG:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = bld_predicate(bld, src0, FALSE);
         temp = bld_insn_2(bld, NV_OP_AND, src0, bld_imm_u32(bld, 0x80000000));
         temp = bld_insn_2(bld, NV_OP_OR, temp, bld_imm_f32(bld, 1.0f));
         dst0[c] = bld_insn_2(bld, NV_OP_XOR, temp, temp);
         dst0[c]->insn->cc = NV_CC_EQ;
         nv_reference(bld->pc, &dst0[c]->insn->flags_src, src1);
      }
      break;
   case TGSI_OPCODE_SUB:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         src0 = emit_fetch(bld, insn, 0, c);
         src1 = emit_fetch(bld, insn, 1, c);
         dst0[c] = bld_insn_2(bld, NV_OP_ADD, src0, src1);
         dst0[c]->insn->src[1]->mod ^= NV_MOD_NEG;
      }
      break;
   case TGSI_OPCODE_TEX:
   case TGSI_OPCODE_TXB:
   case TGSI_OPCODE_TXL:
   case TGSI_OPCODE_TXP:
      bld_tex(bld, dst0, insn);
      break;
   case TGSI_OPCODE_XPD:
      FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
         if (c == 3) {
            dst0[3] = bld_imm_f32(bld, 1.0f);
            break;
         }
         src0 = emit_fetch(bld, insn, 0, (c + 1) % 3);
         src1 = emit_fetch(bld, insn, 1, (c + 2) % 3);
         dst0[c] = bld_insn_2(bld, NV_OP_MUL, src0, src1);

         src0 = emit_fetch(bld, insn, 0, (c + 2) % 3);
         src1 = emit_fetch(bld, insn, 1, (c + 1) % 3);
         dst0[c] = bld_insn_3(bld, NV_OP_MAD, src0, src1, dst0[c]);

         dst0[c]->insn->src[2]->mod ^= NV_MOD_NEG;
      }
      break;
   case TGSI_OPCODE_RET:
      (new_instruction(bld->pc, NV_OP_RET))->fixed = 1;
      break;
   case TGSI_OPCODE_END:
      if (bld->ti->p->type == PIPE_SHADER_FRAGMENT)
         bld_export_outputs(bld);
      break;
   default:
      NOUVEAU_ERR("unhandled opcode %u\n", insn->Instruction.Opcode);
      abort();
      break;
   }

   FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
      emit_store(bld, insn, c, dst0[c]);
}

static INLINE void
bld_free_value_trackers(struct bld_value_stack *base, int n)
{
   int i, c;

   for (i = 0; i < n; ++i)
      for (c = 0; c < 4; ++c)
         if (base[i * 4 + c].body)
            FREE(base[i * 4 + c].body);
}

int
nv50_tgsi_to_nc(struct nv_pc *pc, struct nv50_translation_info *ti)
{
   struct bld_context *bld = CALLOC_STRUCT(bld_context);
   int c;

   pc->root = pc->current_block = new_basic_block(pc);

   bld->pc = pc;
   bld->ti = ti;

   pc->loop_nesting_bound = 1;

   c = util_bitcount(bld->ti->p->fp.interp >> 24);
   if (c && ti->p->type == PIPE_SHADER_FRAGMENT) {
      bld->frgcrd[3] = new_value(pc, NV_FILE_MEM_V, NV_TYPE_F32);
      bld->frgcrd[3]->reg.id = c - 1;
      bld->frgcrd[3] = bld_insn_1(bld, NV_OP_LINTERP, bld->frgcrd[3]);
      bld->frgcrd[3] = bld_insn_1(bld, NV_OP_RCP, bld->frgcrd[3]);
   }

   tgsi_parse_init(&bld->parse[0], ti->p->pipe.tokens);

   while (!tgsi_parse_end_of_tokens(&bld->parse[bld->call_lvl])) {
      const union tgsi_full_token *tok = &bld->parse[bld->call_lvl].FullToken;

      tgsi_parse_token(&bld->parse[bld->call_lvl]);

      switch (tok->Token.Type) {
      case TGSI_TOKEN_TYPE_INSTRUCTION:
         bld_instruction(bld, &tok->FullInstruction);
         break;
      default:
         break;
      }
   }

   bld_free_value_trackers(&bld->tvs[0][0], BLD_MAX_TEMPS);
   bld_free_value_trackers(&bld->avs[0][0], BLD_MAX_ADDRS);
   bld_free_value_trackers(&bld->pvs[0][0], BLD_MAX_PREDS);

   bld_free_value_trackers(&bld->ovs[0][0], PIPE_MAX_SHADER_OUTPUTS);

   FREE(bld);
   return 0;
}

/* If a variable is assigned in a loop, replace all references to the value
 * from outside the loop with a phi value.
 */
static void
bld_replace_value(struct nv_pc *pc, struct nv_basic_block *b,
                  struct nv_value *old_val,
                  struct nv_value *new_val)
{
   struct nv_instruction *nvi;

   for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = nvi->next) {
      int s;
      for (s = 0; s < 5; ++s) {
         if (!nvi->src[s])
            continue;
         if (nvi->src[s]->value == old_val)
            nv_reference(pc, &nvi->src[s], new_val);
      }
      if (nvi->flags_src && nvi->flags_src->value == old_val)
         nv_reference(pc, &nvi->flags_src, new_val);
   }

   b->pass_seq = pc->pass_seq;

   if (b->out[0] && b->out[0]->pass_seq < pc->pass_seq)
      bld_replace_value(pc, b->out[0], old_val, new_val);

   if (b->out[1] && b->out[1]->pass_seq < pc->pass_seq)
      bld_replace_value(pc, b->out[1], old_val, new_val);
}