freedreno/ir3: don't precolor unassigned inputs
[mesa.git] / src / freedreno / ir3 / ir3_ra.c
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_math.h"
28 #include "util/register_allocate.h"
29 #include "util/ralloc.h"
30 #include "util/bitset.h"
31
32 #include "ir3.h"
33 #include "ir3_compiler.h"
34
35 /*
36 * Register Assignment:
37 *
38 * Uses the register_allocate util, which implements a graph coloring
39 * algo with interference classes. To handle the cases where we need
40 * consecutive registers (for example, texture sample instructions),
41 * we model these as larger (double/quad/etc) registers which conflict
42 * with the corresponding registers in other classes.
43 *
44 * We also create additional classes for half-regs, which
45 * do not conflict with the full-reg classes. We do need at least
46 * sizes 1-4 (to deal w/ texture sample instructions output to half-
47 * reg). At the moment we don't create the higher order half-reg
48 * classes as half-reg frequently does not have enough precision
49 * for texture coords at higher resolutions.
50 *
51 * There are some additional cases that we need to handle specially,
52 * as the graph coloring algo doesn't understand "partial writes".
53 * For example, a sequence like:
54 *
55 * add r0.z, ...
56 * sam (f32)(xy)r0.x, ...
57 * ...
58 * sam (f32)(xyzw)r0.w, r0.x, ... ; 3d texture, so r0.xyz are coord
59 *
60 * In this scenario, we treat r0.xyz as class size 3, which is written
61 * (from a use/def perspective) at the 'add' instruction and ignore the
62 * subsequent partial writes to r0.xy. So the 'add r0.z, ...' is the
63 * defining instruction, as it is the first to partially write r0.xyz.
64 *
65 * To address the fragmentation that this can potentially cause, a
66 * two pass register allocation is used. After the first pass the
67 * assignment of scalars is discarded, but the assignment of vecN (for
68 * N > 1) is used to pre-color in the second pass, which considers
69 * only scalars.
70 *
71 * Arrays of arbitrary size are handled via pre-coloring a consecutive
72 * sequence of registers. Additional scalar (single component) reg
73 * names are allocated starting at ctx->class_base[total_class_count]
74 * (see arr->base), which are pre-colored. In the use/def graph direct
75 * access is treated as a single element use/def, and indirect access
76 * is treated as use or def of all array elements. (Only the first
77 * def is tracked, in case of multiple indirect writes, etc.)
78 *
79 * TODO arrays that fit in one of the pre-defined class sizes should
80 * not need to be pre-colored, but instead could be given a normal
81 * vreg name. (Ignoring this for now since it is a good way to work
82 * out the kinks with arbitrary sized arrays.)
83 *
84 * TODO might be easier for debugging to split this into two passes,
85 * the first assigning vreg names in a way that we could ir3_print()
86 * the result.
87 */
88
89 static const unsigned class_sizes[] = {
90 1, 2, 3, 4,
91 4 + 4, /* txd + 1d/2d */
92 4 + 6, /* txd + 3d */
93 };
94 #define class_count ARRAY_SIZE(class_sizes)
95
96 static const unsigned half_class_sizes[] = {
97 1, 2, 3, 4,
98 };
99 #define half_class_count ARRAY_SIZE(half_class_sizes)
100
101 /* high regs seem to only be used for compute shaders; vec1 and vec3
102 * appear to be sufficient (for now?)
103 */
104 static const unsigned high_class_sizes[] = {
105 1, 3,
106 };
107 #define high_class_count ARRAY_SIZE(high_class_sizes)
108
109 #define total_class_count (class_count + half_class_count + high_class_count)
110
111 /* Below a0.x are normal regs. RA doesn't need to assign a0.x/p0.x. */
112 #define NUM_REGS (4 * 48) /* r0 to r47 */
113 #define NUM_HIGH_REGS (4 * 8) /* r48 to r55 */
114 #define FIRST_HIGH_REG (4 * 48)
115 /* Number of virtual regs in a given class: */
116 #define CLASS_REGS(i) (NUM_REGS - (class_sizes[i] - 1))
117 #define HALF_CLASS_REGS(i) (NUM_REGS - (half_class_sizes[i] - 1))
118 #define HIGH_CLASS_REGS(i) (NUM_HIGH_REGS - (high_class_sizes[i] - 1))
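/* Worked example (numbers follow from the defines above, illustrative only):
 * a full-precision class of size 3 can start at any base gpr except the last
 * two, so its CLASS_REGS() is 192 - (3 - 1) = 190 virtual registers; the
 * half/high counts follow the same pattern against NUM_REGS/NUM_HIGH_REGS.
 */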
119
120 #define HALF_OFFSET (class_count)
121 #define HIGH_OFFSET (class_count + half_class_count)
122
123 /* register-set, created one time, used for all shaders: */
124 struct ir3_ra_reg_set {
125 struct ra_regs *regs;
126 unsigned int classes[class_count];
127 unsigned int half_classes[half_class_count];
128 unsigned int high_classes[high_class_count];
129 /* maps flat virtual register space to base gpr: */
130 uint16_t *ra_reg_to_gpr;
131 /* maps cls,gpr to flat virtual register space: */
132 uint16_t **gpr_to_ra_reg;
133 };
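/* A rough sketch of how the two tables relate (values illustrative): for any
 * class 'cls' and base gpr 'n' that is valid for that class,
 *
 *    unsigned r = set->gpr_to_ra_reg[cls][n];
 *    assert(set->ra_reg_to_gpr[r] == n);
 *
 * ie. gpr_to_ra_reg[] maps into the flat space used by register_allocate,
 * and ra_reg_to_gpr[] maps an allocated node back to a base gpr.
 */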
134
135 static void
136 build_q_values(unsigned int **q_values, unsigned off,
137 const unsigned *sizes, unsigned count)
138 {
139 for (unsigned i = 0; i < count; i++) {
140 q_values[i + off] = rzalloc_array(q_values, unsigned, total_class_count);
141
142 /* From register_allocate.c:
143 *
144 * q(B,C) (indexed by C, B is this register class) in
145 * Runeson/Nyström paper. This is "how many registers of B could
146 * the worst choice register from C conflict with".
147 *
148 * If we just let the register allocation algorithm compute these
149 * values, it is extremely expensive. However, since all of our
150 * registers are laid out, we can very easily compute them
151 * ourselves. View the register from C as fixed starting at GRF n
152 * somewhere in the middle, and the register from B as sliding back
153 * and forth. Then the first register to conflict from B is the
154 * one starting at n - class_size[B] + 1 and the last register to
155 * conflict will start at n + class_size[B] - 1. Therefore, the
156 * number of conflicts from B is class_size[B] + class_size[C] - 1.
157 *
158 * +-+-+-+-+-+-+ +-+-+-+-+-+-+
159 * B | | | | | |n| --> | | | | | | |
160 * +-+-+-+-+-+-+ +-+-+-+-+-+-+
161 * +-+-+-+-+-+
162 * C |n| | | | |
163 * +-+-+-+-+-+
164 *
165 * (Idea copied from brw_fs_reg_allocate.cpp)
166 */
167 for (unsigned j = 0; j < count; j++)
168 q_values[i + off][j + off] = sizes[i] + sizes[j] - 1;
169 }
170 }
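/* Worked example (using the class sizes defined above): for B = vec2 and
 * C = vec3, a fixed vec3 starting at gpr n is overlapped by vec2 registers
 * starting anywhere in [n - 1, n + 2], ie. 4 positions, so
 * q(B,C) = 2 + 3 - 1 = 4.
 */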
171
172 /* One-time setup of RA register-set, which describes all the possible
173 * "virtual" registers and their interferences. Ie. double register
174 * occupies (and conflicts with) two single registers, and so forth.
175 * Since registers do not need to be aligned to their class size, they
176 * can conflict with other registers in the same class too. Ie:
177 *
178 * Single (base) | Double
179 * --------------+---------------
180 * R0 | D0
181 * R1 | D0 D1
182 * R2 | D1 D2
183 * R3 | D2
184 * .. and so on..
185 *
186 * (NOTE the disassembler uses notation like r0.x/y/z/w but those are
187 * really just four scalar registers. Don't let that confuse you.)
188 */
189 struct ir3_ra_reg_set *
190 ir3_ra_alloc_reg_set(struct ir3_compiler *compiler)
191 {
192 struct ir3_ra_reg_set *set = rzalloc(compiler, struct ir3_ra_reg_set);
193 unsigned ra_reg_count, reg, first_half_reg, first_high_reg, base;
194 unsigned int **q_values;
195
196 /* calculate # of regs across all classes: */
197 ra_reg_count = 0;
198 for (unsigned i = 0; i < class_count; i++)
199 ra_reg_count += CLASS_REGS(i);
200 for (unsigned i = 0; i < half_class_count; i++)
201 ra_reg_count += HALF_CLASS_REGS(i);
202 for (unsigned i = 0; i < high_class_count; i++)
203 ra_reg_count += HIGH_CLASS_REGS(i);
204
205 /* allocate and populate q_values: */
206 q_values = ralloc_array(set, unsigned *, total_class_count);
207
208 build_q_values(q_values, 0, class_sizes, class_count);
209 build_q_values(q_values, HALF_OFFSET, half_class_sizes, half_class_count);
210 build_q_values(q_values, HIGH_OFFSET, high_class_sizes, high_class_count);
211
212 /* allocate the reg-set.. */
213 set->regs = ra_alloc_reg_set(set, ra_reg_count, true);
214 set->ra_reg_to_gpr = ralloc_array(set, uint16_t, ra_reg_count);
215 set->gpr_to_ra_reg = ralloc_array(set, uint16_t *, total_class_count);
216
217 /* .. and classes */
218 reg = 0;
219 for (unsigned i = 0; i < class_count; i++) {
220 set->classes[i] = ra_alloc_reg_class(set->regs);
221
222 set->gpr_to_ra_reg[i] = ralloc_array(set, uint16_t, CLASS_REGS(i));
223
224 for (unsigned j = 0; j < CLASS_REGS(i); j++) {
225 ra_class_add_reg(set->regs, set->classes[i], reg);
226
227 set->ra_reg_to_gpr[reg] = j;
228 set->gpr_to_ra_reg[i][j] = reg;
229
230 for (unsigned br = j; br < j + class_sizes[i]; br++)
231 ra_add_transitive_reg_conflict(set->regs, br, reg);
232
233 reg++;
234 }
235 }
236
237 first_half_reg = reg;
238 base = HALF_OFFSET;
239
240 for (unsigned i = 0; i < half_class_count; i++) {
241 set->half_classes[i] = ra_alloc_reg_class(set->regs);
242
243 set->gpr_to_ra_reg[base + i] =
244 ralloc_array(set, uint16_t, HALF_CLASS_REGS(i));
245
246 for (unsigned j = 0; j < HALF_CLASS_REGS(i); j++) {
247 ra_class_add_reg(set->regs, set->half_classes[i], reg);
248
249 set->ra_reg_to_gpr[reg] = j;
250 set->gpr_to_ra_reg[base + i][j] = reg;
251
252 for (unsigned br = j; br < j + half_class_sizes[i]; br++)
253 ra_add_transitive_reg_conflict(set->regs, br + first_half_reg, reg);
254
255 reg++;
256 }
257 }
258
259 first_high_reg = reg;
260 base = HIGH_OFFSET;
261
262 for (unsigned i = 0; i < high_class_count; i++) {
263 set->high_classes[i] = ra_alloc_reg_class(set->regs);
264
265 set->gpr_to_ra_reg[base + i] =
266 ralloc_array(set, uint16_t, HIGH_CLASS_REGS(i));
267
268 for (unsigned j = 0; j < HIGH_CLASS_REGS(i); j++) {
269 ra_class_add_reg(set->regs, set->high_classes[i], reg);
270
271 set->ra_reg_to_gpr[reg] = j;
272 set->gpr_to_ra_reg[base + i][j] = reg;
273
274 for (unsigned br = j; br < j + high_class_sizes[i]; br++)
275 ra_add_transitive_reg_conflict(set->regs, br + first_high_reg, reg);
276
277 reg++;
278 }
279 }
280
281 /* starting a6xx, half precision regs conflict w/ full precision regs: */
282 if (compiler->gpu_id >= 600) {
283 /* because of transitivity, we can get away with just setting up
284 * conflicts between the first class of full and half regs:
285 */
286 for (unsigned i = 0; i < half_class_count; i++) {
287 /* NOTE there are fewer half class sizes, but they match the
288 * first N full class sizes.. assert in case that ever
289 * accidentally changes:
290 */
291 debug_assert(class_sizes[i] == half_class_sizes[i]);
292 for (unsigned j = 0; j < CLASS_REGS(i) / 2; j++) {
293 unsigned freg = set->gpr_to_ra_reg[i][j];
294 unsigned hreg0 = set->gpr_to_ra_reg[i + HALF_OFFSET][(j * 2) + 0];
295 unsigned hreg1 = set->gpr_to_ra_reg[i + HALF_OFFSET][(j * 2) + 1];
296
297 ra_add_transitive_reg_pair_conflict(set->regs, freg, hreg0, hreg1);
298 }
299 }
300
301 // TODO also need to update q_values, but for now:
302 ra_set_finalize(set->regs, NULL);
303 } else {
304 ra_set_finalize(set->regs, q_values);
305 }
306
307 ralloc_free(q_values);
308
309 return set;
310 }
311
312 /* additional block-data (per-block) */
313 struct ir3_ra_block_data {
314 BITSET_WORD *def; /* variables defined before used in block */
315 BITSET_WORD *use; /* variables used before defined in block */
316 BITSET_WORD *livein; /* which defs reach entry point of block */
317 BITSET_WORD *liveout; /* which defs reach exit point of block */
318 };
319
320 /* additional instruction-data (per-instruction) */
321 struct ir3_ra_instr_data {
322 /* cached instruction 'definer' info: */
323 struct ir3_instruction *defn;
324 int off, sz, cls;
325 };
326
327 /* register-assign context, per-shader */
328 struct ir3_ra_ctx {
329 struct ir3_shader_variant *v;
330 struct ir3 *ir;
331
332 struct ir3_ra_reg_set *set;
333 struct ra_graph *g;
334
335 /* Are we in the scalar assignment pass? In this pass, all larger-
336 * than-vec1 values have already been assigned and pre-colored, so
337 * we only consider scalar values.
338 */
339 bool scalar_pass;
340
341 unsigned alloc_count;
342 /* one per class, plus one slot for arrays: */
343 unsigned class_alloc_count[total_class_count + 1];
344 unsigned class_base[total_class_count + 1];
345 unsigned instr_cnt;
346 unsigned *def, *use; /* def/use table */
347 struct ir3_ra_instr_data *instrd;
348 };
349
350 /* does it conflict? */
351 static inline bool
352 intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
353 {
354 return !((a_start >= b_end) || (b_start >= a_end));
355 }
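/* Illustrative only: the ranges are treated as half-open on the 'end'
 * side, eg.
 *
 *    intersects(2, 5, 4, 9)  -> true   (4 is inside both)
 *    intersects(2, 4, 4, 9)  -> false  (they only touch at 4)
 */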
356
357 static unsigned
358 reg_size_for_array(struct ir3_array *arr)
359 {
360 if (arr->half)
361 return DIV_ROUND_UP(arr->length, 2);
362
363 return arr->length;
364 }
365
366 static int
367 size_to_class(unsigned sz, bool half, bool high)
368 {
369 if (high) {
370 for (unsigned i = 0; i < high_class_count; i++)
371 if (high_class_sizes[i] >= sz)
372 return i + HIGH_OFFSET;
373 } else if (half) {
374 for (unsigned i = 0; i < half_class_count; i++)
375 if (half_class_sizes[i] >= sz)
376 return i + HALF_OFFSET;
377 } else {
378 for (unsigned i = 0; i < class_count; i++)
379 if (class_sizes[i] >= sz)
380 return i;
381 }
382 debug_assert(0);
383 return -1;
384 }
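/* Examples (values follow from the class_sizes[] tables above):
 *
 *    size_to_class(3, false, false) -> 2                 (the vec3 full class)
 *    size_to_class(5, false, false) -> 4                 (rounds up to the 4+4 txd class)
 *    size_to_class(2, true,  false) -> HALF_OFFSET + 1   (the vec2 half class)
 */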
385
386 static bool
387 writes_gpr(struct ir3_instruction *instr)
388 {
389 if (dest_regs(instr) == 0)
390 return false;
391 /* is dest a normal temp register: */
392 struct ir3_register *reg = instr->regs[0];
393 debug_assert(!(reg->flags & (IR3_REG_CONST | IR3_REG_IMMED)));
394 if ((reg->num == regid(REG_A0, 0)) ||
395 (reg->num == regid(REG_P0, 0)))
396 return false;
397 return true;
398 }
399
400 static bool
401 instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
402 {
403 if (a->flags & IR3_INSTR_UNUSED)
404 return false;
405 return (a->ip < b->ip);
406 }
407
408 static struct ir3_instruction *
409 get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
410 int *sz, int *off)
411 {
412 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
413 struct ir3_instruction *d = NULL;
414
415 if (ctx->scalar_pass) {
416 id->defn = instr;
417 id->off = 0;
418 id->sz = 1; /* considering things as N scalar regs now */
419 }
420
421 if (id->defn) {
422 *sz = id->sz;
423 *off = id->off;
424 return id->defn;
425 }
426
427 if (instr->opc == OPC_META_COLLECT) {
428 /* What about the case where the collect is a subset of an array?  We
429 * would need to find the distance between where the actual array starts
430 * and the collect.. but that probably doesn't happen currently.
431 */
432 struct ir3_register *src;
433 int dsz, doff;
434
435 /* note: don't use foreach_ssa_src as this gets called once
436 * while assigning regs (which clears SSA flag)
437 */
438 foreach_src_n (src, n, instr) {
439 struct ir3_instruction *dd;
440 if (!src->instr)
441 continue;
442
443 dd = get_definer(ctx, src->instr, &dsz, &doff);
444
445 if ((!d) || instr_before(dd, d)) {
446 d = dd;
447 *sz = dsz;
448 *off = doff - n;
449 }
450 }
451
452 } else if (instr->cp.right || instr->cp.left) {
453 /* this also covers the meta:fo case, which ends up w/ single
454 * scalar instructions for each component:
455 */
456 struct ir3_instruction *f = ir3_neighbor_first(instr);
457
458 /* by definition, the entire sequence forms one linked list
459 * of single scalar register nodes (even if some of them may
460 * be splits from a texture sample instruction, for example). We
461 * just need to walk the list, finding the first-defined element
462 * of the group (lowest ip)
463 */
464 int cnt = 0;
465
466 /* need to skip over unused in the group: */
467 while (f && (f->flags & IR3_INSTR_UNUSED)) {
468 f = f->cp.right;
469 cnt++;
470 }
471
472 while (f) {
473 if ((!d) || instr_before(f, d))
474 d = f;
475 if (f == instr)
476 *off = cnt;
477 f = f->cp.right;
478 cnt++;
479 }
480
481 *sz = cnt;
482
483 } else {
484 /* second case is looking directly at the instruction which
485 * produces multiple values (eg, texture sample), rather
486 * than the split nodes that point back to that instruction.
487 * This isn't quite right, because it may be part of a larger
488 * group, such as:
489 *
490 * sam (f32)(xyzw)r0.x, ...
491 * add r1.x, ...
492 * add r1.y, ...
493 * sam (f32)(xyzw)r2.x, r0.w <-- (r0.w, r1.x, r1.y)
494 *
495 * need to come up with a better way to handle that case.
496 */
497 if (instr->address) {
498 *sz = instr->regs[0]->size;
499 } else {
500 *sz = util_last_bit(instr->regs[0]->wrmask);
501 }
502 *off = 0;
503 d = instr;
504 }
505
506 if (d->opc == OPC_META_SPLIT) {
507 struct ir3_instruction *dd;
508 int dsz, doff;
509
510 dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
511
512 /* by definition, should come before: */
513 debug_assert(instr_before(dd, d));
514
515 *sz = MAX2(*sz, dsz);
516
517 if (instr->opc == OPC_META_SPLIT)
518 *off = MAX2(*off, instr->split.off);
519
520 d = dd;
521 }
522
523 debug_assert(d->opc != OPC_META_SPLIT);
524
525 id->defn = d;
526 id->sz = *sz;
527 id->off = *off;
528
529 return d;
530 }
531
532 static void
533 ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
534 {
535 foreach_instr (instr, &block->instr_list) {
536 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
537 if (instr->regs_count == 0)
538 continue;
539 /* couple special cases: */
540 if (writes_addr(instr) || writes_pred(instr)) {
541 id->cls = -1;
542 } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
543 id->cls = total_class_count;
544 } else {
545 /* and the normal case: */
546 id->defn = get_definer(ctx, instr, &id->sz, &id->off);
547 id->cls = size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
548
549 /* this is a bit of duct-tape.. if we have a scenario like:
550 *
551 * sam (f32)(x) out.x, ...
552 * sam (f32)(x) out.y, ...
553 *
554 * Then the fanout/split meta instructions for the two different
555 * tex instructions end up grouped as left/right neighbors. The
556 * upshot is that in when you get_definer() on one of the meta:fo's
557 * you get definer as the first sam with sz=2, but when you call
558 * get_definer() on the either of the sam's you get itself as the
559 * definer with sz=1.
560 *
561 * (We actually avoid this scenario exactly: the neighbor links
562 * prevent one of the output mov's from being eliminated, so this
563 * hack should be enough. But probably we need to rethink how we
564 * find the "defining" instruction.)
565 *
566 * TODO how do we figure out offset properly...
567 */
568 if (id->defn != instr) {
569 struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
570 if (did->sz < id->sz) {
571 did->sz = id->sz;
572 did->cls = id->cls;
573 }
574 }
575 }
576 }
577 }
578
579 /* give each instruction a name (and ip), and count up the # of names
580 * of each class
581 */
582 static void
583 ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
584 {
585 foreach_instr (instr, &block->instr_list) {
586 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
587
588 #ifdef DEBUG
589 instr->name = ~0;
590 #endif
591
592 ctx->instr_cnt++;
593
594 if (!writes_gpr(instr))
595 continue;
596
597 if (id->defn != instr)
598 continue;
599
600 /* In scalar pass, collect/split don't get their own names,
601 * but instead inherit them from their src(s):
602 *
603 * Possibly we don't need this because of scalar_name(), but
604 * it does make the ir3_print() dumps easier to read.
605 */
606 if (ctx->scalar_pass) {
607 if (instr->opc == OPC_META_SPLIT) {
608 instr->name = instr->regs[1]->instr->name + instr->split.off;
609 continue;
610 }
611
612 if (instr->opc == OPC_META_COLLECT) {
613 instr->name = instr->regs[1]->instr->name;
614 continue;
615 }
616 }
617
618 /* arrays which don't fit in one of the pre-defined class
619 * sizes are pre-colored:
620 */
621 if ((id->cls >= 0) && (id->cls < total_class_count)) {
622 /* in the scalar pass, we generate a name for each
623 * scalar component, instr->name is the name of the
624 * first component.
625 */
626 unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
627 instr->name = ctx->class_alloc_count[id->cls];
628 ctx->class_alloc_count[id->cls] += n;
629 ctx->alloc_count += n;
630 }
631 }
632 }
633
634 static void
635 ra_init(struct ir3_ra_ctx *ctx)
636 {
637 unsigned n, base;
638
639 ir3_clear_mark(ctx->ir);
640 n = ir3_count_instructions(ctx->ir);
641
642 ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
643
644 foreach_block (block, &ctx->ir->block_list) {
645 ra_block_find_definers(ctx, block);
646 }
647
648 foreach_block (block, &ctx->ir->block_list) {
649 ra_block_name_instructions(ctx, block);
650 }
651
652 /* figure out the base register name for each class. The
653 * actual ra name is class_base[cls] + instr->name;
654 */
655 ctx->class_base[0] = 0;
656 for (unsigned i = 1; i <= total_class_count; i++) {
657 ctx->class_base[i] = ctx->class_base[i-1] +
658 ctx->class_alloc_count[i-1];
659 }
660
661 /* and vreg names for array elements: */
662 base = ctx->class_base[total_class_count];
663 foreach_array (arr, &ctx->ir->array_list) {
664 arr->base = base;
665 ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
666 base += reg_size_for_array(arr);
667 }
668 ctx->alloc_count += ctx->class_alloc_count[total_class_count];
669
670 ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
671 ralloc_steal(ctx->g, ctx->instrd);
672 ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
673 ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
674 }
675
676 static unsigned
677 __ra_name(struct ir3_ra_ctx *ctx, int cls, struct ir3_instruction *defn)
678 {
679 unsigned name;
680 debug_assert(cls >= 0);
681 debug_assert(cls < total_class_count); /* we shouldn't get arrays here.. */
682 name = ctx->class_base[cls] + defn->name;
683 debug_assert(name < ctx->alloc_count);
684 return name;
685 }
686
687 static int
688 ra_name(struct ir3_ra_ctx *ctx, struct ir3_ra_instr_data *id)
689 {
690 /* TODO handle name mapping for arrays */
691 return __ra_name(ctx, id->cls, id->defn);
692 }
693
694 /* Get the scalar name of the n'th component of an instruction dst: */
695 static int
696 scalar_name(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr, unsigned n)
697 {
698 if (ctx->scalar_pass) {
699 if (instr->opc == OPC_META_SPLIT) {
700 debug_assert(n == 0); /* split results in a scalar */
701 struct ir3_instruction *src = instr->regs[1]->instr;
702 return scalar_name(ctx, src, instr->split.off);
703 } else if (instr->opc == OPC_META_COLLECT) {
704 debug_assert(n < (instr->regs_count + 1));
705 struct ir3_instruction *src = instr->regs[n + 1]->instr;
706 return scalar_name(ctx, src, 0);
707 }
708 } else {
709 debug_assert(n == 0);
710 }
711
712 return ra_name(ctx, &ctx->instrd[instr->ip]) + n;
713 }
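/* For example (a sketch, not real shader code): if a sam instruction 'tex'
 * writes a vec4, then in the scalar pass scalar_name(ctx, tex, 2) is the
 * name of its third component; a split with split.off == 2 reading from
 * that sam resolves to the same name, so both refer to the same RA node.
 */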
714
715 static void
716 ra_destroy(struct ir3_ra_ctx *ctx)
717 {
718 ralloc_free(ctx->g);
719 }
720
721 static void
722 __def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
723 struct ir3_instruction *instr)
724 {
725 debug_assert(name < ctx->alloc_count);
726 /* defined on first write: */
727 if (!ctx->def[name])
728 ctx->def[name] = instr->ip;
729 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
730 BITSET_SET(bd->def, name);
731 }
732
733 static void
734 __use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
735 struct ir3_instruction *instr)
736 {
737 debug_assert(name < ctx->alloc_count);
738 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
739 if (!BITSET_TEST(bd->def, name))
740 BITSET_SET(bd->use, name);
741 }
742
743 static void
744 ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
745 {
746 struct ir3_ra_block_data *bd;
747 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
748
749 #define def(name, instr) __def(ctx, bd, name, instr)
750 #define use(name, instr) __use(ctx, bd, name, instr)
751
752 bd = rzalloc(ctx->g, struct ir3_ra_block_data);
753
754 bd->def = rzalloc_array(bd, BITSET_WORD, bitset_words);
755 bd->use = rzalloc_array(bd, BITSET_WORD, bitset_words);
756 bd->livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
757 bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
758
759 block->data = bd;
760
761 struct ir3_instruction *first_non_input = NULL;
762 foreach_instr (instr, &block->instr_list) {
763 if (instr->opc != OPC_META_INPUT) {
764 first_non_input = instr;
765 break;
766 }
767 }
768
769 foreach_instr (instr, &block->instr_list) {
770 struct ir3_instruction *src;
771 struct ir3_register *reg;
772
773 if (writes_gpr(instr)) {
774 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
775 struct ir3_register *dst = instr->regs[0];
776
777 if (dst->flags & IR3_REG_ARRAY) {
778 struct ir3_array *arr =
779 ir3_lookup_array(ctx->ir, dst->array.id);
780 unsigned i;
781
782 arr->start_ip = MIN2(arr->start_ip, instr->ip);
783 arr->end_ip = MAX2(arr->end_ip, instr->ip);
784
785 /* set the node class now.. in case we don't encounter
786 * this array dst again. From register_alloc algo's
787 * perspective, these are all single/scalar regs:
788 */
789 for (i = 0; i < arr->length; i++) {
790 unsigned name = arr->base + i;
791 if(arr->half)
792 ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
793 else
794 ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
795 }
796
797 /* indirect write is treated like a write to all array
798 * elements, since we don't know which one is actually
799 * written:
800 */
801 if (dst->flags & IR3_REG_RELATIV) {
802 for (i = 0; i < arr->length; i++) {
803 unsigned name = arr->base + i;
804 def(name, instr);
805 }
806 } else {
807 unsigned name = arr->base + dst->array.offset;
808 def(name, instr);
809 }
810 } else if (id->defn == instr) {
811 /* in scalar pass, we aren't considering virtual register
812 * classes, ie. if an instruction writes a vec2, then it
813 * defines two different scalar register names.
814 */
815 unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
816 for (unsigned i = 0; i < n; i++) {
817 unsigned name = scalar_name(ctx, instr, i);
818
819 /* tex instructions actually have a wrmask, and
820 * don't touch masked out components. We can't do
821 * anything useful about that in the first pass,
822 * but in the scalar pass we can realize these
823 * registers are available:
824 */
825 if (ctx->scalar_pass && is_tex_or_prefetch(instr) &&
826 !(instr->regs[0]->wrmask & (1 << i)))
827 continue;
828
829 def(name, instr);
830
831 if ((instr->opc == OPC_META_INPUT) && first_non_input)
832 use(name, first_non_input);
833
834 if (is_high(instr)) {
835 ra_set_node_class(ctx->g, name,
836 ctx->set->high_classes[id->cls - HIGH_OFFSET]);
837 } else if (is_half(instr)) {
838 ra_set_node_class(ctx->g, name,
839 ctx->set->half_classes[id->cls - HALF_OFFSET]);
840 } else {
841 ra_set_node_class(ctx->g, name,
842 ctx->set->classes[id->cls]);
843 }
844 }
845 }
846 }
847
848 foreach_src(reg, instr) {
849 if (reg->flags & IR3_REG_ARRAY) {
850 struct ir3_array *arr =
851 ir3_lookup_array(ctx->ir, reg->array.id);
852 arr->start_ip = MIN2(arr->start_ip, instr->ip);
853 arr->end_ip = MAX2(arr->end_ip, instr->ip);
854
855 /* indirect read is treated like a read from all array
856 * elements, since we don't know which one is actually
857 * read:
858 */
859 if (reg->flags & IR3_REG_RELATIV) {
860 unsigned i;
861 for (i = 0; i < arr->length; i++) {
862 unsigned name = arr->base + i;
863 use(name, instr);
864 BITSET_SET(bd->use, name);
865 }
866 } else {
867 unsigned name = arr->base + reg->array.offset;
868 use(name, instr);
869 /* NOTE: arrays are not SSA so unconditionally
870 * set use bit:
871 */
872 BITSET_SET(bd->use, name);
873 debug_assert(reg->array.offset < arr->length);
874 }
875 } else if (ctx->scalar_pass) {
876 struct ir3_instruction *src = reg->instr;
877 /* skip things that aren't SSA: */
878 unsigned n = src ? dest_regs(src) : 0;
879
880 /* in scalar pass, we aren't considering virtual register
881 * classes, ie. if an instruction writes a vec2, then it
882 * defines two different scalar register names.
883 *
884 * We need to traverse up thru collect/split to find the
885 * actual non-meta instruction names for each of the
886 * components:
887 */
888 for (unsigned i = 0; i < n; i++) {
889 /* Need to filter out a couple special cases, ie.
890 * writes to a0.x or p0.x:
891 */
892 if (!writes_gpr(src))
893 continue;
894
895 /* split takes a src w/ wrmask potentially greater
896 * than 0x1, but it really only cares about a single
897 * component. This shows up in splits coming out of
898 * a tex instruction w/ wrmask=.z, for example.
899 */
900 if (ctx->scalar_pass && (instr->opc == OPC_META_SPLIT) &&
901 !(i == instr->split.off))
902 continue;
903
904 use(scalar_name(ctx, src, i), instr);
905 }
906 } else if ((src = ssa(reg)) && writes_gpr(src)) {
907 unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
908 use(name, instr);
909 }
910 }
911 }
912 }
913
914 static bool
915 ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
916 {
917 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
918 bool progress = false;
919
920 foreach_block (block, &ctx->ir->block_list) {
921 struct ir3_ra_block_data *bd = block->data;
922
923 /* update livein: */
924 for (unsigned i = 0; i < bitset_words; i++) {
925 BITSET_WORD new_livein =
926 (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));
927
928 if (new_livein & ~bd->livein[i]) {
929 bd->livein[i] |= new_livein;
930 progress = true;
931 }
932 }
933
934 /* update liveout: */
935 for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
936 struct ir3_block *succ = block->successors[j];
937 struct ir3_ra_block_data *succ_bd;
938
939 if (!succ)
940 continue;
941
942 succ_bd = succ->data;
943
944 for (unsigned i = 0; i < bitset_words; i++) {
945 BITSET_WORD new_liveout =
946 (succ_bd->livein[i] & ~bd->liveout[i]);
947
948 if (new_liveout) {
949 bd->liveout[i] |= new_liveout;
950 progress = true;
951 }
952 }
953 }
954 }
955
956 return progress;
957 }
958
959 static void
960 print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
961 {
962 bool first = true;
963 debug_printf(" %s:", name);
964 for (unsigned i = 0; i < cnt; i++) {
965 if (BITSET_TEST(bs, i)) {
966 if (!first)
967 debug_printf(",");
968 debug_printf(" %04u", i);
969 first = false;
970 }
971 }
972 debug_printf("\n");
973 }
974
975 static void
976 ra_add_interference(struct ir3_ra_ctx *ctx)
977 {
978 struct ir3 *ir = ctx->ir;
979
980 /* initialize array live ranges: */
981 foreach_array (arr, &ir->array_list) {
982 arr->start_ip = ~0;
983 arr->end_ip = 0;
984 }
985
986 /* compute live ranges (use/def) on a block level, also updating
987 * block's def/use bitmasks (used below to calculate per-block
988 * livein/liveout):
989 */
990 foreach_block (block, &ir->block_list) {
991 ra_block_compute_live_ranges(ctx, block);
992 }
993
994 /* update per-block livein/liveout: */
995 while (ra_compute_livein_liveout(ctx)) {}
996
997 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
998 debug_printf("AFTER LIVEIN/OUT:\n");
999 foreach_block (block, &ir->block_list) {
1000 struct ir3_ra_block_data *bd = block->data;
1001 debug_printf("block%u:\n", block_id(block));
1002 print_bitset(" def", bd->def, ctx->alloc_count);
1003 print_bitset(" use", bd->use, ctx->alloc_count);
1004 print_bitset(" l/i", bd->livein, ctx->alloc_count);
1005 print_bitset(" l/o", bd->liveout, ctx->alloc_count);
1006 }
1007 foreach_array (arr, &ir->array_list) {
1008 debug_printf("array%u:\n", arr->id);
1009 debug_printf(" length: %u\n", arr->length);
1010 debug_printf(" start_ip: %u\n", arr->start_ip);
1011 debug_printf(" end_ip: %u\n", arr->end_ip);
1012 }
1013 debug_printf("INSTRUCTION VREG NAMES:\n");
1014 foreach_block (block, &ctx->ir->block_list) {
1015 foreach_instr (instr, &block->instr_list) {
1016 if (!ctx->instrd[instr->ip].defn)
1017 continue;
1018 debug_printf("%04u: ", scalar_name(ctx, instr, 0));
1019 ir3_print_instr(instr);
1020 }
1021 }
1022 debug_printf("ARRAY VREG NAMES:\n");
1023 foreach_array (arr, &ctx->ir->array_list) {
1024 debug_printf("%04u: arr%u\n", arr->base, arr->id);
1025 }
1026 }
1027
1028 /* extend start/end ranges based on livein/liveout info from cfg: */
1029 foreach_block (block, &ir->block_list) {
1030 struct ir3_ra_block_data *bd = block->data;
1031
1032 for (unsigned i = 0; i < ctx->alloc_count; i++) {
1033 if (BITSET_TEST(bd->livein, i)) {
1034 ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
1035 ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
1036 }
1037
1038 if (BITSET_TEST(bd->liveout, i)) {
1039 ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
1040 ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
1041 }
1042 }
1043
1044 foreach_array (arr, &ctx->ir->array_list) {
1045 for (unsigned i = 0; i < arr->length; i++) {
1046 if (BITSET_TEST(bd->livein, i + arr->base)) {
1047 arr->start_ip = MIN2(arr->start_ip, block->start_ip);
1048 }
1049 if (BITSET_TEST(bd->liveout, i + arr->base)) {
1050 arr->end_ip = MAX2(arr->end_ip, block->end_ip);
1051 }
1052 }
1053 }
1054 }
1055
1056 /* need to fix things up to keep outputs live: */
1057 struct ir3_instruction *out;
1058 foreach_output(out, ir) {
1059 unsigned name = ra_name(ctx, &ctx->instrd[out->ip]);
1060 ctx->use[name] = ctx->instr_cnt;
1061 }
1062
1063 for (unsigned i = 0; i < ctx->alloc_count; i++) {
1064 for (unsigned j = 0; j < ctx->alloc_count; j++) {
1065 if (intersects(ctx->def[i], ctx->use[i],
1066 ctx->def[j], ctx->use[j])) {
1067 ra_add_node_interference(ctx->g, i, j);
1068 }
1069 }
1070 }
1071 }
1072
1073 /* some instructions need fix-up if dst register is half precision: */
1074 static void fixup_half_instr_dst(struct ir3_instruction *instr)
1075 {
1076 switch (opc_cat(instr->opc)) {
1077 case 1: /* move instructions */
1078 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
1079 break;
1080 case 3:
1081 switch (instr->opc) {
1082 case OPC_MAD_F32:
1083 /* mad.f32 allows a half dst with full srcs,
1084 * eg. mad.f32 hr0, r0.x, r0.y, r0.z
1085 */
1086 if (instr->regs[1]->flags & IR3_REG_HALF)
1087 instr->opc = OPC_MAD_F16;
1088 break;
1089 case OPC_SEL_B32:
1090 instr->opc = OPC_SEL_B16;
1091 break;
1092 case OPC_SEL_S32:
1093 instr->opc = OPC_SEL_S16;
1094 break;
1095 case OPC_SEL_F32:
1096 instr->opc = OPC_SEL_F16;
1097 break;
1098 case OPC_SAD_S32:
1099 instr->opc = OPC_SAD_S16;
1100 break;
1101 /* instructions may already be fixed up: */
1102 case OPC_MAD_F16:
1103 case OPC_SEL_B16:
1104 case OPC_SEL_S16:
1105 case OPC_SEL_F16:
1106 case OPC_SAD_S16:
1107 break;
1108 default:
1109 assert(0);
1110 break;
1111 }
1112 break;
1113 case 4:
1114 switch (instr->opc) {
1115 case OPC_RSQ:
1116 instr->opc = OPC_HRSQ;
1117 break;
1118 case OPC_LOG2:
1119 instr->opc = OPC_HLOG2;
1120 break;
1121 case OPC_EXP2:
1122 instr->opc = OPC_HEXP2;
1123 break;
1124 default:
1125 break;
1126 }
1127 break;
1128 case 5:
1129 instr->cat5.type = half_type(instr->cat5.type);
1130 break;
1131 }
1132 }
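/* For example (illustrative): a cat1 mov whose dst was assigned a half reg
 * goes from "mov.f32f32 r0.x, r1.y" to "mov.f16f32 hr0.x, r1.y", and an
 * rsq whose dst became half is rewritten to hrsq.
 */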
1133 /* some instructions need fix-up if src register is half precision: */
1134 static void fixup_half_instr_src(struct ir3_instruction *instr)
1135 {
1136 switch (instr->opc) {
1137 case OPC_MOV:
1138 instr->cat1.src_type = half_type(instr->cat1.src_type);
1139 break;
1140 default:
1141 break;
1142 }
1143 }
1144
1145 /* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
1146 * array access(es) which do not have any previous access to depend
1147 * on from a scheduling point of view
1148 */
1149 static void
1150 reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
1151 struct ir3_instruction *instr)
1152 {
1153 struct ir3_ra_instr_data *id;
1154
1155 if (reg->flags & IR3_REG_ARRAY) {
1156 struct ir3_array *arr =
1157 ir3_lookup_array(ctx->ir, reg->array.id);
1158 unsigned name = arr->base + reg->array.offset;
1159 unsigned r = ra_get_node_reg(ctx->g, name);
1160 unsigned num = ctx->set->ra_reg_to_gpr[r];
1161
1162 if (reg->flags & IR3_REG_RELATIV) {
1163 reg->array.offset = num;
1164 } else {
1165 reg->num = num;
1166 reg->flags &= ~IR3_REG_SSA;
1167 }
1168
1169 reg->flags &= ~IR3_REG_ARRAY;
1170 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
1171 unsigned first_component = 0;
1172
1173 /* Special case for tex instructions, which may use the wrmask
1174 * to mask off the first component(s). In the scalar pass,
1175 * this means the masked off component(s) are not def'd/use'd,
1176 * so we get a bogus value when we ask the register_allocate
1177 * algo to get the assigned reg for the unused/untouched
1178 * component. So we need to consider the first used component:
1179 */
1180 if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
1181 unsigned n = ffs(id->defn->regs[0]->wrmask);
1182 debug_assert(n > 0);
1183 first_component = n - 1;
1184 }
1185
1186 unsigned name = scalar_name(ctx, id->defn, first_component);
1187 unsigned r = ra_get_node_reg(ctx->g, name);
1188 unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
1189
1190 debug_assert(!(reg->flags & IR3_REG_RELATIV));
1191
1192 debug_assert(num >= first_component);
1193
1194 if (is_high(id->defn))
1195 num += FIRST_HIGH_REG;
1196
1197 reg->num = num - first_component;
1198
1199 reg->flags &= ~IR3_REG_SSA;
1200
1201 if (is_half(id->defn))
1202 reg->flags |= IR3_REG_HALF;
1203 }
1204 }
1205
1206 /* helper to determine which regs to assign in which pass: */
1207 static bool
1208 should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1209 {
1210 if ((instr->opc == OPC_META_SPLIT) ||
1211 (instr->opc == OPC_META_COLLECT))
1212 return !ctx->scalar_pass;
1213 return ctx->scalar_pass;
1214 }
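/* eg. in the first (vecN) pass a collect feeding a sam gets its regs
 * assigned (so the consecutive-register constraint is resolved there),
 * while an ordinary alu dst is left for the scalar pass.
 */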
1215
1216 static void
1217 ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
1218 {
1219 foreach_instr (instr, &block->instr_list) {
1220 struct ir3_register *reg;
1221
1222 if (writes_gpr(instr)) {
1223 if (should_assign(ctx, instr)) {
1224 reg_assign(ctx, instr->regs[0], instr);
1225 if (instr->regs[0]->flags & IR3_REG_HALF)
1226 fixup_half_instr_dst(instr);
1227 }
1228 }
1229
1230 foreach_src_n(reg, n, instr) {
1231 struct ir3_instruction *src = reg->instr;
1232
1233 if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
1234 continue;
1235
1236 if (src && should_assign(ctx, instr))
1237 reg_assign(ctx, src->regs[0], src);
1238
1239 /* Note: reg->instr could be null for IR3_REG_ARRAY */
1240 if (src || (reg->flags & IR3_REG_ARRAY))
1241 reg_assign(ctx, instr->regs[n+1], src);
1242
1243 if (instr->regs[n+1]->flags & IR3_REG_HALF)
1244 fixup_half_instr_src(instr);
1245 }
1246 }
1247
1248 /* We need to pre-color outputs for the scalar pass in
1249 * ra_precolor_assigned(), so we need to actually assign
1250 * them in the first pass:
1251 */
1252 if (!ctx->scalar_pass) {
1253 struct ir3_instruction *in, *out;
1254
1255 foreach_input (in, ctx->ir) {
1256 reg_assign(ctx, in->regs[0], in);
1257 }
1258 foreach_output (out, ctx->ir) {
1259 reg_assign(ctx, out->regs[0], out);
1260 }
1261 }
1262 }
1263
1264 /* handle pre-colored registers. This includes "arrays" (which could be of
1265 * length 1, used for phi webs lowered to registers in nir), as well as
1266 * special shader input values that need to be pinned to certain registers.
1267 */
1268 static void
1269 ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
1270 {
1271 unsigned num_precolor = 0;
1272 for (unsigned i = 0; i < nprecolor; i++) {
1273 if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
1274 struct ir3_instruction *instr = precolor[i];
1275
1276 if (instr->regs[0]->num == INVALID_REG)
1277 continue;
1278
1279 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1280
1281 debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
1282
1283 /* only consider the first component: */
1284 if (id->off > 0)
1285 continue;
1286
1287 if (ctx->scalar_pass && !should_assign(ctx, instr))
1288 continue;
1289
1290 /* 'base' is in scalar (class 0) but we need to map that to
1291 * the conflicting register of the appropriate class (ie.
1292 * input could be vec2/vec3/etc)
1293 *
1294 * Note that the higher class (larger than scalar) regs
1295 * are setup to conflict with others in the same class,
1296 * so for example, R1 (scalar) is also the first component
1297 * of D1 (vec2/double):
1298 *
1299 * Single (base) | Double
1300 * --------------+---------------
1301 * R0 | D0
1302 * R1 | D0 D1
1303 * R2 | D1 D2
1304 * R3 | D2
1305 * .. and so on..
1306 */
1307 unsigned regid = instr->regs[0]->num;
1308 unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1309 unsigned name = ra_name(ctx, id);
1310 ra_set_node_reg(ctx->g, name, reg);
1311 num_precolor = MAX2(regid, num_precolor);
1312 }
1313 }
1314
1315 /* pre-assign array elements:
1316 *
1317 * TODO this is going to need some work for half-precision.. possibly
1318 * this is easier on a6xx, where we can just divide array size by two?
1319 * But on a5xx and earlier it will need to track two bases.
1320 */
1321 foreach_array (arr, &ctx->ir->array_list) {
1322 unsigned base = 0;
1323
1324 if (arr->end_ip == 0)
1325 continue;
1326
1327 /* figure out what else we conflict with which has already
1328 * been assigned:
1329 */
1330 retry:
1331 foreach_array (arr2, &ctx->ir->array_list) {
1332 if (arr2 == arr)
1333 break;
1334 if (arr2->end_ip == 0)
1335 continue;
1336 /* if it intersects with liverange AND register range.. */
1337 if (intersects(arr->start_ip, arr->end_ip,
1338 arr2->start_ip, arr2->end_ip) &&
1339 intersects(base, base + reg_size_for_array(arr),
1340 arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
1341 base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
1342 goto retry;
1343 }
1344 }
1345
1346 /* also need to not conflict with any pre-assigned inputs: */
1347 for (unsigned i = 0; i < nprecolor; i++) {
1348 struct ir3_instruction *instr = precolor[i];
1349
1350 if (!instr || (instr->flags & IR3_INSTR_UNUSED))
1351 continue;
1352
1353 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1354
1355 /* only consider the first component: */
1356 if (id->off > 0)
1357 continue;
1358
1359 unsigned name = ra_name(ctx, id);
1360 unsigned regid = instr->regs[0]->num;
1361
1362 /* Check if array intersects with liverange AND register
1363 * range of the input:
1364 */
1365 if (intersects(arr->start_ip, arr->end_ip,
1366 ctx->def[name], ctx->use[name]) &&
1367 intersects(base, base + reg_size_for_array(arr),
1368 regid, regid + class_sizes[id->cls])) {
1369 base = MAX2(base, regid + class_sizes[id->cls]);
1370 goto retry;
1371 }
1372 }
1373
1374 arr->reg = base;
1375
1376 for (unsigned i = 0; i < arr->length; i++) {
1377 unsigned name, reg;
1378
1379 if (arr->half) {
1380 /* We don't need to do this on generations older than a6xx,
1381 * since on those there is no conflict between full regs and
1382 * half regs.
1383 *
1384 * TODO Presumably "base" could then start from 0 separately
1385 * for half regs of arrays on older generations.
1386 */
1387 unsigned base_half = base * 2 + i;
1388 reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
1389 base = base_half / 2 + 1;
1390 } else {
1391 reg = ctx->set->gpr_to_ra_reg[0][base++];
1392 }
1393
1394 name = arr->base + i;
1395 ra_set_node_reg(ctx->g, name, reg);
1396 }
1397 }
1398
1399 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1400 foreach_array (arr, &ctx->ir->array_list) {
1401 unsigned first = arr->reg;
1402 unsigned last = arr->reg + arr->length - 1;
1403 debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
1404 (first >> 2), "xyzw"[first & 0x3],
1405 (last >> 2), "xyzw"[last & 0x3]);
1406 }
1407 }
1408 }
1409
1410 static void
1411 precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1412 {
1413 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1414 unsigned n = dest_regs(instr);
1415 for (unsigned i = 0; i < n; i++) {
1416 /* tex instructions actually have a wrmask, and
1417 * don't touch masked out components. So we
1418 * shouldn't precolor them:
1419 */
1420 if (is_tex_or_prefetch(instr) &&
1421 !(instr->regs[0]->wrmask & (1 << i)))
1422 continue;
1423
1424 unsigned name = scalar_name(ctx, instr, i);
1425 unsigned regid = instr->regs[0]->num + i;
1426
1427 if (instr->regs[0]->flags & IR3_REG_HIGH)
1428 regid -= FIRST_HIGH_REG;
1429
1430 unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1431 ra_set_node_reg(ctx->g, name, vreg);
1432 }
1433 }
1434
1435 /* pre-color non-scalar registers based on the registers assigned in the
1436 * previous pass. Do this by actually looking at the split/collect instructions.
1437 */
1438 static void
1439 ra_precolor_assigned(struct ir3_ra_ctx *ctx)
1440 {
1441 debug_assert(ctx->scalar_pass);
1442
1443 foreach_block (block, &ctx->ir->block_list) {
1444 foreach_instr (instr, &block->instr_list) {
1445
1446 if ((instr->opc != OPC_META_SPLIT) &&
1447 (instr->opc != OPC_META_COLLECT))
1448 continue;
1449
1450 precolor(ctx, instr);
1451
1452 struct ir3_register *src;
1453 foreach_src (src, instr) {
1454 if (!src->instr)
1455 continue;
1456 precolor(ctx, src->instr);
1457 }
1458 }
1459 }
1460 }
1461
1462 static int
1463 ra_alloc(struct ir3_ra_ctx *ctx)
1464 {
1465 if (!ra_allocate(ctx->g))
1466 return -1;
1467
1468 foreach_block (block, &ctx->ir->block_list) {
1469 ra_block_alloc(ctx, block);
1470 }
1471
1472 return 0;
1473 }
1474
1475 /* if we end up with split/collect instructions with non-matching src
1476 * and dest regs, that means something has gone wrong, which makes this
1477 * a pretty good sanity check.
1478 */
1479 static void
1480 ra_sanity_check(struct ir3 *ir)
1481 {
1482 foreach_block (block, &ir->block_list) {
1483 foreach_instr (instr, &block->instr_list) {
1484 if (instr->opc == OPC_META_SPLIT) {
1485 struct ir3_register *dst = instr->regs[0];
1486 struct ir3_register *src = instr->regs[1];
1487 debug_assert(dst->num == (src->num + instr->split.off));
1488 } else if (instr->opc == OPC_META_COLLECT) {
1489 struct ir3_register *dst = instr->regs[0];
1490 struct ir3_register *src;
1491
1492 foreach_src_n (src, n, instr) {
1493 debug_assert(dst->num == (src->num - n));
1494 }
1495 }
1496 }
1497 }
1498 }
1499
1500 static int
1501 ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1502 unsigned nprecolor, bool scalar_pass)
1503 {
1504 struct ir3_ra_ctx ctx = {
1505 .v = v,
1506 .ir = v->ir,
1507 .set = v->ir->compiler->set,
1508 .scalar_pass = scalar_pass,
1509 };
1510 int ret;
1511
1512 ra_init(&ctx);
1513 ra_add_interference(&ctx);
1514 ra_precolor(&ctx, precolor, nprecolor);
1515 if (scalar_pass)
1516 ra_precolor_assigned(&ctx);
1517 ret = ra_alloc(&ctx);
1518 ra_destroy(&ctx);
1519
1520 return ret;
1521 }
1522
1523 int
1524 ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1525 unsigned nprecolor)
1526 {
1527 int ret;
1528
1529 /* First pass, assign the vecN (non-scalar) registers: */
1530 ret = ir3_ra_pass(v, precolor, nprecolor, false);
1531 if (ret)
1532 return ret;
1533
1534 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1535 printf("AFTER RA (1st pass):\n");
1536 ir3_print(v->ir);
1537 }
1538
1539 /* Second pass, assign the scalar registers: */
1540 ret = ir3_ra_pass(v, precolor, nprecolor, true);
1541 if (ret)
1542 return ret;
1543
1544 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1545 printf("AFTER RA (2nd pass):\n");
1546 ir3_print(v->ir);
1547 }
1548
1549 #ifdef DEBUG
1550 # define SANITY_CHECK DEBUG
1551 #else
1552 # define SANITY_CHECK 0
1553 #endif
1554 if (SANITY_CHECK)
1555 ra_sanity_check(v->ir);
1556
1557 return ret;
1558 }
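/* A rough sketch of how a caller might drive this pass -- the array of
 * pinned inputs is gathered by the compiler frontend; the helper name below
 * is hypothetical and only meant to illustrate the calling convention:
 *
 *    struct ir3_instruction *precolor[16] = {0};
 *    unsigned n = gather_pinned_inputs(v, precolor);   // hypothetical helper
 *
 *    if (ir3_ra(v, precolor, n))
 *        return -1;   // RA failed (eg. ran out of registers)
 */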