/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"
#include "util/register_allocate.h"
#include "util/ralloc.h"
#include "util/bitset.h"

#include "ir3.h"
#include "ir3_compiler.h"
/*
 * Register Assignment:
 *
 * Uses the register_allocate util, which implements a graph coloring
 * algo with interference classes.  To handle the cases where we need
 * consecutive registers (for example, texture sample instructions),
 * we model these as larger (double/quad/etc) registers which conflict
 * with the corresponding registers in other classes.
 *
 * We additionally create classes for half-regs, which do not conflict
 * with the full-reg classes.  We do need at least sizes 1-4 (to deal
 * w/ texture sample instructions that output to half-reg).  At the
 * moment we don't create the higher order half-reg classes, as half-reg
 * frequently does not have enough precision for texture coords at
 * higher resolutions.
 *
 * There are some additional cases that we need to handle specially,
 * as the graph coloring algo doesn't understand "partial writes".
 * For example, a sequence like:
 *
 *    add r0.z, ...
 *    sam (f32)(xy)r0.x, ...
 *    ...
 *    sam (f32)(xyzw)r0.w, r0.x, ...  ; 3d texture, so r0.xyz are coord
 *
 * In this scenario, we treat r0.xyz as class size 3, which is written
 * (from a use/def perspective) at the 'add' instruction, and ignore the
 * subsequent partial writes to r0.xy.  So the 'add r0.z, ...' is the
 * defining instruction, as it is the first to partially write r0.xyz.
 *
 * Note i965 has a similar scenario, which they solve with a virtual
 * LOAD_PAYLOAD instruction which gets turned into multiple MOV's after
 * register assignment.  But for us that is horrible from a scheduling
 * standpoint.  Instead what we do is use the idea of a 'definer'
 * instruction.  Ie. the first instruction (lowest ip) to write to the
 * variable is the one we consider from a use/def perspective when
 * building the interference graph.  (Other instructions which write
 * other variable components just define the variable some more.)
 *
 * Arrays of arbitrary size are handled via pre-coloring a consecutive
 * sequence of registers.  Additional scalar (single component) reg
 * names are allocated starting at ctx->class_base[total_class_count]
 * (see arr->base), which are pre-colored.  In the use/def graph direct
 * access is treated as a single element use/def, and indirect access
 * is treated as a use or def of all array elements.  (Only the first
 * def is tracked, in case of multiple indirect writes, etc.)
 *
 * TODO arrays that fit in one of the pre-defined class sizes should
 * not need to be pre-colored, but instead could be given a normal
 * vreg name.  (Ignoring this for now since it is a good way to work
 * out the kinks with arbitrary sized arrays.)
 *
 * TODO it might be easier for debugging to split this into two passes,
 * the first assigning vreg names in a way that we could ir3_print()
 * the result.
 */

static const unsigned class_sizes[] = {
	1, 2, 3, 4,
	4 + 4, /* txd + 1d/2d */
	4 + 6, /* txd + 3d */
};
#define class_count ARRAY_SIZE(class_sizes)

static const unsigned half_class_sizes[] = {
	1, 2, 3, 4,
};
#define half_class_count ARRAY_SIZE(half_class_sizes)

/* seems to just be used for compute shaders?  Seems like vec1 and vec3
 * are sufficient (for now?)
 */
static const unsigned high_class_sizes[] = {
	1, 3,
};
#define high_class_count ARRAY_SIZE(high_class_sizes)

#define total_class_count (class_count + half_class_count + high_class_count)

/* Below a0.x are normal regs.  RA doesn't need to assign a0.x/p0.x. */
#define NUM_REGS             (4 * 48)  /* r0 to r47 */
#define NUM_HIGH_REGS        (4 * 8)   /* r48 to r55 */
#define FIRST_HIGH_REG       (4 * 48)
/* Number of virtual regs in a given class: */
#define CLASS_REGS(i)        (NUM_REGS - (class_sizes[i] - 1))
#define HALF_CLASS_REGS(i)   (NUM_REGS - (half_class_sizes[i] - 1))
#define HIGH_CLASS_REGS(i)   (NUM_HIGH_REGS - (high_class_sizes[i] - 1))
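
/* Worked example (illustrative): a register of class size N can be based
 * at any scalar position except the last N-1, since it must fit entirely
 * within the register file.  So for the 4+4=8 wide txd class:
 *
 *    CLASS_REGS(4) = 192 - (8 - 1) = 185
 *
 * ie. 185 possible base positions, r0.x through r46.x.
 */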

#define HALF_OFFSET          (class_count)
#define HIGH_OFFSET          (class_count + half_class_count)

/* register-set, created one time, used for all shaders: */
struct ir3_ra_reg_set {
	struct ra_regs *regs;
	unsigned int classes[class_count];
	unsigned int half_classes[half_class_count];
	unsigned int high_classes[high_class_count];
	/* maps flat virtual register space to base gpr: */
	uint16_t *ra_reg_to_gpr;
	/* maps cls,gpr to flat virtual register space: */
	uint16_t **gpr_to_ra_reg;
};

static void
build_q_values(unsigned int **q_values, unsigned off,
		const unsigned *sizes, unsigned count)
{
	for (unsigned i = 0; i < count; i++) {
		q_values[i + off] = rzalloc_array(q_values, unsigned, total_class_count);

		/* From register_allocate.c:
		 *
		 * q(B,C) (indexed by C, B is this register class) in
		 * Runeson/Nyström paper.  This is "how many registers of B could
		 * the worst choice register from C conflict with".
		 *
		 * If we just let the register allocation algorithm compute these
		 * values, it is extremely expensive.  However, since all of our
		 * registers are laid out, we can very easily compute them
		 * ourselves.  View the register from C as fixed starting at GRF n
		 * somewhere in the middle, and the register from B as sliding back
		 * and forth.  Then the first register to conflict from B is the
		 * one starting at n - class_size[B] + 1 and the last register to
		 * conflict will start at n + class_size[C] - 1.  Therefore, the
		 * number of conflicts from B is class_size[B] + class_size[C] - 1.
		 *
		 *        +-+-+-+-+-+-+       +-+-+-+-+-+-+
		 *   B    | | | | | |n|  -->  | | | | | | |
		 *        +-+-+-+-+-+-+       +-+-+-+-+-+-+
		 *
		 *        +-+-+-+-+-+
		 *   C    |n| | | | |
		 *        +-+-+-+-+-+
		 *
		 * (Idea copied from brw_fs_reg_allocate.cpp)
		 */
		for (unsigned j = 0; j < count; j++)
			q_values[i + off][j + off] = sizes[i] + sizes[j] - 1;
	}
}
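
/* Worked example (illustrative): for B = vec2 (size 2) and C = vec3
 * (size 3), a fixed vec3 starting at scalar n overlaps every vec2 whose
 * base falls in [n-1, n+2], ie. four possible bases:
 *
 *    q(B,C) = 2 + 3 - 1 = 4
 */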

/* One-time setup of RA register-set, which describes all the possible
 * "virtual" registers and their interferences.  Ie. double register
 * occupies (and conflicts with) two single registers, and so forth.
 * Since registers do not need to be aligned to their class size, they
 * can conflict with other registers in the same class too.  Ie:
 *
 *    Single (base) |  Double
 *    --------------+---------------
 *       R0         |  D0
 *       R1         |  D0 D1
 *       R2         |  D1 D2
 *       R3         |  D2
 *           .. and so on..
 *
 * (NOTE the disassembler uses notation like r0.x/y/z/w but those are
 * really just four scalar registers.  Don't let that confuse you.)
 */
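
/* Reading the table above: D1's two scalar positions are R1 and R2, so
 * D1 conflicts with R1 and R2 directly, and (since D0 also covers R1 and
 * D2 also covers R2) with D0 and D2 as well.  This is roughly what the
 * ra_add_transitive_reg_conflict() calls below set up.
 */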
struct ir3_ra_reg_set *
ir3_ra_alloc_reg_set(struct ir3_compiler *compiler)
{
	struct ir3_ra_reg_set *set = rzalloc(compiler, struct ir3_ra_reg_set);
	unsigned ra_reg_count, reg, first_half_reg, first_high_reg, base;
	unsigned int **q_values;

	/* calculate # of regs across all classes: */
	ra_reg_count = 0;
	for (unsigned i = 0; i < class_count; i++)
		ra_reg_count += CLASS_REGS(i);
	for (unsigned i = 0; i < half_class_count; i++)
		ra_reg_count += HALF_CLASS_REGS(i);
	for (unsigned i = 0; i < high_class_count; i++)
		ra_reg_count += HIGH_CLASS_REGS(i);

	/* allocate and populate q_values: */
	q_values = ralloc_array(set, unsigned *, total_class_count);

	build_q_values(q_values, 0, class_sizes, class_count);
	build_q_values(q_values, HALF_OFFSET, half_class_sizes, half_class_count);
	build_q_values(q_values, HIGH_OFFSET, high_class_sizes, high_class_count);

	/* allocate the reg-set.. */
	set->regs = ra_alloc_reg_set(set, ra_reg_count, true);
	set->ra_reg_to_gpr = ralloc_array(set, uint16_t, ra_reg_count);
	set->gpr_to_ra_reg = ralloc_array(set, uint16_t *, total_class_count);

	/* .. and classes */
	reg = 0;
	for (unsigned i = 0; i < class_count; i++) {
		set->classes[i] = ra_alloc_reg_class(set->regs);

		set->gpr_to_ra_reg[i] = ralloc_array(set, uint16_t, CLASS_REGS(i));

		for (unsigned j = 0; j < CLASS_REGS(i); j++) {
			ra_class_add_reg(set->regs, set->classes[i], reg);

			set->ra_reg_to_gpr[reg] = j;
			set->gpr_to_ra_reg[i][j] = reg;

			for (unsigned br = j; br < j + class_sizes[i]; br++)
				ra_add_transitive_reg_conflict(set->regs, br, reg);

			reg++;
		}
	}

	first_half_reg = reg;
	base = HALF_OFFSET;

	for (unsigned i = 0; i < half_class_count; i++) {
		set->half_classes[i] = ra_alloc_reg_class(set->regs);

		set->gpr_to_ra_reg[base + i] =
				ralloc_array(set, uint16_t, HALF_CLASS_REGS(i));

		for (unsigned j = 0; j < HALF_CLASS_REGS(i); j++) {
			ra_class_add_reg(set->regs, set->half_classes[i], reg);

			set->ra_reg_to_gpr[reg] = j;
			set->gpr_to_ra_reg[base + i][j] = reg;

			for (unsigned br = j; br < j + half_class_sizes[i]; br++)
				ra_add_transitive_reg_conflict(set->regs, br + first_half_reg, reg);

			reg++;
		}
	}

	first_high_reg = reg;
	base = HIGH_OFFSET;

	for (unsigned i = 0; i < high_class_count; i++) {
		set->high_classes[i] = ra_alloc_reg_class(set->regs);

		set->gpr_to_ra_reg[base + i] =
				ralloc_array(set, uint16_t, HIGH_CLASS_REGS(i));

		for (unsigned j = 0; j < HIGH_CLASS_REGS(i); j++) {
			ra_class_add_reg(set->regs, set->high_classes[i], reg);

			set->ra_reg_to_gpr[reg] = j;
			set->gpr_to_ra_reg[base + i][j] = reg;

			for (unsigned br = j; br < j + high_class_sizes[i]; br++)
				ra_add_transitive_reg_conflict(set->regs, br + first_high_reg, reg);

			reg++;
		}
	}

	/* starting with a6xx, half precision regs conflict w/ full precision regs: */
	if (compiler->gpu_id >= 600) {
		/* because of transitivity, we can get away with just setting up
		 * conflicts between the first class of full and half regs:
		 */
		for (unsigned i = 0; i < half_class_count; i++) {
			/* NOTE there are fewer half class sizes, but they match the
			 * first N full class sizes.. but assert in case that ever
			 * accidentally changes:
			 */
			debug_assert(class_sizes[i] == half_class_sizes[i]);
			for (unsigned j = 0; j < CLASS_REGS(i) / 2; j++) {
				unsigned freg  = set->gpr_to_ra_reg[i][j];
				unsigned hreg0 = set->gpr_to_ra_reg[i + HALF_OFFSET][(j * 2) + 0];
				unsigned hreg1 = set->gpr_to_ra_reg[i + HALF_OFFSET][(j * 2) + 1];

				ra_add_transitive_reg_pair_conflict(set->regs, freg, hreg0, hreg1);
			}
		}

		/* TODO also need to update q_values, but for now: */
		ra_set_finalize(set->regs, NULL);
	} else {
		ra_set_finalize(set->regs, q_values);
	}

	ralloc_free(q_values);

	return set;
}

/* additional block-data (per-block) */
struct ir3_ra_block_data {
	BITSET_WORD *def;        /* variables defined before used in block */
	BITSET_WORD *use;        /* variables used before defined in block */
	BITSET_WORD *livein;     /* which defs reach entry point of block */
	BITSET_WORD *liveout;    /* which defs reach exit point of block */
};

/* additional instruction-data (per-instruction) */
struct ir3_ra_instr_data {
	/* cached instruction 'definer' info: */
	struct ir3_instruction *defn;
	int off, sz, cls;
};

/* register-assign context, per-shader */
struct ir3_ra_ctx {
	struct ir3_shader_variant *v;
	struct ir3 *ir;

	struct ir3_ra_reg_set *set;
	struct ra_graph *g;
	unsigned alloc_count;
	/* one per class, plus one slot for arrays: */
	unsigned class_alloc_count[total_class_count + 1];
	unsigned class_base[total_class_count + 1];
	unsigned instr_cnt;
	unsigned *def, *use;     /* def/use table */
	struct ir3_ra_instr_data *instrd;
};

/* does it conflict? */
static inline bool
intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
{
	return !((a_start >= b_end) || (b_start >= a_end));
}
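
/* Note that intersects() treats ranges as half-open, ie. [start, end):
 * intersects(0, 4, 4, 8) is false (back-to-back ranges don't conflict),
 * while intersects(0, 5, 4, 8) is true.
 */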

static bool
is_half(struct ir3_instruction *instr)
{
	return !!(instr->regs[0]->flags & IR3_REG_HALF);
}

static bool
is_high(struct ir3_instruction *instr)
{
	return !!(instr->regs[0]->flags & IR3_REG_HIGH);
}

static int
size_to_class(unsigned sz, bool half, bool high)
{
	if (high) {
		for (unsigned i = 0; i < high_class_count; i++)
			if (high_class_sizes[i] >= sz)
				return i + HIGH_OFFSET;
	} else if (half) {
		for (unsigned i = 0; i < half_class_count; i++)
			if (half_class_sizes[i] >= sz)
				return i + HALF_OFFSET;
	} else {
		for (unsigned i = 0; i < class_count; i++)
			if (class_sizes[i] >= sz)
				return i;
	}
	debug_assert(0);
	return -1;
}
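
/* Worked example (illustrative): size_to_class() returns the smallest
 * class that can hold the value, so a 5 component full-precision value
 * lands in the 4+4=8 wide class:
 *
 *    size_to_class(5, false, false) == 4   (class_sizes[4] == 8)
 */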

static bool
writes_gpr(struct ir3_instruction *instr)
{
	if (dest_regs(instr) == 0)
		return false;
	/* is dest a normal temp register: */
	struct ir3_register *reg = instr->regs[0];
	debug_assert(!(reg->flags & (IR3_REG_CONST | IR3_REG_IMMED)));
	if ((reg->num == regid(REG_A0, 0)) ||
			(reg->num == regid(REG_P0, 0)))
		return false;
	return true;
}

static bool
instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
{
	if (a->flags & IR3_INSTR_UNUSED)
		return false;
	return (a->ip < b->ip);
}

static struct ir3_instruction *
get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
		int *sz, int *off)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	struct ir3_instruction *d = NULL;

	if (id->defn) {
		*sz = id->sz;
		*off = id->off;
		return id->defn;
	}

	if (instr->opc == OPC_META_COLLECT) {
		/* What about the case where the collect is a subset of an array?
		 * We'd need to find the distance between where the actual array
		 * starts and the collect.. that probably doesn't happen currently.
		 */
		struct ir3_register *src;
		int dsz, doff;

		/* note: don't use foreach_ssa_src as this gets called once
		 * while assigning regs (which clears the SSA flag)
		 */
		foreach_src_n(src, n, instr) {
			struct ir3_instruction *dd;
			if (!src->instr)
				continue;

			dd = get_definer(ctx, src->instr, &dsz, &doff);

			if ((!d) || instr_before(dd, d)) {
				d = dd;
				*sz = dsz;
				*off = doff - n;
			}
		}

	} else if (instr->cp.right || instr->cp.left) {
		/* this also covers the meta:fo case, which ends up w/ single
		 * scalar instructions for each component:
		 */
		struct ir3_instruction *f = ir3_neighbor_first(instr);

		/* by definition, the entire sequence forms one linked list
		 * of single scalar register nodes, even if some of them may
		 * be splits from (for example) a texture sample instr.  We
		 * just need to walk the list to find the first element of
		 * the group to be defined (lowest ip):
		 */
		int cnt = 0;

		/* need to skip over unused in the group: */
		while (f && (f->flags & IR3_INSTR_UNUSED)) {
			f = f->cp.right;
			cnt++;
		}

		while (f) {
			if ((!d) || instr_before(f, d))
				d = f;
			if (f == instr)
				*off = cnt;
			f = f->cp.right;
			cnt++;
		}

		*sz = cnt;

	} else {
		/* second case is looking directly at the instruction which
		 * produces multiple values (eg, texture sample), rather
		 * than the split nodes that point back to that instruction.
		 * This isn't quite right, because it may be part of a larger
		 * group, such as:
		 *
		 *    sam (f32)(xyzw)r0.x, ...
		 *    add r1.x, ...
		 *    add r1.y, ...
		 *    sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
		 *
		 * need to come up with a better way to handle that case.
		 */
		if (instr->address) {
			*sz = instr->regs[0]->size;
		} else {
			*sz = util_last_bit(instr->regs[0]->wrmask);
		}
		*off = 0;
		d = instr;
	}

	if (d->opc == OPC_META_SPLIT) {
		struct ir3_instruction *dd;
		int dsz, doff;

		dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);

		/* by definition, should come before: */
		debug_assert(instr_before(dd, d));

		*sz = MAX2(*sz, dsz);

		if (instr->opc == OPC_META_SPLIT)
			*off = MAX2(*off, instr->split.off);

		d = dd;
	}

	debug_assert(d->opc != OPC_META_SPLIT);

	id->defn = d;
	id->sz = *sz;
	id->off = *off;

	return d;
}

static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
		if (instr->regs_count == 0)
			continue;
		/* couple special cases: */
		if (writes_addr(instr) || writes_pred(instr)) {
			id->cls = -1;
		} else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
			id->cls = total_class_count;
		} else {
			/* and the normal case: */
			id->defn = get_definer(ctx, instr, &id->sz, &id->off);
			id->cls = size_to_class(id->sz, is_half(id->defn), is_high(id->defn));

			/* this is a bit of duct-tape.. if we have a scenario like:
			 *
			 *    sam (f32)(x) out.x, ...
			 *    sam (f32)(x) out.y, ...
			 *
			 * Then the fanout/split meta instructions for the two different
			 * tex instructions end up grouped as left/right neighbors.  The
			 * upshot is that when you call get_definer() on one of the
			 * meta:fo's you get the first sam as the definer with sz=2, but
			 * when you call get_definer() on either of the sam's you get
			 * the sam itself as the definer with sz=1.
			 *
			 * (We actually avoid this exact scenario: the neighbor links
			 * prevent one of the output mov's from being eliminated, so
			 * this hack should be enough.  But we probably need to rethink
			 * how we find the "defining" instruction.)
			 *
			 * TODO how do we figure out the offset properly...
			 */
			if (id->defn != instr) {
				struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
				if (did->sz < id->sz) {
					did->sz = id->sz;
					did->cls = id->cls;
				}
			}
		}
	}
}

/* give each instruction a name (and ip), and count up the # of names
 * of each class
 */
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

#ifdef DEBUG
		instr->name = ~0;
#endif

		ctx->instr_cnt++;

		if (!writes_gpr(instr))
			continue;

		if (id->defn != instr)
			continue;

		/* arrays which don't fit in one of the pre-defined class
		 * sizes are pre-colored:
		 */
		if ((id->cls >= 0) && (id->cls < total_class_count)) {
			instr->name = ctx->class_alloc_count[id->cls]++;
			ctx->alloc_count++;
		}
	}
}

static void
ra_init(struct ir3_ra_ctx *ctx)
{
	unsigned n, base;

	ir3_clear_mark(ctx->ir);
	n = ir3_count_instructions(ctx->ir);

	ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_find_definers(ctx, block);
	}

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_name_instructions(ctx, block);
	}

	/* figure out the base register name for each class.  The
	 * actual ra name is class_base[cls] + instr->name;
	 */
	ctx->class_base[0] = 0;
	for (unsigned i = 1; i <= total_class_count; i++) {
		ctx->class_base[i] = ctx->class_base[i-1] +
				ctx->class_alloc_count[i-1];
	}

	/* and vreg names for array elements: */
	base = ctx->class_base[total_class_count];
	foreach_array (arr, &ctx->ir->array_list) {
		arr->base = base;
		ctx->class_alloc_count[total_class_count] += arr->length;
		base += arr->length;
	}
	ctx->alloc_count += ctx->class_alloc_count[total_class_count];

	ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
	ralloc_steal(ctx->g, ctx->instrd);
	ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
	ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
}

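/* Worked example of the flat name space (illustrative): if a shader has
 * 3 scalar (class 0) names and 2 vec2 (class 1) names, then
 * class_base[0] == 0 and class_base[1] == 3, so the second vec2 value
 * (defn->name == 1) gets the ra name 3 + 1 = 4.
 */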
static unsigned
__ra_name(struct ir3_ra_ctx *ctx, int cls, struct ir3_instruction *defn)
{
	unsigned name;
	debug_assert(cls >= 0);
	debug_assert(cls < total_class_count);  /* we shouldn't get arrays here.. */
	name = ctx->class_base[cls] + defn->name;
	debug_assert(name < ctx->alloc_count);
	return name;
}

static int
ra_name(struct ir3_ra_ctx *ctx, struct ir3_ra_instr_data *id)
{
	/* TODO handle name mapping for arrays */
	return __ra_name(ctx, id->cls, id->defn);
}

static void
ra_destroy(struct ir3_ra_ctx *ctx)
{
	ralloc_free(ctx->g);
}

static void
__def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	debug_assert(name < ctx->alloc_count);
	/* defined on first write: */
	if (!ctx->def[name])
		ctx->def[name] = instr->ip;
	ctx->use[name] = instr->ip;
	BITSET_SET(bd->def, name);
}

static void
__use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	debug_assert(name < ctx->alloc_count);
	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
	if (!BITSET_TEST(bd->def, name))
		BITSET_SET(bd->use, name);
}

static void
ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	struct ir3_ra_block_data *bd;
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);

#define def(name, instr) __def(ctx, bd, name, instr)
#define use(name, instr) __use(ctx, bd, name, instr)

	bd = rzalloc(ctx->g, struct ir3_ra_block_data);

	bd->def     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->use     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->livein  = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);

	block->data = bd;

	struct ir3_instruction *first_non_input = NULL;
	foreach_instr (instr, &block->instr_list) {
		if (instr->opc != OPC_META_INPUT) {
			first_non_input = instr;
			break;
		}
	}

	foreach_instr (instr, &block->instr_list) {
		struct ir3_instruction *src;
		struct ir3_register *reg;

		/* There are a couple special cases to deal with here:
		 *
		 * split: used to split values from a higher class to a lower
		 *     class, for example to split the results of a texture
		 *     fetch into individual scalar values; we skip over these
		 *     from a 'def' perspective, and for a 'use' we walk the
		 *     chain up to the defining instruction.
		 *
		 * collect: used to collect values from a lower class and
		 *     assemble them together into a higher class, for example
		 *     arguments to texture sample instructions; we consider
		 *     these to be defined at the earliest collect source.
		 *
		 * Most of this is handled in the get_definer() helper.
		 *
		 * In either case, we trace the instruction back to the original
		 * definer and consider that as the def/use ip.
		 */

		if (writes_gpr(instr)) {
			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
			struct ir3_register *dst = instr->regs[0];

			if (dst->flags & IR3_REG_ARRAY) {
				struct ir3_array *arr =
					ir3_lookup_array(ctx->ir, dst->array.id);
				unsigned i;

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				/* set the node class now.. in case we don't encounter
				 * this array dst again.  From the register_allocate
				 * algo's perspective, these are all single/scalar regs:
				 */
				for (i = 0; i < arr->length; i++) {
					unsigned name = arr->base + i;
					ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
				}

				/* indirect write is treated like a write to all array
				 * elements, since we don't know which one is actually
				 * written:
				 */
				if (dst->flags & IR3_REG_RELATIV) {
					for (i = 0; i < arr->length; i++) {
						unsigned name = arr->base + i;
						def(name, instr);
					}
				} else {
					unsigned name = arr->base + dst->array.offset;
					def(name, instr);
				}

			} else if (id->defn == instr) {
				unsigned name = ra_name(ctx, id);

				/* since we are in SSA at this point: */
				debug_assert(!BITSET_TEST(bd->use, name));

				def(name, id->defn);

				if ((instr->opc == OPC_META_INPUT) && first_non_input)
					use(name, first_non_input);

				if (is_high(id->defn)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->high_classes[id->cls - HIGH_OFFSET]);
				} else if (is_half(id->defn)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->half_classes[id->cls - HALF_OFFSET]);
				} else {
					ra_set_node_class(ctx->g, name,
							ctx->set->classes[id->cls]);
				}
			}
		}

		foreach_src(reg, instr) {
			if (reg->flags & IR3_REG_ARRAY) {
				struct ir3_array *arr =
					ir3_lookup_array(ctx->ir, reg->array.id);
				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				/* indirect read is treated like a read from all array
				 * elements, since we don't know which one is actually
				 * read:
				 */
				if (reg->flags & IR3_REG_RELATIV) {
					unsigned i;
					for (i = 0; i < arr->length; i++) {
						unsigned name = arr->base + i;
						use(name, instr);
					}
				} else {
					unsigned name = arr->base + reg->array.offset;
					use(name, instr);
					/* NOTE: arrays are not SSA so unconditionally
					 * set use bit:
					 */
					BITSET_SET(bd->use, name);
					debug_assert(reg->array.offset < arr->length);
				}
			} else if ((src = ssa(reg)) && writes_gpr(src)) {
				unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
				use(name, instr);
			}
		}
	}
}

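/* Standard backwards dataflow, iterated to a fixed point.  For each
 * block:
 *
 *    livein  = use | (liveout & ~def)
 *    liveout = union of livein over all successors
 *
 * where 'use' is the set of names read before any write in the block,
 * and 'def' is the set of names written before any read.
 */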
static bool
ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
{
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
	bool progress = false;

	foreach_block (block, &ctx->ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		/* update livein: */
		for (unsigned i = 0; i < bitset_words; i++) {
			BITSET_WORD new_livein =
				(bd->use[i] | (bd->liveout[i] & ~bd->def[i]));

			if (new_livein & ~bd->livein[i]) {
				bd->livein[i] |= new_livein;
				progress = true;
			}
		}

		/* update liveout: */
		for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
			struct ir3_block *succ = block->successors[j];
			struct ir3_ra_block_data *succ_bd;

			if (!succ)
				continue;

			succ_bd = succ->data;

			for (unsigned i = 0; i < bitset_words; i++) {
				BITSET_WORD new_liveout =
					(succ_bd->livein[i] & ~bd->liveout[i]);

				if (new_liveout) {
					bd->liveout[i] |= new_liveout;
					progress = true;
				}
			}
		}
	}

	return progress;
}

static void
print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
{
	bool first = true;
	debug_printf(" %s:", name);
	for (unsigned i = 0; i < cnt; i++) {
		if (BITSET_TEST(bs, i)) {
			if (!first)
				debug_printf(",");
			debug_printf(" %04u", i);
			first = false;
		}
	}
	debug_printf("\n");
}

static void
ra_add_interference(struct ir3_ra_ctx *ctx)
{
	struct ir3 *ir = ctx->ir;

	/* initialize array live ranges: */
	foreach_array (arr, &ir->array_list) {
		arr->start_ip = ~0;
		arr->end_ip = 0;
	}

	/* compute live ranges (use/def) on a block level, also updating
	 * block's def/use bitmasks (used below to calculate per-block
	 * livein/liveout):
	 */
	foreach_block (block, &ir->block_list) {
		ra_block_compute_live_ranges(ctx, block);
	}

	/* update per-block livein/liveout: */
	while (ra_compute_livein_liveout(ctx)) {}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		debug_printf("AFTER LIVEIN/OUT:\n");
		foreach_block (block, &ir->block_list) {
			struct ir3_ra_block_data *bd = block->data;
			debug_printf("block%u:\n", block_id(block));
			print_bitset(" def", bd->def, ctx->alloc_count);
			print_bitset(" use", bd->use, ctx->alloc_count);
			print_bitset(" l/i", bd->livein, ctx->alloc_count);
			print_bitset(" l/o", bd->liveout, ctx->alloc_count);
		}
		foreach_array (arr, &ir->array_list) {
			debug_printf("array%u:\n", arr->id);
			debug_printf(" length:   %u\n", arr->length);
			debug_printf(" start_ip: %u\n", arr->start_ip);
			debug_printf(" end_ip:   %u\n", arr->end_ip);
		}
	}

	/* extend start/end ranges based on livein/liveout info from cfg: */
	foreach_block (block, &ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		for (unsigned i = 0; i < ctx->alloc_count; i++) {
			if (BITSET_TEST(bd->livein, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
			}

			if (BITSET_TEST(bd->liveout, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
			}
		}

		foreach_array (arr, &ctx->ir->array_list) {
			for (unsigned i = 0; i < arr->length; i++) {
				if (BITSET_TEST(bd->livein, i + arr->base)) {
					arr->start_ip = MIN2(arr->start_ip, block->start_ip);
				}
				if (BITSET_TEST(bd->liveout, i + arr->base)) {
					arr->end_ip = MAX2(arr->end_ip, block->end_ip);
				}
			}
		}
	}

	/* need to fix things up to keep outputs live: */
	struct ir3_instruction *out;
	foreach_output(out, ir) {
		unsigned name = ra_name(ctx, &ctx->instrd[out->ip]);
		ctx->use[name] = ctx->instr_cnt;
	}

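	/* Every pair of names whose [def, use) ip ranges overlap interferes
	 * and must get non-conflicting registers.  Eg. ranges [2, 10] and
	 * [10, 12] do not interfere (the second is defined where the first
	 * dies), but [2, 10] and [5, 12] do.
	 */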
	for (unsigned i = 0; i < ctx->alloc_count; i++) {
		for (unsigned j = 0; j < ctx->alloc_count; j++) {
			if (intersects(ctx->def[i], ctx->use[i],
					ctx->def[j], ctx->use[j])) {
				ra_add_node_interference(ctx->g, i, j);
			}
		}
	}
}

/* some instructions need fix-up if dst register is half precision: */
static void fixup_half_instr_dst(struct ir3_instruction *instr)
{
	switch (opc_cat(instr->opc)) {
	case 1: /* move instructions */
		instr->cat1.dst_type = half_type(instr->cat1.dst_type);
		break;
	case 3:
		switch (instr->opc) {
		case OPC_MAD_F32:
			/* mad.f32 is available with a half dest and full srcs,
			 * eg. mad.f32 hr0, r0.x, r0.y, r0.z -- so only switch
			 * to mad.f16 if the srcs are half too:
			 */
			if (instr->regs[1]->flags & IR3_REG_HALF)
				instr->opc = OPC_MAD_F16;
			break;
		case OPC_SEL_B32:
			instr->opc = OPC_SEL_B16;
			break;
		case OPC_SEL_S32:
			instr->opc = OPC_SEL_S16;
			break;
		case OPC_SEL_F32:
			instr->opc = OPC_SEL_F16;
			break;
		case OPC_SAD_S32:
			instr->opc = OPC_SAD_S16;
			break;
		/* instructions may already be fixed up: */
		case OPC_MAD_F16:
		case OPC_SEL_B16:
		case OPC_SEL_S16:
		case OPC_SEL_F16:
		case OPC_SAD_S16:
			break;
		default:
			assert(0);
			break;
		}
		break;
	case 5:
		instr->cat5.type = half_type(instr->cat5.type);
		break;
	}
}

/* some instructions need fix-up if src register is half precision: */
static void fixup_half_instr_src(struct ir3_instruction *instr)
{
	switch (instr->opc) {
	case OPC_MOV:
		instr->cat1.src_type = half_type(instr->cat1.src_type);
		break;
	default:
		break;
	}
}

/* NOTE: instr could be NULL for the IR3_REG_ARRAY case, for the first
 * array access(es) which do not have any previous access to depend
 * on from a scheduling point of view
 */
static void
reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
		struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id;

	if (reg->flags & IR3_REG_ARRAY) {
		struct ir3_array *arr =
			ir3_lookup_array(ctx->ir, reg->array.id);
		unsigned name = arr->base + reg->array.offset;
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r];

		if (reg->flags & IR3_REG_RELATIV) {
			reg->array.offset = num;
		} else {
			reg->num = num;
			reg->flags &= ~IR3_REG_SSA;
		}

		reg->flags &= ~IR3_REG_ARRAY;
	} else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
		unsigned name = ra_name(ctx, id);
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;

		debug_assert(!(reg->flags & IR3_REG_RELATIV));

		if (is_high(id->defn))
			num += FIRST_HIGH_REG;

		reg->num = num;
		reg->flags &= ~IR3_REG_SSA;

		if (is_half(id->defn))
			reg->flags |= IR3_REG_HALF;
	}
}

static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_register *reg;

		if (writes_gpr(instr)) {
			reg_assign(ctx, instr->regs[0], instr);
			if (instr->regs[0]->flags & IR3_REG_HALF)
				fixup_half_instr_dst(instr);
		}

		foreach_src_n(reg, n, instr) {
			struct ir3_instruction *src = reg->instr;
			/* Note: reg->instr could be null for IR3_REG_ARRAY */
			if (src || (reg->flags & IR3_REG_ARRAY))
				reg_assign(ctx, instr->regs[n+1], src);
			if (instr->regs[n+1]->flags & IR3_REG_HALF)
				fixup_half_instr_src(instr);
		}
	}
}

/* handle pre-colored registers.  This includes "arrays" (which could be of
 * length 1, used for phi webs lowered to registers in nir), as well as
 * special shader input values that need to be pinned to certain registers.
 */
static void
ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
{
	unsigned num_precolor = 0;
	for (unsigned i = 0; i < nprecolor; i++) {
		if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
			struct ir3_instruction *instr = precolor[i];
			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

			debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));

			/* only consider the first component: */
			if (id->off > 0)
				continue;

			/* 'base' is in scalar (class 0) but we need to map that to
			 * the conflicting register of the appropriate class (ie.
			 * input could be vec2/vec3/etc)
			 *
			 * Note that the higher class (larger than scalar) regs
			 * are setup to conflict with others in the same class,
			 * so for example, R1 (scalar) is also the first component
			 * of D1 (vec2/double):
			 *
			 *    Single (base) |  Double
			 *    --------------+---------------
			 *       R0         |  D0
			 *       R1         |  D0 D1
			 *       R2         |  D1 D2
			 *       R3         |  D2
			 *           .. and so on..
			 */
			unsigned regid = instr->regs[0]->num;
			unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
			unsigned name = ra_name(ctx, id);
			ra_set_node_reg(ctx->g, name, reg);
			num_precolor = MAX2(regid, num_precolor);
		}
	}

	/* pre-assign array elements:
	 */
	foreach_array (arr, &ctx->ir->array_list) {
		unsigned base = 0;

		if (arr->end_ip == 0)
			continue;

		/* figure out what else we conflict with which has already
		 * been assigned:
		 */
retry:
		foreach_array (arr2, &ctx->ir->array_list) {
			if (arr2 == arr)
				break;
			if (arr2->end_ip == 0)
				continue;
			/* if it intersects with liverange AND register range.. */
			if (intersects(arr->start_ip, arr->end_ip,
					arr2->start_ip, arr2->end_ip) &&
				intersects(base, base + arr->length,
					arr2->reg, arr2->reg + arr2->length)) {
				base = MAX2(base, arr2->reg + arr2->length);
				goto retry;
			}
		}

		/* also need to not conflict with any pre-assigned inputs: */
		for (unsigned i = 0; i < nprecolor; i++) {
			struct ir3_instruction *instr = precolor[i];

			if (!instr)
				continue;

			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

			/* only consider the first component: */
			if (id->off > 0)
				continue;

			unsigned name = ra_name(ctx, id);
			unsigned regid = instr->regs[0]->num;

			/* Check if array intersects with liverange AND register
			 * range of the input:
			 */
			if (intersects(arr->start_ip, arr->end_ip,
					ctx->def[name], ctx->use[name]) &&
				intersects(base, base + arr->length,
					regid, regid + class_sizes[id->cls])) {
				base = MAX2(base, regid + class_sizes[id->cls]);
				goto retry;
			}
		}

		arr->reg = base;

		for (unsigned i = 0; i < arr->length; i++) {
			unsigned name, reg;

			name = arr->base + i;
			reg = ctx->set->gpr_to_ra_reg[0][base++];

			ra_set_node_reg(ctx->g, name, reg);
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		foreach_array (arr, &ctx->ir->array_list) {
			unsigned first = arr->reg;
			unsigned last  = arr->reg + arr->length - 1;
			debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
					(first >> 2), "xyzw"[first & 0x3],
					(last >> 2), "xyzw"[last & 0x3]);
		}
	}
}

static int
ra_alloc(struct ir3_ra_ctx *ctx)
{
	if (!ra_allocate(ctx->g))
		return -1;

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_alloc(ctx, block);
	}

	return 0;
}

int ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor, unsigned nprecolor)
{
	struct ir3_ra_ctx ctx = {
			.v = v,
			.ir = v->ir,
			.set = v->ir->compiler->set,
	};
	int ret;

	ra_init(&ctx);
	ra_add_interference(&ctx);
	ra_precolor(&ctx, precolor, nprecolor);
	ret = ra_alloc(&ctx);
	ra_destroy(&ctx);

	return ret;
}