freedreno/ir3/ra: make use()/def() functions instead of macros
[mesa.git] / src / freedreno / ir3 / ir3_ra.c
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_math.h"
28 #include "util/register_allocate.h"
29 #include "util/ralloc.h"
30 #include "util/bitset.h"
31
32 #include "ir3.h"
33 #include "ir3_compiler.h"
34
35 /*
36 * Register Assignment:
37 *
38 * Uses the register_allocate util, which implements graph coloring
39 * algo with interference classes. To handle the cases where we need
40 * consecutive registers (for example, texture sample instructions),
41 * we model these as larger (double/quad/etc) registers which conflict
42 * with the corresponding registers in other classes.
43 *
44 * Additionally, we create classes for half-regs, which
45 * do not conflict with the full-reg classes. We do need at least
46 * sizes 1-4 (to deal w/ texture sample instructions output to half-
47 * reg). At the moment we don't create the higher order half-reg
48 * classes as half-reg frequently does not have enough precision
49 * for texture coords at higher resolutions.
50 *
51 * There are some additional cases that we need to handle specially,
52 * as the graph coloring algo doesn't understand "partial writes".
53 * For example, a sequence like:
54 *
55 * add r0.z, ...
56 * sam (f32)(xy)r0.x, ...
57 * ...
58 * sam (f32)(xyzw)r0.w, r0.x, ... ; 3d texture, so r0.xyz are coord
59 *
60 * In this scenario, we treat r0.xyz as class size 3, which is written
61 * (from a use/def perspective) at the 'add' instruction and ignore the
62 * subsequent partial writes to r0.xy. So the 'add r0.z, ...' is the
63 * defining instruction, as it is the first to partially write r0.xyz.
64 *
65 * Note i965 has a similar scenario, which they solve with a virtual
66 * LOAD_PAYLOAD instruction which gets turned into multiple MOV's after
67 * register assignment. But for us that is horrible from a scheduling
68 * standpoint. Instead what we do is use the idea of a 'definer' instruction.
69 * Ie. the first instruction (lowest ip) to write to the variable is the
70 * one we consider from a use/def perspective when building the interference
71 * graph. (Other instructions which write other variable components
72 * just define the variable some more.)
73 *
74 * Arrays of arbitrary size are handled via pre-coloring a consecutive
75 * sequence of registers. Additional scalar (single component) reg
76 * names are allocated starting at ctx->class_base[total_class_count]
77 * (see arr->base), which are pre-colored. In the use/def graph direct
78 * access is treated as a single element use/def, and indirect access
79 * is treated as use or def of all array elements. (Only the first
80 * def is tracked, in case of multiple indirect writes, etc.)
81 *
82 * TODO arrays that fit in one of the pre-defined class sizes should
83 * not need to be pre-colored, but instead could be given a normal
84 * vreg name. (Ignoring this for now since it is a good way to work
85 * out the kinks with arbitrary sized arrays.)
86 *
87 * TODO might be easier for debugging to split this into two passes,
88 * the first assigning vreg names in a way that we could ir3_print()
89 * the result.
90 */
91
92 static const unsigned class_sizes[] = {
93 1, 2, 3, 4,
94 4 + 4, /* txd + 1d/2d */
95 4 + 6, /* txd + 3d */
96 };
97 #define class_count ARRAY_SIZE(class_sizes)
98
99 static const unsigned half_class_sizes[] = {
100 1, 2, 3, 4,
101 };
102 #define half_class_count ARRAY_SIZE(half_class_sizes)
103
104 /* High regs seem to just be used for compute shaders, and vec1 and vec3
105 * seem to be sufficient (for now?)
106 */
107 static const unsigned high_class_sizes[] = {
108 1, 3,
109 };
110 #define high_class_count ARRAY_SIZE(high_class_sizes)
111
112 #define total_class_count (class_count + half_class_count + high_class_count)
113
114 /* Below a0.x are normal regs. RA doesn't need to assign a0.x/p0.x. */
115 #define NUM_REGS (4 * 48) /* r0 to r47 */
116 #define NUM_HIGH_REGS (4 * 8) /* r48 to r55 */
117 #define FIRST_HIGH_REG (4 * 48)
118 /* Number of virtual regs in a given class: */
119 #define CLASS_REGS(i) (NUM_REGS - (class_sizes[i] - 1))
120 #define HALF_CLASS_REGS(i) (NUM_REGS - (half_class_sizes[i] - 1))
121 #define HIGH_CLASS_REGS(i) (NUM_HIGH_REGS - (high_class_sizes[i] - 1))
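
/* A worked example of the counts above (illustrative only, nothing below
 * depends on it): total_class_count is 6 + 4 + 2 == 12.  With NUM_REGS == 192
 * scalar gprs, the size-4 full class has CLASS_REGS(3) == 192 - 3 == 189
 * possible starting positions, and the largest (size 10) class has
 * 192 - 9 == 183.  Each starting position becomes one "virtual" register in
 * the flat ra_reg space built in ir3_ra_alloc_reg_set() below.
 */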
122
123 #define HALF_OFFSET (class_count)
124 #define HIGH_OFFSET (class_count + half_class_count)
125
126 /* register-set, created one time, used for all shaders: */
127 struct ir3_ra_reg_set {
128 struct ra_regs *regs;
129 unsigned int classes[class_count];
130 unsigned int half_classes[half_class_count];
131 unsigned int high_classes[high_class_count];
132 /* maps flat virtual register space to base gpr: */
133 uint16_t *ra_reg_to_gpr;
134 /* maps cls,gpr to flat virtual register space: */
135 uint16_t **gpr_to_ra_reg;
136 };
137
138 static void
139 build_q_values(unsigned int **q_values, unsigned off,
140 const unsigned *sizes, unsigned count)
141 {
142 for (unsigned i = 0; i < count; i++) {
143 q_values[i + off] = rzalloc_array(q_values, unsigned, total_class_count);
144
145 /* From register_allocate.c:
146 *
147 * q(B,C) (indexed by C, B is this register class) in
148 * Runeson/Nyström paper. This is "how many registers of B could
149 * the worst choice register from C conflict with".
150 *
151 * If we just let the register allocation algorithm compute these
152 * values, it is extremely expensive. However, since all of our
153 * registers are laid out, we can very easily compute them
154 * ourselves. View the register from C as fixed starting at GRF n
155 * somewhere in the middle, and the register from B as sliding back
156 * and forth. Then the first register to conflict from B is the
157 * one starting at n - class_size[B] + 1 and the last register to
158 * conflict will start at n + class_size[B] - 1. Therefore, the
159 * number of conflicts from B is class_size[B] + class_size[C] - 1.
160 *
161 * +-+-+-+-+-+-+ +-+-+-+-+-+-+
162 * B | | | | | |n| --> | | | | | | |
163 * +-+-+-+-+-+-+ +-+-+-+-+-+-+
164 * +-+-+-+-+-+
165 * C |n| | | | |
166 * +-+-+-+-+-+
167 *
168 * (Idea copied from brw_fs_reg_allocate.cpp)
169 */
170 for (unsigned j = 0; j < count; j++)
171 q_values[i + off][j + off] = sizes[i] + sizes[j] - 1;
172 }
173 }
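
/* For example, with the full precision classes above, q between the vec2
 * and vec3 classes is 2 + 3 - 1 == 4: sliding a vec2 across a fixed vec3
 * gives four distinct vec2 positions that conflict with it.
 */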
174
175 /* One-time setup of RA register-set, which describes all the possible
176 * "virtual" registers and their interferences. Ie. double register
177 * occupies (and conflicts with) two single registers, and so forth.
178 * Since registers do not need to be aligned to their class size, they
179 * can conflict with other registers in the same class too. Ie:
180 *
181 * Single (base) | Double
182 * --------------+---------------
183 * R0 | D0
184 * R1 | D0 D1
185 * R2 | D1 D2
186 * R3 | D2
187 * .. and so on..
188 *
189 * (NOTE the disassembler uses notation like r0.x/y/z/w but those are
190 * really just four scalar registers. Don't let that confuse you.)
191 */
192 struct ir3_ra_reg_set *
193 ir3_ra_alloc_reg_set(struct ir3_compiler *compiler)
194 {
195 struct ir3_ra_reg_set *set = rzalloc(compiler, struct ir3_ra_reg_set);
196 unsigned ra_reg_count, reg, first_half_reg, first_high_reg, base;
197 unsigned int **q_values;
198
199 /* calculate # of regs across all classes: */
200 ra_reg_count = 0;
201 for (unsigned i = 0; i < class_count; i++)
202 ra_reg_count += CLASS_REGS(i);
203 for (unsigned i = 0; i < half_class_count; i++)
204 ra_reg_count += HALF_CLASS_REGS(i);
205 for (unsigned i = 0; i < high_class_count; i++)
206 ra_reg_count += HIGH_CLASS_REGS(i);
207
208 /* allocate and populate q_values: */
209 q_values = ralloc_array(set, unsigned *, total_class_count);
210
211 build_q_values(q_values, 0, class_sizes, class_count);
212 build_q_values(q_values, HALF_OFFSET, half_class_sizes, half_class_count);
213 build_q_values(q_values, HIGH_OFFSET, high_class_sizes, high_class_count);
214
215 /* allocate the reg-set.. */
216 set->regs = ra_alloc_reg_set(set, ra_reg_count, true);
217 set->ra_reg_to_gpr = ralloc_array(set, uint16_t, ra_reg_count);
218 set->gpr_to_ra_reg = ralloc_array(set, uint16_t *, total_class_count);
219
220 /* .. and classes */
221 reg = 0;
222 for (unsigned i = 0; i < class_count; i++) {
223 set->classes[i] = ra_alloc_reg_class(set->regs);
224
225 set->gpr_to_ra_reg[i] = ralloc_array(set, uint16_t, CLASS_REGS(i));
226
227 for (unsigned j = 0; j < CLASS_REGS(i); j++) {
228 ra_class_add_reg(set->regs, set->classes[i], reg);
229
230 set->ra_reg_to_gpr[reg] = j;
231 set->gpr_to_ra_reg[i][j] = reg;
232
233 for (unsigned br = j; br < j + class_sizes[i]; br++)
234 ra_add_transitive_reg_conflict(set->regs, br, reg);
235
236 reg++;
237 }
238 }
239
240 first_half_reg = reg;
241 base = HALF_OFFSET;
242
243 for (unsigned i = 0; i < half_class_count; i++) {
244 set->half_classes[i] = ra_alloc_reg_class(set->regs);
245
246 set->gpr_to_ra_reg[base + i] =
247 ralloc_array(set, uint16_t, HALF_CLASS_REGS(i));
248
249 for (unsigned j = 0; j < HALF_CLASS_REGS(i); j++) {
250 ra_class_add_reg(set->regs, set->half_classes[i], reg);
251
252 set->ra_reg_to_gpr[reg] = j;
253 set->gpr_to_ra_reg[base + i][j] = reg;
254
255 for (unsigned br = j; br < j + half_class_sizes[i]; br++)
256 ra_add_transitive_reg_conflict(set->regs, br + first_half_reg, reg);
257
258 reg++;
259 }
260 }
261
262 first_high_reg = reg;
263 base = HIGH_OFFSET;
264
265 for (unsigned i = 0; i < high_class_count; i++) {
266 set->high_classes[i] = ra_alloc_reg_class(set->regs);
267
268 set->gpr_to_ra_reg[base + i] =
269 ralloc_array(set, uint16_t, HIGH_CLASS_REGS(i));
270
271 for (unsigned j = 0; j < HIGH_CLASS_REGS(i); j++) {
272 ra_class_add_reg(set->regs, set->high_classes[i], reg);
273
274 set->ra_reg_to_gpr[reg] = j;
275 set->gpr_to_ra_reg[base + i][j] = reg;
276
277 for (unsigned br = j; br < j + high_class_sizes[i]; br++)
278 ra_add_transitive_reg_conflict(set->regs, br + first_high_reg, reg);
279
280 reg++;
281 }
282 }
283
284 /* starting a6xx, half precision regs conflict w/ full precision regs: */
285 if (compiler->gpu_id >= 600) {
286 /* because of transitivity, we can get away with just setting up
287 * conflicts between the first class of full and half regs:
288 */
289 for (unsigned i = 0; i < half_class_count; i++) {
290 /* NOTE there are fewer half class sizes, but they match the
291 * first N full class sizes.. but assert in case that ever
292 * accidentally changes:
293 */
294 debug_assert(class_sizes[i] == half_class_sizes[i]);
295 for (unsigned j = 0; j < CLASS_REGS(i) / 2; j++) {
296 unsigned freg = set->gpr_to_ra_reg[i][j];
297 unsigned hreg0 = set->gpr_to_ra_reg[i + HALF_OFFSET][(j * 2) + 0];
298 unsigned hreg1 = set->gpr_to_ra_reg[i + HALF_OFFSET][(j * 2) + 1];
299
300 ra_add_transitive_reg_pair_conflict(set->regs, freg, hreg0, hreg1);
301 }
302 }
303
304 // TODO also need to update q_values, but for now:
305 ra_set_finalize(set->regs, NULL);
306 } else {
307 ra_set_finalize(set->regs, q_values);
308 }
309
310 ralloc_free(q_values);
311
312 return set;
313 }
314
315 /* additional block-data (per-block) */
316 struct ir3_ra_block_data {
317 BITSET_WORD *def; /* variables defined before used in block */
318 BITSET_WORD *use; /* variables used before defined in block */
319 BITSET_WORD *livein; /* which defs reach entry point of block */
320 BITSET_WORD *liveout; /* which defs reach exit point of block */
321 };
322
323 /* additional instruction-data (per-instruction) */
324 struct ir3_ra_instr_data {
325 /* cached instruction 'definer' info: */
326 struct ir3_instruction *defn;
327 int off, sz, cls;
328 };
329
330 /* register-assign context, per-shader */
331 struct ir3_ra_ctx {
332 struct ir3_shader_variant *v;
333 struct ir3 *ir;
334
335 struct ir3_ra_reg_set *set;
336 struct ra_graph *g;
337 unsigned alloc_count;
338 /* one per class, plus one slot for arrays: */
339 unsigned class_alloc_count[total_class_count + 1];
340 unsigned class_base[total_class_count + 1];
341 unsigned instr_cnt;
342 unsigned *def, *use; /* def/use table */
343 struct ir3_ra_instr_data *instrd;
344 };
345
346 /* does it conflict? */
347 static inline bool
348 intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
349 {
350 return !((a_start >= b_end) || (b_start >= a_end));
351 }
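
/* The ranges behave as if half-open: eg. intersects(2, 5, 5, 9) is false,
 * so a value whose last use is at ip 5 does not conflict with one first
 * defined at ip 5.
 */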
352
353 static bool
354 is_half(struct ir3_instruction *instr)
355 {
356 return !!(instr->regs[0]->flags & IR3_REG_HALF);
357 }
358
359 static bool
360 is_high(struct ir3_instruction *instr)
361 {
362 return !!(instr->regs[0]->flags & IR3_REG_HIGH);
363 }
364
365 static int
366 size_to_class(unsigned sz, bool half, bool high)
367 {
368 if (high) {
369 for (unsigned i = 0; i < high_class_count; i++)
370 if (high_class_sizes[i] >= sz)
371 return i + HIGH_OFFSET;
372 } else if (half) {
373 for (unsigned i = 0; i < half_class_count; i++)
374 if (half_class_sizes[i] >= sz)
375 return i + HALF_OFFSET;
376 } else {
377 for (unsigned i = 0; i < class_count; i++)
378 if (class_sizes[i] >= sz)
379 return i;
380 }
381 debug_assert(0);
382 return -1;
383 }
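
/* Eg. size_to_class(3, false, false) returns the size-3 class (index 2),
 * while size_to_class(5, false, false) rounds up to the size-8 class
 * (index 4), since there is no exact-fit class between 4 and 4+4.
 */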
384
385 static bool
386 writes_gpr(struct ir3_instruction *instr)
387 {
388 if (is_store(instr))
389 return false;
390 if (instr->regs_count == 0)
391 return false;
392 /* is dest a normal temp register: */
393 struct ir3_register *reg = instr->regs[0];
394 if (reg->flags & (IR3_REG_CONST | IR3_REG_IMMED))
395 return false;
396 if ((reg->num == regid(REG_A0, 0)) ||
397 (reg->num == regid(REG_P0, 0)))
398 return false;
399 return true;
400 }
401
402 static bool
403 instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
404 {
405 if (a->flags & IR3_INSTR_UNUSED)
406 return false;
407 return (a->ip < b->ip);
408 }
409
410 static struct ir3_instruction *
411 get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
412 int *sz, int *off)
413 {
414 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
415 struct ir3_instruction *d = NULL;
416
417 if (id->defn) {
418 *sz = id->sz;
419 *off = id->off;
420 return id->defn;
421 }
422
423 if (instr->opc == OPC_META_COLLECT) {
424 /* What about the case where the collect is a subset of an array? We
425 * would need to find the distance between where the actual array starts
426 * and the collect.. that probably doesn't happen currently.
427 */
428 struct ir3_register *src;
429 int dsz, doff;
430
431 /* note: don't use foreach_ssa_src as this gets called once
432 * while assigning regs (which clears SSA flag)
433 */
434 foreach_src_n(src, n, instr) {
435 struct ir3_instruction *dd;
436 if (!src->instr)
437 continue;
438
439 dd = get_definer(ctx, src->instr, &dsz, &doff);
440
441 if ((!d) || instr_before(dd, d)) {
442 d = dd;
443 *sz = dsz;
444 *off = doff - n;
445 }
446 }
447
448 } else if (instr->cp.right || instr->cp.left) {
449 /* covers also the meta:fo case, which ends up w/ single
450 * scalar instructions for each component:
451 */
452 struct ir3_instruction *f = ir3_neighbor_first(instr);
453
454 /* by definition, the entire sequence forms one linked list
455 * of single scalar register nodes (even if some of them may
456 * be splits from a texture sample (for example) instr). We
457 * just need to walk the list to find the first element of
458 * the group defined (lowest ip).
459 */
460 int cnt = 0;
461
462 /* need to skip over unused in the group: */
463 while (f && (f->flags & IR3_INSTR_UNUSED)) {
464 f = f->cp.right;
465 cnt++;
466 }
467
468 while (f) {
469 if ((!d) || instr_before(f, d))
470 d = f;
471 if (f == instr)
472 *off = cnt;
473 f = f->cp.right;
474 cnt++;
475 }
476
477 *sz = cnt;
478
479 } else {
480 /* second case is looking directly at the instruction which
481 * produces multiple values (eg, texture sample), rather
482 * than the split nodes that point back to that instruction.
483 * This isn't quite right, because it may be part of a larger
484 * group, such as:
485 *
486 * sam (f32)(xyzw)r0.x, ...
487 * add r1.x, ...
488 * add r1.y, ...
489 * sam (f32)(xyzw)r2.x, r0.w <-- (r0.w, r1.x, r1.y)
490 *
491 * need to come up with a better way to handle that case.
492 */
493 if (instr->address) {
494 *sz = instr->regs[0]->size;
495 } else {
496 *sz = util_last_bit(instr->regs[0]->wrmask);
497 }
498 *off = 0;
499 d = instr;
500 }
501
502 if (d->opc == OPC_META_SPLIT) {
503 struct ir3_instruction *dd;
504 int dsz, doff;
505
506 dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
507
508 /* by definition, should come before: */
509 debug_assert(instr_before(dd, d));
510
511 *sz = MAX2(*sz, dsz);
512
513 if (instr->opc == OPC_META_SPLIT)
514 *off = MAX2(*off, instr->split.off);
515
516 d = dd;
517 }
518
519 debug_assert(d->opc != OPC_META_SPLIT);
520
521 id->defn = d;
522 id->sz = *sz;
523 id->off = *off;
524
525 return d;
526 }
527
528 static void
529 ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
530 {
531 foreach_instr (instr, &block->instr_list) {
532 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
533 if (instr->regs_count == 0)
534 continue;
535 /* couple special cases: */
536 if (writes_addr(instr) || writes_pred(instr)) {
537 id->cls = -1;
538 } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
539 id->cls = total_class_count;
540 } else {
541 /* and the normal case: */
542 id->defn = get_definer(ctx, instr, &id->sz, &id->off);
543 id->cls = size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
544
545 /* this is a bit of duct-tape.. if we have a scenario like:
546 *
547 * sam (f32)(x) out.x, ...
548 * sam (f32)(x) out.y, ...
549 *
550 * Then the fanout/split meta instructions for the two different
551 * tex instructions end up grouped as left/right neighbors. The
552 * upshot is that when you get_definer() on one of the meta:fo's
553 * you get the first sam as the definer with sz=2, but when you call
554 * get_definer() on either of the sam's you get the sam itself as the
555 * definer with sz=1.
556 *
557 * (We actually avoid this scenario exactly, the neighbor links
558 * prevent one of the output mov's from being eliminated, so this
559 * hack should be enough. But probably we need to rethink how we
560 * find the "defining" instruction.)
561 *
562 * TODO how do we figure out offset properly...
563 */
564 if (id->defn != instr) {
565 struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
566 if (did->sz < id->sz) {
567 did->sz = id->sz;
568 did->cls = id->cls;
569 }
570 }
571 }
572 }
573 }
574
575 /* give each instruction a name (and ip), and count up the # of names
576 * of each class
577 */
578 static void
579 ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
580 {
581 foreach_instr (instr, &block->instr_list) {
582 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
583
584 #ifdef DEBUG
585 instr->name = ~0;
586 #endif
587
588 ctx->instr_cnt++;
589
590 if (!writes_gpr(instr))
591 continue;
592
593 if (id->defn != instr)
594 continue;
595
596 /* arrays which don't fit in one of the pre-defined class
597 * sizes are pre-colored:
598 */
599 if ((id->cls >= 0) && (id->cls < total_class_count)) {
600 instr->name = ctx->class_alloc_count[id->cls]++;
601 ctx->alloc_count++;
602 }
603 }
604 }
605
606 static void
607 ra_init(struct ir3_ra_ctx *ctx)
608 {
609 unsigned n, base;
610
611 ir3_clear_mark(ctx->ir);
612 n = ir3_count_instructions(ctx->ir);
613
614 ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
615
616 foreach_block (block, &ctx->ir->block_list) {
617 ra_block_find_definers(ctx, block);
618 }
619
620 foreach_block (block, &ctx->ir->block_list) {
621 ra_block_name_instructions(ctx, block);
622 }
623
624 /* figure out the base register name for each class. The
625 * actual ra name is class_base[cls] + instr->name;
626 */
627 ctx->class_base[0] = 0;
628 for (unsigned i = 1; i <= total_class_count; i++) {
629 ctx->class_base[i] = ctx->class_base[i-1] +
630 ctx->class_alloc_count[i-1];
631 }
632
633 /* and vreg names for array elements: */
634 base = ctx->class_base[total_class_count];
635 foreach_array (arr, &ctx->ir->array_list) {
636 arr->base = base;
637 ctx->class_alloc_count[total_class_count] += arr->length;
638 base += arr->length;
639 }
640 ctx->alloc_count += ctx->class_alloc_count[total_class_count];
641
642 ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
643 ralloc_steal(ctx->g, ctx->instrd);
644 ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
645 ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
646 }
647
648 static unsigned
649 __ra_name(struct ir3_ra_ctx *ctx, int cls, struct ir3_instruction *defn)
650 {
651 unsigned name;
652 debug_assert(cls >= 0);
653 debug_assert(cls < total_class_count); /* we shouldn't get arrays here.. */
654 name = ctx->class_base[cls] + defn->name;
655 debug_assert(name < ctx->alloc_count);
656 return name;
657 }
658
659 static int
660 ra_name(struct ir3_ra_ctx *ctx, struct ir3_ra_instr_data *id)
661 {
662 /* TODO handle name mapping for arrays */
663 return __ra_name(ctx, id->cls, id->defn);
664 }
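
/* A hypothetical example of the name mapping: if the scalar (size-1) full
 * class was given 10 names, then a defn with cls == 1 (the vec2 class) and
 * defn->name == 2 gets ra name class_base[1] + 2 == 12.
 */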
665
666 static void
667 ra_destroy(struct ir3_ra_ctx *ctx)
668 {
669 ralloc_free(ctx->g);
670 }
671
672 static void
673 __def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
674 struct ir3_instruction *instr)
675 {
676 debug_assert(name < ctx->alloc_count);
677 /* defined on first write: */
678 if (!ctx->def[name])
679 ctx->def[name] = instr->ip;
680 ctx->use[name] = instr->ip;
681 BITSET_SET(bd->def, name);
682 }
683
684 static void
685 __use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
686 struct ir3_instruction *instr)
687 {
688 debug_assert(name < ctx->alloc_count);
689 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
690 if (!BITSET_TEST(bd->def, name))
691 BITSET_SET(bd->use, name);
692 }
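
/* A small sketch of how def[]/use[] end up being used: within a block, a
 * value first written at ip 10 and read at ip 11 and ip 14 gets
 * def[name] == 10 and use[name] == 14, and that [def, use] range is what
 * intersects() compares when adding interference edges below.
 */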
693
694 static void
695 ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
696 {
697 struct ir3_ra_block_data *bd;
698 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
699
700 #define def(name, instr) __def(ctx, bd, name, instr)
701 #define use(name, instr) __use(ctx, bd, name, instr)
702
703 bd = rzalloc(ctx->g, struct ir3_ra_block_data);
704
705 bd->def = rzalloc_array(bd, BITSET_WORD, bitset_words);
706 bd->use = rzalloc_array(bd, BITSET_WORD, bitset_words);
707 bd->livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
708 bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
709
710 block->data = bd;
711
712 struct ir3_instruction *first_non_input = NULL;
713 foreach_instr (instr, &block->instr_list) {
714 if (instr->opc != OPC_META_INPUT) {
715 first_non_input = instr;
716 break;
717 }
718 }
719
720
721 foreach_instr (instr, &block->instr_list) {
722 struct ir3_instruction *src;
723 struct ir3_register *reg;
724
725 /* There are a couple special cases to deal with here:
726 *
727 * split: used to split values from a higher class to a lower
728 * class, for example split the results of a texture fetch
729 * into individual scalar values; We skip over these from
730 * a 'def' perspective, and for a 'use' we walk the chain
731 * up to the defining instruction.
732 *
733 * collect: used to collect values from lower class and assemble
734 * them together into a higher class, for example arguments
735 * to texture sample instructions; We consider these to be
736 * defined at the earliest collect source.
737 *
738 * Most of this is handled in the get_definer() helper.
739 *
740 * In either case, we trace the instruction back to the original
741 * definer and consider that as the def/use ip.
742 */
743
744 if (writes_gpr(instr)) {
745 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
746 struct ir3_register *dst = instr->regs[0];
747
748 if (dst->flags & IR3_REG_ARRAY) {
749 struct ir3_array *arr =
750 ir3_lookup_array(ctx->ir, dst->array.id);
751 unsigned i;
752
753 arr->start_ip = MIN2(arr->start_ip, instr->ip);
754 arr->end_ip = MAX2(arr->end_ip, instr->ip);
755
756 /* set the node class now.. in case we don't encounter
757 * this array dst again. From register_alloc algo's
758 * perspective, these are all single/scalar regs:
759 */
760 for (i = 0; i < arr->length; i++) {
761 unsigned name = arr->base + i;
762 ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
763 }
764
765 /* indirect write is treated like a write to all array
766 * elements, since we don't know which one is actually
767 * written:
768 */
769 if (dst->flags & IR3_REG_RELATIV) {
770 for (i = 0; i < arr->length; i++) {
771 unsigned name = arr->base + i;
772 def(name, instr);
773 }
774 } else {
775 unsigned name = arr->base + dst->array.offset;
776 def(name, instr);
777 }
778
779 } else if (id->defn == instr) {
780 unsigned name = ra_name(ctx, id);
781
782 /* since we are in SSA at this point: */
783 debug_assert(!BITSET_TEST(bd->use, name));
784
785 def(name, id->defn);
786
787 if ((instr->opc == OPC_META_INPUT) && first_non_input)
788 use(name, first_non_input);
789
790 if (is_high(id->defn)) {
791 ra_set_node_class(ctx->g, name,
792 ctx->set->high_classes[id->cls - HIGH_OFFSET]);
793 } else if (is_half(id->defn)) {
794 ra_set_node_class(ctx->g, name,
795 ctx->set->half_classes[id->cls - HALF_OFFSET]);
796 } else {
797 ra_set_node_class(ctx->g, name,
798 ctx->set->classes[id->cls]);
799 }
800 }
801 }
802
803 foreach_src(reg, instr) {
804 if (reg->flags & IR3_REG_ARRAY) {
805 struct ir3_array *arr =
806 ir3_lookup_array(ctx->ir, reg->array.id);
807 arr->start_ip = MIN2(arr->start_ip, instr->ip);
808 arr->end_ip = MAX2(arr->end_ip, instr->ip);
809
810 /* indirect read is treated like a read from all array
811 * elements, since we don't know which one is actually
812 * read:
813 */
814 if (reg->flags & IR3_REG_RELATIV) {
815 unsigned i;
816 for (i = 0; i < arr->length; i++) {
817 unsigned name = arr->base + i;
818 use(name, instr);
819 }
820 } else {
821 unsigned name = arr->base + reg->array.offset;
822 use(name, instr);
823 /* NOTE: arrays are not SSA so unconditionally
824 * set use bit:
825 */
826 BITSET_SET(bd->use, name);
827 debug_assert(reg->array.offset < arr->length);
828 }
829 } else if ((src = ssa(reg)) && writes_gpr(src)) {
830 unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
831 use(name, instr);
832 }
833 }
834 }
835 }
836
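/* Standard backwards dataflow step; per block b the loops below compute
 *
 *    livein(b)  = use(b) | (liveout(b) & ~def(b))
 *    liveout(b) = union of livein(s) over each successor s of b
 *
 * and the caller iterates this until no more progress is made.
 */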
837 static bool
838 ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
839 {
840 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
841 bool progress = false;
842
843 foreach_block (block, &ctx->ir->block_list) {
844 struct ir3_ra_block_data *bd = block->data;
845
846 /* update livein: */
847 for (unsigned i = 0; i < bitset_words; i++) {
848 BITSET_WORD new_livein =
849 (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));
850
851 if (new_livein & ~bd->livein[i]) {
852 bd->livein[i] |= new_livein;
853 progress = true;
854 }
855 }
856
857 /* update liveout: */
858 for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
859 struct ir3_block *succ = block->successors[j];
860 struct ir3_ra_block_data *succ_bd;
861
862 if (!succ)
863 continue;
864
865 succ_bd = succ->data;
866
867 for (unsigned i = 0; i < bitset_words; i++) {
868 BITSET_WORD new_liveout =
869 (succ_bd->livein[i] & ~bd->liveout[i]);
870
871 if (new_liveout) {
872 bd->liveout[i] |= new_liveout;
873 progress = true;
874 }
875 }
876 }
877 }
878
879 return progress;
880 }
881
882 static void
883 print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
884 {
885 bool first = true;
886 debug_printf(" %s:", name);
887 for (unsigned i = 0; i < cnt; i++) {
888 if (BITSET_TEST(bs, i)) {
889 if (!first)
890 debug_printf(",");
891 debug_printf(" %04u", i);
892 first = false;
893 }
894 }
895 debug_printf("\n");
896 }
897
898 static void
899 ra_add_interference(struct ir3_ra_ctx *ctx)
900 {
901 struct ir3 *ir = ctx->ir;
902
903 /* initialize array live ranges: */
904 foreach_array (arr, &ir->array_list) {
905 arr->start_ip = ~0;
906 arr->end_ip = 0;
907 }
908
909 /* compute live ranges (use/def) on a block level, also updating
910 * block's def/use bitmasks (used below to calculate per-block
911 * livein/liveout):
912 */
913 foreach_block (block, &ir->block_list) {
914 ra_block_compute_live_ranges(ctx, block);
915 }
916
917 /* update per-block livein/liveout: */
918 while (ra_compute_livein_liveout(ctx)) {}
919
920 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
921 debug_printf("AFTER LIVEIN/OUT:\n");
922 foreach_block (block, &ir->block_list) {
923 struct ir3_ra_block_data *bd = block->data;
924 debug_printf("block%u:\n", block_id(block));
925 print_bitset(" def", bd->def, ctx->alloc_count);
926 print_bitset(" use", bd->use, ctx->alloc_count);
927 print_bitset(" l/i", bd->livein, ctx->alloc_count);
928 print_bitset(" l/o", bd->liveout, ctx->alloc_count);
929 }
930 foreach_array (arr, &ir->array_list) {
931 debug_printf("array%u:\n", arr->id);
932 debug_printf(" length: %u\n", arr->length);
933 debug_printf(" start_ip: %u\n", arr->start_ip);
934 debug_printf(" end_ip: %u\n", arr->end_ip);
935 }
936 }
937
938 /* extend start/end ranges based on livein/liveout info from cfg: */
939 foreach_block (block, &ir->block_list) {
940 struct ir3_ra_block_data *bd = block->data;
941
942 for (unsigned i = 0; i < ctx->alloc_count; i++) {
943 if (BITSET_TEST(bd->livein, i)) {
944 ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
945 ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
946 }
947
948 if (BITSET_TEST(bd->liveout, i)) {
949 ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
950 ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
951 }
952 }
953
954 foreach_array (arr, &ctx->ir->array_list) {
955 for (unsigned i = 0; i < arr->length; i++) {
956 if (BITSET_TEST(bd->livein, i + arr->base)) {
957 arr->start_ip = MIN2(arr->start_ip, block->start_ip);
958 }
959 if (BITSET_TEST(bd->liveout, i + arr->base)) {
960 arr->end_ip = MAX2(arr->end_ip, block->end_ip);
961 }
962 }
963 }
964 }
965
966 /* need to fix things up to keep outputs live: */
967 struct ir3_instruction *out;
968 foreach_output(out, ir) {
969 unsigned name = ra_name(ctx, &ctx->instrd[out->ip]);
970 ctx->use[name] = ctx->instr_cnt;
971 }
972
973 for (unsigned i = 0; i < ctx->alloc_count; i++) {
974 for (unsigned j = 0; j < ctx->alloc_count; j++) {
975 if (intersects(ctx->def[i], ctx->use[i],
976 ctx->def[j], ctx->use[j])) {
977 ra_add_node_interference(ctx->g, i, j);
978 }
979 }
980 }
981 }
982
983 /* some instructions need fix-up if dst register is half precision: */
984 static void fixup_half_instr_dst(struct ir3_instruction *instr)
985 {
986 switch (opc_cat(instr->opc)) {
987 case 1: /* move instructions */
988 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
989 break;
990 case 3:
991 switch (instr->opc) {
992 case OPC_MAD_F32:
993 /* mad.f32 is available when the dest is half and the srcs are full,
994 * eg. mad.f32 hr0, r0.x, r0.y, r0.z; only switch to mad.f16 if the srcs are half too:
995 */
996 if (instr->regs[1]->flags & IR3_REG_HALF)
997 instr->opc = OPC_MAD_F16;
998 break;
999 case OPC_SEL_B32:
1000 instr->opc = OPC_SEL_B16;
1001 break;
1002 case OPC_SEL_S32:
1003 instr->opc = OPC_SEL_S16;
1004 break;
1005 case OPC_SEL_F32:
1006 instr->opc = OPC_SEL_F16;
1007 break;
1008 case OPC_SAD_S32:
1009 instr->opc = OPC_SAD_S16;
1010 break;
1011 /* instructions may already be fixed up: */
1012 case OPC_MAD_F16:
1013 case OPC_SEL_B16:
1014 case OPC_SEL_S16:
1015 case OPC_SEL_F16:
1016 case OPC_SAD_S16:
1017 break;
1018 default:
1019 assert(0);
1020 break;
1021 }
1022 break;
1023 case 5:
1024 instr->cat5.type = half_type(instr->cat5.type);
1025 break;
1026 }
1027 }
1028 /* some instructions need fix-up if src register is half precision: */
1029 static void fixup_half_instr_src(struct ir3_instruction *instr)
1030 {
1031 switch (instr->opc) {
1032 case OPC_MOV:
1033 instr->cat1.src_type = half_type(instr->cat1.src_type);
1034 break;
1035 default:
1036 break;
1037 }
1038 }
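
/* Eg. a mov whose dst gets assigned a half register has its cat1.dst_type
 * converted with half_type() (f32 becomes f16) by fixup_half_instr_dst(),
 * and if a src also ends up half, fixup_half_instr_src() converts
 * cat1.src_type the same way.
 */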
1039
1040 /* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
1041 * array access(es) which do not have any previous access to depend
1042 * on from a scheduling point of view.
1043 */
1044 static void
1045 reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
1046 struct ir3_instruction *instr)
1047 {
1048 struct ir3_ra_instr_data *id;
1049
1050 if (reg->flags & IR3_REG_ARRAY) {
1051 struct ir3_array *arr =
1052 ir3_lookup_array(ctx->ir, reg->array.id);
1053 unsigned name = arr->base + reg->array.offset;
1054 unsigned r = ra_get_node_reg(ctx->g, name);
1055 unsigned num = ctx->set->ra_reg_to_gpr[r];
1056
1057 if (reg->flags & IR3_REG_RELATIV) {
1058 reg->array.offset = num;
1059 } else {
1060 reg->num = num;
1061 reg->flags &= ~IR3_REG_SSA;
1062 }
1063
1064 reg->flags &= ~IR3_REG_ARRAY;
1065 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
1066 unsigned name = ra_name(ctx, id);
1067 unsigned r = ra_get_node_reg(ctx->g, name);
1068 unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
1069
1070 debug_assert(!(reg->flags & IR3_REG_RELATIV));
1071
1072 if (is_high(id->defn))
1073 num += FIRST_HIGH_REG;
1074
1075 reg->num = num;
1076 reg->flags &= ~IR3_REG_SSA;
1077
1078 if (is_half(id->defn))
1079 reg->flags |= IR3_REG_HALF;
1080 }
1081 }
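
/* A hypothetical example of the mapping above: a vec2 value whose node was
 * colored with a virtual reg that maps to base gpr 6 gets reg->num == 6
 * (ie. r1.z) at its defining instruction, while the split reading its second
 * component (id->off == 1) gets num 7 (r1.w).
 */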
1082
1083 static void
1084 ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
1085 {
1086 foreach_instr (instr, &block->instr_list) {
1087 struct ir3_register *reg;
1088
1089 if (writes_gpr(instr)) {
1090 reg_assign(ctx, instr->regs[0], instr);
1091 if (instr->regs[0]->flags & IR3_REG_HALF)
1092 fixup_half_instr_dst(instr);
1093 }
1094
1095 foreach_src_n(reg, n, instr) {
1096 struct ir3_instruction *src = reg->instr;
1097 /* Note: reg->instr could be null for IR3_REG_ARRAY */
1098 if (src || (reg->flags & IR3_REG_ARRAY))
1099 reg_assign(ctx, instr->regs[n+1], src);
1100 if (instr->regs[n+1]->flags & IR3_REG_HALF)
1101 fixup_half_instr_src(instr);
1102 }
1103 }
1104 }
1105
1106 /* handle pre-colored registers. This includes "arrays" (which could be of
1107 * length 1, used for phi webs lowered to registers in nir), as well as
1108 * special shader input values that need to be pinned to certain registers.
1109 */
1110 static void
1111 ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
1112 {
1113 unsigned num_precolor = 0;
1114 for (unsigned i = 0; i < nprecolor; i++) {
1115 if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
1116 struct ir3_instruction *instr = precolor[i];
1117 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1118
1119 debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
1120
1121 /* only consider the first component: */
1122 if (id->off > 0)
1123 continue;
1124
1125 /* 'base' is in scalar (class 0) but we need to map that to
1126 * the conflicting register of the appropriate class (ie.
1127 * input could be vec2/vec3/etc)
1128 *
1129 * Note that the higher class (larger than scalar) regs
1130 * are setup to conflict with others in the same class,
1131 * so for example, R1 (scalar) is also the first component
1132 * of D1 (vec2/double):
1133 *
1134 * Single (base) | Double
1135 * --------------+---------------
1136 * R0 | D0
1137 * R1 | D0 D1
1138 * R2 | D1 D2
1139 * R3 | D2
1140 * .. and so on..
1141 */
1142 unsigned regid = instr->regs[0]->num;
1143 unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1144 unsigned name = ra_name(ctx, id);
1145 ra_set_node_reg(ctx->g, name, reg);
1146 num_precolor = MAX2(regid, num_precolor);
1147 }
1148 }
1149
1150 /* pre-assign array elements:
1151 */
1152 foreach_array (arr, &ctx->ir->array_list) {
1153 unsigned base = 0;
1154
1155 if (arr->end_ip == 0)
1156 continue;
1157
1158 /* figure out what else we conflict with which has already
1159 * been assigned:
1160 */
1161 retry:
1162 foreach_array (arr2, &ctx->ir->array_list) {
1163 if (arr2 == arr)
1164 break;
1165 if (arr2->end_ip == 0)
1166 continue;
1167 /* if it intersects with liverange AND register range.. */
1168 if (intersects(arr->start_ip, arr->end_ip,
1169 arr2->start_ip, arr2->end_ip) &&
1170 intersects(base, base + arr->length,
1171 arr2->reg, arr2->reg + arr2->length)) {
1172 base = MAX2(base, arr2->reg + arr2->length);
1173 goto retry;
1174 }
1175 }
1176
1177 /* also need to not conflict with any pre-assigned inputs: */
1178 for (unsigned i = 0; i < nprecolor; i++) {
1179 struct ir3_instruction *instr = precolor[i];
1180
1181 if (!instr)
1182 continue;
1183
1184 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1185
1186 /* only consider the first component: */
1187 if (id->off > 0)
1188 continue;
1189
1190 unsigned name = ra_name(ctx, id);
1191 unsigned regid = instr->regs[0]->num;
1192
1193 /* Check if array intersects with liverange AND register
1194 * range of the input:
1195 */
1196 if (intersects(arr->start_ip, arr->end_ip,
1197 ctx->def[name], ctx->use[name]) &&
1198 intersects(base, base + arr->length,
1199 regid, regid + class_sizes[id->cls])) {
1200 base = MAX2(base, regid + class_sizes[id->cls]);
1201 goto retry;
1202 }
1203 }
1204
1205 arr->reg = base;
1206
1207 for (unsigned i = 0; i < arr->length; i++) {
1208 unsigned name, reg;
1209
1210 name = arr->base + i;
1211 reg = ctx->set->gpr_to_ra_reg[0][base++];
1212
1213 ra_set_node_reg(ctx->g, name, reg);
1214 }
1215 }
1216
1217 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1218 foreach_array (arr, &ctx->ir->array_list) {
1219 unsigned first = arr->reg;
1220 unsigned last = arr->reg + arr->length - 1;
1221 debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
1222 (first >> 2), "xyzw"[first & 0x3],
1223 (last >> 2), "xyzw"[last & 0x3]);
1224 }
1225 }
1226 }
1227
1228 static int
1229 ra_alloc(struct ir3_ra_ctx *ctx)
1230 {
1231 if (!ra_allocate(ctx->g))
1232 return -1;
1233
1234 foreach_block (block, &ctx->ir->block_list) {
1235 ra_block_alloc(ctx, block);
1236 }
1237
1238 return 0;
1239 }
1240
1241 int ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor, unsigned nprecolor)
1242 {
1243 struct ir3_ra_ctx ctx = {
1244 .v = v,
1245 .ir = v->ir,
1246 .set = v->ir->compiler->set,
1247 };
1248 int ret;
1249
1250 ra_init(&ctx);
1251 ra_add_interference(&ctx);
1252 ra_precolor(&ctx, precolor, nprecolor);
1253 ret = ra_alloc(&ctx);
1254 ra_destroy(&ctx);
1255
1256 return ret;
1257 }