freedreno/ir3/ra: fix array liveranges
[mesa.git] / src / freedreno / ir3 / ir3_ra.c
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_math.h"
28 #include "util/register_allocate.h"
29 #include "util/ralloc.h"
30 #include "util/bitset.h"
31
32 #include "ir3.h"
33 #include "ir3_compiler.h"
34 #include "ir3_ra.h"
35
36
37 #ifdef DEBUG
38 #define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)
39 #else
40 #define RA_DEBUG 0
41 #endif
42 #define d(fmt, ...) do { if (RA_DEBUG) { \
43 printf("RA: "fmt"\n", ##__VA_ARGS__); \
44 } } while (0)
45
46 #define di(instr, fmt, ...) do { if (RA_DEBUG) { \
47 printf("RA: "fmt": ", ##__VA_ARGS__); \
48 ir3_print_instr(instr); \
49 } } while (0)
50
51 /*
52 * Register Assignment:
53 *
54 * Uses the register_allocate util, which implements a graph coloring
55 * algo with interference classes. To handle the cases where we need
56 * consecutive registers (for example, texture sample instructions),
57 * we model these as larger (double/quad/etc) registers which conflict
58 * with the corresponding registers in other classes.
59 *
60 * We also create additional classes for half-regs, which
61 * do not conflict with the full-reg classes. We do need at least
62 * sizes 1-4 (to deal w/ texture sample instructions output to half-
63 * reg). At the moment we don't create the higher order half-reg
64 * classes as half-reg frequently does not have enough precision
65 * for texture coords at higher resolutions.
66 *
67 * There are some additional cases that we need to handle specially,
68 * as the graph coloring algo doesn't understand "partial writes".
69 * For example, a sequence like:
70 *
71 * add r0.z, ...
72 * sam (f32)(xy)r0.x, ...
73 * ...
74 * sam (f32)(xyzw)r0.w, r0.x, ... ; 3d texture, so r0.xyz are coord
75 *
76 * In this scenario, we treat r0.xyz as class size 3, which is written
77 * (from a use/def perspective) at the 'add' instruction and ignore the
78 * subsequent partial writes to r0.xy. So the 'add r0.z, ...' is the
79 * defining instruction, as it is the first to partially write r0.xyz.
80 *
81 * To address the fragmentation that this can potentially cause, a
82 * two pass register allocation is used. After the first pass the
83 * assignment of scalars is discarded, but the assignment of vecN (for
84 * N > 1) is used to pre-color in the second pass, which considers
85 * only scalars.
86 *
87 * Arrays of arbitrary size are handled via pre-coloring a consecutive
88 * sequence of registers. Additional scalar (single component) reg
89 * names are allocated starting at ctx->class_base[total_class_count]
90 * (see arr->base), which are pre-colored. In the use/def graph direct
91 * access is treated as a single element use/def, and indirect access
92 * is treated as use or def of all array elements. (Only the first
93 * def is tracked, in case of multiple indirect writes, etc.)
94 *
95 * TODO arrays that fit in one of the pre-defined class sizes should
96 * not need to be pre-colored, but instead could be given a normal
97 * vreg name. (Ignoring this for now since it is a good way to work
98 * out the kinks with arbitrary sized arrays.)
99 *
100 * TODO might be easier for debugging to split this into two passes,
101 * the first assigning vreg names in a way that we could ir3_print()
102 * the result.
103 */
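/*
 * To make the name layout concrete, a small hypothetical example
 * (numbers purely for illustration, assuming total_class_count == 2):
 * with per-class alloc counts of {3, 2} and a single array of length 4,
 * ra_init() would lay the vreg names out roughly as:
 *
 *   names 0..2   class 0 instructions   (class_base[0] == 0)
 *   names 3..4   class 1 instructions   (class_base[1] == 3)
 *   names 5..8   array elements         (arr->base == class_base[2] == 5)
 */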
104
105
106 static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);
107
108 static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
109 static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);
110
111 /* does it conflict? */
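/* The ranges are treated as half-open, so two ranges that merely touch
 * do not conflict, eg:
 *
 *   intersects(0, 4, 4, 8) == false
 *   intersects(0, 5, 4, 8) == true
 */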
112 static inline bool
113 intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
114 {
115 return !((a_start >= b_end) || (b_start >= a_end));
116 }
117
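/* Array size in (full) register slots; half-precision arrays pack two
 * elements per slot, eg. a half array of length 5 needs
 * DIV_ROUND_UP(5, 2) == 3 slots:
 */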
118 static unsigned
119 reg_size_for_array(struct ir3_array *arr)
120 {
121 if (arr->half)
122 return DIV_ROUND_UP(arr->length, 2);
123
124 return arr->length;
125 }
126
127 static bool
128 instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
129 {
130 if (a->flags & IR3_INSTR_UNUSED)
131 return false;
132 return (a->ip < b->ip);
133 }
134
135 static struct ir3_instruction *
136 get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
137 int *sz, int *off)
138 {
139 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
140 struct ir3_instruction *d = NULL;
141
142 if (ctx->scalar_pass) {
143 id->defn = instr;
144 id->off = 0;
145 id->sz = 1; /* considering things as N scalar regs now */
146 }
147
148 if (id->defn) {
149 *sz = id->sz;
150 *off = id->off;
151 return id->defn;
152 }
153
154 if (instr->opc == OPC_META_COLLECT) {
155 /* What about the case where the collect is a subset of an array? We
156 * would need to find the distance between where the actual array starts
157 * and the collect.. but that probably doesn't happen currently.
158 */
159 struct ir3_register *src;
160 int dsz, doff;
161
162 /* note: don't use foreach_ssa_src as this gets called once
163 * while assigning regs (which clears SSA flag)
164 */
165 foreach_src_n (src, n, instr) {
166 struct ir3_instruction *dd;
167 if (!src->instr)
168 continue;
169
170 dd = get_definer(ctx, src->instr, &dsz, &doff);
171
172 if ((!d) || instr_before(dd, d)) {
173 d = dd;
174 *sz = dsz;
175 *off = doff - n;
176 }
177 }
178
179 } else if (instr->cp.right || instr->cp.left) {
180 /* this also covers the meta:fo case, which ends up w/ single
181 * scalar instructions for each component:
182 */
183 struct ir3_instruction *f = ir3_neighbor_first(instr);
184
185 /* by definition, the entire sequence forms one linked list
186 * of single scalar register nodes (even if some of them may
187 * be splits from a texture sample (for example) instr). We
188 * just need to walk the list to find the first element of
189 * the group to be defined (lowest ip)
190 */
191 int cnt = 0;
192
193 /* need to skip over unused in the group: */
194 while (f && (f->flags & IR3_INSTR_UNUSED)) {
195 f = f->cp.right;
196 cnt++;
197 }
198
199 while (f) {
200 if ((!d) || instr_before(f, d))
201 d = f;
202 if (f == instr)
203 *off = cnt;
204 f = f->cp.right;
205 cnt++;
206 }
207
208 *sz = cnt;
209
210 } else {
211 /* second case is looking directly at the instruction which
212 * produces multiple values (eg, texture sample), rather
213 * than the split nodes that point back to that instruction.
214 * This isn't quite right, because it may be part of a larger
215 * group, such as:
216 *
217 * sam (f32)(xyzw)r0.x, ...
218 * add r1.x, ...
219 * add r1.y, ...
220 * sam (f32)(xyzw)r2.x, r0.w <-- (r0.w, r1.x, r1.y)
221 *
222 * need to come up with a better way to handle that case.
223 */
224 if (instr->address) {
225 *sz = instr->regs[0]->size;
226 } else {
227 *sz = util_last_bit(instr->regs[0]->wrmask);
228 }
229 *off = 0;
230 d = instr;
231 }
232
233 if (d->opc == OPC_META_SPLIT) {
234 struct ir3_instruction *dd;
235 int dsz, doff;
236
237 dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
238
239 /* by definition, should come before: */
240 debug_assert(instr_before(dd, d));
241
242 *sz = MAX2(*sz, dsz);
243
244 if (instr->opc == OPC_META_SPLIT)
245 *off = MAX2(*off, instr->split.off);
246
247 d = dd;
248 }
249
250 debug_assert(d->opc != OPC_META_SPLIT);
251
252 id->defn = d;
253 id->sz = *sz;
254 id->off = *off;
255
256 return d;
257 }
258
259 static void
260 ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
261 {
262 foreach_instr (instr, &block->instr_list) {
263 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
264 if (instr->regs_count == 0)
265 continue;
266 /* couple special cases: */
267 if (writes_addr(instr) || writes_pred(instr)) {
268 id->cls = -1;
269 } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
270 id->cls = total_class_count;
271 } else {
272 /* and the normal case: */
273 id->defn = get_definer(ctx, instr, &id->sz, &id->off);
274 id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
275
276 /* this is a bit of duct-tape.. if we have a scenario like:
277 *
278 * sam (f32)(x) out.x, ...
279 * sam (f32)(x) out.y, ...
280 *
281 * Then the fanout/split meta instructions for the two different
282 * tex instructions end up grouped as left/right neighbors. The
283 * upshot is that when you get_definer() on one of the meta:fo's
284 * you get definer as the first sam with sz=2, but when you call
285 * get_definer() on either of the sam's you get itself as the
286 * definer with sz=1.
287 *
288 * (We actually avoid this scenario exactly, the neighbor links
289 * prevent one of the output mov's from being eliminated, so this
290 * hack should be enough. But probably we need to rethink how we
291 * find the "defining" instruction.)
292 *
293 * TODO how do we figure out offset properly...
294 */
295 if (id->defn != instr) {
296 struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
297 if (did->sz < id->sz) {
298 did->sz = id->sz;
299 did->cls = id->cls;
300 }
301 }
302 }
303 }
304 }
305
306 /* give each instruction a name (and ip), and count up the # of names
307 * of each class
308 */
309 static void
310 ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
311 {
312 foreach_instr (instr, &block->instr_list) {
313 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
314
315 #ifdef DEBUG
316 instr->name = ~0;
317 #endif
318
319 ctx->instr_cnt++;
320
321 if (!writes_gpr(instr))
322 continue;
323
324 if (id->defn != instr)
325 continue;
326
327 /* In scalar pass, collect/split don't get their own names,
328 * but instead inherit them from their src(s):
329 *
330 * Possibly we don't need this because of scalar_name(), but
331 * it does make the ir3_print() dumps easier to read.
332 */
333 if (ctx->scalar_pass) {
334 if (instr->opc == OPC_META_SPLIT) {
335 instr->name = instr->regs[1]->instr->name + instr->split.off;
336 continue;
337 }
338
339 if (instr->opc == OPC_META_COLLECT) {
340 instr->name = instr->regs[1]->instr->name;
341 continue;
342 }
343 }
344
345 /* arrays which don't fit in one of the pre-defined class
346 * sizes are pre-colored:
347 */
348 if ((id->cls >= 0) && (id->cls < total_class_count)) {
349 /* in the scalar pass, we generate a name for each
350 * scalar component, instr->name is the name of the
351 * first component.
352 */
353 unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
354 instr->name = ctx->class_alloc_count[id->cls];
355 ctx->class_alloc_count[id->cls] += n;
356 ctx->alloc_count += n;
357 }
358 }
359 }
360
361 static int
362 pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
363 {
364 for (unsigned i = min; i < max; i++) {
365 if (BITSET_TEST(regs, i)) {
366 return i;
367 }
368 }
369 return -1;
370 }
371
372 /* register selector for the a6xx+ merged register file: */
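/* Roughly: pick the first free reg at or above the rolling start point
 * within the class, wrap around to the base of the class if that fails,
 * and only grow max_target when nothing at all is free. Since the
 * merged file is sized in half-regs, the full-precision class searches
 * within max_target / 2.
 */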
373 static unsigned int
374 ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
375 {
376 struct ir3_ra_ctx *ctx = data;
377 unsigned int class = ra_get_node_class(ctx->g, n);
378
379 /* dimensions within the register class: */
380 unsigned max_target, start;
381
382 /* the regs bitset will include *all* of the virtual regs, but we lay
383 * out the different classes consecutively in the virtual register
384 * space. So we just need to think about the base offset of a given
385 * class within the virtual register space, and offset the register
386 * space we search within by that base offset.
387 */
388 unsigned base;
389
390 /* NOTE: this is only used in scalar pass, so the register
391 * class will be one of the scalar classes (ie. idx==0):
392 */
393 if (class == ctx->set->high_classes[0]) {
394 max_target = HIGH_CLASS_REGS(0);
395 start = 0;
396 base = ctx->set->gpr_to_ra_reg[HIGH_OFFSET][0];
397 } else if (class == ctx->set->half_classes[0]) {
398 max_target = ctx->max_target;
399 start = ctx->start_search_reg;
400 base = ctx->set->gpr_to_ra_reg[HALF_OFFSET][0];
401 } else if (class == ctx->set->classes[0]) {
402 max_target = ctx->max_target / 2;
403 start = ctx->start_search_reg;
404 base = ctx->set->gpr_to_ra_reg[0][0];
405 } else {
406 unreachable("unexpected register class!");
407 }
408
409 /* For cat4 instructions, if the src reg is already assigned, and
410 * avail to pick, use it. Because this doesn't introduce unnecessary
411 * dependencies, and it potentially avoids needing (ss) syncs for
412 * write after read hazards:
413 */
414 struct ir3_instruction *instr = name_to_instr(ctx, n);
415 if (is_sfu(instr) && instr->regs[1]->instr) {
416 struct ir3_instruction *src = instr->regs[1]->instr;
417 unsigned src_n = scalar_name(ctx, src, 0);
418
419 unsigned reg = ra_get_node_reg(ctx->g, src_n);
420
421 /* Check if the src register has been assigned yet: */
422 if (reg != NO_REG) {
423 if (BITSET_TEST(regs, reg)) {
424 return reg;
425 }
426 }
427 }
428
429 int r = pick_in_range(regs, base + start, base + max_target);
430 if (r < 0) {
431 /* wrap-around: */
432 r = pick_in_range(regs, base, base + start);
433 }
434
435 if (r < 0) {
436 /* overflow, we need to increase max_target: */
437 ctx->max_target++;
438 return ra_select_reg_merged(n, regs, data);
439 }
440
441 if (class == ctx->set->half_classes[0]) {
442 int n = r - base;
443 ctx->start_search_reg = (n + 1) % ctx->max_target;
444 } else if (class == ctx->set->classes[0]) {
445 int n = (r - base) * 2;
446 ctx->start_search_reg = (n + 1) % ctx->max_target;
447 }
448
449 return r;
450 }
451
452 static void
453 ra_init(struct ir3_ra_ctx *ctx)
454 {
455 unsigned n, base;
456
457 ir3_clear_mark(ctx->ir);
458 n = ir3_count_instructions(ctx->ir);
459
460 ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
461
462 foreach_block (block, &ctx->ir->block_list) {
463 ra_block_find_definers(ctx, block);
464 }
465
466 foreach_block (block, &ctx->ir->block_list) {
467 ra_block_name_instructions(ctx, block);
468 }
469
470 /* figure out the base register name for each class. The
471 * actual ra name is class_base[cls] + instr->name;
472 */
473 ctx->class_base[0] = 0;
474 for (unsigned i = 1; i <= total_class_count; i++) {
475 ctx->class_base[i] = ctx->class_base[i-1] +
476 ctx->class_alloc_count[i-1];
477 }
478
479 /* and vreg names for array elements: */
480 base = ctx->class_base[total_class_count];
481 foreach_array (arr, &ctx->ir->array_list) {
482 arr->base = base;
483 ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
484 base += reg_size_for_array(arr);
485 }
486 ctx->alloc_count += ctx->class_alloc_count[total_class_count];
487
488 ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
489 ralloc_steal(ctx->g, ctx->instrd);
490 ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
491 ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
492
493 /* TODO add selector callback for split (pre-a6xx) register file: */
494 if (ctx->scalar_pass && (ctx->ir->compiler->gpu_id >= 600)) {
495 ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);
496
497 ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
498 _mesa_hash_int, _mesa_key_int_equal);
499 }
500 }
501
502 /* Map the name back to instruction: */
503 static struct ir3_instruction *
504 name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
505 {
506 assert(!name_is_array(ctx, name));
507 struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
508 if (entry)
509 return entry->data;
510 unreachable("invalid instr name");
511 return NULL;
512 }
513
514 static bool
515 name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
516 {
517 return name >= ctx->class_base[total_class_count];
518 }
519
520 static struct ir3_array *
521 name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
522 {
523 assert(name_is_array(ctx, name));
524 foreach_array (arr, &ctx->ir->array_list) {
525 unsigned sz = reg_size_for_array(arr);
526 if (name < (arr->base + sz))
527 return arr;
528 }
529 unreachable("invalid array name");
530 return NULL;
531 }
532
533 static void
534 ra_destroy(struct ir3_ra_ctx *ctx)
535 {
536 ralloc_free(ctx->g);
537 }
538
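/* Liverange bookkeeping helpers: the def ip is only recorded on the
 * first write (partial writes don't move it forward), while the use ip
 * is pushed forward on every def or use. The per-block def/use bitsets
 * feed the livein/liveout calculation further down.
 */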
539 static void
540 __def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
541 struct ir3_instruction *instr)
542 {
543 debug_assert(name < ctx->alloc_count);
544 /* defined on first write: */
545 if (!ctx->def[name])
546 ctx->def[name] = instr->ip;
547 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
548 BITSET_SET(bd->def, name);
549 }
550
551 static void
552 __use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
553 struct ir3_instruction *instr)
554 {
555 debug_assert(name < ctx->alloc_count);
556 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
557 if (!BITSET_TEST(bd->def, name))
558 BITSET_SET(bd->use, name);
559 }
560
561 static void
562 ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
563 {
564 struct ir3_ra_block_data *bd;
565 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
566
567 #define def(name, instr) __def(ctx, bd, name, instr)
568 #define use(name, instr) __use(ctx, bd, name, instr)
569
570 bd = rzalloc(ctx->g, struct ir3_ra_block_data);
571
572 bd->def = rzalloc_array(bd, BITSET_WORD, bitset_words);
573 bd->use = rzalloc_array(bd, BITSET_WORD, bitset_words);
574 bd->livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
575 bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
576
577 block->data = bd;
578
579 struct ir3_instruction *first_non_input = NULL;
580 foreach_instr (instr, &block->instr_list) {
581 if (instr->opc != OPC_META_INPUT) {
582 first_non_input = instr;
583 break;
584 }
585 }
586
587 foreach_instr (instr, &block->instr_list) {
588 foreach_def (name, ctx, instr) {
589 if (name_is_array(ctx, name)) {
590 struct ir3_array *arr = name_to_array(ctx, name);
591
592 arr->start_ip = MIN2(arr->start_ip, instr->ip);
593 arr->end_ip = MAX2(arr->end_ip, instr->ip);
594
595 for (unsigned i = 0; i < arr->length; i++) {
596 unsigned name = arr->base + i;
597 if (arr->half)
598 ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
599 else
600 ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
601 }
602 } else {
603 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
604 if (is_high(instr)) {
605 ra_set_node_class(ctx->g, name,
606 ctx->set->high_classes[id->cls - HIGH_OFFSET]);
607 } else if (is_half(instr)) {
608 ra_set_node_class(ctx->g, name,
609 ctx->set->half_classes[id->cls - HALF_OFFSET]);
610 } else {
611 ra_set_node_class(ctx->g, name,
612 ctx->set->classes[id->cls]);
613 }
614 }
615
616 def(name, instr);
617
618 if ((instr->opc == OPC_META_INPUT) && first_non_input)
619 use(name, first_non_input);
620 }
621
622 foreach_use (name, ctx, instr) {
623 if (name_is_array(ctx, name)) {
624 struct ir3_array *arr = name_to_array(ctx, name);
625
626 arr->start_ip = MIN2(arr->start_ip, instr->ip);
627 arr->end_ip = MAX2(arr->end_ip, instr->ip);
628
629 /* NOTE: arrays are not SSA so unconditionally
630 * set use bit:
631 */
632 BITSET_SET(bd->use, name);
633 }
634
635 use(name, instr);
636 }
637
638 foreach_name (name, ctx, instr) {
639 /* split/collect instructions share names with
640 * real instructions, so they skip the hashtable:
641 */
642 if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
643 (instr->opc == OPC_META_COLLECT))) {
644 /* this is slightly annoying, we can't just use an
645 * integer on the stack
646 */
647 unsigned *key = ralloc(ctx->name_to_instr, unsigned);
648 *key = name;
649 debug_assert(!_mesa_hash_table_search(ctx->name_to_instr, key));
650 _mesa_hash_table_insert(ctx->name_to_instr, key, instr);
651 }
652 }
653 }
654 }
655
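/* One step of the usual backwards liveness dataflow, iterated to a
 * fixed point by the caller:
 *
 *   livein(b)  = use(b) | (liveout(b) & ~def(b))
 *   liveout(b) = union of livein(s) over each successor s of b
 *
 * Returns true if anything changed, so the caller knows to iterate
 * again.
 */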
656 static bool
657 ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
658 {
659 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
660 bool progress = false;
661
662 foreach_block (block, &ctx->ir->block_list) {
663 struct ir3_ra_block_data *bd = block->data;
664
665 /* update livein: */
666 for (unsigned i = 0; i < bitset_words; i++) {
667 /* anything used but not def'd within a block is
668 * by definition a live value coming into the block:
669 */
670 BITSET_WORD new_livein =
671 (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));
672
673 if (new_livein & ~bd->livein[i]) {
674 bd->livein[i] |= new_livein;
675 progress = true;
676 }
677 }
678
679 /* update liveout: */
680 for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
681 struct ir3_block *succ = block->successors[j];
682 struct ir3_ra_block_data *succ_bd;
683
684 if (!succ)
685 continue;
686
687 succ_bd = succ->data;
688
689 for (unsigned i = 0; i < bitset_words; i++) {
690 /* add anything that is livein in a successor block
691 * to our liveout:
692 */
693 BITSET_WORD new_liveout =
694 (succ_bd->livein[i] & ~bd->liveout[i]);
695
696 if (new_liveout) {
697 bd->liveout[i] |= new_liveout;
698 progress = true;
699 }
700 }
701 }
702 }
703
704 return progress;
705 }
706
707 static void
708 print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
709 {
710 bool first = true;
711 debug_printf("RA: %s:", name);
712 for (unsigned i = 0; i < cnt; i++) {
713 if (BITSET_TEST(bs, i)) {
714 if (!first)
715 debug_printf(",");
716 debug_printf(" %04u", i);
717 first = false;
718 }
719 }
720 debug_printf("\n");
721 }
722
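/* Build the interference graph: compute per-block def/use and array
 * start/end ips, iterate livein/liveout to a fixed point, extend each
 * name's def/use range across the block boundaries it is live over,
 * and finally add an interference edge between every pair of names
 * whose ranges overlap (a simple O(n^2) sweep over all names).
 */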
723 static void
724 ra_add_interference(struct ir3_ra_ctx *ctx)
725 {
726 struct ir3 *ir = ctx->ir;
727
728 /* initialize array live ranges: */
729 foreach_array (arr, &ir->array_list) {
730 arr->start_ip = ~0;
731 arr->end_ip = 0;
732 }
733
734 /* compute live ranges (use/def) on a block level, also updating
735 * block's def/use bitmasks (used below to calculate per-block
736 * livein/liveout):
737 */
738 foreach_block (block, &ir->block_list) {
739 ra_block_compute_live_ranges(ctx, block);
740 }
741
742 /* update per-block livein/liveout: */
743 while (ra_compute_livein_liveout(ctx)) {}
744
745 if (RA_DEBUG) {
746 d("AFTER LIVEIN/OUT:");
747 foreach_block (block, &ir->block_list) {
748 struct ir3_ra_block_data *bd = block->data;
749 d("block%u:", block_id(block));
750 print_bitset(" def", bd->def, ctx->alloc_count);
751 print_bitset(" use", bd->use, ctx->alloc_count);
752 print_bitset(" l/i", bd->livein, ctx->alloc_count);
753 print_bitset(" l/o", bd->liveout, ctx->alloc_count);
754 }
755 foreach_array (arr, &ir->array_list) {
756 d("array%u:", arr->id);
757 d(" length: %u", arr->length);
758 d(" start_ip: %u", arr->start_ip);
759 d(" end_ip: %u", arr->end_ip);
760 }
761 d("INSTRUCTION VREG NAMES:");
762 foreach_block (block, &ctx->ir->block_list) {
763 foreach_instr (instr, &block->instr_list) {
764 if (!ctx->instrd[instr->ip].defn)
765 continue;
766 if (!writes_gpr(instr))
767 continue;
768 di(instr, "%04u", scalar_name(ctx, instr, 0));
769 }
770 }
771 d("ARRAY VREG NAMES:");
772 foreach_array (arr, &ctx->ir->array_list) {
773 d("%04u: arr%u", arr->base, arr->id);
774 }
775 }
776
777 /* extend start/end ranges based on livein/liveout info from cfg: */
778 foreach_block (block, &ir->block_list) {
779 struct ir3_ra_block_data *bd = block->data;
780
781 for (unsigned i = 0; i < ctx->alloc_count; i++) {
782 if (BITSET_TEST(bd->livein, i)) {
783 ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
784 ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
785 }
786
787 if (BITSET_TEST(bd->liveout, i)) {
788 ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
789 ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
790 }
791 }
792
793 foreach_array (arr, &ctx->ir->array_list) {
794 for (unsigned i = 0; i < arr->length; i++) {
795 if (BITSET_TEST(bd->livein, i + arr->base)) {
796 arr->start_ip = MIN2(arr->start_ip, block->start_ip);
797 }
798 if (BITSET_TEST(bd->liveout, i + arr->base)) {
799 arr->end_ip = MAX2(arr->end_ip, block->end_ip);
800 }
801 }
802 }
803 }
804
805 for (unsigned i = 0; i < ctx->alloc_count; i++) {
806 for (unsigned j = 0; j < ctx->alloc_count; j++) {
807 if (intersects(ctx->def[i], ctx->use[i],
808 ctx->def[j], ctx->use[j])) {
809 ra_add_node_interference(ctx->g, i, j);
810 }
811 }
812 }
813 }
814
815 /* some instructions need fix-up if dst register is half precision: */
816 static void fixup_half_instr_dst(struct ir3_instruction *instr)
817 {
818 switch (opc_cat(instr->opc)) {
819 case 1: /* move instructions */
820 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
821 break;
822 case 3:
823 switch (instr->opc) {
824 case OPC_MAD_F32:
825 /* mad.f32 is still valid when the dest is half and the srcs are full,
826 * eg. mad.f32 hr0, r0.x, r0.y, r0.z
827 */
828 if (instr->regs[1]->flags & IR3_REG_HALF)
829 instr->opc = OPC_MAD_F16;
830 break;
831 case OPC_SEL_B32:
832 instr->opc = OPC_SEL_B16;
833 break;
834 case OPC_SEL_S32:
835 instr->opc = OPC_SEL_S16;
836 break;
837 case OPC_SEL_F32:
838 instr->opc = OPC_SEL_F16;
839 break;
840 case OPC_SAD_S32:
841 instr->opc = OPC_SAD_S16;
842 break;
843 /* instructions may already be fixed up: */
844 case OPC_MAD_F16:
845 case OPC_SEL_B16:
846 case OPC_SEL_S16:
847 case OPC_SEL_F16:
848 case OPC_SAD_S16:
849 break;
850 default:
851 assert(0);
852 break;
853 }
854 break;
855 case 4:
856 switch (instr->opc) {
857 case OPC_RSQ:
858 instr->opc = OPC_HRSQ;
859 break;
860 case OPC_LOG2:
861 instr->opc = OPC_HLOG2;
862 break;
863 case OPC_EXP2:
864 instr->opc = OPC_HEXP2;
865 break;
866 default:
867 break;
868 }
869 break;
870 case 5:
871 instr->cat5.type = half_type(instr->cat5.type);
872 break;
873 }
874 }
875 /* some instructions need fix-up if src register is half precision: */
876 static void fixup_half_instr_src(struct ir3_instruction *instr)
877 {
878 switch (instr->opc) {
879 case OPC_MOV:
880 instr->cat1.src_type = half_type(instr->cat1.src_type);
881 break;
882 default:
883 break;
884 }
885 }
886
887 /* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
888 * array access(es) which do not have any previous access to depend
889 * on from a scheduling point of view
890 */
891 static void
892 reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
893 struct ir3_instruction *instr)
894 {
895 struct ir3_ra_instr_data *id;
896
897 if (reg->flags & IR3_REG_ARRAY) {
898 struct ir3_array *arr =
899 ir3_lookup_array(ctx->ir, reg->array.id);
900 unsigned name = arr->base + reg->array.offset;
901 unsigned r = ra_get_node_reg(ctx->g, name);
902 unsigned num = ctx->set->ra_reg_to_gpr[r];
903
904 if (reg->flags & IR3_REG_RELATIV) {
905 reg->array.offset = num;
906 } else {
907 reg->num = num;
908 reg->flags &= ~IR3_REG_SSA;
909 }
910
911 reg->flags &= ~IR3_REG_ARRAY;
912 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
913 unsigned first_component = 0;
914
915 /* Special case for tex instructions, which may use the wrmask
916 * to mask off the first component(s). In the scalar pass,
917 * this means the masked off component(s) are not def'd/use'd,
918 * so we get a bogus value when we ask the register_allocate
919 * algo to get the assigned reg for the unused/untouched
920 * component. So we need to consider the first used component:
921 */
922 if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
923 unsigned n = ffs(id->defn->regs[0]->wrmask);
924 debug_assert(n > 0);
925 first_component = n - 1;
926 }
927
928 unsigned name = scalar_name(ctx, id->defn, first_component);
929 unsigned r = ra_get_node_reg(ctx->g, name);
930 unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
931
932 debug_assert(!(reg->flags & IR3_REG_RELATIV));
933
934 debug_assert(num >= first_component);
935
936 if (is_high(id->defn))
937 num += FIRST_HIGH_REG;
938
939 reg->num = num - first_component;
940
941 reg->flags &= ~IR3_REG_SSA;
942
943 if (is_half(id->defn))
944 reg->flags |= IR3_REG_HALF;
945 }
946 }
947
948 static void
949 account_assignment(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
950 {
951 struct ir3_ra_instr_data *id;
952 struct ir3_register *dst = instr->regs[0];
953 unsigned max;
954
955 if (is_high(instr))
956 return;
957
958 if (dst->flags & IR3_REG_ARRAY) {
959 struct ir3_array *arr =
960 ir3_lookup_array(ctx->ir, dst->array.id);
961 max = arr->reg + arr->length;
962 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
963 unsigned name = scalar_name(ctx, id->defn, 0);
964 unsigned r = ra_get_node_reg(ctx->g, name);
965 max = ctx->set->ra_reg_to_gpr[r] + id->off + dest_regs(id->defn);
966 } else {
967 return;
968 }
969
970 if (is_half(instr)) {
971 ctx->max_half_assigned = MAX2(ctx->max_half_assigned, max);
972 } else {
973 ctx->max_assigned = MAX2(ctx->max_assigned, max);
974 }
975 }
976
977 /* helper to determine which regs to assign in which pass: */
978 static bool
979 should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
980 {
981 if ((instr->opc == OPC_META_SPLIT) ||
982 (instr->opc == OPC_META_COLLECT))
983 return !ctx->scalar_pass;
984 return ctx->scalar_pass;
985 }
986
987 static void
988 ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
989 {
990 foreach_instr (instr, &block->instr_list) {
991 struct ir3_register *reg;
992
993 if (writes_gpr(instr)) {
994 account_assignment(ctx, instr);
995 if (should_assign(ctx, instr)) {
996 reg_assign(ctx, instr->regs[0], instr);
997 if (instr->regs[0]->flags & IR3_REG_HALF)
998 fixup_half_instr_dst(instr);
999 }
1000 }
1001
1002 foreach_src_n (reg, n, instr) {
1003 struct ir3_instruction *src = reg->instr;
1004
1005 if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
1006 continue;
1007
1008 if (src && should_assign(ctx, instr))
1009 reg_assign(ctx, src->regs[0], src);
1010
1011 /* Note: reg->instr could be null for IR3_REG_ARRAY */
1012 if (src || (reg->flags & IR3_REG_ARRAY))
1013 reg_assign(ctx, instr->regs[n+1], src);
1014
1015 if (instr->regs[n+1]->flags & IR3_REG_HALF)
1016 fixup_half_instr_src(instr);
1017 }
1018 }
1019
1020 /* We need to pre-color outputs for the scalar pass in
1021 * ra_precolor_assigned(), so we need to actually assign
1022 * them in the first pass:
1023 */
1024 if (!ctx->scalar_pass) {
1025 struct ir3_instruction *in, *out;
1026
1027 foreach_input (in, ctx->ir) {
1028 reg_assign(ctx, in->regs[0], in);
1029 }
1030 foreach_output (out, ctx->ir) {
1031 reg_assign(ctx, out->regs[0], out);
1032 }
1033 }
1034 }
1035
1036 /* handle pre-colored registers. This includes "arrays" (which could be of
1037 * length 1, used for phi webs lowered to registers in nir), as well as
1038 * special shader input values that need to be pinned to certain registers.
1039 */
1040 static void
1041 ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
1042 {
1043 unsigned num_precolor = 0;
1044 for (unsigned i = 0; i < nprecolor; i++) {
1045 if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
1046 struct ir3_instruction *instr = precolor[i];
1047
1048 if (instr->regs[0]->num == INVALID_REG)
1049 continue;
1050
1051 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1052
1053 debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
1054
1055 /* only consider the first component: */
1056 if (id->off > 0)
1057 continue;
1058
1059 if (ctx->scalar_pass && !should_assign(ctx, instr))
1060 continue;
1061
1062 /* 'base' is in scalar (class 0) but we need to map that to
1063 * the conflicting register of the appropriate class (ie.
1064 * input could be vec2/vec3/etc)
1065 *
1066 * Note that the higher class (larger than scalar) regs
1067 * are setup to conflict with others in the same class,
1068 * so for example, R1 (scalar) is also the first component
1069 * of D1 (vec2/double):
1070 *
1071 * Single (base) | Double
1072 * --------------+---------------
1073 * R0 | D0
1074 * R1 | D0 D1
1075 * R2 | D1 D2
1076 * R3 | D2
1077 * .. and so on..
1078 */
1079 unsigned regid = instr->regs[0]->num;
1080 unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1081 unsigned name = ra_name(ctx, id);
1082 ra_set_node_reg(ctx->g, name, reg);
1083 num_precolor = MAX2(regid, num_precolor);
1084 }
1085 }
1086
1087 /* pre-assign array elements:
1088 *
1089 * TODO this is going to need some work for half-precision.. possibly
1090 * this is easier on a6xx, where we can just divide array size by two?
1091 * But on a5xx and earlier it will need to track two bases.
1092 */
1093 foreach_array (arr, &ctx->ir->array_list) {
1094 unsigned base = 0;
1095
1096 if (arr->end_ip == 0)
1097 continue;
1098
1099 /* figure out what else we conflict with which has already
1100 * been assigned:
1101 */
1102 retry:
1103 foreach_array (arr2, &ctx->ir->array_list) {
1104 if (arr2 == arr)
1105 break;
1106 if (arr2->end_ip == 0)
1107 continue;
1108 /* if it intersects with liverange AND register range.. */
1109 if (intersects(arr->start_ip, arr->end_ip,
1110 arr2->start_ip, arr2->end_ip) &&
1111 intersects(base, base + reg_size_for_array(arr),
1112 arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
1113 base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
1114 goto retry;
1115 }
1116 }
1117
1118 /* also need to not conflict with any pre-assigned inputs: */
1119 for (unsigned i = 0; i < nprecolor; i++) {
1120 struct ir3_instruction *instr = precolor[i];
1121
1122 if (!instr || (instr->flags & IR3_INSTR_UNUSED))
1123 continue;
1124
1125 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1126
1127 /* only consider the first component: */
1128 if (id->off > 0)
1129 continue;
1130
1131 unsigned name = ra_name(ctx, id);
1132 unsigned regid = instr->regs[0]->num;
1133
1134 /* Check if array intersects with liverange AND register
1135 * range of the input:
1136 */
1137 if (intersects(arr->start_ip, arr->end_ip,
1138 ctx->def[name], ctx->use[name]) &&
1139 intersects(base, base + reg_size_for_array(arr),
1140 regid, regid + class_sizes[id->cls])) {
1141 base = MAX2(base, regid + class_sizes[id->cls]);
1142 goto retry;
1143 }
1144 }
1145
1146 arr->reg = base;
1147
1148 for (unsigned i = 0; i < arr->length; i++) {
1149 unsigned name, reg;
1150
1151 if (arr->half) {
1152 /* This isn't needed on generations older than a6xx,
1153 * since there's no conflict between full regs and half regs
1154 * on them.
1155 *
1156 * TODO Presumably "base" could start from 0 separately
1157 * for half regs of arrays on older generations.
1158 */
1159 unsigned base_half = base * 2 + i;
1160 reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
1161 base = base_half / 2 + 1;
1162 } else {
1163 reg = ctx->set->gpr_to_ra_reg[0][base++];
1164 }
1165
1166 name = arr->base + i;
1167 ra_set_node_reg(ctx->g, name, reg);
1168 }
1169 }
1170
1171 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1172 foreach_array (arr, &ctx->ir->array_list) {
1173 unsigned first = arr->reg;
1174 unsigned last = arr->reg + arr->length - 1;
1175 debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
1176 (first >> 2), "xyzw"[first & 0x3],
1177 (last >> 2), "xyzw"[last & 0x3]);
1178 }
1179 }
1180 }
1181
1182 static void
1183 precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1184 {
1185 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1186 unsigned n = dest_regs(instr);
1187 for (unsigned i = 0; i < n; i++) {
1188 /* tex instructions actually have a wrmask, and
1189 * don't touch masked out components. So we
1190 * shouldn't precolor them:
1191 */
1192 if (is_tex_or_prefetch(instr) &&
1193 !(instr->regs[0]->wrmask & (1 << i)))
1194 continue;
1195
1196 unsigned name = scalar_name(ctx, instr, i);
1197 unsigned regid = instr->regs[0]->num + i;
1198
1199 if (instr->regs[0]->flags & IR3_REG_HIGH)
1200 regid -= FIRST_HIGH_REG;
1201
1202 unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1203 ra_set_node_reg(ctx->g, name, vreg);
1204 }
1205 }
1206
1207 /* pre-color non-scalar registers based on the registers assigned in the
1208 * previous pass. Do this by actually looking at the fanout instructions.
1209 */
1210 static void
1211 ra_precolor_assigned(struct ir3_ra_ctx *ctx)
1212 {
1213 debug_assert(ctx->scalar_pass);
1214
1215 foreach_block (block, &ctx->ir->block_list) {
1216 foreach_instr (instr, &block->instr_list) {
1217
1218 if ((instr->opc != OPC_META_SPLIT) &&
1219 (instr->opc != OPC_META_COLLECT))
1220 continue;
1221
1222 precolor(ctx, instr);
1223
1224 struct ir3_register *src;
1225 foreach_src (src, instr) {
1226 if (!src->instr)
1227 continue;
1228 precolor(ctx, src->instr);
1229 }
1230 }
1231 }
1232 }
1233
1234 static int
1235 ra_alloc(struct ir3_ra_ctx *ctx)
1236 {
1237 if (!ra_allocate(ctx->g))
1238 return -1;
1239
1240 foreach_block (block, &ctx->ir->block_list) {
1241 ra_block_alloc(ctx, block);
1242 }
1243
1244 return 0;
1245 }
1246
1247 /* if we end up with split/collect instructions with non-matching src
1248 * and dest regs, that means something has gone wrong. Which makes it
1249 * a pretty good sanity check.
1250 */
1251 static void
1252 ra_sanity_check(struct ir3 *ir)
1253 {
1254 foreach_block (block, &ir->block_list) {
1255 foreach_instr (instr, &block->instr_list) {
1256 if (instr->opc == OPC_META_SPLIT) {
1257 struct ir3_register *dst = instr->regs[0];
1258 struct ir3_register *src = instr->regs[1];
1259 debug_assert(dst->num == (src->num + instr->split.off));
1260 } else if (instr->opc == OPC_META_COLLECT) {
1261 struct ir3_register *dst = instr->regs[0];
1262 struct ir3_register *src;
1263
1264 foreach_src_n (src, n, instr) {
1265 debug_assert(dst->num == (src->num - n));
1266 }
1267 }
1268 }
1269 }
1270 }
1271
1272 /* Target is calculated in terms of half-regs (with a full reg
1273 * consisting of two half-regs).
1274 */
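/* For example (numbers purely illustrative): if the first pass peaked
 * at 25 full regs and 10 half regs, t = MAX2(2 * 25, 10) = 50 half-regs;
 * with latency to hide, 50 > 6 * vec4 (48), so the target gets trimmed
 * by one vec4 to 42.
 */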
1275 static void
1276 ra_calc_merged_register_target(struct ir3_ra_ctx *ctx)
1277 {
1278 const unsigned vec4 = 2 * 4; // 8 half-regs
1279 unsigned t = MAX2(2 * ctx->max_assigned, ctx->max_half_assigned);
1280
1281 /* second RA pass may have saved some regs, let's try to reclaim
1282 * the benefit by adjusting the target downwards slightly:
1283 */
1284 if (ir3_has_latency_to_hide(ctx->ir)) {
1285 if (t > 8 * vec4) {
1286 t -= 2 * vec4;
1287 } else if (t > 6 * vec4) {
1288 t -= vec4;
1289 }
1290 }
1291
1292 ctx->max_target = t;
1293 }
1294
1295 static int
1296 ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1297 unsigned nprecolor, bool scalar_pass, unsigned *target)
1298 {
1299 struct ir3_ra_ctx ctx = {
1300 .v = v,
1301 .ir = v->ir,
1302 .set = v->ir->compiler->set,
1303 .scalar_pass = scalar_pass,
1304 };
1305 int ret;
1306
1307 if (scalar_pass) {
1308 ctx.max_target = *target;
1309 }
1310
1311 ra_init(&ctx);
1312 ra_add_interference(&ctx);
1313 ra_precolor(&ctx, precolor, nprecolor);
1314 if (scalar_pass)
1315 ra_precolor_assigned(&ctx);
1316 ret = ra_alloc(&ctx);
1317 ra_destroy(&ctx);
1318
1319 /* In the first pass, calculate the target register usage used in the
1320 * second (scalar) pass:
1321 */
1322 if (!scalar_pass) {
1323 /* TODO: round-robin support for pre-a6xx: */
1324 if (ctx.ir->compiler->gpu_id >= 600) {
1325 ra_calc_merged_register_target(&ctx);
1326 }
1327 *target = ctx.max_target;
1328 }
1329
1330 return ret;
1331 }
1332
1333 int
1334 ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1335 unsigned nprecolor)
1336 {
1337 unsigned target = 0;
1338 int ret;
1339
1340 /* First pass, assign the vecN (non-scalar) registers: */
1341 ret = ir3_ra_pass(v, precolor, nprecolor, false, &target);
1342 if (ret)
1343 return ret;
1344
1345 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1346 printf("AFTER RA (1st pass):\n");
1347 ir3_print(v->ir);
1348 }
1349
1350 /* Second pass, assign the scalar registers: */
1351 ret = ir3_ra_pass(v, precolor, nprecolor, true, &target);
1352 if (ret)
1353 return ret;
1354
1355 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1356 printf("AFTER RA (2nd pass):\n");
1357 ir3_print(v->ir);
1358 }
1359
1360 #ifdef DEBUG
1361 # define SANITY_CHECK DEBUG
1362 #else
1363 # define SANITY_CHECK 0
1364 #endif
1365 if (SANITY_CHECK)
1366 ra_sanity_check(v->ir);
1367
1368 return ret;
1369 }