freedreno/ir3/ra: add helper to map name to instruction
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_math.h"
28 #include "util/register_allocate.h"
29 #include "util/ralloc.h"
30 #include "util/bitset.h"
31
32 #include "ir3.h"
33 #include "ir3_compiler.h"
34 #include "ir3_ra.h"
35
36
37 #ifdef DEBUG
38 #define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)
39 #else
40 #define RA_DEBUG 0
41 #endif
42 #define d(fmt, ...) do { if (RA_DEBUG) { \
43 printf("RA: "fmt"\n", ##__VA_ARGS__); \
44 } } while (0)
45
46 #define di(instr, fmt, ...) do { if (RA_DEBUG) { \
47 printf("RA: "fmt": ", ##__VA_ARGS__); \
48 ir3_print_instr(instr); \
49 } } while (0)
50
51 /*
52 * Register Assignment:
53 *
54 * Uses the register_allocate util, which implements a graph coloring
55 * algorithm with interference classes. To handle the cases where we need
56 * consecutive registers (for example, texture sample instructions),
57 * we model these as larger (double/quad/etc) registers which conflict
58 * with the corresponding registers in other classes.
59 *
60 * We also create additional classes for half-regs, which do not
61 * conflict with the full-reg classes. We do need at least sizes
62 * 1-4 (to deal w/ texture sample instructions that output to
63 * half-regs). At the moment we don't create the higher order
64 * half-reg classes, as half-regs frequently do not have enough
65 * precision for texture coords at higher resolutions.
66 *
67 * There are some additional cases that we need to handle specially,
68 * as the graph coloring algo doesn't understand "partial writes".
69 * For example, a sequence like:
70 *
71 * add r0.z, ...
72 * sam (f32)(xy)r0.x, ...
73 * ...
74 * sam (f32)(xyzw)r0.w, r0.x, ... ; 3d texture, so r0.xyz are coord
75 *
76 * In this scenario, we treat r0.xyz as class size 3, which is written
77 * (from a use/def perspective) at the 'add' instruction and ignore the
78 * subsequent partial writes to r0.xy. So the 'add r0.z, ...' is the
79 * defining instruction, as it is the first to partially write r0.xyz.
80 *
81 * To address the fragmentation that this can potentially cause, a
82 * two pass register allocation is used. After the first pass the
83 * assignment of scalars is discarded, but the assignment of vecN (for
84 * N > 1) is used to pre-color in the second pass, which considers
85 * only scalars.
86 *
87 * Arrays of arbitrary size are handled via pre-coloring a consecutive
88 * sequence of registers. Additional scalar (single component) reg
89 * names are allocated starting at ctx->class_base[total_class_count]
90 * (see arr->base), which are pre-colored. In the use/def graph direct
91 * access is treated as a single element use/def, and indirect access
92 * is treated as use or def of all array elements. (Only the first
93 * def is tracked, in case of multiple indirect writes, etc.)
94 *
95 * TODO arrays that fit in one of the pre-defined class sizes should
96 * not need to be pre-colored, but instead could be given a normal
97 * vreg name. (Ignoring this for now since it is a good way to work
98 * out the kinks with arbitrary sized arrays.)
99 *
100 * TODO might be easier for debugging to split this into two passes,
101 * the first assigning vreg names in a way that we could ir3_print()
102 * the result.
103 */
104
105
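/* A rough sketch (with hypothetical counts) of how the virtual register
 * name space ends up being laid out by ra_init() and
 * ra_block_name_instructions():
 *
 *    [class_base[0] .. class_base[1])                  names for class 0 defs
 *    [class_base[1] .. class_base[2])                  names for class 1 defs
 *     ...
 *    [class_base[total_class_count] .. alloc_count)    array elements
 *
 * ie. the ra name of a (non-array) def is class_base[id->cls] +
 * instr->name, and array element i is arr->base + i.
 */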
106 static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);
107
108 /* does it conflict? */
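/* (eg. intersects(0, 5, 5, 9) is false: a range that ends exactly where
 * another begins is not counted as a conflict)
 */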
109 static inline bool
110 intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
111 {
112 return !((a_start >= b_end) || (b_start >= a_end));
113 }
114
115 static unsigned
116 reg_size_for_array(struct ir3_array *arr)
117 {
118 if (arr->half)
119 return DIV_ROUND_UP(arr->length, 2);
120
121 return arr->length;
122 }
123
124 static bool
125 instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
126 {
127 if (a->flags & IR3_INSTR_UNUSED)
128 return false;
129 return (a->ip < b->ip);
130 }
131
132 static struct ir3_instruction *
133 get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
134 int *sz, int *off)
135 {
136 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
137 struct ir3_instruction *d = NULL;
138
139 if (ctx->scalar_pass) {
140 id->defn = instr;
141 id->off = 0;
142 id->sz = 1; /* considering things as N scalar regs now */
143 }
144
145 if (id->defn) {
146 *sz = id->sz;
147 *off = id->off;
148 return id->defn;
149 }
150
151 if (instr->opc == OPC_META_COLLECT) {
152 /* What about the case where the collect is a subset of an array?
153 * We'd need to find the distance between where the actual array
154 * starts and the collect.. that probably doesn't happen currently.
155 */
156 struct ir3_register *src;
157 int dsz, doff;
158
159 /* note: don't use foreach_ssa_src as this gets called once
160 * while assigning regs (which clears SSA flag)
161 */
162 foreach_src_n (src, n, instr) {
163 struct ir3_instruction *dd;
164 if (!src->instr)
165 continue;
166
167 dd = get_definer(ctx, src->instr, &dsz, &doff);
168
169 if ((!d) || instr_before(dd, d)) {
170 d = dd;
171 *sz = dsz;
172 *off = doff - n;
173 }
174 }
175
176 } else if (instr->cp.right || instr->cp.left) {
177 /* covers also the meta:fo case, which ends up w/ single
178 * scalar instructions for each component:
179 */
180 struct ir3_instruction *f = ir3_neighbor_first(instr);
181
182 /* by definition, the entire sequence forms one linked list
183 * of single scalar register nodes (even if some of them may
184 * be splits from, for example, a texture sample instr). We
185 * just need to walk the list finding the first element of
186 * the group defined (lowest ip)
187 */
188 int cnt = 0;
189
190 /* need to skip over unused in the group: */
191 while (f && (f->flags & IR3_INSTR_UNUSED)) {
192 f = f->cp.right;
193 cnt++;
194 }
195
196 while (f) {
197 if ((!d) || instr_before(f, d))
198 d = f;
199 if (f == instr)
200 *off = cnt;
201 f = f->cp.right;
202 cnt++;
203 }
204
205 *sz = cnt;
206
207 } else {
208 /* second case is looking directly at the instruction which
209 * produces multiple values (eg, texture sample), rather
210 * than the split nodes that point back to that instruction.
211 * This isn't quite right, because it may be part of a larger
212 * group, such as:
213 *
214 * sam (f32)(xyzw)r0.x, ...
215 * add r1.x, ...
216 * add r1.y, ...
217 * sam (f32)(xyzw)r2.x, r0.w <-- (r0.w, r1.x, r1.y)
218 *
219 * need to come up with a better way to handle that case.
220 */
221 if (instr->address) {
222 *sz = instr->regs[0]->size;
223 } else {
224 *sz = util_last_bit(instr->regs[0]->wrmask);
225 }
226 *off = 0;
227 d = instr;
228 }
229
230 if (d->opc == OPC_META_SPLIT) {
231 struct ir3_instruction *dd;
232 int dsz, doff;
233
234 dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
235
236 /* by definition, should come before: */
237 debug_assert(instr_before(dd, d));
238
239 *sz = MAX2(*sz, dsz);
240
241 if (instr->opc == OPC_META_SPLIT)
242 *off = MAX2(*off, instr->split.off);
243
244 d = dd;
245 }
246
247 debug_assert(d->opc != OPC_META_SPLIT);
248
249 id->defn = d;
250 id->sz = *sz;
251 id->off = *off;
252
253 return d;
254 }
255
256 static void
257 ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
258 {
259 foreach_instr (instr, &block->instr_list) {
260 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
261 if (instr->regs_count == 0)
262 continue;
263 /* couple special cases: */
264 if (writes_addr(instr) || writes_pred(instr)) {
265 id->cls = -1;
266 } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
267 id->cls = total_class_count;
268 } else {
269 /* and the normal case: */
270 id->defn = get_definer(ctx, instr, &id->sz, &id->off);
271 id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
272
273 /* this is a bit of duct-tape.. if we have a scenario like:
274 *
275 * sam (f32)(x) out.x, ...
276 * sam (f32)(x) out.y, ...
277 *
278 * Then the fanout/split meta instructions for the two different
279 * tex instructions end up grouped as left/right neighbors. The
280 * upshot is that when you get_definer() on one of the meta:fo's
281 * you get the definer as the first sam with sz=2, but when you call
282 * get_definer() on either of the sam's you get itself as the
283 * definer with sz=1.
284 *
285 * (We actually avoid this scenario exactly, the neighbor links
286 * prevent one of the output mov's from being eliminated, so this
287 * hack should be enough. But probably we need to rethink how we
288 * find the "defining" instruction.)
289 *
290 * TODO how do we figure out offset properly...
291 */
292 if (id->defn != instr) {
293 struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
294 if (did->sz < id->sz) {
295 did->sz = id->sz;
296 did->cls = id->cls;
297 }
298 }
299 }
300 }
301 }
302
303 /* give each instruction a name (and ip), and count up the # of names
304 * of each class
305 */
306 static void
307 ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
308 {
309 foreach_instr (instr, &block->instr_list) {
310 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
311
312 #ifdef DEBUG
313 instr->name = ~0;
314 #endif
315
316 ctx->instr_cnt++;
317
318 if (!writes_gpr(instr))
319 continue;
320
321 if (id->defn != instr)
322 continue;
323
324 /* In scalar pass, collect/split don't get their own names,
325 * but instead inherit them from their src(s):
326 *
327 * Possibly we don't need this because of scalar_name(), but
328 * it does make the ir3_print() dumps easier to read.
329 */
330 if (ctx->scalar_pass) {
331 if (instr->opc == OPC_META_SPLIT) {
332 instr->name = instr->regs[1]->instr->name + instr->split.off;
333 continue;
334 }
335
336 if (instr->opc == OPC_META_COLLECT) {
337 instr->name = instr->regs[1]->instr->name;
338 continue;
339 }
340 }
341
342 /* arrays which don't fit in one of the pre-defined class
343 * sizes are pre-colored:
344 */
345 if ((id->cls >= 0) && (id->cls < total_class_count)) {
346 /* in the scalar pass, we generate a name for each
347 * scalar component, instr->name is the name of the
348 * first component.
349 */
350 unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
351 instr->name = ctx->class_alloc_count[id->cls];
352 ctx->class_alloc_count[id->cls] += n;
353 ctx->alloc_count += n;
354 }
355 }
356 }
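/* For example (hypothetical): an instruction writing a vec3 gets, in the
 * scalar pass, three consecutive names (instr->name .. instr->name + 2),
 * one per component, addressed via scalar_name(ctx, instr, i).  In the
 * first (non-scalar) pass the same vec3 is a single name in the size-3
 * class.
 */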
357
358 static int
359 pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
360 {
361 for (unsigned i = min; i < max; i++) {
362 if (BITSET_TEST(regs, i)) {
363 return i;
364 }
365 }
366 return -1;
367 }
368
369 /* register selector for the a6xx+ merged register file: */
370 static unsigned int
371 ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
372 {
373 struct ir3_ra_ctx *ctx = data;
374 unsigned int class = ra_get_node_class(ctx->g, n);
375
376 /* dimensions within the register class: */
377 unsigned max_target, start;
378
379 /* the regs bitset will include *all* of the virtual regs, but we lay
380 * out the different classes consecutively in the virtual register
381 * space. So we just need to think about the base offset of a given
382 * class within the virtual register space, and offset the register
383 * space we search within by that base offset.
384 */
385 unsigned base;
386
387 /* NOTE: this is only used in scalar pass, so the register
388 * class will be one of the scalar classes (ie. idx==0):
389 */
390 if (class == ctx->set->high_classes[0]) {
391 max_target = HIGH_CLASS_REGS(0);
392 start = 0;
393 base = ctx->set->gpr_to_ra_reg[HIGH_OFFSET][0];
394 } else if (class == ctx->set->half_classes[0]) {
395 max_target = ctx->max_target;
396 start = ctx->start_search_reg;
397 base = ctx->set->gpr_to_ra_reg[HALF_OFFSET][0];
398 } else if (class == ctx->set->classes[0]) {
399 max_target = ctx->max_target / 2;
400 start = ctx->start_search_reg;
401 base = ctx->set->gpr_to_ra_reg[0][0];
402 } else {
403 unreachable("unexpected register class!");
404 }
405
406 /* For cat4 instructions, if the src reg is already assigned, and
407 * avail to pick, use it. Because this doesn't introduce unnecessary
408 * dependencies, and it potentially avoids needing (ss) syncs
409 * for write after read hazards:
410 */
411 struct ir3_instruction *instr = name_to_instr(ctx, n);
412 if (is_sfu(instr) && instr->regs[1]->instr) {
413 struct ir3_instruction *src = instr->regs[1]->instr;
414 unsigned src_n = scalar_name(ctx, src, 0);
415
416 unsigned reg = ra_get_node_reg(ctx->g, src_n);
417
418 /* Check if the src register has been assigned yet: */
419 if (reg != NO_REG) {
420 if (BITSET_TEST(regs, reg)) {
421 return reg;
422 }
423 }
424 }
425
426 int r = pick_in_range(regs, base + start, base + max_target);
427 if (r < 0) {
428 /* wrap-around: */
429 r = pick_in_range(regs, base, base + start);
430 }
431
432 if (r < 0) {
433 /* overflow, we need to increase max_target: */
434 ctx->max_target++;
435 return ra_select_reg_merged(n, regs, data);
436 }
437
438 if (class == ctx->set->half_classes[0]) {
439 int n = r - base;
440 ctx->start_search_reg = (n + 1) % ctx->max_target;
441 } else if (class == ctx->set->classes[0]) {
442 int n = (r - base) * 2;
443 ctx->start_search_reg = (n + 1) % ctx->max_target;
444 }
445
446 return r;
447 }
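/* Note: ctx->max_target and ctx->start_search_reg are tracked in units of
 * half-regs (a full reg is two half-regs, see
 * ra_calc_merged_register_target()), which is why the full-precision class
 * above searches only up to max_target / 2 and converts the picked reg
 * back to half-reg units with (r - base) * 2.
 */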
448
449 static void
450 ra_init(struct ir3_ra_ctx *ctx)
451 {
452 unsigned n, base;
453
454 ir3_clear_mark(ctx->ir);
455 n = ir3_count_instructions(ctx->ir);
456
457 ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
458
459 foreach_block (block, &ctx->ir->block_list) {
460 ra_block_find_definers(ctx, block);
461 }
462
463 foreach_block (block, &ctx->ir->block_list) {
464 ra_block_name_instructions(ctx, block);
465 }
466
467 /* figure out the base register name for each class. The
468 * actual ra name is class_base[cls] + instr->name;
469 */
470 ctx->class_base[0] = 0;
471 for (unsigned i = 1; i <= total_class_count; i++) {
472 ctx->class_base[i] = ctx->class_base[i-1] +
473 ctx->class_alloc_count[i-1];
474 }
475
476 /* and vreg names for array elements: */
477 base = ctx->class_base[total_class_count];
478 foreach_array (arr, &ctx->ir->array_list) {
479 arr->base = base;
480 ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
481 base += reg_size_for_array(arr);
482 }
483 ctx->alloc_count += ctx->class_alloc_count[total_class_count];
484
485 ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
486 ralloc_steal(ctx->g, ctx->instrd);
487 ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
488 ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
489
490 /* TODO add selector callback for split (pre-a6xx) register file: */
491 if (ctx->scalar_pass && (ctx->ir->compiler->gpu_id >= 600)) {
492 ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);
493
494 ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
495 _mesa_hash_int, _mesa_key_int_equal);
496 }
497 }
498
499 /* Map the name back to instruction: */
500 static struct ir3_instruction *
501 name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
502 {
503 struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
504 if (entry)
505 return entry->data;
506 unreachable("invalid name");
507 return NULL;
508 }
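/* The name_to_instr table is only created for the scalar pass on a6xx+
 * (see ra_init()), and is filled in ra_block_compute_live_ranges();
 * split/collect names are skipped there, since they alias the names of
 * the real instructions they refer to.
 */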
509
510 static void
511 ra_destroy(struct ir3_ra_ctx *ctx)
512 {
513 ralloc_free(ctx->g);
514 }
515
516 static void
517 __def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
518 struct ir3_instruction *instr)
519 {
520 debug_assert(name < ctx->alloc_count);
521 /* defined on first write: */
522 if (!ctx->def[name])
523 ctx->def[name] = instr->ip;
524 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
525 BITSET_SET(bd->def, name);
526 }
527
528 static void
529 __use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
530 struct ir3_instruction *instr)
531 {
532 debug_assert(name < ctx->alloc_count);
533 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
534 if (!BITSET_TEST(bd->def, name))
535 BITSET_SET(bd->use, name);
536 }
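/* Worked example (hypothetical ips): if name N is first written at ip 10
 * and last read at ip 15, then def[N] = 10 and use[N] = 15, and that
 * [def, use] range is what ra_add_interference() tests for overlap
 * against the ranges of other names.
 */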
537
538 static void
539 ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
540 {
541 struct ir3_ra_block_data *bd;
542 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
543
544 #define def(name, instr) __def(ctx, bd, name, instr)
545 #define use(name, instr) __use(ctx, bd, name, instr)
546
547 bd = rzalloc(ctx->g, struct ir3_ra_block_data);
548
549 bd->def = rzalloc_array(bd, BITSET_WORD, bitset_words);
550 bd->use = rzalloc_array(bd, BITSET_WORD, bitset_words);
551 bd->livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
552 bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
553
554 block->data = bd;
555
556 struct ir3_instruction *first_non_input = NULL;
557 foreach_instr (instr, &block->instr_list) {
558 if (instr->opc != OPC_META_INPUT) {
559 first_non_input = instr;
560 break;
561 }
562 }
563
564 foreach_instr (instr, &block->instr_list) {
565 struct ir3_instruction *src;
566 struct ir3_register *reg;
567
568 if (writes_gpr(instr)) {
569 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
570 struct ir3_register *dst = instr->regs[0];
571
572 if (dst->flags & IR3_REG_ARRAY) {
573 struct ir3_array *arr =
574 ir3_lookup_array(ctx->ir, dst->array.id);
575 unsigned i;
576
577 arr->start_ip = MIN2(arr->start_ip, instr->ip);
578 arr->end_ip = MAX2(arr->end_ip, instr->ip);
579
580 /* set the node class now.. in case we don't encounter
581 * this array dst again. From register_alloc algo's
582 * perspective, these are all single/scalar regs:
583 */
584 for (i = 0; i < arr->length; i++) {
585 unsigned name = arr->base + i;
586 if(arr->half)
587 ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
588 else
589 ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
590 }
591
592 /* indirect write is treated like a write to all array
593 * elements, since we don't know which one is actually
594 * written:
595 */
596 if (dst->flags & IR3_REG_RELATIV) {
597 for (i = 0; i < arr->length; i++) {
598 unsigned name = arr->base + i;
599 def(name, instr);
600 }
601 } else {
602 unsigned name = arr->base + dst->array.offset;
603 def(name, instr);
604 }
605 } else if (id->defn == instr) {
606 /* in scalar pass, we aren't considering virtual register
607 * classes, ie. if an instruction writes a vec2, then it
608 * defines two different scalar register names.
609 */
610 unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
611 for (unsigned i = 0; i < n; i++) {
612 unsigned name = scalar_name(ctx, instr, i);
613
614 /* split/collect instructions share names with the real
615 * instructions they refer to, so they skip the hashtable:
616 */
617 if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
618 (instr->opc == OPC_META_COLLECT))) {
619 /* this is slightly annoying, we can't just use an
620 * integer on the stack
621 */
622 unsigned *key = ralloc(ctx->name_to_instr, unsigned);
623 *key = name;
624 debug_assert(!_mesa_hash_table_search(ctx->name_to_instr, key));
625 _mesa_hash_table_insert(ctx->name_to_instr, key, instr);
626 }
627
628 /* tex instructions actually have a wrmask, and
629 * don't touch masked out components. We can't do
630 * anything useful about that in the first pass,
631 * but in the scalar pass we can realize these
632 * registers are available:
633 */
634 if (ctx->scalar_pass && is_tex_or_prefetch(instr) &&
635 !(instr->regs[0]->wrmask & (1 << i)))
636 continue;
637
638 def(name, instr);
639
640 if ((instr->opc == OPC_META_INPUT) && first_non_input)
641 use(name, first_non_input);
642
643 if (is_high(instr)) {
644 ra_set_node_class(ctx->g, name,
645 ctx->set->high_classes[id->cls - HIGH_OFFSET]);
646 } else if (is_half(instr)) {
647 ra_set_node_class(ctx->g, name,
648 ctx->set->half_classes[id->cls - HALF_OFFSET]);
649 } else {
650 ra_set_node_class(ctx->g, name,
651 ctx->set->classes[id->cls]);
652 }
653 }
654 }
655 }
656
657 foreach_src (reg, instr) {
658 if (reg->flags & IR3_REG_ARRAY) {
659 struct ir3_array *arr =
660 ir3_lookup_array(ctx->ir, reg->array.id);
661 arr->start_ip = MIN2(arr->start_ip, instr->ip);
662 arr->end_ip = MAX2(arr->end_ip, instr->ip);
663
664 /* indirect read is treated like a read from all array
665 * elements, since we don't know which one is actually
666 * read:
667 */
668 if (reg->flags & IR3_REG_RELATIV) {
669 unsigned i;
670 for (i = 0; i < arr->length; i++) {
671 unsigned name = arr->base + i;
672 use(name, instr);
673 BITSET_SET(bd->use, name);
674 }
675 } else {
676 unsigned name = arr->base + reg->array.offset;
677 use(name, instr);
678 /* NOTE: arrays are not SSA so unconditionally
679 * set use bit:
680 */
681 BITSET_SET(bd->use, name);
682 debug_assert(reg->array.offset < arr->length);
683 }
684 } else if (ctx->scalar_pass) {
685 struct ir3_instruction *src = reg->instr;
686 /* skip things that aren't SSA: */
687 unsigned n = src ? dest_regs(src) : 0;
688
689 /* in scalar pass, we aren't considering virtual register
690 * classes, ie. if an instruction writes a vec2, then it
691 * defines two different scalar register names.
692 *
693 * We need to traverse up thru collect/split to find the
694 * actual non-meta instruction names for each of the
695 * components:
696 */
697 for (unsigned i = 0; i < n; i++) {
698 /* Need to filter out a couple special cases, ie.
699 * writes to a0.x or p0.x:
700 */
701 if (!writes_gpr(src))
702 continue;
703
704 /* split takes a src w/ wrmask potentially greater
705 * than 0x1, but it really only cares about a single
706 * component. This shows up in splits coming out of
707 * a tex instruction w/ wrmask=.z, for example.
708 */
709 if ((instr->opc == OPC_META_SPLIT) &&
710 !(i == instr->split.off))
711 continue;
712
713 use(scalar_name(ctx, src, i), instr);
714 }
715 } else if ((src = ssa(reg)) && writes_gpr(src)) {
716 unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
717 use(name, instr);
718 }
719 }
720 }
721 }
722
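/* Standard backwards dataflow, iterated to a fixed point by the caller
 * (ra_add_interference()):
 *
 *    livein  = use | (liveout & ~def)
 *    liveout = OR of livein over all successor blocks
 */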
723 static bool
724 ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
725 {
726 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
727 bool progress = false;
728
729 foreach_block (block, &ctx->ir->block_list) {
730 struct ir3_ra_block_data *bd = block->data;
731
732 /* update livein: */
733 for (unsigned i = 0; i < bitset_words; i++) {
734 /* anything used but not def'd within a block is
735 * by definition a live value coming into the block:
736 */
737 BITSET_WORD new_livein =
738 (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));
739
740 if (new_livein & ~bd->livein[i]) {
741 bd->livein[i] |= new_livein;
742 progress = true;
743 }
744 }
745
746 /* update liveout: */
747 for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
748 struct ir3_block *succ = block->successors[j];
749 struct ir3_ra_block_data *succ_bd;
750
751 if (!succ)
752 continue;
753
754 succ_bd = succ->data;
755
756 for (unsigned i = 0; i < bitset_words; i++) {
757 /* add anything that is livein in a successor block
758 * to our liveout:
759 */
760 BITSET_WORD new_liveout =
761 (succ_bd->livein[i] & ~bd->liveout[i]);
762
763 if (new_liveout) {
764 bd->liveout[i] |= new_liveout;
765 progress = true;
766 }
767 }
768 }
769 }
770
771 return progress;
772 }
773
774 static void
775 print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
776 {
777 bool first = true;
778 debug_printf("RA: %s:", name);
779 for (unsigned i = 0; i < cnt; i++) {
780 if (BITSET_TEST(bs, i)) {
781 if (!first)
782 debug_printf(",");
783 debug_printf(" %04u", i);
784 first = false;
785 }
786 }
787 debug_printf("\n");
788 }
789
790 static void
791 ra_add_interference(struct ir3_ra_ctx *ctx)
792 {
793 struct ir3 *ir = ctx->ir;
794
795 /* initialize array live ranges: */
796 foreach_array (arr, &ir->array_list) {
797 arr->start_ip = ~0;
798 arr->end_ip = 0;
799 }
800
801 /* compute live ranges (use/def) on a block level, also updating
802 * block's def/use bitmasks (used below to calculate per-block
803 * livein/liveout):
804 */
805 foreach_block (block, &ir->block_list) {
806 ra_block_compute_live_ranges(ctx, block);
807 }
808
809 /* update per-block livein/liveout: */
810 while (ra_compute_livein_liveout(ctx)) {}
811
812 if (RA_DEBUG) {
813 d("AFTER LIVEIN/OUT:");
814 foreach_block (block, &ir->block_list) {
815 struct ir3_ra_block_data *bd = block->data;
816 d("block%u:", block_id(block));
817 print_bitset(" def", bd->def, ctx->alloc_count);
818 print_bitset(" use", bd->use, ctx->alloc_count);
819 print_bitset(" l/i", bd->livein, ctx->alloc_count);
820 print_bitset(" l/o", bd->liveout, ctx->alloc_count);
821 }
822 foreach_array (arr, &ir->array_list) {
823 d("array%u:", arr->id);
824 d(" length: %u", arr->length);
825 d(" start_ip: %u", arr->start_ip);
826 d(" end_ip: %u", arr->end_ip);
827 }
828 d("INSTRUCTION VREG NAMES:");
829 foreach_block (block, &ctx->ir->block_list) {
830 foreach_instr (instr, &block->instr_list) {
831 if (!ctx->instrd[instr->ip].defn)
832 continue;
833 if (!writes_gpr(instr))
834 continue;
835 di(instr, "%04u", scalar_name(ctx, instr, 0));
836 }
837 }
838 d("ARRAY VREG NAMES:");
839 foreach_array (arr, &ctx->ir->array_list) {
840 d("%04u: arr%u", arr->base, arr->id);
841 }
842 }
843
844 /* extend start/end ranges based on livein/liveout info from cfg: */
845 foreach_block (block, &ir->block_list) {
846 struct ir3_ra_block_data *bd = block->data;
847
848 for (unsigned i = 0; i < ctx->alloc_count; i++) {
849 if (BITSET_TEST(bd->livein, i)) {
850 ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
851 ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
852 }
853
854 if (BITSET_TEST(bd->liveout, i)) {
855 ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
856 ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
857 }
858 }
859
860 foreach_array (arr, &ctx->ir->array_list) {
861 for (unsigned i = 0; i < arr->length; i++) {
862 if (BITSET_TEST(bd->livein, i + arr->base)) {
863 arr->start_ip = MIN2(arr->start_ip, block->start_ip);
864 }
865 if (BITSET_TEST(bd->liveout, i + arr->base)) {
866 arr->end_ip = MAX2(arr->end_ip, block->end_ip);
867 }
868 }
869 }
870 }
871
872 /* need to fix things up to keep outputs live: */
873 struct ir3_instruction *out;
874 foreach_output (out, ir) {
875 unsigned name = ra_name(ctx, &ctx->instrd[out->ip]);
876 ctx->use[name] = ctx->instr_cnt;
877 }
878
879 for (unsigned i = 0; i < ctx->alloc_count; i++) {
880 for (unsigned j = 0; j < ctx->alloc_count; j++) {
881 if (intersects(ctx->def[i], ctx->use[i],
882 ctx->def[j], ctx->use[j])) {
883 ra_add_node_interference(ctx->g, i, j);
884 }
885 }
886 }
887 }
888
889 /* some instructions need fix-up if dst register is half precision: */
890 static void fixup_half_instr_dst(struct ir3_instruction *instr)
891 {
892 switch (opc_cat(instr->opc)) {
893 case 1: /* move instructions */
894 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
895 break;
896 case 3:
897 switch (instr->opc) {
898 case OPC_MAD_F32:
899 /* mad.f32 is still valid when the dst is half but the srcs are full,
900 * eg. mad.f32 hr0, r0.x, r0.y, r0.z, so only fix up when the src is half:
901 */
902 if (instr->regs[1]->flags & IR3_REG_HALF)
903 instr->opc = OPC_MAD_F16;
904 break;
905 case OPC_SEL_B32:
906 instr->opc = OPC_SEL_B16;
907 break;
908 case OPC_SEL_S32:
909 instr->opc = OPC_SEL_S16;
910 break;
911 case OPC_SEL_F32:
912 instr->opc = OPC_SEL_F16;
913 break;
914 case OPC_SAD_S32:
915 instr->opc = OPC_SAD_S16;
916 break;
917 /* instructions may already be fixed up: */
918 case OPC_MAD_F16:
919 case OPC_SEL_B16:
920 case OPC_SEL_S16:
921 case OPC_SEL_F16:
922 case OPC_SAD_S16:
923 break;
924 default:
925 assert(0);
926 break;
927 }
928 break;
929 case 4:
930 switch (instr->opc) {
931 case OPC_RSQ:
932 instr->opc = OPC_HRSQ;
933 break;
934 case OPC_LOG2:
935 instr->opc = OPC_HLOG2;
936 break;
937 case OPC_EXP2:
938 instr->opc = OPC_HEXP2;
939 break;
940 default:
941 break;
942 }
943 break;
944 case 5:
945 instr->cat5.type = half_type(instr->cat5.type);
946 break;
947 }
948 }
949 /* some instructions need fix-up if src register is half precision: */
950 static void fixup_half_instr_src(struct ir3_instruction *instr)
951 {
952 switch (instr->opc) {
953 case OPC_MOV:
954 instr->cat1.src_type = half_type(instr->cat1.src_type);
955 break;
956 default:
957 break;
958 }
959 }
960
961 /* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
962 * array access(es) which do not have any previous access to depend
963 * on, from a scheduling point of view.
964 */
965 static void
966 reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
967 struct ir3_instruction *instr)
968 {
969 struct ir3_ra_instr_data *id;
970
971 if (reg->flags & IR3_REG_ARRAY) {
972 struct ir3_array *arr =
973 ir3_lookup_array(ctx->ir, reg->array.id);
974 unsigned name = arr->base + reg->array.offset;
975 unsigned r = ra_get_node_reg(ctx->g, name);
976 unsigned num = ctx->set->ra_reg_to_gpr[r];
977
978 if (reg->flags & IR3_REG_RELATIV) {
979 reg->array.offset = num;
980 } else {
981 reg->num = num;
982 reg->flags &= ~IR3_REG_SSA;
983 }
984
985 reg->flags &= ~IR3_REG_ARRAY;
986 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
987 unsigned first_component = 0;
988
989 /* Special case for tex instructions, which may use the wrmask
990 * to mask off the first component(s). In the scalar pass,
991 * this means the masked off component(s) are not def'd/use'd,
992 * so we get a bogus value when we ask the register_allocate
993 * algo to get the assigned reg for the unused/untouched
994 * component. So we need to consider the first used component:
995 */
996 if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
997 unsigned n = ffs(id->defn->regs[0]->wrmask);
998 debug_assert(n > 0);
999 first_component = n - 1;
1000 }
1001
1002 unsigned name = scalar_name(ctx, id->defn, first_component);
1003 unsigned r = ra_get_node_reg(ctx->g, name);
1004 unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
1005
1006 debug_assert(!(reg->flags & IR3_REG_RELATIV));
1007
1008 debug_assert(num >= first_component);
1009
1010 if (is_high(id->defn))
1011 num += FIRST_HIGH_REG;
1012
1013 reg->num = num - first_component;
1014
1015 reg->flags &= ~IR3_REG_SSA;
1016
1017 if (is_half(id->defn))
1018 reg->flags |= IR3_REG_HALF;
1019 }
1020 }
1021
1022 static void
1023 account_assignment(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1024 {
1025 struct ir3_ra_instr_data *id;
1026 struct ir3_register *dst = instr->regs[0];
1027 unsigned max;
1028
1029 if (is_high(instr))
1030 return;
1031
1032 if (dst->flags & IR3_REG_ARRAY) {
1033 struct ir3_array *arr =
1034 ir3_lookup_array(ctx->ir, dst->array.id);
1035 max = arr->reg + arr->length;
1036 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
1037 unsigned name = scalar_name(ctx, id->defn, 0);
1038 unsigned r = ra_get_node_reg(ctx->g, name);
1039 max = ctx->set->ra_reg_to_gpr[r] + id->off;
1040 } else {
1041 return;
1042 }
1043
1044 if (is_half(instr)) {
1045 ctx->max_half_assigned = MAX2(ctx->max_half_assigned, max);
1046 } else {
1047 ctx->max_assigned = MAX2(ctx->max_assigned, max);
1048 }
1049 }
1050
1051 /* helper to determine which regs to assign in which pass: */
1052 static bool
1053 should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1054 {
1055 if ((instr->opc == OPC_META_SPLIT) ||
1056 (instr->opc == OPC_META_COLLECT))
1057 return !ctx->scalar_pass;
1058 return ctx->scalar_pass;
1059 }
1060
1061 static void
1062 ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
1063 {
1064 foreach_instr (instr, &block->instr_list) {
1065 struct ir3_register *reg;
1066
1067 if (writes_gpr(instr)) {
1068 account_assignment(ctx, instr);
1069 if (should_assign(ctx, instr)) {
1070 reg_assign(ctx, instr->regs[0], instr);
1071 if (instr->regs[0]->flags & IR3_REG_HALF)
1072 fixup_half_instr_dst(instr);
1073 }
1074 }
1075
1076 foreach_src_n (reg, n, instr) {
1077 struct ir3_instruction *src = reg->instr;
1078
1079 if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
1080 continue;
1081
1082 if (src && should_assign(ctx, instr))
1083 reg_assign(ctx, src->regs[0], src);
1084
1085 /* Note: reg->instr could be null for IR3_REG_ARRAY */
1086 if (src || (reg->flags & IR3_REG_ARRAY))
1087 reg_assign(ctx, instr->regs[n+1], src);
1088
1089 if (instr->regs[n+1]->flags & IR3_REG_HALF)
1090 fixup_half_instr_src(instr);
1091 }
1092 }
1093
1094 /* We need to pre-color outputs for the scalar pass in
1095 * ra_precolor_assigned(), so we need to actually assign
1096 * them in the first pass:
1097 */
1098 if (!ctx->scalar_pass) {
1099 struct ir3_instruction *in, *out;
1100
1101 foreach_input (in, ctx->ir) {
1102 reg_assign(ctx, in->regs[0], in);
1103 }
1104 foreach_output (out, ctx->ir) {
1105 reg_assign(ctx, out->regs[0], out);
1106 }
1107 }
1108 }
1109
1110 /* handle pre-colored registers. This includes "arrays" (which could be of
1111 * length 1, used for phi webs lowered to registers in nir), as well as
1112 * special shader input values that need to be pinned to certain registers.
1113 */
1114 static void
1115 ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
1116 {
1117 unsigned num_precolor = 0;
1118 for (unsigned i = 0; i < nprecolor; i++) {
1119 if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
1120 struct ir3_instruction *instr = precolor[i];
1121
1122 if (instr->regs[0]->num == INVALID_REG)
1123 continue;
1124
1125 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1126
1127 debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
1128
1129 /* only consider the first component: */
1130 if (id->off > 0)
1131 continue;
1132
1133 if (ctx->scalar_pass && !should_assign(ctx, instr))
1134 continue;
1135
1136 /* 'base' is in scalar (class 0) but we need to map that
1137 * to the conflicting register of the appropriate class (ie.
1138 * input could be vec2/vec3/etc)
1139 *
1140 * Note that the higher class (larger than scalar) regs
1141 * are setup to conflict with others in the same class,
1142 * so for example, R1 (scalar) is also the first component
1143 * of D1 (vec2/double):
1144 *
1145 * Single (base) | Double
1146 * --------------+---------------
1147 * R0 | D0
1148 * R1 | D0 D1
1149 * R2 | D1 D2
1150 * R3 | D2
1151 * .. and so on..
1152 */
1153 unsigned regid = instr->regs[0]->num;
1154 unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1155 unsigned name = ra_name(ctx, id);
1156 ra_set_node_reg(ctx->g, name, reg);
1157 num_precolor = MAX2(regid, num_precolor);
1158 }
1159 }
1160
1161 /* pre-assign array elements:
1162 *
1163 * TODO this is going to need some work for half-precision.. possibly
1164 * this is easier on a6xx, where we can just divide array size by two?
1165 * But on a5xx and earlier it will need to track two bases.
1166 */
1167 foreach_array (arr, &ctx->ir->array_list) {
1168 unsigned base = 0;
1169
1170 if (arr->end_ip == 0)
1171 continue;
1172
1173 /* figure out what else we conflict with which has already
1174 * been assigned:
1175 */
1176 retry:
1177 foreach_array (arr2, &ctx->ir->array_list) {
1178 if (arr2 == arr)
1179 break;
1180 if (arr2->end_ip == 0)
1181 continue;
1182 /* if it intersects with liverange AND register range.. */
1183 if (intersects(arr->start_ip, arr->end_ip,
1184 arr2->start_ip, arr2->end_ip) &&
1185 intersects(base, base + reg_size_for_array(arr),
1186 arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
1187 base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
1188 goto retry;
1189 }
1190 }
1191
1192 /* also need to not conflict with any pre-assigned inputs: */
1193 for (unsigned i = 0; i < nprecolor; i++) {
1194 struct ir3_instruction *instr = precolor[i];
1195
1196 if (!instr || (instr->flags & IR3_INSTR_UNUSED))
1197 continue;
1198
1199 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1200
1201 /* only consider the first component: */
1202 if (id->off > 0)
1203 continue;
1204
1205 unsigned name = ra_name(ctx, id);
1206 unsigned regid = instr->regs[0]->num;
1207
1208 /* Check if array intersects with liverange AND register
1209 * range of the input:
1210 */
1211 if (intersects(arr->start_ip, arr->end_ip,
1212 ctx->def[name], ctx->use[name]) &&
1213 intersects(base, base + reg_size_for_array(arr),
1214 regid, regid + class_sizes[id->cls])) {
1215 base = MAX2(base, regid + class_sizes[id->cls]);
1216 goto retry;
1217 }
1218 }
1219
1220 arr->reg = base;
1221
1222 for (unsigned i = 0; i < arr->length; i++) {
1223 unsigned name, reg;
1224
1225 if (arr->half) {
1226 /* We don't need to do this on generations older than a6xx,
1227 * since there's no conflict between full regs and half regs
1228 * on them.
1229 *
1230 * TODO Presumably "base" could start from 0 respectively
1231 * for half regs of arrays on older generations.
1232 */
1233 unsigned base_half = base * 2 + i;
1234 reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
1235 base = base_half / 2 + 1;
1236 } else {
1237 reg = ctx->set->gpr_to_ra_reg[0][base++];
1238 }
1239
1240 name = arr->base + i;
1241 ra_set_node_reg(ctx->g, name, reg);
1242 }
1243 }
1244
1245 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1246 foreach_array (arr, &ctx->ir->array_list) {
1247 unsigned first = arr->reg;
1248 unsigned last = arr->reg + arr->length - 1;
1249 debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
1250 (first >> 2), "xyzw"[first & 0x3],
1251 (last >> 2), "xyzw"[last & 0x3]);
1252 }
1253 }
1254 }
1255
1256 static void
1257 precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1258 {
1259 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1260 unsigned n = dest_regs(instr);
1261 for (unsigned i = 0; i < n; i++) {
1262 /* tex instructions actually have a wrmask, and
1263 * don't touch masked out components. So we
1264 * shouldn't precolor them:
1265 */
1266 if (is_tex_or_prefetch(instr) &&
1267 !(instr->regs[0]->wrmask & (1 << i)))
1268 continue;
1269
1270 unsigned name = scalar_name(ctx, instr, i);
1271 unsigned regid = instr->regs[0]->num + i;
1272
1273 if (instr->regs[0]->flags & IR3_REG_HIGH)
1274 regid -= FIRST_HIGH_REG;
1275
1276 unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1277 ra_set_node_reg(ctx->g, name, vreg);
1278 }
1279 }
1280
1281 /* pre-color non-scalar registers based on the registers assigned in the
1282 * previous pass. Do this by actually looking at the split/collect instructions.
1283 */
1284 static void
1285 ra_precolor_assigned(struct ir3_ra_ctx *ctx)
1286 {
1287 debug_assert(ctx->scalar_pass);
1288
1289 foreach_block (block, &ctx->ir->block_list) {
1290 foreach_instr (instr, &block->instr_list) {
1291
1292 if ((instr->opc != OPC_META_SPLIT) &&
1293 (instr->opc != OPC_META_COLLECT))
1294 continue;
1295
1296 precolor(ctx, instr);
1297
1298 struct ir3_register *src;
1299 foreach_src (src, instr) {
1300 if (!src->instr)
1301 continue;
1302 precolor(ctx, src->instr);
1303 }
1304 }
1305 }
1306 }
1307
1308 static int
1309 ra_alloc(struct ir3_ra_ctx *ctx)
1310 {
1311 if (!ra_allocate(ctx->g))
1312 return -1;
1313
1314 foreach_block (block, &ctx->ir->block_list) {
1315 ra_block_alloc(ctx, block);
1316 }
1317
1318 return 0;
1319 }
1320
1321 /* if we end up with split/collect instructions with non-matching src
1322 * and dest regs, that means something has gone wrong. Which makes it
1323 * a pretty good sanity check.
1324 */
1325 static void
1326 ra_sanity_check(struct ir3 *ir)
1327 {
1328 foreach_block (block, &ir->block_list) {
1329 foreach_instr (instr, &block->instr_list) {
1330 if (instr->opc == OPC_META_SPLIT) {
1331 struct ir3_register *dst = instr->regs[0];
1332 struct ir3_register *src = instr->regs[1];
1333 debug_assert(dst->num == (src->num + instr->split.off));
1334 } else if (instr->opc == OPC_META_COLLECT) {
1335 struct ir3_register *dst = instr->regs[0];
1336 struct ir3_register *src;
1337
1338 foreach_src_n (src, n, instr) {
1339 debug_assert(dst->num == (src->num - n));
1340 }
1341 }
1342 }
1343 }
1344 }
1345
1346 /* Target is calculated in terms of half-regs (with a full reg
1347 * consisting of two half-regs).
1348 */
1349 static void
1350 ra_calc_merged_register_target(struct ir3_ra_ctx *ctx)
1351 {
1352 const unsigned vec4 = 2 * 4; // 8 half-regs
1353 unsigned t = MAX2(2 * ctx->max_assigned, ctx->max_half_assigned);
1354
1355 /* the second RA pass may be able to save some regs, so try to reclaim
1356 * the benefit by adjusting the target downwards slightly:
1357 */
1358 if (ir3_has_latency_to_hide(ctx->ir)) {
1359 if (t > 8 * vec4) {
1360 t -= 2 * vec4;
1361 } else if (t > 6 * vec4) {
1362 t -= vec4;
1363 }
1364 }
1365
1366 ctx->max_target = t;
1367 }
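/* Worked example (hypothetical numbers): if the first pass used at most
 * 20 full regs and 10 half regs, then t = MAX2(2 * 20, 10) = 40 half-regs
 * (5 vec4's), which is below the 6 * vec4 threshold, so no reduction is
 * applied even when there is latency to hide.
 */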
1368
1369 static int
1370 ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1371 unsigned nprecolor, bool scalar_pass, unsigned *target)
1372 {
1373 struct ir3_ra_ctx ctx = {
1374 .v = v,
1375 .ir = v->ir,
1376 .set = v->ir->compiler->set,
1377 .scalar_pass = scalar_pass,
1378 };
1379 int ret;
1380
1381 if (scalar_pass) {
1382 ctx.max_target = *target;
1383 }
1384
1385 ra_init(&ctx);
1386 ra_add_interference(&ctx);
1387 ra_precolor(&ctx, precolor, nprecolor);
1388 if (scalar_pass)
1389 ra_precolor_assigned(&ctx);
1390 ret = ra_alloc(&ctx);
1391 ra_destroy(&ctx);
1392
1393 /* In the first pass, calculate the target register usage used in the
1394 * second (scalar) pass:
1395 */
1396 if (!scalar_pass) {
1397 /* TODO: round-robin support for pre-a6xx: */
1398 if (ctx.ir->compiler->gpu_id >= 600) {
1399 ra_calc_merged_register_target(&ctx);
1400 }
1401 *target = ctx.max_target;
1402 }
1403
1404 return ret;
1405 }
1406
1407 int
1408 ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1409 unsigned nprecolor)
1410 {
1411 unsigned target = 0;
1412 int ret;
1413
1414 /* First pass, assign the vecN (non-scalar) registers: */
1415 ret = ir3_ra_pass(v, precolor, nprecolor, false, &target);
1416 if (ret)
1417 return ret;
1418
1419 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1420 printf("AFTER RA (1st pass):\n");
1421 ir3_print(v->ir);
1422 }
1423
1424 /* Second pass, assign the scalar registers: */
1425 ret = ir3_ra_pass(v, precolor, nprecolor, true, &target);
1426 if (ret)
1427 return ret;
1428
1429 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1430 printf("AFTER RA (2nd pass):\n");
1431 ir3_print(v->ir);
1432 }
1433
1434 #ifdef DEBUG
1435 # define SANITY_CHECK DEBUG
1436 #else
1437 # define SANITY_CHECK 0
1438 #endif
1439 if (SANITY_CHECK)
1440 ra_sanity_check(v->ir);
1441
1442 return ret;
1443 }