freedreno/ir3/ra: add helper to map name to array
src/freedreno/ir3/ir3_ra.c (mesa.git)
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_math.h"
28 #include "util/register_allocate.h"
29 #include "util/ralloc.h"
30 #include "util/bitset.h"
31
32 #include "ir3.h"
33 #include "ir3_compiler.h"
34 #include "ir3_ra.h"
35
36
37 #ifdef DEBUG
38 #define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)
39 #else
40 #define RA_DEBUG 0
41 #endif
42 #define d(fmt, ...) do { if (RA_DEBUG) { \
43 printf("RA: "fmt"\n", ##__VA_ARGS__); \
44 } } while (0)
45
46 #define di(instr, fmt, ...) do { if (RA_DEBUG) { \
47 printf("RA: "fmt": ", ##__VA_ARGS__); \
48 ir3_print_instr(instr); \
49 } } while (0)
50
51 /*
52 * Register Assignment:
53 *
54 * Uses the register_allocate util, which implements a graph-coloring
55 * algo with interference classes. To handle the cases where we need
56 * consecutive registers (for example, texture sample instructions),
57 * we model these as larger (double/quad/etc) registers which conflict
58 * with the corresponding registers in other classes.
59 *
60 * We also create additional classes for half-regs, which do not
61 * conflict with the full-reg classes. We need at least sizes 1-4
62 * (to deal w/ texture sample instructions that output to half-
63 * regs). At the moment we don't create the higher order half-reg
64 * classes, as half-regs frequently do not have enough precision
65 * for texture coords at higher resolutions.
66 *
67 * There are some additional cases that we need to handle specially,
68 * as the graph coloring algo doesn't understand "partial writes".
69 * For example, a sequence like:
70 *
71 * add r0.z, ...
72 * sam (f32)(xy)r0.x, ...
73 * ...
74 * sam (f32)(xyzw)r0.w, r0.x, ... ; 3d texture, so r0.xyz are coord
75 *
76 * In this scenario, we treat r0.xyz as class size 3, which is written
77 * (from a use/def perspective) at the 'add' instruction and ignore the
78 * subsequent partial writes to r0.xy. So the 'add r0.z, ...' is the
79 * defining instruction, as it is the first to partially write r0.xyz.
80 *
81 * To address the fragmentation that this can potentially cause, a
82 * two pass register allocation is used. After the first pass the
83 * assignment of scalars is discarded, but the assignment of vecN (for
84 * N > 1) is used to pre-color in the second pass, which considers
85 * only scalars.
86 *
87 * Arrays of arbitrary size are handled via pre-coloring a consecutive
88 * sequence of registers. Additional scalar (single component) reg
89 * names are allocated starting at ctx->class_base[total_class_count]
90 * (see arr->base), which are pre-colored. In the use/def graph direct
91 * access is treated as a single element use/def, and indirect access
92 * is treated as use or def of all array elements. (Only the first
93 * def is tracked, in case of multiple indirect writes, etc.)
94 *
95 * TODO arrays that fit in one of the pre-defined class sizes should
96 * not need to be pre-colored, but instead could be given a normal
97 * vreg name. (Ignoring this for now since it is a good way to work
98 * out the kinks with arbitrary sized arrays.)
99 *
100 * TODO might be easier for debugging to split this into two passes,
101 * the first assigning vreg names in a way that we could ir3_print()
102 * the result.
103 */
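/* To make the name layout described above concrete (a rough sketch; see
 * also the comments in ra_init(), and note that 'elem' below is just an
 * illustrative array element index, not a real variable):
 *
 *   name = ctx->class_base[cls] + instr->name;    (regular instruction)
 *   name = arr->base + elem;                      (array element)
 *
 * with all array names laid out consecutively starting at
 * ctx->class_base[total_class_count].
 */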
104
105
106 static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);
107
108 static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
109 static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);
110
111 /* does it conflict? */
112 static inline bool
113 intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
114 {
115 return !((a_start >= b_end) || (b_start >= a_end));
116 }
117
118 static unsigned
119 reg_size_for_array(struct ir3_array *arr)
120 {
121 if (arr->half)
122 return DIV_ROUND_UP(arr->length, 2);
123
124 return arr->length;
125 }
126
127 static bool
128 instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
129 {
130 if (a->flags & IR3_INSTR_UNUSED)
131 return false;
132 return (a->ip < b->ip);
133 }
134
135 static struct ir3_instruction *
136 get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
137 int *sz, int *off)
138 {
139 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
140 struct ir3_instruction *d = NULL;
141
142 if (ctx->scalar_pass) {
143 id->defn = instr;
144 id->off = 0;
145 id->sz = 1; /* considering things as N scalar regs now */
146 }
147
148 if (id->defn) {
149 *sz = id->sz;
150 *off = id->off;
151 return id->defn;
152 }
153
154 if (instr->opc == OPC_META_COLLECT) {
155 /* What about the case where the collect is a subset of an array?
156 * We'd need to find the distance between where the actual array
157 * starts and the collect.. that probably doesn't happen currently.
158 */
159 struct ir3_register *src;
160 int dsz, doff;
161
162 /* note: don't use foreach_ssa_src as this gets called once
163 * while assigning regs (which clears SSA flag)
164 */
165 foreach_src_n (src, n, instr) {
166 struct ir3_instruction *dd;
167 if (!src->instr)
168 continue;
169
170 dd = get_definer(ctx, src->instr, &dsz, &doff);
171
172 if ((!d) || instr_before(dd, d)) {
173 d = dd;
174 *sz = dsz;
175 *off = doff - n;
176 }
177 }
178
179 } else if (instr->cp.right || instr->cp.left) {
180 /* this also covers the meta:fo case, which ends up w/ single
181 * scalar instructions for each component:
182 */
183 struct ir3_instruction *f = ir3_neighbor_first(instr);
184
185 /* by definition, the entire sequence forms one linked list
186 * of single scalar register nodes (even if some of them may
187 * be splits from a texture sample (for example) instr). We
188 * just need to walk the list to find the first element of
189 * the group defined (lowest ip).
190 */
191 int cnt = 0;
192
193 /* need to skip over unused in the group: */
194 while (f && (f->flags & IR3_INSTR_UNUSED)) {
195 f = f->cp.right;
196 cnt++;
197 }
198
199 while (f) {
200 if ((!d) || instr_before(f, d))
201 d = f;
202 if (f == instr)
203 *off = cnt;
204 f = f->cp.right;
205 cnt++;
206 }
207
208 *sz = cnt;
209
210 } else {
211 /* second case is looking directly at the instruction which
212 * produces multiple values (eg, texture sample), rather
213 * than the split nodes that point back to that instruction.
214 * This isn't quite right, because it may be part of a larger
215 * group, such as:
216 *
217 * sam (f32)(xyzw)r0.x, ...
218 * add r1.x, ...
219 * add r1.y, ...
220 * sam (f32)(xyzw)r2.x, r0.w <-- (r0.w, r1.x, r1.y)
221 *
222 * need to come up with a better way to handle that case.
223 */
224 if (instr->address) {
225 *sz = instr->regs[0]->size;
226 } else {
227 *sz = util_last_bit(instr->regs[0]->wrmask);
228 }
229 *off = 0;
230 d = instr;
231 }
232
233 if (d->opc == OPC_META_SPLIT) {
234 struct ir3_instruction *dd;
235 int dsz, doff;
236
237 dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
238
239 /* by definition, should come before: */
240 debug_assert(instr_before(dd, d));
241
242 *sz = MAX2(*sz, dsz);
243
244 if (instr->opc == OPC_META_SPLIT)
245 *off = MAX2(*off, instr->split.off);
246
247 d = dd;
248 }
249
250 debug_assert(d->opc != OPC_META_SPLIT);
251
252 id->defn = d;
253 id->sz = *sz;
254 id->off = *off;
255
256 return d;
257 }
258
259 static void
260 ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
261 {
262 foreach_instr (instr, &block->instr_list) {
263 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
264 if (instr->regs_count == 0)
265 continue;
266 /* couple special cases: */
267 if (writes_addr(instr) || writes_pred(instr)) {
268 id->cls = -1;
269 } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
270 id->cls = total_class_count;
271 } else {
272 /* and the normal case: */
273 id->defn = get_definer(ctx, instr, &id->sz, &id->off);
274 id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
275
276 /* this is a bit of duct-tape.. if we have a scenario like:
277 *
278 * sam (f32)(x) out.x, ...
279 * sam (f32)(x) out.y, ...
280 *
281 * Then the fanout/split meta instructions for the two different
282 * tex instructions end up grouped as left/right neighbors. The
283 * upshot is that when you get_definer() on one of the meta:fo's
284 * you get the first sam as the definer with sz=2, but when you call
285 * get_definer() on either of the sam's you get itself as the
286 * definer with sz=1.
287 *
288 * (We actually avoid this scenario exactly, the neighbor links
289 * prevent one of the output mov's from being eliminated, so this
290 * hack should be enough. But probably we need to rethink how we
291 * find the "defining" instruction.)
292 *
293 * TODO how do we figure out offset properly...
294 */
295 if (id->defn != instr) {
296 struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
297 if (did->sz < id->sz) {
298 did->sz = id->sz;
299 did->cls = id->cls;
300 }
301 }
302 }
303 }
304 }
305
306 /* give each instruction a name (and ip), and count up the # of names
307 * of each class
308 */
309 static void
310 ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
311 {
312 foreach_instr (instr, &block->instr_list) {
313 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
314
315 #ifdef DEBUG
316 instr->name = ~0;
317 #endif
318
319 ctx->instr_cnt++;
320
321 if (!writes_gpr(instr))
322 continue;
323
324 if (id->defn != instr)
325 continue;
326
327 /* In scalar pass, collect/split don't get their own names,
328 * but instead inherit them from their src(s):
329 *
330 * Possibly we don't need this because of scalar_name(), but
331 * it does make the ir3_print() dumps easier to read.
332 */
333 if (ctx->scalar_pass) {
334 if (instr->opc == OPC_META_SPLIT) {
335 instr->name = instr->regs[1]->instr->name + instr->split.off;
336 continue;
337 }
338
339 if (instr->opc == OPC_META_COLLECT) {
340 instr->name = instr->regs[1]->instr->name;
341 continue;
342 }
343 }
344
345 /* arrays are handled via pre-coloring (with names assigned in
346 * ra_init()), so only the regular classes get names here:
347 */
348 if ((id->cls >= 0) && (id->cls < total_class_count)) {
349 /* in the scalar pass, we generate a name for each
350 * scalar component, instr->name is the name of the
351 * first component.
352 */
353 unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
354 instr->name = ctx->class_alloc_count[id->cls];
355 ctx->class_alloc_count[id->cls] += n;
356 ctx->alloc_count += n;
357 }
358 }
359 }
360
361 static int
362 pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
363 {
364 for (unsigned i = min; i < max; i++) {
365 if (BITSET_TEST(regs, i)) {
366 return i;
367 }
368 }
369 return -1;
370 }
371
372 /* register selector for the a6xx+ merged register file: */
373 static unsigned int
374 ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
375 {
376 struct ir3_ra_ctx *ctx = data;
377 unsigned int class = ra_get_node_class(ctx->g, n);
378
379 /* dimensions within the register class: */
380 unsigned max_target, start;
381
382 /* the regs bitset will include *all* of the virtual regs, but we lay
383 * out the different classes consecutively in the virtual register
384 * space. So we just need to think about the base offset of a given
385 * class within the virtual register space, and offset the register
386 * space we search within by that base offset.
387 */
388 unsigned base;
389
390 /* NOTE: this is only used in scalar pass, so the register
391 * class will be one of the scalar classes (ie. idx==0):
392 */
393 if (class == ctx->set->high_classes[0]) {
394 max_target = HIGH_CLASS_REGS(0);
395 start = 0;
396 base = ctx->set->gpr_to_ra_reg[HIGH_OFFSET][0];
397 } else if (class == ctx->set->half_classes[0]) {
398 max_target = ctx->max_target;
399 start = ctx->start_search_reg;
400 base = ctx->set->gpr_to_ra_reg[HALF_OFFSET][0];
401 } else if (class == ctx->set->classes[0]) {
402 max_target = ctx->max_target / 2;
403 start = ctx->start_search_reg;
404 base = ctx->set->gpr_to_ra_reg[0][0];
405 } else {
406 unreachable("unexpected register class!");
407 }
408
409 /* For cat4 instructions, if the src reg is already assigned, and
410 * available to pick, use it. This doesn't introduce unnecessary
411 * dependencies, and it potentially avoids needing (ss) syncs for
412 * write after read hazards:
413 */
414 struct ir3_instruction *instr = name_to_instr(ctx, n);
415 if (is_sfu(instr) && instr->regs[1]->instr) {
416 struct ir3_instruction *src = instr->regs[1]->instr;
417 unsigned src_n = scalar_name(ctx, src, 0);
418
419 unsigned reg = ra_get_node_reg(ctx->g, src_n);
420
421 /* Check if the src register has been assigned yet: */
422 if (reg != NO_REG) {
423 if (BITSET_TEST(regs, reg)) {
424 return reg;
425 }
426 }
427 }
428
429 int r = pick_in_range(regs, base + start, base + max_target);
430 if (r < 0) {
431 /* wrap-around: */
432 r = pick_in_range(regs, base, base + start);
433 }
434
435 if (r < 0) {
436 /* overflow, we need to increase max_target: */
437 ctx->max_target++;
438 return ra_select_reg_merged(n, regs, data);
439 }
440
441 if (class == ctx->set->half_classes[0]) {
442 int n = r - base;
443 ctx->start_search_reg = (n + 1) % ctx->max_target;
444 } else if (class == ctx->set->classes[0]) {
445 int n = (r - base) * 2;
446 ctx->start_search_reg = (n + 1) % ctx->max_target;
447 }
448
449 return r;
450 }
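/* Note on the selection above: the net effect is a round-robin style
 * allocation within the class, ie. search [start, max_target) first,
 * wrap around to [0, start), and only grow max_target once the whole
 * window is exhausted. Presumably the point of not immediately reusing
 * registers is to reduce write-after-read dependencies and give the
 * scheduler more room to hide latency (see also
 * ra_calc_merged_register_target()).
 */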
451
452 static void
453 ra_init(struct ir3_ra_ctx *ctx)
454 {
455 unsigned n, base;
456
457 ir3_clear_mark(ctx->ir);
458 n = ir3_count_instructions(ctx->ir);
459
460 ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
461
462 foreach_block (block, &ctx->ir->block_list) {
463 ra_block_find_definers(ctx, block);
464 }
465
466 foreach_block (block, &ctx->ir->block_list) {
467 ra_block_name_instructions(ctx, block);
468 }
469
470 /* figure out the base register name for each class. The
471 * actual ra name is class_base[cls] + instr->name;
472 */
473 ctx->class_base[0] = 0;
474 for (unsigned i = 1; i <= total_class_count; i++) {
475 ctx->class_base[i] = ctx->class_base[i-1] +
476 ctx->class_alloc_count[i-1];
477 }
478
479 /* and vreg names for array elements: */
480 base = ctx->class_base[total_class_count];
481 foreach_array (arr, &ctx->ir->array_list) {
482 arr->base = base;
483 ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
484 base += reg_size_for_array(arr);
485 }
486 ctx->alloc_count += ctx->class_alloc_count[total_class_count];
487
488 ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
489 ralloc_steal(ctx->g, ctx->instrd);
490 ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
491 ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
492
493 /* TODO add selector callback for split (pre-a6xx) register file: */
494 if (ctx->scalar_pass && (ctx->ir->compiler->gpu_id >= 600)) {
495 ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);
496
497 ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
498 _mesa_hash_int, _mesa_key_int_equal);
499 }
500 }
501
502 /* Map the name back to instruction: */
503 static struct ir3_instruction *
504 name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
505 {
506 assert(!name_is_array(ctx, name));
507 struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
508 if (entry)
509 return entry->data;
510 unreachable("invalid instr name");
511 return NULL;
512 }
513
514 static bool
515 name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
516 {
517 return name >= ctx->class_base[total_class_count];
518 }
519
520 static struct ir3_array *
521 name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
522 {
523 assert(name_is_array(ctx, name));
524 foreach_array (arr, &ctx->ir->array_list) {
525 unsigned sz = reg_size_for_array(arr);
526 if (name < (arr->base + sz))
527 return arr;
528 }
529 unreachable("invalid array name");
530 return NULL;
531 }
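/* Example usage of the helpers above (illustrative only): recovering the
 * array and element offset behind a vreg name:
 *
 *   if (name_is_array(ctx, name)) {
 *           struct ir3_array *arr = name_to_array(ctx, name);
 *           unsigned elem = name - arr->base;
 *           ...
 *   }
 *
 * name_to_array() relies on ra_init() handing out consecutive name
 * ranges to arrays in array_list order, which is why checking only the
 * upper bound of each array is sufficient.
 */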
532
533 static void
534 ra_destroy(struct ir3_ra_ctx *ctx)
535 {
536 ralloc_free(ctx->g);
537 }
538
539 static void
540 __def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
541 struct ir3_instruction *instr)
542 {
543 debug_assert(name < ctx->alloc_count);
544 /* defined on first write: */
545 if (!ctx->def[name])
546 ctx->def[name] = instr->ip;
547 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
548 BITSET_SET(bd->def, name);
549 }
550
551 static void
552 __use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
553 struct ir3_instruction *instr)
554 {
555 debug_assert(name < ctx->alloc_count);
556 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
557 if (!BITSET_TEST(bd->def, name))
558 BITSET_SET(bd->use, name);
559 }
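/* Sketch of how the ranges recorded by __def()/__use() get consumed:
 * once liveness has been propagated across blocks, two names i and j
 * interfere if their [def, use] intervals overlap, roughly:
 *
 *   if (intersects(ctx->def[i], ctx->use[i], ctx->def[j], ctx->use[j]))
 *           ra_add_node_interference(ctx->g, i, j);
 *
 * (see ra_add_interference() below)
 */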
560
561 static void
562 ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
563 {
564 struct ir3_ra_block_data *bd;
565 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
566
567 #define def(name, instr) __def(ctx, bd, name, instr)
568 #define use(name, instr) __use(ctx, bd, name, instr)
569
570 bd = rzalloc(ctx->g, struct ir3_ra_block_data);
571
572 bd->def = rzalloc_array(bd, BITSET_WORD, bitset_words);
573 bd->use = rzalloc_array(bd, BITSET_WORD, bitset_words);
574 bd->livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
575 bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
576
577 block->data = bd;
578
579 struct ir3_instruction *first_non_input = NULL;
580 foreach_instr (instr, &block->instr_list) {
581 if (instr->opc != OPC_META_INPUT) {
582 first_non_input = instr;
583 break;
584 }
585 }
586
587 foreach_instr (instr, &block->instr_list) {
588 struct ir3_instruction *src;
589 struct ir3_register *reg;
590
591 if (writes_gpr(instr)) {
592 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
593 struct ir3_register *dst = instr->regs[0];
594
595 if (dst->flags & IR3_REG_ARRAY) {
596 struct ir3_array *arr =
597 ir3_lookup_array(ctx->ir, dst->array.id);
598 unsigned i;
599
600 arr->start_ip = MIN2(arr->start_ip, instr->ip);
601 arr->end_ip = MAX2(arr->end_ip, instr->ip);
602
603 /* set the node class now.. in case we don't encounter
604 * this array dst again. From register_alloc algo's
605 * perspective, these are all single/scalar regs:
606 */
607 for (i = 0; i < arr->length; i++) {
608 unsigned name = arr->base + i;
609 if (arr->half)
610 ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
611 else
612 ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
613 }
614
615 /* indirect write is treated like a write to all array
616 * elements, since we don't know which one is actually
617 * written:
618 */
619 if (dst->flags & IR3_REG_RELATIV) {
620 for (i = 0; i < arr->length; i++) {
621 unsigned name = arr->base + i;
622 def(name, instr);
623 }
624 } else {
625 unsigned name = arr->base + dst->array.offset;
626 def(name, instr);
627 }
628 } else if (id->defn == instr) {
629 /* in scalar pass, we aren't considering virtual register
630 * classes, ie. if an instruction writes a vec2, then it
631 * defines two different scalar register names.
632 */
633 unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
634 for (unsigned i = 0; i < n; i++) {
635 unsigned name = scalar_name(ctx, instr, i);
636
637 /* split/collect instructions have duplicate names
638 * as real instructions, so they skip the hashtable:
639 */
640 if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
641 (instr->opc == OPC_META_COLLECT))) {
642 /* this is slightly annoying, we can't just use an
643 * integer on the stack
644 */
645 unsigned *key = ralloc(ctx->name_to_instr, unsigned);
646 *key = name;
647 debug_assert(!_mesa_hash_table_search(ctx->name_to_instr, key));
648 _mesa_hash_table_insert(ctx->name_to_instr, key, instr);
649 }
650
651 /* tex instructions actually have a wrmask, and
652 * don't touch masked out components. We can't do
653 * anything useful about that in the first pass,
654 * but in the scalar pass we can realize these
655 * registers are available:
656 */
657 if (ctx->scalar_pass && is_tex_or_prefetch(instr) &&
658 !(instr->regs[0]->wrmask & (1 << i)))
659 continue;
660
661 def(name, instr);
662
663 if ((instr->opc == OPC_META_INPUT) && first_non_input)
664 use(name, first_non_input);
665
666 if (is_high(instr)) {
667 ra_set_node_class(ctx->g, name,
668 ctx->set->high_classes[id->cls - HIGH_OFFSET]);
669 } else if (is_half(instr)) {
670 ra_set_node_class(ctx->g, name,
671 ctx->set->half_classes[id->cls - HALF_OFFSET]);
672 } else {
673 ra_set_node_class(ctx->g, name,
674 ctx->set->classes[id->cls]);
675 }
676 }
677 }
678 }
679
680 foreach_src (reg, instr) {
681 if (reg->flags & IR3_REG_ARRAY) {
682 struct ir3_array *arr =
683 ir3_lookup_array(ctx->ir, reg->array.id);
684 arr->start_ip = MIN2(arr->start_ip, instr->ip);
685 arr->end_ip = MAX2(arr->end_ip, instr->ip);
686
687 /* indirect read is treated like a read from all array
688 * elements, since we don't know which one is actually
689 * read:
690 */
691 if (reg->flags & IR3_REG_RELATIV) {
692 unsigned i;
693 for (i = 0; i < arr->length; i++) {
694 unsigned name = arr->base + i;
695 use(name, instr);
696 BITSET_SET(bd->use, name);
697 }
698 } else {
699 unsigned name = arr->base + reg->array.offset;
700 use(name, instr);
701 /* NOTE: arrays are not SSA so unconditionally
702 * set use bit:
703 */
704 BITSET_SET(bd->use, name);
705 debug_assert(reg->array.offset < arr->length);
706 }
707 } else if (ctx->scalar_pass) {
708 struct ir3_instruction *src = reg->instr;
709 /* skip things that aren't SSA: */
710 unsigned n = src ? dest_regs(src) : 0;
711
712 /* in scalar pass, we aren't considering virtual register
713 * classes, ie. if an instruction writes a vec2, then it
714 * defines two different scalar register names.
715 *
716 * We need to traverse up thru collect/split to find the
717 * actual non-meta instruction names for each of the
718 * components:
719 */
720 for (unsigned i = 0; i < n; i++) {
721 /* Need to filter out a couple special cases, ie.
722 * writes to a0.x or p0.x:
723 */
724 if (!writes_gpr(src))
725 continue;
726
727 /* split takes a src w/ wrmask potentially greater
728 * than 0x1, but it really only cares about a single
729 * component. This shows up in splits coming out of
730 * a tex instruction w/ wrmask=.z, for example.
731 */
732 if ((instr->opc == OPC_META_SPLIT) &&
733 !(i == instr->split.off))
734 continue;
735
736 use(scalar_name(ctx, src, i), instr);
737 }
738 } else if ((src = ssa(reg)) && writes_gpr(src)) {
739 unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
740 use(name, instr);
741 }
742 }
743 }
744 }
745
746 static bool
747 ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
748 {
749 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
750 bool progress = false;
751
752 foreach_block (block, &ctx->ir->block_list) {
753 struct ir3_ra_block_data *bd = block->data;
754
755 /* update livein: */
756 for (unsigned i = 0; i < bitset_words; i++) {
757 /* anything used but not def'd within a block is
758 * by definition a live value coming into the block:
759 */
760 BITSET_WORD new_livein =
761 (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));
762
763 if (new_livein & ~bd->livein[i]) {
764 bd->livein[i] |= new_livein;
765 progress = true;
766 }
767 }
768
769 /* update liveout: */
770 for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
771 struct ir3_block *succ = block->successors[j];
772 struct ir3_ra_block_data *succ_bd;
773
774 if (!succ)
775 continue;
776
777 succ_bd = succ->data;
778
779 for (unsigned i = 0; i < bitset_words; i++) {
780 /* add anything that is livein in a successor block
781 * to our liveout:
782 */
783 BITSET_WORD new_liveout =
784 (succ_bd->livein[i] & ~bd->liveout[i]);
785
786 if (new_liveout) {
787 bd->liveout[i] |= new_liveout;
788 progress = true;
789 }
790 }
791 }
792 }
793
794 return progress;
795 }
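/* In dataflow terms, the loop above iterates the usual backwards
 * liveness equations to a fixed point:
 *
 *   livein(b)  = use(b) | (liveout(b) & ~def(b))
 *   liveout(b) = OR over successors s of livein(s)
 */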
796
797 static void
798 print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
799 {
800 bool first = true;
801 debug_printf("RA: %s:", name);
802 for (unsigned i = 0; i < cnt; i++) {
803 if (BITSET_TEST(bs, i)) {
804 if (!first)
805 debug_printf(",");
806 debug_printf(" %04u", i);
807 first = false;
808 }
809 }
810 debug_printf("\n");
811 }
812
813 static void
814 ra_add_interference(struct ir3_ra_ctx *ctx)
815 {
816 struct ir3 *ir = ctx->ir;
817
818 /* initialize array live ranges: */
819 foreach_array (arr, &ir->array_list) {
820 arr->start_ip = ~0;
821 arr->end_ip = 0;
822 }
823
824 /* compute live ranges (use/def) on a block level, also updating
825 * block's def/use bitmasks (used below to calculate per-block
826 * livein/liveout):
827 */
828 foreach_block (block, &ir->block_list) {
829 ra_block_compute_live_ranges(ctx, block);
830 }
831
832 /* update per-block livein/liveout: */
833 while (ra_compute_livein_liveout(ctx)) {}
834
835 if (RA_DEBUG) {
836 d("AFTER LIVEIN/OUT:");
837 foreach_block (block, &ir->block_list) {
838 struct ir3_ra_block_data *bd = block->data;
839 d("block%u:", block_id(block));
840 print_bitset(" def", bd->def, ctx->alloc_count);
841 print_bitset(" use", bd->use, ctx->alloc_count);
842 print_bitset(" l/i", bd->livein, ctx->alloc_count);
843 print_bitset(" l/o", bd->liveout, ctx->alloc_count);
844 }
845 foreach_array (arr, &ir->array_list) {
846 d("array%u:", arr->id);
847 d(" length: %u", arr->length);
848 d(" start_ip: %u", arr->start_ip);
849 d(" end_ip: %u", arr->end_ip);
850 }
851 d("INSTRUCTION VREG NAMES:");
852 foreach_block (block, &ctx->ir->block_list) {
853 foreach_instr (instr, &block->instr_list) {
854 if (!ctx->instrd[instr->ip].defn)
855 continue;
856 if (!writes_gpr(instr))
857 continue;
858 di(instr, "%04u", scalar_name(ctx, instr, 0));
859 }
860 }
861 d("ARRAY VREG NAMES:");
862 foreach_array (arr, &ctx->ir->array_list) {
863 d("%04u: arr%u", arr->base, arr->id);
864 }
865 }
866
867 /* extend start/end ranges based on livein/liveout info from cfg: */
868 foreach_block (block, &ir->block_list) {
869 struct ir3_ra_block_data *bd = block->data;
870
871 for (unsigned i = 0; i < ctx->alloc_count; i++) {
872 if (BITSET_TEST(bd->livein, i)) {
873 ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
874 ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
875 }
876
877 if (BITSET_TEST(bd->liveout, i)) {
878 ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
879 ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
880 }
881 }
882
883 foreach_array (arr, &ctx->ir->array_list) {
884 for (unsigned i = 0; i < arr->length; i++) {
885 if (BITSET_TEST(bd->livein, i + arr->base)) {
886 arr->start_ip = MIN2(arr->start_ip, block->start_ip);
887 }
888 if (BITSET_TEST(bd->liveout, i + arr->base)) {
889 arr->end_ip = MAX2(arr->end_ip, block->end_ip);
890 }
891 }
892 }
893 }
894
895 /* need to fix things up to keep outputs live: */
896 struct ir3_instruction *out;
897 foreach_output (out, ir) {
898 unsigned name = ra_name(ctx, &ctx->instrd[out->ip]);
899 ctx->use[name] = ctx->instr_cnt;
900 }
901
902 for (unsigned i = 0; i < ctx->alloc_count; i++) {
903 for (unsigned j = 0; j < ctx->alloc_count; j++) {
904 if (intersects(ctx->def[i], ctx->use[i],
905 ctx->def[j], ctx->use[j])) {
906 ra_add_node_interference(ctx->g, i, j);
907 }
908 }
909 }
910 }
911
912 /* some instructions need fix-up if dst register is half precision: */
913 static void fixup_half_instr_dst(struct ir3_instruction *instr)
914 {
915 switch (opc_cat(instr->opc)) {
916 case 1: /* move instructions */
917 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
918 break;
919 case 3:
920 switch (instr->opc) {
921 case OPC_MAD_F32:
922 /* mad.f32 can have a half dst with full srcs (eg. mad.f32 hr0,
923 * r0.x, r0.y, r0.z), so only switch to mad.f16 if the srcs are half:
924 */
925 if (instr->regs[1]->flags & IR3_REG_HALF)
926 instr->opc = OPC_MAD_F16;
927 break;
928 case OPC_SEL_B32:
929 instr->opc = OPC_SEL_B16;
930 break;
931 case OPC_SEL_S32:
932 instr->opc = OPC_SEL_S16;
933 break;
934 case OPC_SEL_F32:
935 instr->opc = OPC_SEL_F16;
936 break;
937 case OPC_SAD_S32:
938 instr->opc = OPC_SAD_S16;
939 break;
940 /* instructions may already be fixed up: */
941 case OPC_MAD_F16:
942 case OPC_SEL_B16:
943 case OPC_SEL_S16:
944 case OPC_SEL_F16:
945 case OPC_SAD_S16:
946 break;
947 default:
948 assert(0);
949 break;
950 }
951 break;
952 case 4:
953 switch (instr->opc) {
954 case OPC_RSQ:
955 instr->opc = OPC_HRSQ;
956 break;
957 case OPC_LOG2:
958 instr->opc = OPC_HLOG2;
959 break;
960 case OPC_EXP2:
961 instr->opc = OPC_HEXP2;
962 break;
963 default:
964 break;
965 }
966 break;
967 case 5:
968 instr->cat5.type = half_type(instr->cat5.type);
969 break;
970 }
971 }
972 /* some instructions need fix-up if src register is half precision: */
973 static void fixup_half_instr_src(struct ir3_instruction *instr)
974 {
975 switch (instr->opc) {
976 case OPC_MOV:
977 instr->cat1.src_type = half_type(instr->cat1.src_type);
978 break;
979 default:
980 break;
981 }
982 }
983
984 /* NOTE: instr could be NULL for the IR3_REG_ARRAY case, for the first
985 * array access(es) which do not have any previous access to depend
986 * on from a scheduling point of view
987 */
988 static void
989 reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
990 struct ir3_instruction *instr)
991 {
992 struct ir3_ra_instr_data *id;
993
994 if (reg->flags & IR3_REG_ARRAY) {
995 struct ir3_array *arr =
996 ir3_lookup_array(ctx->ir, reg->array.id);
997 unsigned name = arr->base + reg->array.offset;
998 unsigned r = ra_get_node_reg(ctx->g, name);
999 unsigned num = ctx->set->ra_reg_to_gpr[r];
1000
1001 if (reg->flags & IR3_REG_RELATIV) {
1002 reg->array.offset = num;
1003 } else {
1004 reg->num = num;
1005 reg->flags &= ~IR3_REG_SSA;
1006 }
1007
1008 reg->flags &= ~IR3_REG_ARRAY;
1009 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
1010 unsigned first_component = 0;
1011
1012 /* Special case for tex instructions, which may use the wrmask
1013 * to mask off the first component(s). In the scalar pass,
1014 * this means the masked off component(s) are not def'd/use'd,
1015 * so we get a bogus value when we ask the register_allocate
1016 * algo to get the assigned reg for the unused/untouched
1017 * component. So we need to consider the first used component:
1018 */
1019 if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
1020 unsigned n = ffs(id->defn->regs[0]->wrmask);
1021 debug_assert(n > 0);
1022 first_component = n - 1;
1023 }
1024
1025 unsigned name = scalar_name(ctx, id->defn, first_component);
1026 unsigned r = ra_get_node_reg(ctx->g, name);
1027 unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
1028
1029 debug_assert(!(reg->flags & IR3_REG_RELATIV));
1030
1031 debug_assert(num >= first_component);
1032
1033 if (is_high(id->defn))
1034 num += FIRST_HIGH_REG;
1035
1036 reg->num = num - first_component;
1037
1038 reg->flags &= ~IR3_REG_SSA;
1039
1040 if (is_half(id->defn))
1041 reg->flags |= IR3_REG_HALF;
1042 }
1043 }
1044
1045 static void
1046 account_assignment(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1047 {
1048 struct ir3_ra_instr_data *id;
1049 struct ir3_register *dst = instr->regs[0];
1050 unsigned max;
1051
1052 if (is_high(instr))
1053 return;
1054
1055 if (dst->flags & IR3_REG_ARRAY) {
1056 struct ir3_array *arr =
1057 ir3_lookup_array(ctx->ir, dst->array.id);
1058 max = arr->reg + arr->length;
1059 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
1060 unsigned name = scalar_name(ctx, id->defn, 0);
1061 unsigned r = ra_get_node_reg(ctx->g, name);
1062 max = ctx->set->ra_reg_to_gpr[r] + id->off + dest_regs(id->defn);
1063 } else {
1064 return;
1065 }
1066
1067 if (is_half(instr)) {
1068 ctx->max_half_assigned = MAX2(ctx->max_half_assigned, max);
1069 } else {
1070 ctx->max_assigned = MAX2(ctx->max_assigned, max);
1071 }
1072 }
1073
1074 /* helper to determine which regs to assign in which pass: */
1075 static bool
1076 should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1077 {
1078 if ((instr->opc == OPC_META_SPLIT) ||
1079 (instr->opc == OPC_META_COLLECT))
1080 return !ctx->scalar_pass;
1081 return ctx->scalar_pass;
1082 }
1083
1084 static void
1085 ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
1086 {
1087 foreach_instr (instr, &block->instr_list) {
1088 struct ir3_register *reg;
1089
1090 if (writes_gpr(instr)) {
1091 account_assignment(ctx, instr);
1092 if (should_assign(ctx, instr)) {
1093 reg_assign(ctx, instr->regs[0], instr);
1094 if (instr->regs[0]->flags & IR3_REG_HALF)
1095 fixup_half_instr_dst(instr);
1096 }
1097 }
1098
1099 foreach_src_n (reg, n, instr) {
1100 struct ir3_instruction *src = reg->instr;
1101
1102 if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
1103 continue;
1104
1105 if (src && should_assign(ctx, instr))
1106 reg_assign(ctx, src->regs[0], src);
1107
1108 /* Note: reg->instr could be null for IR3_REG_ARRAY */
1109 if (src || (reg->flags & IR3_REG_ARRAY))
1110 reg_assign(ctx, instr->regs[n+1], src);
1111
1112 if (instr->regs[n+1]->flags & IR3_REG_HALF)
1113 fixup_half_instr_src(instr);
1114 }
1115 }
1116
1117 /* We need to pre-color outputs for the scalar pass in
1118 * ra_precolor_assigned(), so we need to actually assign
1119 * them in the first pass:
1120 */
1121 if (!ctx->scalar_pass) {
1122 struct ir3_instruction *in, *out;
1123
1124 foreach_input (in, ctx->ir) {
1125 reg_assign(ctx, in->regs[0], in);
1126 }
1127 foreach_output (out, ctx->ir) {
1128 reg_assign(ctx, out->regs[0], out);
1129 }
1130 }
1131 }
1132
1133 /* handle pre-colored registers. This includes "arrays" (which could be of
1134 * length 1, used for phi webs lowered to registers in nir), as well as
1135 * special shader input values that need to be pinned to certain registers.
1136 */
1137 static void
1138 ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
1139 {
1140 unsigned num_precolor = 0;
1141 for (unsigned i = 0; i < nprecolor; i++) {
1142 if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
1143 struct ir3_instruction *instr = precolor[i];
1144
1145 if (instr->regs[0]->num == INVALID_REG)
1146 continue;
1147
1148 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1149
1150 debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
1151
1152 /* only consider the first component: */
1153 if (id->off > 0)
1154 continue;
1155
1156 if (ctx->scalar_pass && !should_assign(ctx, instr))
1157 continue;
1158
1159 /* 'base' is in scalar (class 0) but we need to map that
1160 * to the conflicting register of the appropriate class (ie.
1161 * input could be vec2/vec3/etc)
1162 *
1163 * Note that the higher class (larger than scalar) regs
1164 * are setup to conflict with others in the same class,
1165 * so for example, R1 (scalar) is also the first component
1166 * of D1 (vec2/double):
1167 *
1168 * Single (base) | Double
1169 * --------------+---------------
1170 * R0 | D0
1171 * R1 | D0 D1
1172 * R2 | D1 D2
1173 * R3 | D2
1174 * .. and so on..
1175 */
1176 unsigned regid = instr->regs[0]->num;
1177 unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1178 unsigned name = ra_name(ctx, id);
1179 ra_set_node_reg(ctx->g, name, reg);
1180 num_precolor = MAX2(regid, num_precolor);
1181 }
1182 }
1183
1184 /* pre-assign array elements:
1185 *
1186 * TODO this is going to need some work for half-precision.. possibly
1187 * this is easier on a6xx, where we can just divide array size by two?
1188 * But on a5xx and earlier it will need to track two bases.
1189 */
1190 foreach_array (arr, &ctx->ir->array_list) {
1191 unsigned base = 0;
1192
1193 if (arr->end_ip == 0)
1194 continue;
1195
1196 /* figure out what else we conflict with which has already
1197 * been assigned:
1198 */
1199 retry:
1200 foreach_array (arr2, &ctx->ir->array_list) {
1201 if (arr2 == arr)
1202 break;
1203 if (arr2->end_ip == 0)
1204 continue;
1205 /* if it intersects with liverange AND register range.. */
1206 if (intersects(arr->start_ip, arr->end_ip,
1207 arr2->start_ip, arr2->end_ip) &&
1208 intersects(base, base + reg_size_for_array(arr),
1209 arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
1210 base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
1211 goto retry;
1212 }
1213 }
1214
1215 /* also need to not conflict with any pre-assigned inputs: */
1216 for (unsigned i = 0; i < nprecolor; i++) {
1217 struct ir3_instruction *instr = precolor[i];
1218
1219 if (!instr || (instr->flags & IR3_INSTR_UNUSED))
1220 continue;
1221
1222 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1223
1224 /* only consider the first component: */
1225 if (id->off > 0)
1226 continue;
1227
1228 unsigned name = ra_name(ctx, id);
1229 unsigned regid = instr->regs[0]->num;
1230
1231 /* Check if array intersects with liverange AND register
1232 * range of the input:
1233 */
1234 if (intersects(arr->start_ip, arr->end_ip,
1235 ctx->def[name], ctx->use[name]) &&
1236 intersects(base, base + reg_size_for_array(arr),
1237 regid, regid + class_sizes[id->cls])) {
1238 base = MAX2(base, regid + class_sizes[id->cls]);
1239 goto retry;
1240 }
1241 }
1242
1243 arr->reg = base;
1244
1245 for (unsigned i = 0; i < arr->length; i++) {
1246 unsigned name, reg;
1247
1248 if (arr->half) {
1249 /* We don't need to do this on generations older than a6xx,
1250 * since there's no conflict between full regs and half regs
1251 * on them.
1252 *
1253 * TODO presumably "base" could instead start from 0 for
1254 * half-reg arrays on older generations.
1255 */
1256 unsigned base_half = base * 2 + i;
1257 reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
1258 base = base_half / 2 + 1;
1259 } else {
1260 reg = ctx->set->gpr_to_ra_reg[0][base++];
1261 }
1262
1263 name = arr->base + i;
1264 ra_set_node_reg(ctx->g, name, reg);
1265 }
1266 }
1267
1268 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1269 foreach_array (arr, &ctx->ir->array_list) {
1270 unsigned first = arr->reg;
1271 unsigned last = arr->reg + arr->length - 1;
1272 debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
1273 (first >> 2), "xyzw"[first & 0x3],
1274 (last >> 2), "xyzw"[last & 0x3]);
1275 }
1276 }
1277 }
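/* In short, the array pre-coloring above packs arrays greedily upwards
 * from register 0 (in scalar units): 'base' is bumped past any
 * previously placed array, or pre-colored input, whose live range and
 * register range both overlap the candidate placement, retrying until a
 * conflict-free base is found.
 */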
1278
1279 static void
1280 precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1281 {
1282 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1283 unsigned n = dest_regs(instr);
1284 for (unsigned i = 0; i < n; i++) {
1285 /* tex instructions actually have a wrmask, and
1286 * don't touch masked out components. So we
1287 * shouldn't precolor them:
1288 */
1289 if (is_tex_or_prefetch(instr) &&
1290 !(instr->regs[0]->wrmask & (1 << i)))
1291 continue;
1292
1293 unsigned name = scalar_name(ctx, instr, i);
1294 unsigned regid = instr->regs[0]->num + i;
1295
1296 if (instr->regs[0]->flags & IR3_REG_HIGH)
1297 regid -= FIRST_HIGH_REG;
1298
1299 unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1300 ra_set_node_reg(ctx->g, name, vreg);
1301 }
1302 }
1303
1304 /* pre-color non-scalar registers based on the registers assigned in the
1305 * previous pass. Do this by actually looking at the fanout instructions.
1306 */
1307 static void
1308 ra_precolor_assigned(struct ir3_ra_ctx *ctx)
1309 {
1310 debug_assert(ctx->scalar_pass);
1311
1312 foreach_block (block, &ctx->ir->block_list) {
1313 foreach_instr (instr, &block->instr_list) {
1314
1315 if ((instr->opc != OPC_META_SPLIT) &&
1316 (instr->opc != OPC_META_COLLECT))
1317 continue;
1318
1319 precolor(ctx, instr);
1320
1321 struct ir3_register *src;
1322 foreach_src (src, instr) {
1323 if (!src->instr)
1324 continue;
1325 precolor(ctx, src->instr);
1326 }
1327 }
1328 }
1329 }
1330
1331 static int
1332 ra_alloc(struct ir3_ra_ctx *ctx)
1333 {
1334 if (!ra_allocate(ctx->g))
1335 return -1;
1336
1337 foreach_block (block, &ctx->ir->block_list) {
1338 ra_block_alloc(ctx, block);
1339 }
1340
1341 return 0;
1342 }
1343
1344 /* if we end up with split/collect instructions with non-matching src
1345 * and dest regs, that means something has gone wrong, which makes
1346 * this a pretty good sanity check.
1347 */
1348 static void
1349 ra_sanity_check(struct ir3 *ir)
1350 {
1351 foreach_block (block, &ir->block_list) {
1352 foreach_instr (instr, &block->instr_list) {
1353 if (instr->opc == OPC_META_SPLIT) {
1354 struct ir3_register *dst = instr->regs[0];
1355 struct ir3_register *src = instr->regs[1];
1356 debug_assert(dst->num == (src->num + instr->split.off));
1357 } else if (instr->opc == OPC_META_COLLECT) {
1358 struct ir3_register *dst = instr->regs[0];
1359 struct ir3_register *src;
1360
1361 foreach_src_n (src, n, instr) {
1362 debug_assert(dst->num == (src->num - n));
1363 }
1364 }
1365 }
1366 }
1367 }
1368
1369 /* Target is calculated in terms of half-regs (with a full reg
1370 * consisting of two half-regs).
1371 */
1372 static void
1373 ra_calc_merged_register_target(struct ir3_ra_ctx *ctx)
1374 {
1375 const unsigned vec4 = 2 * 4; // 8 half-regs
1376 unsigned t = MAX2(2 * ctx->max_assigned, ctx->max_half_assigned);
1377
1378 /* second RA pass may have saved some regs, let's try to reclaim
1379 * the benefit by adjusting the target downwards slightly:
1380 */
1381 if (ir3_has_latency_to_hide(ctx->ir)) {
1382 if (t > 8 * vec4) {
1383 t -= 2 * vec4;
1384 } else if (t > 6 * vec4) {
1385 t -= vec4;
1386 }
1387 }
1388
1389 ctx->max_target = t;
1390 }
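/* Worked example with illustrative numbers: if the first pass assigned
 * at most 28 full regs and 10 half regs, then t = MAX2(2 * 28, 10) = 56
 * half-regs (7 vec4's). If the shader has latency to hide, 56 > 6 * vec4
 * (ie. 48), so the target is trimmed by one vec4 down to 48 half-regs.
 */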
1391
1392 static int
1393 ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1394 unsigned nprecolor, bool scalar_pass, unsigned *target)
1395 {
1396 struct ir3_ra_ctx ctx = {
1397 .v = v,
1398 .ir = v->ir,
1399 .set = v->ir->compiler->set,
1400 .scalar_pass = scalar_pass,
1401 };
1402 int ret;
1403
1404 if (scalar_pass) {
1405 ctx.max_target = *target;
1406 }
1407
1408 ra_init(&ctx);
1409 ra_add_interference(&ctx);
1410 ra_precolor(&ctx, precolor, nprecolor);
1411 if (scalar_pass)
1412 ra_precolor_assigned(&ctx);
1413 ret = ra_alloc(&ctx);
1414 ra_destroy(&ctx);
1415
1416 /* In the first pass, calculate the target register usage for the
1417 * second (scalar) pass:
1418 */
1419 if (!scalar_pass) {
1420 /* TODO: round-robin support for pre-a6xx: */
1421 if (ctx.ir->compiler->gpu_id >= 600) {
1422 ra_calc_merged_register_target(&ctx);
1423 }
1424 *target = ctx.max_target;
1425 }
1426
1427 return ret;
1428 }
1429
1430 int
1431 ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1432 unsigned nprecolor)
1433 {
1434 unsigned target = 0;
1435 int ret;
1436
1437 /* First pass, assign the vecN (non-scalar) registers: */
1438 ret = ir3_ra_pass(v, precolor, nprecolor, false, &target);
1439 if (ret)
1440 return ret;
1441
1442 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1443 printf("AFTER RA (1st pass):\n");
1444 ir3_print(v->ir);
1445 }
1446
1447 /* Second pass, assign the scalar registers: */
1448 ret = ir3_ra_pass(v, precolor, nprecolor, true, &target);
1449 if (ret)
1450 return ret;
1451
1452 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1453 printf("AFTER RA (2nd pass):\n");
1454 ir3_print(v->ir);
1455 }
1456
1457 #ifdef DEBUG
1458 # define SANITY_CHECK DEBUG
1459 #else
1460 # define SANITY_CHECK 0
1461 #endif
1462 if (SANITY_CHECK)
1463 ra_sanity_check(v->ir);
1464
1465 return ret;
1466 }