freedreno/ir3: make input/output iterators declare cursor ptr
[mesa.git] / src / freedreno / ir3 / ir3_ra.c
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_math.h"
28 #include "util/register_allocate.h"
29 #include "util/ralloc.h"
30 #include "util/bitset.h"
31
32 #include "ir3.h"
33 #include "ir3_compiler.h"
34 #include "ir3_ra.h"
35
36
37 #ifdef DEBUG
38 #define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)
39 #else
40 #define RA_DEBUG 0
41 #endif
42 #define d(fmt, ...) do { if (RA_DEBUG) { \
43 printf("RA: "fmt"\n", ##__VA_ARGS__); \
44 } } while (0)
45
46 #define di(instr, fmt, ...) do { if (RA_DEBUG) { \
47 printf("RA: "fmt": ", ##__VA_ARGS__); \
48 ir3_print_instr(instr); \
49 } } while (0)
50
51 /*
52 * Register Assignment:
53 *
 54 * Uses the register_allocate util, which implements a graph coloring
55 * algo with interference classes. To handle the cases where we need
56 * consecutive registers (for example, texture sample instructions),
57 * we model these as larger (double/quad/etc) registers which conflict
58 * with the corresponding registers in other classes.
59 *
 60 * We additionally create classes for half-regs, which do not
 61 * conflict with the full-reg classes. We need at least sizes 1-4
 62 * (to deal w/ texture sample instructions that output to half-
63 * reg). At the moment we don't create the higher order half-reg
64 * classes as half-reg frequently does not have enough precision
65 * for texture coords at higher resolutions.
66 *
67 * There are some additional cases that we need to handle specially,
68 * as the graph coloring algo doesn't understand "partial writes".
69 * For example, a sequence like:
70 *
71 * add r0.z, ...
72 * sam (f32)(xy)r0.x, ...
73 * ...
74 * sam (f32)(xyzw)r0.w, r0.x, ... ; 3d texture, so r0.xyz are coord
75 *
76 * In this scenario, we treat r0.xyz as class size 3, which is written
77 * (from a use/def perspective) at the 'add' instruction and ignore the
78 * subsequent partial writes to r0.xy. So the 'add r0.z, ...' is the
79 * defining instruction, as it is the first to partially write r0.xyz.
80 *
81 * To address the fragmentation that this can potentially cause, a
82 * two pass register allocation is used. After the first pass the
83 * assignment of scalars is discarded, but the assignment of vecN (for
84 * N > 1) is used to pre-color in the second pass, which considers
85 * only scalars.
86 *
87 * Arrays of arbitrary size are handled via pre-coloring a consecutive
88 * sequence of registers. Additional scalar (single component) reg
89 * names are allocated starting at ctx->class_base[total_class_count]
90 * (see arr->base), which are pre-colored. In the use/def graph direct
91 * access is treated as a single element use/def, and indirect access
92 * is treated as use or def of all array elements. (Only the first
93 * def is tracked, in case of multiple indirect writes, etc.)
94 *
95 * TODO arrays that fit in one of the pre-defined class sizes should
96 * not need to be pre-colored, but instead could be given a normal
97 * vreg name. (Ignoring this for now since it is a good way to work
98 * out the kinks with arbitrary sized arrays.)
99 *
100 * TODO might be easier for debugging to split this into two passes,
101 * the first assigning vreg names in a way that we could ir3_print()
102 * the result.
103 */
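/* (Rough flow of a single pass, see ir3_ra_pass() at the bottom of this
 * file:  ra_init() -> ra_add_interference() -> ra_precolor() [plus
 * ra_precolor_assigned() in the scalar pass] -> ra_alloc() -> ra_destroy().)
 */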
104
105
106 static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);
107
108 static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
109 static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);
110
111 /* does it conflict? */
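/* (ranges are treated as half-open, ie. [start, end).. ranges that merely
 * touch do not conflict)
 */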
112 static inline bool
113 intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
114 {
115 return !((a_start >= b_end) || (b_start >= a_end));
116 }
117
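/* size of an array's footprint in full-reg slots.. for a half array, two
 * elements pack into each slot:
 */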
118 static unsigned
119 reg_size_for_array(struct ir3_array *arr)
120 {
121 if (arr->half)
122 return DIV_ROUND_UP(arr->length, 2);
123
124 return arr->length;
125 }
126
127 static bool
128 instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
129 {
130 if (a->flags & IR3_INSTR_UNUSED)
131 return false;
132 return (a->ip < b->ip);
133 }
134
135 static struct ir3_instruction *
136 get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
137 int *sz, int *off)
138 {
139 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
140 struct ir3_instruction *d = NULL;
141
142 if (ctx->scalar_pass) {
143 id->defn = instr;
144 id->off = 0;
145 id->sz = 1; /* considering things as N scalar regs now */
146 }
147
148 if (id->defn) {
149 *sz = id->sz;
150 *off = id->off;
151 return id->defn;
152 }
153
154 if (instr->opc == OPC_META_COLLECT) {
155 /* What about the case where the collect is a subset of an array?  We'd
156 * need to find the distance between where the actual array starts
157 * and the collect.. that probably doesn't happen currently.
158 */
159 int dsz, doff;
160
161 /* note: don't use foreach_ssa_src as this gets called once
162 * while assigning regs (which clears SSA flag)
163 */
164 foreach_src_n (src, n, instr) {
165 struct ir3_instruction *dd;
166 if (!src->instr)
167 continue;
168
169 dd = get_definer(ctx, src->instr, &dsz, &doff);
170
171 if ((!d) || instr_before(dd, d)) {
172 d = dd;
173 *sz = dsz;
174 *off = doff - n;
175 }
176 }
177
178 } else if (instr->cp.right || instr->cp.left) {
179 /* covers also the meta:fo case, which ends up w/ single
180 * scalar instructions for each component:
181 */
182 struct ir3_instruction *f = ir3_neighbor_first(instr);
183
184 /* by definition, the entire sequence forms one linked list
185 * of single scalar register nodes (even if some of them may
186 * be splits from a texture sample (for example) instr).  We
187 * just need to walk the list finding the first element of
188 * the group defined (lowest ip)
189 */
190 int cnt = 0;
191
192 /* need to skip over unused in the group: */
193 while (f && (f->flags & IR3_INSTR_UNUSED)) {
194 f = f->cp.right;
195 cnt++;
196 }
197
198 while (f) {
199 if ((!d) || instr_before(f, d))
200 d = f;
201 if (f == instr)
202 *off = cnt;
203 f = f->cp.right;
204 cnt++;
205 }
206
207 *sz = cnt;
208
209 } else {
210 /* second case is looking directly at the instruction which
211 * produces multiple values (eg, texture sample), rather
212 * than the split nodes that point back to that instruction.
213 * This isn't quite right, because it may be part of a larger
214 * group, such as:
215 *
216 * sam (f32)(xyzw)r0.x, ...
217 * add r1.x, ...
218 * add r1.y, ...
219 * sam (f32)(xyzw)r2.x, r0.w <-- (r0.w, r1.x, r1.y)
220 *
221 * need to come up with a better way to handle that case.
222 */
223 if (instr->address) {
224 *sz = instr->regs[0]->size;
225 } else {
226 *sz = util_last_bit(instr->regs[0]->wrmask);
227 }
228 *off = 0;
229 d = instr;
230 }
231
232 if (d->opc == OPC_META_SPLIT) {
233 struct ir3_instruction *dd;
234 int dsz, doff;
235
236 dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
237
238 /* by definition, should come before: */
239 debug_assert(instr_before(dd, d));
240
241 *sz = MAX2(*sz, dsz);
242
243 if (instr->opc == OPC_META_SPLIT)
244 *off = MAX2(*off, instr->split.off);
245
246 d = dd;
247 }
248
249 debug_assert(d->opc != OPC_META_SPLIT);
250
251 id->defn = d;
252 id->sz = *sz;
253 id->off = *off;
254
255 return d;
256 }
257
258 static void
259 ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
260 {
261 foreach_instr (instr, &block->instr_list) {
262 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
263 if (instr->regs_count == 0)
264 continue;
265 /* couple special cases: */
266 if (writes_addr0(instr) || writes_addr1(instr) || writes_pred(instr)) {
267 id->cls = -1;
268 } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
269 id->cls = total_class_count;
270 } else {
271 /* and the normal case: */
272 id->defn = get_definer(ctx, instr, &id->sz, &id->off);
273 id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
274
275 /* this is a bit of duct-tape.. if we have a scenario like:
276 *
277 * sam (f32)(x) out.x, ...
278 * sam (f32)(x) out.y, ...
279 *
280 * Then the fanout/split meta instructions for the two different
281 * tex instructions end up grouped as left/right neighbors. The
282 * upshot is that when you get_definer() on one of the meta:fo's
283 * you get definer as the first sam with sz=2, but when you call
284 * get_definer() on either of the sam's you get itself as the
285 * definer with sz=1.
286 *
287 * (We actually avoid this scenario exactly, the neighbor links
288 * prevent one of the output mov's from being eliminated, so this
289 * hack should be enough. But probably we need to rethink how we
290 * find the "defining" instruction.)
291 *
292 * TODO how do we figure out offset properly...
293 */
294 if (id->defn != instr) {
295 struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
296 if (did->sz < id->sz) {
297 did->sz = id->sz;
298 did->cls = id->cls;
299 }
300 }
301 }
302 }
303 }
304
305 /* give each instruction a name (and ip), and count up the # of names
306 * of each class
307 */
308 static void
309 ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
310 {
311 foreach_instr (instr, &block->instr_list) {
312 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
313
314 #ifdef DEBUG
315 instr->name = ~0;
316 #endif
317
318 ctx->instr_cnt++;
319
320 if (!writes_gpr(instr))
321 continue;
322
323 if (id->defn != instr)
324 continue;
325
326 /* In scalar pass, collect/split don't get their own names,
327 * but instead inherit them from their src(s):
328 *
329 * Possibly we don't need this because of scalar_name(), but
330 * it does make the ir3_print() dumps easier to read.
331 */
332 if (ctx->scalar_pass) {
333 if (instr->opc == OPC_META_SPLIT) {
334 instr->name = instr->regs[1]->instr->name + instr->split.off;
335 continue;
336 }
337
338 if (instr->opc == OPC_META_COLLECT) {
339 instr->name = instr->regs[1]->instr->name;
340 continue;
341 }
342 }
343
344 /* arrays which don't fit in one of the pre-defined class
345 * sizes are pre-colored:
346 */
347 if ((id->cls >= 0) && (id->cls < total_class_count)) {
348 /* in the scalar pass, we generate a name for each
349 * scalar component, instr->name is the name of the
350 * first component.
351 */
352 unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
353 instr->name = ctx->class_alloc_count[id->cls];
354 ctx->class_alloc_count[id->cls] += n;
355 ctx->alloc_count += n;
356 }
357 }
358 }
359
360 /**
361 * Set a value for max register target.
362 *
363 * Currently this just rounds up to a multiple of full-vec4 (ie. the
364 * granularity that we configure the hw for.. there is no point in
365 * using r3.x if you aren't going to make r3.yzw available). But
366 * in reality there seem to be multiple thresholds that affect the
367 * number of waves.. and we should round up the target to the next
368 * threshold when we round-robin registers, to give postsched more
369 * options. When we understand that better, this is where we'd
370 * implement that.
371 */
372 static void
373 ra_set_register_target(struct ir3_ra_ctx *ctx, unsigned max_target)
374 {
375 const unsigned hvec4 = 4;
376 const unsigned vec4 = 2 * hvec4;
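/* note: the target (and the live counts below) seem to be in units of
 * half regs, ie. live_size() counts a half reg as 1 and a full reg as 2,
 * so a full vec4 is 8:
 */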
377
378 ctx->max_target = align(max_target, vec4);
379
380 d("New max_target=%u", ctx->max_target);
381 }
382
383 static int
384 pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
385 {
386 for (unsigned i = min; i <= max; i++) {
387 if (BITSET_TEST(regs, i)) {
388 return i;
389 }
390 }
391 return -1;
392 }
393
394 static int
395 pick_in_range_rev(BITSET_WORD *regs, int min, int max)
396 {
397 for (int i = max; i >= min; i--) {
398 if (BITSET_TEST(regs, i)) {
399 return i;
400 }
401 }
402 return -1;
403 }
404
405 /* register selector for the a6xx+ merged register file: */
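/* (in the merged register file, half regs share the same underlying
 * storage as full regs, so the half and full classes compete for the
 * same physical space.. hence one selector handles both)
 */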
406 static unsigned int
407 ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
408 {
409 struct ir3_ra_ctx *ctx = data;
410 unsigned int class = ra_get_node_class(ctx->g, n);
411 bool half, high;
412 int sz = ra_class_to_size(class, &half, &high);
413
414 assert (sz > 0);
415
416 /* dimensions within the register class: */
417 unsigned max_target, start;
418
419 /* the regs bitset will include *all* of the virtual regs, but we lay
420 * out the different classes consecutively in the virtual register
421 * space. So we just need to think about the base offset of a given
422 * class within the virtual register space, and offset the register
423 * space we search within by that base offset.
424 */
425 unsigned base;
426
427 /* TODO I think eventually we want to round-robin in vector pass
428 * as well, but needs some more work to calculate # of live vals
429 * for this. (Maybe with some work, we could just figure out
430 * the scalar target and use that, since that is what we care
431 * about in the end.. but that would mean setting up use-def/
432 * liveranges for scalar pass before doing vector pass.)
433 *
434 * For now, in the vector class, just move assignments for scalar
435 * vals higher to hopefully prevent them from limiting where vecN
436 * values can be placed. Since the scalar values are re-assigned
437 * in the 2nd pass, we don't really care where they end up in the
438 * vector pass.
439 */
440 if (!ctx->scalar_pass) {
441 base = ctx->set->gpr_to_ra_reg[class][0];
442 if (high) {
443 max_target = HIGH_CLASS_REGS(class - HIGH_OFFSET);
444 } else if (half) {
445 max_target = HALF_CLASS_REGS(class - HALF_OFFSET);
446 } else {
447 max_target = CLASS_REGS(class);
448 }
449
450 if ((sz == 1) && !high) {
451 return pick_in_range_rev(regs, base, base + max_target);
452 } else {
453 return pick_in_range(regs, base, base + max_target);
454 }
455 } else {
456 assert(sz == 1);
457 }
458
459 /* NOTE: this is only used in scalar pass, so the register
460 * class will be one of the scalar classes (ie. idx==0):
461 */
462 base = ctx->set->gpr_to_ra_reg[class][0];
463 if (high) {
464 max_target = HIGH_CLASS_REGS(0);
465 start = 0;
466 } else if (half) {
467 max_target = ctx->max_target;
468 start = ctx->start_search_reg;
469 } else {
470 max_target = ctx->max_target / 2;
471 start = ctx->start_search_reg;
472 }
473
474 /* For cat4 instructions, if the src reg is already assigned and
475 * avail to pick, use it.  This doesn't introduce unnecessary
476 * dependencies, and it potentially avoids needing (ss) syncs
477 * for write after read hazards:
478 */
479 struct ir3_instruction *instr = name_to_instr(ctx, n);
480 if (is_sfu(instr)) {
481 struct ir3_register *src = instr->regs[1];
482 int src_n;
483
484 if ((src->flags & IR3_REG_ARRAY) && !(src->flags & IR3_REG_RELATIV)) {
485 struct ir3_array *arr = ir3_lookup_array(ctx->ir, src->array.id);
486 src_n = arr->base + src->array.offset;
487 } else {
488 src_n = scalar_name(ctx, src->instr, 0);
489 }
490
491 unsigned reg = ra_get_node_reg(ctx->g, src_n);
492
493 /* Check if the src register has been assigned yet: */
494 if (reg != NO_REG) {
495 if (BITSET_TEST(regs, reg)) {
496 return reg;
497 }
498 }
499 }
500
501 int r = pick_in_range(regs, base + start, base + max_target);
502 if (r < 0) {
503 /* wrap-around: */
504 r = pick_in_range(regs, base, base + start);
505 }
506
507 if (r < 0) {
508 /* overflow, we need to increase max_target: */
509 ra_set_register_target(ctx, ctx->max_target + 1);
510 return ra_select_reg_merged(n, regs, data);
511 }
512
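/* update the round-robin start position for the next search..
 * start_search_reg is in half-reg units, hence the *2 when the
 * assignment came from the full scalar class:
 */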
513 if (class == ctx->set->half_classes[0]) {
514 int n = r - base;
515 ctx->start_search_reg = (n + 1) % ctx->max_target;
516 } else if (class == ctx->set->classes[0]) {
517 int n = (r - base) * 2;
518 ctx->start_search_reg = (n + 1) % ctx->max_target;
519 }
520
521 return r;
522 }
523
524 static void
525 ra_init(struct ir3_ra_ctx *ctx)
526 {
527 unsigned n, base;
528
529 ir3_clear_mark(ctx->ir);
530 n = ir3_count_instructions_ra(ctx->ir);
531
532 ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
533
534 foreach_block (block, &ctx->ir->block_list) {
535 ra_block_find_definers(ctx, block);
536 }
537
538 foreach_block (block, &ctx->ir->block_list) {
539 ra_block_name_instructions(ctx, block);
540 }
541
542 /* figure out the base register name for each class. The
543 * actual ra name is class_base[cls] + instr->name;
544 */
545 ctx->class_base[0] = 0;
546 for (unsigned i = 1; i <= total_class_count; i++) {
547 ctx->class_base[i] = ctx->class_base[i-1] +
548 ctx->class_alloc_count[i-1];
549 }
550
551 /* and vreg names for array elements: */
552 base = ctx->class_base[total_class_count];
553 foreach_array (arr, &ctx->ir->array_list) {
554 arr->base = base;
555 ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
556 base += reg_size_for_array(arr);
557 }
558 ctx->alloc_count += ctx->class_alloc_count[total_class_count];
559
560 /* Add vreg names for r0.xyz */
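/* (these get pre-colored to the first three (half-)regs in
 * ra_add_interference(), and are used to add interference so that
 * writemask-trimmed tex destinations can't land before r0.x / hr0.x,
 * see the is_tex_or_prefetch() handling in ra_block_compute_live_ranges())
 */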
561 ctx->r0_xyz_nodes = ctx->alloc_count;
562 ctx->alloc_count += 3;
563 ctx->hr0_xyz_nodes = ctx->alloc_count;
564 ctx->alloc_count += 3;
565
566 ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
567 ralloc_steal(ctx->g, ctx->instrd);
568 ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
569 ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
570
571 /* TODO add selector callback for split (pre-a6xx) register file: */
572 if (ctx->ir->compiler->gpu_id >= 600) {
573 ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);
574
575 if (ctx->scalar_pass) {
576 ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
577 _mesa_hash_int, _mesa_key_int_equal);
578 }
579 }
580 }
581
582 /* Map the name back to instruction: */
583 static struct ir3_instruction *
584 name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
585 {
586 assert(!name_is_array(ctx, name));
587 struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
588 if (entry)
589 return entry->data;
590 unreachable("invalid instr name");
591 return NULL;
592 }
593
594 static bool
595 name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
596 {
597 return name >= ctx->class_base[total_class_count];
598 }
599
600 static struct ir3_array *
601 name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
602 {
603 assert(name_is_array(ctx, name));
604 foreach_array (arr, &ctx->ir->array_list) {
605 unsigned sz = reg_size_for_array(arr);
606 if (name < (arr->base + sz))
607 return arr;
608 }
609 unreachable("invalid array name");
610 return NULL;
611 }
612
613 static void
614 ra_destroy(struct ir3_ra_ctx *ctx)
615 {
616 ralloc_free(ctx->g);
617 }
618
619 static void
620 __def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
621 struct ir3_instruction *instr)
622 {
623 debug_assert(name < ctx->alloc_count);
624
625 /* split/collect do not actually define any real value */
626 if ((instr->opc == OPC_META_SPLIT) || (instr->opc == OPC_META_COLLECT))
627 return;
628
629 /* defined on first write: */
630 if (!ctx->def[name])
631 ctx->def[name] = instr->ip;
632 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
633 BITSET_SET(bd->def, name);
634 }
635
636 static void
637 __use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
638 struct ir3_instruction *instr)
639 {
640 debug_assert(name < ctx->alloc_count);
641 ctx->use[name] = MAX2(ctx->use[name], instr->ip);
642 if (!BITSET_TEST(bd->def, name))
643 BITSET_SET(bd->use, name);
644 }
645
646 static void
647 ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
648 {
649 struct ir3_ra_block_data *bd;
650 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
651
652 #define def(name, instr) __def(ctx, bd, name, instr)
653 #define use(name, instr) __use(ctx, bd, name, instr)
654
655 bd = rzalloc(ctx->g, struct ir3_ra_block_data);
656
657 bd->def = rzalloc_array(bd, BITSET_WORD, bitset_words);
658 bd->use = rzalloc_array(bd, BITSET_WORD, bitset_words);
659 bd->livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
660 bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
661
662 block->data = bd;
663
664 struct ir3_instruction *first_non_input = NULL;
665 foreach_instr (instr, &block->instr_list) {
666 if (instr->opc != OPC_META_INPUT) {
667 first_non_input = instr;
668 break;
669 }
670 }
671
672 foreach_instr (instr, &block->instr_list) {
673 foreach_def (name, ctx, instr) {
674 if (name_is_array(ctx, name)) {
675 struct ir3_array *arr = name_to_array(ctx, name);
676
677 arr->start_ip = MIN2(arr->start_ip, instr->ip);
678 arr->end_ip = MAX2(arr->end_ip, instr->ip);
679
680 for (unsigned i = 0; i < arr->length; i++) {
681 unsigned name = arr->base + i;
682 if (arr->half)
683 ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
684 else
685 ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
686 }
687 } else {
688 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
689 if (is_high(instr)) {
690 ra_set_node_class(ctx->g, name,
691 ctx->set->high_classes[id->cls - HIGH_OFFSET]);
692 } else if (is_half(instr)) {
693 ra_set_node_class(ctx->g, name,
694 ctx->set->half_classes[id->cls - HALF_OFFSET]);
695 } else {
696 ra_set_node_class(ctx->g, name,
697 ctx->set->classes[id->cls]);
698 }
699 }
700
701 def(name, instr);
702
703 if ((instr->opc == OPC_META_INPUT) && first_non_input)
704 use(name, first_non_input);
705
706 /* Texture instructions with writemasks can be treated as smaller
707 * vectors (or just scalars!) to allocate knowing that the
708 * masked-out regs won't be written, but we need to make sure that
709 * the start of the vector doesn't come before the first register
710 * or we'll wrap.
711 */
712 if (is_tex_or_prefetch(instr)) {
713 int writemask_skipped_regs = ffs(instr->regs[0]->wrmask) - 1;
714 int r0_xyz = (instr->regs[0]->flags & IR3_REG_HALF) ?
715 ctx->hr0_xyz_nodes : ctx->r0_xyz_nodes;
716 for (int i = 0; i < writemask_skipped_regs; i++)
717 ra_add_node_interference(ctx->g, name, r0_xyz + i);
718 }
719 }
720
721 foreach_use (name, ctx, instr) {
722 if (name_is_array(ctx, name)) {
723 struct ir3_array *arr = name_to_array(ctx, name);
724
725 arr->start_ip = MIN2(arr->start_ip, instr->ip);
726 arr->end_ip = MAX2(arr->end_ip, instr->ip);
727
728 /* NOTE: arrays are not SSA so unconditionally
729 * set use bit:
730 */
731 BITSET_SET(bd->use, name);
732 }
733
734 use(name, instr);
735 }
736
737 foreach_name (name, ctx, instr) {
738 /* split/collect instructions have duplicate names
739 * as real instructions, so they skip the hashtable:
740 */
741 if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
742 (instr->opc == OPC_META_COLLECT))) {
743 /* this is slightly annoying, we can't just use an
744 * integer on the stack
745 */
746 unsigned *key = ralloc(ctx->name_to_instr, unsigned);
747 *key = name;
748 debug_assert(!_mesa_hash_table_search(ctx->name_to_instr, key));
749 _mesa_hash_table_insert(ctx->name_to_instr, key, instr);
750 }
751 }
752 }
753 }
754
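/* Standard iterative live-in/live-out dataflow.. returns true if anything
 * changed, so the caller keeps calling it until we hit a fixed point:
 */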
755 static bool
756 ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
757 {
758 unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
759 bool progress = false;
760
761 foreach_block (block, &ctx->ir->block_list) {
762 struct ir3_ra_block_data *bd = block->data;
763
764 /* update livein: */
765 for (unsigned i = 0; i < bitset_words; i++) {
766 /* anything used but not def'd within a block is
767 * by definition a live value coming into the block:
768 */
769 BITSET_WORD new_livein =
770 (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));
771
772 if (new_livein & ~bd->livein[i]) {
773 bd->livein[i] |= new_livein;
774 progress = true;
775 }
776 }
777
778 /* update liveout: */
779 for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
780 struct ir3_block *succ = block->successors[j];
781 struct ir3_ra_block_data *succ_bd;
782
783 if (!succ)
784 continue;
785
786 succ_bd = succ->data;
787
788 for (unsigned i = 0; i < bitset_words; i++) {
789 /* add anything that is livein in a successor block
790 * to our liveout:
791 */
792 BITSET_WORD new_liveout =
793 (succ_bd->livein[i] & ~bd->liveout[i]);
794
795 if (new_liveout) {
796 bd->liveout[i] |= new_liveout;
797 progress = true;
798 }
799 }
800 }
801 }
802
803 return progress;
804 }
805
806 static void
807 print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
808 {
809 bool first = true;
810 debug_printf("RA: %s:", name);
811 for (unsigned i = 0; i < cnt; i++) {
812 if (BITSET_TEST(bs, i)) {
813 if (!first)
814 debug_printf(",");
815 debug_printf(" %04u", i);
816 first = false;
817 }
818 }
819 debug_printf("\n");
820 }
821
822 /* size of one component of instruction result, ie. half vs full: */
823 static unsigned
824 live_size(struct ir3_instruction *instr)
825 {
826 if (is_half(instr)) {
827 return 1;
828 } else if (is_high(instr)) {
829 /* doesn't count towards footprint */
830 return 0;
831 } else {
832 return 2;
833 }
834 }
835
836 static unsigned
837 name_size(struct ir3_ra_ctx *ctx, unsigned name)
838 {
839 if (name_is_array(ctx, name)) {
840 struct ir3_array *arr = name_to_array(ctx, name);
841 return arr->half ? 1 : 2;
842 } else {
843 struct ir3_instruction *instr = name_to_instr(ctx, name);
844 /* in scalar pass, each name represents one scalar value,
845 * half or full precision
846 */
847 return live_size(instr);
848 }
849 }
850
851 static unsigned
852 ra_calc_block_live_values(struct ir3_ra_ctx *ctx, struct ir3_block *block)
853 {
854 struct ir3_ra_block_data *bd = block->data;
855 unsigned name;
856
857 assert(ctx->name_to_instr);
858
859 /* TODO this gets a bit more complicated in non-scalar pass.. but
860 * possibly a lowball estimate is fine to start with if we do
861 * round-robin in non-scalar pass? Maybe we just want to handle
862 * that in a different fxn?
863 */
864 assert(ctx->scalar_pass);
865
866 BITSET_WORD *live =
867 rzalloc_array(bd, BITSET_WORD, BITSET_WORDS(ctx->alloc_count));
868
869 /* Add the live input values: */
870 unsigned livein = 0;
871 BITSET_FOREACH_SET (name, bd->livein, ctx->alloc_count) {
872 livein += name_size(ctx, name);
873 BITSET_SET(live, name);
874 }
875
876 d("---------------------");
877 d("block%u: LIVEIN: %u", block_id(block), livein);
878
879 unsigned max = livein;
880 int cur_live = max;
881
882 /* Now that we know the live inputs to the block, iterate the
883 * instructions adjusting the current # of live values as we
884 * see their last use:
885 */
886 foreach_instr (instr, &block->instr_list) {
887 if (RA_DEBUG)
888 print_bitset("LIVE", live, ctx->alloc_count);
889 di(instr, "CALC");
890
891 unsigned new_live = 0; /* newly live values */
892 unsigned new_dead = 0; /* newly no-longer live values */
893 unsigned next_dead = 0; /* newly dead following this instr */
894
895 foreach_def (name, ctx, instr) {
896 /* NOTE: checking ctx->def filters out things like split/
897 * collect which are just redefining existing live names
898 * or array writes to already live array elements:
899 */
900 if (ctx->def[name] != instr->ip)
901 continue;
902 new_live += live_size(instr);
903 d("NEW_LIVE: %u (new_live=%u, use=%u)", name, new_live, ctx->use[name]);
904 BITSET_SET(live, name);
905 /* There can be cases where this is *also* the last use
906 * of a value, for example instructions that write multiple
907 * values, only some of which are used. These values are
908 * dead *after* (rather than during) this instruction.
909 */
910 if (ctx->use[name] != instr->ip)
911 continue;
912 next_dead += live_size(instr);
913 d("NEXT_DEAD: %u (next_dead=%u)", name, next_dead);
914 BITSET_CLEAR(live, name);
915 }
916
917 /* To be more resilient against special cases where liverange
918 * is extended (like first_non_input), rather than using the
919 * foreach_use() iterator, we iterate the current live values
920 * instead:
921 */
922 BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
923 /* Is this the last use? */
924 if (ctx->use[name] != instr->ip)
925 continue;
926 new_dead += name_size(ctx, name);
927 d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
928 BITSET_CLEAR(live, name);
929 }
930
931 cur_live += new_live;
932 cur_live -= new_dead;
933
934 assert(cur_live >= 0);
935 d("CUR_LIVE: %u", cur_live);
936
937 max = MAX2(max, cur_live);
938
939 /* account for written values which are not used later,
940 * but after updating max (since they are live for one
941 * cycle)
942 */
943 cur_live -= next_dead;
944 assert(cur_live >= 0);
945
946 if (RA_DEBUG) {
947 unsigned cnt = 0;
948 BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
949 cnt += name_size(ctx, name);
950 }
951 assert(cur_live == cnt);
952 }
953 }
954
955 d("block%u max=%u", block_id(block), max);
956
957 /* the remaining live should match liveout (for extra sanity testing): */
958 if (RA_DEBUG) {
959 unsigned new_dead = 0;
960 BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
961 /* Is this the last use? */
962 if (ctx->use[name] != block->end_ip)
963 continue;
964 new_dead += name_size(ctx, name);
965 d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
966 BITSET_CLEAR(live, name);
967 }
968 unsigned liveout = 0;
969 BITSET_FOREACH_SET (name, bd->liveout, ctx->alloc_count) {
970 liveout += name_size(ctx, name);
971 BITSET_CLEAR(live, name);
972 }
973
974 if (cur_live != liveout) {
975 print_bitset("LEAKED", live, ctx->alloc_count);
976 /* TODO there are a few edge cases where live-range extension
977 * tells us a value is livein, but not used by the block or
978 * liveout for the block. Possibly a bug in the liverange
979 * extension. But for now leave the assert disabled:
980 assert(cur_live == liveout);
981 */
982 }
983 }
984
985 ralloc_free(live);
986
987 return max;
988 }
989
990 static unsigned
991 ra_calc_max_live_values(struct ir3_ra_ctx *ctx)
992 {
993 unsigned max = 0;
994
995 foreach_block (block, &ctx->ir->block_list) {
996 unsigned block_live = ra_calc_block_live_values(ctx, block);
997 max = MAX2(max, block_live);
998 }
999
1000 return max;
1001 }
1002
1003 static void
1004 ra_add_interference(struct ir3_ra_ctx *ctx)
1005 {
1006 struct ir3 *ir = ctx->ir;
1007
1008 /* initialize array live ranges: */
1009 foreach_array (arr, &ir->array_list) {
1010 arr->start_ip = ~0;
1011 arr->end_ip = 0;
1012 }
1013
1014
1015 /* set up the r0.xyz precolor regs. */
1016 for (int i = 0; i < 3; i++) {
1017 ra_set_node_reg(ctx->g, ctx->r0_xyz_nodes + i, i);
1018 ra_set_node_reg(ctx->g, ctx->hr0_xyz_nodes + i,
1019 ctx->set->first_half_reg + i);
1020 }
1021
1022 /* compute live ranges (use/def) on a block level, also updating
1023 * block's def/use bitmasks (used below to calculate per-block
1024 * livein/liveout):
1025 */
1026 foreach_block (block, &ir->block_list) {
1027 ra_block_compute_live_ranges(ctx, block);
1028 }
1029
1030 /* update per-block livein/liveout: */
1031 while (ra_compute_livein_liveout(ctx)) {}
1032
1033 if (RA_DEBUG) {
1034 d("AFTER LIVEIN/OUT:");
1035 foreach_block (block, &ir->block_list) {
1036 struct ir3_ra_block_data *bd = block->data;
1037 d("block%u:", block_id(block));
1038 print_bitset(" def", bd->def, ctx->alloc_count);
1039 print_bitset(" use", bd->use, ctx->alloc_count);
1040 print_bitset(" l/i", bd->livein, ctx->alloc_count);
1041 print_bitset(" l/o", bd->liveout, ctx->alloc_count);
1042 }
1043 foreach_array (arr, &ir->array_list) {
1044 d("array%u:", arr->id);
1045 d(" length: %u", arr->length);
1046 d(" start_ip: %u", arr->start_ip);
1047 d(" end_ip: %u", arr->end_ip);
1048 }
1049 d("INSTRUCTION VREG NAMES:");
1050 foreach_block (block, &ctx->ir->block_list) {
1051 foreach_instr (instr, &block->instr_list) {
1052 if (!ctx->instrd[instr->ip].defn)
1053 continue;
1054 if (!writes_gpr(instr))
1055 continue;
1056 di(instr, "%04u", scalar_name(ctx, instr, 0));
1057 }
1058 }
1059 d("ARRAY VREG NAMES:");
1060 foreach_array (arr, &ctx->ir->array_list) {
1061 d("%04u: arr%u", arr->base, arr->id);
1062 }
1063 }
1064
1065 /* extend start/end ranges based on livein/liveout info from cfg: */
1066 foreach_block (block, &ir->block_list) {
1067 struct ir3_ra_block_data *bd = block->data;
1068
1069 for (unsigned i = 0; i < ctx->alloc_count; i++) {
1070 if (BITSET_TEST(bd->livein, i)) {
1071 ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
1072 ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
1073 }
1074
1075 if (BITSET_TEST(bd->liveout, i)) {
1076 ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
1077 ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
1078 }
1079 }
1080
1081 foreach_array (arr, &ctx->ir->array_list) {
1082 for (unsigned i = 0; i < arr->length; i++) {
1083 if (BITSET_TEST(bd->livein, i + arr->base)) {
1084 arr->start_ip = MIN2(arr->start_ip, block->start_ip);
1085 }
1086 if (BITSET_TEST(bd->liveout, i + arr->base)) {
1087 arr->end_ip = MAX2(arr->end_ip, block->end_ip);
1088 }
1089 }
1090 }
1091 }
1092
1093 if (ctx->name_to_instr) {
1094 unsigned max = ra_calc_max_live_values(ctx);
1095 ra_set_register_target(ctx, max);
1096 }
1097
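/* finally, add an interference edge between any two names whose def/use
 * ranges overlap:
 */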
1098 for (unsigned i = 0; i < ctx->alloc_count; i++) {
1099 for (unsigned j = 0; j < ctx->alloc_count; j++) {
1100 if (intersects(ctx->def[i], ctx->use[i],
1101 ctx->def[j], ctx->use[j])) {
1102 ra_add_node_interference(ctx->g, i, j);
1103 }
1104 }
1105 }
1106 }
1107
1108 /* some instructions need fix-up if dst register is half precision: */
1109 static void fixup_half_instr_dst(struct ir3_instruction *instr)
1110 {
1111 switch (opc_cat(instr->opc)) {
1112 case 1: /* move instructions */
1113 instr->cat1.dst_type = half_type(instr->cat1.dst_type);
1114 break;
1115 case 4:
1116 switch (instr->opc) {
1117 case OPC_RSQ:
1118 instr->opc = OPC_HRSQ;
1119 break;
1120 case OPC_LOG2:
1121 instr->opc = OPC_HLOG2;
1122 break;
1123 case OPC_EXP2:
1124 instr->opc = OPC_HEXP2;
1125 break;
1126 default:
1127 break;
1128 }
1129 break;
1130 case 5:
1131 instr->cat5.type = half_type(instr->cat5.type);
1132 break;
1133 }
1134 }
1135 /* some instructions need fix-up if src register is half precision: */
1136 static void fixup_half_instr_src(struct ir3_instruction *instr)
1137 {
1138 switch (instr->opc) {
1139 case OPC_MOV:
1140 instr->cat1.src_type = half_type(instr->cat1.src_type);
1141 break;
1142 case OPC_MAD_F32:
1143 instr->opc = OPC_MAD_F16;
1144 break;
1145 case OPC_SEL_B32:
1146 instr->opc = OPC_SEL_B16;
1147 break;
1148 case OPC_SEL_S32:
1149 instr->opc = OPC_SEL_S16;
1150 break;
1151 case OPC_SEL_F32:
1152 instr->opc = OPC_SEL_F16;
1153 break;
1154 case OPC_SAD_S32:
1155 instr->opc = OPC_SAD_S16;
1156 break;
1157 default:
1158 break;
1159 }
1160 }
1161
1162 /* NOTE: instr could be NULL for the IR3_REG_ARRAY case, for the first
1163 * array access(es) which do not have any previous access to depend
1164 * on from a scheduling point of view
1165 */
1166 static void
1167 reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
1168 struct ir3_instruction *instr)
1169 {
1170 struct ir3_ra_instr_data *id;
1171
1172 if (reg->flags & IR3_REG_ARRAY) {
1173 struct ir3_array *arr =
1174 ir3_lookup_array(ctx->ir, reg->array.id);
1175 unsigned name = arr->base + reg->array.offset;
1176 unsigned r = ra_get_node_reg(ctx->g, name);
1177 unsigned num = ctx->set->ra_reg_to_gpr[r];
1178
1179 if (reg->flags & IR3_REG_RELATIV) {
1180 reg->array.offset = num;
1181 } else {
1182 reg->num = num;
1183 reg->flags &= ~IR3_REG_SSA;
1184 }
1185
1186 reg->flags &= ~IR3_REG_ARRAY;
1187 } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
1188 unsigned first_component = 0;
1189
1190 /* Special case for tex instructions, which may use the wrmask
1191 * to mask off the first component(s). In the scalar pass,
1192 * this means the masked off component(s) are not def'd/use'd,
1193 * so we get a bogus value when we ask the register_allocate
1194 * algo to get the assigned reg for the unused/untouched
1195 * component. So we need to consider the first used component:
1196 */
1197 if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
1198 unsigned n = ffs(id->defn->regs[0]->wrmask);
1199 debug_assert(n > 0);
1200 first_component = n - 1;
1201 }
1202
1203 unsigned name = scalar_name(ctx, id->defn, first_component);
1204 unsigned r = ra_get_node_reg(ctx->g, name);
1205 unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
1206
1207 debug_assert(!(reg->flags & IR3_REG_RELATIV));
1208
1209 debug_assert(num >= first_component);
1210
1211 if (is_high(id->defn))
1212 num += FIRST_HIGH_REG;
1213
1214 reg->num = num - first_component;
1215
1216 reg->flags &= ~IR3_REG_SSA;
1217
1218 if (is_half(id->defn))
1219 reg->flags |= IR3_REG_HALF;
1220 }
1221 }
1222
1223 /* helper to determine which regs to assign in which pass: */
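/* (a split whose src, or a collect whose dst, covers more than one
 * component is assigned in the first (vector) pass.. everything else,
 * including single component split/collect, is assigned in the scalar
 * pass)
 */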
1224 static bool
1225 should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1226 {
1227 if ((instr->opc == OPC_META_SPLIT) &&
1228 (util_bitcount(instr->regs[1]->wrmask) > 1))
1229 return !ctx->scalar_pass;
1230 if ((instr->opc == OPC_META_COLLECT) &&
1231 (util_bitcount(instr->regs[0]->wrmask) > 1))
1232 return !ctx->scalar_pass;
1233 return ctx->scalar_pass;
1234 }
1235
1236 static void
1237 ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
1238 {
1239 foreach_instr (instr, &block->instr_list) {
1240
1241 if (writes_gpr(instr)) {
1242 if (should_assign(ctx, instr)) {
1243 reg_assign(ctx, instr->regs[0], instr);
1244 if (instr->regs[0]->flags & IR3_REG_HALF)
1245 fixup_half_instr_dst(instr);
1246 }
1247 }
1248
1249 foreach_src_n (reg, n, instr) {
1250 struct ir3_instruction *src = reg->instr;
1251
1252 if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
1253 continue;
1254
1255 if (src && should_assign(ctx, instr))
1256 reg_assign(ctx, src->regs[0], src);
1257
1258 /* Note: reg->instr could be null for IR3_REG_ARRAY */
1259 if (src || (reg->flags & IR3_REG_ARRAY))
1260 reg_assign(ctx, instr->regs[n+1], src);
1261
1262 if (instr->regs[n+1]->flags & IR3_REG_HALF)
1263 fixup_half_instr_src(instr);
1264 }
1265 }
1266
1267 /* We need to pre-color outputs for the scalar pass in
1268 * ra_precolor_assigned(), so we need to actually assign
1269 * them in the first pass:
1270 */
1271 if (!ctx->scalar_pass) {
1272 foreach_input (in, ctx->ir) {
1273 reg_assign(ctx, in->regs[0], in);
1274 }
1275 foreach_output (out, ctx->ir) {
1276 reg_assign(ctx, out->regs[0], out);
1277 }
1278 }
1279 }
1280
1281 static void
1282 assign_arr_base(struct ir3_ra_ctx *ctx, struct ir3_array *arr,
1283 struct ir3_instruction **precolor, unsigned nprecolor)
1284 {
1285 unsigned base = 0;
1286
1287 /* figure out what else we conflict with which has already
1288 * been assigned:
1289 */
1290 retry:
1291 foreach_array (arr2, &ctx->ir->array_list) {
1292 if (arr2 == arr)
1293 break;
1294 if (arr2->end_ip == 0)
1295 continue;
1296 /* if it intersects with liverange AND register range.. */
1297 if (intersects(arr->start_ip, arr->end_ip,
1298 arr2->start_ip, arr2->end_ip) &&
1299 intersects(base, base + reg_size_for_array(arr),
1300 arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
1301 base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
1302 goto retry;
1303 }
1304 }
1305
1306 /* also need to not conflict with any pre-assigned inputs: */
1307 for (unsigned i = 0; i < nprecolor; i++) {
1308 struct ir3_instruction *instr = precolor[i];
1309
1310 if (!instr || (instr->flags & IR3_INSTR_UNUSED))
1311 continue;
1312
1313 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1314
1315 /* only consider the first component: */
1316 if (id->off > 0)
1317 continue;
1318
1319 unsigned name = ra_name(ctx, id);
1320 unsigned regid = instr->regs[0]->num;
1321
1322 /* Check if array intersects with liverange AND register
1323 * range of the input:
1324 */
1325 if (intersects(arr->start_ip, arr->end_ip,
1326 ctx->def[name], ctx->use[name]) &&
1327 intersects(base, base + reg_size_for_array(arr),
1328 regid, regid + class_sizes[id->cls])) {
1329 base = MAX2(base, regid + class_sizes[id->cls]);
1330 goto retry;
1331 }
1332 }
1333
1334 arr->reg = base;
1335 }
1336
1337 /* handle pre-colored registers. This includes "arrays" (which could be of
1338 * length 1, used for phi webs lowered to registers in nir), as well as
1339 * special shader input values that need to be pinned to certain registers.
1340 */
1341 static void
1342 ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
1343 {
1344 for (unsigned i = 0; i < nprecolor; i++) {
1345 if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
1346 struct ir3_instruction *instr = precolor[i];
1347
1348 if (instr->regs[0]->num == INVALID_REG)
1349 continue;
1350
1351 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1352
1353 debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
1354
1355 /* only consider the first component: */
1356 if (id->off > 0)
1357 continue;
1358
1359 if (ctx->scalar_pass && !should_assign(ctx, instr))
1360 continue;
1361
1362 /* 'base' is in scalar (class 0) but we need to map that
1363 * to the conflicting register of the appropriate class (ie.
1364 * input could be vec2/vec3/etc)
1365 *
1366 * Note that the higher class (larger than scalar) regs
1367 * are setup to conflict with others in the same class,
1368 * so for example, R1 (scalar) is also the first component
1369 * of D1 (vec2/double):
1370 *
1371 * Single (base) | Double
1372 * --------------+---------------
1373 * R0 | D0
1374 * R1 | D0 D1
1375 * R2 | D1 D2
1376 * R3 | D2
1377 * .. and so on..
1378 */
1379 unsigned regid = instr->regs[0]->num;
1380 unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1381 unsigned name = ra_name(ctx, id);
1382 ra_set_node_reg(ctx->g, name, reg);
1383 }
1384 }
1385
1386 /* pre-assign array elements:
1387 *
1388 * TODO this is going to need some work for half-precision.. possibly
1389 * this is easier on a6xx, where we can just divide array size by two?
1390 * But on a5xx and earlier it will need to track two bases.
1391 */
1392 foreach_array (arr, &ctx->ir->array_list) {
1393
1394 if (arr->end_ip == 0)
1395 continue;
1396
1397 if (!ctx->scalar_pass)
1398 assign_arr_base(ctx, arr, precolor, nprecolor);
1399
1400 unsigned base = arr->reg;
1401
1402 for (unsigned i = 0; i < arr->length; i++) {
1403 unsigned name, reg;
1404
1405 if (arr->half) {
1406 /* We don't need to do this on generations older than a6xx,
1407 * since there's no conflict between full regs and half regs
1408 * on them.
1409 *
1410 * TODO Presumably "base" could start from 0 for half regs
1411 * of arrays on older generations.
1412 */
1413 unsigned base_half = base * 2 + i;
1414 reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
1415 base = base_half / 2 + 1;
1416 } else {
1417 reg = ctx->set->gpr_to_ra_reg[0][base++];
1418 }
1419
1420 name = arr->base + i;
1421 ra_set_node_reg(ctx->g, name, reg);
1422 }
1423 }
1424
1425 if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
1426 foreach_array (arr, &ctx->ir->array_list) {
1427 unsigned first = arr->reg;
1428 unsigned last = arr->reg + arr->length - 1;
1429 debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
1430 (first >> 2), "xyzw"[first & 0x3],
1431 (last >> 2), "xyzw"[last & 0x3]);
1432 }
1433 }
1434 }
1435
1436 static void
1437 precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
1438 {
1439 struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
1440 unsigned n = dest_regs(instr);
1441 for (unsigned i = 0; i < n; i++) {
1442 /* tex instructions actually have a wrmask, and
1443 * don't touch masked out components. So we
1444 * shouldn't precolor them:
1445 */
1446 if (is_tex_or_prefetch(instr) &&
1447 !(instr->regs[0]->wrmask & (1 << i)))
1448 continue;
1449
1450 unsigned name = scalar_name(ctx, instr, i);
1451 unsigned regid = instr->regs[0]->num + i;
1452
1453 if (instr->regs[0]->flags & IR3_REG_HIGH)
1454 regid -= FIRST_HIGH_REG;
1455
1456 unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
1457 ra_set_node_reg(ctx->g, name, vreg);
1458 }
1459 }
1460
1461 /* pre-color non-scalar registers based on the registers assigned in the
1462 * previous pass. Do this by actually looking at the fanout instructions.
1463 */
1464 static void
1465 ra_precolor_assigned(struct ir3_ra_ctx *ctx)
1466 {
1467 debug_assert(ctx->scalar_pass);
1468
1469 foreach_block (block, &ctx->ir->block_list) {
1470 foreach_instr (instr, &block->instr_list) {
1471
1472 if (!writes_gpr(instr))
1473 continue;
1474
1475 if (should_assign(ctx, instr))
1476 continue;
1477
1478 precolor(ctx, instr);
1479
1480 foreach_src (src, instr) {
1481 if (!src->instr)
1482 continue;
1483 precolor(ctx, src->instr);
1484 }
1485 }
1486 }
1487 }
1488
1489 static int
1490 ra_alloc(struct ir3_ra_ctx *ctx)
1491 {
1492 if (!ra_allocate(ctx->g))
1493 return -1;
1494
1495 foreach_block (block, &ctx->ir->block_list) {
1496 ra_block_alloc(ctx, block);
1497 }
1498
1499 return 0;
1500 }
1501
1502 /* if we end up with split/collect instructions with non-matching src
1503 * and dest regs, that means something has gone wrong, which makes it
1504 * a pretty good sanity check.
1505 */
1506 static void
1507 ra_sanity_check(struct ir3 *ir)
1508 {
1509 foreach_block (block, &ir->block_list) {
1510 foreach_instr (instr, &block->instr_list) {
1511 if (instr->opc == OPC_META_SPLIT) {
1512 struct ir3_register *dst = instr->regs[0];
1513 struct ir3_register *src = instr->regs[1];
1514 debug_assert(dst->num == (src->num + instr->split.off));
1515 } else if (instr->opc == OPC_META_COLLECT) {
1516 struct ir3_register *dst = instr->regs[0];
1517
1518 foreach_src_n (src, n, instr) {
1519 debug_assert(dst->num == (src->num - n));
1520 }
1521 }
1522 }
1523 }
1524 }
1525
1526 static int
1527 ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1528 unsigned nprecolor, bool scalar_pass)
1529 {
1530 struct ir3_ra_ctx ctx = {
1531 .v = v,
1532 .ir = v->ir,
1533 .set = v->ir->compiler->set,
1534 .scalar_pass = scalar_pass,
1535 };
1536 int ret;
1537
1538 ra_init(&ctx);
1539 ra_add_interference(&ctx);
1540 ra_precolor(&ctx, precolor, nprecolor);
1541 if (scalar_pass)
1542 ra_precolor_assigned(&ctx);
1543 ret = ra_alloc(&ctx);
1544 ra_destroy(&ctx);
1545
1546 return ret;
1547 }
1548
1549 int
1550 ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
1551 unsigned nprecolor)
1552 {
1553 int ret;
1554
1555 /* First pass, assign the vecN (non-scalar) registers: */
1556 ret = ir3_ra_pass(v, precolor, nprecolor, false);
1557 if (ret)
1558 return ret;
1559
1560 ir3_debug_print(v->ir, "AFTER: ir3_ra (1st pass)");
1561
1562 /* Second pass, assign the scalar registers: */
1563 ret = ir3_ra_pass(v, precolor, nprecolor, true);
1564 if (ret)
1565 return ret;
1566
1567 ir3_debug_print(v->ir, "AFTER: ir3_ra (2nd pass)");
1568
1569 #ifdef DEBUG
1570 # define SANITY_CHECK DEBUG
1571 #else
1572 # define SANITY_CHECK 0
1573 #endif
1574 if (SANITY_CHECK)
1575 ra_sanity_check(v->ir);
1576
1577 return ret;
1578 }