/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/u_math.h"
#include "util/register_allocate.h"
#include "util/ralloc.h"
#include "util/bitset.h"

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_ra.h"
#ifdef DEBUG
#define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)
#else
#define RA_DEBUG 0
#endif

#define d(fmt, ...) do { if (RA_DEBUG) { \
	printf("RA: "fmt"\n", ##__VA_ARGS__); \
} } while (0)

#define di(instr, fmt, ...) do { if (RA_DEBUG) { \
	printf("RA: "fmt": ", ##__VA_ARGS__); \
	ir3_print_instr(instr); \
} } while (0)
/*
 * Register Assignment:
 *
 * Uses the register_allocate util, which implements graph coloring
 * algo with interference classes.  To handle the cases where we need
 * consecutive registers (for example, texture sample instructions),
 * we model these as larger (double/quad/etc) registers which conflict
 * with the corresponding registers in other classes.
 *
 * Additionally we create additional classes for half-regs, which
 * do not conflict with the full-reg classes.  We do need at least
 * sizes 1-4 (to deal w/ texture sample instructions output to half-
 * reg).  At the moment we don't create the higher order half-reg
 * classes as half-reg frequently does not have enough precision
 * for texture coords at higher resolutions.
 *
 * There are some additional cases that we need to handle specially,
 * as the graph coloring algo doesn't understand "partial writes".
 * For example, a sequence like:
 *
 *   add r0.z, ...
 *   sam (f32)(xy)r0.x, ...
 *   ...
 *   sam (f32)(xyzw)r0.w, r0.x, ...   ; 3d texture, so r0.xyz are coord
 *
 * In this scenario, we treat r0.xyz as class size 3, which is written
 * (from a use/def perspective) at the 'add' instruction and ignore the
 * subsequent partial writes to r0.xy.  So the 'add r0.z, ...' is the
 * defining instruction, as it is the first to partially write r0.xyz.
 *
 * To address the fragmentation that this can potentially cause, a
 * two pass register allocation is used.  After the first pass the
 * assignment of scalars is discarded, but the assignment of vecN (for
 * N > 1) is used to pre-color in the second pass, which considers
 * only scalars.
 *
 * Arrays of arbitrary size are handled via pre-coloring a consecutive
 * sequence of registers.  Additional scalar (single component) reg
 * names are allocated starting at ctx->class_base[total_class_count]
 * (see arr->base), which are pre-colored.  In the use/def graph direct
 * access is treated as a single element use/def, and indirect access
 * is treated as use or def of all array elements.  (Only the first
 * def is tracked, in case of multiple indirect writes, etc.)
 *
 * TODO arrays that fit in one of the pre-defined class sizes should
 * not need to be pre-colored, but instead could be given a normal
 * vreg name.  (Ignoring this for now since it is a good way to work
 * out the kinks with arbitrary sized arrays.)
 *
 * TODO might be easier for debugging to split this into two passes,
 * the first assigning vreg names in a way that we could ir3_print()
 * the result and do debugging/etc, and the second pass just assigning
 * the hw registers.
 */
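/* To make the two-pass flow above concrete with a (hypothetical) example:
 * a vec2 value produced by a sam instruction gets a single name in the
 * size-2 class and might be assigned r2.x/r2.y in the first (vector)
 * pass; in the second (scalar) pass each component gets its own name,
 * pre-colored to r2.x and r2.y (see ra_precolor_assigned()), so only
 * true scalar values are free to move.
 */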
static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);
static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);
/* does it conflict? */
static inline bool
intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
{
	return !((a_start >= b_end) || (b_start >= a_end));
}
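/* Worked example: the ranges behave as half-open intervals, so
 * intersects(2, 5, 4, 7) is true (both cover ip 4), while
 * intersects(2, 4, 4, 7) is false since the first range ends exactly
 * where the second begins.
 */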
static unsigned
reg_size_for_array(struct ir3_array *arr)
{
	if (arr->half)
		return DIV_ROUND_UP(arr->length, 2);

	return arr->length;
}
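/* For example, a half-precision array of length 5 packs two elements per
 * full-size reg name and so takes DIV_ROUND_UP(5, 2) = 3 names, while a
 * full-precision array of length 5 takes 5.
 */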
static bool
instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
{
	if (a->flags & IR3_INSTR_UNUSED)
		return false;
	return (a->ip < b->ip);
}
static struct ir3_instruction *
get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
		int *sz, int *off)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	struct ir3_instruction *d = NULL;

	if (ctx->scalar_pass) {
		id->defn = instr;
		id->off = 0;
		id->sz = 1;     /* considering things as N scalar regs now */
	}

	if (id->defn) {
		*sz = id->sz;
		*off = id->off;
		return id->defn;
	}

	if (instr->opc == OPC_META_COLLECT) {
		/* What about the case where collect is subset of array, we
		 * need to find the distance between where actual array starts
		 * and collect..  that probably doesn't happen currently.
		 */
		struct ir3_register *src;
		int dsz, doff;

		/* note: don't use foreach_ssa_src as this gets called once
		 * while assigning regs (which clears SSA flag)
		 */
		foreach_src_n (src, n, instr) {
			struct ir3_instruction *dd;

			if (!src->instr)
				continue;

			dd = get_definer(ctx, src->instr, &dsz, &doff);

			if ((!d) || instr_before(dd, d)) {
				d = dd;
				*sz = dsz;
				*off = doff - n;
			}
		}
	} else if (instr->cp.right || instr->cp.left) {
		/* covers also the meta:fo case, which ends up w/ single
		 * scalar instructions for each component:
		 */
		struct ir3_instruction *f = ir3_neighbor_first(instr);

		/* by definition, the entire sequence forms one linked list
		 * of single scalar register nodes (even if some of them may
		 * be splits from a texture sample (for example) instr.  We
		 * just need to walk the list finding the first element of
		 * the group defined (lowest ip)
		 */
		int cnt = 0;

		/* need to skip over unused in the group: */
		while (f && (f->flags & IR3_INSTR_UNUSED)) {
			f = f->cp.right;
			cnt++;
		}

		while (f) {
			if ((!d) || instr_before(f, d))
				d = f;
			if (f == instr)
				*off = cnt;
			f = f->cp.right;
			cnt++;
		}

		*sz = cnt;
	} else {
		/* second case is looking directly at the instruction which
		 * produces multiple values (eg, texture sample), rather
		 * than the split nodes that point back to that instruction.
		 * This isn't quite right, because it may be part of a larger
		 * group, such as:
		 *
		 *    sam (f32)(xyzw)r0.x, ...
		 *    ...
		 *    sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
		 *
		 * need to come up with a better way to handle that case.
		 */
		if (instr->address) {
			*sz = instr->regs[0]->size;
		} else {
			*sz = util_last_bit(instr->regs[0]->wrmask);
		}
		*off = 0;
		d = instr;
	}

	if (d->opc == OPC_META_SPLIT) {
		struct ir3_instruction *dd;
		int dsz, doff;

		dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);

		/* by definition, should come before: */
		debug_assert(instr_before(dd, d));

		*sz = MAX2(*sz, dsz);

		if (instr->opc == OPC_META_SPLIT)
			*off = MAX2(*off, instr->split.off);

		d = dd;
	}

	debug_assert(d->opc != OPC_META_SPLIT);

	id->defn = d;
	id->sz = *sz;
	id->off = *off;

	return d;
}
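/* Illustrative (hypothetical) example of definer resolution: given
 *
 *    mov.f32f32 A, ...        ; ip=10
 *    mov.f32f32 B, ...        ; ip=11
 *    collect { A, B }         ; ip=12
 *
 * get_definer() on the collect (or on either mov, via the neighbor
 * list) resolves to the ip=10 mov with sz=2, so the pair is treated as
 * a single size-2 vreg in the vector pass.
 */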
static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
		if (instr->regs_count == 0)
			continue;
		/* couple special cases: */
		if (writes_addr0(instr) || writes_addr1(instr) || writes_pred(instr)) {
			id->cls = -1;
		} else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
			id->cls = total_class_count;
		} else {
			/* and the normal case: */
			id->defn = get_definer(ctx, instr, &id->sz, &id->off);
			id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));

			/* this is a bit of duct-tape.. if we have a scenario like:
			 *
			 *   sam (f32)(x) out.x, ...
			 *   sam (f32)(x) out.y, ...
			 *
			 * Then the fanout/split meta instructions for the two different
			 * tex instructions end up grouped as left/right neighbors.  The
			 * upshot is that when you get_definer() on one of the meta:fo's
			 * you get definer as the first sam with sz=2, but when you call
			 * get_definer() on either of the sam's you get itself as the
			 * definer with sz=1.
			 *
			 * (We actually avoid this scenario exactly, the neighbor links
			 * prevent one of the output mov's from being eliminated, so this
			 * hack should be enough.  But probably we need to rethink how we
			 * find the "defining" instruction.)
			 *
			 * TODO how do we figure out offset properly...
			 */
			if (id->defn != instr) {
				struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
				if (did->sz < id->sz) {
					did->sz = id->sz;
					did->cls = id->cls;
				}
			}
		}
	}
}
/* give each instruction a name (and ip), and count up the # of names
 * of each class:
 */
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

#ifdef DEBUG
		instr->name = ~0;
#endif

		ctx->instr_cnt++;

		if (!writes_gpr(instr))
			continue;

		if (id->defn != instr)
			continue;

		/* In scalar pass, collect/split don't get their own names,
		 * but instead inherit them from their src(s):
		 *
		 * Possibly we don't need this because of scalar_name(), but
		 * it does make the ir3_print() dumps easier to read.
		 */
		if (ctx->scalar_pass) {
			if (instr->opc == OPC_META_SPLIT) {
				instr->name = instr->regs[1]->instr->name + instr->split.off;
				continue;
			}

			if (instr->opc == OPC_META_COLLECT) {
				instr->name = instr->regs[1]->instr->name;
				continue;
			}
		}

		/* arrays which don't fit in one of the pre-defined class
		 * sizes are pre-colored:
		 */
		if ((id->cls >= 0) && (id->cls < total_class_count)) {
			/* in the scalar pass, we generate a name for each
			 * scalar component, instr->name is the name of the
			 * first component.
			 */
			unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
			instr->name = ctx->class_alloc_count[id->cls];
			ctx->class_alloc_count[id->cls] += n;
			ctx->alloc_count += n;
		}
	}
}
/**
 * Set a value for max register target.
 *
 * Currently this just rounds up to a multiple of full-vec4 (ie. the
 * granularity that we configure the hw for.. there is no point to
 * using r3.x if you aren't going to make r3.yzw available).  But
 * in reality there seem to be multiple thresholds that affect the
 * number of waves.. and we should round up the target to the next
 * threshold when we round-robin registers, to give postsched more
 * options.  When we understand that better, this is where we'd
 * implement that.
 */
static void
ra_set_register_target(struct ir3_ra_ctx *ctx, unsigned max_target)
{
	const unsigned hvec4 = 4;
	const unsigned vec4 = 2 * hvec4;

	ctx->max_target = align(max_target, vec4);

	d("New max_target=%u", ctx->max_target);
}
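/* e.g. a computed max live-value footprint of 9 (counted in half-reg
 * units) rounds up to align(9, 8) = 16, ie. two full vec4s, since the
 * hw register footprint is configured at vec4 granularity.
 */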
static int
pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
{
	for (unsigned i = min; i <= max; i++) {
		if (BITSET_TEST(regs, i)) {
			return i;
		}
	}
	return -1;
}

static int
pick_in_range_rev(BITSET_WORD *regs, int min, int max)
{
	for (int i = max; i >= min; i--) {
		if (BITSET_TEST(regs, i)) {
			return i;
		}
	}
	return -1;
}
/* register selector for the a6xx+ merged register file: */
static unsigned int
ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
{
	struct ir3_ra_ctx *ctx = data;
	unsigned int class = ra_get_node_class(ctx->g, n);
	bool half, high;
	int sz = ra_class_to_size(class, &half, &high);

	assert(sz > 0);

	/* dimensions within the register class: */
	unsigned max_target, start;

	/* the regs bitset will include *all* of the virtual regs, but we lay
	 * out the different classes consecutively in the virtual register
	 * space.  So we just need to think about the base offset of a given
	 * class within the virtual register space, and offset the register
	 * space we search within by that base offset.
	 */
	unsigned base;

	/* TODO I think eventually we want to round-robin in vector pass
	 * as well, but needs some more work to calculate # of live vals
	 * for this.  (Maybe with some work, we could just figure out
	 * the scalar target and use that, since that is what we care
	 * about in the end.. but that would mean setting up use-def/
	 * liveranges for scalar pass before doing vector pass.)
	 *
	 * For now, in the vector class, just move assignments for scalar
	 * vals higher to hopefully prevent them from limiting where vecN
	 * values can be placed.  Since the scalar values are re-assigned
	 * in the 2nd pass, we don't really care where they end up in the
	 * vector pass.
	 */
	if (!ctx->scalar_pass) {
		base = ctx->set->gpr_to_ra_reg[class][0];
		if (high) {
			max_target = HIGH_CLASS_REGS(sz);
		} else if (half) {
			max_target = HALF_CLASS_REGS(sz);
		} else {
			max_target = CLASS_REGS(sz);
		}

		if ((sz == 1) && !high) {
			return pick_in_range_rev(regs, base, base + max_target);
		} else {
			return pick_in_range(regs, base, base + max_target);
		}
	} else {
		assert(sz == 1);
	}

	/* NOTE: this is only used in scalar pass, so the register
	 * class will be one of the scalar classes (ie. idx==0):
	 */
	base = ctx->set->gpr_to_ra_reg[class][0];
	if (high) {
		max_target = HIGH_CLASS_REGS(0);
		start = 0;
	} else if (half) {
		max_target = ctx->max_target;
		start = ctx->start_search_reg;
	} else {
		max_target = ctx->max_target / 2;
		start = ctx->start_search_reg;
	}

	/* For cat4 instructions, if the src reg is already assigned, and
	 * avail to pick, use it.  Because this doesn't introduce unnecessary
	 * dependencies, and it potentially avoids needing (ss) syncs for
	 * write after read hazards:
	 */
	struct ir3_instruction *instr = name_to_instr(ctx, n);
	if (is_sfu(instr) && instr->regs[1]->instr) {
		struct ir3_instruction *src = instr->regs[1]->instr;
		unsigned src_n = scalar_name(ctx, src, 0);

		unsigned reg = ra_get_node_reg(ctx->g, src_n);

		/* Check if the src register has been assigned yet: */
		if (reg != NO_REG) {
			if (BITSET_TEST(regs, reg)) {
				return reg;
			}
		}
	} else if (is_tex_or_prefetch(instr)) {
		/* we could have a tex fetch w/ wrmask .z, for example.. these
		 * cannot land in r0.x since that would underflow when we
		 * subtract the offset.  Ie. if we pick r0.z, and subtract
		 * the offset, the register encoded for dst will be r0.x
		 */
		unsigned n = ffs(instr->regs[0]->wrmask);
		debug_assert(n > 0);
		unsigned offset = n - 1;
		if (!half)
			offset *= 2;

		base += offset;
		max_target -= offset;
	}

	int r = pick_in_range(regs, base + start, base + max_target);
	if (r < 0) {
		/* wrap-around: */
		r = pick_in_range(regs, base, base + start);
	}

	if (r < 0) {
		/* overflow, we need to increase max_target: */
		ra_set_register_target(ctx, ctx->max_target + 1);
		return ra_select_reg_merged(n, regs, data);
	}

	if (class == ctx->set->half_classes[0]) {
		int n = r - base;
		ctx->start_search_reg = (n + 1) % ctx->max_target;
	} else if (class == ctx->set->classes[0]) {
		int n = (r - base) * 2;
		ctx->start_search_reg = (n + 1) % ctx->max_target;
	}

	return r;
}
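/* Note on the *2 / /2 scaling above: in the merged register file a full
 * register rN aliases the two half registers hr(2N) and hr(2N+1), so the
 * shared round-robin position is kept in half-reg units: a full-reg pick
 * advances it by (r - base) * 2, and the full-reg search window is
 * ctx->max_target / 2.
 */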
static void
ra_init(struct ir3_ra_ctx *ctx)
{
	unsigned n, base;

	ir3_clear_mark(ctx->ir);
	n = ir3_count_instructions(ctx->ir);

	ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_find_definers(ctx, block);
	}

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_name_instructions(ctx, block);
	}

	/* figure out the base register name for each class.  The
	 * actual ra name is class_base[cls] + instr->name;
	 */
	ctx->class_base[0] = 0;
	for (unsigned i = 1; i <= total_class_count; i++) {
		ctx->class_base[i] = ctx->class_base[i-1] +
				ctx->class_alloc_count[i-1];
	}

	/* and vreg names for array elements: */
	base = ctx->class_base[total_class_count];
	foreach_array (arr, &ctx->ir->array_list) {
		arr->base = base;
		ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
		base += reg_size_for_array(arr);
	}
	ctx->alloc_count += ctx->class_alloc_count[total_class_count];

	ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
	ralloc_steal(ctx->g, ctx->instrd);
	ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
	ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);

	/* TODO add selector callback for split (pre-a6xx) register file: */
	if (ctx->ir->compiler->gpu_id >= 600) {
		ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);

		if (ctx->scalar_pass) {
			ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
					_mesa_hash_int, _mesa_key_int_equal);
		}
	}
}
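/* Illustrative layout of the resulting virtual name space, assuming
 * (hypothetically) that class 0 allocated 10 names and class 1 allocated
 * 4:
 *
 *    class_base[0] = 0
 *    class_base[1] = 10
 *    class_base[2] = 14
 *    ...
 *
 * so an instruction's ra name is class_base[id->cls] + instr->name, and
 * array element names start after all the class names.
 */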
/* Map the name back to instruction: */
static struct ir3_instruction *
name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
{
	assert(!name_is_array(ctx, name));
	struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
	if (entry)
		return entry->data;
	unreachable("invalid instr name");
	return NULL;
}
static bool
name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
{
	return name >= ctx->class_base[total_class_count];
}
static struct ir3_array *
name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
{
	assert(name_is_array(ctx, name));
	foreach_array (arr, &ctx->ir->array_list) {
		unsigned sz = reg_size_for_array(arr);
		if (name < (arr->base + sz))
			return arr;
	}
	unreachable("invalid array name");
	return NULL;
}
static void
ra_destroy(struct ir3_ra_ctx *ctx)
{
	ralloc_free(ctx->g);
}
static void
__def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	debug_assert(name < ctx->alloc_count);

	/* split/collect do not actually define any real value */
	if ((instr->opc == OPC_META_SPLIT) || (instr->opc == OPC_META_COLLECT))
		return;

	/* defined on first write: */
	if (!ctx->def[name])
		ctx->def[name] = instr->ip;
	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
	BITSET_SET(bd->def, name);
}
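/* Together __def()/__use() reduce each name's live range to the pair
 * (ctx->def[name], ctx->use[name]) = (ip of first def, ip of last use);
 * e.g. a name def'd at ip=4 and last used at ip=9 yields the range
 * [4, 9], which is exactly what intersects() compares pairwise when
 * building the interference graph.
 */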
static void
__use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	debug_assert(name < ctx->alloc_count);
	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
	if (!BITSET_TEST(bd->def, name))
		BITSET_SET(bd->use, name);
}
static void
ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	struct ir3_ra_block_data *bd;
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);

#define def(name, instr) __def(ctx, bd, name, instr)
#define use(name, instr) __use(ctx, bd, name, instr)

	bd = rzalloc(ctx->g, struct ir3_ra_block_data);

	bd->def     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->use     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->livein  = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);

	block->data = bd;

	struct ir3_instruction *first_non_input = NULL;
	foreach_instr (instr, &block->instr_list) {
		if (instr->opc != OPC_META_INPUT) {
			first_non_input = instr;
			break;
		}
	}

	foreach_instr (instr, &block->instr_list) {
		foreach_def (name, ctx, instr) {
			if (name_is_array(ctx, name)) {
				struct ir3_array *arr = name_to_array(ctx, name);

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				for (unsigned i = 0; i < arr->length; i++) {
					unsigned name = arr->base + i;
					if (arr->half)
						ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
					else
						ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
				}
			} else {
				struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
				if (is_high(instr)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->high_classes[id->cls - HIGH_OFFSET]);
				} else if (is_half(instr)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->half_classes[id->cls - HALF_OFFSET]);
				} else {
					ra_set_node_class(ctx->g, name,
							ctx->set->classes[id->cls]);
				}
			}

			def(name, instr);

			if ((instr->opc == OPC_META_INPUT) && first_non_input)
				use(name, first_non_input);
		}

		foreach_use (name, ctx, instr) {
			if (name_is_array(ctx, name)) {
				struct ir3_array *arr = name_to_array(ctx, name);

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				/* NOTE: arrays are not SSA so unconditionally
				 * set use bit:
				 */
				BITSET_SET(bd->use, name);
			}

			use(name, instr);
		}

		foreach_name (name, ctx, instr) {
			/* split/collect instructions have duplicate names
			 * as real instructions, so they skip the hashtable:
			 */
			if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
					(instr->opc == OPC_META_COLLECT))) {
				/* this is slightly annoying, we can't just use an
				 * integer on the stack
				 */
				unsigned *key = ralloc(ctx->name_to_instr, unsigned);
				*key = name;
				debug_assert(!_mesa_hash_table_search(ctx->name_to_instr, key));
				_mesa_hash_table_insert(ctx->name_to_instr, key, instr);
			}
		}
	}
}
static bool
ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
{
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
	bool progress = false;

	foreach_block (block, &ctx->ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		/* update livein: */
		for (unsigned i = 0; i < bitset_words; i++) {
			/* anything used but not def'd within a block is
			 * by definition a live value coming into the block:
			 */
			BITSET_WORD new_livein =
				(bd->use[i] | (bd->liveout[i] & ~bd->def[i]));

			if (new_livein & ~bd->livein[i]) {
				bd->livein[i] |= new_livein;
				progress = true;
			}
		}

		/* update liveout: */
		for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
			struct ir3_block *succ = block->successors[j];
			struct ir3_ra_block_data *succ_bd;

			if (!succ)
				continue;

			succ_bd = succ->data;

			for (unsigned i = 0; i < bitset_words; i++) {
				/* add anything that is livein in a successor block
				 * to our liveout:
				 */
				BITSET_WORD new_liveout =
					(succ_bd->livein[i] & ~bd->liveout[i]);

				if (new_liveout) {
					bd->liveout[i] |= new_liveout;
					progress = true;
				}
			}
		}
	}

	return progress;
}
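/* For reference, the loop above is one iteration of the standard
 * backward liveness fixed point:
 *
 *    livein[b]  = use[b] | (liveout[b] & ~def[b])
 *    liveout[b] = union of livein[s] over all successors s of b
 *
 * ra_add_interference() below re-runs it until no bits change.
 */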
static void
print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
{
	debug_printf("RA: %s:", name);
	for (unsigned i = 0; i < cnt; i++) {
		if (BITSET_TEST(bs, i)) {
			debug_printf(" %04u", i);
		}
	}
	debug_printf("\n");
}
/* size of one component of instruction result, ie. half vs full: */
static unsigned
live_size(struct ir3_instruction *instr)
{
	if (is_half(instr)) {
		return 1;
	} else if (is_high(instr)) {
		/* doesn't count towards footprint */
		return 0;
	} else {
		return 2;
	}
}
static unsigned
name_size(struct ir3_ra_ctx *ctx, unsigned name)
{
	if (name_is_array(ctx, name)) {
		struct ir3_array *arr = name_to_array(ctx, name);
		return arr->half ? 1 : 2;
	} else {
		struct ir3_instruction *instr = name_to_instr(ctx, name);
		/* in scalar pass, each name represents one scalar value,
		 * half or full precision
		 */
		return live_size(instr);
	}
}
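/* The footprint is thus counted in half-reg units: a full-precision
 * value contributes 2, a half-precision value 1, and a "high" reg 0,
 * matching the hvec4/vec4 units used by ra_set_register_target().
 */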
static unsigned
ra_calc_block_live_values(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	struct ir3_ra_block_data *bd = block->data;
	unsigned name;

	assert(ctx->name_to_instr);

	/* TODO this gets a bit more complicated in non-scalar pass.. but
	 * possibly a lowball estimate is fine to start with if we do
	 * round-robin in non-scalar pass?  Maybe we just want to handle
	 * that in a different fxn?
	 */
	assert(ctx->scalar_pass);

	BITSET_WORD *live =
		rzalloc_array(bd, BITSET_WORD, BITSET_WORDS(ctx->alloc_count));

	/* Add the live input values: */
	unsigned livein = 0;
	BITSET_FOREACH_SET (name, bd->livein, ctx->alloc_count) {
		livein += name_size(ctx, name);
		BITSET_SET(live, name);
	}

	d("---------------------");
	d("block%u: LIVEIN: %u", block_id(block), livein);

	unsigned max = livein;
	int cur_live = max;

	/* Now that we know the live inputs to the block, iterate the
	 * instructions adjusting the current # of live values as we
	 * see their last use:
	 */
	foreach_instr (instr, &block->instr_list) {
		if (RA_DEBUG)
			print_bitset("LIVE", live, ctx->alloc_count);

		unsigned new_live = 0;    /* newly live values */
		unsigned new_dead = 0;    /* newly no-longer live values */
		unsigned next_dead = 0;   /* newly dead following this instr */

		foreach_def (name, ctx, instr) {
			/* NOTE: checking ctx->def filters out things like split/
			 * collect which are just redefining existing live names
			 * or array writes to already live array elements:
			 */
			if (ctx->def[name] != instr->ip)
				continue;
			new_live += live_size(instr);
			d("NEW_LIVE: %u (new_live=%u, use=%u)", name, new_live, ctx->use[name]);
			BITSET_SET(live, name);
			/* There can be cases where this is *also* the last use
			 * of a value, for example instructions that write multiple
			 * values, only some of which are used.  These values are
			 * dead *after* (rather than during) this instruction.
			 */
			if (ctx->use[name] != instr->ip)
				continue;
			next_dead += live_size(instr);
			d("NEXT_DEAD: %u (next_dead=%u)", name, next_dead);
			BITSET_CLEAR(live, name);
		}

		/* To be more resilient against special cases where liverange
		 * is extended (like first_non_input), rather than using the
		 * foreach_use() iterator, we iterate the current live values
		 * instead:
		 */
		BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
			/* Is this the last use? */
			if (ctx->use[name] != instr->ip)
				continue;
			new_dead += name_size(ctx, name);
			d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
			BITSET_CLEAR(live, name);
		}

		cur_live += new_live;
		cur_live -= new_dead;

		assert(cur_live >= 0);
		d("CUR_LIVE: %u", cur_live);

		max = MAX2(max, cur_live);

		/* account for written values which are not used later,
		 * but after updating max (since they are for one cycle
		 * live)
		 */
		cur_live -= next_dead;
		assert(cur_live >= 0);

		if (RA_DEBUG) {
			unsigned cnt = 0;
			BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
				cnt += name_size(ctx, name);
			}
			assert(cur_live == cnt);
		}
	}

	d("block%u max=%u", block_id(block), max);

	/* the remaining live should match liveout (for extra sanity testing): */
	if (RA_DEBUG) {
		unsigned liveout = 0;
		BITSET_FOREACH_SET (name, bd->liveout, ctx->alloc_count) {
			liveout += name_size(ctx, name);
			BITSET_CLEAR(live, name);
		}

		if (cur_live != liveout) {
			print_bitset("LEAKED", live, ctx->alloc_count);
			/* TODO there are a few edge cases where live-range extension
			 * tells us a value is livein, but not used by the block or
			 * liveout for the block.  Possibly a bug in the liverange
			 * extension.  But for now leave the assert disabled:
			 *    assert(cur_live == liveout);
			 */
		}
	}

	return max;
}
static unsigned
ra_calc_max_live_values(struct ir3_ra_ctx *ctx)
{
	unsigned max = 0;

	foreach_block (block, &ctx->ir->block_list) {
		unsigned block_live = ra_calc_block_live_values(ctx, block);
		max = MAX2(max, block_live);
	}

	return max;
}
static void
ra_add_interference(struct ir3_ra_ctx *ctx)
{
	struct ir3 *ir = ctx->ir;

	/* initialize array live ranges: */
	foreach_array (arr, &ir->array_list) {
		arr->start_ip = ~0;
		arr->end_ip = 0;
	}

	/* compute live ranges (use/def) on a block level, also updating
	 * block's def/use bitmasks (used below to calculate per-block
	 * livein/liveout):
	 */
	foreach_block (block, &ir->block_list) {
		ra_block_compute_live_ranges(ctx, block);
	}

	/* update per-block livein/liveout: */
	while (ra_compute_livein_liveout(ctx)) {}

	if (RA_DEBUG) {
		d("AFTER LIVEIN/OUT:");
		foreach_block (block, &ir->block_list) {
			struct ir3_ra_block_data *bd = block->data;
			d("block%u:", block_id(block));
			print_bitset(" def", bd->def, ctx->alloc_count);
			print_bitset(" use", bd->use, ctx->alloc_count);
			print_bitset(" l/i", bd->livein, ctx->alloc_count);
			print_bitset(" l/o", bd->liveout, ctx->alloc_count);
		}
		foreach_array (arr, &ir->array_list) {
			d("array%u:", arr->id);
			d("  length:   %u", arr->length);
			d("  start_ip: %u", arr->start_ip);
			d("  end_ip:   %u", arr->end_ip);
		}
		d("INSTRUCTION VREG NAMES:");
		foreach_block (block, &ctx->ir->block_list) {
			foreach_instr (instr, &block->instr_list) {
				if (!ctx->instrd[instr->ip].defn)
					continue;
				if (!writes_gpr(instr))
					continue;
				di(instr, "%04u", scalar_name(ctx, instr, 0));
			}
		}
		d("ARRAY VREG NAMES:");
		foreach_array (arr, &ctx->ir->array_list) {
			d("%04u: arr%u", arr->base, arr->id);
		}
	}

	/* extend start/end ranges based on livein/liveout info from cfg: */
	foreach_block (block, &ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		for (unsigned i = 0; i < ctx->alloc_count; i++) {
			if (BITSET_TEST(bd->livein, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
			}

			if (BITSET_TEST(bd->liveout, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
			}
		}

		foreach_array (arr, &ctx->ir->array_list) {
			for (unsigned i = 0; i < arr->length; i++) {
				if (BITSET_TEST(bd->livein, i + arr->base)) {
					arr->start_ip = MIN2(arr->start_ip, block->start_ip);
				}
				if (BITSET_TEST(bd->liveout, i + arr->base)) {
					arr->end_ip = MAX2(arr->end_ip, block->end_ip);
				}
			}
		}
	}

	if (ctx->name_to_instr) {
		unsigned max = ra_calc_max_live_values(ctx);
		ra_set_register_target(ctx, max);
	}

	for (unsigned i = 0; i < ctx->alloc_count; i++) {
		for (unsigned j = 0; j < ctx->alloc_count; j++) {
			if (intersects(ctx->def[i], ctx->use[i],
					ctx->def[j], ctx->use[j])) {
				ra_add_node_interference(ctx->g, i, j);
			}
		}
	}
}
/* some instructions need fix-up if dst register is half precision: */
static void fixup_half_instr_dst(struct ir3_instruction *instr)
{
	switch (opc_cat(instr->opc)) {
	case 1: /* move instructions */
		instr->cat1.dst_type = half_type(instr->cat1.dst_type);
		break;
	case 4:
		switch (instr->opc) {
		case OPC_RSQ:
			instr->opc = OPC_HRSQ;
			break;
		case OPC_LOG2:
			instr->opc = OPC_HLOG2;
			break;
		case OPC_EXP2:
			instr->opc = OPC_HEXP2;
			break;
		default:
			break;
		}
		break;
	case 5:
		instr->cat5.type = half_type(instr->cat5.type);
		break;
	default:
		break;
	}
}
/* some instructions need fix-up if src register is half precision: */
static void fixup_half_instr_src(struct ir3_instruction *instr)
{
	switch (instr->opc) {
	case OPC_MOV:
		instr->cat1.src_type = half_type(instr->cat1.src_type);
		break;
	case OPC_MAD_F32:
		instr->opc = OPC_MAD_F16;
		break;
	case OPC_SEL_B32:
		instr->opc = OPC_SEL_B16;
		break;
	case OPC_SEL_S32:
		instr->opc = OPC_SEL_S16;
		break;
	case OPC_SEL_F32:
		instr->opc = OPC_SEL_F16;
		break;
	case OPC_SAD_S32:
		instr->opc = OPC_SAD_S16;
		break;
	default:
		break;
	}
}
/* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
 * array access(es) which do not have any previous access to depend
 * on from scheduling point of view
 */
static void
reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
		struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id;

	if (reg->flags & IR3_REG_ARRAY) {
		struct ir3_array *arr =
			ir3_lookup_array(ctx->ir, reg->array.id);
		unsigned name = arr->base + reg->array.offset;
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r];

		if (reg->flags & IR3_REG_RELATIV) {
			reg->array.offset = num;
		} else {
			reg->num = num;
			reg->flags &= ~IR3_REG_SSA;
		}

		reg->flags &= ~IR3_REG_ARRAY;
	} else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
		unsigned first_component = 0;

		/* Special case for tex instructions, which may use the wrmask
		 * to mask off the first component(s).  In the scalar pass,
		 * this means the masked off component(s) are not def'd/use'd,
		 * so we get a bogus value when we ask the register_allocate
		 * algo to get the assigned reg for the unused/untouched
		 * component.  So we need to consider the first used component:
		 */
		if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
			unsigned n = ffs(id->defn->regs[0]->wrmask);
			debug_assert(n > 0);
			first_component = n - 1;
		}

		unsigned name = scalar_name(ctx, id->defn, first_component);
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;

		debug_assert(!(reg->flags & IR3_REG_RELATIV));

		debug_assert(num >= first_component);

		if (is_high(id->defn))
			num += FIRST_HIGH_REG;

		reg->num = num - first_component;

		reg->flags &= ~IR3_REG_SSA;

		if (is_half(id->defn))
			reg->flags |= IR3_REG_HALF;
	}
}
/* helper to determine which regs to assign in which pass: */
static bool
should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
	if ((instr->opc == OPC_META_SPLIT) &&
			(util_bitcount(instr->regs[1]->wrmask) > 1))
		return !ctx->scalar_pass;
	if ((instr->opc == OPC_META_COLLECT) &&
			(util_bitcount(instr->regs[0]->wrmask) > 1))
		return !ctx->scalar_pass;
	return ctx->scalar_pass;
}
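/* e.g. a meta:collect gathering a multi-component value keeps its
 * (consecutive) assignment from the first pass, so should_assign()
 * returns false for it in the scalar pass, while ordinary scalar-dst
 * instructions are only assigned in the scalar pass.
 */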
static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_register *reg;

		if (writes_gpr(instr)) {
			if (should_assign(ctx, instr)) {
				reg_assign(ctx, instr->regs[0], instr);
				if (instr->regs[0]->flags & IR3_REG_HALF)
					fixup_half_instr_dst(instr);
			}
		}

		foreach_src_n (reg, n, instr) {
			struct ir3_instruction *src = reg->instr;

			if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
				continue;

			if (src && should_assign(ctx, instr))
				reg_assign(ctx, src->regs[0], src);

			/* Note: reg->instr could be null for IR3_REG_ARRAY */
			if (src || (reg->flags & IR3_REG_ARRAY))
				reg_assign(ctx, instr->regs[n+1], src);

			if (instr->regs[n+1]->flags & IR3_REG_HALF)
				fixup_half_instr_src(instr);
		}
	}

	/* We need to pre-color outputs for the scalar pass in
	 * ra_precolor_assigned(), so we need to actually assign
	 * them in the first pass:
	 */
	if (!ctx->scalar_pass) {
		struct ir3_instruction *in, *out;

		foreach_input (in, ctx->ir) {
			reg_assign(ctx, in->regs[0], in);
		}
		foreach_output (out, ctx->ir) {
			reg_assign(ctx, out->regs[0], out);
		}
	}
}
/* handle pre-colored registers.  This includes "arrays" (which could be of
 * length 1, used for phi webs lowered to registers in nir), as well as
 * special shader input values that need to be pinned to certain registers.
 */
static void
ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
{
	unsigned num_precolor = 0;
	for (unsigned i = 0; i < nprecolor; i++) {
		if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
			struct ir3_instruction *instr = precolor[i];

			if (instr->regs[0]->num == INVALID_REG)
				continue;

			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

			debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));

			/* only consider the first component: */
			if (id->off > 0)
				continue;

			if (ctx->scalar_pass && !should_assign(ctx, instr))
				continue;

			/* 'base' is in scalar (class 0) but we need to map that to
			 * the conflicting register of the appropriate class (ie.
			 * input could be vec2/vec3/etc)
			 *
			 * Note that the higher class (larger than scalar) regs
			 * are setup to conflict with others in the same class,
			 * so for example, R1 (scalar) is also the first component
			 * of D1 (vec2/double):
			 *
			 *    Single (base) |  Double
			 *    --------------+---------------
			 *       R0         |  D0
			 *       R1         |  D0 D1
			 *       R2         |     D1 D2
			 *       R3         |        D2
			 *           .. and so on..
			 */
			unsigned regid = instr->regs[0]->num;
			unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
			unsigned name = ra_name(ctx, id);
			ra_set_node_reg(ctx->g, name, reg);
			num_precolor = MAX2(regid, num_precolor);
		}
	}

	/* pre-assign array elements:
	 *
	 * TODO this is going to need some work for half-precision.. possibly
	 * this is easier on a6xx, where we can just divide array size by two?
	 * But on a5xx and earlier it will need to track two bases.
	 */
	foreach_array (arr, &ctx->ir->array_list) {
		unsigned base = 0;

		if (arr->end_ip == 0)
			continue;

		/* figure out what else we conflict with which has already
		 * been assigned:
		 */
retry:
		foreach_array (arr2, &ctx->ir->array_list) {
			if (arr2 == arr)
				break;
			if (arr2->end_ip == 0)
				continue;
			/* if it intersects with liverange AND register range.. */
			if (intersects(arr->start_ip, arr->end_ip,
					arr2->start_ip, arr2->end_ip) &&
				intersects(base, base + reg_size_for_array(arr),
					arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
				base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
				goto retry;
			}
		}

		/* also need to not conflict with any pre-assigned inputs: */
		for (unsigned i = 0; i < nprecolor; i++) {
			struct ir3_instruction *instr = precolor[i];

			if (!instr || (instr->flags & IR3_INSTR_UNUSED))
				continue;

			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

			/* only consider the first component: */
			if (id->off > 0)
				continue;

			unsigned name = ra_name(ctx, id);
			unsigned regid = instr->regs[0]->num;

			/* Check if array intersects with liverange AND register
			 * range of the input:
			 */
			if (intersects(arr->start_ip, arr->end_ip,
					ctx->def[name], ctx->use[name]) &&
				intersects(base, base + reg_size_for_array(arr),
					regid, regid + class_sizes[id->cls])) {
				base = MAX2(base, regid + class_sizes[id->cls]);
				goto retry;
			}
		}

		arr->reg = base;

		for (unsigned i = 0; i < arr->length; i++) {
			unsigned name, reg;

			if (arr->half) {
				/* This isn't needed on generations older than a6xx,
				 * since there's no conflict between full regs and half
				 * regs on them.
				 *
				 * TODO Presumably "base" could start from 0 for half
				 * regs of arrays on older generations.
				 */
				unsigned base_half = base * 2 + i;
				reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
				base = base_half / 2 + 1;
			} else {
				reg = ctx->set->gpr_to_ra_reg[0][base++];
			}

			name = arr->base + i;
			ra_set_node_reg(ctx->g, name, reg);
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		foreach_array (arr, &ctx->ir->array_list) {
			unsigned first = arr->reg;
			unsigned last  = arr->reg + arr->length - 1;
			debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
					(first >> 2), "xyzw"[first & 0x3],
					(last >> 2), "xyzw"[last & 0x3]);
		}
	}
}
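/* Example of the retry loop above: if arr overlaps, in both liverange
 * and register range, an already-placed arr2 at reg 2 with size 4, then
 * base is bumped to 2 + 4 = 6 and the conflict scan restarts from the
 * top.
 */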
static void
precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	unsigned n = dest_regs(instr);
	for (unsigned i = 0; i < n; i++) {
		/* tex instructions actually have a wrmask, and
		 * don't touch masked out components.  So we
		 * shouldn't precolor them:
		 */
		if (is_tex_or_prefetch(instr) &&
				!(instr->regs[0]->wrmask & (1 << i)))
			continue;

		unsigned name = scalar_name(ctx, instr, i);
		unsigned regid = instr->regs[0]->num + i;

		if (instr->regs[0]->flags & IR3_REG_HIGH)
			regid -= FIRST_HIGH_REG;

		unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
		ra_set_node_reg(ctx->g, name, vreg);
	}
}
/* pre-color non-scalar registers based on the registers assigned in previous
 * pass.  Do this by looking actually at the fanout instructions.
 */
static void
ra_precolor_assigned(struct ir3_ra_ctx *ctx)
{
	debug_assert(ctx->scalar_pass);

	foreach_block (block, &ctx->ir->block_list) {
		foreach_instr (instr, &block->instr_list) {

			if (!writes_gpr(instr))
				continue;

			if (should_assign(ctx, instr))
				continue;

			precolor(ctx, instr);

			struct ir3_register *src;
			foreach_src (src, instr) {
				if (!src->instr)
					continue;
				precolor(ctx, src->instr);
			}
		}
	}
}
static int
ra_alloc(struct ir3_ra_ctx *ctx)
{
	if (!ra_allocate(ctx->g))
		return -1;

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_alloc(ctx, block);
	}

	return 0;
}
/* if we end up with split/collect instructions with non-matching src
 * and dest regs, that means something has gone wrong.  Which makes it
 * a pretty good sanity check.
 */
static void
ra_sanity_check(struct ir3 *ir)
{
	foreach_block (block, &ir->block_list) {
		foreach_instr (instr, &block->instr_list) {
			if (instr->opc == OPC_META_SPLIT) {
				struct ir3_register *dst = instr->regs[0];
				struct ir3_register *src = instr->regs[1];
				debug_assert(dst->num == (src->num + instr->split.off));
			} else if (instr->opc == OPC_META_COLLECT) {
				struct ir3_register *dst = instr->regs[0];
				struct ir3_register *src;

				foreach_src_n (src, n, instr) {
					debug_assert(dst->num == (src->num - n));
				}
			}
		}
	}
}
static int
ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
		unsigned nprecolor, bool scalar_pass)
{
	struct ir3_ra_ctx ctx = {
			.v = v,
			.ir = v->ir,
			.set = v->ir->compiler->set,
			.scalar_pass = scalar_pass,
	};
	int ret;

	ra_init(&ctx);
	ra_add_interference(&ctx);
	ra_precolor(&ctx, precolor, nprecolor);
	if (scalar_pass)
		ra_precolor_assigned(&ctx);
	ret = ra_alloc(&ctx);
	ra_destroy(&ctx);

	return ret;
}
int
ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
		unsigned nprecolor)
{
	int ret;

	/* First pass, assign the vecN (non-scalar) registers: */
	ret = ir3_ra_pass(v, precolor, nprecolor, false);
	if (ret)
		return ret;

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER RA (1st pass):\n");
		ir3_print(v->ir);
	}

	/* Second pass, assign the scalar registers: */
	ret = ir3_ra_pass(v, precolor, nprecolor, true);
	if (ret)
		return ret;

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER RA (2nd pass):\n");
		ir3_print(v->ir);
	}

#ifdef DEBUG
#  define SANITY_CHECK DEBUG
#else
#  define SANITY_CHECK 0
#endif
	if (SANITY_CHECK)
		ra_sanity_check(v->ir);

	return ret;
}