/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/u_math.h"
#include "util/register_allocate.h"
#include "util/ralloc.h"
#include "util/bitset.h"

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_ra.h"
#ifdef DEBUG
#define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)
#else
#define RA_DEBUG 0
#endif

#define d(fmt, ...) do { if (RA_DEBUG) { \
	printf("RA: "fmt"\n", ##__VA_ARGS__); \
} } while (0)

#define di(instr, fmt, ...) do { if (RA_DEBUG) { \
	printf("RA: "fmt": ", ##__VA_ARGS__); \
	ir3_print_instr(instr); \
} } while (0)
/*
 * Register Assignment:
 *
 * Uses the register_allocate util, which implements graph coloring
 * algo with interference classes.  To handle the cases where we need
 * consecutive registers (for example, texture sample instructions),
 * we model these as larger (double/quad/etc) registers which conflict
 * with the corresponding registers in other classes.
 *
 * Additionally we create additional classes for half-regs, which
 * do not conflict with the full-reg classes.  We do need at least
 * sizes 1-4 (to deal w/ texture sample instructions output to half-
 * reg).  At the moment we don't create the higher order half-reg
 * classes as half-reg frequently does not have enough precision
 * for texture coords at higher resolutions.
 *
 * There are some additional cases that we need to handle specially,
 * as the graph coloring algo doesn't understand "partial writes".
 * For example, a sequence like:
 *
 *   add r0.z, ...
 *   sam (f32)(xy)r0.x, ...
 *   ...
 *   sam (f32)(xyzw)r0.w, r0.x, ...   ; 3d texture, so r0.xyz are coord
 *
 * In this scenario, we treat r0.xyz as class size 3, which is written
 * (from a use/def perspective) at the 'add' instruction and ignore the
 * subsequent partial writes to r0.xy.  So the 'add r0.z, ...' is the
 * defining instruction, as it is the first to partially write r0.xyz.
 *
 * To address the fragmentation that this can potentially cause, a
 * two pass register allocation is used.  After the first pass the
 * assignment of scalars is discarded, but the assignment of vecN (for
 * N > 1) is used to pre-color in the second pass, which considers
 * only scalar values.
 *
 * Arrays of arbitrary size are handled via pre-coloring a consecutive
 * sequence of registers.  Additional scalar (single component) reg
 * names are allocated starting at ctx->class_base[total_class_count]
 * (see arr->base), which are pre-colored.  In the use/def graph direct
 * access is treated as a single element use/def, and indirect access
 * is treated as use or def of all array elements.  (Only the first
 * def is tracked, in case of multiple indirect writes, etc.)
 *
 * TODO arrays that fit in one of the pre-defined class sizes should
 * not need to be pre-colored, but instead could be given a normal
 * vreg name.  (Ignoring this for now since it is a good way to work
 * out the kinks with arbitrary sized arrays.)
 *
 * TODO might be easier for debugging to split this into two passes,
 * the first assigning vreg names in a way that we could ir3_print()
 * the result and verify the assignment, and a second pass to map the
 * vreg names to actual registers.
 */
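/* Illustration (not part of the algorithm itself): a vec2 ssa value
 * gets a single size-2 vreg name in the first (vector) pass.  If it
 * lands in r2.x/r2.y there, the second (scalar) pass pre-colors its
 * two scalar names to r2.x and r2.y, and only re-assigns the
 * remaining scalar values around it.
 */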
static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);

static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);
/* does it conflict? */
static inline bool
intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
{
	return !((a_start >= b_end) || (b_start >= a_end));
}
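/* e.g. intersects(0, 4, 4, 8) is false: the [start, end) ranges are
 * treated as half-open, so live ranges that merely touch do not
 * conflict.
 */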
static unsigned
reg_size_for_array(struct ir3_array *arr)
{
	if (arr->half)
		return DIV_ROUND_UP(arr->length, 2);

	return arr->length;
}
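/* e.g. a half-precision array of length 5 occupies
 * DIV_ROUND_UP(5, 2) = 3 names, since names are sized in full-reg
 * units and two half regs pack into one full reg.
 */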
static bool
instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
{
	if (a->flags & IR3_INSTR_UNUSED)
		return false;
	return (a->ip < b->ip);
}
static struct ir3_instruction *
get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
		int *sz, int *off)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	struct ir3_instruction *d = NULL;

	if (ctx->scalar_pass) {
		id->defn = instr;
		id->off = 0;
		id->sz = 1;     /* considering things as N scalar regs now */
	}

	if (id->defn) {
		*sz = id->sz;
		*off = id->off;
		return id->defn;
	}

	if (instr->opc == OPC_META_COLLECT) {
		/* What about the case where collect is subset of array, we
		 * need to find the distance between where actual array starts
		 * and collect..  that probably doesn't happen currently.
		 */
		struct ir3_register *src;
		int dsz, doff;

		/* note: don't use foreach_ssa_src as this gets called once
		 * while assigning regs (which clears SSA flag)
		 */
		foreach_src_n (src, n, instr) {
			struct ir3_instruction *dd;

			if (!src->instr)
				continue;

			dd = get_definer(ctx, src->instr, &dsz, &doff);

			if ((!d) || instr_before(dd, d)) {
				d = dd;
				*sz = dsz;
				*off = doff - n;
			}
		}
	} else if (instr->cp.right || instr->cp.left) {
		/* covers also the meta:fo case, which ends up w/ single
		 * scalar instructions for each component:
		 */
		struct ir3_instruction *f = ir3_neighbor_first(instr);

		/* by definition, the entire sequence forms one linked list
		 * of single scalar register nodes (even if some of them may
		 * be splits from a texture sample (for example) instr.  We
		 * just need to walk the list finding the first element of
		 * the group defined (lowest ip)
		 */
		unsigned cnt = 0;

		/* need to skip over unused in the group: */
		while (f && (f->flags & IR3_INSTR_UNUSED)) {
			f = f->cp.right;
			cnt++;
		}

		while (f) {
			if ((!d) || instr_before(f, d))
				d = f;
			if (f == instr)
				*off = cnt;
			f = f->cp.right;
			cnt++;
		}

		*sz = cnt;
	} else {
		/* second case is looking directly at the instruction which
		 * produces multiple values (eg, texture sample), rather
		 * than the split nodes that point back to that instruction.
		 * This isn't quite right, because it may be part of a larger
		 * group, such as:
		 *
		 *    sam (f32)(xyzw)r0.x, ...
		 *    add r1.x, ...
		 *    add r1.y, ...
		 *    sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
		 *
		 * need to come up with a better way to handle that case.
		 */
		if (instr->address) {
			*sz = instr->regs[0]->size;
		} else {
			*sz = util_last_bit(instr->regs[0]->wrmask);
		}
		*off = 0;
		d = instr;
	}

	if (d->opc == OPC_META_SPLIT) {
		struct ir3_instruction *dd;
		int dsz, doff;

		dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);

		/* by definition, should come before: */
		debug_assert(instr_before(dd, d));

		*sz = MAX2(*sz, dsz);

		if (instr->opc == OPC_META_SPLIT)
			*off = MAX2(*off, instr->split.off);

		d = dd;
	}

	debug_assert(d->opc != OPC_META_SPLIT);

	id->defn = d;
	id->sz = *sz;
	id->off = *off;

	return d;
}
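/* Illustration: calling get_definer() on a vec2 collect (or on one of
 * its neighbor-linked scalar sources) walks back to the earliest
 * instruction of the group (lowest ip) and reports *sz=2, with *off
 * giving the component's position within the vecN value.
 */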
static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
		if (instr->regs_count == 0)
			continue;
		/* couple special cases: */
		if (writes_addr0(instr) || writes_addr1(instr) || writes_pred(instr)) {
			id->cls = -1;
		} else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
			id->cls = total_class_count;
		} else {
			/* and the normal case: */
			id->defn = get_definer(ctx, instr, &id->sz, &id->off);
			id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));

			/* this is a bit of duct-tape.. if we have a scenario like:
			 *
			 *   sam (f32)(x) out.x, ...
			 *   sam (f32)(x) out.y, ...
			 *
			 * Then the fanout/split meta instructions for the two different
			 * tex instructions end up grouped as left/right neighbors.  The
			 * upshot is that when you get_definer() on one of the meta:fo's
			 * you get the first sam as the definer with sz=2, but when you
			 * call get_definer() on either of the sam's you get itself as
			 * the definer with sz=1.
			 *
			 * (We actually avoid this scenario exactly, the neighbor links
			 * prevent one of the output mov's from being eliminated, so this
			 * hack should be enough.  But probably we need to rethink how we
			 * find the "defining" instruction.)
			 *
			 * TODO how do we figure out offset properly...
			 */
			if (id->defn != instr) {
				struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
				if (did->sz < id->sz) {
					did->sz = id->sz;
					did->cls = id->cls;
				}
			}
		}
	}
}
/* give each instruction a name (and ip), and count up the # of names
 * of each class:
 */
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

		if (!writes_gpr(instr))
			continue;

		if (id->defn != instr)
			continue;

		/* In scalar pass, collect/split don't get their own names,
		 * but instead inherit them from their src(s):
		 *
		 * Possibly we don't need this because of scalar_name(), but
		 * it does make the ir3_print() dumps easier to read.
		 */
		if (ctx->scalar_pass) {
			if (instr->opc == OPC_META_SPLIT) {
				instr->name = instr->regs[1]->instr->name + instr->split.off;
				continue;
			}

			if (instr->opc == OPC_META_COLLECT) {
				instr->name = instr->regs[1]->instr->name;
				continue;
			}
		}

		/* arrays which don't fit in one of the pre-defined class
		 * sizes are pre-colored:
		 */
		if ((id->cls >= 0) && (id->cls < total_class_count)) {
			/* in the scalar pass, we generate a name for each
			 * scalar component, instr->name is the name of the
			 * first component:
			 */
			unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
			instr->name = ctx->class_alloc_count[id->cls];
			ctx->class_alloc_count[id->cls] += n;
			ctx->alloc_count += n;
		}
	}
}
/**
 * Set a value for max register target.
 *
 * Currently this just rounds up to a multiple of full-vec4 (ie. the
 * granularity that we configure the hw for.. there is no point to
 * using r3.x if you aren't going to make r3.yzw available).  But
 * in reality there seems to be multiple thresholds that affect the
 * number of waves.. and we should round up the target to the next
 * threshold when we round-robin registers, to give postsched more
 * options.  When we understand that better, this is where we'd
 * implement that.
 */
static void
ra_set_register_target(struct ir3_ra_ctx *ctx, unsigned max_target)
{
	const unsigned hvec4 = 4;
	const unsigned vec4 = 2 * hvec4;

	ctx->max_target = align(max_target, vec4);

	d("New max_target=%u", ctx->max_target);
}
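/* e.g. with vec4 == 8 half-reg units, a requested max_target of 13 is
 * aligned up to 16, since the wave register footprint is configured
 * in full-vec4 granularity.
 */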
static int
pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
{
	for (unsigned i = min; i <= max; i++) {
		if (BITSET_TEST(regs, i)) {
			return i;
		}
	}
	return -1;
}

static int
pick_in_range_rev(BITSET_WORD *regs, int min, int max)
{
	for (int i = max; i >= min; i--) {
		if (BITSET_TEST(regs, i)) {
			return i;
		}
	}
	return -1;
}
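/* pick_in_range() returns the lowest available reg in [min, max], and
 * pick_in_range_rev() the highest; both return -1 when the range is
 * exhausted.
 */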
/* register selector for the a6xx+ merged register file: */
static unsigned int
ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
{
	struct ir3_ra_ctx *ctx = data;
	unsigned int class = ra_get_node_class(ctx->g, n);
	bool half, high;
	int sz = ra_class_to_size(class, &half, &high);

	assert(sz > 0);

	/* dimensions within the register class: */
	unsigned max_target, start;

	/* the regs bitset will include *all* of the virtual regs, but we lay
	 * out the different classes consecutively in the virtual register
	 * space.  So we just need to think about the base offset of a given
	 * class within the virtual register space, and offset the register
	 * space we search within by that base offset.
	 */
	unsigned base;

	/* TODO I think eventually we want to round-robin in vector pass
	 * as well, but needs some more work to calculate # of live vals
	 * for this.  (Maybe with some work, we could just figure out
	 * the scalar target and use that, since that is what we care
	 * about in the end.. but that would mean setting up use-def/
	 * liveranges for scalar pass before doing vector pass.)
	 *
	 * For now, in the vector class, just move assignments for scalar
	 * vals higher to hopefully prevent them from limiting where vecN
	 * values can be placed.  Since the scalar values are re-assigned
	 * in the 2nd pass, we don't really care where they end up in the
	 * vector pass.
	 */
	if (!ctx->scalar_pass) {
		base = ctx->set->gpr_to_ra_reg[class][0];
		if (high) {
			max_target = HIGH_CLASS_REGS(class - HIGH_OFFSET);
		} else if (half) {
			max_target = HALF_CLASS_REGS(class - HALF_OFFSET);
		} else {
			max_target = CLASS_REGS(class);
		}

		if ((sz == 1) && !high) {
			return pick_in_range_rev(regs, base, base + max_target);
		} else {
			return pick_in_range(regs, base, base + max_target);
		}
	}

	/* NOTE: this is only used in scalar pass, so the register
	 * class will be one of the scalar classes (ie. idx==0):
	 */
	base = ctx->set->gpr_to_ra_reg[class][0];
	if (high) {
		max_target = HIGH_CLASS_REGS(0);
		start = 0;
	} else if (half) {
		max_target = ctx->max_target;
		start = ctx->start_search_reg;
	} else {
		max_target = ctx->max_target / 2;
		start = ctx->start_search_reg;
	}

	/* For cat4 instructions, if the src reg is already assigned, and
	 * avail to pick, use it.  Because this doesn't introduce unnecessary
	 * dependencies, and it potentially avoids needing (ss) syncs for
	 * write after read hazards:
	 */
	struct ir3_instruction *instr = name_to_instr(ctx, n);
	if (is_sfu(instr)) {
		struct ir3_register *src = instr->regs[1];
		int src_n;

		if ((src->flags & IR3_REG_ARRAY) && !(src->flags & IR3_REG_RELATIV)) {
			struct ir3_array *arr = ir3_lookup_array(ctx->ir, src->array.id);
			src_n = arr->base + src->array.offset;
		} else {
			src_n = scalar_name(ctx, src->instr, 0);
		}

		unsigned reg = ra_get_node_reg(ctx->g, src_n);

		/* Check if the src register has been assigned yet: */
		if (reg != NO_REG) {
			if (BITSET_TEST(regs, reg)) {
				return reg;
			}
		}
	} else if (is_tex_or_prefetch(instr)) {
		/* we could have a tex fetch w/ wrmask .z, for example.. these
		 * cannot land in r0.x since that would underflow when we
		 * subtract the offset.  Ie. if we pick r0.z, and subtract
		 * the offset, the register encoded for dst will be r0.x
		 */
		unsigned n = ffs(instr->regs[0]->wrmask);
		debug_assert(n > 0);
		unsigned offset = n - 1;
		if (!half)
			offset *= 2;
		base += offset;
		max_target -= offset;
	}

	int r = pick_in_range(regs, base + start, base + max_target);
	if (r < 0) {
		/* wrap-around: */
		r = pick_in_range(regs, base, base + start);
	}

	if (r < 0) {
		/* overflow, we need to increase max_target: */
		ra_set_register_target(ctx, ctx->max_target + 1);
		return ra_select_reg_merged(n, regs, data);
	}

	if (class == ctx->set->half_classes[0]) {
		int n = r - base;
		ctx->start_search_reg = (n + 1) % ctx->max_target;
	} else if (class == ctx->set->classes[0]) {
		int n = (r - base) * 2;
		ctx->start_search_reg = (n + 1) % ctx->max_target;
	}

	return r;
}
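/* The start_search_reg update above implements the round-robin: the
 * next scalar search begins just past the previous assignment
 * (tracked in half-reg units), spreading scalar values across the
 * register file up to max_target.
 */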
static void
ra_init(struct ir3_ra_ctx *ctx)
{
	unsigned n, base;

	ir3_clear_mark(ctx->ir);
	n = ir3_count_instructions_ra(ctx->ir);

	ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_find_definers(ctx, block);
	}

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_name_instructions(ctx, block);
	}

	/* figure out the base register name for each class.  The
	 * actual ra name is class_base[cls] + instr->name;
	 */
	ctx->class_base[0] = 0;
	for (unsigned i = 1; i <= total_class_count; i++) {
		ctx->class_base[i] = ctx->class_base[i-1] +
				ctx->class_alloc_count[i-1];
	}

	/* and vreg names for array elements: */
	base = ctx->class_base[total_class_count];
	foreach_array (arr, &ctx->ir->array_list) {
		arr->base = base;
		ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
		base += reg_size_for_array(arr);
	}
	ctx->alloc_count += ctx->class_alloc_count[total_class_count];

	ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
	ralloc_steal(ctx->g, ctx->instrd);
	ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
	ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);

	/* TODO add selector callback for split (pre-a6xx) register file: */
	if (ctx->ir->compiler->gpu_id >= 600) {
		ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);

		if (ctx->scalar_pass) {
			ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
					_mesa_hash_int, _mesa_key_int_equal);
		}
	}
}
/* Map the name back to instruction: */
static struct ir3_instruction *
name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
{
	assert(!name_is_array(ctx, name));
	struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
	if (entry)
		return entry->data;
	unreachable("invalid instr name");
	return NULL;
}
static bool
name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
{
	return name >= ctx->class_base[total_class_count];
}
static struct ir3_array *
name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
{
	assert(name_is_array(ctx, name));
	foreach_array (arr, &ctx->ir->array_list) {
		unsigned sz = reg_size_for_array(arr);
		if (name < (arr->base + sz))
			return arr;
	}
	unreachable("invalid array name");
	return NULL;
}
static void
ra_destroy(struct ir3_ra_ctx *ctx)
{
	ralloc_free(ctx->g);
}
static void
__def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	debug_assert(name < ctx->alloc_count);

	/* split/collect do not actually define any real value */
	if ((instr->opc == OPC_META_SPLIT) || (instr->opc == OPC_META_COLLECT))
		return;

	/* defined on first write: */
	if (!ctx->def[name])
		ctx->def[name] = instr->ip;
	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
	BITSET_SET(bd->def, name);
}
static void
__use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	debug_assert(name < ctx->alloc_count);
	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
	if (!BITSET_TEST(bd->def, name))
		BITSET_SET(bd->use, name);
}
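/* Together, __def()/__use() reduce each name to a single conservative
 * [def, use] ip interval, e.g. a name def'd at ip 10 and last used at
 * ip 25 gets the interval [10, 25], which is what intersects()
 * compares when building the interference graph.
 */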
static void
ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	struct ir3_ra_block_data *bd;
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);

#define def(name, instr) __def(ctx, bd, name, instr)
#define use(name, instr) __use(ctx, bd, name, instr)

	bd = rzalloc(ctx->g, struct ir3_ra_block_data);

	bd->def     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->use     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->livein  = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);

	block->data = bd;

	struct ir3_instruction *first_non_input = NULL;
	foreach_instr (instr, &block->instr_list) {
		if (instr->opc != OPC_META_INPUT) {
			first_non_input = instr;
			break;
		}
	}

	foreach_instr (instr, &block->instr_list) {
		foreach_def (name, ctx, instr) {
			if (name_is_array(ctx, name)) {
				struct ir3_array *arr = name_to_array(ctx, name);

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				for (unsigned i = 0; i < arr->length; i++) {
					unsigned name = arr->base + i;
					if (arr->half)
						ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
					else
						ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
				}
			} else {
				struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
				if (is_high(instr)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->high_classes[id->cls - HIGH_OFFSET]);
				} else if (is_half(instr)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->half_classes[id->cls - HALF_OFFSET]);
				} else {
					ra_set_node_class(ctx->g, name,
							ctx->set->classes[id->cls]);
				}
			}

			def(name, instr);

			if ((instr->opc == OPC_META_INPUT) && first_non_input)
				use(name, first_non_input);
		}

		foreach_use (name, ctx, instr) {
			if (name_is_array(ctx, name)) {
				struct ir3_array *arr = name_to_array(ctx, name);

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				/* NOTE: arrays are not SSA so unconditionally
				 * set use bit:
				 */
				BITSET_SET(bd->use, name);
			}

			use(name, instr);
		}

		foreach_name (name, ctx, instr) {
			/* split/collect instructions have duplicate names
			 * as real instructions, so they skip the hashtable:
			 */
			if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
					(instr->opc == OPC_META_COLLECT))) {
				/* this is slightly annoying, we can't just use an
				 * integer on the stack
				 */
				unsigned *key = ralloc(ctx->name_to_instr, unsigned);
				*key = name;
				debug_assert(!_mesa_hash_table_search(ctx->name_to_instr, key));
				_mesa_hash_table_insert(ctx->name_to_instr, key, instr);
			}
		}
	}
}
static bool
ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
{
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
	bool progress = false;

	foreach_block (block, &ctx->ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		/* update livein: */
		for (unsigned i = 0; i < bitset_words; i++) {
			/* anything used but not def'd within a block is
			 * by definition a live value coming into the block:
			 */
			BITSET_WORD new_livein =
				(bd->use[i] | (bd->liveout[i] & ~bd->def[i]));

			if (new_livein & ~bd->livein[i]) {
				bd->livein[i] |= new_livein;
				progress = true;
			}
		}

		/* update liveout: */
		for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
			struct ir3_block *succ = block->successors[j];
			struct ir3_ra_block_data *succ_bd;

			if (!succ)
				continue;

			succ_bd = succ->data;

			for (unsigned i = 0; i < bitset_words; i++) {
				/* add anything that is livein in a successor block
				 * to our liveout:
				 */
				BITSET_WORD new_liveout =
					(succ_bd->livein[i] & ~bd->liveout[i]);

				if (new_liveout) {
					bd->liveout[i] |= new_liveout;
					progress = true;
				}
			}
		}
	}

	return progress;
}
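/* This is the standard backwards dataflow fixed-point:
 *
 *    livein(b)  = use(b) | (liveout(b) & ~def(b))
 *    liveout(b) = union of livein(s) over successors s
 *
 * which the caller iterates until no block makes progress.
 */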
static void
print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
{
	debug_printf("RA: %s:", name);
	for (unsigned i = 0; i < cnt; i++) {
		if (BITSET_TEST(bs, i)) {
			debug_printf(" %04u", i);
		}
	}
	debug_printf("\n");
}
/* size of one component of instruction result, ie. half vs full: */
static unsigned
live_size(struct ir3_instruction *instr)
{
	if (is_half(instr)) {
		return 1;
	} else if (is_high(instr)) {
		/* doesn't count towards footprint */
		return 0;
	} else {
		return 2;
	}
}
static unsigned
name_size(struct ir3_ra_ctx *ctx, unsigned name)
{
	if (name_is_array(ctx, name)) {
		struct ir3_array *arr = name_to_array(ctx, name);
		return arr->half ? 1 : 2;
	} else {
		struct ir3_instruction *instr = name_to_instr(ctx, name);
		/* in scalar pass, each name represents one scalar value,
		 * half or full precision
		 */
		return live_size(instr);
	}
}
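/* Sizes are counted in half-reg units, e.g. a full-precision scalar
 * counts as 2 and a half-precision scalar as 1, so the live value
 * totals below compare directly against the (half-reg granularity)
 * register target.
 */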
static unsigned
ra_calc_block_live_values(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	struct ir3_ra_block_data *bd = block->data;
	unsigned name;

	assert(ctx->name_to_instr);

	/* TODO this gets a bit more complicated in non-scalar pass.. but
	 * possibly a lowball estimate is fine to start with if we do
	 * round-robin in non-scalar pass?  Maybe we just want to handle
	 * that in a different fxn?
	 */
	assert(ctx->scalar_pass);

	BITSET_WORD *live =
		rzalloc_array(bd, BITSET_WORD, BITSET_WORDS(ctx->alloc_count));

	/* Add the live input values: */
	unsigned livein = 0;
	BITSET_FOREACH_SET (name, bd->livein, ctx->alloc_count) {
		livein += name_size(ctx, name);
		BITSET_SET(live, name);
	}

	d("---------------------");
	d("block%u: LIVEIN: %u", block_id(block), livein);

	unsigned max = livein;
	int cur_live = max;

	/* Now that we know the live inputs to the block, iterate the
	 * instructions adjusting the current # of live values as we
	 * see their last use:
	 */
	foreach_instr (instr, &block->instr_list) {
		if (RA_DEBUG)
			print_bitset("LIVE", live, ctx->alloc_count);

		unsigned new_live = 0;    /* newly live values */
		unsigned new_dead = 0;    /* newly no-longer live values */
		unsigned next_dead = 0;   /* newly dead following this instr */

		foreach_def (name, ctx, instr) {
			/* NOTE: checking ctx->def filters out things like split/
			 * collect which are just redefining existing live names
			 * or array writes to already live array elements:
			 */
			if (ctx->def[name] != instr->ip)
				continue;
			new_live += live_size(instr);
			d("NEW_LIVE: %u (new_live=%u, use=%u)", name, new_live, ctx->use[name]);
			BITSET_SET(live, name);
			/* There can be cases where this is *also* the last use
			 * of a value, for example instructions that write multiple
			 * values, only some of which are used.  These values are
			 * dead *after* (rather than during) this instruction.
			 */
			if (ctx->use[name] != instr->ip)
				continue;
			next_dead += live_size(instr);
			d("NEXT_DEAD: %u (next_dead=%u)", name, next_dead);
			BITSET_CLEAR(live, name);
		}

		/* To be more resilient against special cases where liverange
		 * is extended (like first_non_input), rather than using the
		 * foreach_use() iterator, we iterate the current live values
		 * instead:
		 */
		BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
			/* Is this the last use? */
			if (ctx->use[name] != instr->ip)
				continue;
			new_dead += name_size(ctx, name);
			d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
			BITSET_CLEAR(live, name);
		}

		cur_live += new_live;
		cur_live -= new_dead;

		assert(cur_live >= 0);
		d("CUR_LIVE: %u", cur_live);

		max = MAX2(max, cur_live);

		/* account for written values which are not used later,
		 * but after updating max (since they are for one cycle
		 * live):
		 */
		cur_live -= next_dead;
		assert(cur_live >= 0);

		if (RA_DEBUG) {
			unsigned cnt = 0;
			BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
				cnt += name_size(ctx, name);
			}
			assert(cur_live == cnt);
		}
	}

	d("block%u max=%u", block_id(block), max);

	/* the remaining live should match liveout (for extra sanity testing): */
	if (RA_DEBUG) {
		unsigned new_dead = 0;
		BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
			/* Is this the last use? */
			if (ctx->use[name] != block->end_ip)
				continue;
			new_dead += name_size(ctx, name);
			d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
			BITSET_CLEAR(live, name);
		}
		unsigned liveout = 0;
		BITSET_FOREACH_SET (name, bd->liveout, ctx->alloc_count) {
			liveout += name_size(ctx, name);
			BITSET_CLEAR(live, name);
		}

		if (cur_live != liveout) {
			print_bitset("LEAKED", live, ctx->alloc_count);
			/* TODO there are a few edge cases where live-range extension
			 * tells us a value is livein.  But not used by the block or
			 * liveout for the block.  Possibly a bug in the liverange
			 * extension.  But for now leave the assert disabled:
			 *   assert(cur_live == liveout);
			 */
		}
	}

	return max;
}
static unsigned
ra_calc_max_live_values(struct ir3_ra_ctx *ctx)
{
	unsigned max = 0;

	foreach_block (block, &ctx->ir->block_list) {
		unsigned block_live = ra_calc_block_live_values(ctx, block);
		max = MAX2(max, block_live);
	}

	return max;
}
static void
ra_add_interference(struct ir3_ra_ctx *ctx)
{
	struct ir3 *ir = ctx->ir;

	/* initialize array live ranges: */
	foreach_array (arr, &ir->array_list) {
		arr->start_ip = ~0;
		arr->end_ip = 0;
	}

	/* compute live ranges (use/def) on a block level, also updating
	 * block's def/use bitmasks (used below to calculate per-block
	 * livein/liveout):
	 */
	foreach_block (block, &ir->block_list) {
		ra_block_compute_live_ranges(ctx, block);
	}

	/* update per-block livein/liveout: */
	while (ra_compute_livein_liveout(ctx)) {}

	if (RA_DEBUG) {
		d("AFTER LIVEIN/OUT:");
		foreach_block (block, &ir->block_list) {
			struct ir3_ra_block_data *bd = block->data;
			d("block%u:", block_id(block));
			print_bitset("  def", bd->def, ctx->alloc_count);
			print_bitset("  use", bd->use, ctx->alloc_count);
			print_bitset("  l/i", bd->livein, ctx->alloc_count);
			print_bitset("  l/o", bd->liveout, ctx->alloc_count);
		}
		foreach_array (arr, &ir->array_list) {
			d("array%u:", arr->id);
			d("  length:   %u", arr->length);
			d("  start_ip: %u", arr->start_ip);
			d("  end_ip:   %u", arr->end_ip);
		}
		d("INSTRUCTION VREG NAMES:");
		foreach_block (block, &ctx->ir->block_list) {
			foreach_instr (instr, &block->instr_list) {
				if (!ctx->instrd[instr->ip].defn)
					continue;
				if (!writes_gpr(instr))
					continue;
				di(instr, "%04u", scalar_name(ctx, instr, 0));
			}
		}
		d("ARRAY VREG NAMES:");
		foreach_array (arr, &ctx->ir->array_list) {
			d("%04u: arr%u", arr->base, arr->id);
		}
	}

	/* extend start/end ranges based on livein/liveout info from cfg: */
	foreach_block (block, &ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		for (unsigned i = 0; i < ctx->alloc_count; i++) {
			if (BITSET_TEST(bd->livein, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
			}

			if (BITSET_TEST(bd->liveout, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
			}
		}

		foreach_array (arr, &ctx->ir->array_list) {
			for (unsigned i = 0; i < arr->length; i++) {
				if (BITSET_TEST(bd->livein, i + arr->base)) {
					arr->start_ip = MIN2(arr->start_ip, block->start_ip);
				}
				if (BITSET_TEST(bd->liveout, i + arr->base)) {
					arr->end_ip = MAX2(arr->end_ip, block->end_ip);
				}
			}
		}
	}

	if (ctx->name_to_instr) {
		unsigned max = ra_calc_max_live_values(ctx);
		ra_set_register_target(ctx, max);
	}

	for (unsigned i = 0; i < ctx->alloc_count; i++) {
		for (unsigned j = 0; j < ctx->alloc_count; j++) {
			if (intersects(ctx->def[i], ctx->use[i],
					ctx->def[j], ctx->use[j])) {
				ra_add_node_interference(ctx->g, i, j);
			}
		}
	}
}
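/* Note the interference loop above is O(alloc_count^2): every pair of
 * names whose [def, use] intervals overlap gets an interference edge,
 * which is what forces them into different registers when coloring.
 */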
/* some instructions need fix-up if dst register is half precision: */
static void fixup_half_instr_dst(struct ir3_instruction *instr)
{
	switch (opc_cat(instr->opc)) {
	case 1: /* move instructions */
		instr->cat1.dst_type = half_type(instr->cat1.dst_type);
		break;
	case 4:
		switch (instr->opc) {
		case OPC_RSQ:
			instr->opc = OPC_HRSQ;
			break;
		case OPC_LOG2:
			instr->opc = OPC_HLOG2;
			break;
		case OPC_EXP2:
			instr->opc = OPC_HEXP2;
			break;
		default:
			break;
		}
		break;
	case 5:
		instr->cat5.type = half_type(instr->cat5.type);
		break;
	default:
		break;
	}
}
/* some instructions need fix-up if src register is half precision: */
static void fixup_half_instr_src(struct ir3_instruction *instr)
{
	switch (instr->opc) {
	case OPC_MOV:
		instr->cat1.src_type = half_type(instr->cat1.src_type);
		break;
	case OPC_MAD_F32:
		instr->opc = OPC_MAD_F16;
		break;
	case OPC_SEL_B32:
		instr->opc = OPC_SEL_B16;
		break;
	case OPC_SEL_S32:
		instr->opc = OPC_SEL_S16;
		break;
	case OPC_SEL_F32:
		instr->opc = OPC_SEL_F16;
		break;
	case OPC_SAD_S32:
		instr->opc = OPC_SAD_S16;
		break;
	default:
		break;
	}
}
/* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
 * array access(es) which do not have any previous access to depend
 * on from scheduling point of view
 */
static void
reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
		struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id;

	if (reg->flags & IR3_REG_ARRAY) {
		struct ir3_array *arr =
			ir3_lookup_array(ctx->ir, reg->array.id);
		unsigned name = arr->base + reg->array.offset;
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r];

		if (reg->flags & IR3_REG_RELATIV) {
			reg->array.offset = num;
		} else {
			reg->num = num;
			reg->flags &= ~IR3_REG_SSA;
		}

		reg->flags &= ~IR3_REG_ARRAY;
	} else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
		unsigned first_component = 0;

		/* Special case for tex instructions, which may use the wrmask
		 * to mask off the first component(s).  In the scalar pass,
		 * this means the masked off component(s) are not def'd/use'd,
		 * so we get a bogus value when we ask the register_allocate
		 * algo to get the assigned reg for the unused/untouched
		 * component.  So we need to consider the first used component:
		 */
		if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
			unsigned n = ffs(id->defn->regs[0]->wrmask);
			debug_assert(n > 0);
			first_component = n - 1;
		}

		unsigned name = scalar_name(ctx, id->defn, first_component);
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;

		debug_assert(!(reg->flags & IR3_REG_RELATIV));

		debug_assert(num >= first_component);

		if (is_high(id->defn))
			num += FIRST_HIGH_REG;

		reg->num = num - first_component;

		reg->flags &= ~IR3_REG_SSA;

		if (is_half(id->defn))
			reg->flags |= IR3_REG_HALF;
	}
}
/* helper to determine which regs to assign in which pass: */
static bool
should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
	if ((instr->opc == OPC_META_SPLIT) &&
			(util_bitcount(instr->regs[1]->wrmask) > 1))
		return !ctx->scalar_pass;
	if ((instr->opc == OPC_META_COLLECT) &&
			(util_bitcount(instr->regs[0]->wrmask) > 1))
		return !ctx->scalar_pass;
	return ctx->scalar_pass;
}
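/* e.g. a vec4 collect (wrmask covering four components) is assigned
 * in the vector pass, where it is a single size-4 node, while plain
 * scalar instructions wait for the scalar pass; this is how the two
 * passes split the work without assigning a register twice.
 */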
static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_register *reg;

		if (writes_gpr(instr)) {
			if (should_assign(ctx, instr)) {
				reg_assign(ctx, instr->regs[0], instr);
				if (instr->regs[0]->flags & IR3_REG_HALF)
					fixup_half_instr_dst(instr);
			}
		}

		foreach_src_n (reg, n, instr) {
			struct ir3_instruction *src = reg->instr;

			if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
				continue;

			if (src && should_assign(ctx, instr))
				reg_assign(ctx, src->regs[0], src);

			/* Note: reg->instr could be null for IR3_REG_ARRAY */
			if (src || (reg->flags & IR3_REG_ARRAY))
				reg_assign(ctx, instr->regs[n+1], src);

			if (instr->regs[n+1]->flags & IR3_REG_HALF)
				fixup_half_instr_src(instr);
		}
	}

	/* We need to pre-color outputs for the scalar pass in
	 * ra_precolor_assigned(), so we need to actually assign
	 * them in the first pass:
	 */
	if (!ctx->scalar_pass) {
		struct ir3_instruction *in, *out;

		foreach_input (in, ctx->ir) {
			reg_assign(ctx, in->regs[0], in);
		}
		foreach_output (out, ctx->ir) {
			reg_assign(ctx, out->regs[0], out);
		}
	}
}
/* handle pre-colored registers.  This includes "arrays" (which could be of
 * length 1, used for phi webs lowered to registers in nir), as well as
 * special shader input values that need to be pinned to certain registers.
 */
static void
ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
{
	unsigned num_precolor = 0;
	for (unsigned i = 0; i < nprecolor; i++) {
		if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
			struct ir3_instruction *instr = precolor[i];

			if (instr->regs[0]->num == INVALID_REG)
				continue;

			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

			debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));

			/* only consider the first component: */
			if (id->off > 0)
				continue;

			if (ctx->scalar_pass && !should_assign(ctx, instr))
				continue;

			/* 'base' is in scalar (class 0) but we need to map that
			 * the conflicting register of the appropriate class (ie.
			 * input could be vec2/vec3/etc)
			 *
			 * Note that the higher class (larger than scalar) regs
			 * are setup to conflict with others in the same class,
			 * so for example, R1 (scalar) is also the first component
			 * of D1 (vec2/double):
			 *
			 *    Single (base) |  Double
			 *    --------------+---------------
			 *       R0         |  D0
			 *       R1         |  D0 D1
			 *       R2         |     D1 D2
			 *       R3         |        D2
			 *           .. and so on..
			 */
			unsigned regid = instr->regs[0]->num;
			unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
			unsigned name = ra_name(ctx, id);
			ra_set_node_reg(ctx->g, name, reg);
			num_precolor = MAX2(regid, num_precolor);
		}
	}

	/* pre-assign array elements:
	 *
	 * TODO this is going to need some work for half-precision.. possibly
	 * this is easier on a6xx, where we can just divide array size by two?
	 * But on a5xx and earlier it will need to track two bases.
	 */
	foreach_array (arr, &ctx->ir->array_list) {
		unsigned base = 0;

		if (arr->end_ip == 0)
			continue;

		/* figure out what else we conflict with which has already
		 * been assigned:
		 */
retry:
		foreach_array (arr2, &ctx->ir->array_list) {
			if (arr2 == arr)
				break;
			if (arr2->end_ip == 0)
				continue;
			/* if it intersects with liverange AND register range.. */
			if (intersects(arr->start_ip, arr->end_ip,
					arr2->start_ip, arr2->end_ip) &&
				intersects(base, base + reg_size_for_array(arr),
					arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
				base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
				goto retry;
			}
		}

		/* also need to not conflict with any pre-assigned inputs: */
		for (unsigned i = 0; i < nprecolor; i++) {
			struct ir3_instruction *instr = precolor[i];

			if (!instr || (instr->flags & IR3_INSTR_UNUSED))
				continue;

			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

			/* only consider the first component: */
			if (id->off > 0)
				continue;

			unsigned name = ra_name(ctx, id);
			unsigned regid = instr->regs[0]->num;

			/* Check if array intersects with liverange AND register
			 * range of the input:
			 */
			if (intersects(arr->start_ip, arr->end_ip,
							ctx->def[name], ctx->use[name]) &&
					intersects(base, base + reg_size_for_array(arr),
							regid, regid + class_sizes[id->cls])) {
				base = MAX2(base, regid + class_sizes[id->cls]);
				goto retry;
			}
		}

		arr->reg = base;

		for (unsigned i = 0; i < arr->length; i++) {
			unsigned name, reg;

			if (arr->half) {
				/* Doesn't need to do this on older generations than a6xx,
				 * since there's no conflict between full regs and half regs
				 * on them.
				 *
				 * TODO Presumably "base" could start from 0 respectively
				 * for half regs of arrays on older generations.
				 */
				unsigned base_half = base * 2 + i;
				reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
				base = base_half / 2 + 1;
			} else {
				reg = ctx->set->gpr_to_ra_reg[0][base++];
			}

			name = arr->base + i;
			ra_set_node_reg(ctx->g, name, reg);
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		foreach_array (arr, &ctx->ir->array_list) {
			unsigned first = arr->reg;
			unsigned last  = arr->reg + arr->length - 1;
			debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
					(first >> 2), "xyzw"[first & 0x3],
					(last >> 2), "xyzw"[last & 0x3]);
		}
	}
}
static void
precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	unsigned n = dest_regs(instr);
	for (unsigned i = 0; i < n; i++) {
		/* tex instructions actually have a wrmask, and
		 * don't touch masked out components.  So we
		 * shouldn't precolor them:
		 */
		if (is_tex_or_prefetch(instr) &&
				!(instr->regs[0]->wrmask & (1 << i)))
			continue;

		unsigned name = scalar_name(ctx, instr, i);
		unsigned regid = instr->regs[0]->num + i;

		if (instr->regs[0]->flags & IR3_REG_HIGH)
			regid -= FIRST_HIGH_REG;

		unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
		ra_set_node_reg(ctx->g, name, vreg);
	}
}
/* pre-color non-scalar registers based on the registers assigned in previous
 * pass.  Do this by looking actually at the fanout instructions.
 */
static void
ra_precolor_assigned(struct ir3_ra_ctx *ctx)
{
	debug_assert(ctx->scalar_pass);

	foreach_block (block, &ctx->ir->block_list) {
		foreach_instr (instr, &block->instr_list) {

			if (!writes_gpr(instr))
				continue;

			if (should_assign(ctx, instr))
				continue;

			precolor(ctx, instr);

			struct ir3_register *src;
			foreach_src (src, instr) {
				if (!src->instr)
					continue;
				precolor(ctx, src->instr);
			}
		}
	}
}
static int
ra_alloc(struct ir3_ra_ctx *ctx)
{
	if (!ra_allocate(ctx->g))
		return -1;

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_alloc(ctx, block);
	}

	return 0;
}
/* if we end up with split/collect instructions with non-matching src
 * and dest regs, that means something has gone wrong.  Which makes it
 * a pretty good sanity check.
 */
static void
ra_sanity_check(struct ir3 *ir)
{
	foreach_block (block, &ir->block_list) {
		foreach_instr (instr, &block->instr_list) {
			if (instr->opc == OPC_META_SPLIT) {
				struct ir3_register *dst = instr->regs[0];
				struct ir3_register *src = instr->regs[1];
				debug_assert(dst->num == (src->num + instr->split.off));
			} else if (instr->opc == OPC_META_COLLECT) {
				struct ir3_register *dst = instr->regs[0];
				struct ir3_register *src;

				foreach_src_n (src, n, instr) {
					debug_assert(dst->num == (src->num - n));
				}
			}
		}
	}
}
static int
ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
		unsigned nprecolor, bool scalar_pass)
{
	struct ir3_ra_ctx ctx = {
			.v = v,
			.ir = v->ir,
			.set = v->ir->compiler->set,
			.scalar_pass = scalar_pass,
	};
	int ret;

	ra_init(&ctx);
	ra_add_interference(&ctx);
	ra_precolor(&ctx, precolor, nprecolor);
	if (scalar_pass)
		ra_precolor_assigned(&ctx);
	ret = ra_alloc(&ctx);
	ra_destroy(&ctx);

	return ret;
}
int
ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
		unsigned nprecolor)
{
	int ret;

	/* First pass, assign the vecN (non-scalar) registers: */
	ret = ir3_ra_pass(v, precolor, nprecolor, false);
	if (ret)
		return ret;

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER RA (1st pass):\n");
		ir3_print(v->ir);
	}

	/* Second pass, assign the scalar registers: */
	ret = ir3_ra_pass(v, precolor, nprecolor, true);
	if (ret)
		return ret;

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER RA (2nd pass):\n");
		ir3_print(v->ir);
	}

#ifdef DEBUG
#  define SANITY_CHECK DEBUG
#else
#  define SANITY_CHECK 0
#endif
	if (SANITY_CHECK)
		ra_sanity_check(v->ir);

	return ret;
}