/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"
#include "util/register_allocate.h"
#include "util/ralloc.h"
#include "util/bitset.h"

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_ra.h"
#include "ir3_shader.h"

#define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)

#define d(fmt, ...) do { if (RA_DEBUG) { \
    printf("RA: "fmt"\n", ##__VA_ARGS__); \
} } while (0)

#define di(instr, fmt, ...) do { if (RA_DEBUG) { \
    printf("RA: "fmt": ", ##__VA_ARGS__); \
    ir3_print_instr(instr); \
} } while (0)

/*
 * Register Assignment:
 *
 * Uses the register_allocate util, which implements graph coloring
 * algo with interference classes.  To handle the cases where we need
 * consecutive registers (for example, texture sample instructions),
 * we model these as larger (double/quad/etc) registers which conflict
 * with the corresponding registers in other classes.
 *
 * Additionally we create additional classes for half-regs, which
 * do not conflict with the full-reg classes.  We do need at least
 * sizes 1-4 (to deal w/ texture sample instructions output to half-
 * reg).  At the moment we don't create the higher order half-reg
 * classes as half-reg frequently does not have enough precision
 * for texture coords at higher resolutions.
 *
 * There are some additional cases that we need to handle specially,
 * as the graph coloring algo doesn't understand "partial writes".
 * For example, a sequence like:
 *
 *   add r0.z, ...
 *   sam (f32)(xy)r0.x, ...
 *   ...
 *   sam (f32)(xyzw)r0.w, r0.x, ...   ; 3d texture, so r0.xyz are coord
 *
 * In this scenario, we treat r0.xyz as class size 3, which is written
 * (from a use/def perspective) at the 'add' instruction and ignore the
 * subsequent partial writes to r0.xy.  So the 'add r0.z, ...' is the
 * defining instruction, as it is the first to partially write r0.xyz.
 *
 * To address the fragmentation that this can potentially cause, a
 * two pass register allocation is used.  After the first pass the
 * assignment of scalars is discarded, but the assignment of vecN (for
 * N > 1) is used to pre-color in the second pass, which considers
 * only scalars.
 *
 * Arrays of arbitrary size are handled via pre-coloring a consecutive
 * sequence of registers.  Additional scalar (single component) reg
 * names are allocated starting at ctx->class_base[total_class_count]
 * (see arr->base), which are pre-colored.  In the use/def graph direct
 * access is treated as a single element use/def, and indirect access
 * is treated as use or def of all array elements.  (Only the first
 * def is tracked, in case of multiple indirect writes, etc.)
 *
 * TODO arrays that fit in one of the pre-defined class sizes should
 * not need to be pre-colored, but instead could be given a normal
 * vreg name.  (Ignoring this for now since it is a good way to work
 * out the kinks with arbitrary sized arrays.)
 *
 * TODO might be easier for debugging to split this into two passes,
 * the first assigning vreg names in a way that we could ir3_print()
 * the result and interpret it, and the second mapping vreg names to
 * actual registers.
 */

static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);
static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);

/* does it conflict? */
static bool
intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
{
    return !((a_start >= b_end) || (b_start >= a_end));
}

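/* Note: ranges are treated as half-open intervals [start, end), so two
 * ranges that merely touch at an endpoint do not conflict.  Worked example
 * (values chosen for illustration): intersects(2, 5, 5, 9) is false, while
 * intersects(2, 6, 5, 9) is true.
 */
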
static unsigned
reg_size_for_array(struct ir3_array *arr)
{
    if (arr->half)
        return DIV_ROUND_UP(arr->length, 2);

    return arr->length;
}

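/* Worked example (illustrative): a half-precision array of length 5 takes
 * DIV_ROUND_UP(5, 2) == 3 scalar reg names, since two half regs pack into
 * one full reg, while a full-precision array of length 5 takes 5 names.
 */
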
static bool
instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
{
    if (a->flags & IR3_INSTR_UNUSED)
        return false;
    return (a->ip < b->ip);
}

static struct ir3_instruction *
get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
        unsigned *sz, unsigned *off)
{
    struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
    struct ir3_instruction *d = NULL;

    if (ctx->scalar_pass) {
        id->defn = instr;
        id->off = 0;
        id->sz = 1;     /* considering things as N scalar regs now */
    }

    if (id->defn) {
        *sz = id->sz;
        *off = id->off;
        return id->defn;
    }

    if (instr->opc == OPC_META_COLLECT) {
        /* What about the case where collect is subset of array, we
         * need to find the distance between where actual array starts
         * and collect..  that probably doesn't happen currently.
         */
        int dsz, doff;

        /* note: don't use foreach_ssa_src as this gets called once
         * while assigning regs (which clears SSA flag)
         */
        foreach_src_n (src, n, instr) {
            struct ir3_instruction *dd;

            if (!src->instr)
                continue;

            dd = get_definer(ctx, src->instr, &dsz, &doff);

            if ((!d) || instr_before(dd, d)) {
                d = dd;
                *sz = dsz;
                *off = doff - n;
            }
        }
    } else if (instr->cp.right || instr->cp.left) {
        /* covers also the meta:fo case, which ends up w/ single
         * scalar instructions for each component:
         */
        struct ir3_instruction *f = ir3_neighbor_first(instr);

        /* by definition, the entire sequence forms one linked list
         * of single scalar register nodes (even if some of them may
         * be splits from a texture sample (for example) instr.  We
         * just need to walk the list finding the first element of
         * the group defined (lowest ip)
         */
        int cnt = 0;

        /* need to skip over unused in the group: */
        while (f && (f->flags & IR3_INSTR_UNUSED)) {
            f = f->cp.right;
            cnt++;
        }

        while (f) {
            if ((!d) || instr_before(f, d))
                d = f;
            if (f == instr)
                *off = cnt;
            f = f->cp.right;
            cnt++;
        }

        *sz = cnt;
    } else {
        /* second case is looking directly at the instruction which
         * produces multiple values (eg, texture sample), rather
         * than the split nodes that point back to that instruction.
         * This isn't quite right, because it may be part of a larger
         * group, such as:
         *
         *     sam (f32)(xyzw)r0.x, ...
         *     add r1.x, ...
         *     add r1.y, ...
         *     sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
         *
         * need to come up with a better way to handle that case.
         */
        if (instr->address) {
            *sz = instr->regs[0]->size;
        } else {
            *sz = util_last_bit(instr->regs[0]->wrmask);
        }
        *off = 0;
        d = instr;
    }

    if (d->opc == OPC_META_SPLIT) {
        struct ir3_instruction *dd;
        int dsz, doff;

        dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);

        /* by definition, should come before: */
        debug_assert(instr_before(dd, d));

        *sz = MAX2(*sz, dsz);

        if (instr->opc == OPC_META_SPLIT)
            *off = MAX2(*off, instr->split.off);

        d = dd;
    }

    debug_assert(d->opc != OPC_META_SPLIT);

    id->defn = d;
    id->sz = *sz;
    id->off = *off;

    return d;
}

static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
    foreach_instr (instr, &block->instr_list) {
        struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
        if (instr->regs_count == 0)
            continue;
        /* couple special cases: */
        if (writes_addr0(instr) || writes_addr1(instr) || writes_pred(instr)) {
            id->cls = -1;
        } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
            id->cls = total_class_count;
        } else {
            /* and the normal case: */
            id->defn = get_definer(ctx, instr, &id->sz, &id->off);
            id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));

            /* this is a bit of duct-tape.. if we have a scenario like:
             *
             *   sam (f32)(x) out.x, ...
             *   sam (f32)(x) out.y, ...
             *
             * Then the fanout/split meta instructions for the two different
             * tex instructions end up grouped as left/right neighbors.  The
             * upshot is that in when you get_definer() on one of the meta:fo's
             * you get definer as the first sam with sz=2, but when you call
             * get_definer() on the either of the sam's you get itself as the
             * definer with sz=1.
             *
             * (We actually avoid this scenario exactly, the neighbor links
             * prevent one of the output mov's from being eliminated, so this
             * hack should be enough.  But probably we need to rethink how we
             * find the "defining" instruction.)
             *
             * TODO how do we figure out offset properly...
             */
            if (id->defn != instr) {
                struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
                if (did->sz < id->sz) {
                    did->sz = id->sz;
                    did->cls = id->cls;
                }
            }
        }
    }
}

/* give each instruction a name (and ip), and count up the # of names
 * of each class:
 */
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
    foreach_instr (instr, &block->instr_list) {
        struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

        if (!writes_gpr(instr))
            continue;

        if (id->defn != instr)
            continue;

        /* In scalar pass, collect/split don't get their own names,
         * but instead inherit them from their src(s):
         *
         * Possibly we don't need this because of scalar_name(), but
         * it does make the ir3_print() dumps easier to read.
         */
        if (ctx->scalar_pass) {
            if (instr->opc == OPC_META_SPLIT) {
                instr->name = instr->regs[1]->instr->name + instr->split.off;
                continue;
            }

            if (instr->opc == OPC_META_COLLECT) {
                instr->name = instr->regs[1]->instr->name;
                continue;
            }
        }

        /* arrays which don't fit in one of the pre-defined class
         * sizes are pre-colored:
         */
        if ((id->cls >= 0) && (id->cls < total_class_count)) {
            /* in the scalar pass, we generate a name for each
             * scalar component, instr->name is the name of the
             * first component:
             */
            unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
            instr->name = ctx->class_alloc_count[id->cls];
            ctx->class_alloc_count[id->cls] += n;
            ctx->alloc_count += n;
        }
    }
}

/**
 * Set a value for max register target.
 *
 * Currently this just rounds up to a multiple of full-vec4 (ie. the
 * granularity that we configure the hw for.. there is no point to
 * using r3.x if you aren't going to make r3.yzw available).  But
 * in reality there seems to be multiple thresholds that affect the
 * number of waves.. and we should round up the target to the next
 * threshold when we round-robin registers, to give postsched more
 * options.  When we understand that better, this is where we'd
 * implement that.
 */
static void
ra_set_register_target(struct ir3_ra_ctx *ctx, unsigned max_target)
{
    const unsigned hvec4 = 4;
    const unsigned vec4 = 2 * hvec4;

    ctx->max_target = align(max_target, vec4);

    d("New max_target=%u", ctx->max_target);
}

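/* Worked example (illustrative): the target is counted in half-reg units,
 * so one full vec4 is 8 units; a computed max of 9 rounds up to
 * align(9, 8) == 16, i.e. two full vec4s worth of registers are exposed
 * to the allocator.
 */
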
static int
pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
{
    for (unsigned i = min; i <= max; i++) {
        if (BITSET_TEST(regs, i)) {
            return i;
        }
    }
    return -1;
}

static int
pick_in_range_rev(BITSET_WORD *regs, int min, int max)
{
    for (int i = max; i >= min; i--) {
        if (BITSET_TEST(regs, i)) {
            return i;
        }
    }
    return -1;
}

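/* Both helpers return -1 when nothing in [min, max] is free.  The forward
 * scan packs values toward the bottom of the range; the reverse scan is
 * used below to push single scalars toward the top, away from where vecN
 * values want to be placed.
 */
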
/* register selector for the a6xx+ merged register file: */
static unsigned int
ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
{
    struct ir3_ra_ctx *ctx = data;
    unsigned int class = ra_get_node_class(ctx->g, n);
    bool half, high;
    int sz = ra_class_to_size(class, &half, &high);

    assert(sz > 0);

    /* dimensions within the register class: */
    unsigned max_target, start;

    /* the regs bitset will include *all* of the virtual regs, but we lay
     * out the different classes consecutively in the virtual register
     * space.  So we just need to think about the base offset of a given
     * class within the virtual register space, and offset the register
     * space we search within by that base offset.
     */
    unsigned base;

    /* TODO I think eventually we want to round-robin in vector pass
     * as well, but needs some more work to calculate # of live vals
     * for this.  (Maybe with some work, we could just figure out
     * the scalar target and use that, since that is what we care
     * about in the end.. but that would mean setting up use-def/
     * liveranges for scalar pass before doing vector pass.)
     *
     * For now, in the vector class, just move assignments for scalar
     * vals higher to hopefully prevent them from limiting where vecN
     * values can be placed.  Since the scalar values are re-assigned
     * in the 2nd pass, we don't really care where they end up in the
     * vector pass.
     */
    if (!ctx->scalar_pass) {
        base = ctx->set->gpr_to_ra_reg[class][0];
        if (high) {
            max_target = HIGH_CLASS_REGS(class - HIGH_OFFSET);
        } else if (half) {
            max_target = HALF_CLASS_REGS(class - HALF_OFFSET);
        } else {
            max_target = CLASS_REGS(class);
        }

        if ((sz == 1) && !high) {
            return pick_in_range_rev(regs, base, base + max_target);
        } else {
            return pick_in_range(regs, base, base + max_target);
        }
    }

    /* NOTE: this is only used in scalar pass, so the register
     * class will be one of the scalar classes (ie. idx==0):
     */
    base = ctx->set->gpr_to_ra_reg[class][0];
    if (high) {
        max_target = HIGH_CLASS_REGS(0);
        start = 0;
    } else if (half) {
        max_target = ctx->max_target;
        start = ctx->start_search_reg;
    } else {
        max_target = ctx->max_target / 2;
        start = ctx->start_search_reg;
    }

    /* For cat4 instructions, if the src reg is already assigned, and
     * avail to pick, use it.  Because this doesn't introduce unnecessary
     * dependencies, and it potentially avoids needing (ss) syncs for
     * write after read hazards:
     */
    struct ir3_instruction *instr = name_to_instr(ctx, n);
    if (is_sfu(instr)) {
        struct ir3_register *src = instr->regs[1];
        int src_n;

        if ((src->flags & IR3_REG_ARRAY) && !(src->flags & IR3_REG_RELATIV)) {
            struct ir3_array *arr = ir3_lookup_array(ctx->ir, src->array.id);
            src_n = arr->base + src->array.offset;
        } else {
            src_n = scalar_name(ctx, src->instr, 0);
        }

        unsigned reg = ra_get_node_reg(ctx->g, src_n);

        /* Check if the src register has been assigned yet: */
        if (reg != NO_REG) {
            if (BITSET_TEST(regs, reg)) {
                return reg;
            }
        }
    }

    int r = pick_in_range(regs, base + start, base + max_target);
    if (r < 0) {
        /* wrap-around: */
        r = pick_in_range(regs, base, base + start);
    }

    if (r < 0) {
        /* overflow, we need to increase max_target: */
        ra_set_register_target(ctx, ctx->max_target + 1);
        return ra_select_reg_merged(n, regs, data);
    }

    if (class == ctx->set->half_classes[0]) {
        int n = r - base;
        ctx->start_search_reg = (n + 1) % ctx->max_target;
    } else if (class == ctx->set->classes[0]) {
        int n = (r - base) * 2;
        ctx->start_search_reg = (n + 1) % ctx->max_target;
    }

    return r;
}

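/* Note on the round-robin bookkeeping above: start_search_reg is kept in
 * half-reg units, which is why a full-reg pick is doubled.  E.g. if a full
 * reg at offset 3 within its class is chosen, the next scalar search
 * starts at half-unit (3 * 2) + 1 == 7, wrapping modulo max_target.
 */
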
static void
ra_init(struct ir3_ra_ctx *ctx)
{
    unsigned n, base;

    ir3_clear_mark(ctx->ir);
    n = ir3_count_instructions_ra(ctx->ir);

    ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);

    foreach_block (block, &ctx->ir->block_list) {
        ra_block_find_definers(ctx, block);
    }

    foreach_block (block, &ctx->ir->block_list) {
        ra_block_name_instructions(ctx, block);
    }

    /* figure out the base register name for each class.  The
     * actual ra name is class_base[cls] + instr->name;
     */
    ctx->class_base[0] = 0;
    for (unsigned i = 1; i <= total_class_count; i++) {
        ctx->class_base[i] = ctx->class_base[i-1] +
                ctx->class_alloc_count[i-1];
    }

    /* and vreg names for array elements: */
    base = ctx->class_base[total_class_count];
    foreach_array (arr, &ctx->ir->array_list) {
        arr->base = base;
        ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
        base += reg_size_for_array(arr);
    }
    ctx->alloc_count += ctx->class_alloc_count[total_class_count];

    /* Add vreg names for r0.xyz */
    ctx->r0_xyz_nodes = ctx->alloc_count;
    ctx->alloc_count += 3;
    ctx->hr0_xyz_nodes = ctx->alloc_count;
    ctx->alloc_count += 3;

    /* Add vreg name for prefetch-exclusion range: */
    ctx->prefetch_exclude_node = ctx->alloc_count++;

    ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
    ralloc_steal(ctx->g, ctx->instrd);
    ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
    ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);

    /* TODO add selector callback for split (pre-a6xx) register file: */
    if (ctx->v->mergedregs) {
        ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);

        if (ctx->scalar_pass) {
            ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
                    _mesa_hash_int, _mesa_key_int_equal);
        }
    }
}

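/* After ra_init() the virtual register name space is laid out as (sketch,
 * derived from the allocation order above):
 *
 *   [0 .. class_base[total_class_count])          per-class instruction names
 *   [class_base[total_class_count] .. +arrays)    array element names
 *   next 3 names                                  r0.xyz pre-color nodes
 *   next 3 names                                  hr0.xyz pre-color nodes
 *   last name                                     tex-prefetch exclusion node
 */
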
/* Map the name back to instruction: */
static struct ir3_instruction *
name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
{
    assert(!name_is_array(ctx, name));
    struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
    if (entry)
        return entry->data;
    unreachable("invalid instr name");
    return NULL;
}

static bool
name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
{
    return name >= ctx->class_base[total_class_count];
}

static struct ir3_array *
name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
{
    assert(name_is_array(ctx, name));
    foreach_array (arr, &ctx->ir->array_list) {
        unsigned sz = reg_size_for_array(arr);
        if (name < (arr->base + sz))
            return arr;
    }
    unreachable("invalid array name");
    return NULL;
}

static void
ra_destroy(struct ir3_ra_ctx *ctx)
{
    ralloc_free(ctx->g);
}

static void
__def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
        struct ir3_instruction *instr)
{
    debug_assert(name < ctx->alloc_count);

    /* split/collect do not actually define any real value */
    if ((instr->opc == OPC_META_SPLIT) || (instr->opc == OPC_META_COLLECT))
        return;

    /* defined on first write: */
    if (!ctx->def[name])
        ctx->def[name] = instr->ip;
    ctx->use[name] = MAX2(ctx->use[name], instr->ip);
    BITSET_SET(bd->def, name);
}

static void
__use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
        struct ir3_instruction *instr)
{
    debug_assert(name < ctx->alloc_count);
    ctx->use[name] = MAX2(ctx->use[name], instr->ip);
    if (!BITSET_TEST(bd->def, name))
        BITSET_SET(bd->use, name);
}

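/* Example of the def/use bookkeeping (illustrative ips): for a value first
 * written at ip 4 and last read at ip 9, def[name] == 4 and use[name] == 9,
 * giving the liverange that is later fed to intersects() when building the
 * interference graph.
 */
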
static void
ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
    struct ir3_ra_block_data *bd;
    unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);

#define def(name, instr) __def(ctx, bd, name, instr)
#define use(name, instr) __use(ctx, bd, name, instr)

    bd = rzalloc(ctx->g, struct ir3_ra_block_data);

    bd->def     = rzalloc_array(bd, BITSET_WORD, bitset_words);
    bd->use     = rzalloc_array(bd, BITSET_WORD, bitset_words);
    bd->livein  = rzalloc_array(bd, BITSET_WORD, bitset_words);
    bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);

    block->data = bd;

    struct ir3_instruction *first_non_input = NULL;
    foreach_instr (instr, &block->instr_list) {
        if (instr->opc != OPC_META_INPUT) {
            first_non_input = instr;
            break;
        }
    }

    foreach_instr (instr, &block->instr_list) {
        foreach_def (name, ctx, instr) {
            if (name_is_array(ctx, name)) {
                struct ir3_array *arr = name_to_array(ctx, name);

                arr->start_ip = MIN2(arr->start_ip, instr->ip);
                arr->end_ip = MAX2(arr->end_ip, instr->ip);

                for (unsigned i = 0; i < arr->length; i++) {
                    unsigned name = arr->base + i;
                    if (arr->half)
                        ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
                    else
                        ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
                }
            } else {
                struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
                if (is_high(instr)) {
                    ra_set_node_class(ctx->g, name,
                            ctx->set->high_classes[id->cls - HIGH_OFFSET]);
                } else if (is_half(instr)) {
                    ra_set_node_class(ctx->g, name,
                            ctx->set->half_classes[id->cls - HALF_OFFSET]);
                } else {
                    ra_set_node_class(ctx->g, name,
                            ctx->set->classes[id->cls]);
                }
            }

            def(name, instr);

            if ((instr->opc == OPC_META_INPUT) && first_non_input)
                use(name, first_non_input);

            /* Texture instructions with writemasks can be treated as smaller
             * vectors (or just scalars!) to allocate knowing that the
             * masked-out regs won't be written, but we need to make sure that
             * the start of the vector doesn't come before the first register
             * of the class:
             */
            if (is_tex_or_prefetch(instr)) {
                int writemask_skipped_regs = ffs(instr->regs[0]->wrmask) - 1;
                int r0_xyz = is_half(instr) ?
                        ctx->hr0_xyz_nodes : ctx->r0_xyz_nodes;
                for (int i = 0; i < writemask_skipped_regs; i++)
                    ra_add_node_interference(ctx->g, name, r0_xyz + i);
            }

            /* Pre-fetched textures have a lower limit for bits to encode dst
             * register, so add additional interference with registers above
             * that limit:
             */
            if (instr->opc == OPC_META_TEX_PREFETCH) {
                ra_add_node_interference(ctx->g, name,
                        ctx->prefetch_exclude_node);
            }
        }

        foreach_use (name, ctx, instr) {
            if (name_is_array(ctx, name)) {
                struct ir3_array *arr = name_to_array(ctx, name);

                arr->start_ip = MIN2(arr->start_ip, instr->ip);
                arr->end_ip = MAX2(arr->end_ip, instr->ip);

                /* NOTE: arrays are not SSA so unconditionally
                 * set the use bit:
                 */
                BITSET_SET(bd->use, name);
            }

            use(name, instr);
        }

        foreach_name (name, ctx, instr) {
            /* split/collect instructions have duplicate names
             * as real instructions, so they skip the hashtable:
             */
            if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
                    (instr->opc == OPC_META_COLLECT))) {
                /* this is slightly annoying, we can't just use an
                 * integer on the stack
                 */
                unsigned *key = ralloc(ctx->name_to_instr, unsigned);
                *key = name;
                debug_assert(!_mesa_hash_table_search(ctx->name_to_instr, key));
                _mesa_hash_table_insert(ctx->name_to_instr, key, instr);
            }
        }
    }
}

static bool
ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
{
    unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
    bool progress = false;

    foreach_block (block, &ctx->ir->block_list) {
        struct ir3_ra_block_data *bd = block->data;

        /* update livein: */
        for (unsigned i = 0; i < bitset_words; i++) {
            /* anything used but not def'd within a block is
             * by definition a live value coming into the block:
             */
            BITSET_WORD new_livein =
                (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));

            if (new_livein & ~bd->livein[i]) {
                bd->livein[i] |= new_livein;
                progress = true;
            }
        }

        /* update liveout: */
        for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
            struct ir3_block *succ = block->successors[j];
            struct ir3_ra_block_data *succ_bd;

            if (!succ)
                continue;

            succ_bd = succ->data;

            for (unsigned i = 0; i < bitset_words; i++) {
                /* add anything that is livein in a successor block
                 * to our liveout:
                 */
                BITSET_WORD new_liveout =
                    (succ_bd->livein[i] & ~bd->liveout[i]);

                if (new_liveout) {
                    bd->liveout[i] |= new_liveout;
                    progress = true;
                }
            }
        }
    }

    return progress;
}

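/* This is the standard backwards liveness dataflow, iterated by the caller
 * until it reaches a fixed point:
 *
 *   livein(b)  = use(b) | (liveout(b) & ~def(b))
 *   liveout(b) = union of livein(s) for each successor s of b
 */
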
static void
print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
{
    if (!RA_DEBUG)
        return;

    debug_printf("RA: %s:", name);
    for (unsigned i = 0; i < cnt; i++) {
        if (BITSET_TEST(bs, i)) {
            debug_printf(" %04u", i);
        }
    }
    debug_printf("\n");
}

/* size of one component of instruction result, ie. half vs full: */
static unsigned
live_size(struct ir3_instruction *instr)
{
    if (is_half(instr)) {
        return 1;
    } else if (is_high(instr)) {
        /* doesn't count towards footprint */
        return 0;
    } else {
        return 2;
    }
}

static unsigned
name_size(struct ir3_ra_ctx *ctx, unsigned name)
{
    if (name_is_array(ctx, name)) {
        struct ir3_array *arr = name_to_array(ctx, name);
        return arr->half ? 1 : 2;
    } else {
        struct ir3_instruction *instr = name_to_instr(ctx, name);
        /* in scalar pass, each name represents one scalar value,
         * half or full precision
         */
        return live_size(instr);
    }
}

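/* So liveness is accounted in half-reg units: a live full-precision scalar
 * costs 2, a half costs 1, and high regs cost 0.  E.g. three full values
 * plus two half values live at once is 3*2 + 2*1 == 8 units, exactly one
 * full vec4 of the register target.
 */
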
static unsigned
ra_calc_block_live_values(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
    struct ir3_ra_block_data *bd = block->data;
    unsigned name;

    assert(ctx->name_to_instr);

    /* TODO this gets a bit more complicated in non-scalar pass.. but
     * possibly a lowball estimate is fine to start with if we do
     * round-robin in non-scalar pass?  Maybe we just want to handle
     * that in a different fxn?
     */
    assert(ctx->scalar_pass);

    BITSET_WORD *live =
        rzalloc_array(bd, BITSET_WORD, BITSET_WORDS(ctx->alloc_count));

    /* Add the live input values: */
    unsigned livein = 0;
    BITSET_FOREACH_SET (name, bd->livein, ctx->alloc_count) {
        livein += name_size(ctx, name);
        BITSET_SET(live, name);
    }

    d("---------------------");
    d("block%u: LIVEIN: %u", block_id(block), livein);

    unsigned max = livein;
    int cur_live = max;

    /* Now that we know the live inputs to the block, iterate the
     * instructions adjusting the current # of live values as we
     * see their last use:
     */
    foreach_instr (instr, &block->instr_list) {
        print_bitset("LIVE", live, ctx->alloc_count);

        unsigned new_live = 0;    /* newly live values */
        unsigned new_dead = 0;    /* newly no-longer live values */
        unsigned next_dead = 0;   /* newly dead following this instr */

        foreach_def (name, ctx, instr) {
            /* NOTE: checking ctx->def filters out things like split/
             * collect which are just redefining existing live names
             * or array writes to already live array elements:
             */
            if (ctx->def[name] != instr->ip)
                continue;
            new_live += live_size(instr);
            d("NEW_LIVE: %u (new_live=%u, use=%u)", name, new_live, ctx->use[name]);
            BITSET_SET(live, name);
            /* There can be cases where this is *also* the last use
             * of a value, for example instructions that write multiple
             * values, only some of which are used.  These values are
             * dead *after* (rather than during) this instruction.
             */
            if (ctx->use[name] != instr->ip)
                continue;
            next_dead += live_size(instr);
            d("NEXT_DEAD: %u (next_dead=%u)", name, next_dead);
            BITSET_CLEAR(live, name);
        }

        /* To be more resilient against special cases where liverange
         * is extended (like first_non_input), rather than using the
         * foreach_use() iterator, we iterate the current live values
         * instead:
         */
        BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
            /* Is this the last use? */
            if (ctx->use[name] != instr->ip)
                continue;
            new_dead += name_size(ctx, name);
            d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
            BITSET_CLEAR(live, name);
        }

        cur_live += new_live;
        cur_live -= new_dead;

        assert(cur_live >= 0);
        d("CUR_LIVE: %u", cur_live);

        max = MAX2(max, cur_live);

        /* account for written values which are not used later,
         * but after updating max (since they are for one cycle
         * live):
         */
        cur_live -= next_dead;
        assert(cur_live >= 0);

        if (RA_DEBUG) {
            unsigned cnt = 0;
            BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
                cnt += name_size(ctx, name);
            }
            assert(cur_live == cnt);
        }
    }

    d("block%u max=%u", block_id(block), max);

    /* the remaining live should match liveout (for extra sanity testing): */
    if (RA_DEBUG) {
        unsigned new_dead = 0;
        BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
            /* Is this the last use? */
            if (ctx->use[name] != block->end_ip)
                continue;
            new_dead += name_size(ctx, name);
            d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
            BITSET_CLEAR(live, name);
        }
        unsigned liveout = 0;
        BITSET_FOREACH_SET (name, bd->liveout, ctx->alloc_count) {
            liveout += name_size(ctx, name);
            BITSET_CLEAR(live, name);
        }

        if (cur_live != liveout) {
            print_bitset("LEAKED", live, ctx->alloc_count);
            /* TODO there are a few edge cases where live-range extension
             * tells us a value is livein, but not used by the block or
             * liveout for the block.  Possibly a bug in the liverange
             * extension.  But for now leave the assert disabled:
             *    assert(cur_live == liveout);
             */
        }
    }

    ralloc_free(live);

    return max;
}

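/* The next_dead distinction above matters for, e.g., a sam writing (xyzw)
 * where only .x is read later: the unused components are live only during
 * the sam itself, so they count towards max for that one instruction and
 * are dropped immediately afterwards.
 */
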
static unsigned
ra_calc_max_live_values(struct ir3_ra_ctx *ctx)
{
    unsigned max = 0;

    foreach_block (block, &ctx->ir->block_list) {
        unsigned block_live = ra_calc_block_live_values(ctx, block);
        max = MAX2(max, block_live);
    }

    return max;
}

static void
ra_add_interference(struct ir3_ra_ctx *ctx)
{
    struct ir3 *ir = ctx->ir;

    /* initialize array live ranges: */
    foreach_array (arr, &ir->array_list) {
        arr->start_ip = ~0;
        arr->end_ip = 0;
    }

    /* set up the r0.xyz precolor regs. */
    for (int i = 0; i < 3; i++) {
        ra_set_node_reg(ctx->g, ctx->r0_xyz_nodes + i, i);
        ra_set_node_reg(ctx->g, ctx->hr0_xyz_nodes + i,
                ctx->set->first_half_reg + i);
    }

    /* pre-color node that conflict with half/full regs higher than what
     * can be encoded for tex-prefetch:
     */
    ra_set_node_reg(ctx->g, ctx->prefetch_exclude_node,
            ctx->set->prefetch_exclude_reg);

    /* compute live ranges (use/def) on a block level, also updating
     * block's def/use bitmasks (used below to calculate per-block
     * livein/liveout):
     */
    foreach_block (block, &ir->block_list) {
        ra_block_compute_live_ranges(ctx, block);
    }

    /* update per-block livein/liveout: */
    while (ra_compute_livein_liveout(ctx)) {}

    if (RA_DEBUG) {
        d("AFTER LIVEIN/OUT:");
        foreach_block (block, &ir->block_list) {
            struct ir3_ra_block_data *bd = block->data;
            d("block%u:", block_id(block));
            print_bitset("  def", bd->def, ctx->alloc_count);
            print_bitset("  use", bd->use, ctx->alloc_count);
            print_bitset("  l/i", bd->livein, ctx->alloc_count);
            print_bitset("  l/o", bd->liveout, ctx->alloc_count);
        }
        foreach_array (arr, &ir->array_list) {
            d("array%u:", arr->id);
            d("   length:   %u", arr->length);
            d("   start_ip: %u", arr->start_ip);
            d("   end_ip:   %u", arr->end_ip);
        }
        d("INSTRUCTION VREG NAMES:");
        foreach_block (block, &ctx->ir->block_list) {
            foreach_instr (instr, &block->instr_list) {
                if (!ctx->instrd[instr->ip].defn)
                    continue;
                if (!writes_gpr(instr))
                    continue;
                di(instr, "%04u", scalar_name(ctx, instr, 0));
            }
        }
        d("ARRAY VREG NAMES:");
        foreach_array (arr, &ctx->ir->array_list) {
            d("%04u: arr%u", arr->base, arr->id);
        }
    }

    /* extend start/end ranges based on livein/liveout info from cfg: */
    foreach_block (block, &ir->block_list) {
        struct ir3_ra_block_data *bd = block->data;

        for (unsigned i = 0; i < ctx->alloc_count; i++) {
            if (BITSET_TEST(bd->livein, i)) {
                ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
                ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
            }

            if (BITSET_TEST(bd->liveout, i)) {
                ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
                ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
            }
        }

        foreach_array (arr, &ctx->ir->array_list) {
            for (unsigned i = 0; i < arr->length; i++) {
                if (BITSET_TEST(bd->livein, i + arr->base)) {
                    arr->start_ip = MIN2(arr->start_ip, block->start_ip);
                }
                if (BITSET_TEST(bd->liveout, i + arr->base)) {
                    arr->end_ip = MAX2(arr->end_ip, block->end_ip);
                }
            }
        }
    }

    if (ctx->name_to_instr) {
        unsigned max = ra_calc_max_live_values(ctx);
        ra_set_register_target(ctx, max);
    }

    for (unsigned i = 0; i < ctx->alloc_count; i++) {
        for (unsigned j = 0; j < ctx->alloc_count; j++) {
            if (intersects(ctx->def[i], ctx->use[i],
                    ctx->def[j], ctx->use[j])) {
                ra_add_node_interference(ctx->g, i, j);
            }
        }
    }
}

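/* Note the interference construction above is a straightforward
 * O(alloc_count^2) sweep over all name pairs: every pair whose [def, use)
 * ranges overlap gets an edge in the graph.
 */
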
/* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
 * array access(es) which do not have any previous access to depend
 * on from scheduling point of view
 */
static void
reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
        struct ir3_instruction *instr)
{
    struct ir3_ra_instr_data *id;

    if (reg->flags & IR3_REG_ARRAY) {
        struct ir3_array *arr =
            ir3_lookup_array(ctx->ir, reg->array.id);
        unsigned name = arr->base + reg->array.offset;
        unsigned r = ra_get_node_reg(ctx->g, name);
        unsigned num = ctx->set->ra_reg_to_gpr[r];

        if (reg->flags & IR3_REG_RELATIV) {
            reg->array.offset = num;
        } else {
            reg->num = num;
            reg->flags &= ~IR3_REG_SSA;
        }

        reg->flags &= ~IR3_REG_ARRAY;
    } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
        unsigned first_component = 0;

        /* Special case for tex instructions, which may use the wrmask
         * to mask off the first component(s).  In the scalar pass,
         * this means the masked off component(s) are not def'd/use'd,
         * so we get a bogus value when we ask the register_allocate
         * algo to get the assigned reg for the unused/untouched
         * component.  So we need to consider the first used component:
         */
        if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
            unsigned n = ffs(id->defn->regs[0]->wrmask);
            debug_assert(n > 0);
            first_component = n - 1;
        }

        unsigned name = scalar_name(ctx, id->defn, first_component);
        unsigned r = ra_get_node_reg(ctx->g, name);
        unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;

        debug_assert(!(reg->flags & IR3_REG_RELATIV));

        debug_assert(num >= first_component);

        if (is_high(id->defn))
            num += FIRST_HIGH_REG;

        reg->num = num - first_component;

        reg->flags &= ~IR3_REG_SSA;

        if (is_half(id->defn))
            reg->flags |= IR3_REG_HALF;
    }
}

/* helper to determine which regs to assign in which pass: */
static bool
should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
    if ((instr->opc == OPC_META_SPLIT) &&
            (util_bitcount(instr->regs[1]->wrmask) > 1))
        return !ctx->scalar_pass;
    if ((instr->opc == OPC_META_COLLECT) &&
            (util_bitcount(instr->regs[0]->wrmask) > 1))
        return !ctx->scalar_pass;
    return ctx->scalar_pass;
}

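/* For example: a collect feeding a sam's coordinate (wrmask covering more
 * than one component) is assigned in the first (vector) pass, while
 * ordinary single-component ALU results are assigned in the second
 * (scalar) pass.
 */
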
static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
    foreach_instr (instr, &block->instr_list) {

        if (writes_gpr(instr)) {
            if (should_assign(ctx, instr)) {
                reg_assign(ctx, instr->regs[0], instr);
            }
        }

        foreach_src_n (reg, n, instr) {
            struct ir3_instruction *src = reg->instr;

            if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
                continue;

            if (src && should_assign(ctx, instr))
                reg_assign(ctx, src->regs[0], src);

            /* Note: reg->instr could be null for IR3_REG_ARRAY */
            if (src || (reg->flags & IR3_REG_ARRAY))
                reg_assign(ctx, instr->regs[n+1], src);
        }
    }

    /* We need to pre-color outputs for the scalar pass in
     * ra_precolor_assigned(), so we need to actually assign
     * them in the first pass:
     */
    if (!ctx->scalar_pass) {
        foreach_input (in, ctx->ir) {
            reg_assign(ctx, in->regs[0], in);
        }
        foreach_output (out, ctx->ir) {
            reg_assign(ctx, out->regs[0], out);
        }
    }
}

static void
assign_arr_base(struct ir3_ra_ctx *ctx, struct ir3_array *arr,
        struct ir3_instruction **precolor, unsigned nprecolor)
{
    unsigned base = 0;

    /* figure out what else we conflict with which has already
     * been assigned:
     */
retry:
    foreach_array (arr2, &ctx->ir->array_list) {
        if (arr2 == arr)
            break;
        if (arr2->end_ip == 0)
            continue;
        /* if it intersects with liverange AND register range.. */
        if (intersects(arr->start_ip, arr->end_ip,
                arr2->start_ip, arr2->end_ip) &&
            intersects(base, base + reg_size_for_array(arr),
                arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
            base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
            goto retry;
        }
    }

    /* also need to not conflict with any pre-assigned inputs: */
    for (unsigned i = 0; i < nprecolor; i++) {
        struct ir3_instruction *instr = precolor[i];

        if (!instr || (instr->flags & IR3_INSTR_UNUSED))
            continue;

        struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

        /* only consider the first component: */
        if (id->off > 0)
            continue;

        unsigned name = ra_name(ctx, id);
        unsigned regid = instr->regs[0]->num;

        /* Check if array intersects with liverange AND register
         * range of the input:
         */
        if (intersects(arr->start_ip, arr->end_ip,
                    ctx->def[name], ctx->use[name]) &&
                intersects(base, base + reg_size_for_array(arr),
                    regid, regid + class_sizes[id->cls])) {
            base = MAX2(base, regid + class_sizes[id->cls]);
            goto retry;
        }
    }

    arr->reg = base;
}

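/* This is a simple first-fit search: any conflict bumps 'base' past the
 * conflicting allocation and restarts the scan, so the loop terminates
 * once a base with no liverange+register overlap remains.
 */
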
/* handle pre-colored registers.  This includes "arrays" (which could be of
 * length 1, used for phi webs lowered to registers in nir), as well as
 * special shader input values that need to be pinned to certain registers.
 */
static void
ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
{
    for (unsigned i = 0; i < nprecolor; i++) {
        if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
            struct ir3_instruction *instr = precolor[i];

            if (instr->regs[0]->num == INVALID_REG)
                continue;

            struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

            debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));

            /* only consider the first component: */
            if (id->off > 0)
                continue;

            if (ctx->scalar_pass && !should_assign(ctx, instr))
                continue;

            /* 'base' is in scalar (class 0) but we need to map that
             * the conflicting register of the appropriate class (ie.
             * input could be vec2/vec3/etc)
             *
             * Note that the higher class (larger than scalar) regs
             * are setup to conflict with others in the same class,
             * so for example, R1 (scalar) is also the first component
             * of D1 (vec2/double):
             *
             *    Single (base) |  Double
             *    --------------+---------------
             *       R0         |  D0
             *       R1         |  D0 D1
             *       R2         |     D1 D2
             *       R3         |        D2 D3
             *       .. and so on..
             */
            unsigned regid = instr->regs[0]->num;
            unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
            unsigned name = ra_name(ctx, id);
            ra_set_node_reg(ctx->g, name, reg);
        }
    }

    /* pre-assign array elements:
     *
     * TODO this is going to need some work for half-precision.. possibly
     * this is easier on a6xx, where we can just divide array size by two?
     * But on a5xx and earlier it will need to track two bases.
     */
    foreach_array (arr, &ctx->ir->array_list) {

        if (arr->end_ip == 0)
            continue;

        if (!ctx->scalar_pass)
            assign_arr_base(ctx, arr, precolor, nprecolor);

        unsigned base = arr->reg;

        for (unsigned i = 0; i < arr->length; i++) {
            unsigned name, reg;

            if (arr->half) {
                /* Doesn't need to do this on older generations than a6xx,
                 * since there's no conflict between full regs and half regs
                 * on them.
                 *
                 * TODO Presumably "base" could start from 0 respectively
                 * for half regs of arrays on older generations.
                 */
                unsigned base_half = base * 2 + i;
                reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
                base = base_half / 2 + 1;
            } else {
                reg = ctx->set->gpr_to_ra_reg[0][base++];
            }

            name = arr->base + i;
            ra_set_node_reg(ctx->g, name, reg);
        }
    }

    if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
        foreach_array (arr, &ctx->ir->array_list) {
            unsigned first = arr->reg;
            unsigned last  = arr->reg + arr->length - 1;
            debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
                    (first >> 2), "xyzw"[first & 0x3],
                    (last >> 2), "xyzw"[last & 0x3]);
        }
    }
}

static void
precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
    struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
    unsigned n = dest_regs(instr);
    for (unsigned i = 0; i < n; i++) {
        /* tex instructions actually have a wrmask, and
         * don't touch masked out components.  So we
         * shouldn't precolor them:
         */
        if (is_tex_or_prefetch(instr) &&
                !(instr->regs[0]->wrmask & (1 << i)))
            continue;

        unsigned name = scalar_name(ctx, instr, i);
        unsigned regid = instr->regs[0]->num + i;

        if (instr->regs[0]->flags & IR3_REG_HIGH)
            regid -= FIRST_HIGH_REG;

        unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
        ra_set_node_reg(ctx->g, name, vreg);
    }
}

/* pre-color non-scalar registers based on the registers assigned in previous
 * pass.  Do this by looking actually at the fanout instructions.
 */
static void
ra_precolor_assigned(struct ir3_ra_ctx *ctx)
{
    debug_assert(ctx->scalar_pass);

    foreach_block (block, &ctx->ir->block_list) {
        foreach_instr (instr, &block->instr_list) {

            if (!writes_gpr(instr))
                continue;

            if (should_assign(ctx, instr))
                continue;

            precolor(ctx, instr);

            foreach_src (src, instr) {
                if (!src->instr)
                    continue;
                precolor(ctx, src->instr);
            }
        }
    }
}

static int
ra_alloc(struct ir3_ra_ctx *ctx)
{
    if (!ra_allocate(ctx->g))
        return -1;

    foreach_block (block, &ctx->ir->block_list) {
        ra_block_alloc(ctx, block);
    }

    return 0;
}

/* if we end up with split/collect instructions with non-matching src
 * and dest regs, that means something has gone wrong.  Which makes it
 * a pretty good sanity check.
 */
static void
ra_sanity_check(struct ir3 *ir)
{
    foreach_block (block, &ir->block_list) {
        foreach_instr (instr, &block->instr_list) {
            if (instr->opc == OPC_META_SPLIT) {
                struct ir3_register *dst = instr->regs[0];
                struct ir3_register *src = instr->regs[1];
                debug_assert(dst->num == (src->num + instr->split.off));
            } else if (instr->opc == OPC_META_COLLECT) {
                struct ir3_register *dst = instr->regs[0];

                foreach_src_n (src, n, instr) {
                    debug_assert(dst->num == (src->num - n));
                }
            }
        }
    }
}

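/* E.g. a collect with dst r2.x must have its sources at r2.x, r2.y, r2.z,
 * ... (dst->num == src->num - n for the n'th source), and a split's dst
 * must equal its src plus the split offset; anything else means RA broke
 * a vecN grouping.
 */
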
static int
ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
        unsigned nprecolor, bool scalar_pass)
{
    struct ir3_ra_ctx ctx = {
            .v = v,
            .ir = v->ir,
            .set = v->mergedregs ?
                v->ir->compiler->mergedregs_set : v->ir->compiler->set,
            .scalar_pass = scalar_pass,
    };
    int ret;

    ra_init(&ctx);
    ra_add_interference(&ctx);
    ra_precolor(&ctx, precolor, nprecolor);
    if (scalar_pass)
        ra_precolor_assigned(&ctx);
    ret = ra_alloc(&ctx);
    ra_destroy(&ctx);

    return ret;
}

int
ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
        unsigned nprecolor)
{
    int ret;

    /* First pass, assign the vecN (non-scalar) registers: */
    ret = ir3_ra_pass(v, precolor, nprecolor, false);
    if (ret)
        return ret;

    ir3_debug_print(v->ir, "AFTER: ir3_ra (1st pass)");

    /* Second pass, assign the scalar registers: */
    ret = ir3_ra_pass(v, precolor, nprecolor, true);
    if (ret)
        return ret;

    ir3_debug_print(v->ir, "AFTER: ir3_ra (2nd pass)");

#ifdef DEBUG
#  define SANITY_CHECK DEBUG
#else
#  define SANITY_CHECK 0
#endif
    if (SANITY_CHECK)
        ra_sanity_check(v->ir);

    return ret;
}