/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"
#include "util/register_allocate.h"
#include "util/ralloc.h"
#include "util/bitset.h"

#include "ir3.h"
#include "ir3_shader.h"
#include "ir3_ra.h"
38 #define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)
42 #define d(fmt, ...) do { if (RA_DEBUG) { \
43 printf("RA: "fmt"\n", ##__VA_ARGS__); \
46 #define di(instr, fmt, ...) do { if (RA_DEBUG) { \
47 printf("RA: "fmt": ", ##__VA_ARGS__); \
48 ir3_print_instr(instr); \

/*
 * Register Assignment:
 *
 * Uses the register_allocate util, which implements graph coloring
 * algo with interference classes.  To handle the cases where we need
 * consecutive registers (for example, texture sample instructions),
 * we model these as larger (double/quad/etc) registers which conflict
 * with the corresponding registers in other classes.
 *
 * Additionally we create separate classes for half-regs, which
 * do not conflict with the full-reg classes.  We do need at least
 * sizes 1-4 (to deal w/ texture sample instructions output to half-
 * reg).  At the moment we don't create the higher order half-reg
 * classes as half-reg frequently does not have enough precision
 * for texture coords at higher resolutions.
 *
 * There are some additional cases that we need to handle specially,
 * as the graph coloring algo doesn't understand "partial writes".
 * For example, a sequence like:
 *
 *   add r0.z, ...
 *   sam (f32)(xy)r0.x, ...
 *   ...
 *   sam (f32)(xyzw)r0.w, r0.x, ...  ; 3d texture, so r0.xyz are coord
 *
 * In this scenario, we treat r0.xyz as class size 3, which is written
 * (from a use/def perspective) at the 'add' instruction and ignore the
 * subsequent partial writes to r0.xy.  So the 'add r0.z, ...' is the
 * defining instruction, as it is the first to partially write r0.xyz.
 *
 * To address the fragmentation that this can potentially cause, a
 * two pass register allocation is used.  After the first pass the
 * assignment of scalars is discarded, but the assignment of vecN (for
 * N > 1) is used to pre-color in the second pass, which considers
 * only scalars.
 *
 * Arrays of arbitrary size are handled via pre-coloring a consecutive
 * sequence of registers.  Additional scalar (single component) reg
 * names are allocated starting at ctx->class_base[total_class_count]
 * (see arr->base), which are pre-colored.  In the use/def graph direct
 * access is treated as a single element use/def, and indirect access
 * is treated as use or def of all array elements.  (Only the first
 * def is tracked, in case of multiple indirect writes, etc.)
 *
 * TODO arrays that fit in one of the pre-defined class sizes should
 * not need to be pre-colored, but instead could be given a normal
 * vreg name.  (Ignoring this for now since it is a good way to work
 * out the kinks with arbitrary sized arrays.)
 *
 * TODO might be easier for debugging to split this into two passes,
 * the first assigning vreg names in a way that we could ir3_print()
 * before the actual register assignment.
 */
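
/* For example, since a vec2 occupies two consecutive scalar registers, the
 * vec2 allocated at D1 conflicts with scalars R1 and R2 (see the table in
 * ra_precolor() below; register numbers here are illustrative only).
 */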

static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);

static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);

/* does it conflict? */
static inline bool
intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
{
	return !((a_start >= b_end) || (b_start >= a_end));
}
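
/* returns true if 'a' is scheduled before 'b' (ignoring 'a' if it has been
 * marked unused):
 */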
static bool
instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
{
	if (a->flags & IR3_INSTR_UNUSED)
		return false;
	return (a->ip < b->ip);
}
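
/* Find the instruction which defines the register group that instr's dst
 * belongs to, returning the group size (*sz) and instr's offset within
 * that group (*off):
 */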
static struct ir3_instruction *
get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
		int *sz, int *off)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	struct ir3_instruction *d = NULL;

	if (ctx->scalar_pass) {
		id->defn = instr;
		id->off = 0;
		id->sz = 1;     /* considering things as N scalar regs now */
	}

	if (id->defn) {
		*sz = id->sz;
		*off = id->off;
		return id->defn;
	}

	if (instr->opc == OPC_META_COLLECT) {
		/* What about the case where collect is subset of array, we
		 * need to find the distance between where actual array starts
		 * and collect..  that probably doesn't happen currently.
		 */
		int dsz, doff;

		/* note: don't use foreach_ssa_src as this gets called once
		 * while assigning regs (which clears SSA flag)
		 */
		foreach_src_n (src, n, instr) {
			struct ir3_instruction *dd;
			if (!src->instr)
				continue;

			dd = get_definer(ctx, src->instr, &dsz, &doff);

			if ((!d) || instr_before(dd, d)) {
				d = dd;
				*sz = dsz;
				*off = doff - n;
			}
		}

	} else if (instr->cp.right || instr->cp.left) {
		/* covers also the meta:fo case, which ends up w/ single
		 * scalar instructions for each component:
		 */
		struct ir3_instruction *f = ir3_neighbor_first(instr);

		/* by definition, the entire sequence forms one linked list
		 * of single scalar register nodes (even if some of them may
		 * be splits from a texture sample (for example) instr.  We
		 * just need to walk the list finding the first element of
		 * the group defined (lowest ip)
		 */
		int cnt = 0;

		/* need to skip over unused in the group: */
		while (f && (f->flags & IR3_INSTR_UNUSED)) {
			f = f->cp.right;
			cnt++;
		}

		while (f) {
			if ((!d) || instr_before(f, d))
				d = f;
			if (f == instr)
				*off = cnt;
			f = f->cp.right;
			cnt++;
		}

		*sz = cnt;

	} else {
		/* second case is looking directly at the instruction which
		 * produces multiple values (eg, texture sample), rather
		 * than the split nodes that point back to that instruction.
		 * This isn't quite right, because it may be part of a larger
		 * group, such as:
		 *
		 *    sam (f32)(xyzw)r0.x, ...
		 *    add r1.x, ...
		 *    sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
		 *
		 * need to come up with a better way to handle that case.
		 */
		if (instr->address) {
			*sz = instr->regs[0]->size;
		} else {
			*sz = util_last_bit(instr->regs[0]->wrmask);
		}
		*off = 0;
		d = instr;
	}

	if (d->opc == OPC_META_SPLIT) {
		struct ir3_instruction *dd;
		int dsz, doff;

		dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);

		/* by definition, should come before: */
		ra_assert(ctx, instr_before(dd, d));

		*sz = MAX2(*sz, dsz);

		if (instr->opc == OPC_META_SPLIT)
			*off = MAX2(*off, instr->split.off);

		d = dd;
	}

	ra_assert(ctx, d->opc != OPC_META_SPLIT);

	id->defn = d;
	id->sz = *sz;
	id->off = *off;

	return d;
}
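
/* cache the defining instruction, size and offset (and from those, the
 * register class) for each instruction's dst in the block:
 */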
static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
		if (instr->regs_count == 0)
			continue;
		/* couple special cases: */
		if (writes_addr0(instr) || writes_addr1(instr) || writes_pred(instr)) {
			id->cls = -1;
		} else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
			id->cls = total_class_count;
		} else {
			/* and the normal case: */
			id->defn = get_definer(ctx, instr, &id->sz, &id->off);
			id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));

			/* this is a bit of duct-tape.. if we have a scenario like:
			 *
			 *   sam (f32)(x) out.x, ...
			 *   sam (f32)(x) out.y, ...
			 *
			 * Then the fanout/split meta instructions for the two different
			 * tex instructions end up grouped as left/right neighbors.  The
			 * upshot is that when you get_definer() on one of the meta:fo's
			 * you get the definer as the first sam with sz=2, but when you
			 * call get_definer() on either of the sam's you get itself as
			 * the definer with sz=1.
			 *
			 * (We actually avoid this scenario exactly, the neighbor links
			 * prevent one of the output mov's from being eliminated, so this
			 * hack should be enough.  But probably we need to rethink how we
			 * find the "defining" instruction.)
			 *
			 * TODO how do we figure out offset properly...
			 */
			if (id->defn != instr) {
				struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
				if (did->sz < id->sz) {
					did->sz = id->sz;
					did->cls = id->cls;
				}
			}
		}
	}
}

/* give each instruction a name (and ip), and count up the # of names
 * of each class:
 */
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

		if (!writes_gpr(instr))
			continue;

		if (id->defn != instr)
			continue;

		/* In scalar pass, collect/split don't get their own names,
		 * but instead inherit them from their src(s):
		 *
		 * Possibly we don't need this because of scalar_name(), but
		 * it does make the ir3_print() dumps easier to read.
		 */
		if (ctx->scalar_pass) {
			if (instr->opc == OPC_META_SPLIT) {
				instr->name = instr->regs[1]->instr->name + instr->split.off;
				continue;
			}

			if (instr->opc == OPC_META_COLLECT) {
				instr->name = instr->regs[1]->instr->name;
				continue;
			}
		}

		/* arrays which don't fit in one of the pre-defined class
		 * sizes are pre-colored:
		 */
		if ((id->cls >= 0) && (id->cls < total_class_count)) {
			/* in the scalar pass, we generate a name for each
			 * scalar component, instr->name is the name of the
			 * first component:
			 */
			unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
			instr->name = ctx->class_alloc_count[id->cls];
			ctx->class_alloc_count[id->cls] += n;
			ctx->alloc_count += n;
		}
	}
}

/*
 * Set a value for max register target.
 *
 * Currently this just rounds up to a multiple of full-vec4 (ie. the
 * granularity that we configure the hw for.. there is no point to
 * using r3.x if you aren't going to make r3.yzw available).  But
 * in reality there seems to be multiple thresholds that affect the
 * number of waves.. and we should round up the target to the next
 * threshold when we round-robin registers, to give postsched more
 * options.  When we understand that better, this is where we'd
 * implement that.
 */
static void
ra_set_register_target(struct ir3_ra_ctx *ctx, unsigned max_target)
{
	const unsigned hvec4 = 4;
	const unsigned vec4 = 2 * hvec4;
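
	/* round up to full-vec4 granularity, in units of half-regs, e.g. a
	 * target of 13 half-regs becomes 16, ie. two full vec4s.  (Numbers
	 * here are illustrative only.)
	 */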
	ctx->max_target = align(max_target, vec4);

	d("New max_target=%u", ctx->max_target);
}
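
/* pick the lowest (or, for the _rev variant, highest) available register in
 * [min, max], returning -1 if nothing in the range is free:
 */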
static int
pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
{
	for (unsigned i = min; i <= max; i++) {
		if (BITSET_TEST(regs, i)) {
			return i;
		}
	}
	return -1;
}

static int
pick_in_range_rev(BITSET_WORD *regs, int min, int max)
{
	for (int i = max; i >= min; i--) {
		if (BITSET_TEST(regs, i)) {
			return i;
		}
	}
	return -1;
}

/* register selector for the a6xx+ merged register file: */
static unsigned int
ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
{
	struct ir3_ra_ctx *ctx = data;
	unsigned int class = ra_get_node_class(ctx->g, n);
	bool half, high;
	int sz = ra_class_to_size(class, &half, &high);

	assert(sz > 0);

	/* dimensions within the register class: */
	unsigned max_target, start;

	/* the regs bitset will include *all* of the virtual regs, but we lay
	 * out the different classes consecutively in the virtual register
	 * space.  So we just need to think about the base offset of a given
	 * class within the virtual register space, and offset the register
	 * space we search within by that base offset.
	 */
	unsigned base;

	/* TODO I think eventually we want to round-robin in vector pass
	 * as well, but needs some more work to calculate # of live vals
	 * for this.  (Maybe with some work, we could just figure out
	 * the scalar target and use that, since that is what we care
	 * about in the end.. but that would mean setting up use-def/
	 * liveranges for scalar pass before doing vector pass.)
	 *
	 * For now, in the vector class, just move assignments for scalar
	 * vals higher to hopefully prevent them from limiting where vecN
	 * values can be placed.  Since the scalar values are re-assigned
	 * in the 2nd pass, we don't really care where they end up in the
	 * vector pass.
	 */
	if (!ctx->scalar_pass) {
		base = ctx->set->gpr_to_ra_reg[class][0];
		if (high) {
			max_target = HIGH_CLASS_REGS(class - HIGH_OFFSET);
		} else if (half) {
			max_target = HALF_CLASS_REGS(class - HALF_OFFSET);
		} else {
			max_target = CLASS_REGS(class);
		}

		if ((sz == 1) && !high) {
			return pick_in_range_rev(regs, base, base + max_target);
		} else {
			return pick_in_range(regs, base, base + max_target);
		}
	} else {
		ra_assert(ctx, sz == 1);
	}

	/* NOTE: this is only used in scalar pass, so the register
	 * class will be one of the scalar classes (ie. idx==0):
	 */
	base = ctx->set->gpr_to_ra_reg[class][0];
	if (high) {
		max_target = HIGH_CLASS_REGS(0);
		start = 0;
	} else if (half) {
		max_target = ctx->max_target;
		start = ctx->start_search_reg;
	} else {
		max_target = ctx->max_target / 2;
		start = ctx->start_search_reg;
	}

	/* For cat4 instructions, if the src reg is already assigned, and
	 * avail to pick, use it.  Because this doesn't introduce unnecessary
	 * dependencies, and it potentially avoids needing (ss) syncs for
	 * write after read hazards:
	 */
	struct ir3_instruction *instr = name_to_instr(ctx, n);
	if (is_sfu(instr)) {
		struct ir3_register *src = instr->regs[1];
		int src_n;

		if ((src->flags & IR3_REG_ARRAY) && !(src->flags & IR3_REG_RELATIV)) {
			struct ir3_array *arr = ir3_lookup_array(ctx->ir, src->array.id);
			src_n = arr->base + src->array.offset;
		} else {
			src_n = scalar_name(ctx, src->instr, 0);
		}

		unsigned reg = ra_get_node_reg(ctx->g, src_n);

		/* Check if the src register has been assigned yet: */
		if (reg != NO_REG) {
			if (BITSET_TEST(regs, reg)) {
				return reg;
			}
		}
	}

	int r = pick_in_range(regs, base + start, base + max_target);
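	/* if nothing is free between the search start point and the top of
	 * the class, wrap around and search from the bottom of the class:
	 */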
	if (r < 0) {
		r = pick_in_range(regs, base, base + start);
	}

	if (r < 0) {
		/* overflow, we need to increase max_target: */
		ra_set_register_target(ctx, ctx->max_target + 1);
		return ra_select_reg_merged(n, regs, data);
	}

	if (class == ctx->set->half_classes[0]) {
		int n = r - base;
		ctx->start_search_reg = (n + 1) % ctx->max_target;
	} else if (class == ctx->set->classes[0]) {
		int n = (r - base) * 2;
		ctx->start_search_reg = (n + 1) % ctx->max_target;
	}

	return r;
}
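
/* allocate the per-instruction state and interference graph, and assign
 * vreg names:
 */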
static void
ra_init(struct ir3_ra_ctx *ctx)
{
	unsigned n, base;

	ir3_clear_mark(ctx->ir);
	n = ir3_count_instructions_ra(ctx->ir);

	ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_find_definers(ctx, block);
	}

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_name_instructions(ctx, block);
	}

	/* figure out the base register name for each class.  The
	 * actual ra name is class_base[cls] + instr->name;
	 */
	ctx->class_base[0] = 0;
	for (unsigned i = 1; i <= total_class_count; i++) {
		ctx->class_base[i] = ctx->class_base[i-1] +
				ctx->class_alloc_count[i-1];
	}

	/* and vreg names for array elements: */
	base = ctx->class_base[total_class_count];
	foreach_array (arr, &ctx->ir->array_list) {
		arr->base = base;
		ctx->class_alloc_count[total_class_count] += arr->length;
		base += arr->length;
	}
	ctx->alloc_count += ctx->class_alloc_count[total_class_count];

	/* Add vreg names for r0.xyz */
	ctx->r0_xyz_nodes = ctx->alloc_count;
	ctx->alloc_count += 3;
	ctx->hr0_xyz_nodes = ctx->alloc_count;
	ctx->alloc_count += 3;

	/* Add vreg name for prefetch-exclusion range: */
	ctx->prefetch_exclude_node = ctx->alloc_count++;

	if (RA_DEBUG) {
		d("INSTRUCTION VREG NAMES:");
		foreach_block (block, &ctx->ir->block_list) {
			foreach_instr (instr, &block->instr_list) {
				if (!ctx->instrd[instr->ip].defn)
					continue;
				if (!writes_gpr(instr))
					continue;
				di(instr, "%04u", scalar_name(ctx, instr, 0));
			}
		}
		d("ARRAY VREG NAMES:");
		foreach_array (arr, &ctx->ir->array_list) {
			d("%04u: arr%u", arr->base, arr->id);
		}
		d("EXTRA VREG NAMES:");
		d("%04u: r0_xyz_nodes", ctx->r0_xyz_nodes);
		d("%04u: hr0_xyz_nodes", ctx->hr0_xyz_nodes);
		d("%04u: prefetch_exclude_node", ctx->prefetch_exclude_node);
	}

	ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
	ralloc_steal(ctx->g, ctx->instrd);
	ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
	ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);

	/* TODO add selector callback for split (pre-a6xx) register file: */
	if (ctx->v->mergedregs) {
		ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);

		if (ctx->scalar_pass) {
			ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
					_mesa_hash_int, _mesa_key_int_equal);
		}
	}
}

/* Map the name back to instruction: */
static struct ir3_instruction *
name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
{
	ra_assert(ctx, !name_is_array(ctx, name));
	struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
	if (entry)
		return entry->data;
	ra_unreachable(ctx, "invalid instr name");
	return NULL;
}

static bool
name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
{
	return name >= ctx->class_base[total_class_count];
}

static struct ir3_array *
name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
{
	ra_assert(ctx, name_is_array(ctx, name));
	foreach_array (arr, &ctx->ir->array_list) {
		if (name < (arr->base + arr->length))
			return arr;
	}
	ra_unreachable(ctx, "invalid array name");
	return NULL;
}

static void
ra_destroy(struct ir3_ra_ctx *ctx)
{
	ralloc_free(ctx->g);
}
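
/* helpers to track the def (first write) and last use of each name, also
 * updating the block's def/use bitsets used by the livein/liveout pass:
 */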
static void
__def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	ra_assert(ctx, name < ctx->alloc_count);

	/* split/collect do not actually define any real value */
	if ((instr->opc == OPC_META_SPLIT) || (instr->opc == OPC_META_COLLECT))
		return;

	/* defined on first write: */
	if (!ctx->def[name])
		ctx->def[name] = instr->ip;
	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
	BITSET_SET(bd->def, name);
}

static void
__use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	ra_assert(ctx, name < ctx->alloc_count);
	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
	if (!BITSET_TEST(bd->def, name))
		BITSET_SET(bd->use, name);
}
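
/* per-block pass to record use/def ips and bitsets, and to set the register
 * class of each node in the interference graph:
 */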
static void
ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	struct ir3_ra_block_data *bd;
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);

#define def(name, instr) __def(ctx, bd, name, instr)
#define use(name, instr) __use(ctx, bd, name, instr)

	bd = rzalloc(ctx->g, struct ir3_ra_block_data);

	bd->def     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->use     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->livein  = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);

	block->data = bd;

	struct ir3_instruction *first_non_input = NULL;
	foreach_instr (instr, &block->instr_list) {
		if (instr->opc != OPC_META_INPUT) {
			first_non_input = instr;
			break;
		}
	}

	foreach_instr (instr, &block->instr_list) {
		foreach_def (name, ctx, instr) {
			if (name_is_array(ctx, name)) {
				struct ir3_array *arr = name_to_array(ctx, name);

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				for (unsigned i = 0; i < arr->length; i++) {
					unsigned name = arr->base + i;
					if (arr->half)
						ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
					else
						ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
				}
			} else {
				struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
				if (is_high(instr)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->high_classes[id->cls - HIGH_OFFSET]);
				} else if (is_half(instr)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->half_classes[id->cls - HALF_OFFSET]);
				} else {
					ra_set_node_class(ctx->g, name,
							ctx->set->classes[id->cls]);
				}
			}

			def(name, instr);

			if ((instr->opc == OPC_META_INPUT) && first_non_input)
				use(name, first_non_input);

			/* Texture instructions with writemasks can be treated as smaller
			 * vectors (or just scalars!) to allocate knowing that the
			 * masked-out regs won't be written, but we need to make sure that
			 * the start of the vector doesn't come before the first register
			 * or we'll wrap.
			 */
			if (is_tex_or_prefetch(instr)) {
				int writemask_skipped_regs = ffs(instr->regs[0]->wrmask) - 1;
				int r0_xyz = is_half(instr) ?
					ctx->hr0_xyz_nodes : ctx->r0_xyz_nodes;
				for (int i = 0; i < writemask_skipped_regs; i++)
					ra_add_node_interference(ctx->g, name, r0_xyz + i);
			}

			/* Pre-fetched textures have a lower limit for bits to encode dst
			 * register, so add additional interference with registers above
			 * that limit:
			 */
			if (instr->opc == OPC_META_TEX_PREFETCH) {
				ra_add_node_interference(ctx->g, name,
						ctx->prefetch_exclude_node);
			}
		}

		foreach_use (name, ctx, instr) {
			if (name_is_array(ctx, name)) {
				struct ir3_array *arr = name_to_array(ctx, name);

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				/* NOTE: arrays are not SSA so unconditionally
				 * set use bit:
				 */
				BITSET_SET(bd->use, name);
			}

			use(name, instr);
		}

		foreach_name (name, ctx, instr) {
			/* split/collect instructions have duplicate names
			 * as real instructions, so they skip the hashtable:
			 */
			if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
					(instr->opc == OPC_META_COLLECT))) {
				/* this is slightly annoying, we can't just use an
				 * integer on the stack
				 */
				unsigned *key = ralloc(ctx->name_to_instr, unsigned);
				*key = name;
				ra_assert(ctx, !_mesa_hash_table_search(ctx->name_to_instr, key));
				_mesa_hash_table_insert(ctx->name_to_instr, key, instr);
			}
		}
	}
}
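
/* one round of the standard backwards dataflow:
 *
 *    livein  = use | (liveout & ~def)
 *    liveout = union of livein of each successor
 *
 * It returns true if anything changed, so the caller iterates it to a
 * fixed point:
 */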
static bool
ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
{
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
	bool progress = false;

	foreach_block (block, &ctx->ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		/* update livein: */
		for (unsigned i = 0; i < bitset_words; i++) {
			/* anything used but not def'd within a block is
			 * by definition a live value coming into the block:
			 */
			BITSET_WORD new_livein =
				(bd->use[i] | (bd->liveout[i] & ~bd->def[i]));

			if (new_livein & ~bd->livein[i]) {
				bd->livein[i] |= new_livein;
				progress = true;
			}
		}

		/* update liveout: */
		for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
			struct ir3_block *succ = block->successors[j];
			struct ir3_ra_block_data *succ_bd;

			if (!succ)
				continue;

			succ_bd = succ->data;

			for (unsigned i = 0; i < bitset_words; i++) {
				/* add anything that is livein in a successor block
				 * to our liveout:
				 */
				BITSET_WORD new_liveout =
					(succ_bd->livein[i] & ~bd->liveout[i]);

				if (new_liveout) {
					bd->liveout[i] |= new_liveout;
					progress = true;
				}
			}
		}
	}

	return progress;
}

static void
print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
{
	debug_printf("RA: %s:", name);
	for (unsigned i = 0; i < cnt; i++) {
		if (BITSET_TEST(bs, i)) {
			debug_printf(" %04u", i);
		}
	}
	debug_printf("\n");
}

/* size of one component of instruction result, ie. half vs full: */
static unsigned
live_size(struct ir3_instruction *instr)
{
	if (is_half(instr)) {
		return 1;
	} else if (is_high(instr)) {
		/* doesn't count towards footprint */
		return 0;
	} else {
		return 2;
	}
}

static unsigned
name_size(struct ir3_ra_ctx *ctx, unsigned name)
{
	if (name_is_array(ctx, name)) {
		struct ir3_array *arr = name_to_array(ctx, name);
		return arr->half ? 1 : 2;
	} else {
		struct ir3_instruction *instr = name_to_instr(ctx, name);
		/* in scalar pass, each name represents one scalar value,
		 * half or full precision:
		 */
		return live_size(instr);
	}
}
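
/* walk one block, tracking how many half-reg units are live at each
 * instruction, to compute the block's maximum register footprint:
 */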
static unsigned
ra_calc_block_live_values(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	struct ir3_ra_block_data *bd = block->data;
	unsigned name;

	ra_assert(ctx, ctx->name_to_instr);

	/* TODO this gets a bit more complicated in non-scalar pass.. but
	 * possibly a lowball estimate is fine to start with if we do
	 * round-robin in non-scalar pass?  Maybe we just want to handle
	 * that in a different fxn?
	 */
	ra_assert(ctx, ctx->scalar_pass);

	BITSET_WORD *live =
		rzalloc_array(bd, BITSET_WORD, BITSET_WORDS(ctx->alloc_count));

	/* Add the live input values: */
	unsigned livein = 0;
	BITSET_FOREACH_SET (name, bd->livein, ctx->alloc_count) {
		livein += name_size(ctx, name);
		BITSET_SET(live, name);
	}

	d("---------------------");
	d("block%u: LIVEIN: %u", block_id(block), livein);

	unsigned max = livein;
	int cur_live = max;

	/* Now that we know the live inputs to the block, iterate the
	 * instructions adjusting the current # of live values as we
	 * see their last use:
	 */
	foreach_instr (instr, &block->instr_list) {
		if (RA_DEBUG) {
			print_bitset("LIVE", live, ctx->alloc_count);
			di(instr, "CALC");
		}

		unsigned new_live = 0;    /* newly live values */
		unsigned new_dead = 0;    /* newly no-longer live values */
		unsigned next_dead = 0;   /* newly dead following this instr */

		foreach_def (name, ctx, instr) {
			/* NOTE: checking ctx->def filters out things like split/
			 * collect which are just redefining existing live names
			 * or array writes to already live array elements:
			 */
			if (ctx->def[name] != instr->ip)
				continue;
			new_live += live_size(instr);
			d("NEW_LIVE: %u (new_live=%u, use=%u)", name, new_live, ctx->use[name]);
			BITSET_SET(live, name);
			/* There can be cases where this is *also* the last use
			 * of a value, for example instructions that write multiple
			 * values, only some of which are used.  These values are
			 * dead *after* (rather than during) this instruction.
			 */
			if (ctx->use[name] != instr->ip)
				continue;
			next_dead += live_size(instr);
			d("NEXT_DEAD: %u (next_dead=%u)", name, next_dead);
			BITSET_CLEAR(live, name);
		}

		/* To be more resilient against special cases where liverange
		 * is extended (like first_non_input), rather than using the
		 * foreach_use() iterator, we iterate the current live values
		 * instead:
		 */
		BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
			/* Is this the last use? */
			if (ctx->use[name] != instr->ip)
				continue;
			new_dead += name_size(ctx, name);
			d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
			BITSET_CLEAR(live, name);
		}

		cur_live += new_live;
		cur_live -= new_dead;

		ra_assert(ctx, cur_live >= 0);
		d("CUR_LIVE: %u", cur_live);

		max = MAX2(max, cur_live);

		/* account for written values which are not used later,
		 * but after updating max (since they are for one cycle
		 * still live):
		 */
		cur_live -= next_dead;
		ra_assert(ctx, cur_live >= 0);

		if (RA_DEBUG) {
			unsigned cnt = 0;
			BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
				cnt += name_size(ctx, name);
			}
			ra_assert(ctx, cur_live == cnt);
		}
	}

	d("block%u max=%u", block_id(block), max);

	/* the remaining live should match liveout (for extra sanity testing): */
	if (RA_DEBUG) {
		unsigned new_dead = 0;
		BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
			/* Is this the last use? */
			if (ctx->use[name] != block->end_ip)
				continue;
			new_dead += name_size(ctx, name);
			d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
			BITSET_CLEAR(live, name);
		}
		unsigned liveout = 0;
		BITSET_FOREACH_SET (name, bd->liveout, ctx->alloc_count) {
			liveout += name_size(ctx, name);
			BITSET_CLEAR(live, name);
		}

		if (cur_live != liveout) {
			print_bitset("LEAKED", live, ctx->alloc_count);
			/* TODO there are a few edge cases where live-range extension
			 * tells us a value is livein.  But not used by the block or
			 * liveout for the block.  Possibly a bug in the liverange
			 * extension.  But for now leave the assert disabled:
			 *    ra_assert(ctx, cur_live == liveout);
			 */
		}
	}

	return max;
}

static unsigned
ra_calc_max_live_values(struct ir3_ra_ctx *ctx)
{
	unsigned max = 0;

	foreach_block (block, &ctx->ir->block_list) {
		unsigned block_live = ra_calc_block_live_values(ctx, block);
		max = MAX2(max, block_live);
	}

	return max;
}
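
/* build the global def/use tables, livein/liveout, and finally the conflict
 * edges in the interference graph:
 */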
static void
ra_add_interference(struct ir3_ra_ctx *ctx)
{
	struct ir3 *ir = ctx->ir;

	/* initialize array live ranges: */
	foreach_array (arr, &ir->array_list) {
		arr->start_ip = ~0;
		arr->end_ip = 0;
	}

	/* set up the r0.xyz precolor regs: */
	for (int i = 0; i < 3; i++) {
		ra_set_node_reg(ctx->g, ctx->r0_xyz_nodes + i, i);
		ra_set_node_reg(ctx->g, ctx->hr0_xyz_nodes + i,
				ctx->set->first_half_reg + i);
	}

	/* pre-color node that conflicts with half/full regs higher than what
	 * can be encoded for tex-prefetch:
	 */
	ra_set_node_reg(ctx->g, ctx->prefetch_exclude_node,
			ctx->set->prefetch_exclude_reg);

	/* compute live ranges (use/def) on a block level, also updating
	 * block's def/use bitmasks (used below to calculate per-block
	 * livein/liveout):
	 */
	foreach_block (block, &ir->block_list) {
		ra_block_compute_live_ranges(ctx, block);
	}

	/* update per-block livein/liveout: */
	while (ra_compute_livein_liveout(ctx)) {}

	if (RA_DEBUG) {
		d("AFTER LIVEIN/OUT:");
		foreach_block (block, &ir->block_list) {
			struct ir3_ra_block_data *bd = block->data;
			d("block%u:", block_id(block));
			print_bitset("  def", bd->def, ctx->alloc_count);
			print_bitset("  use", bd->use, ctx->alloc_count);
			print_bitset("  l/i", bd->livein, ctx->alloc_count);
			print_bitset("  l/o", bd->liveout, ctx->alloc_count);
		}
		foreach_array (arr, &ir->array_list) {
			d("array%u:", arr->id);
			d("  length:   %u", arr->length);
			d("  start_ip: %u", arr->start_ip);
			d("  end_ip:   %u", arr->end_ip);
		}
	}

	/* extend start/end ranges based on livein/liveout info from cfg: */
	foreach_block (block, &ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		for (unsigned i = 0; i < ctx->alloc_count; i++) {
			if (BITSET_TEST(bd->livein, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
			}

			if (BITSET_TEST(bd->liveout, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
			}
		}

		foreach_array (arr, &ctx->ir->array_list) {
			for (unsigned i = 0; i < arr->length; i++) {
				if (BITSET_TEST(bd->livein, i + arr->base)) {
					arr->start_ip = MIN2(arr->start_ip, block->start_ip);
				}
				if (BITSET_TEST(bd->liveout, i + arr->base)) {
					arr->end_ip = MAX2(arr->end_ip, block->end_ip);
				}
			}
		}
	}

	if (ctx->name_to_instr) {
		unsigned max = ra_calc_max_live_values(ctx);
		ra_set_register_target(ctx, max);
	}
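
	/* finally, add an interference edge between any two names whose
	 * live ranges overlap:
	 */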
	for (unsigned i = 0; i < ctx->alloc_count; i++) {
		for (unsigned j = 0; j < ctx->alloc_count; j++) {
			if (intersects(ctx->def[i], ctx->use[i],
					ctx->def[j], ctx->use[j])) {
				ra_add_node_interference(ctx->g, i, j);
			}
		}
	}
}

/* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
 * array access(es) which do not have any previous access to depend
 * on from scheduling point of view
 */
static void
reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
		struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id;

	if (reg->flags & IR3_REG_ARRAY) {
		struct ir3_array *arr =
			ir3_lookup_array(ctx->ir, reg->array.id);
		unsigned name = arr->base + reg->array.offset;
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r];

		if (reg->flags & IR3_REG_RELATIV) {
			reg->array.offset = num;
		} else {
			reg->num = num;
			reg->flags &= ~IR3_REG_SSA;
		}

		reg->flags &= ~IR3_REG_ARRAY;
	} else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
		unsigned first_component = 0;

		/* Special case for tex instructions, which may use the wrmask
		 * to mask off the first component(s).  In the scalar pass,
		 * this means the masked off component(s) are not def'd/use'd,
		 * so we get a bogus value when we ask the register_allocate
		 * algo to get the assigned reg for the unused/untouched
		 * component.  So we need to consider the first used component:
		 */
		if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
			unsigned n = ffs(id->defn->regs[0]->wrmask);
			ra_assert(ctx, n > 0);
			first_component = n - 1;
		}

		unsigned name = scalar_name(ctx, id->defn, first_component);
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;

		ra_assert(ctx, !(reg->flags & IR3_REG_RELATIV));

		ra_assert(ctx, num >= first_component);

		if (is_high(id->defn))
			num += FIRST_HIGH_REG;

		reg->num = num - first_component;

		reg->flags &= ~IR3_REG_SSA;

		if (is_half(id->defn))
			reg->flags |= IR3_REG_HALF;
	}
}

/* helper to determine which regs to assign in which pass: */
static bool
should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
	if ((instr->opc == OPC_META_SPLIT) &&
			(util_bitcount(instr->regs[1]->wrmask) > 1))
		return !ctx->scalar_pass;
	if ((instr->opc == OPC_META_COLLECT) &&
			(util_bitcount(instr->regs[0]->wrmask) > 1))
		return !ctx->scalar_pass;
	return ctx->scalar_pass;
}
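
/* assign registers to everything that the current pass is responsible
 * for (see should_assign()):
 */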
static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {

		if (writes_gpr(instr)) {
			if (should_assign(ctx, instr)) {
				reg_assign(ctx, instr->regs[0], instr);
			}
		}

		foreach_src_n (reg, n, instr) {
			struct ir3_instruction *src = reg->instr;

			if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
				continue;

			if (src && should_assign(ctx, instr))
				reg_assign(ctx, src->regs[0], src);

			/* Note: reg->instr could be null for IR3_REG_ARRAY */
			if (src || (reg->flags & IR3_REG_ARRAY))
				reg_assign(ctx, instr->regs[n+1], src);
		}
	}

	/* We need to pre-color outputs for the scalar pass in
	 * ra_precolor_assigned(), so we need to actually assign
	 * them in the first pass:
	 */
	if (!ctx->scalar_pass) {
		foreach_input (in, ctx->ir) {
			reg_assign(ctx, in->regs[0], in);
		}
		foreach_output (out, ctx->ir) {
			reg_assign(ctx, out->regs[0], out);
		}
	}
}

static void
assign_arr_base(struct ir3_ra_ctx *ctx, struct ir3_array *arr,
		struct ir3_instruction **precolor, unsigned nprecolor)
{
	/* In the mergedregs case, we convert full precision arrays
	 * to their effective half-precision base, and find conflicts
	 * amongst all other arrays/inputs.
	 *
	 * In the splitregs case (halfreg file and fullreg file do
	 * not conflict), we ignore arrays and other pre-colors that
	 * are not the same precision.
	 */
	bool mergedregs = ctx->v->mergedregs;
	unsigned base = 0;

	/* figure out what else we conflict with which has already
	 * been assigned:
	 */
retry:
	foreach_array (arr2, &ctx->ir->array_list) {
		if (arr2 == arr)
			break;
		ra_assert(ctx, arr2->start_ip <= arr2->end_ip);

		unsigned base2 = arr2->reg;
		unsigned len2  = arr2->length;
		unsigned len   = arr->length;

		if (mergedregs) {
			/* convert into half-reg space: */
			if (!arr2->half) {
				base2 *= 2;
				len2  *= 2;
			}
			if (!arr->half) {
				len   *= 2;
			}
		} else if (arr2->half != arr->half) {
			/* for split-register-file mode, we only conflict with
			 * other arrays of same precision:
			 */
			continue;
		}

		/* if it intersects with liverange AND register range.. */
		if (intersects(arr->start_ip, arr->end_ip,
				arr2->start_ip, arr2->end_ip) &&
			intersects(base, base + len,
				base2, base2 + len2)) {
			base = MAX2(base, base2 + len2);
			goto retry;
		}
	}

	/* also need to not conflict with any pre-assigned inputs: */
	for (unsigned i = 0; i < nprecolor; i++) {
		struct ir3_instruction *instr = precolor[i];

		if (!instr || (instr->flags & IR3_INSTR_UNUSED))
			continue;

		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

		/* only consider the first component: */
		if (id->off > 0)
			continue;

		unsigned name = ra_name(ctx, id);
		unsigned regid = instr->regs[0]->num;
		unsigned reglen = class_sizes[id->cls];
		unsigned len = arr->length;

		if (mergedregs) {
			/* convert into half-reg space: */
			if (!is_half(instr)) {
				regid  *= 2;
				reglen *= 2;
			}
			if (!arr->half) {
				len    *= 2;
			}
		} else if (is_half(instr) != arr->half) {
			/* for split-register-file mode, we only conflict with
			 * other arrays of same precision:
			 */
			continue;
		}

		/* Check if array intersects with liverange AND register
		 * range of the input:
		 */
		if (intersects(arr->start_ip, arr->end_ip,
						ctx->def[name], ctx->use[name]) &&
				intersects(base, base + len,
						regid, regid + reglen)) {
			base = MAX2(base, regid + reglen);
			goto retry;
		}
	}

	/* convert back from half-reg space to fullreg space: */
	if (mergedregs && !arr->half) {
		base = DIV_ROUND_UP(base, 2);
	}

	arr->reg = base;
}

/* handle pre-colored registers.  This includes "arrays" (which could be of
 * length 1, used for phi webs lowered to registers in nir), as well as
 * special shader input values that need to be pinned to certain registers.
 */
static void
ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
{
	for (unsigned i = 0; i < nprecolor; i++) {
		if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
			struct ir3_instruction *instr = precolor[i];

			if (instr->regs[0]->num == INVALID_REG)
				continue;

			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

			ra_assert(ctx, !(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));

			/* 'base' is in scalar (class 0) but we need to map that to
			 * the conflicting register of the appropriate class (ie.
			 * input could be vec2/vec3/etc)
			 *
			 * Note that the higher class (larger than scalar) regs
			 * are setup to conflict with others in the same class,
			 * so for example, R1 (scalar) is also the first component
			 * of D1 (vec2/double):
			 *
			 *    Single (base) |  Double
			 *    --------------+---------------
			 *       R0         |  D0
			 *       R1         |  D0 D1
			 *       R2         |     D1 D2
			 *       R3         |        D2
			 *           .. and so on..
			 */
			unsigned regid = instr->regs[0]->num;
			ra_assert(ctx, regid >= id->off);
			regid -= id->off;

			unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
			unsigned name = ra_name(ctx, id);
			ra_set_node_reg(ctx->g, name, reg);
		}
	}

	/*
	 * Pre-assign array elements:
	 */
	foreach_array (arr, &ctx->ir->array_list) {

		if (arr->end_ip == 0)
			continue;

		if (!ctx->scalar_pass)
			assign_arr_base(ctx, arr, precolor, nprecolor);

		for (unsigned i = 0; i < arr->length; i++) {
			unsigned cls = arr->half ? HALF_OFFSET : 0;

			ra_set_node_reg(ctx->g,
					arr->base + i,   /* vreg name */
					ctx->set->gpr_to_ra_reg[cls][arr->reg + i]);
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		foreach_array (arr, &ctx->ir->array_list) {
			unsigned first = arr->reg;
			unsigned last  = arr->reg + arr->length - 1;
			debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
					(first >> 2), "xyzw"[first & 0x3],
					(last >> 2), "xyzw"[last & 0x3]);
		}
	}
}

static void
precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	unsigned n = dest_regs(instr);
	for (unsigned i = 0; i < n; i++) {
		/* tex instructions actually have a wrmask, and
		 * don't touch masked out components.  So we
		 * shouldn't precolor them:
		 */
		if (is_tex_or_prefetch(instr) &&
				!(instr->regs[0]->wrmask & (1 << i)))
			continue;

		unsigned name = scalar_name(ctx, instr, i);
		unsigned regid = instr->regs[0]->num + i;

		if (instr->regs[0]->flags & IR3_REG_HIGH)
			regid -= FIRST_HIGH_REG;

		unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
		ra_set_node_reg(ctx->g, name, vreg);
	}
}

/* pre-color non-scalar registers based on the registers assigned in previous
 * pass.  Do this by actually looking at the fanout instructions.
 */
static void
ra_precolor_assigned(struct ir3_ra_ctx *ctx)
{
	ra_assert(ctx, ctx->scalar_pass);

	foreach_block (block, &ctx->ir->block_list) {
		foreach_instr (instr, &block->instr_list) {

			if (!writes_gpr(instr))
				continue;

			if (should_assign(ctx, instr))
				continue;

			precolor(ctx, instr);

			foreach_src (src, instr) {
				if (!src->instr)
					continue;
				precolor(ctx, src->instr);
			}
		}
	}
}

static int
ra_alloc(struct ir3_ra_ctx *ctx)
{
	if (!ra_allocate(ctx->g))
		return -1;

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_alloc(ctx, block);
	}

	return 0;
}

/* if we end up with split/collect instructions with non-matching src
 * and dest regs, that means something has gone wrong.  Which makes it
 * a pretty good sanity check.
 */
static void
ra_sanity_check(struct ir3 *ir)
{
	foreach_block (block, &ir->block_list) {
		foreach_instr (instr, &block->instr_list) {
			if (instr->opc == OPC_META_SPLIT) {
				struct ir3_register *dst = instr->regs[0];
				struct ir3_register *src = instr->regs[1];
				debug_assert(dst->num == (src->num + instr->split.off));
			} else if (instr->opc == OPC_META_COLLECT) {
				struct ir3_register *dst = instr->regs[0];

				foreach_src_n (src, n, instr) {
					debug_assert(dst->num == (src->num - n));
				}
			}
		}
	}
}
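
/* one full RA pass (vector or scalar).  ra_assert()/ra_unreachable()
 * presumably longjmp() back to the setjmp() below on a failed allocation,
 * so errors unwind to here and are returned to the caller:
 */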
static int
ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
		unsigned nprecolor, bool scalar_pass)
{
	struct ir3_ra_ctx ctx = {
			.v = v,
			.ir = v->ir,
			.set = v->mergedregs ?
				v->ir->compiler->mergedregs_set : v->ir->compiler->set,
			.scalar_pass = scalar_pass,
	};
	int ret;

	ret = setjmp(ctx.jmp_env);
	if (ret)
		goto fail;

	ra_init(&ctx);
	ra_add_interference(&ctx);
	ra_precolor(&ctx, precolor, nprecolor);
	if (scalar_pass)
		ra_precolor_assigned(&ctx);
	ret = ra_alloc(&ctx);

fail:
	ra_destroy(&ctx);

	return ret;
}

int
ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
		unsigned nprecolor)
{
	int ret;

	/* First pass, assign the vecN (non-scalar) registers: */
	ret = ir3_ra_pass(v, precolor, nprecolor, false);
	if (ret)
		return ret;

	ir3_debug_print(v->ir, "AFTER: ir3_ra (1st pass)");

	/* Second pass, assign the scalar registers: */
	ret = ir3_ra_pass(v, precolor, nprecolor, true);
	if (ret)
		return ret;

	ir3_debug_print(v->ir, "AFTER: ir3_ra (2nd pass)");

#ifdef DEBUG
#  define SANITY_CHECK DEBUG
#else
#  define SANITY_CHECK 0
#endif
	if (SANITY_CHECK)
		ra_sanity_check(v->ir);

	return ret;
}