/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/u_math.h"
#include "util/register_allocate.h"
#include "util/ralloc.h"
#include "util/bitset.h"

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_ra.h"
#define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)

#define d(fmt, ...) do { if (RA_DEBUG) { \
	printf("RA: "fmt"\n", ##__VA_ARGS__); \
} } while (0)

#define di(instr, fmt, ...) do { if (RA_DEBUG) { \
	printf("RA: "fmt": ", ##__VA_ARGS__); \
	ir3_print_instr(instr); \
} } while (0)
/*
 * Register Assignment:
 *
 * Uses the register_allocate util, which implements graph coloring
 * algo with interference classes.  To handle the cases where we need
 * consecutive registers (for example, texture sample instructions),
 * we model these as larger (double/quad/etc) registers which conflict
 * with the corresponding registers in other classes.
 *
 * Additionally we create additional classes for half-regs, which
 * do not conflict with the full-reg classes.  We do need at least
 * sizes 1-4 (to deal w/ texture sample instructions output to half-
 * reg).  At the moment we don't create the higher order half-reg
 * classes as half-reg frequently does not have enough precision
 * for texture coords at higher resolutions.
 *
 * There are some additional cases that we need to handle specially,
 * as the graph coloring algo doesn't understand "partial writes".
 * For example, a sequence like:
 *
 *   add r0.z, ...
 *   sam (f32)(xy)r0.x, ...
 *   ...
 *   sam (f32)(xyzw)r0.w, r0.x, ...  ; 3d texture, so r0.xyz are coord
 *
 * In this scenario, we treat r0.xyz as class size 3, which is written
 * (from a use/def perspective) at the 'add' instruction and ignore the
 * subsequent partial writes to r0.xy.  So the 'add r0.z, ...' is the
 * defining instruction, as it is the first to partially write r0.xyz.
 *
 * To address the fragmentation that this can potentially cause, a
 * two pass register allocation is used.  After the first pass the
 * assignment of scalars is discarded, but the assignment of vecN (for
 * N > 1) is used to pre-color in the second pass, which considers
 * only scalars.
 *
 * Arrays of arbitrary size are handled via pre-coloring a consecutive
 * sequence of registers.  Additional scalar (single component) reg
 * names are allocated starting at ctx->class_base[total_class_count]
 * (see arr->base), which are pre-colored.  In the use/def graph direct
 * access is treated as a single element use/def, and indirect access
 * is treated as use or def of all array elements.  (Only the first
 * def is tracked, in case of multiple indirect writes, etc.)
 *
 * TODO arrays that fit in one of the pre-defined class sizes should
 * not need to be pre-colored, but instead could be given a normal
 * vreg name.  (Ignoring this for now since it is a good way to work
 * out the kinks with arbitrary sized arrays.)
 *
 * TODO might be easier for debugging to split this into two passes,
 * the first assigning vreg names in a way that we could ir3_print()
 * the result of.
 */
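
/* Illustrative sketch of the class/conflict modeling described above
 * (hypothetical registers): a sam writing r0.xy becomes a single vec2
 * node.  The vec2-class register covering r0.x/r0.y is set up to
 * conflict with the scalar-class registers r0.x and r0.y, so graph
 * coloring can never hand r0.x to an unrelated scalar value while the
 * vec2 is live.
 */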
static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);

static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);
/* does it conflict? */
static inline bool
intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
{
	return !((a_start >= b_end) || (b_start >= a_end));
}
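
/* For example (hypothetical instruction ips): live ranges [2, 10) and
 * [10, 14) do not intersect, since the first dies exactly where the
 * second is defined, while [2, 10) and [9, 14) do, so that pair would
 * get an interference edge.
 */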
static unsigned
reg_size_for_array(struct ir3_array *arr)
{
	if (arr->half)
		return DIV_ROUND_UP(arr->length, 2);

	return arr->length;
}
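
/* e.g. a half-precision array of length 5 takes DIV_ROUND_UP(5, 2) = 3
 * register names, since two half elements pack into one full reg slot.
 */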
static bool
instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
{
	if (a->flags & IR3_INSTR_UNUSED)
		return false;
	return (a->ip < b->ip);
}
static struct ir3_instruction *
get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
		int *sz, int *off)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	struct ir3_instruction *d = NULL;

	if (ctx->scalar_pass) {
		id->defn = instr;
		id->off = 0;
		id->sz = 1;     /* considering things as N scalar regs now */
	}

	if (id->defn) {
		*sz = id->sz;
		*off = id->off;
		return id->defn;
	}

	if (instr->opc == OPC_META_COLLECT) {
		/* What about the case where collect is subset of array, we
		 * need to find the distance between where actual array starts
		 * and collect..  that probably doesn't happen currently.
		 */
		struct ir3_register *src;
		int dsz, doff;

		/* note: don't use foreach_ssa_src as this gets called once
		 * while assigning regs (which clears SSA flag)
		 */
		foreach_src_n (src, n, instr) {
			struct ir3_instruction *dd;

			if (!src->instr)
				continue;

			dd = get_definer(ctx, src->instr, &dsz, &doff);

			if ((!d) || instr_before(dd, d)) {
				d = dd;
				*sz = dsz;
				*off = doff - n;
			}
		}

	} else if (instr->cp.right || instr->cp.left) {
		/* covers also the meta:fo case, which ends up w/ single
		 * scalar instructions for each component:
		 */
		struct ir3_instruction *f = ir3_neighbor_first(instr);

		/* by definition, the entire sequence forms one linked list
		 * of single scalar register nodes (even if some of them may
		 * be splits from a texture sample (for example) instr.  We
		 * just need to walk the list finding the first element of
		 * the group defined (lowest ip)
		 */
		int cnt = 0;

		/* need to skip over unused in the group: */
		while (f && (f->flags & IR3_INSTR_UNUSED)) {
			f = f->cp.right;
			cnt++;
		}

		while (f) {
			if ((!d) || instr_before(f, d))
				d = f;
			if (f == instr)
				*off = cnt;
			f = f->cp.right;
			cnt++;
		}

		*sz = cnt;

	} else {
		/* second case is looking directly at the instruction which
		 * produces multiple values (eg, texture sample), rather
		 * than the split nodes that point back to that instruction.
		 * This isn't quite right, because it may be part of a larger
		 * group, such as:
		 *
		 *    sam (f32)(xyzw)r0.x, ...
		 *    add r1.x, ...
		 *    sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
		 *
		 * need to come up with a better way to handle that case.
		 */
		if (instr->address) {
			*sz = instr->regs[0]->size;
		} else {
			*sz = util_last_bit(instr->regs[0]->wrmask);
		}
		*off = 0;
		d = instr;
	}

	if (d->opc == OPC_META_SPLIT) {
		struct ir3_instruction *dd;
		int dsz, doff;

		dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);

		/* by definition, should come before: */
		debug_assert(instr_before(dd, d));

		*sz = MAX2(*sz, dsz);

		if (instr->opc == OPC_META_SPLIT)
			*off = MAX2(*off, instr->split.off);

		d = dd;
	}

	debug_assert(d->opc != OPC_META_SPLIT);

	id->defn = d;
	id->sz = *sz;
	id->off = *off;

	return d;
}
static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
		if (instr->regs_count == 0)
			continue;
		/* couple special cases: */
		if (writes_addr(instr) || writes_pred(instr)) {
			id->cls = -1;
		} else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
			id->cls = total_class_count;
		} else {
			/* and the normal case: */
			id->defn = get_definer(ctx, instr, &id->sz, &id->off);
			id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));

			/* this is a bit of duct-tape.. if we have a scenario like:
			 *
			 *   sam (f32)(x) out.x, ...
			 *   sam (f32)(x) out.y, ...
			 *
			 * Then the fanout/split meta instructions for the two different
			 * tex instructions end up grouped as left/right neighbors.  The
			 * upshot is that when you get_definer() on one of the meta:fo's
			 * you get the definer as the first sam with sz=2, but when you
			 * call get_definer() on either of the sam's you get itself as
			 * the definer with sz=1.
			 *
			 * (We actually avoid this scenario exactly, the neighbor links
			 * prevent one of the output mov's from being eliminated, so this
			 * hack should be enough.  But probably we need to rethink how we
			 * find the "defining" instruction.)
			 *
			 * TODO how do we figure out offset properly...
			 */
			if (id->defn != instr) {
				struct ir3_ra_instr_data *did = &ctx->instrd[id->defn->ip];
				if (did->sz < id->sz) {
					did->sz = id->sz;
					did->cls = id->cls;
				}
			}
		}
	}
}
/* give each instruction a name (and ip), and count up the # of names
 * of each class:
 */
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

		if (!writes_gpr(instr))
			continue;

		if (id->defn != instr)
			continue;

		/* In scalar pass, collect/split don't get their own names,
		 * but instead inherit them from their src(s):
		 *
		 * Possibly we don't need this because of scalar_name(), but
		 * it does make the ir3_print() dumps easier to read.
		 */
		if (ctx->scalar_pass) {
			if (instr->opc == OPC_META_SPLIT) {
				instr->name = instr->regs[1]->instr->name + instr->split.off;
				continue;
			}

			if (instr->opc == OPC_META_COLLECT) {
				instr->name = instr->regs[1]->instr->name;
				continue;
			}
		}

		/* arrays which don't fit in one of the pre-defined class
		 * sizes are pre-colored:
		 */
		if ((id->cls >= 0) && (id->cls < total_class_count)) {
			/* in the scalar pass, we generate a name for each
			 * scalar component, instr->name is the name of the
			 * first component:
			 */
			unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
			instr->name = ctx->class_alloc_count[id->cls];
			ctx->class_alloc_count[id->cls] += n;
			ctx->alloc_count += n;
		}
	}
}
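
/* Worked example (hypothetical counts): if class_alloc_count[cls] is 7
 * when a vec3 producer is visited in the scalar pass, the instruction
 * gets name 7 and its .y/.z components become names 8 and 9 (via
 * scalar_name()), bumping class_alloc_count[cls] to 10.
 */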
static int
pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
{
	for (unsigned i = min; i < max; i++) {
		if (BITSET_TEST(regs, i)) {
			return i;
		}
	}
	return -1;
}
/* register selector for the a6xx+ merged register file: */
static unsigned int
ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
{
	struct ir3_ra_ctx *ctx = data;
	unsigned int class = ra_get_node_class(ctx->g, n);

	/* dimensions within the register class: */
	unsigned max_target, start;

	/* the regs bitset will include *all* of the virtual regs, but we lay
	 * out the different classes consecutively in the virtual register
	 * space.  So we just need to think about the base offset of a given
	 * class within the virtual register space, and offset the register
	 * space we search within by that base offset.
	 */
	unsigned base;

	/* NOTE: this is only used in scalar pass, so the register
	 * class will be one of the scalar classes (ie. idx==0):
	 */
	if (class == ctx->set->high_classes[0]) {
		max_target = HIGH_CLASS_REGS(0);
		start = 0;
		base = ctx->set->gpr_to_ra_reg[HIGH_OFFSET][0];
	} else if (class == ctx->set->half_classes[0]) {
		max_target = ctx->max_target;
		start = ctx->start_search_reg;
		base = ctx->set->gpr_to_ra_reg[HALF_OFFSET][0];
	} else if (class == ctx->set->classes[0]) {
		max_target = ctx->max_target / 2;
		start = ctx->start_search_reg;
		base = ctx->set->gpr_to_ra_reg[0][0];
	} else {
		unreachable("unexpected register class!");
	}

	/* For cat4 instructions, if the src reg is already assigned, and
	 * avail to pick, use it.  Because this doesn't introduce unnecessary
	 * dependencies, and it potentially avoids needing (ss) syncs for
	 * write after read hazards:
	 */
	struct ir3_instruction *instr = name_to_instr(ctx, n);
	if (is_sfu(instr) && instr->regs[1]->instr) {
		struct ir3_instruction *src = instr->regs[1]->instr;
		unsigned src_n = scalar_name(ctx, src, 0);

		unsigned reg = ra_get_node_reg(ctx->g, src_n);

		/* Check if the src register has been assigned yet: */
		if (BITSET_TEST(regs, reg)) {
			return reg;
		}
	}

	int r = pick_in_range(regs, base + start, base + max_target);
	if (r < 0) {
		/* wrap-around: */
		r = pick_in_range(regs, base, base + start);
	}

	if (r < 0) {
		/* overflow, we need to increase max_target: */
		ctx->max_target++;
		return ra_select_reg_merged(n, regs, data);
	}

	if (class == ctx->set->half_classes[0]) {
		int n = r - base;
		ctx->start_search_reg = (n + 1) % ctx->max_target;
	} else if (class == ctx->set->classes[0]) {
		int n = (r - base) * 2;
		ctx->start_search_reg = (n + 1) % ctx->max_target;
	}

	return r;
}
static void
ra_init(struct ir3_ra_ctx *ctx)
{
	unsigned n, base;

	ir3_clear_mark(ctx->ir);
	n = ir3_count_instructions(ctx->ir);

	ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_find_definers(ctx, block);
	}

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_name_instructions(ctx, block);
	}

	/* figure out the base register name for each class.  The
	 * actual ra name is class_base[cls] + instr->name;
	 */
	ctx->class_base[0] = 0;
	for (unsigned i = 1; i <= total_class_count; i++) {
		ctx->class_base[i] = ctx->class_base[i-1] +
				ctx->class_alloc_count[i-1];
	}

	/* and vreg names for array elements: */
	base = ctx->class_base[total_class_count];
	foreach_array (arr, &ctx->ir->array_list) {
		arr->base = base;
		ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
		base += reg_size_for_array(arr);
	}
	ctx->alloc_count += ctx->class_alloc_count[total_class_count];

	ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
	ralloc_steal(ctx->g, ctx->instrd);
	ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
	ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);

	/* TODO add selector callback for split (pre-a6xx) register file: */
	if (ctx->scalar_pass && (ctx->ir->compiler->gpu_id >= 600)) {
		ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);

		ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
				_mesa_hash_int, _mesa_key_int_equal);
	}
}
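
/* Worked example of the name layout ra_init() computes (hypothetical
 * counts): with class_alloc_count[] = {10, 4, 1, ...}, class_base[]
 * becomes {0, 10, 14, 15, ...}, i.e. scalar names occupy 0..9, vec2
 * names 10..13, and so on, with array element names starting at
 * class_base[total_class_count].
 */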
/* Map the name back to instruction: */
static struct ir3_instruction *
name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
{
	assert(!name_is_array(ctx, name));
	struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
	if (entry)
		return entry->data;
	unreachable("invalid instr name");
	return NULL;
}
static bool
name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
{
	return name >= ctx->class_base[total_class_count];
}
static struct ir3_array *
name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
{
	assert(name_is_array(ctx, name));
	foreach_array (arr, &ctx->ir->array_list) {
		unsigned sz = reg_size_for_array(arr);
		if (name < (arr->base + sz))
			return arr;
	}
	unreachable("invalid array name");
	return NULL;
}
static void
ra_destroy(struct ir3_ra_ctx *ctx)
{
	ralloc_free(ctx->g);
}
static void
__def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	debug_assert(name < ctx->alloc_count);
	/* defined on first write: */
	if (!ctx->def[name])
		ctx->def[name] = instr->ip;
	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
	BITSET_SET(bd->def, name);
}
static void
__use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
		struct ir3_instruction *instr)
{
	debug_assert(name < ctx->alloc_count);
	ctx->use[name] = MAX2(ctx->use[name], instr->ip);
	if (!BITSET_TEST(bd->def, name))
		BITSET_SET(bd->use, name);
}
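
/* e.g. with (hypothetical) ips: name 5 written at ip 3 and read at ip 7
 * yields def[5] = 3 (first write wins) and use[5] = 7, i.e. a live range
 * of [3, 7).  A use with no prior def in the block sets bd->use, marking
 * the value as a livein candidate for the block.
 */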
static void
ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	struct ir3_ra_block_data *bd;
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);

#define def(name, instr) __def(ctx, bd, name, instr)
#define use(name, instr) __use(ctx, bd, name, instr)

	bd = rzalloc(ctx->g, struct ir3_ra_block_data);

	bd->def     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->use     = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->livein  = rzalloc_array(bd, BITSET_WORD, bitset_words);
	bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);

	block->data = bd;

	struct ir3_instruction *first_non_input = NULL;
	foreach_instr (instr, &block->instr_list) {
		if (instr->opc != OPC_META_INPUT) {
			first_non_input = instr;
			break;
		}
	}

	foreach_instr (instr, &block->instr_list) {
		foreach_def (name, ctx, instr) {
			if (name_is_array(ctx, name)) {
				struct ir3_array *arr = name_to_array(ctx, name);

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				for (unsigned i = 0; i < arr->length; i++) {
					unsigned name = arr->base + i;
					if (arr->half)
						ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
					else
						ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
				}
			} else {
				struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
				if (is_high(instr)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->high_classes[id->cls - HIGH_OFFSET]);
				} else if (is_half(instr)) {
					ra_set_node_class(ctx->g, name,
							ctx->set->half_classes[id->cls - HALF_OFFSET]);
				} else {
					ra_set_node_class(ctx->g, name,
							ctx->set->classes[id->cls]);
				}
			}

			def(name, instr);

			if ((instr->opc == OPC_META_INPUT) && first_non_input)
				use(name, first_non_input);
		}

		foreach_use (name, ctx, instr) {
			if (name_is_array(ctx, name)) {
				struct ir3_array *arr = name_to_array(ctx, name);

				arr->start_ip = MIN2(arr->start_ip, instr->ip);
				arr->end_ip = MAX2(arr->end_ip, instr->ip);

				/* NOTE: arrays are not SSA so unconditionally
				 * set the use bit:
				 */
				BITSET_SET(bd->use, name);
			}

			use(name, instr);
		}

		foreach_name (name, ctx, instr) {
			/* split/collect instructions have duplicate names
			 * as real instructions, so they skip the hashtable:
			 */
			if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
					(instr->opc == OPC_META_COLLECT))) {
				/* this is slightly annoying, we can't just use an
				 * integer on the stack
				 */
				unsigned *key = ralloc(ctx->name_to_instr, unsigned);
				*key = name;
				debug_assert(!_mesa_hash_table_search(ctx->name_to_instr, key));
				_mesa_hash_table_insert(ctx->name_to_instr, key, instr);
			}
		}
	}
}
static bool
ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
{
	unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
	bool progress = false;

	foreach_block (block, &ctx->ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		/* update livein: */
		for (unsigned i = 0; i < bitset_words; i++) {
			/* anything used but not def'd within a block is
			 * by definition a live value coming into the block:
			 */
			BITSET_WORD new_livein =
				(bd->use[i] | (bd->liveout[i] & ~bd->def[i]));

			if (new_livein & ~bd->livein[i]) {
				bd->livein[i] |= new_livein;
				progress = true;
			}
		}

		/* update liveout: */
		for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
			struct ir3_block *succ = block->successors[j];
			struct ir3_ra_block_data *succ_bd;

			if (!succ)
				continue;

			succ_bd = succ->data;

			for (unsigned i = 0; i < bitset_words; i++) {
				/* add anything that is livein in a successor block
				 * to our liveout:
				 */
				BITSET_WORD new_liveout =
					(succ_bd->livein[i] & ~bd->liveout[i]);

				if (new_liveout) {
					bd->liveout[i] |= new_liveout;
					progress = true;
				}
			}
		}
	}

	return progress;
}
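
/* The loop above is the standard backward liveness dataflow, iterated
 * to a fixed point:
 *
 *    livein(b)  = use(b) | (liveout(b) & ~def(b))
 *    liveout(b) = union over successors s of livein(s)
 */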
static void
print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
{
	debug_printf("RA: %s:", name);
	for (unsigned i = 0; i < cnt; i++) {
		if (BITSET_TEST(bs, i)) {
			debug_printf(" %04u", i);
		}
	}
	debug_printf("\n");
}
static void
ra_add_interference(struct ir3_ra_ctx *ctx)
{
	struct ir3 *ir = ctx->ir;

	/* initialize array live ranges: */
	foreach_array (arr, &ir->array_list) {
		arr->start_ip = ~0;
		arr->end_ip = 0;
	}

	/* compute live ranges (use/def) on a block level, also updating
	 * block's def/use bitmasks (used below to calculate per-block
	 * livein/liveout):
	 */
	foreach_block (block, &ir->block_list) {
		ra_block_compute_live_ranges(ctx, block);
	}

	/* update per-block livein/liveout: */
	while (ra_compute_livein_liveout(ctx)) {}

	if (RA_DEBUG) {
		d("AFTER LIVEIN/OUT:");
		foreach_block (block, &ir->block_list) {
			struct ir3_ra_block_data *bd = block->data;
			d("block%u:", block_id(block));
			print_bitset("  def", bd->def, ctx->alloc_count);
			print_bitset("  use", bd->use, ctx->alloc_count);
			print_bitset("  l/i", bd->livein, ctx->alloc_count);
			print_bitset("  l/o", bd->liveout, ctx->alloc_count);
		}
		foreach_array (arr, &ir->array_list) {
			d("array%u:", arr->id);
			d("  length:   %u", arr->length);
			d("  start_ip: %u", arr->start_ip);
			d("  end_ip:   %u", arr->end_ip);
		}
		d("INSTRUCTION VREG NAMES:");
		foreach_block (block, &ctx->ir->block_list) {
			foreach_instr (instr, &block->instr_list) {
				if (!ctx->instrd[instr->ip].defn)
					continue;
				if (!writes_gpr(instr))
					continue;
				di(instr, "%04u", scalar_name(ctx, instr, 0));
			}
		}
		d("ARRAY VREG NAMES:");
		foreach_array (arr, &ctx->ir->array_list) {
			d("%04u: arr%u", arr->base, arr->id);
		}
	}

	/* extend start/end ranges based on livein/liveout info from cfg: */
	foreach_block (block, &ir->block_list) {
		struct ir3_ra_block_data *bd = block->data;

		for (unsigned i = 0; i < ctx->alloc_count; i++) {
			if (BITSET_TEST(bd->livein, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
			}

			if (BITSET_TEST(bd->liveout, i)) {
				ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
				ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
			}
		}

		foreach_array (arr, &ctx->ir->array_list) {
			for (unsigned i = 0; i < arr->length; i++) {
				if (BITSET_TEST(bd->livein, i + arr->base)) {
					arr->start_ip = MIN2(arr->start_ip, block->start_ip);
				}
				if (BITSET_TEST(bd->liveout, i + arr->base)) {
					arr->end_ip = MAX2(arr->end_ip, block->end_ip);
				}
			}
		}
	}

	for (unsigned i = 0; i < ctx->alloc_count; i++) {
		for (unsigned j = 0; j < ctx->alloc_count; j++) {
			if (intersects(ctx->def[i], ctx->use[i],
					ctx->def[j], ctx->use[j])) {
				ra_add_node_interference(ctx->g, i, j);
			}
		}
	}
}
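
/* e.g. (hypothetical ranges): name 3 live over [2, 8) and name 5 live
 * over [8, 12) do not interfere, so the allocator is free to assign
 * both the same physical register.
 */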
/* some instructions need fix-up if dst register is half precision: */
static void fixup_half_instr_dst(struct ir3_instruction *instr)
{
	switch (opc_cat(instr->opc)) {
	case 1: /* move instructions */
		instr->cat1.dst_type = half_type(instr->cat1.dst_type);
		break;
	case 3:
		switch (instr->opc) {
		case OPC_MAD_F32:
			/* Available when dest is half and srcs are full.
			 * eg. mad.f32 hr0, r0.x, r0.y, r0.z
			 */
			if (instr->regs[1]->flags & IR3_REG_HALF)
				instr->opc = OPC_MAD_F16;
			break;
		case OPC_SEL_B32:
			instr->opc = OPC_SEL_B16;
			break;
		case OPC_SEL_S32:
			instr->opc = OPC_SEL_S16;
			break;
		case OPC_SEL_F32:
			instr->opc = OPC_SEL_F16;
			break;
		case OPC_SAD_S32:
			instr->opc = OPC_SAD_S16;
			break;
		/* instructions may already be fixed up: */
		case OPC_MAD_F16:
		case OPC_SEL_B16:
		case OPC_SEL_S16:
		case OPC_SEL_F16:
		case OPC_SAD_S16:
			break;
		default:
			assert(0);
			break;
		}
		break;
	case 4:
		switch (instr->opc) {
		case OPC_RSQ:
			instr->opc = OPC_HRSQ;
			break;
		case OPC_LOG2:
			instr->opc = OPC_HLOG2;
			break;
		case OPC_EXP2:
			instr->opc = OPC_HEXP2;
			break;
		default:
			break;
		}
		break;
	case 5:
		instr->cat5.type = half_type(instr->cat5.type);
		break;
	}
}
/* some instructions need fix-up if src register is half precision: */
static void fixup_half_instr_src(struct ir3_instruction *instr)
{
	switch (instr->opc) {
	case OPC_MOV:
		instr->cat1.src_type = half_type(instr->cat1.src_type);
		break;
	default:
		break;
	}
}
/* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
 * array access(es) which do not have any previous access to depend
 * on from scheduling point of view
 */
static void
reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
		struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id;

	if (reg->flags & IR3_REG_ARRAY) {
		struct ir3_array *arr =
			ir3_lookup_array(ctx->ir, reg->array.id);
		unsigned name = arr->base + reg->array.offset;
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r];

		if (reg->flags & IR3_REG_RELATIV) {
			reg->array.offset = num;
		} else {
			reg->num = num;
			reg->flags &= ~IR3_REG_SSA;
		}

		reg->flags &= ~IR3_REG_ARRAY;
	} else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
		unsigned first_component = 0;

		/* Special case for tex instructions, which may use the wrmask
		 * to mask off the first component(s).  In the scalar pass,
		 * this means the masked off component(s) are not def'd/use'd,
		 * so we get a bogus value when we ask the register_allocate
		 * algo to get the assigned reg for the unused/untouched
		 * component.  So we need to consider the first used component:
		 */
		if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
			unsigned n = ffs(id->defn->regs[0]->wrmask);
			debug_assert(n > 0);
			first_component = n - 1;
		}

		unsigned name = scalar_name(ctx, id->defn, first_component);
		unsigned r = ra_get_node_reg(ctx->g, name);
		unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;

		debug_assert(!(reg->flags & IR3_REG_RELATIV));

		debug_assert(num >= first_component);

		if (is_high(id->defn))
			num += FIRST_HIGH_REG;

		reg->num = num - first_component;

		reg->flags &= ~IR3_REG_SSA;

		if (is_half(id->defn))
			reg->flags |= IR3_REG_HALF;
	}
}
static void
account_assignment(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id;
	struct ir3_register *dst = instr->regs[0];
	unsigned max;

	if (is_high(instr))
		return;

	if (dst->flags & IR3_REG_ARRAY) {
		struct ir3_array *arr =
			ir3_lookup_array(ctx->ir, dst->array.id);
		max = arr->reg + arr->length;
	} else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
		unsigned name = scalar_name(ctx, id->defn, 0);
		unsigned r = ra_get_node_reg(ctx->g, name);
		max = ctx->set->ra_reg_to_gpr[r] + id->off + dest_regs(id->defn);
	} else {
		return;
	}

	if (is_half(instr)) {
		ctx->max_half_assigned = MAX2(ctx->max_half_assigned, max);
	} else {
		ctx->max_assigned = MAX2(ctx->max_assigned, max);
	}
}
/* helper to determine which regs to assign in which pass: */
static bool
should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
	if ((instr->opc == OPC_META_SPLIT) ||
			(instr->opc == OPC_META_COLLECT))
		return !ctx->scalar_pass;
	return ctx->scalar_pass;
}
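
/* i.e.:
 *
 *                 | split/collect | everything else
 *    -------------+---------------+----------------
 *    vecN pass    |    assign     |      skip
 *    scalar pass  |     skip      |     assign
 */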
static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
	foreach_instr (instr, &block->instr_list) {
		struct ir3_register *reg;

		if (writes_gpr(instr)) {
			account_assignment(ctx, instr);
			if (should_assign(ctx, instr)) {
				reg_assign(ctx, instr->regs[0], instr);
				if (instr->regs[0]->flags & IR3_REG_HALF)
					fixup_half_instr_dst(instr);
			}
		}

		foreach_src_n (reg, n, instr) {
			struct ir3_instruction *src = reg->instr;

			if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
				continue;

			if (src && should_assign(ctx, instr))
				reg_assign(ctx, src->regs[0], src);

			/* Note: reg->instr could be null for IR3_REG_ARRAY case: */
			if (src || (reg->flags & IR3_REG_ARRAY))
				reg_assign(ctx, instr->regs[n+1], src);

			if (instr->regs[n+1]->flags & IR3_REG_HALF)
				fixup_half_instr_src(instr);
		}
	}

	/* We need to pre-color outputs for the scalar pass in
	 * ra_precolor_assigned(), so we need to actually assign
	 * them in the first pass:
	 */
	if (!ctx->scalar_pass) {
		struct ir3_instruction *in, *out;

		foreach_input (in, ctx->ir) {
			reg_assign(ctx, in->regs[0], in);
		}
		foreach_output (out, ctx->ir) {
			reg_assign(ctx, out->regs[0], out);
		}
	}
}
/* handle pre-colored registers.  This includes "arrays" (which could be of
 * length 1, used for phi webs lowered to registers in nir), as well as
 * special shader input values that need to be pinned to certain registers.
 */
static void
ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
{
	unsigned num_precolor = 0;
	for (unsigned i = 0; i < nprecolor; i++) {
		if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
			struct ir3_instruction *instr = precolor[i];

			if (instr->regs[0]->num == INVALID_REG)
				continue;

			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

			debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));

			/* only consider the first component: */
			if (id->off > 0)
				continue;

			if (ctx->scalar_pass && !should_assign(ctx, instr))
				continue;

			/* 'base' is in scalar (class 0) but we need to map that
			 * to the conflicting register of the appropriate class (ie.
			 * input could be vec2/vec3/etc)
			 *
			 * Note that the higher class (larger than scalar) regs
			 * are setup to conflict with others in the same class,
			 * so for example, R1 (scalar) is also the first component
			 * of D1 (vec2/double):
			 *
			 *    Single (base) |  Double
			 *    --------------+---------------
			 *       R0         |  D0
			 *                  |  D0.hi
			 *       R1         |  D1
			 *                  |  D1.hi
			 *           .. and so on..
			 */
			unsigned regid = instr->regs[0]->num;
			unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
			unsigned name = ra_name(ctx, id);
			ra_set_node_reg(ctx->g, name, reg);
			num_precolor = MAX2(regid, num_precolor);
		}
	}

	/* pre-assign array elements:
	 *
	 * TODO this is going to need some work for half-precision.. possibly
	 * this is easier on a6xx, where we can just divide array size by two?
	 * But on a5xx and earlier it will need to track two bases.
	 */
	foreach_array (arr, &ctx->ir->array_list) {
		unsigned base = 0;

		if (arr->end_ip == 0)
			continue;

		/* figure out what else we conflict with which has already
		 * been assigned:
		 */
retry:
		foreach_array (arr2, &ctx->ir->array_list) {
			if (arr2 == arr)
				break;
			if (arr2->end_ip == 0)
				continue;
			/* if it intersects with liverange AND register range.. */
			if (intersects(arr->start_ip, arr->end_ip,
					arr2->start_ip, arr2->end_ip) &&
				intersects(base, base + reg_size_for_array(arr),
					arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
				base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
				goto retry;
			}
		}

		/* also need to not conflict with any pre-assigned inputs: */
		for (unsigned i = 0; i < nprecolor; i++) {
			struct ir3_instruction *instr = precolor[i];

			if (!instr || (instr->flags & IR3_INSTR_UNUSED))
				continue;

			struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

			/* only consider the first component: */
			if (id->off > 0)
				continue;

			unsigned name = ra_name(ctx, id);
			unsigned regid = instr->regs[0]->num;

			/* Check if array intersects with liverange AND register
			 * range of the input:
			 */
			if (intersects(arr->start_ip, arr->end_ip,
					ctx->def[name], ctx->use[name]) &&
				intersects(base, base + reg_size_for_array(arr),
					regid, regid + class_sizes[id->cls])) {
				base = MAX2(base, regid + class_sizes[id->cls]);
				goto retry;
			}
		}

		arr->reg = base;

		for (unsigned i = 0; i < arr->length; i++) {
			unsigned name, reg;

			if (arr->half) {
				/* Doesn't need to do this on older generations than a6xx,
				 * since there's no conflict between full regs and half regs
				 * on them.
				 *
				 * TODO Presumably "base" could start from 0 respectively
				 * for half regs of arrays on older generations.
				 */
				unsigned base_half = base * 2 + i;
				reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
				base = base_half / 2 + 1;
			} else {
				reg = ctx->set->gpr_to_ra_reg[0][base++];
			}

			name = arr->base + i;
			ra_set_node_reg(ctx->g, name, reg);
		}
	}

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		foreach_array (arr, &ctx->ir->array_list) {
			unsigned first = arr->reg;
			unsigned last  = arr->reg + arr->length - 1;
			debug_printf("arr[%d] at r%d.%c->r%d.%c\n", arr->id,
					(first >> 2), "xyzw"[first & 0x3],
					(last >> 2), "xyzw"[last & 0x3]);
		}
	}
}
static void
precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
	struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
	unsigned n = dest_regs(instr);
	for (unsigned i = 0; i < n; i++) {
		/* tex instructions actually have a wrmask, and
		 * don't touch masked out components.  So we
		 * shouldn't precolor them:
		 */
		if (is_tex_or_prefetch(instr) &&
				!(instr->regs[0]->wrmask & (1 << i)))
			continue;

		unsigned name = scalar_name(ctx, instr, i);
		unsigned regid = instr->regs[0]->num + i;

		if (instr->regs[0]->flags & IR3_REG_HIGH)
			regid -= FIRST_HIGH_REG;

		unsigned vreg = ctx->set->gpr_to_ra_reg[id->cls][regid];
		ra_set_node_reg(ctx->g, name, vreg);
	}
}
/* pre-color non-scalar registers based on the registers assigned in the
 * previous pass.  Do this by looking at the fanout instructions.
 */
static void
ra_precolor_assigned(struct ir3_ra_ctx *ctx)
{
	debug_assert(ctx->scalar_pass);

	foreach_block (block, &ctx->ir->block_list) {
		foreach_instr (instr, &block->instr_list) {

			if ((instr->opc != OPC_META_SPLIT) &&
					(instr->opc != OPC_META_COLLECT))
				continue;

			precolor(ctx, instr);

			struct ir3_register *src;
			foreach_src (src, instr) {
				if (!src->instr)
					continue;
				precolor(ctx, src->instr);
			}
		}
	}
}
static int
ra_alloc(struct ir3_ra_ctx *ctx)
{
	if (!ra_allocate(ctx->g))
		return -1;

	foreach_block (block, &ctx->ir->block_list) {
		ra_block_alloc(ctx, block);
	}

	return 0;
}
/* if we end up with split/collect instructions with non-matching src
 * and dest regs, that means something has gone wrong.  Which makes it
 * a pretty good sanity check.
 */
static void
ra_sanity_check(struct ir3 *ir)
{
	foreach_block (block, &ir->block_list) {
		foreach_instr (instr, &block->instr_list) {
			if (instr->opc == OPC_META_SPLIT) {
				struct ir3_register *dst = instr->regs[0];
				struct ir3_register *src = instr->regs[1];
				debug_assert(dst->num == (src->num + instr->split.off));
			} else if (instr->opc == OPC_META_COLLECT) {
				struct ir3_register *dst = instr->regs[0];
				struct ir3_register *src;

				foreach_src_n (src, n, instr) {
					debug_assert(dst->num == (src->num - n));
				}
			}
		}
	}
}
/* Target is calculated in terms of half-regs (with a full reg
 * consisting of two half-regs).
 */
static void
ra_calc_merged_register_target(struct ir3_ra_ctx *ctx)
{
	const unsigned vec4 = 2 * 4;  /* 8 half-regs */
	unsigned t = MAX2(2 * ctx->max_assigned, ctx->max_half_assigned);

	/* second RA pass may have saved some regs, let's try to reclaim
	 * the benefit by adjusting the target downwards slightly:
	 */
	if (ir3_has_latency_to_hide(ctx->ir)) {
		if (t > 8 * vec4) {
			t -= 2 * vec4;
		} else if (t > 6 * vec4) {
			t -= vec4;
		}
	}

	ctx->max_target = t;
}
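
/* e.g. (hypothetical first-pass results): max_assigned = 20 full regs
 * and max_half_assigned = 10 gives t = MAX2(2 * 20, 10) = 40 half-regs,
 * i.e. five vec4s worth of full registers for the scalar pass to target.
 */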
static int
ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
		unsigned nprecolor, bool scalar_pass, unsigned *target)
{
	struct ir3_ra_ctx ctx = {
			.v = v,
			.ir = v->ir,
			.set = v->ir->compiler->set,
			.scalar_pass = scalar_pass,
	};
	int ret;

	ctx.max_target = *target;

	ra_init(&ctx);
	ra_add_interference(&ctx);
	ra_precolor(&ctx, precolor, nprecolor);
	if (scalar_pass)
		ra_precolor_assigned(&ctx);
	ret = ra_alloc(&ctx);

	/* In the first pass, calculate the target register usage used in the
	 * second (scalar) pass:
	 */
	if (!scalar_pass) {
		/* TODO: round-robin support for pre-a6xx: */
		if (ctx.ir->compiler->gpu_id >= 600) {
			ra_calc_merged_register_target(&ctx);
		}
		*target = ctx.max_target;
	}

	ra_destroy(&ctx);

	return ret;
}
int
ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
		unsigned nprecolor)
{
	unsigned target = 0;
	int ret;

	/* First pass, assign the vecN (non-scalar) registers: */
	ret = ir3_ra_pass(v, precolor, nprecolor, false, &target);
	if (ret)
		return ret;

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER RA (1st pass):\n");
		ir3_print(v->ir);
	}

	/* Second pass, assign the scalar registers: */
	ret = ir3_ra_pass(v, precolor, nprecolor, true, &target);
	if (ret)
		return ret;

	if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
		printf("AFTER RA (2nd pass):\n");
		ir3_print(v->ir);
	}

#ifdef DEBUG
#  define SANITY_CHECK DEBUG
#else
#  define SANITY_CHECK 0
#endif
	if (SANITY_CHECK)
		ra_sanity_check(v->ir);

	return ret;
}