/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/u_math.h"
#include "util/register_allocate.h"
#include "util/ralloc.h"
#include "util/bitset.h"

#include "freedreno_util.h"

#include "ir3.h"
#include "ir3_compiler.h"
/*
 * Register Assignment:
 *
 * Uses the register_allocate util, which implements a graph-coloring
 * algorithm with interference classes.  To handle the cases where we
 * need consecutive registers (for example, texture sample instructions),
 * we model these as larger (double/quad/etc) registers which conflict
 * with the corresponding registers in other classes.
 *
 * We also create additional classes for half-regs, which do not
 * conflict with the full-reg classes.  We do need at least sizes 1-4
 * (to deal w/ texture sample instructions output to half-reg).  At the
 * moment we don't create the higher order half-reg classes, as half-reg
 * frequently does not have enough precision for texture coords at
 * higher resolutions.
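 *
 * For example (an illustrative case, not tied to specific code below):
 * a 'sam' writing a full-precision vec4 gets a single size-4 class
 * name; that name conflicts with the four scalar names it overlaps,
 * so the coloring can never place another value on top of it.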
 *
 * There are some additional cases that we need to handle specially,
 * as the graph-coloring algorithm doesn't understand "partial writes".
 * For example, a sequence like:
 *
 *   add r0.z, ...
 *   sam (f32)(xy)r0.x, ...
 *   ...
 *   sam (f32)(xyzw)r0.w, r0.x, ...  ; 3d texture, so r0.xyz are coord
 *
 * In this scenario, we treat r0.xyz as class size 3, which is written
 * (from a use/def perspective) at the 'add' instruction and ignore the
 * subsequent partial writes to r0.xy.  So the 'add r0.z, ...' is the
 * defining instruction, as it is the first to partially write r0.xyz.
 *
 * Note i965 has a similar scenario, which they solve with a virtual
 * LOAD_PAYLOAD instruction which gets turned into multiple MOV's after
 * register assignment.  But for us that is horrible from a scheduling
 * standpoint.  Instead what we do is use the idea of a 'definer'
 * instruction.  Ie. the first instruction (lowest ip) to write to the
 * variable is the one we consider from a use/def perspective when
 * building the interference graph.  (Other instructions which write
 * other variable components just define the variable some more.)
 *
 * Arrays of arbitrary size are handled via pre-coloring a consecutive
 * sequence of registers.  Additional scalar (single component) reg
 * names are allocated starting at ctx->class_base[total_class_count]
 * (see arr->base), which are pre-colored.  In the use/def graph direct
 * access is treated as a single element use/def, and indirect access
 * is treated as use or def of all array elements.  (Only the first
 * def is tracked, in case of multiple indirect writes, etc.)
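 *
 * For example (illustrative): a length-4 array gets four consecutive
 * scalar names starting at arr->base; a direct write to arr[2] defs
 * only name arr->base+2, while an indirect write through a0.x defs
 * all four names.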
 *
 * TODO arrays that fit in one of the pre-defined class sizes should
 * not need to be pre-colored, but instead could be given a normal
 * vreg name.  (Ignoring this for now since it is a good way to work
 * out the kinks with arbitrary sized arrays.)
 *
 * TODO might be easier for debugging to split this into two passes,
 * the first assigning vreg names in a way that we could ir3_print()
 * the result.
 */
static const unsigned class_sizes[] = {
    1, 2, 3, 4,
    4 + 4, /* txd + 1d/2d */
    4 + 6, /* txd + 3d */
};
#define class_count ARRAY_SIZE(class_sizes)

static const unsigned half_class_sizes[] = {
    1, 2, 3, 4,
};
#define half_class_count ARRAY_SIZE(half_class_sizes)

/* seems to just be used for compute shaders?  Seems like vec1 and vec3
 * are sufficient (for now?)
 */
static const unsigned high_class_sizes[] = {
    1, 3,
};
#define high_class_count ARRAY_SIZE(high_class_sizes)

#define total_class_count (class_count + half_class_count + high_class_count)

/* Below a0.x are normal regs.  RA doesn't need to assign a0.x/p0.x. */
#define NUM_REGS       (4 * 48)  /* r0 to r47 */
#define NUM_HIGH_REGS  (4 * 8)   /* r48 to r55 */
#define FIRST_HIGH_REG (4 * 48)
/* Number of virtual regs in a given class: */
#define CLASS_REGS(i)      (NUM_REGS - (class_sizes[i] - 1))
#define HALF_CLASS_REGS(i) (NUM_REGS - (half_class_sizes[i] - 1))
#define HIGH_CLASS_REGS(i) (NUM_HIGH_REGS - (high_class_sizes[i] - 1))

#define HALF_OFFSET (class_count)
#define HIGH_OFFSET (class_count + half_class_count)
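
/* For a sense of the sizes involved (illustrative arithmetic): with
 * NUM_REGS = 192 scalars, the size-2 class has CLASS_REGS(1) =
 * 192 - (2 - 1) = 191 virtual regs, since a double reg can start at
 * any scalar position except the last.
 */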

/* register-set, created one time, used for all shaders: */
struct ir3_ra_reg_set {
    struct ra_regs *regs;
    unsigned int classes[class_count];
    unsigned int half_classes[half_class_count];
    unsigned int high_classes[high_class_count];
    /* maps flat virtual register space to base gpr: */
    uint16_t *ra_reg_to_gpr;
    /* maps cls,gpr to flat virtual register space: */
    uint16_t **gpr_to_ra_reg;
};

static void
build_q_values(unsigned int **q_values, unsigned off,
        const unsigned *sizes, unsigned count)
{
    for (unsigned i = 0; i < count; i++) {
        q_values[i + off] = rzalloc_array(q_values, unsigned, total_class_count);

        /* From register_allocate.c:
         *
         * q(B,C) (indexed by C, B is this register class) in
         * Runeson/Nyström paper.  This is "how many registers of B could
         * the worst choice register from C conflict with".
         *
         * If we just let the register allocation algorithm compute these
         * values, it is extremely expensive.  However, since all of our
         * registers are laid out, we can very easily compute them
         * ourselves.  View the register from C as fixed starting at GRF n
         * somewhere in the middle, and the register from B as sliding back
         * and forth.  Then the first register to conflict from B is the
         * one starting at n - class_size[B] + 1 and the last register to
         * conflict will start at n + class_size[B] - 1.  Therefore, the
         * number of conflicts from B is class_size[B] + class_size[C] - 1.
         *
         *    +-+-+-+-+-+-+     +-+-+-+-+-+-+
         * B  | | | | | |n| --> | | | | | | |
         *    +-+-+-+-+-+-+     +-+-+-+-+-+-+
         *                 +-+-+-+-+-+
         * C               |n| | | | |
         *                 +-+-+-+-+-+
         *
         * (Idea copied from brw_fs_reg_allocate.cpp)
         */
        for (unsigned j = 0; j < count; j++)
            q_values[i + off][j + off] = sizes[i] + sizes[j] - 1;
    }
}
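
/* Worked example (illustrative): for a size-2 class B and a size-3
 * class C, the worst-placed register from C overlaps 2 + 3 - 1 = 4
 * registers of B, so q(B,C) = 4.
 */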

/* One-time setup of RA register-set, which describes all the possible
 * "virtual" registers and their interferences.  Ie. double register
 * occupies (and conflicts with) two single registers, and so forth.
 * Since registers do not need to be aligned to their class size, they
 * can conflict with other registers in the same class too.  Ie:
 *
 *    Single (base) |  Double
 *    --------------+---------------
 *       R0         |  D0
 *       R1         |  D0 D1
 *       R2         |     D1 D2
 *       R3         |        D2
 *           .. and so on..
 *
 * (NOTE the disassembler uses notation like r0.x/y/z/w but those are
 * really just four scalar registers.  Don't let that confuse you.)
 */
struct ir3_ra_reg_set *
ir3_ra_alloc_reg_set(struct ir3_compiler *compiler)
{
    struct ir3_ra_reg_set *set = rzalloc(compiler, struct ir3_ra_reg_set);
    unsigned ra_reg_count, reg, first_half_reg, first_high_reg, base;
    unsigned int **q_values;

    /* calculate # of regs across all classes: */
    ra_reg_count = 0;
    for (unsigned i = 0; i < class_count; i++)
        ra_reg_count += CLASS_REGS(i);
    for (unsigned i = 0; i < half_class_count; i++)
        ra_reg_count += HALF_CLASS_REGS(i);
    for (unsigned i = 0; i < high_class_count; i++)
        ra_reg_count += HIGH_CLASS_REGS(i);

    /* allocate and populate q_values: */
    q_values = ralloc_array(set, unsigned *, total_class_count);

    build_q_values(q_values, 0, class_sizes, class_count);
    build_q_values(q_values, HALF_OFFSET, half_class_sizes, half_class_count);
    build_q_values(q_values, HIGH_OFFSET, high_class_sizes, high_class_count);

    /* allocate the reg-set.. */
    set->regs = ra_alloc_reg_set(set, ra_reg_count, true);
    set->ra_reg_to_gpr = ralloc_array(set, uint16_t, ra_reg_count);
    set->gpr_to_ra_reg = ralloc_array(set, uint16_t *, total_class_count);

    /* .. and classes */
    reg = 0;
    for (unsigned i = 0; i < class_count; i++) {
        set->classes[i] = ra_alloc_reg_class(set->regs);

        set->gpr_to_ra_reg[i] = ralloc_array(set, uint16_t, CLASS_REGS(i));

        for (unsigned j = 0; j < CLASS_REGS(i); j++) {
            ra_class_add_reg(set->regs, set->classes[i], reg);

            set->ra_reg_to_gpr[reg] = j;
            set->gpr_to_ra_reg[i][j] = reg;

            for (unsigned br = j; br < j + class_sizes[i]; br++)
                ra_add_transitive_reg_conflict(set->regs, br, reg);

            reg++;
        }
    }

    first_half_reg = reg;
    base = HALF_OFFSET;

    for (unsigned i = 0; i < half_class_count; i++) {
        set->half_classes[i] = ra_alloc_reg_class(set->regs);

        set->gpr_to_ra_reg[base + i] =
                ralloc_array(set, uint16_t, HALF_CLASS_REGS(i));

        for (unsigned j = 0; j < HALF_CLASS_REGS(i); j++) {
            ra_class_add_reg(set->regs, set->half_classes[i], reg);

            set->ra_reg_to_gpr[reg] = j;
            set->gpr_to_ra_reg[base + i][j] = reg;

            for (unsigned br = j; br < j + half_class_sizes[i]; br++)
                ra_add_transitive_reg_conflict(set->regs, br + first_half_reg, reg);

            reg++;
        }
    }

    first_high_reg = reg;
    base = HIGH_OFFSET;

    for (unsigned i = 0; i < high_class_count; i++) {
        set->high_classes[i] = ra_alloc_reg_class(set->regs);

        set->gpr_to_ra_reg[base + i] =
                ralloc_array(set, uint16_t, HIGH_CLASS_REGS(i));

        for (unsigned j = 0; j < HIGH_CLASS_REGS(i); j++) {
            ra_class_add_reg(set->regs, set->high_classes[i], reg);

            set->ra_reg_to_gpr[reg] = j;
            set->gpr_to_ra_reg[base + i][j] = reg;

            for (unsigned br = j; br < j + high_class_sizes[i]; br++)
                ra_add_transitive_reg_conflict(set->regs, br + first_high_reg, reg);

            reg++;
        }
    }

    /* starting a6xx, half precision regs conflict w/ full precision regs: */
    if (compiler->gpu_id >= 600) {
        /* because of transitivity, we can get away with just setting up
         * conflicts between the first class of full and half regs:
         */
        for (unsigned j = 0; j < CLASS_REGS(0) / 2; j++) {
            unsigned freg  = set->gpr_to_ra_reg[0][j];
            unsigned hreg0 = set->gpr_to_ra_reg[HALF_OFFSET][(j * 2) + 0];
            unsigned hreg1 = set->gpr_to_ra_reg[HALF_OFFSET][(j * 2) + 1];
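
            /* each full-precision reg aliases two half regs (e.g. r0.x
             * covering hr0.x and hr0.y, illustratively), hence the
             * (j * 2) indexing above:
             */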
            ra_add_transitive_reg_conflict(set->regs, freg, hreg0);
            ra_add_transitive_reg_conflict(set->regs, freg, hreg1);
        }

        // TODO also need to update q_values, but for now:
        ra_set_finalize(set->regs, NULL);
    } else {
        ra_set_finalize(set->regs, q_values);
    }

    ralloc_free(q_values);

    return set;
}

/* additional block-data (per-block) */
struct ir3_ra_block_data {
    BITSET_WORD *def;        /* variables defined before used in block */
    BITSET_WORD *use;        /* variables used before defined in block */
    BITSET_WORD *livein;     /* which defs reach entry point of block */
    BITSET_WORD *liveout;    /* which defs reach exit point of block */
};

/* additional instruction-data (per-instruction) */
struct ir3_ra_instr_data {
    /* cached instruction 'definer' info: */
    struct ir3_instruction *defn;
    int off, sz, cls;
};

/* register-assign context, per-shader */
struct ir3_ra_ctx {
    struct ir3 *ir;
    enum shader_t type;
    bool frag_face;

    struct ir3_ra_reg_set *set;
    struct ra_graph *g;
    unsigned alloc_count;
    /* one per class, plus one slot for arrays: */
    unsigned class_alloc_count[total_class_count + 1];
    unsigned class_base[total_class_count + 1];
    unsigned instr_cnt;
    unsigned *def, *use;     /* def/use table */
    struct ir3_ra_instr_data *instrd;
};

/* does it conflict? */
static inline bool
intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
{
    return !((a_start >= b_end) || (b_start >= a_end));
}
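
/* Note the ranges are effectively half-open, so e.g. (illustrative)
 * intersects(0, 4, 4, 8) is false: back-to-back live ranges may share
 * a register, while intersects(0, 5, 4, 8) is true.
 */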

static bool
is_half(struct ir3_instruction *instr)
{
    return !!(instr->regs[0]->flags & IR3_REG_HALF);
}

static bool
is_high(struct ir3_instruction *instr)
{
    return !!(instr->regs[0]->flags & IR3_REG_HIGH);
}

static int
size_to_class(unsigned sz, bool half, bool high)
{
    if (high) {
        for (unsigned i = 0; i < high_class_count; i++)
            if (high_class_sizes[i] >= sz)
                return i + HIGH_OFFSET;
    } else if (half) {
        for (unsigned i = 0; i < half_class_count; i++)
            if (half_class_sizes[i] >= sz)
                return i + HALF_OFFSET;
    } else {
        for (unsigned i = 0; i < class_count; i++)
            if (class_sizes[i] >= sz)
                return i;
    }
    debug_assert(0);
    return -1;
}
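
/* E.g. (illustrative): size_to_class(5, false, false) picks the
 * smallest full-precision class that fits, class_sizes[4] == 4 + 4,
 * so a 5-component value occupies an 8-wide register name.
 */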

static bool
writes_gpr(struct ir3_instruction *instr)
{
    if (is_store(instr))
        return false;
    /* is dest a normal temp register: */
    struct ir3_register *reg = instr->regs[0];
    if (reg->flags & (IR3_REG_CONST | IR3_REG_IMMED))
        return false;
    if ((reg->num == regid(REG_A0, 0)) ||
            (reg->num == regid(REG_P0, 0)))
        return false;
    return true;
}

static bool
instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
{
    if (a->flags & IR3_INSTR_UNUSED)
        return false;
    return (a->ip < b->ip);
}

static struct ir3_instruction *
get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
        int *sz, int *off)
{
    struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
    struct ir3_instruction *d = NULL;

    if (id->defn) {
        *sz = id->sz;
        *off = id->off;
        return id->defn;
    }

    if (instr->opc == OPC_META_FI) {
        /* What about the case where collect is subset of array, we
         * need to find the distance between where actual array starts
         * and fanin..  that probably doesn't happen currently.
         */
        struct ir3_register *src;
        int dsz, doff;

        /* note: don't use foreach_ssa_src as this gets called once
         * while assigning regs (which clears SSA flag)
         */
        foreach_src_n(src, n, instr) {
            struct ir3_instruction *dd;

            if (!src->instr)
                continue;

            dd = get_definer(ctx, src->instr, &dsz, &doff);

            if ((!d) || instr_before(dd, d)) {
                d = dd;
                *sz = dsz;
                *off = doff - n;
            }
        }

    } else if (instr->cp.right || instr->cp.left) {
        /* covers also the meta:fo case, which ends up w/ single
         * scalar instructions for each component:
         */
        struct ir3_instruction *f = ir3_neighbor_first(instr);

        /* by definition, the entire sequence forms one linked list
         * of single scalar register nodes (even if some of them may
         * be fanouts from a texture sample (for example) instr.  We
         * just need to walk the list finding the first element of
         * the group defined (lowest ip)
         */
        int cnt = 0;

        /* need to skip over unused in the group: */
        while (f && (f->flags & IR3_INSTR_UNUSED)) {
            f = f->cp.right;
            cnt++;
        }

        while (f) {
            if ((!d) || instr_before(f, d))
                d = f;
            if (f == instr)
                *off = cnt;
            f = f->cp.right;
            cnt++;
        }

        *sz = cnt;

    } else {
        /* second case is looking directly at the instruction which
         * produces multiple values (eg, texture sample), rather
         * than the fanout nodes that point back to that instruction.
         * This isn't quite right, because it may be part of a larger
         * group, such as:
         *
         *   sam (f32)(xyzw)r0.x, ...
         *   add r1.x, ...
         *   sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
         *
         * need to come up with a better way to handle that case.
         */
        if (instr->address) {
            *sz = instr->regs[0]->size;
        } else {
            *sz = util_last_bit(instr->regs[0]->wrmask);
        }
        *off = 0;
        d = instr;
    }

    if (d->opc == OPC_META_FO) {
        struct ir3_instruction *dd;
        int dsz, doff;

        dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);

        /* by definition, should come before: */
        debug_assert(instr_before(dd, d));

        *sz = MAX2(*sz, dsz);

        debug_assert(instr->opc == OPC_META_FO);
        *off = MAX2(*off, instr->fo.off);

        d = dd;
    }

    id->defn = d;
    id->sz = *sz;
    id->off = *off;

    return d;
}

static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
    list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
        struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
        if (instr->regs_count == 0)
            continue;
        /* couple special cases: */
        if (writes_addr(instr) || writes_pred(instr)) {
            id->cls = -1;
        } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
            id->cls = total_class_count;
        } else {
            id->defn = get_definer(ctx, instr, &id->sz, &id->off);
            id->cls = size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
        }
    }
}

/* give each instruction a name (and ip), and count up the # of names
 * of each class:
 */
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
    list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
        struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

        ctx->instr_cnt++;
        instr->ip = ctx->instr_cnt;

        instr->name = ~0;

        if (instr->regs_count == 0)
            continue;

        if (!writes_gpr(instr))
            continue;

        if (id->defn != instr)
            continue;

        /* arrays which don't fit in one of the pre-defined class
         * sizes are pre-colored:
         */
        if ((id->cls >= 0) && (id->cls < total_class_count)) {
            instr->name = ctx->class_alloc_count[id->cls]++;
            ctx->alloc_count++;
        }
    }
}

static void
ra_init(struct ir3_ra_ctx *ctx)
{
    unsigned n, base;

    ir3_clear_mark(ctx->ir);
    n = ir3_count_instructions(ctx->ir);

    ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);

    list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
        ra_block_find_definers(ctx, block);
    }

    list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
        ra_block_name_instructions(ctx, block);
    }

    /* figure out the base register name for each class.  The
     * actual ra name is class_base[cls] + instr->name;
     */
    ctx->class_base[0] = 0;
    for (unsigned i = 1; i <= total_class_count; i++) {
        ctx->class_base[i] = ctx->class_base[i-1] +
                ctx->class_alloc_count[i-1];
    }
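
    /* Illustrative numbering: with class_alloc_count = {3, 2, ...},
     * class 0 gets names 0..2, class 1 names start at
     * class_base[1] == 3, and array element names begin after all
     * the class names.
     */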

    /* and vreg names for array elements: */
    base = ctx->class_base[total_class_count];
    list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
        arr->base = base;
        ctx->class_alloc_count[total_class_count] += arr->length;
        base += arr->length;
    }
    ctx->alloc_count += ctx->class_alloc_count[total_class_count];

    ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
    ralloc_steal(ctx->g, ctx->instrd);
    ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
    ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
}

static unsigned
__ra_name(struct ir3_ra_ctx *ctx, int cls, struct ir3_instruction *defn)
{
    unsigned name;
    debug_assert(cls >= 0);
    debug_assert(cls < total_class_count);  /* we shouldn't get arrays here.. */
    name = ctx->class_base[cls] + defn->name;
    debug_assert(name < ctx->alloc_count);
    return name;
}

static unsigned
ra_name(struct ir3_ra_ctx *ctx, struct ir3_ra_instr_data *id)
{
    /* TODO handle name mapping for arrays */
    return __ra_name(ctx, id->cls, id->defn);
}

static void
ra_destroy(struct ir3_ra_ctx *ctx)
{
    ralloc_free(ctx->g);
}

static void
ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
    struct ir3_ra_block_data *bd;
    unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);

#define def(name, instr) \
        do { \
            /* defined on first write: */ \
            if (!ctx->def[name]) \
                ctx->def[name] = instr->ip; \
            ctx->use[name] = instr->ip; \
            BITSET_SET(bd->def, name); \
        } while(0);

#define use(name, instr) \
        do { \
            ctx->use[name] = MAX2(ctx->use[name], instr->ip); \
            if (!BITSET_TEST(bd->def, name)) \
                BITSET_SET(bd->use, name); \
        } while(0);
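
    /* Illustrative trace: if name 5 is first written at ip 10 and last
     * read at ip 14, then ctx->def[5] == 10 and ctx->use[5] == 14,
     * giving the [10,14] range that feeds intersects() below.
     */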

    bd = rzalloc(ctx->g, struct ir3_ra_block_data);

    bd->def     = rzalloc_array(bd, BITSET_WORD, bitset_words);
    bd->use     = rzalloc_array(bd, BITSET_WORD, bitset_words);
    bd->livein  = rzalloc_array(bd, BITSET_WORD, bitset_words);
    bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);

    block->data = bd;

    list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
        struct ir3_instruction *src;
        struct ir3_register *reg;

        if (instr->regs_count == 0)
            continue;

        /* There are a couple special cases to deal with here:
         *
         * fanout: used to split values from a higher class to a lower
         *     class, for example split the results of a texture fetch
         *     into individual scalar values;  We skip over these from
         *     a 'def' perspective, and for a 'use' we walk the chain
         *     up to the defining instruction.
         *
         * fanin: used to collect values from lower class and assemble
         *     them together into a higher class, for example arguments
         *     to texture sample instructions;  We consider these to be
         *     defined at the earliest fanin source.
         *
         * Most of this is handled in the get_definer() helper.
         *
         * In either case, we trace the instruction back to the original
         * definer and consider that as the def/use ip.
         */

        if (writes_gpr(instr)) {
            struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
            struct ir3_register *dst = instr->regs[0];

            if (dst->flags & IR3_REG_ARRAY) {
                struct ir3_array *arr =
                    ir3_lookup_array(ctx->ir, dst->array.id);
                unsigned i;

                arr->start_ip = MIN2(arr->start_ip, instr->ip);
                arr->end_ip = MAX2(arr->end_ip, instr->ip);

                /* set the node class now.. in case we don't encounter
                 * this array dst again.  From register_alloc algo's
                 * perspective, these are all single/scalar regs:
                 */
                for (i = 0; i < arr->length; i++) {
                    unsigned name = arr->base + i;
                    ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
                }

                /* indirect write is treated like a write to all array
                 * elements, since we don't know which one is actually
                 * written:
                 */
                if (dst->flags & IR3_REG_RELATIV) {
                    for (i = 0; i < arr->length; i++) {
                        unsigned name = arr->base + i;
                        def(name, instr);
                    }
                } else {
                    unsigned name = arr->base + dst->array.offset;
                    def(name, instr);
                }
            } else if (id->defn == instr) {
                unsigned name = ra_name(ctx, id);

                /* since we are in SSA at this point: */
                debug_assert(!BITSET_TEST(bd->use, name));

                def(name, id->defn);

                if (is_high(id->defn)) {
                    ra_set_node_class(ctx->g, name,
                            ctx->set->high_classes[id->cls - HIGH_OFFSET]);
                } else if (is_half(id->defn)) {
                    ra_set_node_class(ctx->g, name,
                            ctx->set->half_classes[id->cls - HALF_OFFSET]);
                } else {
                    ra_set_node_class(ctx->g, name,
                            ctx->set->classes[id->cls]);
                }
            }
        }

        foreach_src(reg, instr) {
            if (reg->flags & IR3_REG_ARRAY) {
                struct ir3_array *arr =
                    ir3_lookup_array(ctx->ir, reg->array.id);
                arr->start_ip = MIN2(arr->start_ip, instr->ip);
                arr->end_ip = MAX2(arr->end_ip, instr->ip);

                /* indirect read is treated like a read from all array
                 * elements, since we don't know which one is actually
                 * read:
                 */
                if (reg->flags & IR3_REG_RELATIV) {
                    unsigned i;
                    for (i = 0; i < arr->length; i++) {
                        unsigned name = arr->base + i;
                        use(name, instr);
                    }
                } else {
                    unsigned name = arr->base + reg->array.offset;
                    use(name, instr);
                    /* NOTE: arrays are not SSA so unconditionally
                     * set use bit:
                     */
                    BITSET_SET(bd->use, name);
                    debug_assert(reg->array.offset < arr->length);
                }
            } else if ((src = ssa(reg)) && writes_gpr(src)) {
                unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
                use(name, instr);
            }
        }
    }
}
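
/* The livein/liveout calculation below is the standard backward
 * dataflow fixpoint, stated here for reference:
 *
 *    livein(b)  = use(b) | (liveout(b) & ~def(b))
 *    liveout(b) = union of livein(s) over all successors s of b
 *
 * iterated (via the while loop in ra_add_interference()) until
 * nothing changes.
 */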
static bool
ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
{
    unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
    bool progress = false;

    list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
        struct ir3_ra_block_data *bd = block->data;

        /* update livein: */
        for (unsigned i = 0; i < bitset_words; i++) {
            BITSET_WORD new_livein =
                (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));

            if (new_livein & ~bd->livein[i]) {
                bd->livein[i] |= new_livein;
                progress = true;
            }
        }

        /* update liveout: */
        for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
            struct ir3_block *succ = block->successors[j];
            struct ir3_ra_block_data *succ_bd;

            if (!succ)
                continue;

            succ_bd = succ->data;

            for (unsigned i = 0; i < bitset_words; i++) {
                BITSET_WORD new_liveout =
                    (succ_bd->livein[i] & ~bd->liveout[i]);

                if (new_liveout) {
                    bd->liveout[i] |= new_liveout;
                    progress = true;
                }
            }
        }
    }

    return progress;
}

static void
print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
{
    debug_printf("  %s:", name);
    for (unsigned i = 0; i < cnt; i++) {
        if (BITSET_TEST(bs, i)) {
            debug_printf(" %04u", i);
        }
    }
    debug_printf("\n");
}

static void
ra_add_interference(struct ir3_ra_ctx *ctx)
{
    struct ir3 *ir = ctx->ir;

    /* initialize array live ranges: */
    list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
        arr->start_ip = ~0;
        arr->end_ip = 0;
    }

    /* compute live ranges (use/def) on a block level, also updating
     * block's def/use bitmasks (used below to calculate per-block
     * livein/liveout):
     */
    list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
        ra_block_compute_live_ranges(ctx, block);
    }

    /* update per-block livein/liveout: */
    while (ra_compute_livein_liveout(ctx)) {}

    if (fd_mesa_debug & FD_DBG_OPTMSGS) {
        debug_printf("AFTER LIVEIN/OUT:\n");
        ir3_print(ir);
        list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
            struct ir3_ra_block_data *bd = block->data;
            debug_printf("block%u:\n", block_id(block));
            print_bitset("  def", bd->def, ctx->alloc_count);
            print_bitset("  use", bd->use, ctx->alloc_count);
            print_bitset("  l/i", bd->livein, ctx->alloc_count);
            print_bitset("  l/o", bd->liveout, ctx->alloc_count);
        }
        list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
            debug_printf("array%u:\n", arr->id);
            debug_printf("   length:   %u\n", arr->length);
            debug_printf("   start_ip: %u\n", arr->start_ip);
            debug_printf("   end_ip:   %u\n", arr->end_ip);
        }
    }
890 list_for_each_entry (struct ir3_block
, block
, &ir
->block_list
, node
) {
891 struct ir3_ra_block_data
*bd
= block
->data
;
893 for (unsigned i
= 0; i
< ctx
->alloc_count
; i
++) {
894 if (BITSET_TEST(bd
->livein
, i
)) {
895 ctx
->def
[i
] = MIN2(ctx
->def
[i
], block
->start_ip
);
896 ctx
->use
[i
] = MAX2(ctx
->use
[i
], block
->start_ip
);
899 if (BITSET_TEST(bd
->liveout
, i
)) {
900 ctx
->def
[i
] = MIN2(ctx
->def
[i
], block
->end_ip
);
901 ctx
->use
[i
] = MAX2(ctx
->use
[i
], block
->end_ip
);
905 list_for_each_entry (struct ir3_array
, arr
, &ctx
->ir
->array_list
, node
) {
906 for (unsigned i
= 0; i
< arr
->length
; i
++) {
907 if (BITSET_TEST(bd
->livein
, i
+ arr
->base
)) {
908 arr
->start_ip
= MIN2(arr
->start_ip
, block
->start_ip
);
910 if (BITSET_TEST(bd
->livein
, i
+ arr
->base
)) {
911 arr
->end_ip
= MAX2(arr
->end_ip
, block
->end_ip
);

    /* need to fix things up to keep outputs live: */
    for (unsigned i = 0; i < ir->noutputs; i++) {
        struct ir3_instruction *instr = ir->outputs[i];
        unsigned name = ra_name(ctx, &ctx->instrd[instr->ip]);
        ctx->use[name] = ctx->instr_cnt;
    }

    for (unsigned i = 0; i < ctx->alloc_count; i++) {
        for (unsigned j = 0; j < ctx->alloc_count; j++) {
            if (intersects(ctx->def[i], ctx->use[i],
                    ctx->def[j], ctx->use[j])) {
                ra_add_node_interference(ctx->g, i, j);
            }
        }
    }
}

/* some instructions need fix-up if dst register is half precision: */
static void fixup_half_instr_dst(struct ir3_instruction *instr)
{
    switch (opc_cat(instr->opc)) {
    case 1: /* move instructions */
        instr->cat1.dst_type = half_type(instr->cat1.dst_type);
        break;
    case 3:
        switch (instr->opc) {
        case OPC_MAD_F32:
            instr->opc = OPC_MAD_F16;
            break;
        case OPC_SEL_B32:
            instr->opc = OPC_SEL_B16;
            break;
        case OPC_SEL_S32:
            instr->opc = OPC_SEL_S16;
            break;
        case OPC_SEL_F32:
            instr->opc = OPC_SEL_F16;
            break;
        case OPC_SAD_S32:
            instr->opc = OPC_SAD_S16;
            break;
        /* instructions may already be fixed up: */
        case OPC_MAD_F16:
        case OPC_SEL_B16:
        case OPC_SEL_S16:
        case OPC_SEL_F16:
        case OPC_SAD_S16:
            break;
        default:
            assert(0);
            break;
        }
        break;
    case 5:
        instr->cat5.type = half_type(instr->cat5.type);
        break;
    }
}

/* some instructions need fix-up if src register is half precision: */
static void fixup_half_instr_src(struct ir3_instruction *instr)
{
    switch (instr->opc) {
    case OPC_MOV:
        instr->cat1.src_type = half_type(instr->cat1.src_type);
        break;
    default:
        break;
    }
}

/* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
 * array access(es) which do not have any previous access to depend
 * on from scheduling point of view
 */
static void
reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
        struct ir3_instruction *instr)
{
    struct ir3_ra_instr_data *id;

    if (reg->flags & IR3_REG_ARRAY) {
        struct ir3_array *arr =
            ir3_lookup_array(ctx->ir, reg->array.id);
        unsigned name = arr->base + reg->array.offset;
        unsigned r = ra_get_node_reg(ctx->g, name);
        unsigned num = ctx->set->ra_reg_to_gpr[r];

        if (reg->flags & IR3_REG_RELATIV) {
            reg->array.offset = num;
        } else {
            reg->num = num;
            reg->flags &= ~IR3_REG_SSA;
        }

        reg->flags &= ~IR3_REG_ARRAY;
    } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
        unsigned name = ra_name(ctx, id);
        unsigned r = ra_get_node_reg(ctx->g, name);
        unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;

        debug_assert(!(reg->flags & IR3_REG_RELATIV));

        if (is_high(id->defn))
            num += FIRST_HIGH_REG;

        reg->num = num;
        reg->flags &= ~IR3_REG_SSA;

        if (is_half(id->defn))
            reg->flags |= IR3_REG_HALF;
    }
}

static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
    list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
        struct ir3_register *reg;

        if (instr->regs_count == 0)
            continue;

        if (writes_gpr(instr)) {
            reg_assign(ctx, instr->regs[0], instr);
            if (instr->regs[0]->flags & IR3_REG_HALF)
                fixup_half_instr_dst(instr);
        }

        foreach_src_n(reg, n, instr) {
            struct ir3_instruction *src = reg->instr;
            /* Note: reg->instr could be null for IR3_REG_ARRAY */
            if (!(src || (reg->flags & IR3_REG_ARRAY)))
                continue;
            reg_assign(ctx, instr->regs[n+1], src);
            if (instr->regs[n+1]->flags & IR3_REG_HALF)
                fixup_half_instr_src(instr);
        }
    }
}

static int
ra_alloc(struct ir3_ra_ctx *ctx)
{
    /* pre-assign array elements:
     */
    list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
        unsigned base = 0;

        if (arr->end_ip == 0)
            continue;

        /* figure out what else we conflict with which has already
         * been assigned:
         */
retry:
        list_for_each_entry (struct ir3_array, arr2, &ctx->ir->array_list, node) {
            if (arr2 == arr)
                break;
            if (arr2->end_ip == 0)
                continue;
            /* if it intersects with liverange AND register range.. */
            if (intersects(arr->start_ip, arr->end_ip,
                    arr2->start_ip, arr2->end_ip) &&
                intersects(base, base + arr->length,
                    arr2->reg, arr2->reg + arr2->length)) {
                base = MAX2(base, arr2->reg + arr2->length);
                goto retry;
            }
        }

        arr->reg = base;

        for (unsigned i = 0; i < arr->length; i++) {
            unsigned name, reg;

            name = arr->base + i;
            reg = ctx->set->gpr_to_ra_reg[0][base++];

            ra_set_node_reg(ctx->g, name, reg);
        }
    }

    if (!ra_allocate(ctx->g))
        return -1;

    list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
        ra_block_alloc(ctx, block);
    }

    return 0;
}

int ir3_ra(struct ir3 *ir, enum shader_t type,
        bool frag_coord, bool frag_face)
{
    struct ir3_ra_ctx ctx = {
            .ir = ir,
            .type = type,
            .frag_face = frag_face,
            .set = ir->compiler->set,
    };
    int ret;

    ra_init(&ctx);
    ra_add_interference(&ctx);
    ret = ra_alloc(&ctx);
    ra_destroy(&ctx);

    return ret;
}