/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/u_math.h"
#include "util/register_allocate.h"
#include "util/ralloc.h"
#include "util/bitset.h"

#include "ir3.h"
#include "ir3_compiler.h"
/*
 * Register Assignment:
 *
 * Uses the register_allocate util, which implements graph coloring
 * algo with interference classes.  To handle the cases where we need
 * consecutive registers (for example, texture sample instructions),
 * we model these as larger (double/quad/etc) registers which conflict
 * with the corresponding registers in other classes.
 *
 * Additionally we create classes for half-regs, which do not conflict
 * with the full-reg classes.  We do need at least sizes 1-4 (to deal
 * w/ texture sample instructions output to half-reg).  At the moment
 * we don't create the higher order half-reg classes, as half-reg
 * frequently does not have enough precision for texture coords at
 * higher resolutions.
 *
 * There are some additional cases that we need to handle specially,
 * as the graph coloring algo doesn't understand "partial writes".
 * For example, a sequence like:
 *
 *    add r0.z, ...
 *    sam (f32)(xy)r0.x, ...
 *    ...
 *    sam (f32)(xyzw)r0.w, r0.x, ...   ; 3d texture, so r0.xyz are coord
 *
 * In this scenario, we treat r0.xyz as class size 3, which is written
 * (from a use/def perspective) at the 'add' instruction and ignore the
 * subsequent partial writes to r0.xy.  So the 'add r0.z, ...' is the
 * defining instruction, as it is the first to partially write r0.xyz.
 *
 * Note i965 has a similar scenario, which they solve with a virtual
 * LOAD_PAYLOAD instruction which gets turned into multiple MOV's after
 * register assignment.  But for us that is horrible from a scheduling
 * standpoint.  Instead what we do is use the idea of a 'definer'
 * instruction: the first instruction (lowest ip) to write to the
 * variable is the one we consider from a use/def perspective when
 * building the interference graph.  (Other instructions which write
 * other variable components just define the variable some more.)
 *
 * Arrays of arbitrary size are handled via pre-coloring a consecutive
 * sequence of registers.  Additional scalar (single component) reg
 * names are allocated starting at ctx->class_base[total_class_count]
 * (see arr->base), which are pre-colored.  In the use/def graph direct
 * access is treated as a single element use/def, and indirect access
 * is treated as use or def of all array elements.  (Only the first
 * def is tracked, in case of multiple indirect writes, etc.)
 *
 * TODO arrays that fit in one of the pre-defined class sizes should
 * not need to be pre-colored, but instead could be given a normal
 * vreg name.  (Ignoring this for now since it is a good way to work
 * out the kinks with arbitrary sized arrays.)
 *
 * TODO might be easier for debugging to split this into two passes,
 * the first assigning vreg names in a way that we could ir3_print()
 * the result.
 */
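/* For example (illustrative, not used by the code below): for an array of
 * length 4 whose names start at N, a direct read of element 2 is a use of
 * name N+2 only, while an indirect read through the address register (a0.x)
 * must be treated as a use of all of N+0..N+3, since RA cannot know which
 * element will be selected at runtime.
 */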
static const unsigned class_sizes[] = {
   1, 2, 3, 4,
   4 + 4, /* txd + 1d/2d */
   4 + 6, /* txd + 3d */
};
#define class_count ARRAY_SIZE(class_sizes)
static const unsigned half_class_sizes[] = {
   1, 2, 3, 4,
};
#define half_class_count ARRAY_SIZE(half_class_sizes)
/* seems to just be used for compute shaders?  Seems like vec1 and vec3
 * are sufficient (for now?)
 */
static const unsigned high_class_sizes[] = {
   1, 3,
};
#define high_class_count ARRAY_SIZE(high_class_sizes)
#define total_class_count (class_count + half_class_count + high_class_count)

/* Below a0.x are normal regs.  RA doesn't need to assign a0.x/p0.x. */
#define NUM_REGS            (4 * 48)  /* r0 to r47 */
#define NUM_HIGH_REGS       (4 * 8)   /* r48 to r55 */
#define FIRST_HIGH_REG      (4 * 48)
/* Number of virtual regs in a given class: */
#define CLASS_REGS(i)       (NUM_REGS - (class_sizes[i] - 1))
#define HALF_CLASS_REGS(i)  (NUM_REGS - (half_class_sizes[i] - 1))
#define HIGH_CLASS_REGS(i)  (NUM_HIGH_REGS - (high_class_sizes[i] - 1))

#define HALF_OFFSET         (class_count)
#define HIGH_OFFSET         (class_count + half_class_count)
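/* Worked example of the macros above (illustrative): NUM_REGS is 192 scalar
 * gprs, so a class of size 4 has CLASS_REGS(i) = 192 - (4 - 1) = 189 possible
 * placements, one starting at each scalar position that leaves room for all
 * four consecutive components.
 */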
/* register-set, created one time, used for all shaders: */
struct ir3_ra_reg_set {
   struct ra_regs *regs;
   unsigned int classes[class_count];
   unsigned int half_classes[half_class_count];
   unsigned int high_classes[high_class_count];
   /* maps flat virtual register space to base gpr: */
   uint16_t *ra_reg_to_gpr;
   /* maps cls,gpr to flat virtual register space: */
   uint16_t **gpr_to_ra_reg;
};
static void
build_q_values(unsigned int **q_values, unsigned off,
      const unsigned *sizes, unsigned count)
{
   for (unsigned i = 0; i < count; i++) {
      q_values[i + off] = rzalloc_array(q_values, unsigned, total_class_count);

      /* From register_allocate.c:
       *
       * q(B,C) (indexed by C, B is this register class) in
       * Runeson/Nyström paper.  This is "how many registers of B could
       * the worst choice register from C conflict with".
       *
       * If we just let the register allocation algorithm compute these
       * values, it is extremely expensive.  However, since all of our
       * registers are laid out, we can very easily compute them
       * ourselves.  View the register from C as fixed starting at GRF n
       * somewhere in the middle, and the register from B as sliding back
       * and forth.  Then the first register to conflict from B is the
       * one starting at n - class_size[B] + 1 and the last register to
       * conflict will start at n + class_size[B] - 1.  Therefore, the
       * number of conflicts from B is class_size[B] + class_size[C] - 1.
       *
       *    +-+-+-+-+-+-+     +-+-+-+-+-+-+
       * B | | | | | |n| --> | | | | | | |
       *    +-+-+-+-+-+-+     +-+-+-+-+-+-+
       *                +-+-+-+-+-+
       * C              |n| | | | |
       *                +-+-+-+-+-+
       *
       * (Idea copied from brw_fs_reg_allocate.cpp)
       */
      for (unsigned j = 0; j < count; j++)
         q_values[i + off][j + off] = sizes[i] + sizes[j] - 1;
   }
}
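/* Worked example of the q formula above (illustrative): for B of size 2 and
 * C of size 3, a fixed size-3 register starting at gpr n overlaps size-2
 * registers starting anywhere in [n - 1, n + 2], so q(B,C) = 2 + 3 - 1 = 4.
 */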
/* One-time setup of RA register-set, which describes all the possible
 * "virtual" registers and their interferences.  Ie. double register
 * occupies (and conflicts with) two single registers, and so forth.
 * Since registers do not need to be aligned to their class size, they
 * can conflict with other registers in the same class too.  Ie:
 *
 *    Single (base) |  Double
 *    --------------+---------------
 *       R0         |  D0
 *       R1         |  D0 D1
 *       R2         |     D1 D2
 *       R3         |        D2
 *           .. and so on ..
 *
 * (NOTE the disassembler uses notation like r0.x/y/z/w but those are
 * really just four scalar registers.  Don't let that confuse you.)
 */
struct ir3_ra_reg_set *
ir3_ra_alloc_reg_set(struct ir3_compiler *compiler)
{
   struct ir3_ra_reg_set *set = rzalloc(compiler, struct ir3_ra_reg_set);
   unsigned ra_reg_count, reg, first_half_reg, first_high_reg, base;
   unsigned int **q_values;

   /* calculate # of regs across all classes: */
   ra_reg_count = 0;
   for (unsigned i = 0; i < class_count; i++)
      ra_reg_count += CLASS_REGS(i);
   for (unsigned i = 0; i < half_class_count; i++)
      ra_reg_count += HALF_CLASS_REGS(i);
   for (unsigned i = 0; i < high_class_count; i++)
      ra_reg_count += HIGH_CLASS_REGS(i);

   /* allocate and populate q_values: */
   q_values = ralloc_array(set, unsigned *, total_class_count);

   build_q_values(q_values, 0, class_sizes, class_count);
   build_q_values(q_values, HALF_OFFSET, half_class_sizes, half_class_count);
   build_q_values(q_values, HIGH_OFFSET, high_class_sizes, high_class_count);

   /* allocate the reg-set.. */
   set->regs = ra_alloc_reg_set(set, ra_reg_count, true);
   set->ra_reg_to_gpr = ralloc_array(set, uint16_t, ra_reg_count);
   set->gpr_to_ra_reg = ralloc_array(set, uint16_t *, total_class_count);

   reg = 0;
   for (unsigned i = 0; i < class_count; i++) {
      set->classes[i] = ra_alloc_reg_class(set->regs);

      set->gpr_to_ra_reg[i] = ralloc_array(set, uint16_t, CLASS_REGS(i));

      for (unsigned j = 0; j < CLASS_REGS(i); j++) {
         ra_class_add_reg(set->regs, set->classes[i], reg);

         set->ra_reg_to_gpr[reg] = j;
         set->gpr_to_ra_reg[i][j] = reg;

         for (unsigned br = j; br < j + class_sizes[i]; br++)
            ra_add_transitive_reg_conflict(set->regs, br, reg);

         reg++;
      }
   }

   first_half_reg = reg;
   base = HALF_OFFSET;

   for (unsigned i = 0; i < half_class_count; i++) {
      set->half_classes[i] = ra_alloc_reg_class(set->regs);

      set->gpr_to_ra_reg[base + i] =
            ralloc_array(set, uint16_t, HALF_CLASS_REGS(i));

      for (unsigned j = 0; j < HALF_CLASS_REGS(i); j++) {
         ra_class_add_reg(set->regs, set->half_classes[i], reg);

         set->ra_reg_to_gpr[reg] = j;
         set->gpr_to_ra_reg[base + i][j] = reg;

         for (unsigned br = j; br < j + half_class_sizes[i]; br++)
            ra_add_transitive_reg_conflict(set->regs, br + first_half_reg, reg);

         reg++;
      }
   }

   first_high_reg = reg;
   base = HIGH_OFFSET;

   for (unsigned i = 0; i < high_class_count; i++) {
      set->high_classes[i] = ra_alloc_reg_class(set->regs);

      set->gpr_to_ra_reg[base + i] =
            ralloc_array(set, uint16_t, HIGH_CLASS_REGS(i));

      for (unsigned j = 0; j < HIGH_CLASS_REGS(i); j++) {
         ra_class_add_reg(set->regs, set->high_classes[i], reg);

         set->ra_reg_to_gpr[reg] = j;
         set->gpr_to_ra_reg[base + i][j] = reg;

         for (unsigned br = j; br < j + high_class_sizes[i]; br++)
            ra_add_transitive_reg_conflict(set->regs, br + first_high_reg, reg);

         reg++;
      }
   }

   /* starting a6xx, half precision regs conflict w/ full precision regs: */
   if (compiler->gpu_id >= 600) {
      /* because of transitivity, we can get away with just setting up
       * conflicts between the first class of full and half regs:
       */
      for (unsigned j = 0; j < CLASS_REGS(0) / 2; j++) {
         unsigned freg  = set->gpr_to_ra_reg[0][j];
         unsigned hreg0 = set->gpr_to_ra_reg[HALF_OFFSET][(j * 2) + 0];
         unsigned hreg1 = set->gpr_to_ra_reg[HALF_OFFSET][(j * 2) + 1];

         ra_add_transitive_reg_conflict(set->regs, freg, hreg0);
         ra_add_transitive_reg_conflict(set->regs, freg, hreg1);
      }

      // TODO also need to update q_values, but for now:
      ra_set_finalize(set->regs, NULL);
   } else {
      ra_set_finalize(set->regs, q_values);
   }

   ralloc_free(q_values);

   return set;
}
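/* Sketch of the intended one-time usage (illustration only; the actual call
 * site lives in the compiler setup code, and the 'compiler' variable here is
 * just an assumption):
 *
 *    struct ir3_compiler *compiler = ...;
 *    compiler->set = ir3_ra_alloc_reg_set(compiler);  // once, reused by all shaders
 */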
/* additional block-data (per-block) */
struct ir3_ra_block_data {
   BITSET_WORD *def;        /* variables defined before used in block */
   BITSET_WORD *use;        /* variables used before defined in block */
   BITSET_WORD *livein;     /* which defs reach entry point of block */
   BITSET_WORD *liveout;    /* which defs reach exit point of block */
};

/* additional instruction-data (per-instruction) */
struct ir3_ra_instr_data {
   /* cached instruction 'definer' info: */
   struct ir3_instruction *defn;
   int off, sz, cls;
};
/* register-assign context, per-shader */
struct ir3_ra_ctx {
   struct ir3 *ir;
   gl_shader_stage type;
   bool frag_face;

   struct ir3_ra_reg_set *set;
   struct ra_graph *g;
   unsigned alloc_count;
   /* one per class, plus one slot for arrays: */
   unsigned class_alloc_count[total_class_count + 1];
   unsigned class_base[total_class_count + 1];
   unsigned instr_cnt;
   unsigned *def, *use;     /* def/use table */
   struct ir3_ra_instr_data *instrd;
};
/* does it conflict? */
static bool
intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
{
   return !((a_start >= b_end) || (b_start >= a_end));
}
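/* Note the ranges behave as half-open [start, end): e.g.
 * intersects(0, 4, 4, 8) is false (back-to-back live ranges do not
 * conflict), while intersects(0, 5, 4, 8) is true.
 */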
static bool
is_half(struct ir3_instruction *instr)
{
   return !!(instr->regs[0]->flags & IR3_REG_HALF);
}

static bool
is_high(struct ir3_instruction *instr)
{
   return !!(instr->regs[0]->flags & IR3_REG_HIGH);
}
static int
size_to_class(unsigned sz, bool half, bool high)
{
   if (high) {
      for (unsigned i = 0; i < high_class_count; i++)
         if (high_class_sizes[i] >= sz)
            return i + HIGH_OFFSET;
   } else if (half) {
      for (unsigned i = 0; i < half_class_count; i++)
         if (half_class_sizes[i] >= sz)
            return i + HALF_OFFSET;
   } else {
      for (unsigned i = 0; i < class_count; i++)
         if (class_sizes[i] >= sz)
            return i;
   }
   debug_assert(0);
   return -1;
}
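/* For example (illustrative): size_to_class(3, false, false) returns the
 * size-3 full class, while size_to_class(5, false, false) falls through to
 * the first class big enough, the 4+4 "txd + 1d/2d" class, so a size-5
 * group simply wastes the unused tail of the larger class.
 */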
static bool
writes_gpr(struct ir3_instruction *instr)
{
   if (is_store(instr))
      return false;
   /* is dest a normal temp register: */
   struct ir3_register *reg = instr->regs[0];
   if (reg->flags & (IR3_REG_CONST | IR3_REG_IMMED))
      return false;
   if ((reg->num == regid(REG_A0, 0)) ||
         (reg->num == regid(REG_P0, 0)))
      return false;
   return true;
}
static bool
instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
{
   if (a->flags & IR3_INSTR_UNUSED)
      return false;
   return (a->ip < b->ip);
}
static struct ir3_instruction *
get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
      int *sz, int *off)
{
   struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
   struct ir3_instruction *d = NULL;

   if (id->defn) {
      *sz = id->sz;
      *off = id->off;
      return id->defn;
   }

   if (instr->opc == OPC_META_FI) {
      /* What about the case where collect is subset of array, we
       * need to find the distance between where actual array starts
       * and fanin..  that probably doesn't happen currently.
       */
      struct ir3_register *src;
      int dsz, doff;

      /* note: don't use foreach_ssa_src as this gets called once
       * while assigning regs (which clears SSA flag)
       */
      foreach_src_n(src, n, instr) {
         struct ir3_instruction *dd;

         if (!src->instr)
            continue;

         dd = get_definer(ctx, src->instr, &dsz, &doff);

         if ((!d) || instr_before(dd, d)) {
            d = dd;
            *sz = dsz;
            *off = doff - n;
         }
      }
   } else if (instr->cp.right || instr->cp.left) {
      /* covers also the meta:fo case, which ends up w/ single
       * scalar instructions for each component:
       */
      struct ir3_instruction *f = ir3_neighbor_first(instr);

      /* by definition, the entire sequence forms one linked list
       * of single scalar register nodes (even if some of them may
       * be fanouts from a texture sample (for example) instr.  We
       * just need to walk the list finding the first element of
       * the group defined (lowest ip)
       */
      unsigned cnt = 0;

      /* need to skip over unused in the group: */
      while (f && (f->flags & IR3_INSTR_UNUSED)) {
         f = f->cp.right;
      }

      while (f) {
         if ((!d) || instr_before(f, d))
            d = f;
         if (f == instr)
            *off = cnt;
         f = f->cp.right;
         cnt++;
      }

      *sz = cnt;
   } else {
      /* second case is looking directly at the instruction which
       * produces multiple values (eg, texture sample), rather
       * than the fanout nodes that point back to that instruction.
       * This isn't quite right, because it may be part of a larger
       * group, such as:
       *
       *    sam (f32)(xyzw)r0.x, ...
       *    add r1.x, ...
       *    sam (f32)(xyzw)r2.x, r0.w  <-- (r0.w, r1.x, r1.y)
       *
       * need to come up with a better way to handle that case.
       */
      if (instr->address) {
         *sz = instr->regs[0]->size;
      } else {
         *sz = util_last_bit(instr->regs[0]->wrmask);
      }
      *off = 0;
      d = instr;
   }

   if (d->opc == OPC_META_FO) {
      struct ir3_instruction *dd;
      int dsz, doff;

      dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);

      /* by definition, should come before: */
      debug_assert(instr_before(dd, d));

      *sz = MAX2(*sz, dsz);

      debug_assert(instr->opc == OPC_META_FO);
      *off = MAX2(*off, instr->fo.off);

      d = dd;
   }

   id->defn = d;
   id->sz = *sz;
   id->off = *off;

   return d;
}
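/* Example of the definer walk above (illustrative): for a fanin like
 *
 *    fi = fanin(a, b, c)      ; gathers 3 scalars for a sam coord
 *
 * get_definer() recurses into a, b and c and picks the one with the lowest
 * ip as the definer, with *sz covering the whole group, so the three
 * scalars are treated as one size-3 variable when building interference.
 */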
static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
   list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
      struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

      if (instr->regs_count == 0)
         continue;

      /* couple special cases: */
      if (writes_addr(instr) || writes_pred(instr)) {
         id->cls = -1;
      } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
         id->cls = total_class_count;
      } else {
         id->defn = get_definer(ctx, instr, &id->sz, &id->off);
         id->cls = size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
      }
   }
}
/* give each instruction a name (and ip), and count up the # of names
 * in each class
 */
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
   list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
      struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];

      ctx->instr_cnt++;

      if (instr->regs_count == 0)
         continue;

      if (!writes_gpr(instr))
         continue;

      if (id->defn != instr)
         continue;

      /* arrays which don't fit in one of the pre-defined class
       * sizes are pre-colored:
       */
      if ((id->cls >= 0) && (id->cls < total_class_count)) {
         instr->name = ctx->class_alloc_count[id->cls]++;
         ctx->alloc_count++;
      }
   }
}
static void
ra_init(struct ir3_ra_ctx *ctx)
{
   unsigned n, base;

   ir3_clear_mark(ctx->ir);
   n = ir3_count_instructions(ctx->ir);

   ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);

   list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
      ra_block_find_definers(ctx, block);
   }

   list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
      ra_block_name_instructions(ctx, block);
   }

   /* figure out the base register name for each class.  The
    * actual ra name is class_base[cls] + instr->name;
    */
   ctx->class_base[0] = 0;
   for (unsigned i = 1; i <= total_class_count; i++) {
      ctx->class_base[i] = ctx->class_base[i-1] +
            ctx->class_alloc_count[i-1];
   }

   /* and vreg names for array elements: */
   base = ctx->class_base[total_class_count];
   list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
      arr->base = base;
      ctx->class_alloc_count[total_class_count] += arr->length;
      base += arr->length;
   }
   ctx->alloc_count += ctx->class_alloc_count[total_class_count];

   ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
   ralloc_steal(ctx->g, ctx->instrd);
   ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
   ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
}
static unsigned
__ra_name(struct ir3_ra_ctx *ctx, int cls, struct ir3_instruction *defn)
{
   unsigned name;
   debug_assert(cls >= 0);
   debug_assert(cls < total_class_count);  /* we shouldn't get arrays here.. */
   name = ctx->class_base[cls] + defn->name;
   debug_assert(name < ctx->alloc_count);
   return name;
}

static int
ra_name(struct ir3_ra_ctx *ctx, struct ir3_ra_instr_data *id)
{
   /* TODO handle name mapping for arrays */
   return __ra_name(ctx, id->cls, id->defn);
}
static void
ra_destroy(struct ir3_ra_ctx *ctx)
{
   ralloc_free(ctx->g);
}
static void
ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
   struct ir3_ra_block_data *bd;
   unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);

#define def(name, instr) \
      do { \
         /* defined on first write: */ \
         if (!ctx->def[name]) \
            ctx->def[name] = instr->ip; \
         ctx->use[name] = instr->ip; \
         BITSET_SET(bd->def, name); \
      } while (0);

#define use(name, instr) \
      do { \
         ctx->use[name] = MAX2(ctx->use[name], instr->ip); \
         if (!BITSET_TEST(bd->def, name)) \
            BITSET_SET(bd->use, name); \
      } while (0);

   bd = rzalloc(ctx->g, struct ir3_ra_block_data);

   bd->def     = rzalloc_array(bd, BITSET_WORD, bitset_words);
   bd->use     = rzalloc_array(bd, BITSET_WORD, bitset_words);
   bd->livein  = rzalloc_array(bd, BITSET_WORD, bitset_words);
   bd->liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);

   block->data = bd;

   list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
      struct ir3_instruction *src;
      struct ir3_register *reg;

      if (instr->regs_count == 0)
         continue;

      /* There are a couple special cases to deal with here:
       *
       * fanout: used to split values from a higher class to a lower
       *     class, for example split the results of a texture fetch
       *     into individual scalar values;  We skip over these from
       *     a 'def' perspective, and for a 'use' we walk the chain
       *     up to the defining instruction.
       *
       * fanin: used to collect values from lower class and assemble
       *     them together into a higher class, for example arguments
       *     to texture sample instructions;  We consider these to be
       *     defined at the earliest fanin source.
       *
       * Most of this is handled in the get_definer() helper.
       *
       * In either case, we trace the instruction back to the original
       * definer and consider that as the def/use ip.
       */

      if (writes_gpr(instr)) {
         struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
         struct ir3_register *dst = instr->regs[0];

         if (dst->flags & IR3_REG_ARRAY) {
            struct ir3_array *arr =
               ir3_lookup_array(ctx->ir, dst->array.id);
            unsigned i;

            arr->start_ip = MIN2(arr->start_ip, instr->ip);
            arr->end_ip = MAX2(arr->end_ip, instr->ip);

            /* set the node class now.. in case we don't encounter
             * this array dst again.  From register_alloc algo's
             * perspective, these are all single/scalar regs:
             */
            for (i = 0; i < arr->length; i++) {
               unsigned name = arr->base + i;
               ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
            }

            /* indirect write is treated like a write to all array
             * elements, since we don't know which one is actually
             * written:
             */
            if (dst->flags & IR3_REG_RELATIV) {
               for (i = 0; i < arr->length; i++) {
                  unsigned name = arr->base + i;
                  def(name, instr);
               }
            } else {
               unsigned name = arr->base + dst->array.offset;
               def(name, instr);
            }
         } else if (id->defn == instr) {
            unsigned name = ra_name(ctx, id);

            /* since we are in SSA at this point: */
            debug_assert(!BITSET_TEST(bd->use, name));

            def(name, id->defn);

            if (is_high(id->defn)) {
               ra_set_node_class(ctx->g, name,
                     ctx->set->high_classes[id->cls - HIGH_OFFSET]);
            } else if (is_half(id->defn)) {
               ra_set_node_class(ctx->g, name,
                     ctx->set->half_classes[id->cls - HALF_OFFSET]);
            } else {
               ra_set_node_class(ctx->g, name,
                     ctx->set->classes[id->cls]);
            }
         }
      }

      foreach_src(reg, instr) {
         if (reg->flags & IR3_REG_ARRAY) {
            struct ir3_array *arr =
               ir3_lookup_array(ctx->ir, reg->array.id);
            arr->start_ip = MIN2(arr->start_ip, instr->ip);
            arr->end_ip = MAX2(arr->end_ip, instr->ip);

            /* indirect read is treated like a read from all array
             * elements, since we don't know which one is actually
             * read:
             */
            if (reg->flags & IR3_REG_RELATIV) {
               unsigned i;
               for (i = 0; i < arr->length; i++) {
                  unsigned name = arr->base + i;
                  use(name, instr);
               }
            } else {
               unsigned name = arr->base + reg->array.offset;
               use(name, instr);
               /* NOTE: arrays are not SSA so unconditionally
                * set the use bit:
                */
               BITSET_SET(bd->use, name);
               debug_assert(reg->array.offset < arr->length);
            }
         } else if ((src = ssa(reg)) && writes_gpr(src)) {
            unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
            use(name, instr);
         }
      }
   }
}
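/* Example of the fanout tracing described above (illustrative):
 *
 *    sam (f32)(xyzw)r?.x, ...     ; definer, one size-4 variable
 *    fo = fanout(sam, off=1)      ; second component
 *    add ..., fo, ...
 *
 * the 'add' source resolves through get_definer() back to the 'sam', so the
 * use() extends the live range of the whole size-4 variable instead of
 * creating a separate one for the fanout node.
 */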
static bool
ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
{
   unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
   bool progress = false;

   list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
      struct ir3_ra_block_data *bd = block->data;

      /* update livein: */
      for (unsigned i = 0; i < bitset_words; i++) {
         BITSET_WORD new_livein =
            (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));

         if (new_livein & ~bd->livein[i]) {
            bd->livein[i] |= new_livein;
            progress = true;
         }
      }

      /* update liveout: */
      for (unsigned j = 0; j < ARRAY_SIZE(block->successors); j++) {
         struct ir3_block *succ = block->successors[j];
         struct ir3_ra_block_data *succ_bd;

         if (!succ)
            continue;

         succ_bd = succ->data;

         for (unsigned i = 0; i < bitset_words; i++) {
            BITSET_WORD new_liveout =
               (succ_bd->livein[i] & ~bd->liveout[i]);

            if (new_liveout) {
               bd->liveout[i] |= new_liveout;
               progress = true;
            }
         }
      }
   }

   return progress;
}
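/* The function above is one step of the standard backward liveness
 * dataflow (restated for clarity):
 *
 *    livein(b)  = use(b) | (liveout(b) & ~def(b))
 *    liveout(b) = OR over successors s of b of livein(s)
 *
 * and the caller re-runs it until no bits change (fixed point).
 */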
static void
print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
{
   debug_printf("  %s:", name);
   for (unsigned i = 0; i < cnt; i++) {
      if (BITSET_TEST(bs, i)) {
         debug_printf(" %04u", i);
      }
   }
   debug_printf("\n");
}
static void
ra_add_interference(struct ir3_ra_ctx *ctx)
{
   struct ir3 *ir = ctx->ir;

   /* initialize array live ranges: */
   list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
      arr->start_ip = ~0;
      arr->end_ip = 0;
   }

   /* compute live ranges (use/def) on a block level, also updating
    * block's def/use bitmasks (used below to calculate per-block
    * livein/liveout):
    */
   list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
      ra_block_compute_live_ranges(ctx, block);
   }

   /* update per-block livein/liveout: */
   while (ra_compute_livein_liveout(ctx)) {}

   if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
      debug_printf("AFTER LIVEIN/OUT:\n");
      list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
         struct ir3_ra_block_data *bd = block->data;
         debug_printf("block%u:\n", block_id(block));
         print_bitset(" def", bd->def, ctx->alloc_count);
         print_bitset(" use", bd->use, ctx->alloc_count);
         print_bitset(" l/i", bd->livein, ctx->alloc_count);
         print_bitset(" l/o", bd->liveout, ctx->alloc_count);
      }
      list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
         debug_printf("array%u:\n", arr->id);
         debug_printf("  length:   %u\n", arr->length);
         debug_printf("  start_ip: %u\n", arr->start_ip);
         debug_printf("  end_ip:   %u\n", arr->end_ip);
      }
   }

   /* extend start/end ranges based on livein/liveout info from cfg: */
   list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
      struct ir3_ra_block_data *bd = block->data;

      for (unsigned i = 0; i < ctx->alloc_count; i++) {
         if (BITSET_TEST(bd->livein, i)) {
            ctx->def[i] = MIN2(ctx->def[i], block->start_ip);
            ctx->use[i] = MAX2(ctx->use[i], block->start_ip);
         }

         if (BITSET_TEST(bd->liveout, i)) {
            ctx->def[i] = MIN2(ctx->def[i], block->end_ip);
            ctx->use[i] = MAX2(ctx->use[i], block->end_ip);
         }
      }

      list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
         for (unsigned i = 0; i < arr->length; i++) {
            if (BITSET_TEST(bd->livein, i + arr->base)) {
               arr->start_ip = MIN2(arr->start_ip, block->start_ip);
            }
            if (BITSET_TEST(bd->liveout, i + arr->base)) {
               arr->end_ip = MAX2(arr->end_ip, block->end_ip);
            }
         }
      }
   }

   /* need to fix things up to keep outputs live: */
   for (unsigned i = 0; i < ir->noutputs; i++) {
      struct ir3_instruction *instr = ir->outputs[i];
      unsigned name = ra_name(ctx, &ctx->instrd[instr->ip]);
      ctx->use[name] = ctx->instr_cnt;
   }

   for (unsigned i = 0; i < ctx->alloc_count; i++) {
      for (unsigned j = 0; j < ctx->alloc_count; j++) {
         if (intersects(ctx->def[i], ctx->use[i],
               ctx->def[j], ctx->use[j])) {
            ra_add_node_interference(ctx->g, i, j);
         }
      }
   }
}
/* some instructions need fix-up if dst register is half precision: */
static void fixup_half_instr_dst(struct ir3_instruction *instr)
{
   switch (opc_cat(instr->opc)) {
   case 1: /* move instructions */
      instr->cat1.dst_type = half_type(instr->cat1.dst_type);
      break;
   case 3:
      switch (instr->opc) {
      case OPC_MAD_F32:
         instr->opc = OPC_MAD_F16;
         break;
      case OPC_SEL_B32:
         instr->opc = OPC_SEL_B16;
         break;
      case OPC_SEL_S32:
         instr->opc = OPC_SEL_S16;
         break;
      case OPC_SEL_F32:
         instr->opc = OPC_SEL_F16;
         break;
      case OPC_SAD_S32:
         instr->opc = OPC_SAD_S16;
         break;
      /* instructions may already be fixed up: */
      case OPC_MAD_F16:
      case OPC_SEL_B16:
      case OPC_SEL_S16:
      case OPC_SEL_F16:
      case OPC_SAD_S16:
         break;
      default:
         assert(0);
         break;
      }
      break;
   case 5:
      instr->cat5.type = half_type(instr->cat5.type);
      break;
   }
}
/* some instructions need fix-up if src register is half precision: */
static void fixup_half_instr_src(struct ir3_instruction *instr)
{
   switch (instr->opc) {
   case OPC_MOV:
      instr->cat1.src_type = half_type(instr->cat1.src_type);
      break;
   default:
      break;
   }
}
/* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
 * array access(es) which do not have any previous access to depend
 * on from a scheduling point of view
 */
static void
reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
      struct ir3_instruction *instr)
{
   struct ir3_ra_instr_data *id;

   if (reg->flags & IR3_REG_ARRAY) {
      struct ir3_array *arr =
         ir3_lookup_array(ctx->ir, reg->array.id);
      unsigned name = arr->base + reg->array.offset;
      unsigned r = ra_get_node_reg(ctx->g, name);
      unsigned num = ctx->set->ra_reg_to_gpr[r];

      if (reg->flags & IR3_REG_RELATIV) {
         reg->array.offset = num;
      } else {
         reg->num = num;
         reg->flags &= ~IR3_REG_SSA;
      }

      reg->flags &= ~IR3_REG_ARRAY;
   } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
      unsigned name = ra_name(ctx, id);
      unsigned r = ra_get_node_reg(ctx->g, name);
      unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;

      debug_assert(!(reg->flags & IR3_REG_RELATIV));

      if (is_high(id->defn))
         num += FIRST_HIGH_REG;

      reg->num = num;
      reg->flags &= ~IR3_REG_SSA;

      if (is_half(id->defn))
         reg->flags |= IR3_REG_HALF;
   }
}
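/* Worked example of the mapping above (illustrative): scalar gprs are
 * numbered 4 per register, so if ra_reg_to_gpr[r] == 12 (r3.x) and the
 * value being assigned is component 2 of its group (id->off == 2), the
 * resulting num is 14, i.e. r3.z.
 */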
static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
   list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
      struct ir3_register *reg;

      if (instr->regs_count == 0)
         continue;

      if (writes_gpr(instr)) {
         reg_assign(ctx, instr->regs[0], instr);
         if (instr->regs[0]->flags & IR3_REG_HALF)
            fixup_half_instr_dst(instr);
      }

      foreach_src_n(reg, n, instr) {
         struct ir3_instruction *src = reg->instr;
         /* Note: reg->instr could be null for IR3_REG_ARRAY */
         if (!(src || (reg->flags & IR3_REG_ARRAY)))
            continue;
         reg_assign(ctx, instr->regs[n+1], src);
         if (instr->regs[n+1]->flags & IR3_REG_HALF)
            fixup_half_instr_src(instr);
      }
   }
}
static int
ra_alloc(struct ir3_ra_ctx *ctx)
{
   /* pre-assign array elements:
    */
   list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
      unsigned base = 0;

      if (arr->end_ip == 0)
         continue;

      /* figure out what else we conflict with which has already
       * been assigned:
       */
retry:
      list_for_each_entry (struct ir3_array, arr2, &ctx->ir->array_list, node) {
         if (arr2 == arr)
            break;

         if (arr2->end_ip == 0)
            continue;

         /* if it intersects with liverange AND register range.. */
         if (intersects(arr->start_ip, arr->end_ip,
               arr2->start_ip, arr2->end_ip) &&
            intersects(base, base + arr->length,
               arr2->reg, arr2->reg + arr2->length)) {
            base = MAX2(base, arr2->reg + arr2->length);
            goto retry;
         }
      }

      arr->reg = base;

      for (unsigned i = 0; i < arr->length; i++) {
         unsigned name, reg;

         name = arr->base + i;
         reg = ctx->set->gpr_to_ra_reg[0][base++];

         ra_set_node_reg(ctx->g, name, reg);
      }
   }

   if (!ra_allocate(ctx->g))
      return -1;

   list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
      ra_block_alloc(ctx, block);
   }

   return 0;
}
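/* Worked example of the pre-coloring above (illustrative): given array A
 * (length 4) already placed at base 0 (r0.x..r0.w) and array B (length 2)
 * whose live range overlaps A's, the scan over earlier arrays bumps B's
 * base to 4 (r1.x) and retries, so the two arrays end up pre-colored into
 * disjoint scalar registers.
 */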
int ir3_ra(struct ir3 *ir, gl_shader_stage type,
      bool frag_coord, bool frag_face)
{
   struct ir3_ra_ctx ctx = {
         .ir = ir,
         .type = type,
         .frag_face = frag_face,
         .set = ir->compiler->set,
   };
   int ret;

   ra_init(&ctx);
   ra_add_interference(&ctx);
   ret = ra_alloc(&ctx);
   ra_destroy(&ctx);

   return ret;
}
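/* Sketch of a call from the compile pipeline (illustration only; the
 * surrounding variable names are assumptions, not the real call site):
 *
 *    int ret = ir3_ra(so->ir, so->type, so->frag_coord, so->frag_face);
 *    if (ret)
 *       return ret;   // RA failed, e.g. the graph could not be colored
 */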