/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
#include "util/register_allocate.h"
#include "util/u_math.h"
/* For work registers, we can subdivide in various ways. So we create
 * classes for the various sizes and conflict accordingly, keeping in
 * mind that physical registers are divided along 128-bit boundaries.
 * The important part is that 128-bit boundaries are not crossed.
 *
 * For each 128-bit register, we can subdivide to 32-bits 10 ways
 *
 *  vec4: xyzw
 *  vec3: xyz, yzw
 *  vec2: xy, yz, zw
 *  vec1: x, y, z, w
 *
 * For each 64-bit register, we can subdivide similarly to 16-bit
 * (TODO: half-float RA, not that we support fp16 yet) */

#define WORK_STRIDE 10
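
/* Each physical register i therefore owns the WORK_STRIDE virtual registers
 * [10*i, 10*i + 9]: index 10*i is the full vec4, the next two are the vec3
 * slices, then the three vec2 slices, then the four scalars. For example, r2
 * spans virtual registers 20-29, and virtual register 23 is the xy half of r2
 * (see index_to_reg below). */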

/* Prepacked masks/swizzles for virtual register types */
static unsigned reg_type_to_mask[WORK_STRIDE] = {
        0xF,                                    /* xyzw */
        0x7, 0x7 << 1,                          /* xyz */
        0x3, 0x3 << 1, 0x3 << 2,                /* xy */
        0x1, 0x1 << 1, 0x1 << 2, 0x1 << 3       /* x */
};
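
/* In these masks, bit 0 selects x and bit 3 selects w, so e.g. 0x3 << 1 = 0x6
 * addresses the yz pair and 0x1 << 3 = 0x8 addresses w alone. */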

static unsigned reg_type_to_swizzle[WORK_STRIDE] = {
        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_W, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_W, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_W, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
};

/* Which physical register a node landed in, plus the writemask/swizzle needed
 * to address the allocated components within it */

struct phys_reg {
        unsigned reg;
        unsigned mask;
        unsigned swizzle;
};
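
/* Swizzles here are packed two bits per component, so the identity swizzle
 * xyzw is 0xE4 -- which is also what default_phys_reg() below hands back. */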

/* Given the mask/swizzle of both the register and the original source,
 * compose to find the actual mask/swizzle to give the hardware */

static unsigned
compose_writemask(unsigned mask, struct phys_reg reg)
{
        /* Note: the reg mask is guaranteed to be contiguous. So we shift
         * into the X place, compose via a simple AND, and shift back */

        unsigned shift = __builtin_ctz(reg.mask);
        return ((reg.mask >> shift) & mask) << shift;
}
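
/* For example, with a node allocated to the yz half of a register
 * (reg.mask = 0x6) and an instruction writing only the first component of
 * the virtual value (mask = 0x1), shift is 1 and the composed writemask is
 * ((0x6 >> 1) & 0x1) << 1 = 0x2, i.e. just the physical y lane. */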

static unsigned
compose_swizzle(unsigned swizzle, unsigned mask,
                struct phys_reg reg, struct phys_reg dst)
{
        unsigned out = pan_compose_swizzle(swizzle, reg.swizzle);

        /* Based on the register mask, we need to adjust over. E.g if we're
         * writing to yz, a base swizzle of xy__ becomes _xy_. Save the
         * original first component (x). But to prevent duplicate shifting
         * (only applies to ALU -- mask param is set to xyzw out on L/S to
         * prevent changes), we have to account for the shift inherent to the
         * original writemask */

        unsigned rep = out & 0x3;
        unsigned shift = __builtin_ctz(dst.mask) - __builtin_ctz(mask);
        unsigned shifted = out << (2*shift);

        /* ..but we fill in the gaps so it appears to replicate */

        for (unsigned s = 0; s < shift; ++s)
                shifted |= rep << (2*s);

        return shifted;
}
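
/* Concretely: if the instruction originally wrote xy (mask = 0x3) but the
 * destination got the yz slice (dst.mask = 0x6), shift is 1, so an xy__
 * swizzle slides up one component to line up with yz, and the now-vacant x
 * slot is filled with a copy of the first component. */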

/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg)
{
        struct phys_reg r = {
                .reg = reg,
                .mask = 0xF, /* xyzw */
                .swizzle = 0xE4 /* xyzw */
        };

        return r;
}

/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
{
        /* Check for special cases */
        if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg));
        else if ((reg < 0) || !g)
                return default_phys_reg(REGISTER_UNUSED);

        /* Special cases aside, we pick the underlying register */
        int virt = ra_get_node_reg(g, reg);

        /* Divide out the register and classification */
        int phys = virt / WORK_STRIDE;
        int type = virt % WORK_STRIDE;

        struct phys_reg r = {
                .reg = phys,
                .mask = reg_type_to_mask[type],
                .swizzle = reg_type_to_swizzle[type]
        };

        /* Report that we actually use this register, and return it */

        ctx->work_registers = MAX2(ctx->work_registers, phys);

        return r;
}
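
/* As an example of the decomposition above: virtual register 45 decodes to
 * phys = 4 and type = 5, i.e. the zw half of r4, with mask 0xC and a swizzle
 * whose first component reads z. */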

/* This routine creates a register set. Should be called infrequently since
 * it's slow and can be cached. For legibility, variables are named in terms of
 * work registers, although it is also used to create the register set for
 * special register allocation */

static struct ra_regs *
create_register_set(unsigned work_count, unsigned *classes)
{
        int virtual_count = 32 * WORK_STRIDE;

        /* First, initialize the RA */
        struct ra_regs *regs = ra_alloc_reg_set(NULL, virtual_count, true);

        for (unsigned c = 0; c < NR_REG_CLASSES; ++c) {
                int work_vec4 = ra_alloc_reg_class(regs);
                int work_vec3 = ra_alloc_reg_class(regs);
                int work_vec2 = ra_alloc_reg_class(regs);
                int work_vec1 = ra_alloc_reg_class(regs);

                classes[4*c + 0] = work_vec1;
                classes[4*c + 1] = work_vec2;
                classes[4*c + 2] = work_vec3;
                classes[4*c + 3] = work_vec4;

                /* Special register classes have two registers in them */
                unsigned count = (c == REG_CLASS_WORK) ? work_count : 2;

                unsigned first_reg =
                        (c == REG_CLASS_LDST) ? 26 :
                        (c == REG_CLASS_TEX)  ? 28 : 0;

                /* Add the full set of work registers */
                for (unsigned i = first_reg; i < (first_reg + count); ++i) {
                        int base = WORK_STRIDE * i;

                        /* Build a full set of subdivisions */
                        ra_class_add_reg(regs, work_vec4, base);
                        ra_class_add_reg(regs, work_vec3, base + 1);
                        ra_class_add_reg(regs, work_vec3, base + 2);
                        ra_class_add_reg(regs, work_vec2, base + 3);
                        ra_class_add_reg(regs, work_vec2, base + 4);
                        ra_class_add_reg(regs, work_vec2, base + 5);
                        ra_class_add_reg(regs, work_vec1, base + 6);
                        ra_class_add_reg(regs, work_vec1, base + 7);
                        ra_class_add_reg(regs, work_vec1, base + 8);
                        ra_class_add_reg(regs, work_vec1, base + 9);

                        for (unsigned a = 0; a < 10; ++a) {
                                unsigned mask1 = reg_type_to_mask[a];

                                for (unsigned b = 0; b < 10; ++b) {
                                        unsigned mask2 = reg_type_to_mask[b];

                                        if (mask1 & mask2)
                                                ra_add_reg_conflict(regs,
                                                        base + a, base + b);
                                }
                        }
                }
        }

        /* We're done setting up */
        ra_set_finalize(regs, NULL);

        return regs;
}
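
/* These conflicts are what let independent values share one 128-bit register:
 * the vec4 alias (base + 0) overlaps everything and so conflicts with all nine
 * other aliases, while e.g. the xy alias (base + 3) and the zw alias (base + 5)
 * have disjoint masks and can be assigned to two different nodes at once. */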

/* This routine gets a precomputed register set off the screen if it's able, or
 * otherwise it computes one on the fly */

static struct ra_regs *
get_register_set(struct midgard_screen *screen, unsigned work_count, unsigned **classes)
{
        assert(work_count >= 8);
        assert(work_count <= 16);

        unsigned index = work_count - 8;

        /* Find the reg set */
        struct ra_regs *cached = screen->regs[index];

        if (cached) {
                assert(screen->reg_classes[index]);
                *classes = screen->reg_classes[index];
                return cached;
        }

        /* Otherwise, create one */
        struct ra_regs *created = create_register_set(work_count, screen->reg_classes[index]);

        /* Cache it and use it */
        screen->regs[index] = created;

        *classes = screen->reg_classes[index];
        return created;
}
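
/* Since work_count can only be 8-16, the screen keeps at most nine cached
 * register sets, one per possible work register budget. */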

/* Assign a (special) class, ensuring that it is compatible with whatever class
 * was already set */

static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
        /* Check that we're even a node */
        if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
                return;

        /* First 4 are work, next 4 are load/store.. */
        unsigned current_class = classes[node] >> 2;

        /* Nothing to do */
        if (class == current_class)
                return;

        /* If we're changing, we must not have already assigned a special class
         * to this node */

        assert(current_class == REG_CLASS_WORK);
        assert(REG_CLASS_WORK == 0);

        classes[node] |= (class << 2);
}
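
/* Together with the size pass in allocate_registers, this packs a node's
 * class as (semantic_class << 2) | size. Since create_register_set lays out
 * classes[] as classes[4*c + size], that packed value doubles as the index of
 * the right ra class: e.g. a vec3 load/store node gets
 * classes[4*REG_CLASS_LDST + 2]. */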

/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

struct ra_graph *
allocate_registers(compiler_context *ctx, bool *spilled)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start, so compute that first */
        int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);
        unsigned *classes = NULL;
        struct ra_regs *regs = get_register_set(ctx->screen, work_count, &classes);

        assert(regs != NULL);
        assert(classes != NULL);
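
        /* e.g. with uniform_cutoff == 12, the top four vec4s hold pushed
         * uniforms and only r0-r11 remain allocatable as work registers */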

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        /* Let's actually do register allocation */
        int nodes = ctx->temp_count;
        struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);

        /* Register class (as known to the Mesa register allocator) is actually
         * the product of both semantic class (work, load/store, texture..) and
         * size (vec2/vec3..). First, we'll go through and determine the
         * minimum size needed to hold values */

        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;
                if (ins->ssa_args.dest < 0) continue;
                if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;

                /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
                int class = util_logbase2(ins->mask);

                /* Use the largest class if there's ambiguity, this
                 * handles partial writes */

                int dest = ins->ssa_args.dest;
                found_class[dest] = MAX2(found_class[dest], class);
        }
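
        /* util_logbase2 of the writemask gives the widest component touched,
         * so a mask of 0x7 (xyz) yields size 2; a later partial write of just
         * .x keeps the vec3 size thanks to the MAX2 */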

        /* Next, we'll determine semantic class. We default to zero (work).
         * But, if we're used with a special operation, that will force us to a
         * particular class. Each node must be assigned to exactly one class; a
         * prepass before RA should have lowered what-would-have-been
         * multiclass nodes into a series of moves to break it up into multiple
         * nodes */

        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;

                /* Check if this operation imposes any classes */

                if (ins->type == TAG_LOAD_STORE_4) {
                        set_class(found_class, ins->ssa_args.src0, REG_CLASS_LDST);
                        set_class(found_class, ins->ssa_args.src1, REG_CLASS_LDST);
                }
        }

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                unsigned class = found_class[i];
                if (!class) continue;
                ra_set_node_class(g, i, classes[class]);
        }

        /* Determine liveness */

        int *live_start = malloc(nodes * sizeof(int));
        int *live_end = malloc(nodes * sizeof(int));

        /* Initialize as non-existent */

        for (int i = 0; i < nodes; ++i) {
                live_start[i] = live_end[i] = -1;
        }

        int d = 0;
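
        /* d is a running instruction index across the whole program, so each
         * node ends up with a [live_start, live_end] interval: live_start is
         * set at its first write, live_end at its last use. A node first
         * written at instruction 3 and last read at instruction 7 is live
         * over [3, 7]. */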

        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;

                        /* Dest is < 0 for st_vary instructions, which break
                         * the usual SSA conventions. Liveness analysis doesn't
                         * make sense on these instructions, so skip them to
                         * avoid memory corruption */

                        if (ins->ssa_args.dest < 0) continue;

                        if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
                                /* If this destination is not yet live, it is
                                 * now since we just wrote it */

                                int dest = ins->ssa_args.dest;

                                if (live_start[dest] == -1)
                                        live_start[dest] = d;
                        }

                        /* Since we just used a source, the source might be
                         * dead now. Scan the rest of the block for
                         * invocations, and if there are none, the source
                         * dies */

                        int sources[2] = {
                                ins->ssa_args.src0, ins->ssa_args.src1
                        };

                        for (int src = 0; src < 2; ++src) {
                                int s = sources[src];

                                if (ins->ssa_args.inline_constant && src == 1)
                                        continue;

                                if (s < 0) continue;
                                if (s >= SSA_FIXED_MINIMUM) continue;

                                if (!mir_is_live_after(ctx, block, ins, s)) {
                                        live_end[s] = d;
                                }
                        }

                        ++d;
                }
        }

        /* If a node still hasn't been killed, kill it now */

        for (int i = 0; i < nodes; ++i) {
                /* live_start == -1 most likely indicates a pinned output */

                if (live_end[i] == -1)
                        live_end[i] = live_start[i] + 1;
        }

        /* Setup interference between nodes that are live at the same time */

        for (int i = 0; i < nodes; ++i) {
                for (int j = i + 1; j < nodes; ++j) {
                        bool j_overlaps_i = live_start[j] < live_end[i];
                        bool i_overlaps_j = live_end[j] < live_start[i];

                        if (i_overlaps_j || j_overlaps_i)
                                ra_add_node_interference(g, i, j);
                }
        }

        /* Cleanup */
        free(live_start);
        free(live_end);

        if (!ra_allocate(g)) {
                *spilled = true;
        } else {
                *spilled = false;
        }

        /* Whether we were successful or not, report the graph so we can
         * compute spill nodes */

        return g;
}

/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * indices */

static void
install_registers_instr(
        compiler_context *ctx,
        struct ra_graph *g,
        midgard_instruction *ins)
{
        ssa_args args = ins->ssa_args;

        switch (ins->type) {
        case TAG_ALU_4: {
                int adjusted_src = args.inline_constant ? -1 : args.src1;
                struct phys_reg src1 = index_to_reg(ctx, g, args.src0);
                struct phys_reg src2 = index_to_reg(ctx, g, adjusted_src);
                struct phys_reg dest = index_to_reg(ctx, g, args.dest);

                unsigned uncomposed_mask = ins->mask;
                ins->mask = compose_writemask(uncomposed_mask, dest);

                /* Adjust the dest mask if necessary. Mostly this is a no-op
                 * but it matters for dot products */
                dest.mask = effective_writemask(&ins->alu, ins->mask);

                midgard_vector_alu_src mod1 =
                        vector_alu_from_unsigned(ins->alu.src1);
                mod1.swizzle = compose_swizzle(mod1.swizzle, uncomposed_mask, src1, dest);
                ins->alu.src1 = vector_alu_srco_unsigned(mod1);

                ins->registers.src1_reg = src1.reg;

                ins->registers.src2_imm = args.inline_constant;

                if (args.inline_constant) {
                        /* Encode inline 16-bit constant. See disassembler for
                         * where the algorithm is from */

                        ins->registers.src2_reg = ins->inline_constant >> 11;

                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
                                       ((lower_11 & 0xFF) << 3);

                        ins->alu.src2 = imm << 2;
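
                        /* For instance, an inline constant of 0x3C00 (1.0 as
                         * fp16) yields src2_reg = 0x3C00 >> 11 = 7, lower
                         * bits 0x400, imm = (0x400 >> 8) & 0x7 = 4, and
                         * alu.src2 = 4 << 2 = 0x10 */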
                } else {
                        midgard_vector_alu_src mod2 =
                                vector_alu_from_unsigned(ins->alu.src2);
                        mod2.swizzle = compose_swizzle(
                                mod2.swizzle, uncomposed_mask, src2, dest);
                        ins->alu.src2 = vector_alu_srco_unsigned(mod2);

                        ins->registers.src2_reg = src2.reg;
                }

                ins->registers.out_reg = dest.reg;
                break;
        }

        case TAG_LOAD_STORE_4: {
                if (OP_IS_STORE_R26(ins->load_store.op)) {
                        /* TODO: use ssa_args for st_vary */
                        ins->load_store.reg = 0;
                } else {
                        /* Which physical register we read off depends on
                         * whether we are loading or storing -- think about the
                         * logical dataflow */

                        unsigned r = OP_IS_STORE(ins->load_store.op) ?
                                     args.src0 : args.dest;
                        struct phys_reg src = index_to_reg(ctx, g, r);

                        ins->load_store.reg = src.reg;

                        ins->load_store.swizzle = compose_swizzle(
                                ins->load_store.swizzle, 0xF,
                                default_phys_reg(0), src);

                        ins->mask = compose_writemask(
                                ins->mask, src);
                }

                break;
        }

        default:
                break;
        }
}

void
install_registers(compiler_context *ctx, struct ra_graph *g)
{
        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;
                        install_registers_instr(ctx, g, ins);
                }
        }
}
);