/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2019 Collabora
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "midgard_ops.h"
#include "util/register_allocate.h"
#include "util/u_math.h"

/* For work registers, we can subdivide in various ways. So we create
 * classes for the various sizes and conflict accordingly, keeping in
 * mind that physical registers are divided along 128-bit boundaries.
 * The important part is that 128-bit boundaries are not crossed.
 *
 * For each 128-bit register, we can subdivide to 32-bits 10 ways:
 * one vec4 (xyzw), two vec3s (xyz, yzw), three vec2s (xy, yz, zw) and
 * four scalars (x, y, z, w), matching the tables below.
 *
 * For each 64-bit register, we can subdivide similarly to 16-bit
 * (TODO: half-float RA, not that we support fp16 yet)
 */

#define WORK_STRIDE 10
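
/* Concretely, the register file is presented to the allocator as
 * (work register count) * WORK_STRIDE virtual nodes, where node
 * (r * WORK_STRIDE + t) means subdivision t of physical register r;
 * index_to_reg() below decodes allocations accordingly. For example,
 * with all 16 work registers available that is 16 * 10 = 160 virtual
 * nodes, the bound used by the selection callback. */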

/* Prepacked masks/swizzles for virtual register types */
static unsigned reg_type_to_mask[WORK_STRIDE] = {
        0xF,                                    /* xyzw */
        0x7, 0x7 << 1,                          /* xyz */
        0x3, 0x3 << 1, 0x3 << 2,                /* xy */
        0x1, 0x1 << 1, 0x1 << 2, 0x1 << 3       /* x */
};

static unsigned reg_type_to_swizzle[WORK_STRIDE] = {
        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_W, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_W, COMPONENT_Z, COMPONENT_W),

        SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Y, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_Z, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
        SWIZZLE(COMPONENT_W, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
};

/* A physical register index plus the mask/swizzle of the lanes we own */

struct phys_reg {
        unsigned reg;
        unsigned mask;
        unsigned swizzle;
};

/* Given the mask/swizzle of both the register and the original source,
 * compose to find the actual mask/swizzle to give the hardware */

static unsigned
compose_writemask(unsigned mask, struct phys_reg reg)
{
        /* Note: the reg mask is guaranteed to be contiguous. So we shift
         * into the X place, compose via a simple AND, and shift back */

        unsigned shift = __builtin_ctz(reg.mask);
        return ((reg.mask >> shift) & mask) << shift;
}
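
/* Illustrative example for compose_writemask() (values assumed, not from
 * a real shader): if the node was allocated the yz slice of a register
 * (reg.mask = 0x6) and the instruction writes the first two components
 * of its virtual register (mask = 0x3), then shift = 1 and the composed
 * hardware mask is ((0x6 >> 1) & 0x3) << 1 = 0x6, i.e. the write lands
 * on the physical yz lanes. */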

static unsigned
compose_swizzle(unsigned swizzle, unsigned mask,
                struct phys_reg reg, struct phys_reg dst)
{
        unsigned out = 0;

        for (unsigned c = 0; c < 4; ++c) {
                unsigned s = (swizzle >> (2*c)) & 0x3;
                unsigned q = (reg.swizzle >> (2*s)) & 0x3;
                out |= q << (2*c);
        }

        /* Based on the register mask, we need to adjust over. E.g. if we're
         * writing to yz, a base swizzle of xy__ becomes _xy_. Save the
         * original first component (x). But to prevent duplicate shifting
         * (only applies to ALU -- mask param is set to xyzw out on L/S to
         * prevent changes), we have to account for the shift inherent to the
         * original writemask */

        unsigned rep = out & 0x3;
        unsigned shift = __builtin_ctz(dst.mask) - __builtin_ctz(mask);
        unsigned shifted = out << (2*shift);

        /* ..but we fill in the gaps so it appears to replicate */

        for (unsigned s = 0; s < shift; ++s)
                shifted |= rep << (2*s);

        return shifted;
}
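
/* Illustrative walk-through of compose_swizzle() (values assumed): say
 * the instruction's squeezed writemask is xy (mask = 0x3) but the node
 * was allocated the yz slice of its register (dst.mask = 0x6). Then
 * shift = 1 - 0 = 1, so a base swizzle of xy__ is slid up to _xy_ and
 * the vacated low lane is backfilled with the saved first component,
 * yielding xxy_ -- exactly the adjustment described in the comment
 * above. */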

/* When we're 'squeezing down' the values in the IR, we maintain a hash
 * from the original (sparse) indices to the new, densely packed
 * temporaries */

static unsigned
find_or_allocate_temp(compiler_context *ctx, unsigned hash)
{
        if ((hash < 0) || (hash >= SSA_FIXED_MINIMUM))
                return hash;

        unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(
                                ctx->hash_to_temp, hash + 1);

        if (temp)
                return temp - 1;

        /* If no temp is found, allocate one */
        temp = ctx->temp_count++;
        ctx->max_hash = MAX2(ctx->max_hash, hash);

        _mesa_hash_table_u64_insert(ctx->hash_to_temp,
                                    hash + 1, (void *) ((uintptr_t) temp + 1));

        return temp;
}
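
/* Note on the +1 biasing in find_or_allocate_temp(): the hash table
 * returns 0 for a missing key, so both keys and stored temps are biased
 * by one. That way temp 0 round-trips correctly and a zero search result
 * can be trusted to mean "not found". (Explanatory note only.) */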

/* Callback for register allocation selection, trivial default for now */

static unsigned int
midgard_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
{
        /* Choose the first available register to minimise register pressure */

        for (int i = 0; i < (16 * WORK_STRIDE); ++i) {
                if (BITSET_TEST(regs, i)) {
                        return i;
                }
        }

        assert(0);
        return 0;
}

/* Helper to return the default phys_reg for a given register */

static struct phys_reg
default_phys_reg(int reg)
{
        struct phys_reg r = {
                .reg = reg,
                .mask = 0xF, /* xyzw */
                .swizzle = 0xE4 /* xyzw */
        };

        return r;
}

/* Determine which physical register, swizzle, and mask a virtual
 * register corresponds to */

static struct phys_reg
index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
{
        /* Check for special cases */
        if (reg >= SSA_FIXED_MINIMUM)
                return default_phys_reg(SSA_REG_FROM_FIXED(reg));
        else if ((reg < 0) || !g)
                return default_phys_reg(REGISTER_UNUSED);

        /* Special cases aside, we pick the underlying register */
        int virt = ra_get_node_reg(g, reg);

        /* Divide out the register and classification */
        int phys = virt / WORK_STRIDE;
        int type = virt % WORK_STRIDE;

        struct phys_reg r = {
                .reg = phys,
                .mask = reg_type_to_mask[type],
                .swizzle = reg_type_to_swizzle[type]
        };

        /* Report that we actually use this register, and return it */
        ctx->work_registers = MAX2(ctx->work_registers, phys);

        return r;
}
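
/* Worked decode for index_to_reg() (numbers assumed for illustration):
 * if the allocator hands back virt = 23, then phys = 23 / 10 = 2 and
 * type = 23 % 10 = 3, i.e. the xy pair of work register r2 with an
 * identity swizzle (reg_type_to_mask[3] = 0x3, reg_type_to_swizzle[3]
 * = xyzw). */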

/* This routine performs the actual register allocation. It should be succeeded
 * by install_registers */

struct ra_graph *
allocate_registers(compiler_context *ctx)
{
        /* The number of vec4 work registers available depends on when the
         * uniforms start, so compute that first */

        int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);

        int virtual_count = work_count * WORK_STRIDE;
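
        /* For instance (illustrative numbers): with uniform_cutoff = 12,
         * four vec4 slots are lost to uniforms, so work_count = 16 - 4 = 12
         * and the allocator sees 12 * WORK_STRIDE = 120 virtual registers;
         * with a cutoff of 8 or less, all 16 work registers stay usable. */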

        /* First, initialize the RA */
        struct ra_regs *regs = ra_alloc_reg_set(NULL, virtual_count, true);

        int work_vec4 = ra_alloc_reg_class(regs);
        int work_vec3 = ra_alloc_reg_class(regs);
        int work_vec2 = ra_alloc_reg_class(regs);
        int work_vec1 = ra_alloc_reg_class(regs);

        unsigned classes[4] = {
                work_vec1,
                work_vec2,
                work_vec3,
                work_vec4
        };

        /* Add the full set of work registers */
        for (unsigned i = 0; i < work_count; ++i) {
                int base = WORK_STRIDE * i;

                /* Build a full set of subdivisions */
                ra_class_add_reg(regs, work_vec4, base);
                ra_class_add_reg(regs, work_vec3, base + 1);
                ra_class_add_reg(regs, work_vec3, base + 2);
                ra_class_add_reg(regs, work_vec2, base + 3);
                ra_class_add_reg(regs, work_vec2, base + 4);
                ra_class_add_reg(regs, work_vec2, base + 5);
                ra_class_add_reg(regs, work_vec1, base + 6);
                ra_class_add_reg(regs, work_vec1, base + 7);
                ra_class_add_reg(regs, work_vec1, base + 8);
                ra_class_add_reg(regs, work_vec1, base + 9);

                for (unsigned a = 0; a < 10; ++a) {
                        unsigned mask1 = reg_type_to_mask[a];

                        for (unsigned b = 0; b < 10; ++b) {
                                unsigned mask2 = reg_type_to_mask[b];

                                /* Subdivisions of the same 128-bit register
                                 * conflict whenever their lane masks overlap */
                                if (mask1 & mask2)
                                        ra_add_reg_conflict(regs,
                                                            base + a, base + b);
                        }
                }
        }
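
        /* Resulting conflict structure (illustrative): within one register,
         * base + 0 (xyzw, mask 0xF) overlaps every other subdivision and so
         * conflicts with all of them, while base + 3 (xy, mask 0x3) and
         * base + 5 (zw, mask 0xC) share no lanes and can hold two different
         * live values at the same time. */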

        /* We're done setting up */
        ra_set_finalize(regs, NULL);

        /* Transform the MIR into squeezed index form */
        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;

                        ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest);
                        ins->ssa_args.src0 = find_or_allocate_temp(ctx, ins->ssa_args.src0);

                        if (!ins->ssa_args.inline_constant)
                                ins->ssa_args.src1 = find_or_allocate_temp(ctx, ins->ssa_args.src1);
                }
        }
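
        /* After this pass every non-fixed index is a dense temporary in
         * [0, ctx->temp_count), which is what lets the liveness arrays and
         * the interference graph below be sized by temp_count alone. */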

        /* No register allocation to do with no SSA */

        if (!ctx->temp_count)
                return NULL;

        /* Let's actually do register allocation */
        int nodes = ctx->temp_count;
        struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);

        /* Determine minimum size needed to hold values, to indirectly
         * determine the register class for each node */

        unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);

        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;
                        if (ins->ssa_args.dest < 0) continue;
                        if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;

                        /* Default to vec4 if we're not sure */
                        int mask = 0xF;

                        if (ins->type == TAG_ALU_4)
                                mask = squeeze_writemask(ins->alu.mask);
                        else if (ins->type == TAG_LOAD_STORE_4)
                                mask = ins->load_store.mask;

                        int class = util_logbase2(mask) + 1;

                        /* Use the largest class if there's ambiguity, this
                         * handles partial writes */

                        int dest = ins->ssa_args.dest;
                        found_class[dest] = MAX2(found_class[dest], class);
                }
        }
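
        /* Class arithmetic spelled out (illustrative): a full xyzw write has
         * mask 0xF, util_logbase2(0xF) = 3, so class 4 (work_vec4); an xyz
         * write (0x7) gives class 3, xy (0x3) class 2 and a lone x (0x1)
         * class 1, selecting work_vec1..work_vec4 via classes[class - 1]. */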

        for (unsigned i = 0; i < ctx->temp_count; ++i) {
                unsigned class = found_class[i];
                if (!class) continue;
                ra_set_node_class(g, i, classes[class - 1]);
        }

        /* Determine liveness */

        int *live_start = malloc(nodes * sizeof(int));
        int *live_end = malloc(nodes * sizeof(int));

        /* Initialize as non-existent */

        for (int i = 0; i < nodes; ++i) {
                live_start[i] = live_end[i] = -1;
        }

        /* Index of the current instruction, counted across the shader */
        int d = 0;

        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;

                        /* Dest is < 0 for st_vary instructions, which break
                         * the usual SSA conventions. Liveness analysis doesn't
                         * make sense on these instructions, so skip them to
                         * avoid memory corruption */

                        if (ins->ssa_args.dest < 0) continue;

                        if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
                                /* If this destination is not yet live, it is
                                 * now since we just wrote it */

                                int dest = ins->ssa_args.dest;

                                if (live_start[dest] == -1)
                                        live_start[dest] = d;
                        }

                        /* Since we just used a source, the source might be
                         * dead now. Scan the rest of the block for
                         * invocations, and if there are none, the source dies */

                        int sources[2] = {
                                ins->ssa_args.src0, ins->ssa_args.src1
                        };

                        for (int src = 0; src < 2; ++src) {
                                int s = sources[src];

                                if (s < 0) continue;
                                if (s >= SSA_FIXED_MINIMUM) continue;

                                if (!mir_is_live_after(ctx, block, ins, s)) {
                                        live_end[s] = d;
                                }
                        }

                        ++d;
                }
        }

        /* If a node still hasn't been killed, kill it now */

        for (int i = 0; i < nodes; ++i) {
                /* live_start == -1 most likely indicates a pinned output */

                if (live_end[i] == -1)
                        live_end[i] = d;
        }

        /* Setup interference between nodes that are live at the same time */

        for (int i = 0; i < nodes; ++i) {
                for (int j = i + 1; j < nodes; ++j) {
                        bool j_overlaps_i = live_start[j] < live_end[i];
                        bool i_overlaps_j = live_end[j] < live_start[i];

                        if (i_overlaps_j || j_overlaps_i)
                                ra_add_node_interference(g, i, j);
                }
        }

        ra_set_select_reg_callback(g, midgard_ra_select_callback, NULL);

        if (!ra_allocate(g)) {
                unreachable("Error allocating registers\n");
        }

        /* Free the scratch liveness arrays and hand the graph on to
         * install_registers */
        free(live_start);
        free(live_end);

        return g;
}

/* Once registers have been decided via register allocation
 * (allocate_registers), we need to rewrite the MIR to use registers instead of
 * SSA indices */

static void
install_registers_instr(
        compiler_context *ctx,
        struct ra_graph *g,
        midgard_instruction *ins)
{
        ssa_args args = ins->ssa_args;

        switch (ins->type) {
        case TAG_ALU_4: {
                int adjusted_src = args.inline_constant ? -1 : args.src1;
                struct phys_reg src1 = index_to_reg(ctx, g, args.src0);
                struct phys_reg src2 = index_to_reg(ctx, g, adjusted_src);
                struct phys_reg dest = index_to_reg(ctx, g, args.dest);

                unsigned mask = squeeze_writemask(ins->alu.mask);
                ins->alu.mask = expand_writemask(compose_writemask(mask, dest));

                /* Adjust the dest mask if necessary. Mostly this is a no-op
                 * but it matters for dot products */
                dest.mask = effective_writemask(&ins->alu);

                midgard_vector_alu_src mod1 =
                        vector_alu_from_unsigned(ins->alu.src1);
                mod1.swizzle = compose_swizzle(mod1.swizzle, mask, src1, dest);
                ins->alu.src1 = vector_alu_srco_unsigned(mod1);

                ins->registers.src1_reg = src1.reg;

                ins->registers.src2_imm = args.inline_constant;

                if (args.inline_constant) {
                        /* Encode inline 16-bit constant. See disassembler for
                         * where the algorithm is from */

                        ins->registers.src2_reg = ins->inline_constant >> 11;

                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
                                       ((lower_11 & 0xFF) << 3);

                        ins->alu.src2 = imm << 2;
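
                        /* Sketch of the split (constant value assumed for
                         * illustration): for a 16-bit constant 0x3C00 (fp16
                         * 1.0), src2_reg = 0x3C00 >> 11 = 0x7, lower_11 =
                         * 0x3C00 & 0xFFF = 0xC00, and imm = ((0xC00 >> 8) &
                         * 0x7) | ((0xC00 & 0xFF) << 3) = 0x4, stored as
                         * src2 = 0x4 << 2 = 0x10. */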
                } else {
                        midgard_vector_alu_src mod2 =
                                vector_alu_from_unsigned(ins->alu.src2);
                        mod2.swizzle = compose_swizzle(
                                               mod2.swizzle, mask, src2, dest);
                        ins->alu.src2 = vector_alu_srco_unsigned(mod2);

                        ins->registers.src2_reg = src2.reg;
                }

                ins->registers.out_reg = dest.reg;
                break;
        }

        case TAG_LOAD_STORE_4: {
                if (OP_IS_STORE_VARY(ins->load_store.op)) {
                        /* TODO: use ssa_args for st_vary */
                        ins->load_store.reg = 0;
                } else {
                        /* Which physical register we read off depends on
                         * whether we are loading or storing -- think about the
                         * logical dataflow */

                        unsigned r = OP_IS_STORE(ins->load_store.op) ?
                                     args.src0 : args.dest;
                        struct phys_reg src = index_to_reg(ctx, g, r);

                        ins->load_store.reg = src.reg;
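
                        /* The compositions below (explanatory note) pass a
                         * full 0xF mask and an identity register so that only
                         * src's own swizzle/mask get folded in; per the
                         * comment in compose_swizzle, the mask-shift
                         * adjustment is deliberately a no-op for load/store. */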

                        ins->load_store.swizzle = compose_swizzle(
                                                          ins->load_store.swizzle, 0xF,
                                                          default_phys_reg(0), src);

                        ins->load_store.mask = compose_writemask(
                                                       ins->load_store.mask, src);
                }

                break;
        }
        }
}

void
install_registers(compiler_context *ctx, struct ra_graph *g)
{
        mir_foreach_block(ctx, block) {
                mir_foreach_instr_in_block(block, ins) {
                        if (ins->compact_branch) continue;
                        install_registers_instr(ctx, g, ins);
                }
        }
}