/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned *swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;
                mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
        }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */
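
/* Illustrative usage note (not describing a specific pass in this file): a
 * copy propagation pass would typically only fold a move into its user when
 * mir_single_use() returns true for the moved value, since otherwise the
 * source must stay live for its other readers. */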

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}

bool
mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        if (is_int) {
                if (ins->src_shift[i]) return true;
        } else {
                if (ins->src_neg[i]) return true;
                if (ins->src_abs[i]) return true;
        }

        if (ins->dest_type != ins->src_types[i]) return true;

        if (check_swizzle) {
                for (unsigned c = 0; c < 16; ++c) {
                        if (!(ins->mask & (1 << c))) continue;
                        if (ins->swizzle[i][c] != c) return true;
                }
        }

        return false;
}

bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);
        unsigned mod = ins->alu.outmod;

        /* Type conversion is a sort of outmod */
        if (ins->dest_type != ins->src_types[1])
                return true;

        if (is_int)
                return mod != midgard_outmod_int_wrap;
        else
                return mod != midgard_outmod_none;
}
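
/* Collapses a per-byte mask into a per-component write mask, assuming every
 * byte within a component agrees. Worked example (added for clarity): with
 * 32-bit components (bits = 32, so 4 bytes per component), the bytemask
 * 0x00F0 covers exactly bytes 4-7 and therefore yields component mask 0x2. */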

unsigned
mir_from_bytemask(uint16_t bytemask, unsigned bits)
{
        unsigned value = 0;
        unsigned count = bits / 8;

        for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
                bool a = (bytemask & (1 << c)) != 0;

                /* Every byte in the component must agree with its first byte */
                for (unsigned q = c; q < c + count; ++q)
                        assert(((bytemask & (1 << q)) != 0) == a);

                value |= (a << d);
        }

        return value;
}

/* Rounds up a bytemask to fill a given component count. Iterate each
 * component, and check if any bytes in the component are masked on */
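
/* Illustrative example (added for clarity): with 32-bit components
 * (bits = 32, bytes = 4), the bytemask 0x0013 touches bytes in components 0
 * and 1, so it rounds up to 0x00FF. */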

uint16_t
mir_round_bytemask_up(uint16_t mask, unsigned bits)
{
        unsigned bytes = bits / 8;
        unsigned maxmask = mask_of(bytes);
        unsigned channels = 16 / bytes;

        for (unsigned c = 0; c < channels; ++c) {
                unsigned submask = maxmask << (c * bytes);

                if (mask & submask)
                        mask |= submask;
        }

        return mask;
}

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        return pan_to_bytemask(type_size, ins->mask);
}

void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        ins->mask = mir_from_bytemask(bytemask, type_size);
}

/* Checks if we should use an upper destination override, rather than the lower
 * one in the IR. Returns zero if no, returns the bytes to shift otherwise */
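
/* Worked example (added for clarity): if the destination type is 32-bit while
 * the instruction operates at a different size, the threshold is 64 / 32 = 2.
 * A write mask of 0xC (components 2-3 only) has two trailing zeroes, so the
 * threshold is reached and we return 2; a mask of 0x3 would return 0 and keep
 * the lower override. */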

unsigned
mir_upper_override(midgard_instruction *ins, unsigned inst_size)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);

        /* If the sizes are the same, there's nothing to override */
        if (type_size == inst_size)
                return 0;

        /* There are 16 bytes per vector, so there are (16/bytes)
         * components per vector. So the magic half is half of
         * (16/bytes), which simplifies to 8/bytes = 8 / (bits / 8) = 64 / bits
         */

        unsigned threshold = 64 / type_size;

        /* How many components did we shift over? */
        unsigned zeroes = __builtin_ctz(ins->mask);

        /* Did we hit the threshold? */
        return (zeroes >= threshold) ? threshold : 0;
}

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 * fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */
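
/* Note (added for clarity): the result is expressed per byte rather than per
 * component. In the example above, if r2 is read as 32-bit, the Z/Y component
 * mask 0x6 expands to the bytemask 0x0FF0 (bytes 4-11). */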

static uint16_t
mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask, unsigned bits)
{
        unsigned cmask = 0;

        for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                if (!(inmask & (1 << c))) continue;
                cmask |= (1 << swizzle[c]);
        }

        return pan_to_bytemask(bits, cmask);
}

uint16_t
mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
{
        if (ins->compact_branch && ins->writeout && (i == 0)) {
                /* Non-ZS writeout uses all components */
                if (!ins->writeout_depth && !ins->writeout_stencil)
                        return 0xFFFF;

                /* For ZS-writeout, if both Z and S are written we need two
                 * components, otherwise we only need one.
                 */
                if (ins->writeout_depth && ins->writeout_stencil)
                        return 0xFF;
                else
                        return 0xF;
        }

        /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi branch??) */
        if (ins->compact_branch && ins->branch.conditional && (i == 0))
                return 0xF;

        /* ALU ops act componentwise so we need to pay attention to
         * their mask. Texture/ldst does not so we don't clamp source
         * readmasks based on the writemask */
        unsigned qmask = (ins->type == TAG_ALU_4) ? ins->mask : ~0;

        /* Handle dot products and things */
        if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                unsigned props = alu_opcode_props[ins->alu.op].props;

                unsigned channel_override = GET_CHANNEL_COUNT(props);

                if (channel_override)
                        qmask = mask_of(channel_override);
        }

        return mir_bytemask_of_read_components_single(ins->swizzle[i], qmask,
                        nir_alu_type_get_type_size(ins->src_types[i]));
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
        uint16_t mask = 0;

        if (node == ~0)
                return 0;

        mir_foreach_src(ins, i) {
                if (ins->src[i] != node) continue;
                mask |= mir_bytemask_of_read_components_index(ins, i);
        }

        return mask;
}

/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So just semantically we need to insert
 * the instruction into a new bundle before/after the bundle of the instruction
 * in question */
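
/* For example (illustrative, not tied to a particular caller), a spill store
 * built during register allocation could be placed with
 *
 *     mir_insert_instruction_after_scheduled(ctx, block, ins, st);
 *
 * where `ins` is the already-scheduled instruction being spilled around and
 * `st` is the new store. The helpers below wrap the new instruction in a
 * one-instruction bundle and splice it into the block's bundle array. */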

static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->alu.op));
                u->unit = UNIT_VMUL;

                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}

static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

midgard_instruction *
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;

        return new.instructions[0];
}

midgard_instruction *
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* We need to grow the bundles array to add our new bundle */
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Find the bundle that we want to insert after */
        unsigned after = mir_bundle_idx_for_ins(tag, block);

        /* All the bundles after that one, we move ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle = bundles + after;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_add(&new.instructions[0]->link, &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;

        return new.instructions[0];
}

/* Flip the first-two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */
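
/* Typical use (illustrative): an optimization pass can flip a commutative op
 * so that a constant or otherwise-special operand ends up in the second
 * source slot before further processing. */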

void
mir_flip(midgard_instruction *ins)
{
        unsigned temp = ins->src[0];
        ins->src[0] = ins->src[1];
        ins->src[1] = temp;

        assert(ins->type == TAG_ALU_4);

        temp = ins->alu.src1;
        ins->alu.src1 = ins->alu.src2;
        ins->alu.src2 = temp;

        temp = ins->src_types[0];
        ins->src_types[0] = ins->src_types[1];
        ins->src_types[1] = temp;

        temp = ins->src_abs[0];
        ins->src_abs[0] = ins->src_abs[1];
        ins->src_abs[1] = temp;

        temp = ins->src_neg[0];
        ins->src_neg[0] = ins->src_neg[1];
        ins->src_neg[1] = temp;

        unsigned temp_swizzle[16];
        memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
        if (ctx->temp_count)
                return; /* Already computed */

        unsigned max_dest = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < SSA_FIXED_MINIMUM)
                        max_dest = MAX2(max_dest, ins->dest + 1);
        }

        ctx->temp_count = max_dest;
}