/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
27 void mir_rewrite_index_src_single(midgard_instruction
*ins
, unsigned old
, unsigned new)
29 for (unsigned i
= 0; i
< ARRAY_SIZE(ins
->ssa_args
.src
); ++i
) {
30 if (ins
->ssa_args
.src
[i
] == old
)
31 ins
->ssa_args
.src
[i
] = new;
35 void mir_rewrite_index_dst_single(midgard_instruction
*ins
, unsigned old
, unsigned new)
37 if (ins
->ssa_args
.dest
== old
)
38 ins
->ssa_args
.dest
= new;
42 mir_get_swizzle(midgard_instruction
*ins
, unsigned idx
)
44 if (ins
->type
== TAG_ALU_4
) {
45 unsigned b
= (idx
== 0) ? ins
->alu
.src1
: ins
->alu
.src2
;
47 midgard_vector_alu_src s
=
48 vector_alu_from_unsigned(b
);
51 } else if (ins
->type
== TAG_LOAD_STORE_4
) {
52 /* Main swizzle of a load is on the destination */
53 if (!OP_IS_STORE(ins
->load_store
.op
))
58 return ins
->load_store
.swizzle
;
62 (idx
== 2) ? ins
->load_store
.arg_2
: ins
->load_store
.arg_1
;
64 return component_to_swizzle(midgard_ldst_select(raw
).component
);
67 unreachable("Unknown load/store source");
69 } else if (ins
->type
== TAG_TEXTURE_4
) {
72 return ins
->texture
.in_reg_swizzle
;
74 /* Swizzle on bias doesn't make sense */
77 unreachable("Unknown texture source");
80 unreachable("Unknown type");
85 mir_set_swizzle(midgard_instruction
*ins
, unsigned idx
, unsigned new)
87 if (ins
->type
== TAG_ALU_4
) {
88 unsigned b
= (idx
== 0) ? ins
->alu
.src1
: ins
->alu
.src2
;
90 midgard_vector_alu_src s
=
91 vector_alu_from_unsigned(b
);
94 unsigned pack
= vector_alu_srco_unsigned(s
);
100 } else if (ins
->type
== TAG_LOAD_STORE_4
) {
101 /* Main swizzle of a load is on the destination */
102 if (!OP_IS_STORE(ins
->load_store
.op
))
107 ins
->load_store
.swizzle
= new;
112 (idx
== 2) ? ins
->load_store
.arg_2
: ins
->load_store
.arg_1
;
114 midgard_ldst_register_select sel
115 = midgard_ldst_select(raw
);
116 sel
.component
= swizzle_to_component(new);
117 uint8_t packed
= midgard_ldst_pack(sel
);
120 ins
->load_store
.arg_2
= packed
;
122 ins
->load_store
.arg_1
= packed
;
130 } else if (ins
->type
== TAG_TEXTURE_4
) {
133 ins
->texture
.in_reg_swizzle
= new;
140 unreachable("Unknown type");
145 mir_rewrite_index_src_single_swizzle(midgard_instruction
*ins
, unsigned old
, unsigned new, unsigned swizzle
)
147 for (unsigned i
= 0; i
< ARRAY_SIZE(ins
->ssa_args
.src
); ++i
) {
148 if (ins
->ssa_args
.src
[i
] != old
) continue;
150 ins
->ssa_args
.src
[i
] = new;
152 mir_set_swizzle(ins
, i
,
153 pan_compose_swizzle(mir_get_swizzle(ins
, i
), swizzle
));
158 mir_rewrite_index_src(compiler_context
*ctx
, unsigned old
, unsigned new)
160 mir_foreach_instr_global(ctx
, ins
) {
161 mir_rewrite_index_src_single(ins
, old
, new);
166 mir_rewrite_index_src_swizzle(compiler_context
*ctx
, unsigned old
, unsigned new, unsigned swizzle
)
168 mir_foreach_instr_global(ctx
, ins
) {
169 mir_rewrite_index_src_single_swizzle(ins
, old
, new, swizzle
);
174 mir_rewrite_index_src_tag(compiler_context
*ctx
, unsigned old
, unsigned new, unsigned tag
)
176 mir_foreach_instr_global(ctx
, ins
) {
177 if (ins
->type
!= tag
)
180 mir_rewrite_index_src_single(ins
, old
, new);
187 mir_rewrite_index_dst(compiler_context
*ctx
, unsigned old
, unsigned new)
189 mir_foreach_instr_global(ctx
, ins
) {
190 mir_rewrite_index_dst_single(ins
, old
, new);
195 mir_rewrite_index_dst_tag(compiler_context
*ctx
, unsigned old
, unsigned new, unsigned tag
)
197 mir_foreach_instr_global(ctx
, ins
) {
198 if (ins
->type
!= tag
)
201 if (ins
->ssa_args
.dest
== old
)
202 ins
->ssa_args
.dest
= new;
209 mir_rewrite_index(compiler_context
*ctx
, unsigned old
, unsigned new)
211 mir_rewrite_index_src(ctx
, old
, new);
212 mir_rewrite_index_dst(ctx
, old
, new);
216 mir_use_count(compiler_context
*ctx
, unsigned value
)
218 unsigned used_count
= 0;
220 mir_foreach_instr_global(ctx
, ins
) {
221 if (mir_has_arg(ins
, value
))
228 /* Checks if a value is used only once (or totally dead), which is an important
229 * heuristic to figure out if certain optimizations are Worth It (TM) */
232 mir_single_use(compiler_context
*ctx
, unsigned value
)
234 return mir_use_count(ctx
, value
) <= 1;
238 mir_nontrivial_raw_mod(midgard_vector_alu_src src
, bool is_int
)
241 return src
.mod
== midgard_int_shift
;
247 mir_nontrivial_mod(midgard_vector_alu_src src
, bool is_int
, unsigned mask
)
249 if (mir_nontrivial_raw_mod(src
, is_int
)) return true;
251 /* size-conversion */
252 if (src
.half
) return true;
255 for (unsigned c
= 0; c
< 4; ++c
) {
256 if (!(mask
& (1 << c
))) continue;
257 if (((src
.swizzle
>> (2*c
)) & 3) != c
) return true;
264 mir_nontrivial_source2_mod(midgard_instruction
*ins
)
266 bool is_int
= midgard_is_integer_op(ins
->alu
.op
);
268 midgard_vector_alu_src src2
=
269 vector_alu_from_unsigned(ins
->alu
.src2
);
271 return mir_nontrivial_mod(src2
, is_int
, ins
->mask
);
275 mir_nontrivial_source2_mod_simple(midgard_instruction
*ins
)
277 bool is_int
= midgard_is_integer_op(ins
->alu
.op
);
279 midgard_vector_alu_src src2
=
280 vector_alu_from_unsigned(ins
->alu
.src2
);
282 return mir_nontrivial_raw_mod(src2
, is_int
) || src2
.half
;
286 mir_nontrivial_outmod(midgard_instruction
*ins
)
288 bool is_int
= midgard_is_integer_op(ins
->alu
.op
);
289 unsigned mod
= ins
->alu
.outmod
;
295 /* Type conversion is a sort of outmod */
296 if (ins
->alu
.dest_override
!= midgard_dest_override_none
)
300 return mod
!= midgard_outmod_int_wrap
;
302 return mod
!= midgard_outmod_none
;
305 /* Checks if an index will be used as a special register -- basically, if we're
306 * used as the input to a non-ALU op */
309 mir_special_index(compiler_context
*ctx
, unsigned idx
)
311 mir_foreach_instr_global(ctx
, ins
) {
312 bool is_ldst
= ins
->type
== TAG_LOAD_STORE_4
;
313 bool is_tex
= ins
->type
== TAG_TEXTURE_4
;
315 if (!(is_ldst
|| is_tex
))
318 if (mir_has_arg(ins
, idx
))
325 /* Is a node written before a given instruction? */
328 mir_is_written_before(compiler_context
*ctx
, midgard_instruction
*ins
, unsigned node
)
330 if ((node
< 0) || (node
>= SSA_FIXED_MINIMUM
))
333 mir_foreach_instr_global(ctx
, q
) {
337 if (q
->ssa_args
.dest
== node
)
/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 *  fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static unsigned
mir_mask_of_read_components_single(unsigned swizzle, unsigned outmask)
{
        unsigned mask = 0;

        for (unsigned c = 0; c < 4; ++c) {
                /* Components masked out of the write are never read */
                if (!(outmask & (1 << c))) continue;

                /* 2 bits of swizzle per output component */
                unsigned comp = (swizzle >> (2 * c)) & 3;
                mask |= (1 << comp);
        }

        return mask;
}
368 mir_source_count(midgard_instruction
*ins
)
370 if (ins
->type
== TAG_ALU_4
) {
371 /* ALU is always binary */
373 } else if (ins
->type
== TAG_LOAD_STORE_4
) {
374 bool load
= !OP_IS_STORE(ins
->load_store
.op
);
375 return (load
? 2 : 3);
376 } else if (ins
->type
== TAG_TEXTURE_4
) {
377 /* Coords, bias.. TODO: Offsets? */
380 unreachable("Invalid instruction type");
385 mir_component_count_implicit(midgard_instruction
*ins
, unsigned i
)
387 if (ins
->type
== TAG_LOAD_STORE_4
) {
388 switch (ins
->load_store
.op
) {
389 /* Address implicitly 64-bit */
390 case midgard_op_ld_int4
:
391 return (i
== 0) ? 1 : 0;
393 case midgard_op_st_int4
:
394 return (i
== 1) ? 1 : 0;
405 mir_mask_of_read_components(midgard_instruction
*ins
, unsigned node
)
409 for (unsigned i
= 0; i
< mir_source_count(ins
); ++i
) {
410 if (ins
->ssa_args
.src
[i
] != node
) continue;
412 unsigned swizzle
= mir_get_swizzle(ins
, i
);
413 unsigned m
= mir_mask_of_read_components_single(swizzle
, ins
->mask
);
415 /* Sometimes multi-arg ops are passed implicitly */
416 unsigned implicit
= mir_component_count_implicit(ins
, i
);
417 assert(implicit
< 2);
419 /* Extend the mask */
421 /* Ensure it's a single bit currently */
422 assert((m
>> __builtin_ctz(m
)) == 0x1);
424 /* Set the next bit to extend one*/
435 mir_ubo_shift(midgard_load_store_op op
)
438 case midgard_op_ld_ubo_char
:
440 case midgard_op_ld_ubo_char2
:
442 case midgard_op_ld_ubo_char4
:
444 case midgard_op_ld_ubo_short4
:
446 case midgard_op_ld_ubo_int4
:
449 unreachable("Invalid op");