/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
27 void mir_rewrite_index_src_single(midgard_instruction
*ins
, unsigned old
, unsigned new)
29 for (unsigned i
= 0; i
< ARRAY_SIZE(ins
->ssa_args
.src
); ++i
) {
30 if (ins
->ssa_args
.src
[i
] == old
)
31 ins
->ssa_args
.src
[i
] = new;
36 mir_get_swizzle(midgard_instruction
*ins
, unsigned idx
)
38 if (ins
->type
== TAG_ALU_4
) {
39 unsigned b
= (idx
== 0) ? ins
->alu
.src1
: ins
->alu
.src2
;
41 midgard_vector_alu_src s
=
42 vector_alu_from_unsigned(b
);
45 } else if (ins
->type
== TAG_LOAD_STORE_4
) {
46 /* Main swizzle of a load is on the destination */
47 if (!OP_IS_STORE(ins
->load_store
.op
))
52 return ins
->load_store
.swizzle
;
56 (idx
== 2) ? ins
->load_store
.arg_2
: ins
->load_store
.arg_1
;
58 return component_to_swizzle(midgard_ldst_select(raw
).component
);
61 unreachable("Unknown load/store source");
63 } else if (ins
->type
== TAG_TEXTURE_4
) {
66 return ins
->texture
.in_reg_swizzle
;
68 /* Swizzle on bias doesn't make sense */
71 unreachable("Unknown texture source");
74 unreachable("Unknown type");
79 mir_set_swizzle(midgard_instruction
*ins
, unsigned idx
, unsigned new)
81 if (ins
->type
== TAG_ALU_4
) {
82 unsigned b
= (idx
== 0) ? ins
->alu
.src1
: ins
->alu
.src2
;
84 midgard_vector_alu_src s
=
85 vector_alu_from_unsigned(b
);
88 unsigned pack
= vector_alu_srco_unsigned(s
);
94 } else if (ins
->type
== TAG_LOAD_STORE_4
) {
95 /* Main swizzle of a load is on the destination */
96 if (!OP_IS_STORE(ins
->load_store
.op
))
101 ins
->load_store
.swizzle
= new;
106 (idx
== 2) ? ins
->load_store
.arg_2
: ins
->load_store
.arg_1
;
108 midgard_ldst_register_select sel
109 = midgard_ldst_select(raw
);
110 sel
.component
= swizzle_to_component(new);
111 uint8_t packed
= midgard_ldst_pack(sel
);
114 ins
->load_store
.arg_2
= packed
;
116 ins
->load_store
.arg_1
= packed
;
124 } else if (ins
->type
== TAG_TEXTURE_4
) {
127 ins
->texture
.in_reg_swizzle
= new;
134 unreachable("Unknown type");
139 mir_rewrite_index_src_single_swizzle(midgard_instruction
*ins
, unsigned old
, unsigned new, unsigned swizzle
)
141 for (unsigned i
= 0; i
< ARRAY_SIZE(ins
->ssa_args
.src
); ++i
) {
142 if (ins
->ssa_args
.src
[i
] != old
) continue;
144 ins
->ssa_args
.src
[i
] = new;
146 mir_set_swizzle(ins
, i
,
147 pan_compose_swizzle(mir_get_swizzle(ins
, i
), swizzle
));
152 mir_rewrite_index_src(compiler_context
*ctx
, unsigned old
, unsigned new)
154 mir_foreach_instr_global(ctx
, ins
) {
155 mir_rewrite_index_src_single(ins
, old
, new);
160 mir_rewrite_index_src_swizzle(compiler_context
*ctx
, unsigned old
, unsigned new, unsigned swizzle
)
162 mir_foreach_instr_global(ctx
, ins
) {
163 mir_rewrite_index_src_single_swizzle(ins
, old
, new, swizzle
);
168 mir_rewrite_index_src_tag(compiler_context
*ctx
, unsigned old
, unsigned new, unsigned tag
)
170 mir_foreach_instr_global(ctx
, ins
) {
171 if (ins
->type
!= tag
)
174 mir_rewrite_index_src_single(ins
, old
, new);
181 mir_rewrite_index_dst(compiler_context
*ctx
, unsigned old
, unsigned new)
183 mir_foreach_instr_global(ctx
, ins
) {
184 if (ins
->ssa_args
.dest
== old
)
185 ins
->ssa_args
.dest
= new;
190 mir_rewrite_index_dst_tag(compiler_context
*ctx
, unsigned old
, unsigned new, unsigned tag
)
192 mir_foreach_instr_global(ctx
, ins
) {
193 if (ins
->type
!= tag
)
196 if (ins
->ssa_args
.dest
== old
)
197 ins
->ssa_args
.dest
= new;
204 mir_rewrite_index(compiler_context
*ctx
, unsigned old
, unsigned new)
206 mir_rewrite_index_src(ctx
, old
, new);
207 mir_rewrite_index_dst(ctx
, old
, new);
211 mir_use_count(compiler_context
*ctx
, unsigned value
)
213 unsigned used_count
= 0;
215 mir_foreach_instr_global(ctx
, ins
) {
216 if (mir_has_arg(ins
, value
))
223 /* Checks if a value is used only once (or totally dead), which is an important
224 * heuristic to figure out if certain optimizations are Worth It (TM) */
227 mir_single_use(compiler_context
*ctx
, unsigned value
)
229 return mir_use_count(ctx
, value
) <= 1;
233 mir_nontrivial_raw_mod(midgard_vector_alu_src src
, bool is_int
)
236 return src
.mod
== midgard_int_shift
;
242 mir_nontrivial_mod(midgard_vector_alu_src src
, bool is_int
, unsigned mask
)
244 if (mir_nontrivial_raw_mod(src
, is_int
)) return true;
246 /* size-conversion */
247 if (src
.half
) return true;
250 for (unsigned c
= 0; c
< 4; ++c
) {
251 if (!(mask
& (1 << c
))) continue;
252 if (((src
.swizzle
>> (2*c
)) & 3) != c
) return true;
259 mir_nontrivial_source2_mod(midgard_instruction
*ins
)
261 bool is_int
= midgard_is_integer_op(ins
->alu
.op
);
263 midgard_vector_alu_src src2
=
264 vector_alu_from_unsigned(ins
->alu
.src2
);
266 return mir_nontrivial_mod(src2
, is_int
, ins
->mask
);
270 mir_nontrivial_source2_mod_simple(midgard_instruction
*ins
)
272 bool is_int
= midgard_is_integer_op(ins
->alu
.op
);
274 midgard_vector_alu_src src2
=
275 vector_alu_from_unsigned(ins
->alu
.src2
);
277 return mir_nontrivial_raw_mod(src2
, is_int
) || src2
.half
;
281 mir_nontrivial_outmod(midgard_instruction
*ins
)
283 bool is_int
= midgard_is_integer_op(ins
->alu
.op
);
284 unsigned mod
= ins
->alu
.outmod
;
290 /* Type conversion is a sort of outmod */
291 if (ins
->alu
.dest_override
!= midgard_dest_override_none
)
295 return mod
!= midgard_outmod_int_wrap
;
297 return mod
!= midgard_outmod_none
;
300 /* Checks if an index will be used as a special register -- basically, if we're
301 * used as the input to a non-ALU op */
304 mir_special_index(compiler_context
*ctx
, unsigned idx
)
306 mir_foreach_instr_global(ctx
, ins
) {
307 bool is_ldst
= ins
->type
== TAG_LOAD_STORE_4
;
308 bool is_tex
= ins
->type
== TAG_TEXTURE_4
;
310 if (!(is_ldst
|| is_tex
))
313 if (mir_has_arg(ins
, idx
))
320 /* Is a node written before a given instruction? */
323 mir_is_written_before(compiler_context
*ctx
, midgard_instruction
*ins
, unsigned node
)
325 if ((node
< 0) || (node
>= SSA_FIXED_MINIMUM
))
328 mir_foreach_instr_global(ctx
, q
) {
332 if (q
->ssa_args
.dest
== node
)
/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 * fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static unsigned
mir_mask_of_read_components_single(unsigned swizzle, unsigned outmask)
{
        unsigned mask = 0;

        for (unsigned c = 0; c < 4; ++c) {
                if (!(outmask & (1 << c))) continue;

                /* Which source component feeds output component c? */
                unsigned comp = (swizzle >> (2*c)) & 3;
                mask |= (1 << comp);
        }

        return mask;
}
363 mir_source_count(midgard_instruction
*ins
)
365 if (ins
->type
== TAG_ALU_4
) {
366 /* ALU is always binary */
368 } else if (ins
->type
== TAG_LOAD_STORE_4
) {
369 bool load
= !OP_IS_STORE(ins
->load_store
.op
);
370 return (load
? 2 : 3);
371 } else if (ins
->type
== TAG_TEXTURE_4
) {
372 /* Coords, bias.. TODO: Offsets? */
375 unreachable("Invalid instruction type");
380 mir_component_count_implicit(midgard_instruction
*ins
, unsigned i
)
382 if (ins
->type
== TAG_LOAD_STORE_4
) {
383 switch (ins
->load_store
.op
) {
384 /* Address implicitly 64-bit */
385 case midgard_op_ld_int4
:
386 return (i
== 0) ? 1 : 0;
388 case midgard_op_st_int4
:
389 return (i
== 1) ? 1 : 0;
400 mir_mask_of_read_components(midgard_instruction
*ins
, unsigned node
)
404 for (unsigned i
= 0; i
< mir_source_count(ins
); ++i
) {
405 if (ins
->ssa_args
.src
[i
] != node
) continue;
407 unsigned swizzle
= mir_get_swizzle(ins
, i
);
408 unsigned m
= mir_mask_of_read_components_single(swizzle
, ins
->mask
);
410 /* Sometimes multi-arg ops are passed implicitly */
411 unsigned implicit
= mir_component_count_implicit(ins
, i
);
412 assert(implicit
< 2);
414 /* Extend the mask */
416 /* Ensure it's a single bit currently */
417 assert((m
>> __builtin_ctz(m
)) == 0x1);
419 /* Set the next bit to extend one*/
430 mir_ubo_shift(midgard_load_store_op op
)
433 case midgard_op_ld_ubo_char
:
435 case midgard_op_ld_ubo_char2
:
437 case midgard_op_ld_ubo_char4
:
439 case midgard_op_ld_ubo_short4
:
441 case midgard_op_ld_ubo_int4
:
444 unreachable("Invalid op");