/*
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "compiler.h"
#include "midgard_ops.h"
/* Lowers the invert field on instructions to a dedicated inot (inor)
 * instruction instead, as invert is not always supported natively by the
 * hardware */
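/* Roughly, the transform looks like this (schematic MIR, not exact Midgard
 * syntax; the .not suffix stands for the invert field):
 *
 *    iand.not r0, r1, r2
 *
 * becomes
 *
 *    iand t,  r1, r2
 *    inor r0, t, #0
 *
 * using a fresh temporary t, since inor(t, 0) == ~(t | 0) == ~t. */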
void
midgard_lower_invert(compiler_context *ctx, midgard_block *block)
{
        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_ALU_4) continue;
                if (!ins->invert) continue;

                unsigned temp = make_compiler_temp(ctx);

                midgard_instruction not = {
                        .type = TAG_ALU_4,
                        .mask = ins->mask,
                        .src = { temp, ~0, ~0 },
                        .dest = ins->dest,
                        .has_inline_constant = true,
                        .alu = {
                                .op = midgard_alu_op_inor,
                                .reg_mode = midgard_reg_mode_32,
                                .dest_override = midgard_dest_override_none,
                                .outmod = midgard_outmod_int_wrap,
                                .src1 = vector_alu_srco_unsigned(blank_alu_src),
                                .src2 = vector_alu_srco_unsigned(zero_alu_src)
                        },
                };

                /* Redirect the original op into the temporary and let the new
                 * inor produce the inverted result in its place */
                ins->dest = temp;
                ins->invert = false;
                mir_insert_instruction_before(ctx, mir_next_op(ins), not);
        }
}
/* Propagate the .not up to the source */
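/* Sketch of the idea (schematic MIR; .not is the invert field): given
 *
 *    iadd     t, a, b
 *    imov.not d, t
 *
 * where t has no other users, the invert migrates onto the producer:
 *
 *    iadd.not t, a, b
 *    imov     d, t
 *
 * leaving the later passes free to fuse it there. */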
bool
midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_ALU_4) continue;
                if (ins->alu.op != midgard_alu_op_imov) continue;
                if (!ins->invert) continue;
                if (mir_nontrivial_source2_mod_simple(ins)) continue;
                if (ins->src[1] & IS_REG) continue;

                /* Is it beneficial to propagate? */
                if (!mir_single_use(ctx, ins->src[1])) continue;

                /* We found an imov.not, propagate the invert back */

                mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
                        if (v->dest != ins->src[1]) continue;
                        if (v->type != TAG_ALU_4) break;

                        v->invert = !v->invert;
                        ins->invert = false;
                        progress |= true;
                        break;
                }
        }

        return progress;
}
/* With that lowering out of the way, we can focus on more interesting
 * optimizations. One easy one is fusing inverts into bitwise operations:
 *
 * ~iand = inand
 * ~ior  = inor
 * ~ixor = inxor
 */
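/* Identifies the bitwise ops that the fusing passes below know how to rewrite */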
static bool
mir_is_bitwise(midgard_instruction *ins)
{
        switch (ins->alu.op) {
        case midgard_alu_op_iand:
        case midgard_alu_op_ior:
        case midgard_alu_op_ixor:
                return true;
        default:
                return false;
        }
}
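/* Maps a bitwise op to the variant that inverts its result (dest fusing) */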
static midgard_alu_op
mir_invert_op(midgard_alu_op op)
{
        switch (op) {
        case midgard_alu_op_iand:
                return midgard_alu_op_inand;
        case midgard_alu_op_ior:
                return midgard_alu_op_inor;
        case midgard_alu_op_ixor:
                return midgard_alu_op_inxor;
        default:
                unreachable("Op not invertible");
        }
}
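/* Maps a bitwise op to its De Morgan dual, used when both sources are
 * inverted: ~a & ~b = ~(a | b), ~a | ~b = ~(a & b) */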
static midgard_alu_op
mir_demorgan_op(midgard_alu_op op)
{
        switch (op) {
        case midgard_alu_op_iand:
                return midgard_alu_op_inor;
        case midgard_alu_op_ior:
                return midgard_alu_op_inand;
        default:
                unreachable("Op not De Morgan-able");
        }
}
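/* Maps a bitwise op to the variant that inverts only its second source */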
static midgard_alu_op
mir_notright_op(midgard_alu_op op)
{
        switch (op) {
        case midgard_alu_op_iand:
                return midgard_alu_op_iandnot;
        case midgard_alu_op_ior:
                return midgard_alu_op_iornot;
        default:
                unreachable("Op not right able");
        }
}
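/* Fuses an invert of a bitwise op's destination into the op itself,
 * per the ~iand = inand table above */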
bool
midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                /* Search for inverted bitwise */
                if (ins->type != TAG_ALU_4) continue;
                if (!mir_is_bitwise(ins)) continue;
                if (!ins->invert) continue;

                ins->alu.op = mir_invert_op(ins->alu.op);
                ins->invert = false;
                progress |= true;
        }

        return progress;
}
/* Next up, we can fuse inverts into the sources of bitwise ops:
 *
 * ~a & b = b & ~a = iandnot(b, a)
 * a & ~b = iandnot(a, b)
 * ~a & ~b = ~(a | b) = inor(a, b)
 *
 * ~a | b = b | ~a = iornot(b, a)
 * a | ~b = iornot(a, b)
 * ~a | ~b = ~(a & b) = inand(a, b)
 *
 * ~a ^ b = ~(a ^ b) = inxor(a, b)
 * a ^ ~b = ~(a ^ b) = inxor(a, b)
 *
 * ~(a ^ b) = inxor(a, b)
 */
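/* For instance (schematic MIR; .not is the invert field), with t single-use:
 *
 *    iand.not t, x, y
 *    ior      d, t, b
 *
 * the invert is stripped off the producer of t and folded into the user:
 *
 *    iand   t, x, y
 *    iornot d, b, t       <- b | ~t, after commuting the sources
 */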
static bool
mir_strip_inverted(compiler_context *ctx, unsigned node)
{
        if (node >= SSA_FIXED_MINIMUM)
                return false;

        /* Strips and returns the invert off a node */
        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;
                if (ins->dest != node) continue;

                bool status = ins->invert;
                ins->invert = false;
                return status;
        }

        unreachable("Invalid node stripped");
}
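/* Accepts SSA values, plus the fixed register r26 (which presumably carries
 * the bundle's embedded constants) */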
static bool
is_ssa_or_constant(unsigned node)
{
        return !(node & IS_REG) || (node == SSA_FIXED_REGISTER(26));
}
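/* Fuses inverts on the sources of a bitwise op into the op itself, following
 * the identity table above */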
bool
midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                /* Search for inverted bitwise */
                if (ins->type != TAG_ALU_4) continue;
                if (!mir_is_bitwise(ins)) continue;
                if (ins->invert) continue;

                if (!is_ssa_or_constant(ins->src[0])) continue;
                if (!is_ssa_or_constant(ins->src[1])) continue;
                if (!mir_single_use(ctx, ins->src[0])) continue;
                if (!ins->has_inline_constant && !mir_single_use(ctx, ins->src[1])) continue;

                bool not_a = mir_strip_inverted(ctx, ins->src[0]);
                bool not_b =
                        ins->has_inline_constant ? false :
                        mir_strip_inverted(ctx, ins->src[1]);

                /* Edge case: if src0 == src1, the first strip already cleared the flag */
                if ((ins->src[0] == ins->src[1]) && !ins->has_inline_constant)
                        not_b = not_a;

                progress |= (not_a || not_b);

                /* Nothing to fuse */
                if (!(not_a || not_b)) continue;

                bool both = not_a && not_b;
                bool left = not_a && !not_b;
                bool right = !not_a && not_b;

                /* No-op, but we got to strip the inverts */
                if (both && ins->alu.op == midgard_alu_op_ixor)
                        continue;

                if (both) {
                        ins->alu.op = mir_demorgan_op(ins->alu.op);
                } else if (right || (left && !ins->has_inline_constant)) {
                        /* Commute arguments so the inverted source ends up second */
                        if (left)
                                mir_flip(ins);

                        ins->alu.op = mir_notright_op(ins->alu.op);
                } else if (left && ins->has_inline_constant) {
                        /* Some special transformations:
                         *
                         * ~A & c = ~(~(~A) | (~c)) = ~(A | ~c) = inor(A, ~c)
                         * ~A | c = ~(~(~A) & (~c)) = ~(A & ~c) = inand(A, ~c)
                         */

                        ins->alu.op = mir_demorgan_op(ins->alu.op);
                        ins->inline_constant = ~ins->inline_constant;
                }
        }

        return progress;
}
/* Optimizes a .not away when used as the source of a conditional select:
 *
 * csel(a, b, c) = { b if a, c if !a }
 * csel(!a, b, c) = { b if !a, c if !(!a) } = { c if a, b if !a } = csel(a, c, b)
 * csel(!a, b, c) = csel(a, c, b)
 *
 * so stripping the invert off the condition just means swapping the two
 * value sources, which mir_flip handles */
bool
midgard_opt_csel_invert(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_ALU_4) continue;
                if (!OP_IS_CSEL(ins->alu.op)) continue;
                if (!mir_single_use(ctx, ins->src[2])) continue;
                if (!mir_strip_inverted(ctx, ins->src[2])) continue;

                mir_flip(ins);
                progress |= true;
        }

        return progress;
}