/*
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

/* Lowers the invert field on instructions to a dedicated inot (inor)
 * instruction instead, as invert is not always supported natively by the
 * hardware */
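
/* For example (illustrative pseudo-MIR; the textual syntax here is
 * hypothetical, only the structure matters):
 *
 *    iand.not t0, t1, t2
 *
 * becomes
 *
 *    iand t3, t1, t2
 *    inor t0, t3, #0
 *
 * since inor(x, 0) = ~(x | 0) = ~x. The synthesized instruction is
 * zero-initialized apart from the listed fields, so its inline constant
 * supplies the #0 operand. */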

void
midgard_lower_invert(compiler_context *ctx, midgard_block *block)
{
        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_ALU_4) continue;
                if (!ins->invert) continue;

                unsigned temp = make_compiler_temp(ctx);

                midgard_instruction not = {
                        .type = TAG_ALU_4,
                        .mask = ins->mask,
                        .src = { temp, ~0, ~0, ~0 },
                        .swizzle = SWIZZLE_IDENTITY,
                        .dest = ins->dest,
                        .has_inline_constant = true,
                        .alu = {
                                .op = midgard_alu_op_inor,
                                .reg_mode = midgard_reg_mode_32,
                                .dest_override = midgard_dest_override_none,
                                .outmod = midgard_outmod_int_wrap
                        },
                };

                ins->dest = temp;
                ins->invert = false;
                mir_insert_instruction_before(ctx, mir_next_op(ins), not);
        }
}

/* Propagate the .not up to the source */
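
/* For example (illustrative pseudo-MIR; hypothetical syntax):
 *
 *    t = iand a, b              t = iand.not a, b
 *    v = imov.not t      =>     v = imov t
 *
 * moving the invert onto the producer, where later passes can fuse it. */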

bool
midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_ALU_4) continue;
                if (ins->alu.op != midgard_alu_op_imov) continue;
                if (!ins->invert) continue;
                if (mir_nontrivial_source2_mod_simple(ins)) continue;
                if (ins->src[1] & IS_REG) continue;

                /* Is it beneficial to propagate? */
                if (!mir_single_use(ctx, ins->src[1])) continue;

                /* We found an imov.not, propagate the invert back */

                mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
                        if (v->dest != ins->src[1]) continue;
                        if (v->type != TAG_ALU_4) break;

                        v->invert = !v->invert;
                        ins->invert = false;
                        progress |= true;
                        break;
                }
        }

        return progress;
}

/* With that lowering out of the way, we can focus on more interesting
 * optimizations. One easy one is fusing inverts into bitwise operations:
 *
 * ~iand = inand
 * ~ior  = inor
 * ~ixor = inxor
 */
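
/* Concretely, with 4-bit values a = 0b1100 and b = 0b1010:
 *
 *    ~(a & b) = ~0b1000 = 0b0111 = inand(a, b)
 *
 * so an iand whose result is inverted becomes a single inand. */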

static bool
mir_is_bitwise(midgard_instruction *ins)
{
        switch (ins->alu.op) {
        case midgard_alu_op_iand:
        case midgard_alu_op_ior:
        case midgard_alu_op_ixor:
                return true;
        default:
                return false;
        }
}

static midgard_alu_op
mir_invert_op(midgard_alu_op op)
{
        switch (op) {
        case midgard_alu_op_iand:
                return midgard_alu_op_inand;
        case midgard_alu_op_ior:
                return midgard_alu_op_inor;
        case midgard_alu_op_ixor:
                return midgard_alu_op_inxor;
        default:
                unreachable("Op not invertible");
        }
}

static midgard_alu_op
mir_demorgan_op(midgard_alu_op op)
{
        switch (op) {
        case midgard_alu_op_iand:
                return midgard_alu_op_inor;
        case midgard_alu_op_ior:
                return midgard_alu_op_inand;
        default:
                unreachable("Op not De Morgan-able");
        }
}

static midgard_alu_op
mir_notright_op(midgard_alu_op op)
{
        switch (op) {
        case midgard_alu_op_iand:
                return midgard_alu_op_iandnot;
        case midgard_alu_op_ior:
                return midgard_alu_op_iornot;
        default:
                unreachable("Op lacks a not-right form");
        }
}

bool
midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                /* Search for inverted bitwise */
                if (ins->type != TAG_ALU_4) continue;
                if (!mir_is_bitwise(ins)) continue;
                if (!ins->invert) continue;

                ins->alu.op = mir_invert_op(ins->alu.op);
                ins->invert = false;
                progress |= true;
        }

        return progress;
}

/* Next up, we can fuse inverts into the sources of bitwise ops:
 *
 * ~a & b = b & ~a = iandnot(b, a)
 * a & ~b = iandnot(a, b)
 * ~a & ~b = ~(a | b) = inor(a, b)
 *
 * ~a | b = b | ~a = iornot(b, a)
 * a | ~b = iornot(a, b)
 * ~a | ~b = ~(a & b) = inand(a, b)
 *
 * ~a ^ b = ~(a ^ b) = inxor(a, b)
 * a ^ ~b = ~(a ^ b) = inxor(a, b)
 *
 * ~(a ^ b) = inxor(a, b)
 */
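
/* For instance, with 4-bit values a = 0b0011 and b = 0b0101:
 *
 *    a & ~b  = 0b0011 & 0b1010 = 0b0010 = iandnot(a, b)
 *    ~a & ~b = 0b1100 & 0b1010 = 0b1000 = ~(a | b) = inor(a, b)
 *
 * matching the not-right and De Morgan rewrites above. */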

static bool
mir_strip_inverted(compiler_context *ctx, unsigned node)
{
        if (node >= SSA_FIXED_MINIMUM)
                return false;

        /* Strips and returns the invert off a node */
        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;
                if (ins->dest != node) continue;

                bool status = ins->invert;
                ins->invert = false;
                return status;
        }

        unreachable("Invalid node stripped");
}

static bool
is_ssa_or_constant(unsigned node)
{
        return !(node & IS_REG) || (node == SSA_FIXED_REGISTER(26));
}

bool
midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                /* Search for inverted bitwise */
                if (ins->type != TAG_ALU_4) continue;
                if (!mir_is_bitwise(ins)) continue;
                if (ins->invert) continue;

                if (!is_ssa_or_constant(ins->src[0])) continue;
                if (!is_ssa_or_constant(ins->src[1])) continue;
                if (!mir_single_use(ctx, ins->src[0])) continue;
                if (!ins->has_inline_constant && !mir_single_use(ctx, ins->src[1])) continue;

                bool not_a = mir_strip_inverted(ctx, ins->src[0]);
                bool not_b =
                        ins->has_inline_constant ? false :
                        mir_strip_inverted(ctx, ins->src[1]);

                /* Edge case: if src0 == src1, the invert will already have
                 * been stripped by the first call */
                if ((ins->src[0] == ins->src[1]) && !ins->has_inline_constant)
                        not_b = not_a;

                progress |= (not_a || not_b);

                if (!(not_a || not_b)) continue;

                bool both = not_a && not_b;
                bool left = not_a && !not_b;
                bool right = !not_a && not_b;

                /* No-op, but we still had to strip the inverts */
                if (both && ins->alu.op == midgard_alu_op_ixor)
                        continue;

                if (both) {
                        ins->alu.op = mir_demorgan_op(ins->alu.op);
                } else if (right || (left && !ins->has_inline_constant)) {
                        /* Commute arguments */
                        if (left)
                                mir_flip(ins);

                        ins->alu.op = mir_notright_op(ins->alu.op);
                } else if (left && ins->has_inline_constant) {
                        /* Some special transformations:
                         *
                         * ~A & c = ~(~(~A) | (~c)) = ~(A | ~c) = inor(A, ~c)
                         * ~A | c = ~(~(~A) & (~c)) = ~(A & ~c) = inand(A, ~c)
                         */
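                        /* e.g. with 4-bit A = 0b0011 and c = 0b0101:
                         * ~A & c = 0b1100 & 0b0101 = 0b0100, and
                         * inor(A, ~c) = ~(0b0011 | 0b1010) = ~0b1011 = 0b0100 */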

                        ins->alu.op = mir_demorgan_op(ins->alu.op);
                        ins->inline_constant = ~ins->inline_constant;
                }
        }

        return progress;
}

/* Optimizes a .not away when used as the source of a conditional select:
 *
 * csel(a, b, c)  = { b if a,  c if !a }
 * csel(!a, b, c) = { b if !a, c if !(!a) } = { c if a, b if !a } = csel(a, c, b)
 *
 * i.e. csel(!a, b, c) = csel(a, c, b)
 */
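
/* With a = true, csel(!a, b, c) selects c and so does csel(a, c, b); with
 * a = false, both select b. Swapping the selected operands therefore
 * absorbs the invert on the condition. */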

bool
midgard_opt_csel_invert(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_ALU_4) continue;
                if (!OP_IS_CSEL(ins->alu.op)) continue;
                if (!mir_single_use(ctx, ins->src[2])) continue;
                if (!mir_strip_inverted(ctx, ins->src[2])) continue;

                mir_flip(ins);
                progress |= true;
        }

        return progress;
}

static bool
mir_is_inverted(compiler_context *ctx, unsigned node)
{
        mir_foreach_instr_global(ctx, ins) {
                if (ins->compact_branch) continue;
                if (ins->dest != node) continue;

                return ins->invert;
        }

        unreachable("Invalid node passed");
}

/* Optimizes comparisons which invert both arguments
 *
 * ieq(not(a), not(b)) = ieq(a, b)
 * ine(not(a), not(b)) = ine(a, b)
 *
 * This also applies to ilt and ile if we flip the argument order.
 * Proofs below provided by Alyssa Rosenzweig
 *
 * On two's-complement signed integers, not(x) = -(x + 1), so:
 *
 * ( not(A) <= not(B) ) <=> ( -(A + 1) <= -(B + 1) )
 *                      <=> ( A + 1 >= B + 1 )
 *                      <=> ( B <= A )
 *
 * On unsigned comparisons (ult / ule) we can perform the same optimization
 * with the additional restriction that the source registers must
 * have the same size.
 *
 * TODO: We may not need them to be of the same size, if we can
 *       prove that they are the same after sext/zext
 *
 * On n-bit unsigned integers, not(x) = 2^n - x - 1, so:
 *
 * ( not(A) <= not(B) ) <=> ( 2^n - A - 1 <= 2^n - B - 1 )
 *                      <=> ( -A <= -B )
 *                      <=> ( B <= A )
 */
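
/* Sanity check with 8-bit values A = 5, B = 3:
 *
 *    signed:   not(A) = -6, not(B) = -4, and -6 <= -4 holds,
 *              matching B <= A (3 <= 5)
 *    unsigned: not(A) = 250, not(B) = 252, and 250 <= 252 holds,
 *              again matching B <= A */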

bool
midgard_opt_drop_cmp_invert(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_ALU_4) continue;
                if (!OP_IS_INTEGER_CMP(ins->alu.op)) continue;

                if ((ins->src[0] & IS_REG) || (ins->src[1] & IS_REG)) continue;
                if (!mir_single_use(ctx, ins->src[0]) || !mir_single_use(ctx, ins->src[1])) continue;

                bool a_inverted = mir_is_inverted(ctx, ins->src[0]);
                bool b_inverted = mir_is_inverted(ctx, ins->src[1]);

                if (!a_inverted || !b_inverted) continue;
                if (OP_IS_UNSIGNED_CMP(ins->alu.op) && mir_srcsize(ins, 0) != mir_srcsize(ins, 1)) continue;

                mir_strip_inverted(ctx, ins->src[0]);
                mir_strip_inverted(ctx, ins->src[1]);

                if (ins->alu.op != midgard_alu_op_ieq && ins->alu.op != midgard_alu_op_ine)
                        mir_flip(ins);

                progress |= true;
        }

        return progress;
}

/* Optimizes branches with inverted arguments by inverting the
 * branch condition instead of the argument condition.
 */
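
/* For example, if the branch condition was computed as v = inot(u), the
 * invert is stripped from the producer (leaving v = u) and the branch's
 * invert_conditional flag is toggled instead, which is equivalent. */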

bool
midgard_opt_invert_branch(compiler_context *ctx, midgard_block *block)
{
        bool progress = false;

        mir_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != TAG_ALU_4) continue;
                if (!midgard_is_branch_unit(ins->unit)) continue;
                if (!ins->branch.conditional) continue;
                if (ins->src[0] & IS_REG) continue;

                if (mir_strip_inverted(ctx, ins->src[0])) {
                        ins->branch.invert_conditional = !ins->branch.invert_conditional;
                        progress |= true;
                }
        }

        return progress;
}