2 * Copyright (C) 2019 Collabora, Ltd.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 #include "midgard_ops.h"
27 /* Lowers the invert field on instructions to a dedicated inot (inor)
28 * instruction instead, as invert is not always supported natively by the
32 midgard_lower_invert(compiler_context
*ctx
, midgard_block
*block
)
34 mir_foreach_instr_in_block_safe(block
, ins
) {
35 if (ins
->type
!= TAG_ALU_4
) continue;
36 if (!ins
->invert
) continue;
38 unsigned temp
= make_compiler_temp(ctx
);
40 midgard_instruction
not = {
44 .src
= { temp
, -1, -1 },
45 .dest
= ins
->ssa_args
.dest
,
46 .inline_constant
= true
49 .op
= midgard_alu_op_inor
,
51 .reg_mode
= midgard_reg_mode_32
,
52 .dest_override
= midgard_dest_override_none
,
53 .outmod
= midgard_outmod_int_wrap
,
54 .src1
= vector_alu_srco_unsigned(blank_alu_src
),
55 .src2
= vector_alu_srco_unsigned(zero_alu_src
)
59 ins
->ssa_args
.dest
= temp
;
61 mir_insert_instruction_before(mir_next_op(ins
), not);
65 /* Propagate the .not up to the source */
68 midgard_opt_not_propagate(compiler_context
*ctx
, midgard_block
*block
)
70 bool progress
= false;
72 mir_foreach_instr_in_block_safe(block
, ins
) {
73 if (ins
->type
!= TAG_ALU_4
) continue;
74 if (ins
->alu
.op
!= midgard_alu_op_imov
) continue;
75 if (!ins
->invert
) continue;
76 if (mir_nontrivial_source2_mod_simple(ins
)) continue;
77 if (ins
->ssa_args
.src
[1] & IS_REG
) continue;
79 /* Is it beneficial to propagate? */
80 if (!mir_single_use(ctx
, ins
->ssa_args
.src
[1])) continue;
82 /* We found an imov.not, propagate the invert back */
84 mir_foreach_instr_in_block_from_rev(block
, v
, mir_prev_op(ins
)) {
85 if (v
->ssa_args
.dest
!= ins
->ssa_args
.src
[1]) continue;
86 if (v
->type
!= TAG_ALU_4
) break;
88 v
->invert
= !v
->invert
;
98 /* With that lowering out of the way, we can focus on more interesting
99 * optimizations. One easy one is fusing inverts into bitwise operations:
107 mir_is_bitwise(midgard_instruction
*ins
)
109 switch (ins
->alu
.op
) {
110 case midgard_alu_op_iand
:
111 case midgard_alu_op_ior
:
112 case midgard_alu_op_ixor
:
119 static midgard_alu_op
120 mir_invert_op(midgard_alu_op op
)
123 case midgard_alu_op_iand
:
124 return midgard_alu_op_inand
;
125 case midgard_alu_op_ior
:
126 return midgard_alu_op_inor
;
127 case midgard_alu_op_ixor
:
128 return midgard_alu_op_inxor
;
130 unreachable("Op not invertible");
134 static midgard_alu_op
135 mir_demorgan_op(midgard_alu_op op
)
138 case midgard_alu_op_iand
:
139 return midgard_alu_op_inor
;
140 case midgard_alu_op_ior
:
141 return midgard_alu_op_inand
;
143 unreachable("Op not De Morgan-able");
147 static midgard_alu_op
148 mir_notright_op(midgard_alu_op op
)
151 case midgard_alu_op_iand
:
152 return midgard_alu_op_iandnot
;
153 case midgard_alu_op_ior
:
154 return midgard_alu_op_iornot
;
156 unreachable("Op not right able");
161 midgard_opt_fuse_dest_invert(compiler_context
*ctx
, midgard_block
*block
)
163 bool progress
= false;
165 mir_foreach_instr_in_block_safe(block
, ins
) {
166 /* Search for inverted bitwise */
167 if (ins
->type
!= TAG_ALU_4
) continue;
168 if (!mir_is_bitwise(ins
)) continue;
169 if (!ins
->invert
) continue;
171 ins
->alu
.op
= mir_invert_op(ins
->alu
.op
);
179 /* Next up, we can fuse inverts into the sources of bitwise ops:
181 * ~a & b = b & ~a = iandnot(b, a)
182 * a & ~b = iandnot(a, b)
183 * ~a & ~b = ~(a | b) = inor(a, b)
185 * ~a | b = b | ~a = iornot(b, a)
186 * a | ~b = iornot(a, b)
187 * ~a | ~b = ~(a & b) = inand(a, b)
189 * ~a ^ b = ~(a ^ b) = inxor(a, b)
190 * a ^ ~b = ~(a ^ b) = inxor(a, b)
192 * ~(a ^ b) = inxor(a, b)
196 mir_strip_inverted(compiler_context
*ctx
, unsigned node
)
198 /* Strips and returns the invert off a node */
199 mir_foreach_instr_global(ctx
, ins
) {
200 if (ins
->compact_branch
) continue;
201 if (ins
->ssa_args
.dest
!= node
) continue;
203 bool status
= ins
->invert
;
208 unreachable("Invalid node stripped");
212 midgard_opt_fuse_src_invert(compiler_context
*ctx
, midgard_block
*block
)
214 bool progress
= false;
216 mir_foreach_instr_in_block_safe(block
, ins
) {
217 /* Search for inverted bitwise */
218 if (ins
->type
!= TAG_ALU_4
) continue;
219 if (!mir_is_bitwise(ins
)) continue;
220 if (ins
->invert
) continue;
222 if (ins
->ssa_args
.src
[0] & IS_REG
) continue;
223 if (ins
->ssa_args
.src
[1] & IS_REG
) continue;
224 if (!mir_single_use(ctx
, ins
->ssa_args
.src
[0])) continue;
225 if (!ins
->ssa_args
.inline_constant
&& !mir_single_use(ctx
, ins
->ssa_args
.src
[1])) continue;
227 bool not_a
= mir_strip_inverted(ctx
, ins
->ssa_args
.src
[0]);
229 ins
->ssa_args
.inline_constant
? false :
230 mir_strip_inverted(ctx
, ins
->ssa_args
.src
[1]);
232 /* Edge case: if src0 == src1, it'll've been stripped */
233 if ((ins
->ssa_args
.src
[0] == ins
->ssa_args
.src
[1]) && !ins
->ssa_args
.inline_constant
)
236 progress
|= (not_a
|| not_b
);
239 if (!(not_a
|| not_b
)) continue;
241 bool both
= not_a
&& not_b
;
242 bool left
= not_a
&& !not_b
;
243 bool right
= !not_a
&& not_b
;
245 /* No-op, but we got to strip the inverts */
246 if (both
&& ins
->alu
.op
== midgard_alu_op_ixor
)
250 ins
->alu
.op
= mir_demorgan_op(ins
->alu
.op
);
251 } else if (right
|| (left
&& !ins
->ssa_args
.inline_constant
)) {
254 unsigned temp
= ins
->ssa_args
.src
[0];
255 ins
->ssa_args
.src
[0] = ins
->ssa_args
.src
[1];
256 ins
->ssa_args
.src
[1] = temp
;
259 ins
->alu
.op
= mir_notright_op(ins
->alu
.op
);
260 } else if (left
&& ins
->ssa_args
.inline_constant
) {
261 /* Some special transformations:
263 * ~A & c = ~(~(~A) | (~c)) = ~(A | ~c) = inor(A, ~c)
264 * ~A | c = ~(~(~A) & (~c)) = ~(A & ~c) = inand(A, ~c)
267 ins
->alu
.op
= mir_demorgan_op(ins
->alu
.op
);
268 ins
->inline_constant
= ~ins
->inline_constant
;