From b821e1b85e9a2325e3ee3048ca25476ac3b32ff6 Mon Sep 17 00:00:00 2001
From: Alyssa Rosenzweig
Date: Fri, 26 Jul 2019 13:08:54 -0700
Subject: [PATCH] pan/midgard: Fuse invert into bitwise ops

We use the new invert flag to produce ops like inand.

Signed-off-by: Alyssa Rosenzweig
---
 src/panfrost/midgard/compiler.h           |  1 +
 src/panfrost/midgard/midgard_compile.c    |  1 +
 src/panfrost/midgard/midgard_opt_invert.c | 55 +++++++++++++++++++++++
 3 files changed, 57 insertions(+)

diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h
index f428db3123d..b5231f3075b 100644
--- a/src/panfrost/midgard/compiler.h
+++ b/src/panfrost/midgard/compiler.h
@@ -576,5 +576,6 @@ bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block
 void midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g);
 
 void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);
 
 #endif
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index a35b43faee7..f0b3dde2754 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -2357,6 +2357,7 @@ midgard_compile_shader_nir(struct midgard_screen *screen, nir_shader *nir, midga
                         progress |= midgard_opt_dead_code_eliminate(ctx, block);
                         progress |= midgard_opt_combine_projection(ctx, block);
                         progress |= midgard_opt_varying_projection(ctx, block);
+                        progress |= midgard_opt_fuse_dest_invert(ctx, block);
                 }
         } while (progress);
 
diff --git a/src/panfrost/midgard/midgard_opt_invert.c b/src/panfrost/midgard/midgard_opt_invert.c
index 1e6c5b383ea..aab64a3c3b5 100644
--- a/src/panfrost/midgard/midgard_opt_invert.c
+++ b/src/panfrost/midgard/midgard_opt_invert.c
@@ -62,3 +62,58 @@ midgard_lower_invert(compiler_context *ctx, midgard_block *block)
                 mir_insert_instruction_before(mir_next_op(ins), not);
         }
 }
+
+/* With that lowering out of the way, we can focus on more interesting
+ * optimizations. One easy one is fusing inverts into bitwise operations:
+ *
+ * ~iand = inand
+ * ~ior = inor
+ * ~ixor = inxor
+ */
+
+static bool
+mir_is_bitwise(midgard_instruction *ins)
+{
+        switch (ins->alu.op) {
+        case midgard_alu_op_iand:
+        case midgard_alu_op_ior:
+        case midgard_alu_op_ixor:
+                return true;
+        default:
+                return false;
+        }
+}
+
+static midgard_alu_op
+mir_invert_op(midgard_alu_op op)
+{
+        switch (op) {
+        case midgard_alu_op_iand:
+                return midgard_alu_op_inand;
+        case midgard_alu_op_ior:
+                return midgard_alu_op_inor;
+        case midgard_alu_op_ixor:
+                return midgard_alu_op_inxor;
+        default:
+                unreachable("Op not invertible");
+        }
+}
+
+bool
+midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block)
+{
+        bool progress = false;
+
+        mir_foreach_instr_in_block_safe(block, ins) {
+                /* Search for inverted bitwise */
+                if (ins->type != TAG_ALU_4) continue;
+                if (!mir_is_bitwise(ins)) continue;
+                if (!ins->invert) continue;
+
+                ins->alu.op = mir_invert_op(ins->alu.op);
+                ins->invert = false;
+                progress |= true;
+        }
+
+        return progress;
+}
-- 
2.30.2
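
For readers unfamiliar with the pass structure, the standalone sketch below illustrates the same peephole idea outside the patch: a bitwise instruction carrying a destination-invert flag is rewritten to the corresponding inverted opcode (NAND/NOR/XNOR) and the flag is cleared. The `toy_op`, `toy_ins`, and `fuse_dest_invert` names are hypothetical stand-ins for illustration only; the patch itself operates on Midgard's real `midgard_instruction` records and the `inand`/`inor`/`inxor` opcodes.

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical opcodes for a toy IR: plain bitwise ops plus their
 * inverted counterparts. */
typedef enum {
        TOY_OP_AND, TOY_OP_OR, TOY_OP_XOR,
        TOY_OP_NAND, TOY_OP_NOR, TOY_OP_XNOR,
        TOY_OP_MOV,
} toy_op;

/* Hypothetical instruction record: an opcode plus a flag asking for the
 * destination to be bitwise-inverted after the op executes. */
typedef struct {
        toy_op op;
        bool invert_dest;
} toy_ins;

static bool
toy_op_is_bitwise(toy_op op)
{
        return op == TOY_OP_AND || op == TOY_OP_OR || op == TOY_OP_XOR;
}

static toy_op
toy_invert_op(toy_op op)
{
        switch (op) {
        case TOY_OP_AND: return TOY_OP_NAND;
        case TOY_OP_OR:  return TOY_OP_NOR;
        case TOY_OP_XOR: return TOY_OP_XNOR;
        default: assert(!"op not invertible"); return op;
        }
}

/* The peephole: when a bitwise op carries a destination-invert flag,
 * rewrite it to the inverted opcode and clear the flag, saving the
 * separate NOT instruction that lowering would otherwise emit. */
static bool
fuse_dest_invert(toy_ins *insns, size_t count)
{
        bool progress = false;

        for (size_t i = 0; i < count; ++i) {
                toy_ins *ins = &insns[i];

                if (!toy_op_is_bitwise(ins->op)) continue;
                if (!ins->invert_dest) continue;

                ins->op = toy_invert_op(ins->op);
                ins->invert_dest = false;
                progress = true;
        }

        return progress;
}

int
main(void)
{
        toy_ins program[] = {
                { TOY_OP_AND, true  },  /* fused into TOY_OP_NAND */
                { TOY_OP_XOR, false },  /* no invert flag, left untouched */
                { TOY_OP_MOV, true  },  /* not bitwise, left untouched */
        };

        bool progress = fuse_dest_invert(program, 3);
        printf("progress = %d, first op = %d\n", progress, program[0].op);
        return 0;
}
```

As in the patch, the pass reports whether it made progress; the real optimization runs inside midgard_compile_shader_nir's do/while (progress) loop, so fused results can feed further rounds of copy propagation and dead-code elimination.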