From 3e1e4ad13da1581f11b833fba021aaf209c02fff Mon Sep 17 00:00:00 2001
From: Afonso Bordado
Date: Tue, 10 Dec 2019 13:18:00 +0000
Subject: [PATCH] pan/midgard: Optimize comparisons with similar operations

Optimizes comparisons by removing the invert flag from both operands
when the comparison can be proven equivalent without the inverts.

Reviewed-by: Alyssa Rosenzweig
Tested-by: Marge Bot
Part-of:
---
 src/panfrost/midgard/compiler.h           |  3 +-
 src/panfrost/midgard/helpers.h            | 13 ++++
 src/panfrost/midgard/midgard_compile.c    |  1 +
 src/panfrost/midgard/midgard_opt_invert.c | 76 +++++++++++++++++++++++
 4 files changed, 92 insertions(+), 1 deletion(-)

diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h
index 84b6550023b..723406cdb46 100644
--- a/src/panfrost/midgard/compiler.h
+++ b/src/panfrost/midgard/compiler.h
@@ -121,7 +121,7 @@ typedef struct midgard_instruction {
         uint16_t mask;
 
         /* For ALU ops only: set to true to invert (bitwise NOT) the
-         * destination of an integer-out op. Not imeplemented in hardware but
+         * destination of an integer-out op. Not implemented in hardware but
          * allows more optimizations */
 
         bool invert;
@@ -704,5 +704,6 @@ bool midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block);
 bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);
 bool midgard_opt_csel_invert(compiler_context *ctx, midgard_block *block);
 bool midgard_opt_promote_fmov(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_drop_cmp_invert(compiler_context *ctx, midgard_block *block);
 
 #endif
diff --git a/src/panfrost/midgard/helpers.h b/src/panfrost/midgard/helpers.h
index 3adfd9081e5..2511a8140c2 100644
--- a/src/panfrost/midgard/helpers.h
+++ b/src/panfrost/midgard/helpers.h
@@ -69,6 +69,19 @@
                 op == TEXTURE_OP_DFDY \
         )
 
+#define OP_IS_UNSIGNED_CMP(op) ( \
+                op == midgard_alu_op_ult || \
+                op == midgard_alu_op_ule \
+        )
+
+#define OP_IS_INTEGER_CMP(op) ( \
+                op == midgard_alu_op_ieq || \
+                op == midgard_alu_op_ine || \
+                op == midgard_alu_op_ilt || \
+                op == midgard_alu_op_ile || \
+                OP_IS_UNSIGNED_CMP(op) \
+        )
+
 /* ALU control words are single bit fields with a lot of space */
 
 #define ALU_ENAB_VEC_MUL (1 << 17)
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index 1e28b08e77d..ac712216ec2 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -2583,6 +2583,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
                         progress |= midgard_opt_fuse_src_invert(ctx, block);
                         progress |= midgard_opt_fuse_dest_invert(ctx, block);
                         progress |= midgard_opt_csel_invert(ctx, block);
+                        progress |= midgard_opt_drop_cmp_invert(ctx, block);
                 }
         } while (progress);
 
diff --git a/src/panfrost/midgard/midgard_opt_invert.c b/src/panfrost/midgard/midgard_opt_invert.c
index a00a0c931f0..4527102553b 100644
--- a/src/panfrost/midgard/midgard_opt_invert.c
+++ b/src/panfrost/midgard/midgard_opt_invert.c
@@ -299,3 +299,79 @@ midgard_opt_csel_invert(compiler_context *ctx, midgard_block *block)
 
         return progress;
 }
+
+
+static bool
+mir_is_inverted(compiler_context *ctx, unsigned node)
+{
+        mir_foreach_instr_global(ctx, ins) {
+                if (ins->compact_branch) continue;
+                if (ins->dest != node) continue;
+
+                return ins->invert;
+        }
+
+        unreachable("Invalid node passed");
+}
+
+
+
+/* Optimizes comparisons which invert both arguments
+ *
+ *
+ * ieq(not(a), not(b)) = ieq(a, b)
+ * ine(not(a), not(b)) = ine(a, b)
+ *
+ * This also applies to ilt and ile if we flip the argument order:
+ * Proofs below provided by Alyssa Rosenzweig
+ *
+ * not(x) = -(x + 1)
+ *
+ *   ( not(A) <= not(B) ) <=> ( -(A + 1) <= -(B + 1) )
+ *                         <=> ( A + 1 >= B + 1 )
+ *                         <=> ( B <= A )
+ *
+ * On unsigned comparisons (ult / ule) we can perform the same optimization
+ * with the additional restriction that the source registers must
+ * have the same size.
+ *
+ * TODO: We may not need them to be of the same size, if we can
+ *       prove that they are the same after sext/zext
+ *
+ * not(x) = 2^n - x - 1
+ *
+ *   ( not(A) <= not(B) ) <=> ( 2^n - A - 1 <= 2^n - B - 1 )
+ *                         <=> ( -A <= -B )
+ *                         <=> ( B <= A )
+ */
+bool
+midgard_opt_drop_cmp_invert(compiler_context *ctx, midgard_block *block)
+{
+
+        bool progress = false;
+
+        mir_foreach_instr_in_block_safe(block, ins) {
+                if (ins->type != TAG_ALU_4) continue;
+                if (!OP_IS_INTEGER_CMP(ins->alu.op)) continue;
+
+                if ((ins->src[0] & IS_REG) || (ins->src[1] & IS_REG)) continue;
+                if (!mir_single_use(ctx, ins->src[0]) || !mir_single_use(ctx, ins->src[1])) continue;
+
+                bool a_inverted = mir_is_inverted(ctx, ins->src[0]);
+                bool b_inverted = mir_is_inverted(ctx, ins->src[1]);
+
+                if (!a_inverted || !b_inverted) continue;
+                if (OP_IS_UNSIGNED_CMP(ins->alu.op) && mir_srcsize(ins, 0) != mir_srcsize(ins, 1)) continue;
+
+
+                mir_strip_inverted(ctx, ins->src[0]);
+                mir_strip_inverted(ctx, ins->src[1]);
+
+                if (ins->alu.op != midgard_alu_op_ieq && ins->alu.op != midgard_alu_op_ine)
+                        mir_flip(ins);
+
+                progress |= true;
+        }
+
+        return progress;
+}
--
2.30.2
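
The identities in the proof comment above can also be checked mechanically. The following is a minimal standalone C sketch, separate from the patch and independent of the Mesa tree, exercising the same rewrite midgard_opt_drop_cmp_invert performs: with both operands bitwise-inverted, ieq/ine are unchanged once the inverts are dropped, while ilt/ile/ult/ule additionally need their argument order flipped. The value tables and the main() harness are illustrative choices only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* Sample values covering the edges of the signed and unsigned ranges */
        int32_t  s[] = { INT32_MIN, -7, -1, 0, 1, 42, INT32_MAX };
        uint32_t u[] = { 0u, 1u, 42u, 0x7fffffffu, 0xffffffffu };

        for (unsigned i = 0; i < sizeof(s) / sizeof(s[0]); ++i) {
                for (unsigned j = 0; j < sizeof(s) / sizeof(s[0]); ++j) {
                        int32_t a = s[i], b = s[j];

                        /* ieq / ine: the inverts cancel, no flip needed */
                        assert((~a == ~b) == (a == b));
                        assert((~a != ~b) == (a != b));

                        /* ilt / ile: drop both inverts and flip the order,
                         * since not(x) = -(x + 1) reverses the ordering */
                        assert((~a <  ~b) == (b <  a));
                        assert((~a <= ~b) == (b <= a));
                }
        }

        for (unsigned i = 0; i < sizeof(u) / sizeof(u[0]); ++i) {
                for (unsigned j = 0; j < sizeof(u) / sizeof(u[0]); ++j) {
                        uint32_t a = u[i], b = u[j];

                        /* ult / ule: same rewrite; valid here because both
                         * operands have the same width (32 bits), so
                         * not(x) = 2^32 - x - 1 on both sides */
                        assert((~a <  ~b) == (b <  a));
                        assert((~a <= ~b) == (b <= a));
                }
        }

        printf("all comparison identities hold\n");
        return 0;
}

The same-size restriction the pass applies to ult/ule mirrors the unsigned proof: not(x) depends on the operand width (2^n - x - 1), so operands of different widths would be compared against different complements.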