pan/midgard: Add .not propagation pass
authorAlyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Fri, 26 Jul 2019 20:14:55 +0000 (13:14 -0700)
committerAlyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Fri, 2 Aug 2019 16:57:15 +0000 (09:57 -0700)
Essentially the same idea as .pos propagation, but for bitwise inverts (.not).

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
src/panfrost/midgard/compiler.h
src/panfrost/midgard/midgard_compile.c
src/panfrost/midgard/midgard_opt_invert.c

index b5231f3075b504c169f04067d218aab111b4c17f..cf9db9145a33677a886b3e7b771612b3cb43eead 100644 (file)
@@ -576,6 +576,7 @@ bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block
 void midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g);
 
 void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block);
 bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);
 
 #endif
index f0b3dde27544081142a054f97edb48cced115803..a3c66ca43bb88fd9c7fd7209320cd77c5858862f 100644 (file)
@@ -2357,6 +2357,7 @@ midgard_compile_shader_nir(struct midgard_screen *screen, nir_shader *nir, midga
                         progress |= midgard_opt_dead_code_eliminate(ctx, block);
                         progress |= midgard_opt_combine_projection(ctx, block);
                         progress |= midgard_opt_varying_projection(ctx, block);
+                        progress |= midgard_opt_not_propagate(ctx, block);
                         progress |= midgard_opt_fuse_dest_invert(ctx, block);
                 }
         } while (progress);
index aab64a3c3b5b7990f82be08dcaabdde1d75cf8d8..ffe43a1b176f05139b12f2bb7c78e8f895c0704c 100644 (file)
@@ -63,6 +63,39 @@ midgard_lower_invert(compiler_context *ctx, midgard_block *block)
         }
 }
 
+/* Propagate the .not up to the source.
+ *
+ * When an inverted integer move (imov.not) reads a single-use SSA value,
+ * the invert can instead be applied on the ALU instruction that produced
+ * that value, and the move loses its invert. NOTE(review): this assumes
+ * toggling .invert on the producing ALU op is semantically equivalent to
+ * inverting its result -- confirm for non-bitwise producers. */
+
+bool
+midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block)
+{
+        bool progress = false;
+
+        mir_foreach_instr_in_block_safe(block, ins) {
+                /* Candidate: an ALU imov carrying an invert... */
+                if (ins->type != TAG_ALU_4) continue;
+                if (ins->alu.op != midgard_alu_op_imov) continue;
+                if (!ins->invert) continue;
+                /* ...with no other source modifier that would change meaning... */
+                if (mir_nontrivial_source2_mod_simple(ins)) continue;
+                /* ...and an SSA (non-register) source, so its single producer
+                 * within this block can be identified and modified safely */
+                if (ins->ssa_args.src1 & IS_REG) continue;
+
+                /* Is it beneficial to propagate? */
+                if (!mir_single_use(ctx, ins->ssa_args.src1)) continue;
+
+                /* We found an imov.not, propagate the invert back */
+
+                /* Walk backwards from just before the imov to find the
+                 * producer of its source within the same block */
+                mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
+                        if (v->ssa_args.dest != ins->ssa_args.src1) continue;
+                        /* Producer is not ALU (e.g. a load), so it cannot
+                         * carry an invert -- give up on this candidate */
+                        if (v->type != TAG_ALU_4) break;
+
+                        /* Toggle rather than set: if the producer was itself
+                         * already inverted, the two inverts cancel out */
+                        v->invert = !v->invert;
+                        ins->invert = false;
+                        progress |= true;
+                        break;
+                }
+        }
+
+        return progress;
+}
+
 /* With that lowering out of the way, we can focus on more interesting
  * optimizations. One easy one is fusing inverts into bitwise operations:
  *