nir/lower_alu_to_scalar: Use the builder
author Jason Ekstrand <jason.ekstrand@intel.com>
Wed, 23 Sep 2015 00:14:45 +0000 (17:14 -0700)
committer Jason Ekstrand <jason.ekstrand@intel.com>
Wed, 23 Sep 2015 03:37:35 +0000 (20:37 -0700)
Reviewed-by: Matt Turner <mattst88@gmail.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
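
In outline, the patch threads a nir_builder through the pass instead of a raw ralloc context: the builder is initialized once per nir_function_impl, its cursor is pointed at the instruction being lowered, and helpers such as nir_build_alu() and nir_vec() create, insert and return the new SSA defs in one step. A rough sketch of that pattern, using only the NIR calls visible in the diff (the wrapper function and operand names are illustrative, not part of the patch):

    #include "nir_builder.h"

    /* Illustrative only: driving instruction creation through nir_builder
     * instead of nir_alu_instr_create(mem_ctx, ...) + nir_instr_insert_before().
     */
    static void
    example_lower(nir_function_impl *impl, nir_alu_instr *instr,
                  nir_ssa_def *a, nir_ssa_def *c,
                  nir_ssa_def *comps[4], unsigned num_components)
    {
       nir_builder b;
       nir_builder_init(&b, impl);                  /* one builder per impl      */

       b.cursor = nir_before_instr(&instr->instr);  /* insert before `instr`     */

       /* Create-and-insert in one call; the builder returns the SSA def. */
       nir_ssa_def *merged = nir_build_alu(&b, nir_op_fadd, a, c, NULL, NULL);
       (void)merged;

       /* nir_vec() picks vec2/vec3/vec4 from num_components and inserts it. */
       nir_ssa_def *vec = nir_vec(&b, comps, num_components);

       nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(vec));
       nir_instr_remove(&instr->instr);
    }
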
src/glsl/nir/nir_lower_alu_to_scalar.c

index 710bb37409fd37c21b925f99c1f4333b762f875a..5ef5ec2b316ae16136cf7cd7171779711d3cf57b 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include "nir.h"
+#include "nir_builder.h"
 
 /** @file nir_lower_alu_to_scalar.c
  *
@@ -38,13 +39,13 @@ nir_alu_ssa_dest_init(nir_alu_instr *instr, unsigned num_components)
 
 static void
 lower_reduction(nir_alu_instr *instr, nir_op chan_op, nir_op merge_op,
-                void *mem_ctx)
+                nir_builder *builder)
 {
    unsigned num_components = nir_op_infos[instr->op].input_sizes[0];
 
    nir_ssa_def *last = NULL;
    for (unsigned i = 0; i < num_components; i++) {
-      nir_alu_instr *chan = nir_alu_instr_create(mem_ctx, chan_op);
+      nir_alu_instr *chan = nir_alu_instr_create(builder->shader, chan_op);
       nir_alu_ssa_dest_init(chan, 1);
       nir_alu_src_copy(&chan->src[0], &instr->src[0], chan);
       chan->src[0].swizzle[0] = chan->src[0].swizzle[i];
@@ -54,18 +55,13 @@ lower_reduction(nir_alu_instr *instr, nir_op chan_op, nir_op merge_op,
          chan->src[1].swizzle[0] = chan->src[1].swizzle[i];
       }
 
-      nir_instr_insert_before(&instr->instr, &chan->instr);
+      nir_builder_instr_insert(builder, &chan->instr);
 
       if (i == 0) {
          last = &chan->dest.dest.ssa;
       } else {
-         nir_alu_instr *merge = nir_alu_instr_create(mem_ctx, merge_op);
-         nir_alu_ssa_dest_init(merge, 1);
-         merge->dest.write_mask = 1;
-         merge->src[0].src = nir_src_for_ssa(last);
-         merge->src[1].src = nir_src_for_ssa(&chan->dest.dest.ssa);
-         nir_instr_insert_before(&instr->instr, &merge->instr);
-         last = &merge->dest.dest.ssa;
+         last = nir_build_alu(builder, merge_op,
+                              last, &chan->dest.dest.ssa, NULL, NULL);
       }
    }
 
@@ -75,7 +71,7 @@ lower_reduction(nir_alu_instr *instr, nir_op chan_op, nir_op merge_op,
 }
 
 static void
-lower_alu_instr_scalar(nir_alu_instr *instr, void *mem_ctx)
+lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b)
 {
    unsigned num_src = nir_op_infos[instr->op].num_inputs;
    unsigned i, chan;
@@ -83,11 +79,13 @@ lower_alu_instr_scalar(nir_alu_instr *instr, void *mem_ctx)
    assert(instr->dest.dest.is_ssa);
    assert(instr->dest.write_mask != 0);
 
+   b->cursor = nir_before_instr(&instr->instr);
+
 #define LOWER_REDUCTION(name, chan, merge) \
    case name##2: \
    case name##3: \
    case name##4: \
-      lower_reduction(instr, chan, merge, mem_ctx); \
+      lower_reduction(instr, chan, merge, b); \
       break;
 
    switch (instr->op) {
@@ -134,16 +132,13 @@ lower_alu_instr_scalar(nir_alu_instr *instr, void *mem_ctx)
       return;
 
    unsigned num_components = instr->dest.dest.ssa.num_components;
-   static const nir_op nir_op_map[] = {nir_op_vec2, nir_op_vec3, nir_op_vec4};
-   nir_alu_instr *vec_instr =
-      nir_alu_instr_create(mem_ctx, nir_op_map[num_components - 2]);
-   nir_alu_ssa_dest_init(vec_instr, num_components);
+   nir_ssa_def *comps[] = { NULL, NULL, NULL, NULL };
 
    for (chan = 0; chan < 4; chan++) {
       if (!(instr->dest.write_mask & (1 << chan)))
          continue;
 
-      nir_alu_instr *lower = nir_alu_instr_create(mem_ctx, instr->op);
+      nir_alu_instr *lower = nir_alu_instr_create(b->shader, instr->op);
       for (i = 0; i < num_src; i++) {
          /* We only handle same-size-as-dest (input_sizes[] == 0) or scalar
           * args (input_sizes[] == 1).
@@ -159,25 +154,24 @@ lower_alu_instr_scalar(nir_alu_instr *instr, void *mem_ctx)
 
       nir_alu_ssa_dest_init(lower, 1);
       lower->dest.saturate = instr->dest.saturate;
-      vec_instr->src[chan].src = nir_src_for_ssa(&lower->dest.dest.ssa);
+      comps[chan] = &lower->dest.dest.ssa;
 
-      nir_instr_insert_before(&instr->instr, &lower->instr);
+      nir_builder_instr_insert(b, &lower->instr);
    }
 
-   nir_instr_insert_before(&instr->instr, &vec_instr->instr);
+   nir_ssa_def *vec = nir_vec(b, comps, num_components);
 
-   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
-                            nir_src_for_ssa(&vec_instr->dest.dest.ssa));
+   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, nir_src_for_ssa(vec));
 
    nir_instr_remove(&instr->instr);
 }
 
 static bool
-lower_alu_to_scalar_block(nir_block *block, void *data)
+lower_alu_to_scalar_block(nir_block *block, void *builder)
 {
    nir_foreach_instr_safe(block, instr) {
       if (instr->type == nir_instr_type_alu)
-         lower_alu_instr_scalar(nir_instr_as_alu(instr), data);
+         lower_alu_instr_scalar(nir_instr_as_alu(instr), builder);
    }
 
    return true;
@@ -186,7 +180,10 @@ lower_alu_to_scalar_block(nir_block *block, void *data)
 static void
 nir_lower_alu_to_scalar_impl(nir_function_impl *impl)
 {
-   nir_foreach_block(impl, lower_alu_to_scalar_block, ralloc_parent(impl));
+   nir_builder builder;
+   nir_builder_init(&builder, impl);
+
+   nir_foreach_block(impl, lower_alu_to_scalar_block, &builder);
 }
 
 void