vc4: fixup for new nir_foreach_block()
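
Besides the nir_foreach_block()/nir_foreach_function() conversion named in the
title, the diff shown below also tracks the other NIR interface changes this
file had to follow: nir_builder.h moved from glsl/nir/ to compiler/nir/, the
UTIL_FORMAT_SWIZZLE_* names became PIPE_SWIZZLE_*, the load_input/load_uniform/
store_output intrinsics grew an explicit offset source (replacing the separate
*_indirect intrinsic variants), nir_ssa_dest_init() now takes the destination
bit size, sample-mask output is left to vc4_nir_lower_blend() alongside color,
and the pass entry point takes the nir_shader as a parameter instead of
reaching through c->s.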
diff --git a/src/gallium/drivers/vc4/vc4_nir_lower_io.c b/src/gallium/drivers/vc4/vc4_nir_lower_io.c
index 72a514756fdaa05cd60a44cb210a0f097100d38e..261c00dc449640842fab1114ea3f9423dc67bc2f 100644
--- a/src/gallium/drivers/vc4/vc4_nir_lower_io.c
+++ b/src/gallium/drivers/vc4/vc4_nir_lower_io.c
@@ -22,7 +22,7 @@
  */
 
 #include "vc4_qir.h"
-#include "glsl/nir/nir_builder.h"
+#include "compiler/nir/nir_builder.h"
 #include "util/u_format.h"
 
 /**
@@ -98,7 +98,7 @@ vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                 &desc->channel[swiz];
         nir_ssa_def *temp;
 
-        if (swiz > UTIL_FORMAT_SWIZZLE_W) {
+        if (swiz > PIPE_SWIZZLE_W) {
                 return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
         } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                 return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
@@ -179,6 +179,12 @@ vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
         /* All TGSI-to-NIR inputs are vec4. */
         assert(intr->num_components == 4);
 
+        /* We only accept direct inputs and TGSI only ever gives them to us
+         * with an offset value of 0.
+         */
+        assert(nir_src_as_const_value(intr->src[0]) &&
+               nir_src_as_const_value(intr->src[0])->u32[0] == 0);
+
         /* Generate dword loads for the VPM values (since these intrinsics may
          * be reordered, the actual reads will be generated at the top of the
          * shader by ntq_setup_inputs()).
@@ -190,7 +196,8 @@ vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                                                    nir_intrinsic_load_input);
                 intr_comp->num_components = 1;
                 intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
-                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
+                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
+                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                 nir_builder_instr_insert(b, &intr_comp->instr);
 
                 vpm_reads[i] = &intr_comp->dest.ssa;
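
For reference, the two bits of new NIR interface these load_input hunks adapt
to look roughly like this; the prototype is reconstructed from the call sites
above rather than copied from nir.h, so treat it as a sketch:

        /* nir_ssa_dest_init() gained an explicit bit size for the SSA
         * destination, which is why every call in this file grows a 32.
         */
        void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                               unsigned num_components, unsigned bit_size,
                               const char *name);

The other change is that load_input (and load_uniform) now carry an
indirect-offset source; vc4 only handles direct accesses, so it asserts the
offset is a constant zero and supplies nir_imm_int(b, 0) on the scalar loads
it emits.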
@@ -245,6 +252,12 @@ vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
         /* All TGSI-to-NIR inputs are vec4. */
         assert(intr->num_components == 4);
 
+        /* We only accept direct inputs and TGSI only ever gives them to us
+         * with an offset value of 0.
+         */
+        assert(nir_src_as_const_value(intr->src[0]) &&
+               nir_src_as_const_value(intr->src[0])->u32[0] == 0);
+
         /* Generate scalar loads equivalent to the original VEC4. */
         nir_ssa_def *dests[4];
         for (unsigned i = 0; i < intr->num_components; i++) {
@@ -252,7 +265,9 @@ vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                         nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
                 intr_comp->num_components = 1;
                 intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
-                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
+                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
+
+                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                 nir_builder_instr_insert(b, &intr_comp->instr);
 
                 dests[i] = &intr_comp->dest.ssa;
@@ -311,7 +326,8 @@ vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
         /* Color output is lowered by vc4_nir_lower_blend(). */
         if (c->stage == QSTAGE_FRAG &&
             (output_var->data.location == FRAG_RESULT_COLOR ||
-             output_var->data.location == FRAG_RESULT_DATA0)) {
+             output_var->data.location == FRAG_RESULT_DATA0 ||
+             output_var->data.location == FRAG_RESULT_SAMPLE_MASK)) {
                 intr->const_index[0] *= 4;
                 return;
         }
@@ -319,6 +335,12 @@ vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
         /* All TGSI-to-NIR outputs are VEC4. */
         assert(intr->num_components == 4);
 
+        /* We only accept direct outputs and TGSI only ever gives them to us
+         * with an offset value of 0.
+         */
+        assert(nir_src_as_const_value(intr->src[1]) &&
+               nir_src_as_const_value(intr->src[1])->u32[0] == 0);
+
         b->cursor = nir_before_instr(&intr->instr);
 
         for (unsigned i = 0; i < intr->num_components; i++) {
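
store_output gets the same treatment: src[0] remains the value being written
and the new src[1] is the indirect offset, which vc4 again requires to be a
constant zero; the per-channel stores in the next hunk pass nir_imm_int(b, 0)
for it.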
@@ -330,6 +352,7 @@ vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                 assert(intr->src[0].is_ssa);
                 intr_comp->src[0] =
                         nir_src_for_ssa(nir_channel(b, intr->src[0].ssa, i));
+                intr_comp->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
                 nir_builder_instr_insert(b, &intr_comp->instr);
         }
 
@@ -340,8 +363,8 @@ static void
 vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
 {
-        /* All TGSI-to-NIR uniform loads are vec4, but we may create dword
-         * loads in our lowering passes.
+        /* All TGSI-to-NIR uniform loads are vec4, but we need byte offsets
+         * in the backend.
          */
         if (intr->num_components == 1)
                 return;
@@ -355,27 +378,16 @@ vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                 nir_intrinsic_instr *intr_comp =
                         nir_intrinsic_instr_create(c->s, intr->intrinsic);
                 intr_comp->num_components = 1;
-                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
-
-                if (intr->intrinsic == nir_intrinsic_load_uniform_indirect) {
-                        /* Convert the variable TGSI register index to a byte
-                         * offset.
-                         */
-                        intr_comp->src[0] =
-                                nir_src_for_ssa(nir_ishl(b,
-                                                         intr->src[0].ssa,
-                                                         nir_imm_int(b, 4)));
-
-                        /* Convert the offset to be a byte index, too. */
-                        intr_comp->const_index[0] = (intr->const_index[0] * 16 +
-                                                     i * 4);
-                } else {
-                        /* We want a dword index for non-indirect uniform
-                         * loads.
-                         */
-                        intr_comp->const_index[0] = (intr->const_index[0] * 4 +
-                                                     i);
-                }
+                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
+
+                /* Convert the uniform offset to bytes.  If it happens to be a
+                 * constant, constant-folding will clean up the shift for us.
+                 */
+                intr_comp->const_index[0] = (intr->const_index[0] * 16 + i * 4);
+
+                intr_comp->src[0] =
+                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
+                                                 nir_imm_int(b, 4)));
 
                 dests[i] = &intr_comp->dest.ssa;
 
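
The subtle part of this hunk is the unit change: uniform addressing used to be
in vec4/dword register indices and is now in bytes, for both the constant and
the indirect parts. A small worked example with hypothetical values:

        /* Hypothetical load of TGSI uniform register 2, component i = 3 (.w):
         *
         *   const_index[0] = 2 * 16 + 3 * 4 = 44 -> byte offset of that channel
         *
         * An indirect register index of n contributes n << 4 = n * 16 bytes,
         * which is what the nir_ishl(b, ..., nir_imm_int(b, 4)) above computes.
         */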
@@ -406,36 +418,25 @@ vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                 break;
 
         case nir_intrinsic_load_uniform:
-        case nir_intrinsic_load_uniform_indirect:
-        case nir_intrinsic_load_user_clip_plane:
                 vc4_nir_lower_uniform(c, b, intr);
                 break;
 
+        case nir_intrinsic_load_user_clip_plane:
         default:
                 break;
         }
 }
 
 static bool
-vc4_nir_lower_io_block(nir_block *block, void *arg)
+vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
 {
-        struct vc4_compile *c = arg;
-        nir_function_impl *impl =
-                nir_cf_node_get_function(&block->cf_node);
-
         nir_builder b;
         nir_builder_init(&b, impl);
 
-        nir_foreach_instr_safe(block, instr)
-                vc4_nir_lower_io_instr(c, &b, instr);
-
-        return true;
-}
-
-static bool
-vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
-{
-        nir_foreach_block(impl, vc4_nir_lower_io_block, c);
+        nir_foreach_block(block, impl) {
+                nir_foreach_instr_safe(instr, block)
+                        vc4_nir_lower_io_instr(c, &b, instr);
+        }
 
         nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
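
This hunk is what the commit title refers to: nir_foreach_block() changed from
a function taking a per-block callback plus a void * of state into a macro that
declares the block variable and expands to a loop, so the separate
vc4_nir_lower_io_block() wrapper can be folded into vc4_nir_lower_io_impl().
Roughly, the two styles compare as follows (a sketch of the usage, not the
actual macro definitions):

        /* Old style: iteration via callback, state threaded through a void *. */
        static bool cb(nir_block *block, void *state);
        nir_foreach_block(impl, cb, state);

        /* New style: a plain loop over the impl's blocks in source order. */
        nir_foreach_block(block, impl) {
                /* ... visit each instruction in block ... */
        }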
@@ -444,10 +445,10 @@ vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
 }
 
 void
-vc4_nir_lower_io(struct vc4_compile *c)
+vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
 {
-        nir_foreach_overload(c->s, overload) {
-                if (overload->impl)
-                        vc4_nir_lower_io_impl(c, overload->impl);
+        nir_foreach_function(function, s) {
+                if (function->impl)
+                        vc4_nir_lower_io_impl(c, function->impl);
         }
 }
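
The shader-level loop changed along the same lines: nir_foreach_overload(shader,
overload) with its overload->impl check became nir_foreach_function(function, s),
and since the pass now receives the nir_shader as an explicit parameter it no
longer needs to reach through c->s to find it.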