util: Move gallium's PIPE_FORMAT utils to /util/format/
[mesa.git] / src / gallium / drivers / vc4 / vc4_nir_lower_io.c
index ad96ef5ad82880946a6e0139bcabd87d413a7791..c82d99812259bf18d69038f88c2d2321350ed4e3 100644
 
 #include "vc4_qir.h"
 #include "compiler/nir/nir_builder.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
 
 /**
- * Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
- * something amenable to the VC4 architecture.
+ * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its io
+ * intrinsics into something amenable to the VC4 architecture.
  *
- * Currently, it splits outputs, VS inputs, and uniforms into scalars, drops
- * any non-position outputs in coordinate shaders, and fixes up the addressing
- * on indirect uniform loads.  FS input scalarization is handled by
+ * Currently, it splits VS inputs and uniforms into scalars, drops any
+ * non-position outputs in coordinate shaders, and fixes up the addressing on
+ * indirect uniform loads.  FS input and VS output scalarization is handled by
  * nir_lower_io_to_scalar().
  */
 
 static void
-replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
-                            nir_ssa_def **comps)
+replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
+                           nir_ssa_def **comps)
 {
 
-        /* Batch things back together into a vec4.  This will get split by the
-         * later ALU scalarization pass.
+        /* Batch things back together into a vector.  This will get split by
+         * the later ALU scalarization pass.
          */
-        nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);
+        nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);
 
         /* Replace the old intrinsic with a reference to our reconstructed
-         * vec4.
+         * vector.
          */
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
         nir_instr_remove(&intr->instr);
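
Now that loads are no longer padded out to vec4, the helper sizes the rebuilt
vector from the intrinsic itself: nir_vec() takes the component array plus a
count instead of exactly four channels. A minimal sketch of the calling
pattern (illustrative only; load_comp() is a hypothetical stand-in for the
per-component loads the callers below emit):

    static nir_ssa_def *
    rebuild_dest(nir_builder *b, nir_intrinsic_instr *intr)
    {
            nir_ssa_def *comps[4];

            /* A vec2 load yields two components; the old nir_vec4() call
             * would have read the uninitialized comps[2]/comps[3] here. */
            for (unsigned i = 0; i < intr->num_components; i++)
                    comps[i] = load_comp(b, intr, i);  /* hypothetical helper */

            return nir_vec(b, comps, intr->num_components);
    }
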
@@ -106,11 +106,11 @@ vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
         } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                 if (chan->normalized) {
                         return nir_fmul(b,
-                                        nir_i2f(b, vpm_reads[swiz]),
+                                        nir_i2f32(b, vpm_reads[swiz]),
                                         nir_imm_float(b,
                                                       1.0 / 0x7fffffff));
                 } else {
-                        return nir_i2f(b, vpm_reads[swiz]);
+                        return nir_i2f32(b, vpm_reads[swiz]);
                 }
         } else if (chan->size == 8 &&
                    (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
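
For reference, the signed-normalized 32-bit path above amounts to the
following scalar arithmetic (a CPU-side sketch, not driver code):

    #include <stdint.h>

    /* Mirrors the i2f32 + fmul(1.0 / 0x7fffffff) sequence above:
     * INT32_MAX maps to 1.0, INT32_MIN to just under -1.0. */
    static inline float
    snorm32_to_float(int32_t v)
    {
            return (float)v * (1.0f / 0x7fffffff);
    }
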
@@ -125,16 +125,16 @@ vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                                                 nir_imm_float(b, 1.0));
                         } else {
                                 return nir_fadd(b,
-                                                nir_i2f(b,
-                                                        vc4_nir_unpack_8i(b, temp,
-                                                                          swiz)),
+                                                nir_i2f32(b,
+                                                          vc4_nir_unpack_8i(b, temp,
+                                                                            swiz)),
                                                 nir_imm_float(b, -128.0));
                         }
                 } else {
                         if (chan->normalized) {
                                 return vc4_nir_unpack_8f(b, vpm, swiz);
                         } else {
-                                return nir_i2f(b, vc4_nir_unpack_8i(b, vpm, swiz));
+                                return nir_i2f32(b, vc4_nir_unpack_8i(b, vpm, swiz));
                         }
                 }
         } else if (chan->size == 16 &&
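
The signed 8-bit case relies on the hardware unpack treating bytes as
unsigned: the (elided) temp computation biases each byte by 0x80, and the
-128.0 add above removes the bias after conversion. In scalar terms (a
sketch; an XOR or an add of the bias is equivalent per byte):

    #include <stdint.h>

    /* E.g. -5 is stored as 0xfb; biased, it unpacks as 0x7b = 123,
     * and 123.0 - 128.0 recovers -5.0. */
    static inline float
    s8_via_unsigned_unpack(int8_t v)
    {
            uint8_t biased = (uint8_t)v ^ 0x80;    /* assumed per-byte bias */
            return (float)biased - 128.0f;
    }
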
@@ -146,7 +146,7 @@ vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                  * UNPACK_16_I for all of these.
                  */
                 if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
-                        temp = nir_i2f(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
+                        temp = nir_i2f32(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                         if (chan->normalized) {
                                 return nir_fmul(b, temp,
                                                 nir_imm_float(b, 1/32768.0f));
@@ -154,7 +154,7 @@ vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                                 return temp;
                         }
                 } else {
-                        temp = nir_i2f(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
+                        temp = nir_i2f32(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                         if (chan->normalized) {
                                 return nir_fmul(b, temp,
                                                 nir_imm_float(b, 1 / 65535.0));
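
Scalar equivalents of the two normalized 16-bit paths above (sketch only).
Note the asymmetry the constants produce: the signed scale makes -32768 hit
-1.0 exactly (while 32767 lands just under 1.0), and the unsigned scale makes
65535 hit 1.0 exactly:

    #include <stdint.h>

    static inline float
    snorm16_to_float(int16_t v)     /* i2f32 + fmul(1 / 32768.0) */
    {
            return (float)v * (1.0f / 32768.0f);
    }

    static inline float
    unorm16_to_float(uint16_t v)    /* i2f32 + fmul(1 / 65535.0) */
    {
            return (float)v * (1.0f / 65535.0f);
    }
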
@@ -177,14 +177,10 @@ vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
         enum pipe_format format = c->vs_key->attr_formats[attr];
         uint32_t attr_size = util_format_get_blocksize(format);
 
-        /* All TGSI-to-NIR inputs are vec4. */
-        assert(intr->num_components == 4);
-
         /* We only accept direct inputs, and TGSI only ever gives them to us
          * with an offset value of 0.
          */
-        assert(nir_src_as_const_value(intr->src[0]) &&
-               nir_src_as_const_value(intr->src[0])->u32[0] == 0);
+        assert(nir_src_as_uint(intr->src[0]) == 0);
 
         /* Generate dword loads for the VPM values (Since these intrinsics may
          * be reordered, the actual reads will be generated at the top of the
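
(The assert above also collapses the two-step const-value check into a single
nir_src_as_uint() call.) The attribute's blocksize drives how many dword VPM
reads the elided loop below emits; assuming the reads stay dword-granular,
the count works out as in this sketch (DIV_ROUND_UP is mesa's util/macros.h
helper):

    /* e.g. PIPE_FORMAT_R8G8B8A8_UNORM     ->  4 bytes -> 1 VPM read,
     *      PIPE_FORMAT_R32G32B32A32_FLOAT -> 16 bytes -> 4 VPM reads */
    unsigned num_vpm_reads = DIV_ROUND_UP(attr_size, 4);
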
@@ -210,7 +206,7 @@ vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                 util_format_description(format);
 
         nir_ssa_def *dests[4];
-        for (int i = 0; i < 4; i++) {
+        for (int i = 0; i < intr->num_components; i++) {
                 uint8_t swiz = desc->swizzle[i];
                 dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                          desc);
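
The per-component swizzle lookup is what handles formats whose storage order
differs from RGBA. For instance (values from util_format's format tables):

    /* PIPE_FORMAT_B8G8R8A8_UNORM stores b,g,r,a in memory, so its
     * description swizzle is {2, 1, 0, 3}: dests[0] (red) is built from
     * stored channel 2, and the red/blue swap falls out of the table. */
    const struct util_format_description *d =
            util_format_description(PIPE_FORMAT_B8G8R8A8_UNORM);
    assert(d->swizzle[0] == PIPE_SWIZZLE_Z && d->swizzle[2] == PIPE_SWIZZLE_X);
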
@@ -226,7 +222,7 @@ vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                 }
         }
 
-        replace_intrinsic_with_vec4(b, intr, dests);
+        replace_intrinsic_with_vec(b, intr, dests);
 }
 
 static bool
@@ -319,70 +315,32 @@ vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                 nir_instr_remove(&intr->instr);
                 return;
         }
-
-        /* Color output is lowered by vc4_nir_lower_blend(). */
-        if (c->stage == QSTAGE_FRAG &&
-            (output_var->data.location == FRAG_RESULT_COLOR ||
-             output_var->data.location == FRAG_RESULT_DATA0 ||
-             output_var->data.location == FRAG_RESULT_SAMPLE_MASK)) {
-                nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) * 4);
-                return;
-        }
-
-        /* All TGSI-to-NIR outputs are VEC4. */
-        assert(intr->num_components == 4);
-
-        /* We only accept direct outputs and TGSI only ever gives them to us
-         * with an offset value of 0.
-         */
-        assert(nir_src_as_const_value(intr->src[1]) &&
-               nir_src_as_const_value(intr->src[1])->u32[0] == 0);
-
-        b->cursor = nir_before_instr(&intr->instr);
-
-        for (unsigned i = 0; i < intr->num_components; i++) {
-                nir_intrinsic_instr *intr_comp =
-                        nir_intrinsic_instr_create(c->s, nir_intrinsic_store_output);
-                intr_comp->num_components = 1;
-                nir_intrinsic_set_base(intr_comp,
-                                       nir_intrinsic_base(intr) * 4 + i);
-
-                assert(intr->src[0].is_ssa);
-                intr_comp->src[0] =
-                        nir_src_for_ssa(nir_channel(b, intr->src[0].ssa, i));
-                intr_comp->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
-                nir_builder_instr_insert(b, &intr_comp->instr);
-        }
-
-        nir_instr_remove(&intr->instr);
 }
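
All of the per-component store_output splitting deleted above now lives in
the shared NIR pass named in the header comment. The call site sits in the
driver's compile path and looks roughly like this (the exact mode flags are
an assumption and may differ per stage):

    NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
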
 
 static void
 vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
 {
-        /* All TGSI-to-NIR uniform loads are vec4, but we need byte offsets
-         * in the backend.
-         */
-        if (intr->num_components == 1)
-                return;
-        assert(intr->num_components == 4);
-
         b->cursor = nir_before_instr(&intr->instr);
 
-        /* Generate scalar loads equivalent to the original VEC4. */
+        /* Generate scalar loads equivalent to the original vector. */
         nir_ssa_def *dests[4];
         for (unsigned i = 0; i < intr->num_components; i++) {
                 nir_intrinsic_instr *intr_comp =
                         nir_intrinsic_instr_create(c->s, intr->intrinsic);
                 intr_comp->num_components = 1;
-                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
+                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1,
+                                  intr->dest.ssa.bit_size, NULL);
 
-                /* Convert the uniform offset to bytes.  If it happens to be a
-                 * constant, constant-folding will clean up the shift for us.
+                /* Convert the uniform offset to bytes.  If it happens
+                 * to be a constant, constant-folding will clean up
+                 * the shift for us.
                  */
                 nir_intrinsic_set_base(intr_comp,
-                                       nir_intrinsic_base(intr) * 16 + i * 4);
+                                       nir_intrinsic_base(intr) * 16 +
+                                       i * 4);
+                nir_intrinsic_set_range(intr_comp,
+                                        nir_intrinsic_range(intr) * 16 - i * 4);
 
                 intr_comp->src[0] =
                         nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
@@ -393,7 +351,7 @@ vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                 nir_builder_instr_insert(b, &intr_comp->instr);
         }
 
-        replace_intrinsic_with_vec4(b, intr, dests);
+        replace_intrinsic_with_vec(b, intr, dests);
 }
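
Putting the uniform addressing together: slots are vec4 = 16 bytes, so
component i of slot (base + indirect) lands at base * 16 + i * 4 plus the
indirect offset shifted into bytes, and the per-component range shrinks by
the same i * 4. A standalone sketch of the arithmetic (names hypothetical):

    #include <stdint.h>

    /* base and indirect count vec4 slots; i selects a dword within one. */
    static inline uint32_t
    uniform_component_byte_offset(uint32_t base, uint32_t indirect, unsigned i)
    {
            return base * 16 + (indirect << 4) + i * 4;
    }
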
 
 static void