tu: ir3: Emit push constants directly
src/freedreno/vulkan/tu_pipeline.c
index eb920ec91724c1db532da073f16e22e245906ac8..5d36dfcaf3fc41fab1bb8a72968bd7dc9400d72e 100644
@@ -27,6 +27,7 @@
 
 #include "tu_private.h"
 
+#include "ir3/ir3_nir.h"
 #include "main/menums.h"
 #include "nir/nir.h"
 #include "nir/nir_builder.h"
@@ -111,32 +112,6 @@ tu_shader_stage(VkShaderStageFlagBits stage)
    }
 }
 
-static const VkVertexInputAttributeDescription *
-tu_find_vertex_input_attribute(
-   const VkPipelineVertexInputStateCreateInfo *vi_info, uint32_t slot)
-{
-   assert(slot >= VERT_ATTRIB_GENERIC0);
-   slot -= VERT_ATTRIB_GENERIC0;
-   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
-      if (vi_info->pVertexAttributeDescriptions[i].location == slot)
-         return &vi_info->pVertexAttributeDescriptions[i];
-   }
-   return NULL;
-}
-
-static const VkVertexInputBindingDescription *
-tu_find_vertex_input_binding(
-   const VkPipelineVertexInputStateCreateInfo *vi_info,
-   const VkVertexInputAttributeDescription *vi_attr)
-{
-   assert(vi_attr);
-   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
-      if (vi_info->pVertexBindingDescriptions[i].binding == vi_attr->binding)
-         return &vi_info->pVertexBindingDescriptions[i];
-   }
-   return NULL;
-}
-
 static bool
 tu_logic_op_reads_dst(VkLogicOp op)
 {
@@ -437,19 +412,21 @@ static void
 tu6_emit_gs_config(struct tu_cs *cs, struct tu_shader *shader,
                    const struct ir3_shader_variant *gs)
 {
-   uint32_t sp_gs_config = 0;
-   if (gs->instrlen)
-      sp_gs_config |= A6XX_SP_GS_CONFIG_ENABLED;
-
+   bool has_gs = gs->type != MESA_SHADER_NONE;
    tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_UNKNOWN_A871, 1);
    tu_cs_emit(cs, 0);
 
    tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CONFIG, 2);
-   tu_cs_emit(cs, sp_gs_config);
+   tu_cs_emit(cs, COND(has_gs,
+                       A6XX_SP_GS_CONFIG_ENABLED |
+                       A6XX_SP_GS_CONFIG_NIBO(ir3_shader_nibo(gs)) |
+                       A6XX_SP_GS_CONFIG_NTEX(gs->num_samp) |
+                       A6XX_SP_GS_CONFIG_NSAMP(gs->num_samp)));
    tu_cs_emit(cs, gs->instrlen);
 
    tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_GS_CNTL, 1);
-   tu_cs_emit(cs, A6XX_HLSQ_GS_CNTL_CONSTLEN(align(gs->constlen, 4)));
+   tu_cs_emit(cs, COND(has_gs, A6XX_HLSQ_GS_CNTL_ENABLED) |
+                  A6XX_HLSQ_GS_CNTL_CONSTLEN(align(gs->constlen, 4)));
 }
 
 static void
@@ -544,35 +521,202 @@ tu6_emit_cs_config(struct tu_cs *cs, const struct tu_shader *shader,
 
 static void
 tu6_emit_vs_system_values(struct tu_cs *cs,
-                          const struct ir3_shader_variant *vs)
+                          const struct ir3_shader_variant *vs,
+                          const struct ir3_shader_variant *gs)
 {
    const uint32_t vertexid_regid =
-      ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
+         ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID);
    const uint32_t instanceid_regid =
-      ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);
+         ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);
+   const uint32_t primitiveid_regid = gs->type != MESA_SHADER_NONE ?
+         ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID) :
+         regid(63, 0);
+   const uint32_t gsheader_regid = gs->type != MESA_SHADER_NONE ?
+         ir3_find_sysval_regid(gs, SYSTEM_VALUE_GS_HEADER_IR3) :
+         regid(63, 0);
 
    tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_1, 6);
    tu_cs_emit(cs, A6XX_VFD_CONTROL_1_REGID4VTX(vertexid_regid) |
-                     A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
-                     0xfcfc0000);
+                  A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
+                  A6XX_VFD_CONTROL_1_REGID4PRIMID(primitiveid_regid) |
+                  0xfc000000);
    tu_cs_emit(cs, 0x0000fcfc); /* VFD_CONTROL_2 */
    tu_cs_emit(cs, 0xfcfcfcfc); /* VFD_CONTROL_3 */
    tu_cs_emit(cs, 0x000000fc); /* VFD_CONTROL_4 */
-   tu_cs_emit(cs, 0x0000fcfc); /* VFD_CONTROL_5 */
+   tu_cs_emit(cs, A6XX_VFD_CONTROL_5_REGID_GSHEADER(gsheader_regid) |
+                  0xfc00); /* VFD_CONTROL_5 */
    tu_cs_emit(cs, 0x00000000); /* VFD_CONTROL_6 */
 }
 
+/* Add any missing varyings needed for stream-out. Otherwise varyings not
+ * used by the fragment shader will be stripped out.
+ */
+static void
+tu6_link_streamout(struct ir3_shader_linkage *l,
+                     const struct ir3_shader_variant *v)
+{
+   const struct ir3_stream_output_info *info = &v->shader->stream_output;
+
+   /*
+    * First, any stream-out varyings not already in the linkage map (i.e.
+    * not consumed by the frag shader) need to be added:
+    */
+   for (unsigned i = 0; i < info->num_outputs; i++) {
+      const struct ir3_stream_output *out = &info->output[i];
+      unsigned compmask =
+                  (1 << (out->num_components + out->start_component)) - 1;
+      unsigned k = out->register_index;
+      unsigned idx, nextloc = 0;
+
+      /* psize/pos need to be the last entries in linkage map, and will
+       * get added later, so skip over them:
+       */
+      if (v->outputs[k].slot == VARYING_SLOT_PSIZ ||
+            v->outputs[k].slot == VARYING_SLOT_POS)
+         continue;
+
+      for (idx = 0; idx < l->cnt; idx++) {
+         if (l->var[idx].regid == v->outputs[k].regid)
+            break;
+         nextloc = MAX2(nextloc, l->var[idx].loc + 4);
+      }
+
+      /* add if not already in linkage map: */
+      if (idx == l->cnt)
+         ir3_link_add(l, v->outputs[k].regid, compmask, nextloc);
+
+      /* expand the component mask if needed, i.e. we are streaming out all
+       * components but the frag shader doesn't consume them all:
+       */
+      if (compmask & ~l->var[idx].compmask) {
+         l->var[idx].compmask |= compmask;
+         l->max_loc = MAX2(l->max_loc, l->var[idx].loc +
+                           util_last_bit(l->var[idx].compmask));
+      }
+   }
+}
+
+static void
+tu6_setup_streamout(const struct ir3_shader_variant *v,
+            struct ir3_shader_linkage *l, struct tu_streamout_state *tf)
+{
+   const struct ir3_stream_output_info *info = &v->shader->stream_output;
+
+   memset(tf, 0, sizeof(*tf));
+
+   tf->prog_count = align(l->max_loc, 2) / 2;
+
+   debug_assert(tf->prog_count < ARRAY_SIZE(tf->prog));
+
+   /* set stride info to the streamout state */
+   for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++)
+      tf->stride[i] = info->stride[i];
+
+   for (unsigned i = 0; i < info->num_outputs; i++) {
+      const struct ir3_stream_output *out = &info->output[i];
+      unsigned k = out->register_index;
+      unsigned idx;
+
+      tf->ncomp[out->output_buffer] += out->num_components;
+
+      /* the linkage map is sorted in the order the frag shader wants
+       * things, so we have to search for the streamed-out output here:
+       */
+      for (idx = 0; idx < l->cnt; idx++)
+         if (l->var[idx].regid == v->outputs[k].regid)
+            break;
+
+      debug_assert(idx < l->cnt);
+
+      for (unsigned j = 0; j < out->num_components; j++) {
+         unsigned c   = j + out->start_component;
+         unsigned loc = l->var[idx].loc + c;
+         unsigned off = j + out->dst_offset;  /* in dwords */
+
+         if (loc & 1) {
+            tf->prog[loc/2] |= A6XX_VPC_SO_PROG_B_EN |
+                        A6XX_VPC_SO_PROG_B_BUF(out->output_buffer) |
+                        A6XX_VPC_SO_PROG_B_OFF(off * 4);
+         } else {
+            tf->prog[loc/2] |= A6XX_VPC_SO_PROG_A_EN |
+                        A6XX_VPC_SO_PROG_A_BUF(out->output_buffer) |
+                        A6XX_VPC_SO_PROG_A_OFF(off * 4);
+         }
+      }
+   }
+
+   tf->vpc_so_buf_cntl = A6XX_VPC_SO_BUF_CNTL_ENABLE |
+               COND(tf->ncomp[0] > 0, A6XX_VPC_SO_BUF_CNTL_BUF0) |
+               COND(tf->ncomp[1] > 0, A6XX_VPC_SO_BUF_CNTL_BUF1) |
+               COND(tf->ncomp[2] > 0, A6XX_VPC_SO_BUF_CNTL_BUF2) |
+               COND(tf->ncomp[3] > 0, A6XX_VPC_SO_BUF_CNTL_BUF3);
+}
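
To make the A/B packing above concrete, here is an assumed example (the values
are illustrative, not taken from the patch): a varying linked at loc 4 that
streams three components to buffer 0 with dst_offset 0 fills the program
entries as follows:

   /* loc 4 (even) -> A half of prog[2], byte offset 0
    * loc 5 (odd)  -> B half of prog[2], byte offset 4
    * loc 6 (even) -> A half of prog[3], byte offset 8
    */
   tf->prog[2] = A6XX_VPC_SO_PROG_A_EN | A6XX_VPC_SO_PROG_A_BUF(0) |
                 A6XX_VPC_SO_PROG_A_OFF(0) |
                 A6XX_VPC_SO_PROG_B_EN | A6XX_VPC_SO_PROG_B_BUF(0) |
                 A6XX_VPC_SO_PROG_B_OFF(4);
   tf->prog[3] = A6XX_VPC_SO_PROG_A_EN | A6XX_VPC_SO_PROG_A_BUF(0) |
                 A6XX_VPC_SO_PROG_A_OFF(8);
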
+
+static void
+tu6_emit_const(struct tu_cs *cs, uint32_t opcode, uint32_t base,
+               enum a6xx_state_block block, uint32_t offset,
+               uint32_t size, uint32_t *dwords)
+{
+   assert(size % 4 == 0);
+
+   tu_cs_emit_pkt7(cs, opcode, 3 + size);
+   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(base) |
+         CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+         CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+         CP_LOAD_STATE6_0_STATE_BLOCK(block) |
+         CP_LOAD_STATE6_0_NUM_UNIT(size / 4));
+
+   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
+   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
+   dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
+
+   tu_cs_emit_array(cs, dwords, size);
+}
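
Given the commit title, push constants are presumably loaded into the constant
file the same way, i.e. with a direct CP_LOAD_STATE6 rather than through a
buffer. A minimal sketch of how the command-buffer side might use this helper
for that, assuming the push_consts range exposes lo/count in vec4 units and
push_constants holds the raw data written by vkCmdPushConstants() (the sketch's
function name and those assumptions are illustrative, not taken from this
patch):

   /* Hedged sketch: load push constants directly via tu6_emit_const().
    * tu6_emit_const() takes base in vec4s, offset in bytes and size in
    * dwords (multiple of 4); everything but the helper itself is assumed.
    */
   static void
   emit_push_constants_sketch(struct tu_cs *cs,
                              const struct tu_program_descriptor_linkage *link,
                              uint32_t *push_constants)
   {
      if (!link->push_consts.count)
         return;

      tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, link->push_consts.lo,
                     SB6_VS_SHADER, link->push_consts.lo * 16,
                     link->push_consts.count * 4, push_constants);
   }
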
+
+static void
+tu6_emit_link_map(struct tu_cs *cs,
+                  const struct ir3_shader_variant *producer,
+                  const struct ir3_shader_variant *consumer)
+{
+   const struct ir3_const_state *const_state = &consumer->shader->const_state;
+   uint32_t base = const_state->offsets.primitive_map;
+   uint32_t patch_locs[MAX_VARYING] = { }, num_loc;
+   num_loc = ir3_link_geometry_stages(producer, consumer, patch_locs);
+   int size = DIV_ROUND_UP(num_loc, 4);
+
+   size = (MIN2(size + base, consumer->constlen) - base) * 4;
+
+   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, base, SB6_GS_SHADER, 0, size,
+                  patch_locs);
+}
+
+static uint16_t
+gl_primitive_to_tess(uint16_t primitive)
+{
+   switch (primitive) {
+   case GL_POINTS:
+      return TESS_POINTS;
+   case GL_LINE_STRIP:
+      return TESS_LINES;
+   case GL_TRIANGLE_STRIP:
+      return TESS_CW_TRIS;
+   default:
+      unreachable("");
+   }
+}
+
 static void
 tu6_emit_vpc(struct tu_cs *cs,
              const struct ir3_shader_variant *vs,
+             const struct ir3_shader_variant *gs,
              const struct ir3_shader_variant *fs,
-             bool binning_pass)
+             bool binning_pass,
+             struct tu_streamout_state *tf)
 {
+   bool has_gs = gs->type != MESA_SHADER_NONE;
+   const struct ir3_shader_variant *last_shader = has_gs ? gs : vs;
    struct ir3_shader_linkage linkage = { 0 };
-   ir3_link_shaders(&linkage, vs, fs);
+   ir3_link_shaders(&linkage, last_shader, fs);
 
-   if (vs->shader->stream_output.num_outputs && !binning_pass)
-      tu_finishme("stream output");
+   if (last_shader->shader->stream_output.num_outputs)
+      tu6_link_streamout(&linkage, last_shader);
 
    BITSET_DECLARE(vpc_var_enables, 128) = { 0 };
    for (uint32_t i = 0; i < linkage.cnt; i++) {
@@ -589,10 +733,17 @@ tu6_emit_vpc(struct tu_cs *cs,
 
    /* a6xx finds position/pointsize at the end */
    const uint32_t position_regid =
-      ir3_find_output_regid(vs, VARYING_SLOT_POS);
+      ir3_find_output_regid(last_shader, VARYING_SLOT_POS);
    const uint32_t pointsize_regid =
-      ir3_find_output_regid(vs, VARYING_SLOT_PSIZ);
-   uint32_t pointsize_loc = 0xff, position_loc = 0xff;
+      ir3_find_output_regid(last_shader, VARYING_SLOT_PSIZ);
+   const uint32_t layer_regid = has_gs ?
+      ir3_find_output_regid(gs, VARYING_SLOT_LAYER) : regid(63, 0);
+
+   uint32_t pointsize_loc = 0xff, position_loc = 0xff, layer_loc = 0xff;
+   if (layer_regid != regid(63, 0)) {
+      layer_loc = linkage.max_loc;
+      ir3_link_add(&linkage, layer_regid, 0x1, linkage.max_loc);
+   }
    if (position_regid != regid(63, 0)) {
       position_loc = linkage.max_loc;
       ir3_link_add(&linkage, position_regid, 0xf, linkage.max_loc);
@@ -602,27 +753,34 @@ tu6_emit_vpc(struct tu_cs *cs,
       ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);
    }
 
-   /* map vs outputs to VPC */
+   if (last_shader->shader->stream_output.num_outputs)
+      tu6_setup_streamout(last_shader, &linkage, tf);
+
+   /* map outputs of the last shader to VPC */
    assert(linkage.cnt <= 32);
-   const uint32_t sp_vs_out_count = (linkage.cnt + 1) / 2;
-   const uint32_t sp_vs_vpc_dst_count = (linkage.cnt + 3) / 4;
-   uint32_t sp_vs_out[16];
-   uint32_t sp_vs_vpc_dst[8];
-   sp_vs_out[sp_vs_out_count - 1] = 0;
-   sp_vs_vpc_dst[sp_vs_vpc_dst_count - 1] = 0;
+   const uint32_t sp_out_count = DIV_ROUND_UP(linkage.cnt, 2);
+   const uint32_t sp_vpc_dst_count = DIV_ROUND_UP(linkage.cnt, 4);
+   uint32_t sp_out[16];
+   uint32_t sp_vpc_dst[8];
    for (uint32_t i = 0; i < linkage.cnt; i++) {
-      ((uint16_t *) sp_vs_out)[i] =
+      ((uint16_t *) sp_out)[i] =
          A6XX_SP_VS_OUT_REG_A_REGID(linkage.var[i].regid) |
          A6XX_SP_VS_OUT_REG_A_COMPMASK(linkage.var[i].compmask);
-      ((uint8_t *) sp_vs_vpc_dst)[i] =
+      ((uint8_t *) sp_vpc_dst)[i] =
          A6XX_SP_VS_VPC_DST_REG_OUTLOC0(linkage.var[i].loc);
    }
 
-   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_OUT_REG(0), sp_vs_out_count);
-   tu_cs_emit_array(cs, sp_vs_out, sp_vs_out_count);
+   if (has_gs)
+      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_OUT_REG(0), sp_out_count);
+   else
+      tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_OUT_REG(0), sp_out_count);
+   tu_cs_emit_array(cs, sp_out, sp_out_count);
 
-   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_VPC_DST_REG(0), sp_vs_vpc_dst_count);
-   tu_cs_emit_array(cs, sp_vs_vpc_dst, sp_vs_vpc_dst_count);
+   if (has_gs)
+      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_VPC_DST_REG(0), sp_vpc_dst_count);
+   else
+      tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_VPC_DST_REG(0), sp_vpc_dst_count);
+   tu_cs_emit_array(cs, sp_vpc_dst, sp_vpc_dst_count);
 
    tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
    tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs->total_in) |
@@ -634,12 +792,84 @@ tu6_emit_vpc(struct tu_cs *cs,
                      A6XX_VPC_PACK_PSIZELOC(pointsize_loc) |
                      A6XX_VPC_PACK_STRIDE_IN_VPC(linkage.max_loc));
 
+   if (has_gs) {
+      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CTRL_REG0, 1);
+      tu_cs_emit(cs, A6XX_SP_GS_CTRL_REG0_THREADSIZE(TWO_QUADS) |
+            A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(gs->info.max_reg + 1) |
+            A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(gs->branchstack) |
+            COND(gs->need_pixlod, A6XX_SP_GS_CTRL_REG0_PIXLODENABLE));
+
+      tu6_emit_link_map(cs, vs, gs);
+
+      uint32_t primitive_regid =
+            ir3_find_sysval_regid(gs, SYSTEM_VALUE_PRIMITIVE_ID);
+      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK_GS, 1);
+      tu_cs_emit(cs, A6XX_VPC_PACK_GS_POSITIONLOC(position_loc) |
+             A6XX_VPC_PACK_GS_PSIZELOC(pointsize_loc) |
+             A6XX_VPC_PACK_GS_STRIDE_IN_VPC(linkage.max_loc));
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9105, 1);
+      tu_cs_emit(cs, A6XX_VPC_UNKNOWN_9105_LAYERLOC(layer_loc) | 0xff00);
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_809C, 1);
+      tu_cs_emit(cs, CONDREG(layer_regid,
+            A6XX_GRAS_UNKNOWN_809C_GS_WRITES_LAYER));
+
+      uint32_t flags_regid = ir3_find_output_regid(gs,
+            VARYING_SLOT_GS_VERTEX_FLAGS_IR3);
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL_GS, 1);
+      tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_GS_GSOUT(linkage.cnt) |
+            A6XX_SP_PRIMITIVE_CNTL_GS_FLAGS_REGID(flags_regid));
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_2, 1);
+      tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_2_STRIDE_IN_VPC(linkage.max_loc) |
+            CONDREG(pointsize_regid, A6XX_PC_PRIMITIVE_CNTL_2_PSIZE) |
+            CONDREG(layer_regid, A6XX_PC_PRIMITIVE_CNTL_2_LAYER) |
+            CONDREG(primitive_regid, A6XX_PC_PRIMITIVE_CNTL_2_PRIMITIVE_ID));
+
+      uint32_t vertices_out = gs->shader->nir->info.gs.vertices_out - 1;
+      uint16_t output =
+            gl_primitive_to_tess(gs->shader->nir->info.gs.output_primitive);
+      uint32_t invocations = gs->shader->nir->info.gs.invocations - 1;
+      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_5, 1);
+      tu_cs_emit(cs,
+            A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(vertices_out) |
+            A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(output) |
+            A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(invocations));
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_3, 1);
+      tu_cs_emit(cs, 0);
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8003, 1);
+      tu_cs_emit(cs, 0);
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9100, 1);
+      tu_cs_emit(cs, 0xff);
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_UNKNOWN_9102, 1);
+      tu_cs_emit(cs, 0xffff00);
+
+      /* Size of the per-primitive allocation in ldlw memory, in vec4s. */
+      uint32_t vec4_size =
+         gs->shader->nir->info.gs.vertices_in *
+         DIV_ROUND_UP(vs->shader->output_size, 4);
+      tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 1);
+      tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(vec4_size));
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_PC_UNKNOWN_9B07, 1);
+      tu_cs_emit(cs, 0);
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_UNKNOWN_A871, 1);
+      tu_cs_emit(cs, vs->shader->output_size);
+   }
+
    tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL, 1);
    tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_VSOUT(linkage.cnt));
 
    tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_1, 1);
    tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(linkage.max_loc) |
-                     (vs->writes_psize ? A6XX_PC_PRIMITIVE_CNTL_1_PSIZE : 0));
+         (last_shader->writes_psize ? A6XX_PC_PRIMITIVE_CNTL_1_PSIZE : 0));
 }
 
 static int
@@ -1030,11 +1260,33 @@ tu6_emit_immediates(struct tu_cs *cs, const struct ir3_shader_variant *v,
    }
 }
 
+static void
+tu6_emit_geometry_consts(struct tu_cs *cs,
+                         const struct ir3_shader_variant *vs,
+                         const struct ir3_shader_variant *gs) {
+   unsigned num_vertices = gs->shader->nir->info.gs.vertices_in;
+
+   uint32_t params[4] = {
+      vs->shader->output_size * num_vertices * 4,  /* primitive stride */
+      vs->shader->output_size * 4,                 /* vertex stride */
+      0,
+      0,
+   };
+   uint32_t vs_base = vs->shader->const_state.offsets.primitive_param;
+   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, vs_base, SB6_VS_SHADER, 0,
+                  ARRAY_SIZE(params), params);
+
+   uint32_t gs_base = gs->shader->const_state.offsets.primitive_param;
+   tu6_emit_const(cs, CP_LOAD_STATE6_GEOM, gs_base, SB6_GS_SHADER, 0,
+                  ARRAY_SIZE(params), params);
+}
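
As a rough worked example of the stride math (values assumed, not from the
patch), and treating output_size as dwords of VS outputs per vertex, as the
DIV_ROUND_UP(.., 4) to vec4s above suggests: a VS writing 8 dwords per vertex
feeding a GS that consumes triangles gives byte strides of

   unsigned output_size  = 8;   /* dwords of VS outputs per vertex (assumed) */
   unsigned num_vertices = 3;   /* triangles into the GS */
   uint32_t params[4] = {
      output_size * num_vertices * 4,   /* primitive stride = 96 bytes */
      output_size * 4,                  /* vertex stride = 32 bytes */
      0,
      0,
   };
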
+
 static void
 tu6_emit_program(struct tu_cs *cs,
                  const struct tu_pipeline_builder *builder,
                  const struct tu_bo *binary_bo,
-                 bool binning_pass)
+                 bool binning_pass,
+                 struct tu_streamout_state *tf)
 {
    static const struct ir3_shader_variant dummy_variant = {
       .type = MESA_SHADER_NONE
@@ -1058,9 +1310,15 @@ tu6_emit_program(struct tu_cs *cs,
       builder->shaders[MESA_SHADER_FRAGMENT]
          ? &builder->shaders[MESA_SHADER_FRAGMENT]->variants[0]
          : &dummy_variant;
+   bool has_gs = gs->type != MESA_SHADER_NONE;
 
    if (binning_pass) {
-      vs = &builder->shaders[MESA_SHADER_VERTEX]->variants[1];
+      /* if we have streamout, use full VS in binning pass, as the
+       * binning pass VS will have outputs other than position/psize
+       * stripped out:
+       */
+      if (vs->shader->stream_output.num_outputs == 0)
+         vs = &builder->shaders[MESA_SHADER_VERTEX]->variants[1];
       fs = &dummy_variant;
    }
 
@@ -1070,19 +1328,25 @@ tu6_emit_program(struct tu_cs *cs,
    tu6_emit_gs_config(cs, builder->shaders[MESA_SHADER_GEOMETRY], gs);
    tu6_emit_fs_config(cs, builder->shaders[MESA_SHADER_FRAGMENT], fs);
 
-   tu6_emit_vs_system_values(cs, vs);
-   tu6_emit_vpc(cs, vs, fs, binning_pass);
+   tu6_emit_vs_system_values(cs, vs, gs);
+   tu6_emit_vpc(cs, vs, gs, fs, binning_pass, tf);
    tu6_emit_vpc_varying_modes(cs, fs, binning_pass);
    tu6_emit_fs_inputs(cs, fs);
    tu6_emit_fs_outputs(cs, fs, builder->color_attachment_count);
 
    tu6_emit_shader_object(cs, MESA_SHADER_VERTEX, vs, binary_bo,
       binning_pass ? builder->binning_vs_offset : builder->shader_offsets[MESA_SHADER_VERTEX]);
-
+   if (has_gs)
+      tu6_emit_shader_object(cs, MESA_SHADER_GEOMETRY, gs, binary_bo,
+                             builder->shader_offsets[MESA_SHADER_GEOMETRY]);
    tu6_emit_shader_object(cs, MESA_SHADER_FRAGMENT, fs, binary_bo,
                           builder->shader_offsets[MESA_SHADER_FRAGMENT]);
 
    tu6_emit_immediates(cs, vs, CP_LOAD_STATE6_GEOM, SB6_VS_SHADER);
+   if (has_gs) {
+      tu6_emit_immediates(cs, gs, CP_LOAD_STATE6_GEOM, SB6_GS_SHADER);
+      tu6_emit_geometry_consts(cs, vs, gs);
+   }
    if (!binning_pass)
       tu6_emit_immediates(cs, fs, CP_LOAD_STATE6_FRAG, SB6_FS_SHADER);
 }
@@ -1090,61 +1354,76 @@ tu6_emit_program(struct tu_cs *cs,
 static void
 tu6_emit_vertex_input(struct tu_cs *cs,
                       const struct ir3_shader_variant *vs,
-                      const VkPipelineVertexInputStateCreateInfo *vi_info,
+                      const VkPipelineVertexInputStateCreateInfo *info,
                       uint8_t bindings[MAX_VERTEX_ATTRIBS],
-                      uint16_t strides[MAX_VERTEX_ATTRIBS],
-                      uint16_t offsets[MAX_VERTEX_ATTRIBS],
                       uint32_t *count)
 {
+   uint32_t vfd_fetch_idx = 0;
    uint32_t vfd_decode_idx = 0;
+   uint32_t binding_instanced = 0; /* bitmask of instanced bindings */
 
-   for (uint32_t i = 0; i < vs->inputs_count; i++) {
-      if (vs->inputs[i].sysval || !vs->inputs[i].compmask)
-         continue;
+   for (uint32_t i = 0; i < info->vertexBindingDescriptionCount; i++) {
+      const VkVertexInputBindingDescription *binding =
+         &info->pVertexBindingDescriptions[i];
 
-      const VkVertexInputAttributeDescription *vi_attr =
-         tu_find_vertex_input_attribute(vi_info, vs->inputs[i].slot);
-      const VkVertexInputBindingDescription *vi_binding =
-         tu_find_vertex_input_binding(vi_info, vi_attr);
-      assert(vi_attr && vi_binding);
+      tu_cs_emit_regs(cs,
+                      A6XX_VFD_FETCH_STRIDE(vfd_fetch_idx, binding->stride));
 
-      const struct tu_native_format format = tu6_format_vtx(vi_attr->format);
+      if (binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
+         binding_instanced |= 1 << binding->binding;
 
-      uint32_t vfd_decode = A6XX_VFD_DECODE_INSTR_IDX(vfd_decode_idx) |
-                            A6XX_VFD_DECODE_INSTR_FORMAT(format.fmt) |
-                            A6XX_VFD_DECODE_INSTR_SWAP(format.swap) |
-                            A6XX_VFD_DECODE_INSTR_UNK30;
-      if (vi_binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
-         vfd_decode |= A6XX_VFD_DECODE_INSTR_INSTANCED;
-      if (!vk_format_is_int(vi_attr->format))
-         vfd_decode |= A6XX_VFD_DECODE_INSTR_FLOAT;
+      bindings[vfd_fetch_idx] = binding->binding;
+      vfd_fetch_idx++;
+   }
 
-      const uint32_t vfd_decode_step_rate = 1;
+   /* TODO: emit all VFD_DECODE/VFD_DEST_CNTL in same (two) pkt4 */
 
-      const uint32_t vfd_dest_cntl =
-         A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(vs->inputs[i].compmask) |
-         A6XX_VFD_DEST_CNTL_INSTR_REGID(vs->inputs[i].regid);
+   for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
+      const VkVertexInputAttributeDescription *attr =
+         &info->pVertexAttributeDescriptions[i];
+      uint32_t binding_idx, input_idx;
 
-      tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DECODE(vfd_decode_idx), 2);
-      tu_cs_emit(cs, vfd_decode);
-      tu_cs_emit(cs, vfd_decode_step_rate);
+      for (binding_idx = 0; binding_idx < vfd_fetch_idx; binding_idx++) {
+         if (bindings[binding_idx] == attr->binding)
+            break;
+      }
+      assert(binding_idx < vfd_fetch_idx);
 
-      tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DEST_CNTL(vfd_decode_idx), 1);
-      tu_cs_emit(cs, vfd_dest_cntl);
+      for (input_idx = 0; input_idx < vs->inputs_count; input_idx++) {
+         if ((vs->inputs[input_idx].slot - VERT_ATTRIB_GENERIC0) == attr->location)
+            break;
+      }
 
-      bindings[vfd_decode_idx] = vi_binding->binding;
-      strides[vfd_decode_idx] = vi_binding->stride;
-      offsets[vfd_decode_idx] = vi_attr->offset;
+      /* attribute not used, skip it */
+      if (input_idx == vs->inputs_count)
+         continue;
+
+      const struct tu_native_format format = tu6_format_vtx(attr->format);
+      tu_cs_emit_regs(cs,
+                      A6XX_VFD_DECODE_INSTR(vfd_decode_idx,
+                        .idx = binding_idx,
+                        .offset = attr->offset,
+                        .instanced = binding_instanced & (1 << attr->binding),
+                        .format = format.fmt,
+                        .swap = format.swap,
+                        .unk30 = 1,
+                        ._float = !vk_format_is_int(attr->format)),
+                      A6XX_VFD_DECODE_STEP_RATE(vfd_decode_idx, 1));
+
+      tu_cs_emit_regs(cs,
+                      A6XX_VFD_DEST_CNTL_INSTR(vfd_decode_idx,
+                        .writemask = vs->inputs[input_idx].compmask,
+                        .regid = vs->inputs[input_idx].regid));
 
       vfd_decode_idx++;
-      assert(vfd_decode_idx <= MAX_VERTEX_ATTRIBS);
    }
 
-   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_0, 1);
-   tu_cs_emit(
-      cs, A6XX_VFD_CONTROL_0_VTXCNT(vfd_decode_idx) | (vfd_decode_idx << 8));
+   tu_cs_emit_regs(cs,
+                   A6XX_VFD_CONTROL_0(
+                     .fetch_cnt = vfd_fetch_idx,
+                     .decode_cnt = vfd_decode_idx));
 
-   *count = vfd_decode_idx;
+   *count = vfd_fetch_idx;
 }
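
A small illustration of what the rewritten loops emit for a typical pipeline
(the Vulkan structures below are an assumed example, not part of the patch, and
the VS is assumed to read both locations): one interleaved binding with two
vec4 attributes yields one VFD_FETCH_STRIDE, two VFD_DECODE entries and a
VFD_CONTROL_0 with fetch_cnt = 1, decode_cnt = 2.

   const VkVertexInputBindingDescription binding = {
      .binding = 0, .stride = 32, .inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
   };
   const VkVertexInputAttributeDescription attrs[2] = {
      { .location = 0, .binding = 0,
        .format = VK_FORMAT_R32G32B32A32_SFLOAT, .offset = 0 },
      { .location = 1, .binding = 0,
        .format = VK_FORMAT_R32G32B32A32_SFLOAT, .offset = 16 },
   };
   const VkPipelineVertexInputStateCreateInfo vi = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
      .vertexBindingDescriptionCount = 1,
      .pVertexBindingDescriptions = &binding,
      .vertexAttributeDescriptionCount = 2,
      .pVertexAttributeDescriptions = attrs,
   };
   /* tu6_emit_vertex_input() would then emit, roughly:
    *   VFD_FETCH_STRIDE[0] = 32                      bindings[0] = 0
    *   VFD_DECODE_INSTR[0] = idx 0, offset 0,  float   (+ step rate 1)
    *   VFD_DECODE_INSTR[1] = idx 0, offset 16, float   (+ step rate 1)
    *   VFD_DEST_CNTL[0..1] = regid/compmask of VS inputs 0 and 1
    *   VFD_CONTROL_0       = fetch_cnt 1, decode_cnt 2
    * and *count comes back as 1 (the number of bindings).
    */
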
 
 static uint32_t
@@ -1207,6 +1486,17 @@ tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport)
    tu_cs_emit(cs,
               A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(guardband_adj.width) |
                  A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(guardband_adj.height));
+
+   float z_clamp_min = MIN2(viewport->minDepth, viewport->maxDepth);
+   float z_clamp_max = MAX2(viewport->minDepth, viewport->maxDepth);
+
+   tu_cs_emit_regs(cs,
+                   A6XX_GRAS_CL_Z_CLAMP_MIN(z_clamp_min),
+                   A6XX_GRAS_CL_Z_CLAMP_MAX(z_clamp_max));
+
+   tu_cs_emit_regs(cs,
+                   A6XX_RB_Z_CLAMP_MIN(z_clamp_min),
+                   A6XX_RB_Z_CLAMP_MAX(z_clamp_max));
 }
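
The MIN2/MAX2 ordering matters because Vulkan allows minDepth to be greater
than maxDepth (a reversed depth range); a small assumed example:

   /* viewport minDepth = 1.0, maxDepth = 0.0 (reversed-Z):
    *   z_clamp_min = MIN2(1.0, 0.0) = 0.0
    *   z_clamp_max = MAX2(1.0, 0.0) = 1.0
    * so GRAS_CL/RB clamp to the ordered range [0.0, 1.0] either way.
    */
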
 
 void
@@ -1228,12 +1518,8 @@ tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor)
 static void
 tu6_emit_gras_unknowns(struct tu_cs *cs)
 {
-   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_DISABLE_CNTL, 1);
-   tu_cs_emit(cs, A6XX_GRAS_DISABLE_CNTL_VP_CLIP_CODE_IGNORE);
    tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_UNKNOWN_8001, 1);
    tu_cs_emit(cs, 0x0);
-   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_LAYER_CNTL, 1);
-   tu_cs_emit(cs, 0x0);
 }
 
 static void
@@ -1303,7 +1589,8 @@ tu6_emit_alpha_control_disable(struct tu_cs *cs)
 
 static void
 tu6_emit_depth_control(struct tu_cs *cs,
-                       const VkPipelineDepthStencilStateCreateInfo *ds_info)
+                       const VkPipelineDepthStencilStateCreateInfo *ds_info,
+                       const VkPipelineRasterizationStateCreateInfo *rast_info)
 {
    assert(!ds_info->depthBoundsTestEnable);
 
@@ -1314,6 +1601,9 @@ tu6_emit_depth_control(struct tu_cs *cs,
          A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(ds_info->depthCompareOp)) |
          A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;
 
+      if (rast_info->depthClampEnable)
+         rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE;
+
       if (ds_info->depthWriteEnable)
          rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
    }
@@ -1575,9 +1865,16 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder)
 
    if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
       const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
+      const struct ir3_shader_variant *variant;
+
+      if (vs->ir3_shader.stream_output.num_outputs)
+         variant = &vs->variants[0];
+      else
+         variant = &vs->variants[1];
+
       builder->binning_vs_offset = builder->shader_total_size;
       builder->shader_total_size +=
-         sizeof(uint32_t) * vs->variants[1].info.sizedwords;
+         sizeof(uint32_t) * variant->info.sizedwords;
    }
 
    return VK_SUCCESS;
@@ -1609,8 +1906,19 @@ tu_pipeline_builder_upload_shaders(struct tu_pipeline_builder *builder,
 
    if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
       const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
-      memcpy(bo->map + builder->binning_vs_offset, vs->binning_binary,
-             sizeof(uint32_t) * vs->variants[1].info.sizedwords);
+      const struct ir3_shader_variant *variant;
+      void *bin;
+
+      if (vs->ir3_shader.stream_output.num_outputs) {
+         variant = &vs->variants[0];
+         bin = vs->binary;
+      } else {
+         variant = &vs->variants[1];
+         bin = vs->binning_binary;
+      }
+
+      memcpy(bo->map + builder->binning_vs_offset, bin,
+             sizeof(uint32_t) * variant->info.sizedwords);
    }
 
    return VK_SUCCESS;
@@ -1640,6 +1948,7 @@ tu_pipeline_set_linkage(struct tu_program_descriptor_linkage *link,
    link->ubo_state = v->shader->ubo_state;
    link->const_state = v->shader->const_state;
    link->constlen = v->constlen;
+   link->push_consts = shader->push_consts;
    link->texture_map = shader->texture_map;
    link->sampler_map = shader->sampler_map;
    link->ubo_map = shader->ubo_map;
@@ -1653,14 +1962,20 @@ tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
 {
    struct tu_cs prog_cs;
    tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
-   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, false);
+   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, false,
+                    &pipeline->streamout);
    pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
 
    tu_cs_begin_sub_stream(&pipeline->cs, 512, &prog_cs);
-   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, true);
+   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, true,
+                    &pipeline->streamout);
    pipeline->program.binning_state_ib =
       tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
 
+   VkShaderStageFlags stages = 0;
+   for (unsigned i = 0; i < builder->create_info->stageCount; i++) {
+      stages |= builder->create_info->pStages[i].stage;
+   }
+   pipeline->active_stages = stages;
+
    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
       if (!builder->shaders[i])
          continue;
@@ -1681,18 +1996,16 @@ tu_pipeline_builder_parse_vertex_input(struct tu_pipeline_builder *builder,
 
    struct tu_cs vi_cs;
    tu_cs_begin_sub_stream(&pipeline->cs,
-                          MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
+                          MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
    tu6_emit_vertex_input(&vi_cs, &vs->variants[0], vi_info,
-                         pipeline->vi.bindings, pipeline->vi.strides,
-                         pipeline->vi.offsets, &pipeline->vi.count);
+                         pipeline->vi.bindings, &pipeline->vi.count);
    pipeline->vi.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
 
    if (vs->has_binning_pass) {
       tu_cs_begin_sub_stream(&pipeline->cs,
-                             MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
+                             MAX_VERTEX_ATTRIBS * 7 + 2, &vi_cs);
       tu6_emit_vertex_input(
          &vi_cs, &vs->variants[1], vi_info, pipeline->vi.binning_bindings,
-         pipeline->vi.binning_strides, pipeline->vi.binning_offsets,
          &pipeline->vi.binning_count);
       pipeline->vi.binning_state_ib =
          tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
@@ -1729,7 +2042,7 @@ tu_pipeline_builder_parse_viewport(struct tu_pipeline_builder *builder,
       builder->create_info->pViewportState;
 
    struct tu_cs vp_cs;
-   tu_cs_begin_sub_stream(&pipeline->cs, 15, &vp_cs);
+   tu_cs_begin_sub_stream(&pipeline->cs, 21, &vp_cs);
 
    if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
       assert(vp_info->viewportCount == 1);
@@ -1751,12 +2064,19 @@ tu_pipeline_builder_parse_rasterization(struct tu_pipeline_builder *builder,
    const VkPipelineRasterizationStateCreateInfo *rast_info =
       builder->create_info->pRasterizationState;
 
-   assert(!rast_info->depthClampEnable);
    assert(rast_info->polygonMode == VK_POLYGON_MODE_FILL);
 
    struct tu_cs rast_cs;
    tu_cs_begin_sub_stream(&pipeline->cs, 20, &rast_cs);
 
+   tu_cs_emit_regs(&rast_cs,
+                   A6XX_GRAS_CL_CNTL(
+                     .znear_clip_disable = rast_info->depthClampEnable,
+                     .zfar_clip_disable = rast_info->depthClampEnable,
+                     .unk5 = rast_info->depthClampEnable,
+                     .zero_gb_scale_z = 1,
+                     .vp_clip_code_ignore = 1));
    /* move to hw ctx init? */
    tu6_emit_gras_unknowns(&rast_cs);
    tu6_emit_point_size(&rast_cs);
@@ -1804,7 +2124,7 @@ tu_pipeline_builder_parse_depth_stencil(struct tu_pipeline_builder *builder,
    /* move to hw ctx init? */
    tu6_emit_alpha_control_disable(&ds_cs);
 
-   tu6_emit_depth_control(&ds_cs, ds_info);
+   tu6_emit_depth_control(&ds_cs, ds_info,
+                          builder->create_info->pRasterizationState);
    tu6_emit_stencil_control(&ds_cs, ds_info);
 
    if (!(pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {