freedreno/ir3: rename has_kill to no_earlyz
[mesa.git] / src / freedreno / vulkan / tu_pipeline.c
index ebed283a8cc84f11492f01789ff57e4414691cb5..2df032923cfe5ab1f702bab41908bdad2efdd92d 100644 (file)
@@ -46,11 +46,17 @@ struct tu_pipeline_builder
    const VkAllocationCallbacks *alloc;
    const VkGraphicsPipelineCreateInfo *create_info;
 
+   struct tu_shader *shaders[MESA_SHADER_STAGES];
+   uint32_t shader_offsets[MESA_SHADER_STAGES];
+   uint32_t binning_vs_offset;
+   uint32_t shader_total_size;
+
    bool rasterizer_discard;
    /* these states are affected by rasterizer_discard */
    VkSampleCountFlagBits samples;
    bool use_depth_stencil_attachment;
    bool use_color_attachments;
+   uint32_t color_attachment_count;
    VkFormat color_attachment_formats[MAX_RTS];
 };
 
@@ -82,6 +88,54 @@ tu_dynamic_state_bit(VkDynamicState state)
    }
 }
 
+static gl_shader_stage
+tu_shader_stage(VkShaderStageFlagBits stage)
+{
+   switch (stage) {
+   case VK_SHADER_STAGE_VERTEX_BIT:
+      return MESA_SHADER_VERTEX;
+   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+      return MESA_SHADER_TESS_CTRL;
+   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+      return MESA_SHADER_TESS_EVAL;
+   case VK_SHADER_STAGE_GEOMETRY_BIT:
+      return MESA_SHADER_GEOMETRY;
+   case VK_SHADER_STAGE_FRAGMENT_BIT:
+      return MESA_SHADER_FRAGMENT;
+   case VK_SHADER_STAGE_COMPUTE_BIT:
+      return MESA_SHADER_COMPUTE;
+   default:
+      unreachable("invalid VkShaderStageFlagBits");
+      return MESA_SHADER_NONE;
+   }
+}
+
+static const VkVertexInputAttributeDescription *
+tu_find_vertex_input_attribute(
+   const VkPipelineVertexInputStateCreateInfo *vi_info, uint32_t slot)
+{
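+   /* shader VS inputs live in the VERT_ATTRIB_GENERIC0.. slots; Vulkan
+    * attribute locations are relative to GENERIC0, hence the adjustment
+    */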
+   assert(slot >= VERT_ATTRIB_GENERIC0);
+   slot -= VERT_ATTRIB_GENERIC0;
+   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
+      if (vi_info->pVertexAttributeDescriptions[i].location == slot)
+         return &vi_info->pVertexAttributeDescriptions[i];
+   }
+   return NULL;
+}
+
+static const VkVertexInputBindingDescription *
+tu_find_vertex_input_binding(
+   const VkPipelineVertexInputStateCreateInfo *vi_info,
+   const VkVertexInputAttributeDescription *vi_attr)
+{
+   assert(vi_attr);
+   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
+      if (vi_info->pVertexBindingDescriptions[i].binding == vi_attr->binding)
+         return &vi_info->pVertexBindingDescriptions[i];
+   }
+   return NULL;
+}
+
 static bool
 tu_logic_op_reads_dst(VkLogicOp op)
 {
@@ -303,6 +357,649 @@ tu6_blend_op(VkBlendOp op)
    }
 }
 
+static void
+tu6_emit_vs_config(struct tu_cs *cs, const struct ir3_shader_variant *vs)
+{
+   uint32_t sp_vs_ctrl =
+      A6XX_SP_VS_CTRL_REG0_THREADSIZE(FOUR_QUADS) |
+      A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(vs->info.max_reg + 1) |
+      A6XX_SP_VS_CTRL_REG0_MERGEDREGS |
+      A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(vs->branchstack);
+   if (vs->num_samp)
+      sp_vs_ctrl |= A6XX_SP_VS_CTRL_REG0_PIXLODENABLE;
+
+   uint32_t sp_vs_config = A6XX_SP_VS_CONFIG_NTEX(vs->num_samp) |
+                           A6XX_SP_VS_CONFIG_NSAMP(vs->num_samp);
+   if (vs->instrlen)
+      sp_vs_config |= A6XX_SP_VS_CONFIG_ENABLED;
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_CTRL_REG0, 1);
+   tu_cs_emit(cs, sp_vs_ctrl);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_CONFIG, 2);
+   tu_cs_emit(cs, sp_vs_config);
+   tu_cs_emit(cs, vs->instrlen);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_VS_CNTL, 1);
+   tu_cs_emit(cs, A6XX_HLSQ_VS_CNTL_CONSTLEN(align(vs->constlen, 4)) | 0x100);
+}
+
+static void
+tu6_emit_hs_config(struct tu_cs *cs, const struct ir3_shader_variant *hs)
+{
+   uint32_t sp_hs_config = 0;
+   if (hs->instrlen)
+      sp_hs_config |= A6XX_SP_HS_CONFIG_ENABLED;
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_UNKNOWN_A831, 1);
+   tu_cs_emit(cs, 0);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_HS_CONFIG, 2);
+   tu_cs_emit(cs, sp_hs_config);
+   tu_cs_emit(cs, hs->instrlen);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_HS_CNTL, 1);
+   tu_cs_emit(cs, A6XX_HLSQ_HS_CNTL_CONSTLEN(align(hs->constlen, 4)));
+}
+
+static void
+tu6_emit_ds_config(struct tu_cs *cs, const struct ir3_shader_variant *ds)
+{
+   uint32_t sp_ds_config = 0;
+   if (ds->instrlen)
+      sp_ds_config |= A6XX_SP_DS_CONFIG_ENABLED;
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_DS_CONFIG, 2);
+   tu_cs_emit(cs, sp_ds_config);
+   tu_cs_emit(cs, ds->instrlen);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_DS_CNTL, 1);
+   tu_cs_emit(cs, A6XX_HLSQ_DS_CNTL_CONSTLEN(align(ds->constlen, 4)));
+}
+
+static void
+tu6_emit_gs_config(struct tu_cs *cs, const struct ir3_shader_variant *gs)
+{
+   uint32_t sp_gs_config = 0;
+   if (gs->instrlen)
+      sp_gs_config |= A6XX_SP_GS_CONFIG_ENABLED;
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_UNKNOWN_A871, 1);
+   tu_cs_emit(cs, 0);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_GS_CONFIG, 2);
+   tu_cs_emit(cs, sp_gs_config);
+   tu_cs_emit(cs, gs->instrlen);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_GS_CNTL, 1);
+   tu_cs_emit(cs, A6XX_HLSQ_GS_CNTL_CONSTLEN(align(gs->constlen, 4)));
+}
+
+static void
+tu6_emit_fs_config(struct tu_cs *cs, const struct ir3_shader_variant *fs)
+{
+   uint32_t sp_fs_ctrl =
+      A6XX_SP_FS_CTRL_REG0_THREADSIZE(FOUR_QUADS) | 0x1000000 |
+      A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(fs->info.max_reg + 1) |
+      A6XX_SP_FS_CTRL_REG0_MERGEDREGS |
+      A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(fs->branchstack);
+   if (fs->total_in > 0 || fs->frag_coord)
+      sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_VARYING;
+   if (fs->num_samp > 0)
+      sp_fs_ctrl |= A6XX_SP_FS_CTRL_REG0_PIXLODENABLE;
+
+   uint32_t sp_fs_config = A6XX_SP_FS_CONFIG_NTEX(fs->num_samp) |
+                           A6XX_SP_FS_CONFIG_NSAMP(fs->num_samp);
+   if (fs->instrlen)
+      sp_fs_config |= A6XX_SP_FS_CONFIG_ENABLED;
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A99E, 1);
+   tu_cs_emit(cs, 0x7fc0);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A9A8, 1);
+   tu_cs_emit(cs, 0);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_AB00, 1);
+   tu_cs_emit(cs, 0x5);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_CTRL_REG0, 1);
+   tu_cs_emit(cs, sp_fs_ctrl);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_CONFIG, 2);
+   tu_cs_emit(cs, sp_fs_config);
+   tu_cs_emit(cs, fs->instrlen);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_FS_CNTL, 1);
+   tu_cs_emit(cs, A6XX_HLSQ_FS_CNTL_CONSTLEN(align(fs->constlen, 4)) | 0x100);
+}
+
+static void
+tu6_emit_vs_system_values(struct tu_cs *cs,
+                          const struct ir3_shader_variant *vs)
+{
+   const uint32_t vertexid_regid =
+      ir3_find_sysval_regid(vs, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
+   const uint32_t instanceid_regid =
+      ir3_find_sysval_regid(vs, SYSTEM_VALUE_INSTANCE_ID);
+
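+   /* 0xfc is regid(63, 0), the "unused" register id, filling the sysval
+    * fields the VS does not read
+    */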
+   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_1, 6);
+   tu_cs_emit(cs, A6XX_VFD_CONTROL_1_REGID4VTX(vertexid_regid) |
+                     A6XX_VFD_CONTROL_1_REGID4INST(instanceid_regid) |
+                     0xfcfc0000);
+   tu_cs_emit(cs, 0x0000fcfc); /* VFD_CONTROL_2 */
+   tu_cs_emit(cs, 0xfcfcfcfc); /* VFD_CONTROL_3 */
+   tu_cs_emit(cs, 0x000000fc); /* VFD_CONTROL_4 */
+   tu_cs_emit(cs, 0x0000fcfc); /* VFD_CONTROL_5 */
+   tu_cs_emit(cs, 0x00000000); /* VFD_CONTROL_6 */
+}
+
+static void
+tu6_emit_vpc(struct tu_cs *cs,
+             const struct ir3_shader_variant *vs,
+             const struct ir3_shader_variant *fs,
+             bool binning_pass)
+{
+   struct ir3_shader_linkage linkage = { 0 };
+   ir3_link_shaders(&linkage, vs, fs);
+
+   if (vs->shader->stream_output.num_outputs && !binning_pass)
+      tu_finishme("stream output");
+
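+   /* collect the varying components actually used; VPC_VAR_DISABLE takes
+    * the inverted mask, hence the ~ when emitting below
+    */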
+   BITSET_DECLARE(vpc_var_enables, 128) = { 0 };
+   for (uint32_t i = 0; i < linkage.cnt; i++) {
+      const uint32_t comp_count = util_last_bit(linkage.var[i].compmask);
+      for (uint32_t j = 0; j < comp_count; j++)
+         BITSET_SET(vpc_var_enables, linkage.var[i].loc + j);
+   }
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);
+   tu_cs_emit(cs, ~vpc_var_enables[0]);
+   tu_cs_emit(cs, ~vpc_var_enables[1]);
+   tu_cs_emit(cs, ~vpc_var_enables[2]);
+   tu_cs_emit(cs, ~vpc_var_enables[3]);
+
+   /* a6xx finds position/pointsize at the end */
+   const uint32_t position_regid =
+      ir3_find_output_regid(vs, VARYING_SLOT_POS);
+   const uint32_t pointsize_regid =
+      ir3_find_output_regid(vs, VARYING_SLOT_PSIZ);
+   uint32_t pointsize_loc = 0xff;
+   if (position_regid != regid(63, 0))
+      ir3_link_add(&linkage, position_regid, 0xf, linkage.max_loc);
+   if (pointsize_regid != regid(63, 0)) {
+      pointsize_loc = linkage.max_loc;
+      ir3_link_add(&linkage, pointsize_regid, 0x1, linkage.max_loc);
+   }
+
+   /* map vs outputs to VPC */
+   assert(linkage.cnt <= 32);
+   const uint32_t sp_vs_out_count = (linkage.cnt + 1) / 2;
+   const uint32_t sp_vs_vpc_dst_count = (linkage.cnt + 3) / 4;
+   uint32_t sp_vs_out[16];
+   uint32_t sp_vs_vpc_dst[8];
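+   /* outputs are packed two 16-bit SP_VS_OUT entries and four 8-bit
+    * VPC_DST outlocs per dword; zero the trailing, partially-filled dwords
+    */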
+   sp_vs_out[sp_vs_out_count - 1] = 0;
+   sp_vs_vpc_dst[sp_vs_vpc_dst_count - 1] = 0;
+   for (uint32_t i = 0; i < linkage.cnt; i++) {
+      ((uint16_t *) sp_vs_out)[i] =
+         A6XX_SP_VS_OUT_REG_A_REGID(linkage.var[i].regid) |
+         A6XX_SP_VS_OUT_REG_A_COMPMASK(linkage.var[i].compmask);
+      ((uint8_t *) sp_vs_vpc_dst)[i] =
+         A6XX_SP_VS_VPC_DST_REG_OUTLOC0(linkage.var[i].loc);
+   }
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_OUT_REG(0), sp_vs_out_count);
+   tu_cs_emit_array(cs, sp_vs_out, sp_vs_out_count);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_VPC_DST_REG(0), sp_vs_vpc_dst_count);
+   tu_cs_emit_array(cs, sp_vs_vpc_dst, sp_vs_vpc_dst_count);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
+   tu_cs_emit(cs, A6XX_VPC_CNTL_0_NUMNONPOSVAR(fs->total_in) |
+                     (fs->total_in > 0 ? A6XX_VPC_CNTL_0_VARYING : 0) |
+                     0xff00ff00);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_PACK, 1);
+   tu_cs_emit(cs, A6XX_VPC_PACK_NUMNONPOSVAR(fs->total_in) |
+                     A6XX_VPC_PACK_PSIZELOC(pointsize_loc) |
+                     A6XX_VPC_PACK_STRIDE_IN_VPC(linkage.max_loc));
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_GS_SIV_CNTL, 1);
+   tu_cs_emit(cs, 0x0000ffff); /* XXX */
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_PRIMITIVE_CNTL, 1);
+   tu_cs_emit(cs, A6XX_SP_PRIMITIVE_CNTL_VSOUT(linkage.cnt));
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_PC_PRIMITIVE_CNTL_1, 1);
+   tu_cs_emit(cs, A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(linkage.max_loc) |
+                     (vs->writes_psize ? A6XX_PC_PRIMITIVE_CNTL_1_PSIZE : 0));
+}
+
+static int
+tu6_vpc_varying_mode(const struct ir3_shader_variant *fs,
+                     uint32_t index,
+                     uint8_t *interp_mode,
+                     uint8_t *ps_repl_mode)
+{
+   enum
+   {
+      INTERP_SMOOTH = 0,
+      INTERP_FLAT = 1,
+      INTERP_ZERO = 2,
+      INTERP_ONE = 3,
+   };
+   enum
+   {
+      PS_REPL_NONE = 0,
+      PS_REPL_S = 1,
+      PS_REPL_T = 2,
+      PS_REPL_ONE_MINUS_T = 3,
+   };
+
+   const uint32_t compmask = fs->inputs[index].compmask;
+
+   /* NOTE: varyings are packed, so if compmask is 0xb then the first, second,
+    * and fourth components occupy three consecutive varying slots
+    */
+   int shift = 0;
+   *interp_mode = 0;
+   *ps_repl_mode = 0;
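+   /* gl_PointCoord (PNTC): replace .xy with the point sprite coords and pin
+    * .z/.w to constant zero/one
+    */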
+   if (fs->inputs[index].slot == VARYING_SLOT_PNTC) {
+      if (compmask & 0x1) {
+         *ps_repl_mode |= PS_REPL_S << shift;
+         shift += 2;
+      }
+      if (compmask & 0x2) {
+         *ps_repl_mode |= PS_REPL_T << shift;
+         shift += 2;
+      }
+      if (compmask & 0x4) {
+         *interp_mode |= INTERP_ZERO << shift;
+         shift += 2;
+      }
+      if (compmask & 0x8) {
+         *interp_mode |= INTERP_ONE << 6;
+         shift += 2;
+      }
+   } else if ((fs->inputs[index].interpolate == INTERP_MODE_FLAT) ||
+              fs->inputs[index].rasterflat) {
+      for (int i = 0; i < 4; i++) {
+         if (compmask & (1 << i)) {
+            *interp_mode |= INTERP_FLAT << shift;
+            shift += 2;
+         }
+      }
+   }
+
+   return shift;
+}
+
+static void
+tu6_emit_vpc_varying_modes(struct tu_cs *cs,
+                           const struct ir3_shader_variant *fs,
+                           bool binning_pass)
+{
+   uint32_t interp_modes[8] = { 0 };
+   uint32_t ps_repl_modes[8] = { 0 };
+
+   if (!binning_pass) {
+      for (int i = -1;
+           (i = ir3_next_varying(fs, i)) < (int) fs->inputs_count;) {
+
+         /* get the mode for input i */
+         uint8_t interp_mode;
+         uint8_t ps_repl_mode;
+         const int bits =
+            tu6_vpc_varying_mode(fs, i, &interp_mode, &ps_repl_mode);
+
+         /* OR the mode into the array */
+         const uint32_t inloc = fs->inputs[i].inloc * 2;
+         uint32_t n = inloc / 32;
+         uint32_t shift = inloc % 32;
+         interp_modes[n] |= interp_mode << shift;
+         ps_repl_modes[n] |= ps_repl_mode << shift;
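+         /* each component takes two mode bits, so a varying's modes can
+          * straddle a 32-bit register; spill the upper bits into the next one
+          */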
+         if (shift + bits > 32) {
+            n++;
+            shift = 32 - shift;
+
+            interp_modes[n] |= interp_mode >> shift;
+            ps_repl_modes[n] |= ps_repl_mode >> shift;
+         }
+      }
+   }
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
+   tu_cs_emit_array(cs, interp_modes, 8);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
+   tu_cs_emit_array(cs, ps_repl_modes, 8);
+}
+
+static void
+tu6_emit_fs_system_values(struct tu_cs *cs,
+                          const struct ir3_shader_variant *fs)
+{
+   const uint32_t frontfacing_regid =
+      ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRONT_FACE);
+   const uint32_t sampleid_regid =
+      ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_ID);
+   const uint32_t samplemaskin_regid =
+      ir3_find_sysval_regid(fs, SYSTEM_VALUE_SAMPLE_MASK_IN);
+   const uint32_t fragcoord_xy_regid =
+      ir3_find_sysval_regid(fs, SYSTEM_VALUE_FRAG_COORD);
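+   /* gl_FragCoord occupies four consecutive components; the z/w pair starts
+    * two regs after x/y when the sysval is present
+    */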
+   const uint32_t fragcoord_zw_regid = (fragcoord_xy_regid != regid(63, 0))
+                                          ? (fragcoord_xy_regid + 2)
+                                          : fragcoord_xy_regid;
+   const uint32_t varyingcoord_regid =
+      ir3_find_sysval_regid(fs, SYSTEM_VALUE_VARYING_COORD);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
+   tu_cs_emit(cs, 0x7);
+   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_2_REG_FACEREGID(frontfacing_regid) |
+                     A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(sampleid_regid) |
+                     A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(samplemaskin_regid) |
+                     0xfc000000);
+   tu_cs_emit(cs,
+              A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(varyingcoord_regid) |
+                 0xfcfcfc00);
+   tu_cs_emit(cs,
+              A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(fragcoord_xy_regid) |
+                 A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(fragcoord_zw_regid) |
+                 0x0000fcfc);
+   tu_cs_emit(cs, 0xfc);
+}
+
+static void
+tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs)
+{
+   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UNKNOWN_B980, 1);
+   tu_cs_emit(cs, fs->total_in > 0 ? 3 : 1);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_UNKNOWN_A982, 1);
+   tu_cs_emit(cs, 0); /* XXX */
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
+   tu_cs_emit(cs, 0xff); /* XXX */
+
+   uint32_t gras_cntl = 0;
+   if (fs->total_in > 0)
+      gras_cntl |= A6XX_GRAS_CNTL_VARYING;
+   if (fs->frag_coord) {
+      gras_cntl |= A6XX_GRAS_CNTL_UNK3 | A6XX_GRAS_CNTL_XCOORD |
+                   A6XX_GRAS_CNTL_YCOORD | A6XX_GRAS_CNTL_ZCOORD |
+                   A6XX_GRAS_CNTL_WCOORD;
+   }
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_CNTL, 1);
+   tu_cs_emit(cs, gras_cntl);
+
+   uint32_t rb_render_control = 0;
+   if (fs->total_in > 0) {
+      rb_render_control =
+         A6XX_RB_RENDER_CONTROL0_VARYING | A6XX_RB_RENDER_CONTROL0_UNK10;
+   }
+   if (fs->frag_coord) {
+      rb_render_control |=
+         A6XX_RB_RENDER_CONTROL0_UNK3 | A6XX_RB_RENDER_CONTROL0_XCOORD |
+         A6XX_RB_RENDER_CONTROL0_YCOORD | A6XX_RB_RENDER_CONTROL0_ZCOORD |
+         A6XX_RB_RENDER_CONTROL0_WCOORD;
+   }
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_RB_RENDER_CONTROL0, 2);
+   tu_cs_emit(cs, rb_render_control);
+   tu_cs_emit(cs, (fs->frag_face ? A6XX_RB_RENDER_CONTROL1_FACENESS : 0));
+}
+
+static void
+tu6_emit_fs_outputs(struct tu_cs *cs,
+                    const struct ir3_shader_variant *fs,
+                    uint32_t mrt_count)
+{
+   const uint32_t fragdepth_regid =
+      ir3_find_output_regid(fs, FRAG_RESULT_DEPTH);
+   uint32_t fragdata_regid[8];
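+   /* with gl_FragColor (color0_mrt) a single output register feeds every
+    * render target
+    */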
+   if (fs->color0_mrt) {
+      fragdata_regid[0] = ir3_find_output_regid(fs, FRAG_RESULT_COLOR);
+      for (uint32_t i = 1; i < ARRAY_SIZE(fragdata_regid); i++)
+         fragdata_regid[i] = fragdata_regid[0];
+   } else {
+      for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++)
+         fragdata_regid[i] = ir3_find_output_regid(fs, FRAG_RESULT_DATA0 + i);
+   }
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
+   tu_cs_emit(
+      cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(fragdepth_regid) | 0xfcfc0000);
+   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 8);
+   for (uint32_t i = 0; i < ARRAY_SIZE(fragdata_regid); i++) {
+      // TODO we could have a mix of half and full precision outputs,
+      // we really need to figure out half-precision from IR3_REG_HALF
+      tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(fragdata_regid[i]) |
+                        (false ? A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION : 0));
+   }
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
+   tu_cs_emit(cs, fs->writes_pos ? A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z : 0);
+   tu_cs_emit(cs, A6XX_RB_FS_OUTPUT_CNTL1_MRT(mrt_count));
+
+   uint32_t gras_su_depth_plane_cntl = 0;
+   uint32_t rb_depth_plane_cntl = 0;
+   if (fs->no_earlyz || fs->writes_pos) {
+      gras_su_depth_plane_cntl |= A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z;
+      rb_depth_plane_cntl |= A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z;
+   }
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL, 1);
+   tu_cs_emit(cs, gras_su_depth_plane_cntl);
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_PLANE_CNTL, 1);
+   tu_cs_emit(cs, rb_depth_plane_cntl);
+}
+
+static void
+tu6_emit_shader_object(struct tu_cs *cs,
+                       gl_shader_stage stage,
+                       const struct ir3_shader_variant *variant,
+                       const struct tu_bo *binary_bo,
+                       uint32_t binary_offset)
+{
+   uint16_t reg;
+   uint8_t opcode;
+   enum a6xx_state_block sb;
+   switch (stage) {
+   case MESA_SHADER_VERTEX:
+      reg = REG_A6XX_SP_VS_OBJ_START_LO;
+      opcode = CP_LOAD_STATE6_GEOM;
+      sb = SB6_VS_SHADER;
+      break;
+   case MESA_SHADER_TESS_CTRL:
+      reg = REG_A6XX_SP_HS_OBJ_START_LO;
+      opcode = CP_LOAD_STATE6_GEOM;
+      sb = SB6_HS_SHADER;
+      break;
+   case MESA_SHADER_TESS_EVAL:
+      reg = REG_A6XX_SP_DS_OBJ_START_LO;
+      opcode = CP_LOAD_STATE6_GEOM;
+      sb = SB6_DS_SHADER;
+      break;
+   case MESA_SHADER_GEOMETRY:
+      reg = REG_A6XX_SP_GS_OBJ_START_LO;
+      opcode = CP_LOAD_STATE6_GEOM;
+      sb = SB6_GS_SHADER;
+      break;
+   case MESA_SHADER_FRAGMENT:
+      reg = REG_A6XX_SP_FS_OBJ_START_LO;
+      opcode = CP_LOAD_STATE6_FRAG;
+      sb = SB6_FS_SHADER;
+      break;
+   case MESA_SHADER_COMPUTE:
+      reg = REG_A6XX_SP_CS_OBJ_START_LO;
+      opcode = CP_LOAD_STATE6_FRAG;
+      sb = SB6_CS_SHADER;
+      break;
+   default:
+      unreachable("invalid gl_shader_stage");
+      opcode = CP_LOAD_STATE6_GEOM;
+      sb = SB6_VS_SHADER;
+      break;
+   }
+
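+   /* a stage with no binary: just clear the shader object start address */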
+   if (!variant->instrlen) {
+      tu_cs_emit_pkt4(cs, reg, 2);
+      tu_cs_emit_qw(cs, 0);
+      return;
+   }
+
+   assert(variant->type == stage);
+
+   const uint64_t binary_iova = binary_bo->iova + binary_offset;
+   assert((binary_iova & 0x3) == 0);
+
+   tu_cs_emit_pkt4(cs, reg, 2);
+   tu_cs_emit_qw(cs, binary_iova);
+
+   /* always indirect */
+   const bool indirect = true;
+   if (indirect) {
+      tu_cs_emit_pkt7(cs, opcode, 3);
+      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+                        CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
+                        CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+                        CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+                        CP_LOAD_STATE6_0_NUM_UNIT(variant->instrlen));
+      tu_cs_emit_qw(cs, binary_iova);
+   } else {
+      const void *binary = binary_bo->map + binary_offset;
+
+      tu_cs_emit_pkt7(cs, opcode, 3 + variant->info.sizedwords);
+      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+                        CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
+                        CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+                        CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
+                        CP_LOAD_STATE6_0_NUM_UNIT(variant->instrlen));
+      tu_cs_emit_qw(cs, 0);
+      tu_cs_emit_array(cs, binary, variant->info.sizedwords);
+   }
+}
+
+static void
+tu6_emit_program(struct tu_cs *cs,
+                 const struct tu_pipeline_builder *builder,
+                 const struct tu_bo *binary_bo,
+                 bool binning_pass)
+{
+   static const struct ir3_shader_variant dummy_variant = {
+      .type = MESA_SHADER_NONE
+   };
+   assert(builder->shaders[MESA_SHADER_VERTEX]);
+   const struct ir3_shader_variant *vs =
+      &builder->shaders[MESA_SHADER_VERTEX]->variants[0];
+   const struct ir3_shader_variant *hs =
+      builder->shaders[MESA_SHADER_TESS_CTRL]
+         ? &builder->shaders[MESA_SHADER_TESS_CTRL]->variants[0]
+         : &dummy_variant;
+   const struct ir3_shader_variant *ds =
+      builder->shaders[MESA_SHADER_TESS_EVAL]
+         ? &builder->shaders[MESA_SHADER_TESS_EVAL]->variants[0]
+         : &dummy_variant;
+   const struct ir3_shader_variant *gs =
+      builder->shaders[MESA_SHADER_GEOMETRY]
+         ? &builder->shaders[MESA_SHADER_GEOMETRY]->variants[0]
+         : &dummy_variant;
+   const struct ir3_shader_variant *fs =
+      builder->shaders[MESA_SHADER_FRAGMENT]
+         ? &builder->shaders[MESA_SHADER_FRAGMENT]->variants[0]
+         : &dummy_variant;
+
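+   /* for the binning pass, switch to the binning VS variant and emit the
+    * fragment shader state from a dummy variant
+    */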
+   if (binning_pass) {
+      vs = &builder->shaders[MESA_SHADER_VERTEX]->variants[1];
+      fs = &dummy_variant;
+   }
+
+   tu6_emit_vs_config(cs, vs);
+   tu6_emit_hs_config(cs, hs);
+   tu6_emit_ds_config(cs, ds);
+   tu6_emit_gs_config(cs, gs);
+   tu6_emit_fs_config(cs, fs);
+
+   tu6_emit_vs_system_values(cs, vs);
+   tu6_emit_vpc(cs, vs, fs, binning_pass);
+   tu6_emit_vpc_varying_modes(cs, fs, binning_pass);
+   tu6_emit_fs_system_values(cs, fs);
+   tu6_emit_fs_inputs(cs, fs);
+   tu6_emit_fs_outputs(cs, fs, builder->color_attachment_count);
+
+   tu6_emit_shader_object(cs, MESA_SHADER_VERTEX, vs, binary_bo,
+                          builder->shader_offsets[MESA_SHADER_VERTEX]);
+
+   tu6_emit_shader_object(cs, MESA_SHADER_FRAGMENT, fs, binary_bo,
+                          builder->shader_offsets[MESA_SHADER_FRAGMENT]);
+}
+
+static void
+tu6_emit_vertex_input(struct tu_cs *cs,
+                      const struct ir3_shader_variant *vs,
+                      const VkPipelineVertexInputStateCreateInfo *vi_info,
+                      uint8_t bindings[MAX_VERTEX_ATTRIBS],
+                      uint16_t strides[MAX_VERTEX_ATTRIBS],
+                      uint16_t offsets[MAX_VERTEX_ATTRIBS],
+                      uint32_t *count)
+{
+   uint32_t vfd_decode_idx = 0;
+
+   /* why do we go beyond inputs_count? */
+   assert(vs->inputs_count + 1 <= MAX_VERTEX_ATTRIBS);
+   for (uint32_t i = 0; i <= vs->inputs_count; i++) {
+      if (vs->inputs[i].sysval || !vs->inputs[i].compmask)
+         continue;
+
+      const VkVertexInputAttributeDescription *vi_attr =
+         tu_find_vertex_input_attribute(vi_info, vs->inputs[i].slot);
+      const VkVertexInputBindingDescription *vi_binding =
+         tu_find_vertex_input_binding(vi_info, vi_attr);
+      assert(vi_attr && vi_binding);
+
+      const struct tu_native_format *format =
+         tu6_get_native_format(vi_attr->format);
+      assert(format && format->vtx >= 0);
+
+      uint32_t vfd_decode = A6XX_VFD_DECODE_INSTR_IDX(vfd_decode_idx) |
+                            A6XX_VFD_DECODE_INSTR_FORMAT(format->vtx) |
+                            A6XX_VFD_DECODE_INSTR_SWAP(format->swap) |
+                            A6XX_VFD_DECODE_INSTR_UNK30;
+      if (vi_binding->inputRate == VK_VERTEX_INPUT_RATE_INSTANCE)
+         vfd_decode |= A6XX_VFD_DECODE_INSTR_INSTANCED;
+      if (!vk_format_is_int(vi_attr->format))
+         vfd_decode |= A6XX_VFD_DECODE_INSTR_FLOAT;
+
+      const uint32_t vfd_decode_step_rate = 1;
+
+      const uint32_t vfd_dest_cntl =
+         A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(vs->inputs[i].compmask) |
+         A6XX_VFD_DEST_CNTL_INSTR_REGID(vs->inputs[i].regid);
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DECODE(vfd_decode_idx), 2);
+      tu_cs_emit(cs, vfd_decode);
+      tu_cs_emit(cs, vfd_decode_step_rate);
+
+      tu_cs_emit_pkt4(cs, REG_A6XX_VFD_DEST_CNTL(vfd_decode_idx), 1);
+      tu_cs_emit(cs, vfd_dest_cntl);
+
+      bindings[vfd_decode_idx] = vi_binding->binding;
+      strides[vfd_decode_idx] = vi_binding->stride;
+      offsets[vfd_decode_idx] = vi_attr->offset;
+
+      vfd_decode_idx++;
+   }
+
+   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_0, 1);
+   tu_cs_emit(
+      cs, A6XX_VFD_CONTROL_0_VTXCNT(vfd_decode_idx) | (vfd_decode_idx << 8));
+
+   *count = vfd_decode_idx;
+}
+
 static uint32_t
 tu6_guardband_adj(uint32_t v)
 {
@@ -694,6 +1391,91 @@ tu_pipeline_builder_create_pipeline(struct tu_pipeline_builder *builder,
    return VK_SUCCESS;
 }
 
+static VkResult
+tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder)
+{
+   const VkPipelineShaderStageCreateInfo *stage_infos[MESA_SHADER_STAGES] = {
+      NULL
+   };
+   for (uint32_t i = 0; i < builder->create_info->stageCount; i++) {
+      gl_shader_stage stage =
+         tu_shader_stage(builder->create_info->pStages[i].stage);
+      stage_infos[stage] = &builder->create_info->pStages[i];
+   }
+
+   struct tu_shader_compile_options options;
+   tu_shader_compile_options_init(&options, builder->create_info);
+
+   /* compile shaders in reverse order so each stage can be linked against
+    * the stage that consumes its outputs
+    */
+   struct tu_shader *next_stage_shader = NULL;
+   for (gl_shader_stage stage = MESA_SHADER_STAGES - 1;
+        stage > MESA_SHADER_NONE; stage--) {
+      const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
+      if (!stage_info)
+         continue;
+
+      struct tu_shader *shader =
+         tu_shader_create(builder->device, stage, stage_info, builder->alloc);
+      if (!shader)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+      VkResult result =
+         tu_shader_compile(builder->device, shader, next_stage_shader,
+                           &options, builder->alloc);
+      if (result != VK_SUCCESS)
+         return result;
+
+      builder->shaders[stage] = shader;
+      builder->shader_offsets[stage] = builder->shader_total_size;
+      builder->shader_total_size +=
+         sizeof(uint32_t) * shader->variants[0].info.sizedwords;
+
+      next_stage_shader = shader;
+   }
+
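+   /* the binning VS variant, if any, is appended after the per-stage
+    * binaries
+    */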
+   if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
+      const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
+      builder->binning_vs_offset = builder->shader_total_size;
+      builder->shader_total_size +=
+         sizeof(uint32_t) * vs->variants[1].info.sizedwords;
+   }
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+tu_pipeline_builder_upload_shaders(struct tu_pipeline_builder *builder,
+                                   struct tu_pipeline *pipeline)
+{
+   struct tu_bo *bo = &pipeline->program.binary_bo;
+
+   VkResult result =
+      tu_bo_init_new(builder->device, bo, builder->shader_total_size);
+   if (result != VK_SUCCESS)
+      return result;
+
+   result = tu_bo_map(builder->device, bo);
+   if (result != VK_SUCCESS)
+      return result;
+
+   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
+      const struct tu_shader *shader = builder->shaders[i];
+      if (!shader)
+         continue;
+
+      memcpy(bo->map + builder->shader_offsets[i], shader->binary,
+             sizeof(uint32_t) * shader->variants[0].info.sizedwords);
+   }
+
+   if (builder->shaders[MESA_SHADER_VERTEX]->has_binning_pass) {
+      const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
+      memcpy(bo->map + builder->binning_vs_offset, vs->binning_binary,
+             sizeof(uint32_t) * vs->variants[1].info.sizedwords);
+   }
+
+   return VK_SUCCESS;
+}
+
 static void
 tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
                                   struct tu_pipeline *pipeline)
@@ -710,6 +1492,49 @@ tu_pipeline_builder_parse_dynamic(struct tu_pipeline_builder *builder,
    }
 }
 
+static void
+tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
+                                        struct tu_pipeline *pipeline)
+{
+   struct tu_cs prog_cs;
+   tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 512, &prog_cs);
+   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, false);
+   pipeline->program.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
+
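+   /* emit the same program state again for the binning pass */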
+   tu_cs_begin_sub_stream(builder->device, &pipeline->cs, 512, &prog_cs);
+   tu6_emit_program(&prog_cs, builder, &pipeline->program.binary_bo, true);
+   pipeline->program.binning_state_ib =
+      tu_cs_end_sub_stream(&pipeline->cs, &prog_cs);
+}
+
+static void
+tu_pipeline_builder_parse_vertex_input(struct tu_pipeline_builder *builder,
+                                       struct tu_pipeline *pipeline)
+{
+   const VkPipelineVertexInputStateCreateInfo *vi_info =
+      builder->create_info->pVertexInputState;
+   const struct tu_shader *vs = builder->shaders[MESA_SHADER_VERTEX];
+
+   struct tu_cs vi_cs;
+   tu_cs_begin_sub_stream(builder->device, &pipeline->cs,
+                          MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
+   tu6_emit_vertex_input(&vi_cs, &vs->variants[0], vi_info,
+                         pipeline->vi.bindings, pipeline->vi.strides,
+                         pipeline->vi.offsets, &pipeline->vi.count);
+   pipeline->vi.state_ib = tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
+
+   if (vs->has_binning_pass) {
+      tu_cs_begin_sub_stream(builder->device, &pipeline->cs,
+                             MAX_VERTEX_ATTRIBS * 5 + 2, &vi_cs);
+      tu6_emit_vertex_input(
+         &vi_cs, &vs->variants[1], vi_info, pipeline->vi.binning_bindings,
+         pipeline->vi.binning_strides, pipeline->vi.binning_offsets,
+         &pipeline->vi.binning_count);
+      pipeline->vi.binning_state_ib =
+         tu_cs_end_sub_stream(&pipeline->cs, &vi_cs);
+   }
+}
+
 static void
 tu_pipeline_builder_parse_input_assembly(struct tu_pipeline_builder *builder,
                                          struct tu_pipeline *pipeline)
@@ -887,6 +1712,9 @@ tu_pipeline_finish(struct tu_pipeline *pipeline,
                    const VkAllocationCallbacks *alloc)
 {
    tu_cs_finish(dev, &pipeline->cs);
+
+   if (pipeline->program.binary_bo.gem_handle)
+      tu_bo_finish(dev, &pipeline->program.binary_bo);
 }
 
 static VkResult
@@ -897,7 +1725,21 @@ tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
    if (result != VK_SUCCESS)
       return result;
 
+   /* compile and upload shaders */
+   result = tu_pipeline_builder_compile_shaders(builder);
+   if (result == VK_SUCCESS)
+      result = tu_pipeline_builder_upload_shaders(builder, *pipeline);
+   if (result != VK_SUCCESS) {
+      tu_pipeline_finish(*pipeline, builder->device, builder->alloc);
+      vk_free2(&builder->device->alloc, builder->alloc, *pipeline);
+      *pipeline = VK_NULL_HANDLE;
+
+      return result;
+   }
+
    tu_pipeline_builder_parse_dynamic(builder, *pipeline);
+   tu_pipeline_builder_parse_shader_stages(builder, *pipeline);
+   tu_pipeline_builder_parse_vertex_input(builder, *pipeline);
    tu_pipeline_builder_parse_input_assembly(builder, *pipeline);
    tu_pipeline_builder_parse_viewport(builder, *pipeline);
    tu_pipeline_builder_parse_rasterization(builder, *pipeline);
@@ -912,6 +1754,16 @@ tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
    return VK_SUCCESS;
 }
 
+static void
+tu_pipeline_builder_finish(struct tu_pipeline_builder *builder)
+{
+   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
+      if (!builder->shaders[i])
+         continue;
+      tu_shader_destroy(builder->device, builder->shaders[i], builder->alloc);
+   }
+}
+
 static void
 tu_pipeline_builder_init_graphics(
    struct tu_pipeline_builder *builder,
@@ -943,6 +1795,9 @@ tu_pipeline_builder_init_graphics(
       builder->use_depth_stencil_attachment =
          subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED;
 
+      assert(subpass->color_count ==
+             create_info->pColorBlendState->attachmentCount);
+      builder->color_attachment_count = subpass->color_count;
       for (uint32_t i = 0; i < subpass->color_count; i++) {
          const uint32_t a = subpass->color_attachments[i].attachment;
          if (a == VK_ATTACHMENT_UNUSED)
@@ -972,6 +1827,7 @@ tu_CreateGraphicsPipelines(VkDevice device,
 
       struct tu_pipeline *pipeline;
       VkResult result = tu_pipeline_builder_build(&builder, &pipeline);
+      tu_pipeline_builder_finish(&builder);
 
       if (result != VK_SUCCESS) {
          for (uint32_t j = 0; j < i; j++) {