turnip: remove unnecessary OVERFLOW_FLAG_REG check
diff --git a/src/freedreno/vulkan/tu_cmd_buffer.c b/src/freedreno/vulkan/tu_cmd_buffer.c
index 02e7ee1d5390727eb670ac76ebfc1ae4d0e33623..975a0d905ad7ac45b9b0c68c9c12f1f099ee306c 100644
--- a/src/freedreno/vulkan/tu_cmd_buffer.c
+++ b/src/freedreno/vulkan/tu_cmd_buffer.c
@@ -34,8 +34,6 @@
 
 #include "tu_cs.h"
 
-#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
-
 void
 tu_bo_list_init(struct tu_bo_list *list)
 {
@@ -284,75 +282,130 @@ tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
          : tile->begin.y + tiling->tile0.extent.height;
 }
 
-enum a3xx_msaa_samples
-tu_msaa_samples(uint32_t samples)
-{
-   switch (samples) {
-   case 1:
-      return MSAA_ONE;
-   case 2:
-      return MSAA_TWO;
-   case 4:
-      return MSAA_FOUR;
-   case 8:
-      return MSAA_EIGHT;
-   default:
-      assert(!"invalid sample count");
-      return MSAA_ONE;
-   }
-}
-
-static enum a4xx_index_size
-tu6_index_size(VkIndexType type)
-{
-   switch (type) {
-   case VK_INDEX_TYPE_UINT16:
-      return INDEX4_SIZE_16_BIT;
-   case VK_INDEX_TYPE_UINT32:
-      return INDEX4_SIZE_32_BIT;
-   default:
-      unreachable("invalid VkIndexType");
-      return INDEX4_SIZE_8_BIT;
-   }
-}
-
-unsigned
+void
 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                      struct tu_cs *cs,
-                     enum vgt_event_type event,
-                     bool need_seqno)
-{
-   unsigned seqno = 0;
+                     enum vgt_event_type event)
+{
+   bool need_seqno = false;
+   switch (event) {
+   case CACHE_FLUSH_TS:
+   case WT_DONE_TS:
+   case RB_DONE_TS:
+   case PC_CCU_FLUSH_DEPTH_TS:
+   case PC_CCU_FLUSH_COLOR_TS:
+   case PC_CCU_RESOLVE_TS:
+      need_seqno = true;
+      break;
+   default:
+      break;
+   }
 
    tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
    tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
    if (need_seqno) {
       tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
-      seqno = ++cmd->scratch_seqno;
-      tu_cs_emit(cs, seqno);
+      tu_cs_emit(cs, 0);
    }
+}
 
-   return seqno;
+static void
+tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
+                 struct tu_cs *cs,
+                 enum tu_cmd_flush_bits flushes)
+{
+   /* Experiments show that invalidating the CCU while it still has data in
+    * it doesn't work, so make sure to always flush before invalidating, in
+    * case any data remains that hasn't yet been made available through a
+    * barrier. However, invalidating with data still pending does seem to
+    * work for UCHE.
+    */
+   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
+                  TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
+      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
+   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
+                  TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
+      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
+   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
+      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
+   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
+      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
+   if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
+      tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
+   if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
+      tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
+   if (flushes & TU_CMD_FLAG_WFI)
+      tu_cs_emit_wfi(cs);
 }
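+
+/* Illustrative usage sketch, not part of this patch: combining a flush and
+ * an invalidate of the same CCU domain is safe because tu6_emit_flushes()
+ * orders the flush first, e.g.
+ *
+ *    tu6_emit_flushes(cmd, cs, TU_CMD_FLAG_CCU_FLUSH_COLOR |
+ *                              TU_CMD_FLAG_CCU_INVALIDATE_COLOR);
+ *
+ * emits PC_CCU_FLUSH_COLOR_TS before PC_CCU_INVALIDATE_COLOR.
+ */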
 
+/* "Normal" cache flushes, that don't require any special handling */
+
 static void
-tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+tu_emit_cache_flush(struct tu_cmd_buffer *cmd_buffer,
+                    struct tu_cs *cs)
 {
-   tu6_emit_event_write(cmd, cs, 0x31, false);
+   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.cache.flush_bits);
+   cmd_buffer->state.cache.flush_bits = 0;
 }
 
-static void
-tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+/* Renderpass cache flushes */
+
+void
+tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
+                               struct tu_cs *cs)
 {
-   tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
+   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.renderpass_cache.flush_bits);
+   cmd_buffer->state.renderpass_cache.flush_bits = 0;
 }
 
-static void
-tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+/* Cache flushes for things that use the color/depth read/write path (i.e.
+ * blits and draws). This deals with changing CCU state as well as the usual
+ * cache flushing.
+ */
+
+void
+tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
+                        struct tu_cs *cs,
+                        enum tu_cmd_ccu_state ccu_state)
 {
-   if (cmd->wait_for_idle) {
-      tu_cs_emit_wfi(cs);
-      cmd->wait_for_idle = false;
+   enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;
+
+   assert(ccu_state != TU_CMD_CCU_UNKNOWN);
+
+   /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
+    * the CCU may also contain data that we haven't flushed out yet, so we
+    * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
+    * emit a WFI as it isn't pipelined.
+    */
+   if (ccu_state != cmd_buffer->state.ccu_state) {
+      if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
+         flushes |=
+            TU_CMD_FLAG_CCU_FLUSH_COLOR |
+            TU_CMD_FLAG_CCU_FLUSH_DEPTH;
+         cmd_buffer->state.cache.pending_flush_bits &= ~(
+            TU_CMD_FLAG_CCU_FLUSH_COLOR |
+            TU_CMD_FLAG_CCU_FLUSH_DEPTH);
+      }
+      flushes |=
+         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
+         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
+         TU_CMD_FLAG_WFI;
+      cmd_buffer->state.cache.pending_flush_bits &= ~(
+         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
+         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH);
+   }
+
+   tu6_emit_flushes(cmd_buffer, cs, flushes);
+   cmd_buffer->state.cache.flush_bits = 0;
+
+   if (ccu_state != cmd_buffer->state.ccu_state) {
+      struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
+      tu_cs_emit_regs(cs,
+                      A6XX_RB_CCU_CNTL(.offset =
+                                          ccu_state == TU_CMD_CCU_GMEM ?
+                                          phys_dev->ccu_offset_gmem :
+                                          phys_dev->ccu_offset_bypass,
+                                       .gmem = ccu_state == TU_CMD_CCU_GMEM));
+      cmd_buffer->state.ccu_state = ccu_state;
    }
 }
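+
+/* Illustrative sequence, not part of this patch: a sysmem blit followed by a
+ * gmem render pass would do
+ *
+ *    tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
+ *    ...emit blit...
+ *    tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
+ *
+ * where the second call flushes and invalidates the CCU, emits a WFI, and
+ * reprograms RB_CCU_CNTL, because the CCU state changed and the previous
+ * state was not TU_CMD_CCU_GMEM.
+ */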
 
@@ -448,11 +501,6 @@ tu6_emit_mrt(struct tu_cmd_buffer *cmd,
    tu_cs_emit_regs(cs,
                    A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));
 
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_RENDER_COMPONENTS(.dword = subpass->render_components));
-   tu_cs_emit_regs(cs,
-                   A6XX_SP_FS_RENDER_COMPONENTS(.dword = subpass->render_components));
-
    tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
 }
 
@@ -604,6 +652,57 @@ tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
                    A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
 }
 
+static void
+tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
+{
+   uint32_t enable_mask;
+   switch (id) {
+   case TU_DRAW_STATE_PROGRAM:
+   case TU_DRAW_STATE_VI:
+   case TU_DRAW_STATE_FS_CONST:
+   /* The blob seems to not enable this (DESC_SETS_LOAD) for binning, even
+    * when resources would actually be used in the binning shader.
+    * Presumably the overhead of prefetching the resources isn't
+    * worth it.
+    */
+   case TU_DRAW_STATE_DESC_SETS_LOAD:
+      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
+                    CP_SET_DRAW_STATE__0_SYSMEM;
+      break;
+   case TU_DRAW_STATE_PROGRAM_BINNING:
+   case TU_DRAW_STATE_VI_BINNING:
+      enable_mask = CP_SET_DRAW_STATE__0_BINNING;
+      break;
+   case TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM:
+      enable_mask = CP_SET_DRAW_STATE__0_GMEM;
+      break;
+   case TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM:
+      enable_mask = CP_SET_DRAW_STATE__0_SYSMEM;
+      break;
+   default:
+      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
+                    CP_SET_DRAW_STATE__0_SYSMEM |
+                    CP_SET_DRAW_STATE__0_BINNING;
+      break;
+   }
+
+   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
+                  enable_mask |
+                  CP_SET_DRAW_STATE__0_GROUP_ID(id) |
+                  COND(!state.size, CP_SET_DRAW_STATE__0_DISABLE));
+   tu_cs_emit_qw(cs, state.iova);
+}
+
+/* note: get rid of this eventually */
+static void
+tu_cs_emit_sds_ib(struct tu_cs *cs, uint32_t id, struct tu_cs_entry entry)
+{
+   tu_cs_emit_draw_state(cs, id, (struct tu_draw_state) {
+      .iova = entry.size ? entry.bo->iova + entry.offset : 0,
+      .size = entry.size / 4,
+   });
+}
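+
+/* Usage sketch, mirroring the emit sites later in this patch: a group of N
+ * draw states is preceded by a single CP_SET_DRAW_STATE packet sized at 3
+ * dwords per state (one header dword plus a 64-bit address), e.g.
+ *
+ *    tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * 2);
+ *    tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
+ *    tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
+ */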
+
 static bool
 use_hw_binning(struct tu_cmd_buffer *cmd)
 {
@@ -661,33 +760,15 @@ tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
       tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
       tu_cs_emit(cs, 0x0);
 
-      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
-      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
-                     A6XX_CP_REG_TEST_0_BIT(0) |
-                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
+      tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
+      tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
+                     CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
+      tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + tile->pipe * cmd->vsc_draw_strm_pitch);
+      tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + (tile->pipe * 4) + (32 * cmd->vsc_draw_strm_pitch));
+      tu_cs_emit_qw(cs, cmd->vsc_prim_strm.iova + (tile->pipe * cmd->vsc_prim_strm_pitch));
 
-      tu_cs_reserve(cs, 3 + 11);
-      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
-      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
-      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));
-
-      /* if (no overflow) */ {
-         tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
-         tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
-                        CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
-         tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
-         tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
-         tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));
-
-         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
-         tu_cs_emit(cs, 0x0);
-
-         /* use a NOP packet to skip over the 'else' side: */
-         tu_cs_emit_pkt7(cs, CP_NOP, 2);
-      } /* else */ {
-         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
-         tu_cs_emit(cs, 0x1);
-      }
+      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
+      tu_cs_emit(cs, 0x0);
 
       tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
       tu_cs_emit(cs, 0x0);
@@ -713,6 +794,49 @@ tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
    tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
 }
 
+static void
+tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
+                         struct tu_cs *cs,
+                         const struct tu_subpass *subpass)
+{
+   if (subpass->resolve_attachments) {
+      /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
+       * Commands":
+       *
+       *    End-of-subpass multisample resolves are treated as color
+       *    attachment writes for the purposes of synchronization. That is,
+       *    they are considered to execute in the
+       *    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
+       *    their writes are synchronized with
+       *    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
+       *    rendering within a subpass and any resolve operations at the end
+       *    of the subpass occurs automatically, without need for explicit
+       *    dependencies or pipeline barriers. However, if the resolve
+       *    attachment is also used in a different subpass, an explicit
+       *    dependency is needed.
+       *
+       * We use the CP_BLIT path for sysmem resolves, which is really a
+       * transfer command, so we have to flush manually, similarly to the
+       * gmem resolve case. However, a flush afterwards isn't needed: the
+       * last sentence above makes any cross-subpass use an explicit
+       * dependency, and we're in sysmem mode anyway.
+       */
+      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
+
+      /* Wait for the flushes to land before using the 2D engine */
+      tu_cs_emit_wfi(cs);
+
+      for (unsigned i = 0; i < subpass->color_count; i++) {
+         uint32_t a = subpass->resolve_attachments[i].attachment;
+         if (a == VK_ATTACHMENT_UNUSED)
+            continue;
+
+         tu6_emit_sysmem_resolve(cmd, cs, a,
+                                 subpass->color_attachments[i].attachment);
+      }
+   }
+}
+
 static void
 tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
@@ -761,12 +885,13 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
    const struct tu_physical_device *phys_dev = cmd->device->physical_device;
 
-   tu6_emit_cache_flush(cmd, cs);
+   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 
    tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
 
    tu_cs_emit_regs(cs,
                    A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
+   cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
@@ -888,29 +1013,6 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_sanity_check(cs);
 }
 
-static void
-tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
-{
-   unsigned seqno;
-
-   seqno = tu6_emit_event_write(cmd, cs, RB_DONE_TS, true);
-
-   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
-   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
-                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
-   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
-   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
-   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
-   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
-
-   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
-
-   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
-   tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
-   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
-   tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
-}
-
 static void
 update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
@@ -919,8 +1021,8 @@ update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_regs(cs,
                    A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
                                      .height = tiling->tile0.extent.height),
-                   A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
-                                         .bo_offset = 32 * cmd->vsc_data_pitch));
+                   A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = &cmd->vsc_draw_strm,
+                                                   .bo_offset = 32 * cmd->vsc_draw_strm_pitch));
 
    tu_cs_emit_regs(cs,
                    A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
@@ -931,14 +1033,14 @@ update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
       tu_cs_emit(cs, tiling->pipe_config[i]);
 
    tu_cs_emit_regs(cs,
-                   A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
-                   A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
-                   A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));
+                   A6XX_VSC_PRIM_STRM_ADDRESS(.bo = &cmd->vsc_prim_strm),
+                   A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
+                   A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - 64));
 
    tu_cs_emit_regs(cs,
-                   A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
-                   A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
-                   A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
+                   A6XX_VSC_DRAW_STRM_ADDRESS(.bo = &cmd->vsc_draw_strm),
+                   A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
+                   A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - 64));
 }
 
 static void
@@ -958,73 +1060,25 @@ emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
       tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
       tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
             CP_COND_WRITE5_0_WRITE_MEMORY);
-      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
+      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
       tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
-      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
+      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - 64));
       tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
       tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
-      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));
+      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_draw_strm_pitch));
 
       tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
       tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
             CP_COND_WRITE5_0_WRITE_MEMORY);
-      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
+      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
       tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
-      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
+      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - 64));
       tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
       tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
-      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
+      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_prim_strm_pitch));
    }
 
    tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
-
-   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
-
-   tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
-   tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
-         CP_MEM_TO_REG_0_CNT(1 - 1));
-   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
-
-   /*
-    * This is a bit awkward, we really want a way to invert the
-    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
-    * execute cmds to use hwbinning when a bit is *not* set.  This
-    * dance is to invert OVERFLOW_FLAG_REG
-    *
-    * A CP_NOP packet is used to skip executing the 'else' clause
-    * if (b0 set)..
-    */
-
-   /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
-   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
-   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
-         A6XX_CP_REG_TEST_0_BIT(0) |
-         A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
-
-   tu_cs_reserve(cs, 3 + 7);
-   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
-   tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
-   tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));
-
-   /* if (b0 set) */ {
-      /*
-       * On overflow, mirror the value to control->vsc_overflow
-       * which CPU is checking to detect overflow (see
-       * check_vsc_overflow())
-       */
-      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
-      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
-            CP_REG_TO_MEM_0_CNT(0));
-      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_overflow));
-
-      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
-      tu_cs_emit(cs, 0x0);
-
-      tu_cs_emit_pkt7(cs, CP_NOP, 2);  /* skip 'else' when 'if' is taken */
-   } /* else */ {
-      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
-      tu_cs_emit(cs, 0x1);
-   }
 }
 
 static void
@@ -1084,8 +1138,15 @@ tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
    tu_cs_emit(cs, UNK_2D);
 
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
-   tu6_cache_flush(cmd, cs);
+   /* This flush is probably required because the VSC, which produces the
+    * visibility stream, is a client of UCHE, whereas the CP needs to read the
+    * visibility stream (without caching) to do draw skipping. The
+    * WFI+WAIT_FOR_ME combination guarantees that the binning commands
+    * submitted are finished before reading the VSC regs (in
+    * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly as
+    * part of draws).
+    */
+   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);
 
    tu_cs_emit_wfi(cs);
 
@@ -1098,13 +1159,110 @@ tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 
    tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
    tu_cs_emit(cs, 0x0);
+}
+
+static void
+tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
+                          const struct tu_subpass *subpass,
+                          struct tu_cs_entry *ib,
+                          bool gmem)
+{
+   /* note: we could probably emit input attachments just once for the whole
+    * renderpass, which would avoid emitting both the sysmem and gmem
+    * versions.
+    *
+    * We emit two texture descriptors for each input, as a workaround for
+    * d24s8, which can be sampled as both float (depth) and integer (stencil).
+    * tu_shader lowers uint input attachment loads to use the 2nd descriptor
+    * in the pair.
+    * TODO: a smarter workaround
+    */
+
+   if (!subpass->input_count)
+      return;
+
+   struct ts_cs_memory texture;
+   VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
+                                 A6XX_TEX_CONST_DWORDS, &texture);
+   assert(result == VK_SUCCESS);
+
+   for (unsigned i = 0; i < subpass->input_count * 2; i++) {
+      uint32_t a = subpass->input_attachments[i / 2].attachment;
+      if (a == VK_ATTACHMENT_UNUSED)
+         continue;
+
+      struct tu_image_view *iview =
+         cmd->state.framebuffer->attachments[a].attachment;
+      const struct tu_render_pass_attachment *att =
+         &cmd->state.pass->attachments[a];
+      uint32_t *dst = &texture.map[A6XX_TEX_CONST_DWORDS * i];
+
+      memcpy(dst, iview->descriptor, A6XX_TEX_CONST_DWORDS * 4);
+
+      if (i % 2 == 1 && att->format == VK_FORMAT_D24_UNORM_S8_UINT) {
+         /* note this works because spec says fb and input attachments
+          * must use identity swizzle
+          */
+         dst[0] &= ~(A6XX_TEX_CONST_0_FMT__MASK |
+            A6XX_TEX_CONST_0_SWIZ_X__MASK | A6XX_TEX_CONST_0_SWIZ_Y__MASK |
+            A6XX_TEX_CONST_0_SWIZ_Z__MASK | A6XX_TEX_CONST_0_SWIZ_W__MASK);
+         dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_S8Z24_UINT) |
+            A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_Y) |
+            A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
+            A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
+            A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
+      }
+
+      if (!gmem)
+         continue;
+
+      /* patched for gmem */
+      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
+      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
+      dst[2] =
+         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
+         A6XX_TEX_CONST_2_PITCH(cmd->state.tiling_config.tile0.extent.width * att->cpp);
+      dst[3] = 0;
+      dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
+      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
+      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
+         dst[i] = 0;
+   }
+
+   struct tu_cs cs;
+   tu_cs_begin_sub_stream(&cmd->sub_cs, 9, &cs);
+
+   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
+   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+                  CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+                  CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
+                  CP_LOAD_STATE6_0_NUM_UNIT(subpass->input_count * 2));
+   tu_cs_emit_qw(&cs, texture.iova);
+
+   tu_cs_emit_pkt4(&cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
+   tu_cs_emit_qw(&cs, texture.iova);
+
+   tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));
+
+   *ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+}
 
-   cmd->wait_for_idle = false;
+static void
+tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *subpass)
+{
+   struct tu_cs *cs = &cmd->draw_cs;
+
+   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_gmem_ib, true);
+   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_sysmem_ib, false);
+
+   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
+   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM, cmd->state.ia_gmem_ib);
+   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM, cmd->state.ia_sysmem_ib);
 }
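+
+/* A minimal sketch (hypothetical helper, not in this patch) of the
+ * descriptor-pair indexing used above: even slots hold the normal view, odd
+ * slots hold the S8Z24_UINT view that tu_shader's lowering selects for uint
+ * (stencil) input attachment loads.
+ *
+ *    static inline unsigned
+ *    input_attachment_tex_slot(unsigned input_idx, bool is_uint_load)
+ *    {
+ *       return input_idx * 2 + (is_uint_load ? 1 : 0);
+ *    }
+ */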
 
 static void
-tu_emit_load_clear(struct tu_cmd_buffer *cmd,
-                   const VkRenderPassBeginInfo *info)
+tu_emit_renderpass_begin(struct tu_cmd_buffer *cmd,
+                         const VkRenderPassBeginInfo *info)
 {
    struct tu_cs *cs = &cmd->draw_cs;
 
@@ -1134,7 +1292,6 @@ static void
 tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                         const struct VkRect2D *renderArea)
 {
-   const struct tu_physical_device *phys_dev = cmd->device->physical_device;
    const struct tu_framebuffer *fb = cmd->state.framebuffer;
 
    assert(fb->width > 0 && fb->height > 0);
@@ -1143,7 +1300,7 @@ tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
 
    tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
 
-   tu6_emit_lrz_flush(cmd, cs);
+   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
 
    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
@@ -1151,13 +1308,7 @@ tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    tu_cs_emit(cs, 0x0);
 
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
-
-   tu6_emit_wfi(cmd, cs);
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
+   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
 
    /* enable stream-out, with sysmem there is only one pass: */
    tu_cs_emit_regs(cs,
@@ -1178,53 +1329,31 @@ tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    /* Do any resolves of the last subpass. These are handled in the
     * tile_store_ib in the gmem path.
     */
-   const struct tu_subpass *subpass = cmd->state.subpass;
-   if (subpass->resolve_attachments) {
-      for (unsigned i = 0; i < subpass->color_count; i++) {
-         uint32_t a = subpass->resolve_attachments[i].attachment;
-         if (a != VK_ATTACHMENT_UNUSED)
-            tu6_emit_sysmem_resolve(cmd, cs, a,
-                                    subpass->color_attachments[i].attachment);
-      }
-   }
+   tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);
 
    tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
 
    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    tu_cs_emit(cs, 0x0);
 
-   tu6_emit_lrz_flush(cmd, cs);
-
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
 
    tu_cs_sanity_check(cs);
 }
 
-
 static void
 tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
    struct tu_physical_device *phys_dev = cmd->device->physical_device;
 
-   tu6_emit_lrz_flush(cmd, cs);
+   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
 
    /* lrz clear? */
 
-   tu6_emit_cache_flush(cmd, cs);
-
    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    tu_cs_emit(cs, 0x0);
 
-   /* TODO: flushing with barriers instead of blindly always flushing */
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
-
-   tu_cs_emit_wfi(cs);
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_gmem, .gmem = 1));
+   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
 
    const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
    if (use_hw_binning(cmd)) {
@@ -1278,23 +1407,10 @@ tu6_render_tile(struct tu_cmd_buffer *cmd,
    tu6_emit_tile_select(cmd, cs, tile);
 
    tu_cs_emit_call(cs, &cmd->draw_cs);
-   cmd->wait_for_idle = true;
 
    if (use_hw_binning(cmd)) {
-      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
-      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
-                     A6XX_CP_REG_TEST_0_BIT(0) |
-                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
-
-      tu_cs_reserve(cs, 3 + 2);
-      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
-      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
-      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));
-
-      /* if (no overflow) */ {
-         tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
-         tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
-      }
+      tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
+      tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
    }
 
    tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);
@@ -1310,9 +1426,9 @@ tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_regs(cs,
                    A6XX_GRAS_LRZ_CNTL(0));
 
-   tu6_emit_lrz_flush(cmd, cs);
+   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
 
-   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS, true);
+   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);
 
    tu_cs_sanity_check(cs);
 }
@@ -1343,7 +1459,6 @@ tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
    tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
 
    tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
-   cmd->wait_for_idle = true;
 
    tu6_sysmem_render_end(cmd, &cmd->cs);
 }
@@ -1382,147 +1497,6 @@ tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
    tu_tiling_config_update_pipes(tiling, dev);
 }
 
-const struct tu_dynamic_state default_dynamic_state = {
-   .viewport =
-     {
-       .count = 0,
-     },
-   .scissor =
-     {
-       .count = 0,
-     },
-   .line_width = 1.0f,
-   .depth_bias =
-     {
-       .bias = 0.0f,
-       .clamp = 0.0f,
-       .slope = 0.0f,
-     },
-   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
-   .depth_bounds =
-     {
-       .min = 0.0f,
-       .max = 1.0f,
-     },
-   .stencil_compare_mask =
-     {
-       .front = ~0u,
-       .back = ~0u,
-     },
-   .stencil_write_mask =
-     {
-       .front = ~0u,
-       .back = ~0u,
-     },
-   .stencil_reference =
-     {
-       .front = 0u,
-       .back = 0u,
-     },
-};
-
-static void UNUSED /* FINISHME */
-tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
-                      const struct tu_dynamic_state *src)
-{
-   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
-   uint32_t copy_mask = src->mask;
-   uint32_t dest_mask = 0;
-
-   tu_use_args(cmd_buffer); /* FINISHME */
-
-   /* Make sure to copy the number of viewports/scissors because they can
-    * only be specified at pipeline creation time.
-    */
-   dest->viewport.count = src->viewport.count;
-   dest->scissor.count = src->scissor.count;
-   dest->discard_rectangle.count = src->discard_rectangle.count;
-
-   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
-      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
-                 src->viewport.count * sizeof(VkViewport))) {
-         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
-                      src->viewport.count);
-         dest_mask |= TU_DYNAMIC_VIEWPORT;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_SCISSOR) {
-      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
-                 src->scissor.count * sizeof(VkRect2D))) {
-         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
-                      src->scissor.count);
-         dest_mask |= TU_DYNAMIC_SCISSOR;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
-      if (dest->line_width != src->line_width) {
-         dest->line_width = src->line_width;
-         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
-      if (memcmp(&dest->depth_bias, &src->depth_bias,
-                 sizeof(src->depth_bias))) {
-         dest->depth_bias = src->depth_bias;
-         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
-      if (memcmp(&dest->blend_constants, &src->blend_constants,
-                 sizeof(src->blend_constants))) {
-         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
-         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
-      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
-                 sizeof(src->depth_bounds))) {
-         dest->depth_bounds = src->depth_bounds;
-         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
-      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
-                 sizeof(src->stencil_compare_mask))) {
-         dest->stencil_compare_mask = src->stencil_compare_mask;
-         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
-      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
-                 sizeof(src->stencil_write_mask))) {
-         dest->stencil_write_mask = src->stencil_write_mask;
-         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
-      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
-                 sizeof(src->stencil_reference))) {
-         dest->stencil_reference = src->stencil_reference;
-         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
-      if (memcmp(&dest->discard_rectangle.rectangles,
-                 &src->discard_rectangle.rectangles,
-                 src->discard_rectangle.count * sizeof(VkRect2D))) {
-         typed_memcpy(dest->discard_rectangle.rectangles,
-                      src->discard_rectangle.rectangles,
-                      src->discard_rectangle.count);
-         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
-      }
-   }
-}
-
 static VkResult
 tu_create_cmd_buffer(struct tu_device *device,
                      struct tu_cmd_pool *pool,
@@ -1567,10 +1541,10 @@ tu_create_cmd_buffer(struct tu_device *device,
       goto fail_scratch_bo;
 
    /* TODO: resize on overflow */
-   cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
-   cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
-   cmd_buffer->vsc_data = device->vsc_data;
-   cmd_buffer->vsc_data2 = device->vsc_data2;
+   cmd_buffer->vsc_draw_strm_pitch = device->vsc_draw_strm_pitch;
+   cmd_buffer->vsc_prim_strm_pitch = device->vsc_prim_strm_pitch;
+   cmd_buffer->vsc_draw_strm = device->vsc_draw_strm;
+   cmd_buffer->vsc_prim_strm = device->vsc_prim_strm;
 
    return VK_SUCCESS;
 
@@ -1586,9 +1560,6 @@ tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
 
    list_del(&cmd_buffer->pool_link);
 
-   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
-      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
-
    tu_cs_finish(&cmd_buffer->cs);
    tu_cs_finish(&cmd_buffer->draw_cs);
    tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
@@ -1601,8 +1572,6 @@ tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
 static VkResult
 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
 {
-   cmd_buffer->wait_for_idle = true;
-
    cmd_buffer->record_result = VK_SUCCESS;
 
    tu_bo_list_reset(&cmd_buffer->bo_list);
@@ -1611,10 +1580,8 @@ tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
    tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
    tu_cs_reset(&cmd_buffer->sub_cs);
 
-   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
-      cmd_buffer->descriptors[i].valid = 0;
-      cmd_buffer->descriptors[i].push_dirty = false;
-   }
+   for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
+      memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));
 
    cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
 
@@ -1702,6 +1669,16 @@ tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
    return tu_reset_cmd_buffer(cmd_buffer);
 }
 
+/* Initialize the cache, assuming all necessary flushes have happened but *not*
+ * invalidations.
+ */
+static void
+tu_cache_init(struct tu_cache_state *cache)
+{
+   cache->flush_bits = 0;
+   cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
+}
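+
+/* Lifecycle sketch (illustrative; tu_flush_for_access is defined later in
+ * this patch): writes record their required flushes as immediate or pending
+ * bits, and a later read promotes the pending bits it needs into flush_bits,
+ * which tu_emit_cache_flush() then turns into events:
+ *
+ *    tu_cache_init(&cmd->state.cache);
+ *    tu_flush_for_access(&cmd->state.cache,
+ *                        TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE, 0);
+ *    tu_flush_for_access(&cmd->state.cache, 0, TU_ACCESS_UCHE_READ);
+ *    tu_emit_cache_flush(cmd, cs); // emits the accumulated flushes
+ */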
+
 VkResult
 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                       const VkCommandBufferBeginInfo *pBeginInfo)
@@ -1719,14 +1696,14 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
    }
 
    memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
+   tu_cache_init(&cmd_buffer->state.cache);
+   tu_cache_init(&cmd_buffer->state.renderpass_cache);
    cmd_buffer->usage_flags = pBeginInfo->flags;
 
    tu_cs_begin(&cmd_buffer->cs);
    tu_cs_begin(&cmd_buffer->draw_cs);
    tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
 
-   cmd_buffer->scratch_seqno = 0;
-
    /* setup initial configuration into command buffer */
    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
       switch (cmd_buffer->queue_family_index) {
@@ -1736,11 +1713,18 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
       default:
          break;
       }
-   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
-              (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
-      assert(pBeginInfo->pInheritanceInfo);
-      cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
-      cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
+   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+      if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+         assert(pBeginInfo->pInheritanceInfo);
+         cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
+         cmd_buffer->state.subpass =
+            &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
+      } else {
+         /* When executing in the middle of another command buffer, the CCU
+          * state is unknown.
+          */
+         cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
+      }
    }
 
    cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
@@ -1748,6 +1732,10 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
    return VK_SUCCESS;
 }
 
+/* Sets vertex buffers to HW binding points.  We emit VBs in SDS (so that bin
+ * rendering can skip over unused state), so we need to collect all the
+ * bindings together into a single state emit at draw time.
+ */
 void
 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                         uint32_t firstBinding,
@@ -1760,12 +1748,14 @@ tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
    assert(firstBinding + bindingCount <= MAX_VBS);
 
    for (uint32_t i = 0; i < bindingCount; i++) {
-      cmd->state.vb.buffers[firstBinding + i] =
-         tu_buffer_from_handle(pBuffers[i]);
+      struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);
+
+      cmd->state.vb.buffers[firstBinding + i] = buf;
       cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
+
+      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
    }
 
-   /* VB states depend on VkPipelineVertexInputStateCreateInfo */
    cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
 }
 
@@ -1807,43 +1797,22 @@ tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                          uint32_t dynamicOffsetCount,
                          const uint32_t *pDynamicOffsets)
 {
-   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
    unsigned dyn_idx = 0;
 
    struct tu_descriptor_state *descriptors_state =
-      tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);
+      tu_get_descriptors_state(cmd, pipelineBindPoint);
 
    for (unsigned i = 0; i < descriptorSetCount; ++i) {
       unsigned idx = i + firstSet;
       TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
 
       descriptors_state->sets[idx] = set;
-      descriptors_state->valid |= (1u << idx);
-
-      /* Note: the actual input attachment indices come from the shader
-       * itself, so we can't generate the patched versions of these until
-       * draw time when both the pipeline and descriptors are bound and
-       * we're inside the render pass.
-       */
-      unsigned dst_idx = layout->set[idx].input_attachment_start;
-      memcpy(&descriptors_state->input_attachments[dst_idx * A6XX_TEX_CONST_DWORDS],
-             set->dynamic_descriptors,
-             set->layout->input_attachment_count * A6XX_TEX_CONST_DWORDS * 4);
 
       for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
-         /* Dynamic buffers come after input attachments in the descriptor set
-          * itself, but due to how the Vulkan descriptor set binding works, we
-          * have to put input attachments and dynamic buffers in separate
-          * buffers in the descriptor_state and then combine them at draw
-          * time. Binding a descriptor set only invalidates the descriptor
-          * sets after it, but if we try to tightly pack the descriptors after
-          * the input attachments then we could corrupt dynamic buffers in the
-          * descriptor set before it, or we'd have to move all the dynamic
-          * buffers over. We just put them into separate buffers to make
-          * binding as well as the later patching of input attachments easy.
-          */
-         unsigned src_idx = j + set->layout->input_attachment_count;
+         /* update the contents of the dynamic descriptor set */
+         unsigned src_idx = j;
          unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
          assert(dyn_idx < dynamicOffsetCount);
 
@@ -1871,12 +1840,78 @@ tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
             dst[5] = va >> 32;
          }
       }
+
+      for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
+         if (set->buffers[j]) {
+            tu_bo_list_add(&cmd->bo_list, set->buffers[j],
+                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
+         }
+      }
+
+      if (set->size > 0) {
+         tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
+                        MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+      }
    }
+   assert(dyn_idx == dynamicOffsetCount);
 
-   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE)
-      cmd_buffer->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
-   else
-      cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
+   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_update_value;
+   uint64_t addr[MAX_SETS + 1] = {};
+   struct tu_cs cs;
+
+   for (uint32_t i = 0; i < MAX_SETS; i++) {
+      struct tu_descriptor_set *set = descriptors_state->sets[i];
+      if (set)
+         addr[i] = set->va | 3;
+   }
+
+   if (layout->dynamic_offset_count) {
+      /* allocate and fill out dynamic descriptor set */
+      struct ts_cs_memory dynamic_desc_set;
+      VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
+                                    A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
+      assert(result == VK_SUCCESS);
+
+      memcpy(dynamic_desc_set.map, descriptors_state->dynamic_descriptors,
+             layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
+      addr[MAX_SETS] = dynamic_desc_set.iova | 3;
+   }
+
+   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
+      sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
+      hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
+      hlsq_update_value = 0x7c000;
+
+      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_SHADER_CONSTS;
+   } else {
+      assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);
+
+      sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
+      hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
+      hlsq_update_value = 0x3e00;
+
+      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
+   }
+
+   tu_cs_begin_sub_stream(&cmd->sub_cs, 24, &cs);
+
+   tu_cs_emit_pkt4(&cs, sp_bindless_base_reg, 10);
+   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
+   tu_cs_emit_pkt4(&cs, hlsq_bindless_base_reg, 10);
+   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
+   tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(.dword = hlsq_update_value));
+
+   struct tu_cs_entry ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
+      tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
+      tu_cs_emit_sds_ib(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, ib);
+      cmd->state.desc_sets_ib = ib;
+   } else {
+      /* note: for compute we could emit directly, instead of a CP_INDIRECT;
+       * however, the blob uses draw states for compute
+       */
+      tu_cs_emit_ib(&cmd->cs, &ib);
+   }
 }
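+
+/* Layout sketch for the bindless bases emitted above, assuming MAX_SETS == 4
+ * (so the 10-dword write covers 2 * (MAX_SETS + 1) registers):
+ *
+ *    addr[0..MAX_SETS-1]  64-bit VAs of the bound descriptor sets
+ *    addr[MAX_SETS]       64-bit VA of the dynamic descriptor set copy
+ *
+ * The same array is written to both the SP and HLSQ BINDLESS_BASE copies.
+ */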
 
 void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
@@ -1939,7 +1974,17 @@ tu_CmdPushConstants(VkCommandBuffer commandBuffer,
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    memcpy((void*) cmd->push_constants + offset, pValues, size);
-   cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
+   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
+}
+
+/* Flush everything that has been made available but that we haven't actually
+ * flushed yet.
+ */
+static void
+tu_flush_all_pending(struct tu_cache_state *cache)
+{
+   cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
+   cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
 }
 
 VkResult
@@ -1947,15 +1992,36 @@ tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
 
-   if (cmd_buffer->scratch_seqno) {
-      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
-                     MSM_SUBMIT_BO_WRITE);
+   /* We currently flush CCU at the end of the command buffer, like
+    * what the blob does. There's implicit synchronization around every
+    * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
+    * know yet if this command buffer will be the last in the submit so we
+    * have to defensively flush everything else.
+    *
+    * TODO: We could definitely do better than this, since these flushes
+    * aren't required by Vulkan, but we'd need kernel support to do that.
+    * Ideally, we'd like the kernel to flush everything afterwards, so that we
+    * wouldn't have to do any flushes here, and when submitting multiple
+    * command buffers there wouldn't be any unnecessary flushes in between.
+    */
+   if (cmd_buffer->state.pass) {
+      tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
+      tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
+   } else {
+      tu_flush_all_pending(&cmd_buffer->state.cache);
+      cmd_buffer->state.cache.flush_bits |=
+         TU_CMD_FLAG_CCU_FLUSH_COLOR |
+         TU_CMD_FLAG_CCU_FLUSH_DEPTH;
+      tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
    }
 
+   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
+                  MSM_SUBMIT_BO_WRITE);
+
    if (cmd_buffer->use_vsc_data) {
-      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
+      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
                      MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
-      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
+      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_prim_strm,
                      MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
    }
 
@@ -1986,6 +2052,28 @@ tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
    return cmd_buffer->record_result;
 }
 
+static struct tu_cs
+tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
+{
+   struct ts_cs_memory memory;
+   struct tu_cs cs;
+
+   /* TODO: share this logic with tu_pipeline_static_state */
+   tu_cs_alloc(&cmd->sub_cs, size, 1, &memory);
+   tu_cs_init_external(&cs, memory.map, memory.map + size);
+   tu_cs_begin(&cs);
+   tu_cs_reserve_space(&cs, size);
+
+   assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
+   cmd->state.dynamic_state[id].iova = memory.iova;
+   cmd->state.dynamic_state[id].size = size;
+
+   tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
+   tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);
+
+   return cs;
+}
+
 void
 tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                    VkPipelineBindPoint pipelineBindPoint,
@@ -1994,26 +2082,62 @@ tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
 
-   switch (pipelineBindPoint) {
-   case VK_PIPELINE_BIND_POINT_GRAPHICS:
-      cmd->state.pipeline = pipeline;
-      cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
-      break;
-   case VK_PIPELINE_BIND_POINT_COMPUTE:
-      cmd->state.compute_pipeline = pipeline;
-      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
-      break;
-   default:
-      unreachable("unrecognized pipeline bind point");
-      break;
-   }
-
    tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
                   MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
    for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
       tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                      MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
    }
+
+   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
+      cmd->state.compute_pipeline = pipeline;
+      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
+      return;
+   }
+
+   assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);
+
+   cmd->state.pipeline = pipeline;
+   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
+
+   struct tu_cs *cs = &cmd->draw_cs;
+   uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);
+   uint32_t i;
+
+   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
+   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
+   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
+   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
+   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
+   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
+   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
+   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
+
+   for_each_bit(i, mask)
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);
+
+   /* If the new pipeline requires more VBs than we had previously set up, we
+    * need to re-emit them in SDS.  If it requires the same set or fewer, we
+    * can just re-use the old SDS.
+    */
+   if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
+      cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
+
+   /* If the pipeline needs a dynamic descriptor, re-emit descriptor sets */
+   if (pipeline->layout->dynamic_offset_count)
+      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
+
+   /* The dynamic linewidth state depends on the pipeline state's
+    * gras_su_cntl, so the dynamic state ib must be updated when the
+    * pipeline changes.
+    */
+   if (pipeline->dynamic_state_mask & BIT(VK_DYNAMIC_STATE_LINE_WIDTH)) {
+      struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);
+
+      cmd->state.dynamic_gras_su_cntl &= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+      cmd->state.dynamic_gras_su_cntl |= pipeline->gras_su_cntl;
+
+      tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
+   }
 }
 
 void
@@ -2023,10 +2147,11 @@ tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                   const VkViewport *pViewports)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 18);
 
    assert(firstViewport == 0 && viewportCount == 1);
-   cmd->state.dynamic.viewport.viewports[0] = pViewports[0];
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_VIEWPORT;
+
+   tu6_emit_viewport(&cs, pViewports);
 }
 
 void
@@ -2036,21 +2161,23 @@ tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                  const VkRect2D *pScissors)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_SCISSOR, 3);
 
    assert(firstScissor == 0 && scissorCount == 1);
-   cmd->state.dynamic.scissor.scissors[0] = pScissors[0];
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_SCISSOR;
+
+   tu6_emit_scissor(&cs, pScissors);
 }
 
 void
 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);
 
-   cmd->state.dynamic.line_width = lineWidth;
+   cmd->state.dynamic_gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+   cmd->state.dynamic_gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(lineWidth / 2.0f);
 
-   /* line width depends on VkPipelineRasterizationStateCreateInfo */
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
+   tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
 }
 
 void
@@ -2060,12 +2187,9 @@ tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                    float depthBiasSlopeFactor)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   struct tu_cs *draw_cs = &cmd->draw_cs;
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BIAS, 4);
 
-   tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
-                       depthBiasSlopeFactor);
-
-   tu_cs_sanity_check(draw_cs);
+   tu6_emit_depth_bias(&cs, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
 }
 
 void
@@ -2073,11 +2197,10 @@ tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                         const float blendConstants[4])
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   struct tu_cs *draw_cs = &cmd->draw_cs;
-
-   tu6_emit_blend_constants(draw_cs, blendConstants);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5);
 
-   tu_cs_sanity_check(draw_cs);
+   tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
+   tu_cs_emit_array(&cs, (const uint32_t *) blendConstants, 4);
 }
 
 void
@@ -2087,20 +2210,26 @@ tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
 {
 }
 
+static void
+update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
+{
+   if (face & VK_STENCIL_FACE_FRONT_BIT)
+      *value |= A6XX_RB_STENCILMASK_MASK(mask);
+   if (face & VK_STENCIL_FACE_BACK_BIT)
+      *value |= A6XX_RB_STENCILMASK_BFMASK(mask);
+}
+
 void
 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                             VkStencilFaceFlags faceMask,
                             uint32_t compareMask)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2);
 
-   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
-      cmd->state.dynamic.stencil_compare_mask.front = compareMask;
-   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
-      cmd->state.dynamic.stencil_compare_mask.back = compareMask;
+   update_stencil_mask(&cmd->state.dynamic_stencil_mask, faceMask, compareMask);
 
-   /* the front/back compare masks must be updated together */
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
+   tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.dword = cmd->state.dynamic_stencil_mask));
 }
 
 void
@@ -2109,14 +2238,11 @@ tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                           uint32_t writeMask)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2);
 
-   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
-      cmd->state.dynamic.stencil_write_mask.front = writeMask;
-   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
-      cmd->state.dynamic.stencil_write_mask.back = writeMask;
+   update_stencil_mask(&cmd->state.dynamic_stencil_wrmask, faceMask, writeMask);
 
-   /* the front/back write masks must be updated together */
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
+   tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = cmd->state.dynamic_stencil_wrmask));
 }
 
 void
@@ -2125,14 +2251,11 @@ tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                           uint32_t reference)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2);
 
-   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
-      cmd->state.dynamic.stencil_reference.front = reference;
-   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
-      cmd->state.dynamic.stencil_reference.back = reference;
+   update_stencil_mask(&cmd->state.dynamic_stencil_ref, faceMask, reference);
 
-   /* the front/back references must be updated together */
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+   tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.dword = cmd->state.dynamic_stencil_ref));
 }
 
 void
@@ -2140,10 +2263,213 @@ tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                             const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS, 9);
+
+   assert(pSampleLocationsInfo);
+
+   tu6_emit_sample_locations(&cs, pSampleLocationsInfo);
+}
+
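+/* Resolve the flushes/invalidates a barrier needs for the given source and
+ * destination access masks. A worked example (sysmem mode, assuming the
+ * mapping in vk2tu_access() below): a barrier from a color attachment write
+ * to a shader read yields src = TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE and
+ * dst = TU_ACCESS_UCHE_READ, which resolves to a CCU color flush plus a
+ * UCHE cache invalidate.
+ */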
+static void
+tu_flush_for_access(struct tu_cache_state *cache,
+                    enum tu_cmd_access_mask src_mask,
+                    enum tu_cmd_access_mask dst_mask)
+{
+   enum tu_cmd_flush_bits flush_bits = 0;
+
+   if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
+      cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
+   }
+
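+   /* Note: writes only queue up "pending" flush bits here; they get promoted
+    * to actual flush_bits further down, once a destination access in another
+    * domain needs to observe the data. This way back-to-back writes in the
+    * same domain don't flush redundantly.
+    */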
+#define SRC_FLUSH(domain, flush, invalidate) \
+   if (src_mask & TU_ACCESS_##domain##_WRITE) {                      \
+      cache->pending_flush_bits |= TU_CMD_FLAG_##flush |             \
+         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate);   \
+   }
+
+   SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
+   SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+   SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef SRC_FLUSH
+
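+   /* Incoherent writes, unlike the coherent ones above, are flushed eagerly
+    * at the barrier instead of being deferred, presumably because the CCU
+    * contents depend on the current attachment/GMEM configuration and can't
+    * safely be written back later.
+    */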
+#define SRC_INCOHERENT_FLUSH(domain, flush, invalidate)              \
+   if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) {           \
+      flush_bits |= TU_CMD_FLAG_##flush;                             \
+      cache->pending_flush_bits |=                                   \
+         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate);   \
+   }
+
+   SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+   SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef SRC_INCOHERENT_FLUSH
+
+   if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE)) {
+      flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
+   }
+
+#define DST_FLUSH(domain, flush, invalidate) \
+   if (dst_mask & (TU_ACCESS_##domain##_READ |                 \
+                   TU_ACCESS_##domain##_WRITE)) {              \
+      flush_bits |= cache->pending_flush_bits &                \
+         (TU_CMD_FLAG_##invalidate |                           \
+          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush));     \
+   }
+
+   DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
+   DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+   DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef DST_FLUSH
+
+#define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
+   if (dst_mask & (TU_ACCESS_##domain##_READ |                 \
+                   TU_ACCESS_##domain##_WRITE)) {              \
+      flush_bits |= TU_CMD_FLAG_##invalidate |                 \
+          (cache->pending_flush_bits &                         \
+           (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush));    \
+   }
+
+   DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+   DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef DST_INCOHERENT_FLUSH
+
+   if (dst_mask & TU_ACCESS_WFI_READ) {
+      flush_bits |= TU_CMD_FLAG_WFI;
+   }
+
+   cache->flush_bits |= flush_bits;
+   cache->pending_flush_bits &= ~flush_bits;
+}
+
+static enum tu_cmd_access_mask
+vk2tu_access(VkAccessFlags flags, bool gmem)
+{
+   enum tu_cmd_access_mask mask = 0;
+
+   /* If the GPU writes a buffer that is then read by an indirect draw
+    * command, we theoretically need a WFI + WAIT_FOR_ME combination to
+    * wait for the writes to complete. The WAIT_FOR_ME is performed as part
+    * of the draw by the firmware, so we just need to execute a WFI.
+    */
+   if (flags &
+       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      mask |= TU_ACCESS_WFI_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
+        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP, I think */
+        VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
+        VK_ACCESS_HOST_READ_BIT | /* sysmem by definition */
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      mask |= TU_ACCESS_SYSMEM_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_HOST_WRITE_BIT |
+        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT | /* Write performed by CP, I think */
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      mask |= TU_ACCESS_SYSMEM_WRITE;
+   }
+
+   if (flags &
+       (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
+        VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
+        VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
+        /* TODO: Is there a no-cache bit for textures so that we can ignore
+         * these?
+         */
+        VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
+        VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      mask |= TU_ACCESS_UCHE_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
+        VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      mask |= TU_ACCESS_UCHE_WRITE;
+   }
+
+   /* When using GMEM, the CCU is always flushed automatically to GMEM, and
+    * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
+    * previous writes in sysmem mode when transitioning to GMEM. Therefore we
+    * can ignore CCU and pretend that color attachments and transfers use
+    * sysmem directly.
+    */
+
+   if (flags &
+       (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+        VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      if (gmem)
+         mask |= TU_ACCESS_SYSMEM_READ;
+      else
+         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      if (gmem)
+         mask |= TU_ACCESS_SYSMEM_READ;
+      else
+         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      if (gmem) {
+         mask |= TU_ACCESS_SYSMEM_WRITE;
+      } else {
+         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+      }
+   }
+
+   if (flags &
+       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      if (gmem) {
+         mask |= TU_ACCESS_SYSMEM_WRITE;
+      } else {
+         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
+      }
+   }
+
+   /* When the dst access is a transfer read/write, it seems we sometimes need
+    * to insert a WFI after any flushes, to guarantee that the flushes finish
+    * before the 2D engine starts. However the opposite (i.e. a WFI after
+    * CP_BLIT and before any subsequent flush) does not seem to be needed, and
+    * the blob doesn't emit such a WFI.
+    */
+
+   if (flags &
+       (VK_ACCESS_TRANSFER_WRITE_BIT |
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      if (gmem) {
+         mask |= TU_ACCESS_SYSMEM_WRITE;
+      } else {
+         mask |= TU_ACCESS_CCU_COLOR_WRITE;
+      }
+      mask |= TU_ACCESS_WFI_READ;
+   }
 
-   tu6_emit_sample_locations(&cmd->draw_cs, pSampleLocationsInfo);
+   if (flags &
+       (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
+   }
+
+   return mask;
 }
 
 void
 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                       uint32_t commandBufferCount,
@@ -2154,6 +2480,15 @@ tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
 
    assert(commandBufferCount > 0);
 
+   /* Emit any pending flushes first, since the secondary command buffers
+    * were recorded against a clean initial cache state.
+    */
+   if (cmd->state.pass) {
+      tu_flush_all_pending(&cmd->state.renderpass_cache);
+      tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
+   } else {
+      tu_flush_all_pending(&cmd->state.cache);
+      tu_emit_cache_flush(cmd, &cmd->cs);
+   }
+
    for (uint32_t i = 0; i < commandBufferCount; i++) {
       TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
 
@@ -2192,6 +2527,17 @@ tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
       }
    }
    cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
+
+   /* After executing secondary command buffers, there may have been arbitrary
+    * flushes executed, so when we encounter a pipeline barrier with a
+    * srcMask, we have to assume that we need to invalidate. Therefore we need
+    * to re-initialize the cache with all pending invalidate bits set.
+    */
+   if (cmd->state.pass) {
+      tu_cache_init(&cmd->state.renderpass_cache);
+   } else {
+      tu_cache_init(&cmd->state.cache);
+   }
 }
 
 VkResult
@@ -2285,6 +2631,29 @@ tu_TrimCommandPool(VkDevice device,
    }
 }
 
+static void
+tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
+                   const struct tu_subpass_barrier *barrier,
+                   bool external)
+{
+   /* Note: we don't know until the end of the subpass whether we'll use
+    * sysmem, so assume sysmem here to be safe.
+    */
+   struct tu_cache_state *cache =
+      external ? &cmd_buffer->state.cache : &cmd_buffer->state.renderpass_cache;
+   enum tu_cmd_access_mask src_flags =
+      vk2tu_access(barrier->src_access_mask, false);
+   enum tu_cmd_access_mask dst_flags =
+      vk2tu_access(barrier->dst_access_mask, false);
+
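+   /* These flags are set by the render pass code when attachments were
+    * written through the CCU, which the Vulkan access masks alone can't
+    * express.
+    */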
+   if (barrier->incoherent_ccu_color)
+      src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+   if (barrier->incoherent_ccu_depth)
+      src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
+
+   tu_flush_for_access(cache, src_flags, dst_flags);
+}
+
 void
 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBegin,
@@ -2301,13 +2670,24 @@ tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
    tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
    tu_cmd_prepare_tile_store_ib(cmd);
 
-   tu_emit_load_clear(cmd, pRenderPassBegin);
+   /* Note: because this is external, any flushes will happen before draw_cs
+    * gets called. However, deferred flushes may still have to happen later,
+    * as part of the subpass.
+    */
+   tu_subpass_barrier(cmd, &pass->subpasses[0].start_barrier, true);
+   cmd->state.renderpass_cache.pending_flush_bits =
+      cmd->state.cache.pending_flush_bits;
+   cmd->state.renderpass_cache.flush_bits = 0;
+
+   tu_emit_renderpass_begin(cmd, pRenderPassBegin);
 
    tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
    tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
    tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
    tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
 
+   tu_set_input_attachments(cmd, cmd->state.subpass);
+
    /* note: use_hw_binning only checks tiling config */
    if (use_hw_binning(cmd))
       cmd->use_vsc_data = true;
@@ -2318,8 +2698,7 @@ tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
    }
 
-   /* Flag input attachment descriptors for re-emission if necessary */
-   cmd->state.dirty |= TU_CMD_DIRTY_INPUT_ATTACHMENTS;
+   cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE;
 }
 
 void
@@ -2369,32 +2748,12 @@ tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
 
    tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
 
-   /* Emit flushes so that input attachments will read the correct value.
-    * TODO: use subpass dependencies to flush or not
-    */
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-
-   if (subpass->resolve_attachments) {
-      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
-
-      for (unsigned i = 0; i < subpass->color_count; i++) {
-         uint32_t a = subpass->resolve_attachments[i].attachment;
-         if (a == VK_ATTACHMENT_UNUSED)
-            continue;
-
-         tu6_emit_sysmem_resolve(cmd, cs, a,
-                                 subpass->color_attachments[i].attachment);
-      }
-
-      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   }
+   tu6_emit_sysmem_resolves(cmd, cs, subpass);
 
    tu_cond_exec_end(cs);
 
-   /* subpass->input_count > 0 then texture cache invalidate is likely to be needed */
-   if (cmd->state.subpass->input_count)
-      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+   /* Handle dependencies for the next subpass */
+   tu_subpass_barrier(cmd, &cmd->state.subpass->start_barrier, false);
 
    /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
    tu6_emit_zs(cmd, cmd->state.subpass, cs);
@@ -2402,8 +2761,7 @@ tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
    tu6_emit_msaa(cs, cmd->state.subpass->samples);
    tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
 
-   /* Flag input attachment descriptors for re-emission if necessary */
-   cmd->state.dirty |= TU_CMD_DIRTY_INPUT_ATTACHMENTS;
+   tu_set_input_attachments(cmd, cmd->state.subpass);
 }
 
 void
@@ -2466,75 +2824,6 @@ struct tu_draw_info
    uint64_t streamout_buffer_offset;
 };
 
-#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
-#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
-#define ENABLE_NON_GMEM (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_SYSMEM)
-
-enum tu_draw_state_group_id
-{
-   TU_DRAW_STATE_PROGRAM,
-   TU_DRAW_STATE_PROGRAM_BINNING,
-   TU_DRAW_STATE_VI,
-   TU_DRAW_STATE_VI_BINNING,
-   TU_DRAW_STATE_VP,
-   TU_DRAW_STATE_RAST,
-   TU_DRAW_STATE_DS,
-   TU_DRAW_STATE_BLEND,
-   TU_DRAW_STATE_VS_CONST,
-   TU_DRAW_STATE_GS_CONST,
-   TU_DRAW_STATE_FS_CONST,
-   TU_DRAW_STATE_DESC_SETS,
-   TU_DRAW_STATE_DESC_SETS_GMEM,
-   TU_DRAW_STATE_DESC_SETS_LOAD,
-   TU_DRAW_STATE_VS_PARAMS,
-
-   TU_DRAW_STATE_COUNT,
-};
-
-struct tu_draw_state_group
-{
-   enum tu_draw_state_group_id id;
-   uint32_t enable_mask;
-   struct tu_cs_entry ib;
-};
-
-static inline uint32_t
-tu6_stage2opcode(gl_shader_stage type)
-{
-   switch (type) {
-   case MESA_SHADER_VERTEX:
-   case MESA_SHADER_TESS_CTRL:
-   case MESA_SHADER_TESS_EVAL:
-   case MESA_SHADER_GEOMETRY:
-      return CP_LOAD_STATE6_GEOM;
-   case MESA_SHADER_FRAGMENT:
-   case MESA_SHADER_COMPUTE:
-   case MESA_SHADER_KERNEL:
-      return CP_LOAD_STATE6_FRAG;
-   default:
-      unreachable("bad shader type");
-   }
-}
-
-static inline enum a6xx_state_block
-tu6_stage2shadersb(gl_shader_stage type)
-{
-   switch (type) {
-   case MESA_SHADER_VERTEX:
-      return SB6_VS_SHADER;
-   case MESA_SHADER_GEOMETRY:
-      return SB6_GS_SHADER;
-   case MESA_SHADER_FRAGMENT:
-      return SB6_FS_SHADER;
-   case MESA_SHADER_COMPUTE:
-   case MESA_SHADER_KERNEL:
-      return SB6_CS_SHADER;
-   default:
-      unreachable("bad shader type");
-      return ~0;
-   }
-}
-
 static void
 tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                      struct tu_descriptor_state *descriptors_state,
@@ -2543,7 +2832,7 @@ tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
 {
    const struct tu_program_descriptor_linkage *link =
       &pipeline->program.link[type];
-   const struct ir3_ubo_analysis_state *state = &link->ubo_state;
+   const struct ir3_ubo_analysis_state *state = &link->const_state.ubo_state;
 
    if (link->push_consts.count > 0) {
       unsigned num_units = link->push_consts.count;
@@ -2585,14 +2874,6 @@ tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
          descriptors_state->dynamic_descriptors :
          descriptors_state->sets[state->range[i].bindless_base]->mapped_ptr;
       unsigned block = state->range[i].block;
-      /* If the block in the shader here is in the dynamic descriptor set, it
-       * is an index into the dynamic descriptor set which is combined from
-       * dynamic descriptors and input attachments on-the-fly, and we don't
-       * have access to it here. Instead we work backwards to get the index
-       * into dynamic_descriptors.
-       */
-      if (state->range[i].bindless_base == MAX_SETS)
-         block -= pipeline->layout->input_attachment_count;
       uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
       uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);
       assert(va);
@@ -2661,141 +2942,28 @@ tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
    return VK_SUCCESS;
 }
 
-static VkResult
-tu6_emit_descriptor_sets(struct tu_cmd_buffer *cmd,
-                         const struct tu_pipeline *pipeline,
-                         VkPipelineBindPoint bind_point,
-                         struct tu_cs_entry *entry,
-                         bool gmem)
-{
-   struct tu_cs *draw_state = &cmd->sub_cs;
-   struct tu_pipeline_layout *layout = pipeline->layout;
-   struct tu_descriptor_state *descriptors_state =
-      tu_get_descriptors_state(cmd, bind_point);
-   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-   const uint32_t *input_attachment_idx =
-      pipeline->program.input_attachment_idx;
-   uint32_t num_dynamic_descs = layout->dynamic_offset_count +
-      layout->input_attachment_count;
-   struct ts_cs_memory dynamic_desc_set;
-   VkResult result;
-
-   if (num_dynamic_descs > 0) {
-      /* allocate and fill out dynamic descriptor set */
-      result = tu_cs_alloc(draw_state, num_dynamic_descs,
-                           A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
-      if (result != VK_SUCCESS)
-         return result;
-
-      memcpy(dynamic_desc_set.map, descriptors_state->input_attachments,
-             layout->input_attachment_count * A6XX_TEX_CONST_DWORDS * 4);
-
-      if (gmem) {
-         /* Patch input attachments to refer to GMEM instead */
-         for (unsigned i = 0; i < layout->input_attachment_count; i++) {
-            uint32_t *dst =
-               &dynamic_desc_set.map[A6XX_TEX_CONST_DWORDS * i];
-
-            /* The compiler has already laid out input_attachment_idx in the
-             * final order of input attachments, so there's no need to go
-             * through the pipeline layout finding input attachments.
-             */
-            unsigned attachment_idx = input_attachment_idx[i];
-
-            /* It's possible for the pipeline layout to include an input
-             * attachment which doesn't actually exist for the current
-             * subpass. Of course, this is only valid so long as the pipeline
-             * doesn't try to actually load that attachment. Just skip
-             * patching in that scenario to avoid out-of-bounds accesses.
-             */
-            if (attachment_idx >= cmd->state.subpass->input_count)
-               continue;
-
-            uint32_t a = cmd->state.subpass->input_attachments[attachment_idx].attachment;
-            const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
-
-            assert(att->gmem_offset >= 0);
-
-            dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
-            dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
-            dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
-            dst[2] |=
-               A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
-               A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
-            dst[3] = 0;
-            dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
-            dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
-            for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
-               dst[i] = 0;
-
-            if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
-               tu_finishme("patch input attachment pitch for secondary cmd buffer");
-         }
-      }
+static struct tu_cs_entry
+tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
+                        const struct tu_pipeline *pipeline)
+{
+   struct tu_cs cs;
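+   /* Worst case is 4 dwords per binding: one pkt4 header plus the
+    * VFD_FETCH BASE lo/hi and SIZE registers, which are contiguous.
+    */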
+   tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);
 
-      memcpy(dynamic_desc_set.map + layout->input_attachment_count * A6XX_TEX_CONST_DWORDS,
-             descriptors_state->dynamic_descriptors,
-             layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
-   }
+   int binding;
+   for_each_bit(binding, pipeline->vi.bindings_used) {
+      const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
+      const VkDeviceSize offset = buf->bo_offset +
+         cmd->state.vb.offsets[binding];
 
-   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg;
-   uint32_t hlsq_update_value;
-   switch (bind_point) {
-   case VK_PIPELINE_BIND_POINT_GRAPHICS:
-      sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
-      hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
-      hlsq_update_value = 0x7c000;
-      break;
-   case VK_PIPELINE_BIND_POINT_COMPUTE:
-      sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
-      hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
-      hlsq_update_value = 0x3e00;
-      break;
-   default:
-      unreachable("bad bind point");
-   }
+      tu_cs_emit_regs(&cs,
+                      A6XX_VFD_FETCH_BASE(binding, .bo = buf->bo, .bo_offset = offset),
+                      A6XX_VFD_FETCH_SIZE(binding, buf->size - offset));
 
-   /* Be careful here to *not* refer to the pipeline, so that if only the
-    * pipeline changes we don't have to emit this again (except if there are
-    * dynamic descriptors in the pipeline layout). This means always emitting
-    * all the valid descriptors, which means that we always have to put the
-    * dynamic descriptor in the driver-only slot at the end
-    */
-   uint32_t num_user_sets = util_last_bit(descriptors_state->valid);
-   uint32_t num_sets = num_user_sets;
-   if (num_dynamic_descs > 0) {
-      num_user_sets = MAX_SETS;
-      num_sets = num_user_sets + 1;
    }
 
-   unsigned regs[2] = { sp_bindless_base_reg, hlsq_bindless_base_reg };
-
-   struct tu_cs cs;
-   result = tu_cs_begin_sub_stream(draw_state, ARRAY_SIZE(regs) * (1 + num_sets * 2) + 2, &cs);
-   if (result != VK_SUCCESS)
-      return result;
-
-   if (num_sets > 0) {
-      for (unsigned i = 0; i < ARRAY_SIZE(regs); i++) {
-         tu_cs_emit_pkt4(&cs, regs[i], num_sets * 2);
-         for (unsigned j = 0; j < num_user_sets; j++) {
-            if (descriptors_state->valid & (1 << j)) {
-               /* magic | 3 copied from the blob */
-               tu_cs_emit_qw(&cs, descriptors_state->sets[j]->va | 3);
-            } else {
-               tu_cs_emit_qw(&cs, 0 | 3);
-            }
-         }
-         if (num_dynamic_descs > 0) {
-            tu_cs_emit_qw(&cs, dynamic_desc_set.iova | 3);
-         }
-      }
-
-      tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(hlsq_update_value));
-   }
+   cmd->vertex_bindings_set = pipeline->vi.bindings_used;
 
-   *entry = tu_cs_end_sub_stream(draw_state, &cs);
-   return VK_SUCCESS;
+   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
 }
 
 static void
@@ -2865,9 +3033,6 @@ tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
                      const struct tu_draw_info *draw)
 {
    const struct tu_pipeline *pipeline = cmd->state.pipeline;
-   const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
-   struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
-   uint32_t draw_state_group_count = 0;
    VkResult result;
 
    struct tu_descriptor_state *descriptors_state =
@@ -2879,182 +3044,19 @@ tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
                    A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
                                             pipeline->ia.primitive_restart && draw->indexed));
 
-   if (cmd->state.dirty &
-          (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
-      tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
-                            dynamic->line_width);
-   }
-
-   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
-      tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
-                                    dynamic->stencil_compare_mask.back);
-   }
-
-   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
-      tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
-                                  dynamic->stencil_write_mask.back);
-   }
-
-   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
-      tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
-                                 dynamic->stencil_reference.back);
-   }
-
-   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_VIEWPORT)) {
-      tu6_emit_viewport(cs, &cmd->state.dynamic.viewport.viewports[0]);
-   }
-
-   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_SCISSOR) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_SCISSOR)) {
-      tu6_emit_scissor(cs, &cmd->state.dynamic.scissor.scissors[0]);
-   }
-
-   if (cmd->state.dirty &
-       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
-      for (uint32_t i = 0; i < pipeline->vi.count; i++) {
-         const uint32_t binding = pipeline->vi.bindings[i];
-         const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
-         const VkDeviceSize offset = buf->bo_offset +
-                                     cmd->state.vb.offsets[binding];
-         const VkDeviceSize size =
-            offset < buf->size ? buf->size - offset : 0;
-
-         tu_cs_emit_regs(cs,
-                         A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
-                         A6XX_VFD_FETCH_SIZE(i, size));
-      }
-   }
-
-   if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_PROGRAM,
-            .enable_mask = ENABLE_DRAW,
-            .ib = pipeline->program.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_PROGRAM_BINNING,
-            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
-            .ib = pipeline->program.binning_state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_VI,
-            .enable_mask = ENABLE_DRAW,
-            .ib = pipeline->vi.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_VI_BINNING,
-            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
-            .ib = pipeline->vi.binning_state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_VP,
-            .enable_mask = ENABLE_ALL,
-            .ib = pipeline->vp.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_RAST,
-            .enable_mask = ENABLE_ALL,
-            .ib = pipeline->rast.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_DS,
-            .enable_mask = ENABLE_ALL,
-            .ib = pipeline->ds.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_BLEND,
-            .enable_mask = ENABLE_ALL,
-            .ib = pipeline->blend.state_ib,
-         };
-   }
-
-   if (cmd->state.dirty &
-         (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_VS_CONST,
-            .enable_mask = ENABLE_ALL,
-            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_GS_CONST,
-            .enable_mask = ENABLE_ALL,
-            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY)
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_FS_CONST,
-            .enable_mask = ENABLE_DRAW,
-            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
-         };
+   if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
+      cmd->state.shader_const_ib[MESA_SHADER_VERTEX] =
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
+      cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY] =
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
+      cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT] =
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
    }
 
    if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
       tu6_emit_streamout(cmd, cs);
 
-   /* If there are any any dynamic descriptors, then we may need to re-emit
-    * them after every pipeline change in case the number of input attachments
-    * changes. We also always need to re-emit after a pipeline change if there
-    * are any input attachments, because the input attachment index comes from
-    * the pipeline. Finally, it can also happen that the subpass changes
-    * without the pipeline changing, in which case the GMEM descriptors need
-    * to be patched differently.
-    *
-    * TODO: We could probably be clever and avoid re-emitting state on
-    * pipeline changes if the number of input attachments is always 0. We
-    * could also only re-emit dynamic state.
-    */
-   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS ||
-       ((pipeline->layout->dynamic_offset_count +
-         pipeline->layout->input_attachment_count > 0) &&
-        cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) ||
-       (pipeline->layout->input_attachment_count > 0 &&
-        cmd->state.dirty & TU_CMD_DIRTY_INPUT_ATTACHMENTS)) {
-      struct tu_cs_entry desc_sets, desc_sets_gmem;
-      bool need_gmem_desc_set = pipeline->layout->input_attachment_count > 0;
-
-      result = tu6_emit_descriptor_sets(cmd, pipeline,
-                                        VK_PIPELINE_BIND_POINT_GRAPHICS,
-                                        &desc_sets, false);
-      if (result != VK_SUCCESS)
-         return result;
-
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_DESC_SETS,
-            .enable_mask = need_gmem_desc_set ? ENABLE_NON_GMEM : ENABLE_ALL,
-            .ib = desc_sets,
-         };
-
-      if (need_gmem_desc_set) {
-         result = tu6_emit_descriptor_sets(cmd, pipeline,
-                                           VK_PIPELINE_BIND_POINT_GRAPHICS,
-                                           &desc_sets_gmem, true);
-         if (result != VK_SUCCESS)
-            return result;
-
-         draw_state_groups[draw_state_group_count++] =
-            (struct tu_draw_state_group) {
-               .id = TU_DRAW_STATE_DESC_SETS_GMEM,
-               .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
-               .ib = desc_sets_gmem,
-            };
-      }
-
+   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
       /* We need to reload the descriptors every time the descriptor sets
        * change. However, the commands we send only depend on the pipeline
        * because the whole point is to cache descriptors which are used by the
@@ -3076,80 +3078,82 @@ tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
          tu_cs_emit_array(&load_cs,
                           (uint32_t *)((char  *)load_entry->bo->map + load_entry->offset),
                           load_entry->size / 4);
-         struct tu_cs_entry load_copy = tu_cs_end_sub_stream(&cmd->sub_cs, &load_cs);
-
-         draw_state_groups[draw_state_group_count++] =
-            (struct tu_draw_state_group) {
-               .id = TU_DRAW_STATE_DESC_SETS_LOAD,
-               /* The blob seems to not enable this for binning, even when
-                * resources would actually be used in the binning shader.
-                * Presumably the overhead of prefetching the resources isn't
-                * worth it.
-                */
-               .enable_mask = ENABLE_DRAW,
-               .ib = load_copy,
-            };
+         cmd->state.desc_sets_load_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &load_cs);
+      } else {
+         cmd->state.desc_sets_load_ib.size = 0;
       }
    }
 
+   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
+      cmd->state.vertex_buffers_ib = tu6_emit_vertex_buffers(cmd, pipeline);
+
    struct tu_cs_entry vs_params;
    result = tu6_emit_vs_params(cmd, draw, &vs_params);
    if (result != VK_SUCCESS)
       return result;
 
-   draw_state_groups[draw_state_group_count++] =
-      (struct tu_draw_state_group) {
-         .id = TU_DRAW_STATE_VS_PARAMS,
-         .enable_mask = ENABLE_ALL,
-         .ib = vs_params,
-      };
-
-   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
-   for (uint32_t i = 0; i < draw_state_group_count; i++) {
-      const struct tu_draw_state_group *group = &draw_state_groups[i];
-      debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
-      uint32_t cp_set_draw_state =
-         CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
-         group->enable_mask |
-         CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
-      uint64_t iova;
-      if (group->ib.size) {
-         iova = group->ib.bo->iova + group->ib.offset;
-      } else {
-         cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
-         iova = 0;
+   /* for the first draw in a renderpass, re-emit all the draw states
+    *
+    * and if a draw-state disabling path (CmdClearAttachments 3D fallback) was
+    * used, then draw states must be re-emitted. note however this only happens
+    * in the sysmem path, so it can be skipped for the gmem path (TODO)
+    *
+    * the two input attachment states are excluded because secondary command
+    * buffers don't have a state ib to restore them, and not re-emitting them
+    * is OK since CmdClearAttachments won't disable/overwrite them
+    */
+   if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
+      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));
+
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_PARAMS, vs_params);
+
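+      /* For each dynamic state group, use the command buffer's copy when the
+       * pipeline declares that state dynamic, and the pipeline's baked copy
+       * otherwise.
+       */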
+      for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i,
+                               ((pipeline->dynamic_state_mask & BIT(i)) ?
+                                cmd->state.dynamic_state[i] :
+                                pipeline->dynamic_state[i]));
       }
+   } else {
 
-      tu_cs_emit(cs, cp_set_draw_state);
-      tu_cs_emit_qw(cs, iova);
+      /* emit draw states that were just updated
+       * note we eventually don't want to have to emit anything here
+       */
+      uint32_t draw_state_count =
+         ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 3 : 0) +
+         ((cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) ? 1 : 0) +
+         ((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
+         1; /* vs_params */
+
+      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);
+
+      if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
+         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
+         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
+         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
+      }
+      if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS)
+         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
+      if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
+         tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
+      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_PARAMS, vs_params);
    }
 
    tu_cs_sanity_check(cs);
 
    /* track BOs */
-   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
-      for (uint32_t i = 0; i < MAX_VBS; i++) {
-         const struct tu_buffer *buf = cmd->state.vb.buffers[i];
-         if (buf)
-            tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
-      }
-   }
-   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
-      unsigned i;
-      for_each_bit(i, descriptors_state->valid) {
-         struct tu_descriptor_set *set = descriptors_state->sets[i];
-         for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
-            if (set->buffers[j]) {
-               tu_bo_list_add(&cmd->bo_list, set->buffers[j],
-                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
-            }
-         }
-         if (set->size > 0) {
-            tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
-                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
-         }
-      }
-   }
    if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
       for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
          const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
@@ -3164,12 +3168,7 @@ tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
     * bits to preserve instead. The only things not emitted here are
     * compute-related state.
     */
-   cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
-
-   /* Fragment shader state overwrites compute shader state, so flag the
-    * compute pipeline for re-emit.
-    */
-   cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
+   cmd->state.dirty &= (TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
    return VK_SUCCESS;
 }
 
@@ -3281,6 +3280,8 @@ tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
    struct tu_cs *cs = &cmd->draw_cs;
    VkResult result;
 
+   tu_emit_cache_flush_renderpass(cmd, cs);
+
    result = tu6_bind_draw_states(cmd, cs, draw);
    if (result != VK_SUCCESS) {
       cmd->record_result = result;
@@ -3295,12 +3296,10 @@ tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
    if (cmd->state.streamout_enabled) {
       for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
          if (cmd->state.streamout_enabled & (1 << i))
-            tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
+            tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
       }
    }
 
-   cmd->wait_for_idle = true;
-
    tu_cs_sanity_check(cs);
 }
 
@@ -3478,7 +3477,11 @@ tu_dispatch(struct tu_cmd_buffer *cmd,
    struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
    struct tu_descriptor_state *descriptors_state =
       &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
-   VkResult result;
+
+   /* TODO: We could probably flush less if we add a compute_flush_bits
+    * bitfield.
+    */
+   tu_emit_cache_flush(cmd, cs);
 
    if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
       tu_cs_emit_ib(cs, &pipeline->program.state_ib);
@@ -3491,47 +3494,14 @@ tu_dispatch(struct tu_cmd_buffer *cmd,
 
    tu_emit_compute_driver_params(cs, pipeline, info);
 
-   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) {
-      result = tu6_emit_descriptor_sets(cmd, pipeline,
-                                        VK_PIPELINE_BIND_POINT_COMPUTE, &ib,
-                                        false);
-      if (result != VK_SUCCESS) {
-         cmd->record_result = result;
-         return;
-      }
-
-      /* track BOs */
-      unsigned i;
-      for_each_bit(i, descriptors_state->valid) {
-         struct tu_descriptor_set *set = descriptors_state->sets[i];
-         for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
-            if (set->buffers[j]) {
-               tu_bo_list_add(&cmd->bo_list, set->buffers[j],
-                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
-            }
-         }
-
-         if (set->size > 0) {
-            tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
-                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
-         }
-      }
-   }
-
-   if (ib.size)
-      tu_cs_emit_ib(cs, &ib);
-
-   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS)
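+   /* As in the graphics path, the load state depends only on the pipeline,
+    * but has to be re-executed whenever the bound descriptor sets change.
+    */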
+   if ((cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) &&
+       pipeline->load_state.state_ib.size > 0) {
       tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
+   }
 
    cmd->state.dirty &=
       ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
 
-   /* Compute shader state overwrites fragment shader state, so we flag the
-    * graphics pipeline for re-emit.
-    */
-   cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
-
    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
 
@@ -3576,8 +3546,6 @@ tu_dispatch(struct tu_cmd_buffer *cmd,
    }
 
    tu_cs_emit_wfi(cs);
-
-   tu6_emit_cache_flush(cmd, cs);
 }
 
 void
@@ -3646,6 +3614,10 @@ tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
    tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
    tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
 
+   cmd_buffer->state.cache.pending_flush_bits |=
+      cmd_buffer->state.renderpass_cache.pending_flush_bits;
+   tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);
+
    cmd_buffer->state.pass = NULL;
    cmd_buffer->state.subpass = NULL;
    cmd_buffer->state.framebuffer = NULL;
@@ -3675,16 +3647,67 @@ tu_barrier(struct tu_cmd_buffer *cmd,
            const VkImageMemoryBarrier *pImageMemoryBarriers,
            const struct tu_barrier_info *info)
 {
-   /* renderpass case is only for subpass self-dependencies
-    * which means syncing the render output with texture cache
-    * note: only the CACHE_INVALIDATE is needed in GMEM mode
-    * and in sysmem mode we might not need either color/depth flush
+   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
+   VkAccessFlags srcAccessMask = 0;
+   VkAccessFlags dstAccessMask = 0;
+
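+   /* The cache tracking here is global per-domain rather than per-resource,
+    * so all the barriers can simply be unioned into one src/dst access mask
+    * pair.
+    */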
+   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
+      srcAccessMask |= pMemoryBarriers[i].srcAccessMask;
+      dstAccessMask |= pMemoryBarriers[i].dstAccessMask;
+   }
+
+   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
+      srcAccessMask |= pBufferMemoryBarriers[i].srcAccessMask;
+      dstAccessMask |= pBufferMemoryBarriers[i].dstAccessMask;
+   }
+
+   enum tu_cmd_access_mask src_flags = 0;
+   enum tu_cmd_access_mask dst_flags = 0;
+
+   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
+      TU_FROM_HANDLE(tu_image, image, pImageMemoryBarriers[i].image);
+      VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
+      /* For non-linear images, PREINITIALIZED is the same as UNDEFINED */
+      if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
+          (image->tiling != VK_IMAGE_TILING_LINEAR &&
+           old_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
+         /* The underlying memory for this image may have been used earlier
+          * within the same queue submission for a different image, which
+          * means that there may be old, stale cache entries which are in the
+          * "wrong" location, which could cause problems later after writing
+          * to the image. We don't want these entries being flushed later and
+          * overwriting the actual image, so we need to flush the CCU.
+          */
+         src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+      }
+      srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
+      dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
+   }
+
+   /* Inside a renderpass, we don't know yet whether we'll be using sysmem,
+    * so we have to use the sysmem flushes.
     */
-   if (cmd->state.pass) {
-      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_COLOR_TS, true);
-      tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_DEPTH_TS, true);
-      tu6_emit_event_write(cmd, &cmd->draw_cs, CACHE_INVALIDATE, false);
-      return;
+   bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
+      !cmd->state.pass;
+   src_flags |= vk2tu_access(srcAccessMask, gmem);
+   dst_flags |= vk2tu_access(dstAccessMask, gmem);
+
+   struct tu_cache_state *cache =
+      cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
+   tu_flush_for_access(cache, src_flags, dst_flags);
+
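+   /* vkCmdWaitEvents case: have the CP poll each event's BO until the value
+    * written by vkCmdSetEvent (1) appears.
+    */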
+   for (uint32_t i = 0; i < info->eventCount; i++) {
+      TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);
+
+      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
+
+      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
+      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
+                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
+      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
+      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
+      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
+      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
    }
 }
 
@@ -3713,17 +3736,36 @@ tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
 }
 
 static void
-write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
+write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
+            VkPipelineStageFlags stageMask, unsigned value)
 {
    struct tu_cs *cs = &cmd->cs;
 
-   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
+   /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
+   assert(!cmd->state.pass);
 
-   /* TODO: any flush required before/after ? */
+   tu_emit_cache_flush(cmd, cs);
 
-   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
-   tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
-   tu_cs_emit(cs, value);
+   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
+
+   /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
+    * read by the CP, so the draw indirect stage counts as top-of-pipe too.
+    */
+   VkPipelineStageFlags top_of_pipe_flags =
+      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
+      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+
+   if (!(stageMask & ~top_of_pipe_flags)) {
+      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
+      tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
+      tu_cs_emit(cs, value);
+   } else {
+      /* Use a RB_DONE_TS event to wait for everything to complete. */
+      tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
+      tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
+      tu_cs_emit_qw(cs, event->bo.iova);
+      tu_cs_emit(cs, value);
+   }
 }
 
 void
@@ -3734,7 +3776,7 @@ tu_CmdSetEvent(VkCommandBuffer commandBuffer,
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    TU_FROM_HANDLE(tu_event, event, _event);
 
-   write_event(cmd, event, 1);
+   write_event(cmd, event, stageMask, 1);
 }
 
 void
@@ -3745,7 +3787,7 @@ tu_CmdResetEvent(VkCommandBuffer commandBuffer,
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    TU_FROM_HANDLE(tu_event, event, _event);
 
-   write_event(cmd, event, 0);
+   write_event(cmd, event, stageMask, 0);
 }
 
 void
@@ -3762,23 +3804,15 @@ tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                  const VkImageMemoryBarrier *pImageMemoryBarriers)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   struct tu_cs *cs = &cmd->cs;
-
-   /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */
-
-   for (uint32_t i = 0; i < eventCount; i++) {
-      TU_FROM_HANDLE(tu_event, event, pEvents[i]);
+   struct tu_barrier_info info;
 
-      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
+   info.eventCount = eventCount;
+   info.pEvents = pEvents;
+   info.srcStageMask = 0;
 
-      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
-      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
-                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
-      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
-      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
-      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
-      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
-   }
+   tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
+              bufferMemoryBarrierCount, pBufferMemoryBarriers,
+              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
 }
 
 void