freedreno/decode: move dependencies up a level
diff --git a/src/freedreno/vulkan/tu_cmd_buffer.c b/src/freedreno/vulkan/tu_cmd_buffer.c
index f38c07f34e367200cb5736842d9a0d44d508877e..deebd7132d53d693a1783c653cabdc6727e15504 100644
 
 #include "tu_private.h"
 
-#include "registers/adreno_pm4.xml.h"
-#include "registers/adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+#include "adreno_common.xml.h"
 
 #include "vk_format.h"
 
 #include "tu_cs.h"
-#include "tu_blit.h"
-
-#define OVERFLOW_FLAG_REG REG_A6XX_CP_SCRATCH_REG(0)
 
 void
 tu_bo_list_init(struct tu_bo_list *list)
@@ -111,321 +108,138 @@ tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
    return VK_SUCCESS;
 }
 
-static bool
-is_linear_mipmapped(const struct tu_image_view *iview)
-{
-   return iview->image->layout.tile_mode == TILE6_LINEAR &&
-          iview->base_mip != iview->image->level_count - 1;
-}
-
-static bool
-force_sysmem(const struct tu_cmd_buffer *cmd,
-             const struct VkRect2D *render_area)
-{
-   const struct tu_framebuffer *fb = cmd->state.framebuffer;
-   const struct tu_physical_device *device = cmd->device->physical_device;
-   bool has_linear_mipmapped_store = false;
-   const struct tu_render_pass *pass = cmd->state.pass;
-
-   /* Layered rendering requires sysmem. */
-   if (fb->layers > 1)
-      return true;
-
-   /* Iterate over all the places we call tu6_emit_store_attachment() */
-   for (unsigned i = 0; i < pass->subpass_count; i++) {
-      const struct tu_subpass *subpass = &pass->subpasses[i];
-      if (subpass->resolve_attachments) {
-         for (unsigned i = 0; i < subpass->color_count; i++) {
-            uint32_t a = subpass->resolve_attachments[i].attachment;
-            if (a != VK_ATTACHMENT_UNUSED &&
-                cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
-               const struct tu_image_view *iview = fb->attachments[a].attachment;
-               if (is_linear_mipmapped(iview)) {
-                  has_linear_mipmapped_store = true;
-                  break;
-               }
-            }
-         }
-      }
+void
+tu6_emit_event_write(struct tu_cmd_buffer *cmd,
+                     struct tu_cs *cs,
+                     enum vgt_event_type event)
+{
+   bool need_seqno = false;
+   switch (event) {
+   case CACHE_FLUSH_TS:
+   case WT_DONE_TS:
+   case RB_DONE_TS:
+   case PC_CCU_FLUSH_DEPTH_TS:
+   case PC_CCU_FLUSH_COLOR_TS:
+   case PC_CCU_RESOLVE_TS:
+      need_seqno = true;
+      break;
+   default:
+      break;
    }
 
-   for (unsigned i = 0; i < pass->attachment_count; i++) {
-      if (pass->attachments[i].gmem_offset >= 0 &&
-          cmd->state.pass->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
-         const struct tu_image_view *iview = fb->attachments[i].attachment;
-         if (is_linear_mipmapped(iview)) {
-            has_linear_mipmapped_store = true;
-            break;
-         }
-      }
+   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
+   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
+   if (need_seqno) {
+      tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
+      tu_cs_emit(cs, 0);
    }
-
-   /* Linear textures cannot have any padding between mipmap levels and their
-    * height isn't padded, while at the same time the GMEM->MEM resolve does
-    * not have per-pixel granularity, so if the image height isn't aligned to
-    * the resolve granularity and the render area is tall enough, we may wind
-    * up writing past the bottom of the image into the next miplevel or even
-    * past the end of the image. For the last miplevel, the layout code should
-    * insert enough padding so that the overdraw writes to the padding.  To
-    * work around this, we force-enable sysmem rendering.
-    */
-   const uint32_t y2 = render_area->offset.y + render_area->extent.height;
-   const uint32_t aligned_y2 = ALIGN_POT(y2, device->tile_align_h);
-
-   return has_linear_mipmapped_store && aligned_y2 > fb->height;
 }
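
A usage sketch (illustrative only, using the names from the hunk above): callers just pick an event, and the helper decides the packet size from the need_seqno list.

   /* Sketch: PC_CCU_FLUSH_COLOR_TS is in the need_seqno list above, so it
    * gets the 4-dword form with the dummy seqno write-back;
    * CACHE_INVALIDATE is a bare 1-dword event.
    */
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);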
 
 static void
-tu_tiling_config_update_tile_layout(struct tu_tiling_config *tiling,
-                                    const struct tu_device *dev,
-                                    uint32_t pixels)
-{
-   const uint32_t tile_align_w = dev->physical_device->tile_align_w;
-   const uint32_t tile_align_h = dev->physical_device->tile_align_h;
-   const uint32_t max_tile_width = 1024; /* A6xx */
-
-   /* note: don't offset the tiling config by render_area.offset,
-    * because binning pass can't deal with it
-    * this means we might end up with more tiles than necessary,
-    * but load/store/etc are still scissored to the render_area
+tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
+                 struct tu_cs *cs,
+                 enum tu_cmd_flush_bits flushes)
+{
+   /* Experiments show that invalidating the CCU while it still has data in
+    * it doesn't work, so make sure to always flush before invalidating, in
+    * case any data remains that hasn't yet been made available through a
+    * barrier. However, it does seem to work for UCHE.
     */
-   tiling->tile0.offset = (VkOffset2D) {};
-
-   const uint32_t ra_width =
-      tiling->render_area.extent.width +
-      (tiling->render_area.offset.x - tiling->tile0.offset.x);
-   const uint32_t ra_height =
-      tiling->render_area.extent.height +
-      (tiling->render_area.offset.y - tiling->tile0.offset.y);
-
-   /* start from 1 tile */
-   tiling->tile_count = (VkExtent2D) {
-      .width = 1,
-      .height = 1,
-   };
-   tiling->tile0.extent = (VkExtent2D) {
-      .width = align(ra_width, tile_align_w),
-      .height = align(ra_height, tile_align_h),
-   };
-
-   if (unlikely(dev->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN)) {
-      /* start with 2x2 tiles */
-      tiling->tile_count.width = 2;
-      tiling->tile_count.height = 2;
-      tiling->tile0.extent.width = align(DIV_ROUND_UP(ra_width, 2), tile_align_w);
-      tiling->tile0.extent.height = align(DIV_ROUND_UP(ra_height, 2), tile_align_h);
-   }
-
-   /* do not exceed max tile width */
-   while (tiling->tile0.extent.width > max_tile_width) {
-      tiling->tile_count.width++;
-      tiling->tile0.extent.width =
-         align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
-   }
-
-   /* will force to sysmem, don't bother trying to have a valid tile config
-    * TODO: just skip all GMEM stuff when sysmem is forced?
-    */
-   if (!pixels)
-      return;
-
-   /* do not exceed gmem size */
-   while (tiling->tile0.extent.width * tiling->tile0.extent.height > pixels) {
-      if (tiling->tile0.extent.width > MAX2(tile_align_w, tiling->tile0.extent.height)) {
-         tiling->tile_count.width++;
-         tiling->tile0.extent.width =
-            align(DIV_ROUND_UP(ra_width, tiling->tile_count.width), tile_align_w);
-      } else {
-         /* if this assert fails then layout is impossible.. */
-         assert(tiling->tile0.extent.height > tile_align_h);
-         tiling->tile_count.height++;
-         tiling->tile0.extent.height =
-            align(DIV_ROUND_UP(ra_height, tiling->tile_count.height), tile_align_h);
-      }
-   }
+   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
+                  TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
+      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
+   if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
+                  TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
+      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
+   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
+      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
+   if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
+      tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
+   if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
+      tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
+   if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
+      tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
+   if (flushes & TU_CMD_FLAG_WAIT_MEM_WRITES)
+      tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
+   if (flushes & TU_CMD_FLAG_WAIT_FOR_IDLE)
+      tu_cs_emit_wfi(cs);
+   if (flushes & TU_CMD_FLAG_WAIT_FOR_ME)
+      tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
 }
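
A hedged example of the flush-before-invalidate rule described in the comment above: even when a caller asks only for a color-CCU invalidate, the helper emits the flush event first.

   /* Sketch: this single bit expands to PC_CCU_FLUSH_COLOR_TS followed by
    * PC_CCU_INVALIDATE_COLOR, never an invalidate alone.
    */
   tu6_emit_flushes(cmd, cs, TU_CMD_FLAG_CCU_INVALIDATE_COLOR);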
 
-static void
-tu_tiling_config_update_pipe_layout(struct tu_tiling_config *tiling,
-                                    const struct tu_device *dev)
-{
-   const uint32_t max_pipe_count = 32; /* A6xx */
-
-   /* start from 1 tile per pipe */
-   tiling->pipe0 = (VkExtent2D) {
-      .width = 1,
-      .height = 1,
-   };
-   tiling->pipe_count = tiling->tile_count;
-
-   while (tiling->pipe_count.width * tiling->pipe_count.height > max_pipe_count) {
-      if (tiling->pipe0.width < tiling->pipe0.height) {
-         tiling->pipe0.width += 1;
-         tiling->pipe_count.width =
-            DIV_ROUND_UP(tiling->tile_count.width, tiling->pipe0.width);
-      } else {
-         tiling->pipe0.height += 1;
-         tiling->pipe_count.height =
-            DIV_ROUND_UP(tiling->tile_count.height, tiling->pipe0.height);
-      }
-   }
-}
+/* "Normal" cache flushes that don't require any special handling */
 
 static void
-tu_tiling_config_update_pipes(struct tu_tiling_config *tiling,
-                              const struct tu_device *dev)
+tu_emit_cache_flush(struct tu_cmd_buffer *cmd_buffer,
+                    struct tu_cs *cs)
 {
-   const uint32_t max_pipe_count = 32; /* A6xx */
-   const uint32_t used_pipe_count =
-      tiling->pipe_count.width * tiling->pipe_count.height;
-   const VkExtent2D last_pipe = {
-      .width = (tiling->tile_count.width - 1) % tiling->pipe0.width + 1,
-      .height = (tiling->tile_count.height - 1) % tiling->pipe0.height + 1,
-   };
-
-   assert(used_pipe_count <= max_pipe_count);
-   assert(max_pipe_count <= ARRAY_SIZE(tiling->pipe_config));
-
-   for (uint32_t y = 0; y < tiling->pipe_count.height; y++) {
-      for (uint32_t x = 0; x < tiling->pipe_count.width; x++) {
-         const uint32_t pipe_x = tiling->pipe0.width * x;
-         const uint32_t pipe_y = tiling->pipe0.height * y;
-         const uint32_t pipe_w = (x == tiling->pipe_count.width - 1)
-                                    ? last_pipe.width
-                                    : tiling->pipe0.width;
-         const uint32_t pipe_h = (y == tiling->pipe_count.height - 1)
-                                    ? last_pipe.height
-                                    : tiling->pipe0.height;
-         const uint32_t n = tiling->pipe_count.width * y + x;
-
-         tiling->pipe_config[n] = A6XX_VSC_PIPE_CONFIG_REG_X(pipe_x) |
-                                  A6XX_VSC_PIPE_CONFIG_REG_Y(pipe_y) |
-                                  A6XX_VSC_PIPE_CONFIG_REG_W(pipe_w) |
-                                  A6XX_VSC_PIPE_CONFIG_REG_H(pipe_h);
-         tiling->pipe_sizes[n] = CP_SET_BIN_DATA5_0_VSC_SIZE(pipe_w * pipe_h);
-      }
-   }
-
-   memset(tiling->pipe_config + used_pipe_count, 0,
-          sizeof(uint32_t) * (max_pipe_count - used_pipe_count));
+   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.cache.flush_bits);
+   cmd_buffer->state.cache.flush_bits = 0;
 }
 
-static void
-tu_tiling_config_get_tile(const struct tu_tiling_config *tiling,
-                          const struct tu_device *dev,
-                          uint32_t tx,
-                          uint32_t ty,
-                          struct tu_tile *tile)
-{
-   /* find the pipe and the slot for tile (tx, ty) */
-   const uint32_t px = tx / tiling->pipe0.width;
-   const uint32_t py = ty / tiling->pipe0.height;
-   const uint32_t sx = tx - tiling->pipe0.width * px;
-   const uint32_t sy = ty - tiling->pipe0.height * py;
-   /* last pipe has different width */
-   const uint32_t pipe_width =
-      MIN2(tiling->pipe0.width,
-           tiling->tile_count.width - px * tiling->pipe0.width);
-
-   assert(tx < tiling->tile_count.width && ty < tiling->tile_count.height);
-   assert(px < tiling->pipe_count.width && py < tiling->pipe_count.height);
-   assert(sx < tiling->pipe0.width && sy < tiling->pipe0.height);
-
-   /* convert to 1D indices */
-   tile->pipe = tiling->pipe_count.width * py + px;
-   tile->slot = pipe_width * sy + sx;
-
-   /* get the blit area for the tile */
-   tile->begin = (VkOffset2D) {
-      .x = tiling->tile0.offset.x + tiling->tile0.extent.width * tx,
-      .y = tiling->tile0.offset.y + tiling->tile0.extent.height * ty,
-   };
-   tile->end.x =
-      (tx == tiling->tile_count.width - 1)
-         ? tiling->render_area.offset.x + tiling->render_area.extent.width
-         : tile->begin.x + tiling->tile0.extent.width;
-   tile->end.y =
-      (ty == tiling->tile_count.height - 1)
-         ? tiling->render_area.offset.y + tiling->render_area.extent.height
-         : tile->begin.y + tiling->tile0.extent.height;
-}
+/* Renderpass cache flushes */
 
-enum a3xx_msaa_samples
-tu_msaa_samples(uint32_t samples)
+void
+tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
+                               struct tu_cs *cs)
 {
-   switch (samples) {
-   case 1:
-      return MSAA_ONE;
-   case 2:
-      return MSAA_TWO;
-   case 4:
-      return MSAA_FOUR;
-   case 8:
-      return MSAA_EIGHT;
-   default:
-      assert(!"invalid sample count");
-      return MSAA_ONE;
-   }
+   tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.renderpass_cache.flush_bits);
+   cmd_buffer->state.renderpass_cache.flush_bits = 0;
 }
 
-static enum a4xx_index_size
-tu6_index_size(VkIndexType type)
-{
-   switch (type) {
-   case VK_INDEX_TYPE_UINT16:
-      return INDEX4_SIZE_16_BIT;
-   case VK_INDEX_TYPE_UINT32:
-      return INDEX4_SIZE_32_BIT;
-   default:
-      unreachable("invalid VkIndexType");
-      return INDEX4_SIZE_8_BIT;
-   }
-}
+/* Cache flushes for things that use the color/depth read/write path (i.e.
+ * blits and draws). This deals with changing CCU state as well as the usual
+ * cache flushing.
+ */
 
-unsigned
-tu6_emit_event_write(struct tu_cmd_buffer *cmd,
-                     struct tu_cs *cs,
-                     enum vgt_event_type event,
-                     bool need_seqno)
+void
+tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
+                        struct tu_cs *cs,
+                        enum tu_cmd_ccu_state ccu_state)
 {
-   unsigned seqno = 0;
+   enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;
 
-   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
-   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
-   if (need_seqno) {
-      tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
-      seqno = ++cmd->scratch_seqno;
-      tu_cs_emit(cs, seqno);
-   }
+   assert(ccu_state != TU_CMD_CCU_UNKNOWN);
 
-   return seqno;
-}
-
-static void
-tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
-{
-   tu6_emit_event_write(cmd, cs, 0x31, false);
-}
+   /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
+    * the CCU may also contain data that we haven't flushed out yet, so we
+    * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
+    * emit a WFI as it isn't pipelined.
+    */
+   if (ccu_state != cmd_buffer->state.ccu_state) {
+      if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
+         flushes |=
+            TU_CMD_FLAG_CCU_FLUSH_COLOR |
+            TU_CMD_FLAG_CCU_FLUSH_DEPTH;
+         cmd_buffer->state.cache.pending_flush_bits &= ~(
+            TU_CMD_FLAG_CCU_FLUSH_COLOR |
+            TU_CMD_FLAG_CCU_FLUSH_DEPTH);
+      }
+      flushes |=
+         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
+         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
+         TU_CMD_FLAG_WAIT_FOR_IDLE;
+      cmd_buffer->state.cache.pending_flush_bits &= ~(
+         TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
+         TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
+         TU_CMD_FLAG_WAIT_FOR_IDLE);
+   }
 
-static void
-tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
-{
-   tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
-}
+   tu6_emit_flushes(cmd_buffer, cs, flushes);
+   cmd_buffer->state.cache.flush_bits = 0;
 
-static void
-tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
-{
-   if (cmd->wait_for_idle) {
-      tu_cs_emit_wfi(cs);
-      cmd->wait_for_idle = false;
+   if (ccu_state != cmd_buffer->state.ccu_state) {
+      struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
+      tu_cs_emit_regs(cs,
+                      A6XX_RB_CCU_CNTL(.offset =
+                                          ccu_state == TU_CMD_CCU_GMEM ?
+                                          phys_dev->ccu_offset_gmem :
+                                          phys_dev->ccu_offset_bypass,
+                                       .gmem = ccu_state == TU_CMD_CCU_GMEM));
+      cmd_buffer->state.ccu_state = ccu_state;
    }
 }
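
As a sketch of how this is meant to be used (hypothetical call sites; the real ones are elsewhere in this file): transition the CCU before touching the color/depth path, and the function only emits the flush/invalidate/WFI sequence and reprograms RB_CCU_CNTL when the state actually changes.

   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);   /* entering GMEM rendering */
   /* ... emit GMEM loads, draws and stores ... */
   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM); /* before sysmem blits */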
 
-#define tu_image_view_ubwc_pitches(iview)                                \
-   .pitch = tu_image_ubwc_pitch(iview->image, iview->base_mip),          \
-   .array_pitch = tu_image_ubwc_size(iview->image, iview->base_mip) >> 2
-
 static void
 tu6_emit_zs(struct tu_cmd_buffer *cmd,
             const struct tu_subpass *subpass,
@@ -456,31 +270,35 @@ tu6_emit_zs(struct tu_cmd_buffer *cmd,
    }
 
    const struct tu_image_view *iview = fb->attachments[a].attachment;
-   enum a6xx_depth_format fmt = tu6_pipe2depth(iview->vk_format);
+   const struct tu_render_pass_attachment *attachment =
+      &cmd->state.pass->attachments[a];
+   enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);
 
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
-                   A6XX_RB_DEPTH_BUFFER_PITCH(tu_image_stride(iview->image, iview->base_mip)),
-                   A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(iview->image->layout.layer_size),
-                   A6XX_RB_DEPTH_BUFFER_BASE(tu_image_view_base_ref(iview)),
-                   A6XX_RB_DEPTH_BUFFER_BASE_GMEM(cmd->state.pass->attachments[a].gmem_offset));
+   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
+   tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
+   tu_cs_image_ref(cs, iview, 0);
+   tu_cs_emit(cs, attachment->gmem_offset);
 
    tu_cs_emit_regs(cs,
                    A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));
 
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_DEPTH_FLAG_BUFFER_BASE(tu_image_view_ubwc_base_ref(iview)),
-                   A6XX_RB_DEPTH_FLAG_BUFFER_PITCH(tu_image_view_ubwc_pitches(iview)));
+   tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
+   tu_cs_image_flag_ref(cs, iview, 0);
 
    tu_cs_emit_regs(cs,
                    A6XX_GRAS_LRZ_BUFFER_BASE(0),
                    A6XX_GRAS_LRZ_BUFFER_PITCH(0),
                    A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
 
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_STENCIL_INFO(0));
-
-   /* enable zs? */
+   if (attachment->format == VK_FORMAT_S8_UINT) {
+      tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
+      tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
+      tu_cs_image_ref(cs, iview, 0);
+      tu_cs_emit(cs, attachment->gmem_offset);
+   } else {
+      tu_cs_emit_regs(cs,
+                     A6XX_RB_STENCIL_INFO(0));
+   }
 }
 
 static void
@@ -489,8 +307,6 @@ tu6_emit_mrt(struct tu_cmd_buffer *cmd,
              struct tu_cs *cs)
 {
    const struct tu_framebuffer *fb = cmd->state.framebuffer;
-   unsigned char mrt_comp[MAX_RTS] = { 0 };
-   unsigned srgb_cntl = 0;
 
    for (uint32_t i = 0; i < subpass->color_count; ++i) {
       uint32_t a = subpass->color_attachments[i].attachment;
@@ -498,78 +314,31 @@ tu6_emit_mrt(struct tu_cmd_buffer *cmd,
          continue;
 
       const struct tu_image_view *iview = fb->attachments[a].attachment;
-      const enum a6xx_tile_mode tile_mode =
-         tu6_get_image_tile_mode(iview->image, iview->base_mip);
-
-      mrt_comp[i] = 0xf;
-
-      if (vk_format_is_srgb(iview->vk_format))
-         srgb_cntl |= (1 << i);
-
-      const struct tu_native_format format =
-         tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
 
-      tu_cs_emit_regs(cs,
-                      A6XX_RB_MRT_BUF_INFO(i,
-                                           .color_tile_mode = tile_mode,
-                                           .color_format = format.fmt,
-                                           .color_swap = format.swap),
-                      A6XX_RB_MRT_PITCH(i, tu_image_stride(iview->image, iview->base_mip)),
-                      A6XX_RB_MRT_ARRAY_PITCH(i, iview->image->layout.layer_size),
-                      A6XX_RB_MRT_BASE(i, tu_image_view_base_ref(iview)),
-                      A6XX_RB_MRT_BASE_GMEM(i, cmd->state.pass->attachments[a].gmem_offset));
+      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
+      tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
+      tu_cs_image_ref(cs, iview, 0);
+      tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);
 
       tu_cs_emit_regs(cs,
-                      A6XX_SP_FS_MRT_REG(i,
-                                         .color_format = format.fmt,
-                                         .color_sint = vk_format_is_sint(iview->vk_format),
-                                         .color_uint = vk_format_is_uint(iview->vk_format)));
+                      A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));
 
-      tu_cs_emit_regs(cs,
-                      A6XX_RB_MRT_FLAG_BUFFER_ADDR(i, tu_image_view_ubwc_base_ref(iview)),
-                      A6XX_RB_MRT_FLAG_BUFFER_PITCH(i, tu_image_view_ubwc_pitches(iview)));
+      tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
+      tu_cs_image_flag_ref(cs, iview, 0);
    }
 
    tu_cs_emit_regs(cs,
-                   A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));
-
-   tu_cs_emit_regs(cs,
-                   A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));
-
+                   A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
    tu_cs_emit_regs(cs,
-                   A6XX_RB_RENDER_COMPONENTS(
-                      .rt0 = mrt_comp[0],
-                      .rt1 = mrt_comp[1],
-                      .rt2 = mrt_comp[2],
-                      .rt3 = mrt_comp[3],
-                      .rt4 = mrt_comp[4],
-                      .rt5 = mrt_comp[5],
-                      .rt6 = mrt_comp[6],
-                      .rt7 = mrt_comp[7]));
+                   A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));
 
-   tu_cs_emit_regs(cs,
-                   A6XX_SP_FS_RENDER_COMPONENTS(
-                      .rt0 = mrt_comp[0],
-                      .rt1 = mrt_comp[1],
-                      .rt2 = mrt_comp[2],
-                      .rt3 = mrt_comp[3],
-                      .rt4 = mrt_comp[4],
-                      .rt5 = mrt_comp[5],
-                      .rt6 = mrt_comp[6],
-                      .rt7 = mrt_comp[7]));
-
-   // XXX: We probably can't hardcode LAYER_CNTL_TYPE.
-   tu_cs_emit_regs(cs,
-                   A6XX_GRAS_LAYER_CNTL(.layered = fb->layers > 1,
-                                        .type = LAYER_2D_ARRAY));
+   tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
 }
 
-static void
-tu6_emit_msaa(struct tu_cmd_buffer *cmd,
-              const struct tu_subpass *subpass,
-              struct tu_cs *cs)
+void
+tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
 {
-   const enum a3xx_msaa_samples samples = tu_msaa_samples(subpass->samples);
+   const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
    bool msaa_disable = samples == MSAA_ONE;
 
    tu_cs_emit_regs(cs,
@@ -630,7 +399,7 @@ tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
             continue;
 
          const struct tu_image_view *iview = fb->attachments[a].attachment;
-         if (iview->image->layout.ubwc_layer_size != 0)
+         if (iview->ubwc_enabled)
             mrts_ubwc_enable |= 1 << i;
       }
 
@@ -639,7 +408,7 @@ tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
       const uint32_t a = subpass->depth_stencil_attachment.attachment;
       if (a != VK_ATTACHMENT_UNUSED) {
          const struct tu_image_view *iview = fb->attachments[a].attachment;
-         if (iview->image->layout.ubwc_layer_size != 0)
+         if (iview->ubwc_enabled)
             cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
       }
 
@@ -664,18 +433,29 @@ tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
 static void
 tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
 {
-   const VkRect2D *render_area = &cmd->state.tiling_config.render_area;
+   const VkRect2D *render_area = &cmd->state.render_area;
+
+   /* Avoid assertion failures with an empty render area at (0, 0), where
+    * the subtraction below would wrap around. Empty render areas should be
+    * forced to the sysmem path by use_sysmem_rendering(). It's not even
+    * clear whether an empty scissor here works, and the blob seems to force
+    * sysmem too, as it sets something wrong (non-empty) for the scissor.
+    */
+   if (render_area->extent.width == 0 ||
+       render_area->extent.height == 0)
+      return;
+
    uint32_t x1 = render_area->offset.x;
    uint32_t y1 = render_area->offset.y;
    uint32_t x2 = x1 + render_area->extent.width - 1;
    uint32_t y2 = y1 + render_area->extent.height - 1;
 
-   /* TODO: alignment requirement seems to be less than tile_align_w/h */
    if (align) {
-      x1 = x1 & ~cmd->device->physical_device->tile_align_w;
-      y1 = y1 & ~cmd->device->physical_device->tile_align_h;
-      x2 = ALIGN_POT(x2 + 1, cmd->device->physical_device->tile_align_w) - 1;
-      y2 = ALIGN_POT(y2 + 1, cmd->device->physical_device->tile_align_h) - 1;
+      x1 = x1 & ~(GMEM_ALIGN_W - 1);
+      y1 = y1 & ~(GMEM_ALIGN_H - 1);
+      x2 = ALIGN_POT(x2 + 1, GMEM_ALIGN_W) - 1;
+      y2 = ALIGN_POT(y2 + 1, GMEM_ALIGN_H) - 1;
    }
 
    tu_cs_emit_regs(cs,
@@ -683,51 +463,8 @@ tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
                    A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
 }
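
A worked example of the align path, with an alignment value assumed purely for illustration:

   /* Assuming GMEM_ALIGN_W == 16 for illustration: offset.x = 5 and
    * extent.width = 20 give x1 = 5, x2 = 24; aligning yields
    * x1 = 5 & ~15 = 0 and x2 = ALIGN_POT(25, 16) - 1 = 31, i.e. the blit
    * scissor is widened to whole GMEM-aligned blocks.
    */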
 
-static void
-tu6_emit_blit_info(struct tu_cmd_buffer *cmd,
-                   struct tu_cs *cs,
-                   const struct tu_image_view *iview,
-                   uint32_t gmem_offset,
-                   bool resolve)
-{
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_BLIT_INFO(.unk0 = !resolve, .gmem = !resolve));
-
-   const struct tu_native_format format =
-      tu6_format_color(iview->vk_format, iview->image->layout.tile_mode);
-
-   enum a6xx_tile_mode tile_mode =
-      tu6_get_image_tile_mode(iview->image, iview->base_mip);
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_BLIT_DST_INFO(
-                      .tile_mode = tile_mode,
-                      .samples = tu_msaa_samples(iview->image->samples),
-                      .color_format = format.fmt,
-                      .color_swap = format.swap,
-                      .flags = iview->image->layout.ubwc_layer_size != 0),
-                   A6XX_RB_BLIT_DST(tu_image_view_base_ref(iview)),
-                   A6XX_RB_BLIT_DST_PITCH(tu_image_stride(iview->image, iview->base_mip)),
-                   A6XX_RB_BLIT_DST_ARRAY_PITCH(iview->image->layout.layer_size));
-
-   if (iview->image->layout.ubwc_layer_size) {
-      tu_cs_emit_regs(cs,
-                      A6XX_RB_BLIT_FLAG_DST(tu_image_view_ubwc_base_ref(iview)),
-                      A6XX_RB_BLIT_FLAG_DST_PITCH(tu_image_view_ubwc_pitches(iview)));
-   }
-
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_BLIT_BASE_GMEM(gmem_offset));
-}
-
-static void
-tu6_emit_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
-{
-   tu6_emit_event_write(cmd, cs, BLIT, false);
-}
-
-static void
-tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
-                        struct tu_cs *cs,
+void
+tu6_emit_window_scissor(struct tu_cs *cs,
                         uint32_t x1,
                         uint32_t y1,
                         uint32_t x2,
@@ -738,15 +475,12 @@ tu6_emit_window_scissor(struct tu_cmd_buffer *cmd,
                    A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
 
    tu_cs_emit_regs(cs,
-                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
-                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
+                   A6XX_GRAS_2D_RESOLVE_CNTL_1(.x = x1, .y = y1),
+                   A6XX_GRAS_2D_RESOLVE_CNTL_2(.x = x2, .y = y2));
 }
 
-static void
-tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
-                       struct tu_cs *cs,
-                       uint32_t x1,
-                       uint32_t y1)
+void
+tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
 {
    tu_cs_emit_regs(cs,
                    A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));
@@ -761,10 +495,71 @@ tu6_emit_window_offset(struct tu_cmd_buffer *cmd,
                    A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
 }
 
+static void
+tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
+{
+   uint32_t enable_mask;
+   switch (id) {
+   case TU_DRAW_STATE_PROGRAM:
+   case TU_DRAW_STATE_VI:
+   case TU_DRAW_STATE_FS_CONST:
+   /* The blob seems not to enable this (DESC_SETS_LOAD) for binning, even
+    * when resources would actually be used in the binning shader.
+    * Presumably the overhead of prefetching the resources isn't worth it.
+    */
+   case TU_DRAW_STATE_DESC_SETS_LOAD:
+      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
+                    CP_SET_DRAW_STATE__0_SYSMEM;
+      break;
+   case TU_DRAW_STATE_PROGRAM_BINNING:
+   case TU_DRAW_STATE_VI_BINNING:
+      enable_mask = CP_SET_DRAW_STATE__0_BINNING;
+      break;
+   case TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM:
+      enable_mask = CP_SET_DRAW_STATE__0_GMEM;
+      break;
+   case TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM:
+      enable_mask = CP_SET_DRAW_STATE__0_SYSMEM;
+      break;
+   default:
+      enable_mask = CP_SET_DRAW_STATE__0_GMEM |
+                    CP_SET_DRAW_STATE__0_SYSMEM |
+                    CP_SET_DRAW_STATE__0_BINNING;
+      break;
+   }
+
+   /* We need to reload the descriptors every time the descriptor sets
+    * change. However, the commands we send only depend on the pipeline,
+    * because the whole point is to cache descriptors which are used by the
+    * pipeline. There's a problem here, in that the firmware has an
+    * "optimization" which skips executing groups that are set to the same
+    * value as the last draw. This means that if the descriptor sets changed
+    * but not the pipeline, we'd try to re-execute the same buffer, which
+    * the firmware would ignore, and we wouldn't pre-load the new
+    * descriptors. Set the DIRTY bit to avoid this optimization.
+    */
+   if (id == TU_DRAW_STATE_DESC_SETS_LOAD)
+      enable_mask |= CP_SET_DRAW_STATE__0_DIRTY;
+
+   tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
+                  enable_mask |
+                  CP_SET_DRAW_STATE__0_GROUP_ID(id) |
+                  COND(!state.size, CP_SET_DRAW_STATE__0_DISABLE));
+   tu_cs_emit_qw(cs, state.iova);
+}
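
The helper emits only the 3-dword group body (header dword plus 64-bit iova), so a caller sizes the enclosing packet accordingly; a minimal sketch, where count and states[] are hypothetical:

   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * count);
   for (uint32_t i = 0; i < count; i++)
      tu_cs_emit_draw_state(cs, i, states[i]);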
+
 static bool
 use_hw_binning(struct tu_cmd_buffer *cmd)
 {
-   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+   const struct tu_framebuffer *fb = cmd->state.framebuffer;
+
+   /* XFB commands are emitted for BINNING || SYSMEM, which makes it
+    * incompatible with non-hw binning GMEM rendering. This is required
+    * because some of the XFB commands need to be executed only once.
+    */
+   if (cmd->state.xfb_used)
+      return true;
 
    if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
       return false;
@@ -772,7 +567,7 @@ use_hw_binning(struct tu_cmd_buffer *cmd)
    if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
       return true;
 
-   return (tiling->tile_count.width * tiling->tile_count.height) > 2;
+   return (fb->tile_count.width * fb->tile_count.height) > 2;
 }
 
 static bool
@@ -785,29 +580,41 @@ use_sysmem_rendering(struct tu_cmd_buffer *cmd)
    if (!cmd->state.pass->gmem_pixels)
       return true;
 
-   return cmd->state.tiling_config.force_sysmem;
+   if (cmd->state.framebuffer->layers > 1)
+      return true;
+
+   /* Use sysmem for empty render areas */
+   if (cmd->state.render_area.extent.width == 0 ||
+       cmd->state.render_area.extent.height == 0)
+      return true;
+
+   if (cmd->has_tess)
+      return true;
+
+   return false;
 }
 
 static void
 tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
                      struct tu_cs *cs,
-                     const struct tu_tile *tile)
+                     uint32_t tx, uint32_t ty, uint32_t pipe, uint32_t slot)
 {
+   const struct tu_framebuffer *fb = cmd->state.framebuffer;
+
    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_YIELD));
 
    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
 
-   const uint32_t x1 = tile->begin.x;
-   const uint32_t y1 = tile->begin.y;
-   const uint32_t x2 = tile->end.x - 1;
-   const uint32_t y2 = tile->end.y - 1;
-   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
-   tu6_emit_window_offset(cmd, cs, x1, y1);
+   const uint32_t x1 = fb->tile0.width * tx;
+   const uint32_t y1 = fb->tile0.height * ty;
+   const uint32_t x2 = x1 + fb->tile0.width - 1;
+   const uint32_t y2 = y1 + fb->tile0.height - 1;
+   tu6_emit_window_scissor(cs, x1, y1, x2, y2);
+   tu6_emit_window_offset(cs, x1, y1);
 
-   tu_cs_emit_regs(cs,
-                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));
+   tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
 
    if (use_hw_binning(cmd)) {
       tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
@@ -815,45 +622,18 @@ tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
       tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
       tu_cs_emit(cs, 0x0);
 
-      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
-      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
-                     A6XX_CP_REG_TEST_0_BIT(0) |
-                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
+      tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5_OFFSET, 4);
+      tu_cs_emit(cs, fb->pipe_sizes[pipe] |
+                     CP_SET_BIN_DATA5_0_VSC_N(slot));
+      tu_cs_emit(cs, pipe * cmd->vsc_draw_strm_pitch);
+      tu_cs_emit(cs, pipe * 4);
+      tu_cs_emit(cs, pipe * cmd->vsc_prim_strm_pitch);
 
-      tu_cs_reserve(cs, 3 + 11);
-      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
-      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
-      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(11));
-
-      /* if (no overflow) */ {
-         tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
-         tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
-                        CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
-         tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
-         tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
-         tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));
-
-         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
-         tu_cs_emit(cs, 0x0);
-
-         /* use a NOP packet to skip over the 'else' side: */
-         tu_cs_emit_pkt7(cs, CP_NOP, 2);
-      } /* else */ {
-         tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
-         tu_cs_emit(cs, 0x1);
-      }
+      tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
+      tu_cs_emit(cs, 0x0);
 
       tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
       tu_cs_emit(cs, 0x0);
-
-      tu_cs_emit_regs(cs,
-                      A6XX_RB_UNKNOWN_8804(0));
-
-      tu_cs_emit_regs(cs,
-                      A6XX_SP_TP_UNKNOWN_B304(0));
-
-      tu_cs_emit_regs(cs,
-                      A6XX_GRAS_UNKNOWN_80A4(0));
    } else {
       tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
       tu_cs_emit(cs, 0x1);
@@ -863,93 +643,6 @@ tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
    }
 }
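
A note on the CP_SET_BIN_DATA5_OFFSET payload above (an inference from this diff, not verified against documentation):

   /* The three offset dwords appear to be relative to the
    * VSC_DRAW_STRM_ADDRESS, VSC_DRAW_STRM_SIZE_ADDRESS and
    * VSC_PRIM_STRM_ADDRESS bases programmed in tu6_init_hw():
    * pipe * vsc_draw_strm_pitch selects this pipe's draw stream,
    * pipe * 4 its 32-bit size slot, and pipe * vsc_prim_strm_pitch its
    * primitive stream.
    */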
 
-static void
-tu6_emit_load_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a)
-{
-   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-   const struct tu_framebuffer *fb = cmd->state.framebuffer;
-   const struct tu_image_view *iview = fb->attachments[a].attachment;
-   const struct tu_render_pass_attachment *attachment =
-      &cmd->state.pass->attachments[a];
-
-   if (attachment->gmem_offset < 0)
-      return;
-
-   const uint32_t x1 = tiling->render_area.offset.x;
-   const uint32_t y1 = tiling->render_area.offset.y;
-   const uint32_t x2 = x1 + tiling->render_area.extent.width;
-   const uint32_t y2 = y1 + tiling->render_area.extent.height;
-   const uint32_t tile_x2 =
-      tiling->tile0.offset.x + tiling->tile0.extent.width * tiling->tile_count.width;
-   const uint32_t tile_y2 =
-      tiling->tile0.offset.y + tiling->tile0.extent.height * tiling->tile_count.height;
-   bool need_load =
-      x1 != tiling->tile0.offset.x || x2 != MIN2(fb->width, tile_x2) ||
-      y1 != tiling->tile0.offset.y || y2 != MIN2(fb->height, tile_y2);
-
-   if (need_load)
-      tu_finishme("improve handling of unaligned render area");
-
-   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
-      need_load = true;
-
-   if (vk_format_has_stencil(iview->vk_format) &&
-       attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
-      need_load = true;
-
-   if (need_load) {
-      tu6_emit_blit_info(cmd, cs, iview, attachment->gmem_offset, false);
-      tu6_emit_blit(cmd, cs);
-   }
-}
-
-static void
-tu6_emit_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
-                          uint32_t a,
-                          const VkRenderPassBeginInfo *info)
-{
-   const struct tu_framebuffer *fb = cmd->state.framebuffer;
-   const struct tu_image_view *iview = fb->attachments[a].attachment;
-   const struct tu_render_pass_attachment *attachment =
-      &cmd->state.pass->attachments[a];
-   unsigned clear_mask = 0;
-
-   /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
-   if (attachment->gmem_offset < 0)
-      return;
-
-   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
-      clear_mask = 0xf;
-
-   if (vk_format_has_stencil(iview->vk_format)) {
-      clear_mask &= 0x1;
-      if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
-         clear_mask |= 0x2;
-   }
-   if (!clear_mask)
-      return;
-
-   tu_clear_gmem_attachment(cmd, cs, a, clear_mask,
-                            &info->pClearValues[a]);
-}
-
-static void
-tu6_emit_predicated_blit(struct tu_cmd_buffer *cmd,
-                         struct tu_cs *cs,
-                         uint32_t a,
-                         uint32_t gmem_a,
-                         bool resolve)
-{
-   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
-
-   tu6_emit_blit_info(cmd, cs,
-                      cmd->state.framebuffer->attachments[a].attachment,
-                      cmd->state.pass->attachments[gmem_a].gmem_offset, resolve);
-   tu6_emit_blit(cmd, cs);
-
-   tu_cond_exec_end(cs);
-}
-
 static void
 tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
@@ -957,48 +650,53 @@ tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
                         uint32_t gmem_a)
 {
    const struct tu_framebuffer *fb = cmd->state.framebuffer;
-   const struct tu_image_view *dst = fb->attachments[a].attachment;
-   const struct tu_image_view *src = fb->attachments[gmem_a].attachment;
-
-   tu_blit(cmd, cs, &(struct tu_blit) {
-      .dst = sysmem_attachment_surf(dst, dst->base_layer,
-                                    &cmd->state.tiling_config.render_area),
-      .src = sysmem_attachment_surf(src, src->base_layer,
-                                    &cmd->state.tiling_config.render_area),
-      .layers = fb->layers,
-   });
-}
-
+   struct tu_image_view *dst = fb->attachments[a].attachment;
+   struct tu_image_view *src = fb->attachments[gmem_a].attachment;
 
-/* Emit a MSAA resolve operation, with both gmem and sysmem paths. */
-static void tu6_emit_resolve(struct tu_cmd_buffer *cmd,
-                             struct tu_cs *cs,
-                             uint32_t a,
-                             uint32_t gmem_a)
-{
-   if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
-      return;
-
-   tu6_emit_predicated_blit(cmd, cs, a, gmem_a, true);
-
-   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
-   tu6_emit_sysmem_resolve(cmd, cs, a, gmem_a);
-   tu_cond_exec_end(cs);
+   tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.render_area);
 }
 
 static void
-tu6_emit_store_attachment(struct tu_cmd_buffer *cmd,
-                          struct tu_cs *cs,
-                          uint32_t a,
-                          uint32_t gmem_a)
+tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
+                         struct tu_cs *cs,
+                         const struct tu_subpass *subpass)
 {
-   if (cmd->state.pass->attachments[a].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE)
-      return;
+   if (subpass->resolve_attachments) {
+      /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
+       * Commands":
+       *
+       *    End-of-subpass multisample resolves are treated as color
+       *    attachment writes for the purposes of synchronization. That is,
+       *    they are considered to execute in the
+       *    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
+       *    their writes are synchronized with
+       *    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
+       *    rendering within a subpass and any resolve operations at the end
+       *    of the subpass occurs automatically, without need for explicit
+       *    dependencies or pipeline barriers. However, if the resolve
+       *    attachment is also used in a different subpass, an explicit
+       *    dependency is needed.
+       *
+       * We use the CP_BLIT path for sysmem resolves, which is really a
+       * transfer command, so we have to flush manually, similar to the GMEM
+       * resolve case. However, a flush afterwards isn't needed because of
+       * the last sentence and the fact that we're in sysmem mode.
+       */
+      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
+
+      /* Wait for the flushes to land before using the 2D engine */
+      tu_cs_emit_wfi(cs);
+
+      for (unsigned i = 0; i < subpass->color_count; i++) {
+         uint32_t a = subpass->resolve_attachments[i].attachment;
+         if (a == VK_ATTACHMENT_UNUSED)
+            continue;
 
-   tu6_emit_blit_info(cmd, cs,
-                      cmd->state.framebuffer->attachments[a].attachment,
-                      cmd->state.pass->attachments[gmem_a].gmem_offset, true);
-   tu6_emit_blit(cmd, cs);
+         tu6_emit_sysmem_resolve(cmd, cs, a,
+                                 subpass->color_attachments[i].attachment);
+      }
+   }
 }
 
 static void
@@ -1024,34 +722,49 @@ tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 
    for (uint32_t a = 0; a < pass->attachment_count; ++a) {
       if (pass->attachments[a].gmem_offset >= 0)
-         tu6_emit_store_attachment(cmd, cs, a, a);
+         tu_store_gmem_attachment(cmd, cs, a, a);
    }
 
    if (subpass->resolve_attachments) {
       for (unsigned i = 0; i < subpass->color_count; i++) {
          uint32_t a = subpass->resolve_attachments[i].attachment;
          if (a != VK_ATTACHMENT_UNUSED)
-            tu6_emit_store_attachment(cmd, cs, a,
-                                      subpass->color_attachments[i].attachment);
+            tu_store_gmem_attachment(cmd, cs, a,
+                                     subpass->color_attachments[i].attachment);
       }
    }
 }
 
-static void
-tu6_emit_restart_index(struct tu_cs *cs, uint32_t restart_index)
-{
-   tu_cs_emit_regs(cs,
-                   A6XX_PC_RESTART_INDEX(restart_index));
-}
-
 static void
 tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
-   tu6_emit_cache_flush(cmd, cs);
+   struct tu_device *dev = cmd->device;
+   const struct tu_physical_device *phys_dev = dev->physical_device;
+
+   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
+
+   tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
+         .vs_state = true,
+         .hs_state = true,
+         .ds_state = true,
+         .gs_state = true,
+         .fs_state = true,
+         .cs_state = true,
+         .gfx_ibo = true,
+         .cs_ibo = true,
+         .gfx_shared_const = true,
+         .cs_shared_const = true,
+         .gfx_bindless = 0x1f,
+         .cs_bindless = 0x1f));
+
+   tu_cs_emit_wfi(cs);
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
+   cmd->state.cache.pending_flush_bits &=
+      ~(TU_CMD_FLAG_WAIT_FOR_IDLE | TU_CMD_FLAG_CACHE_INVALIDATE);
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_RB_CCU_CNTL, 0x10000000);
+   tu_cs_emit_regs(cs,
+                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
+   cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
@@ -1067,15 +780,15 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
+   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_SHARED_CONSTS, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
    tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
-   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);
 
+   /* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
    tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
    tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
@@ -1096,19 +809,14 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
+   tu_cs_emit_regs(cs, A6XX_VPC_POINT_COORD_INVERT(false));
    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
-                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
+   tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9806, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
 
    tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMITIVE_CNTL_6, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9B07, 0);
@@ -1118,25 +826,13 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);
 
    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9981, 0x3);
    tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9108, 0x3);
-   tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B304, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
-   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8804, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A4, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A5, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A6, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8805, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8806, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8878, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8879, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
 
    tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);
@@ -1153,13 +849,6 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
    tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
 
-   /* Set not to use streamout by default, */
-   tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
-   tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
-   tu_cs_emit(cs, 0);
-   tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
-   tu_cs_emit(cs, 0);
-
    tu_cs_emit_regs(cs,
                    A6XX_SP_HS_CTRL_REG0(0));
 
@@ -1173,164 +862,120 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
                    A6XX_RB_LRZ_CNTL(0));
 
    tu_cs_emit_regs(cs,
-                   A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+                   A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
+                                                     .bo_offset = gb_offset(border_color)));
    tu_cs_emit_regs(cs,
-                   A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &cmd->device->border_color));
+                   A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
+                                                        .bo_offset = gb_offset(border_color)));
 
-   tu_cs_sanity_check(cs);
-}
+   /* VSC buffers:
+    * Use the VSC pitches from the largest values used so far with this
+    * device. If there hasn't been an overflow, there will already be a
+    * scratch BO allocated for these sizes.
+    *
+    * If an overflow is detected, the stream size is increased by 2x.
+    */
+   mtx_lock(&dev->vsc_pitch_mtx);
 
-static void
-tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
-{
-   unsigned seqno;
+   struct tu6_global *global = dev->global_bo.map;
 
-   seqno = tu6_emit_event_write(cmd, cs, RB_DONE_TS, true);
+   uint32_t vsc_draw_overflow = global->vsc_draw_overflow;
+   uint32_t vsc_prim_overflow = global->vsc_prim_overflow;
 
-   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
-   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
-                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
-   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
-   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
-   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
-   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
+   if (vsc_draw_overflow >= dev->vsc_draw_strm_pitch)
+      dev->vsc_draw_strm_pitch = (dev->vsc_draw_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
 
-   seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
+   if (vsc_prim_overflow >= dev->vsc_prim_strm_pitch)
+      dev->vsc_prim_strm_pitch = (dev->vsc_prim_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
 
-   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
-   tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
-   tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
-   tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
-}
+   cmd->vsc_prim_strm_pitch = dev->vsc_prim_strm_pitch;
+   cmd->vsc_draw_strm_pitch = dev->vsc_draw_strm_pitch;
 
-static void
-update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
-{
-   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+   mtx_unlock(&dev->vsc_pitch_mtx);
+
+   struct tu_bo *vsc_bo;
+   uint32_t size0 = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES +
+                    cmd->vsc_draw_strm_pitch * MAX_VSC_PIPES;
+
+   tu_get_scratch_bo(dev, size0 + MAX_VSC_PIPES * 4, &vsc_bo);
+
+   tu_cs_emit_regs(cs,
+                   A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = vsc_bo, .bo_offset = size0));
+   tu_cs_emit_regs(cs,
+                   A6XX_VSC_PRIM_STRM_ADDRESS(.bo = vsc_bo));
+   tu_cs_emit_regs(cs,
+                   A6XX_VSC_DRAW_STRM_ADDRESS(.bo = vsc_bo,
+                                              .bo_offset = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES));
+
+   tu_bo_list_add(&cmd->bo_list, vsc_bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
+
+   tu_cs_sanity_check(cs);
+}
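
A worked instance of the 2x growth formula above, with illustrative numbers (VSC_PAD and the starting pitch are assumed, not taken from this diff):

   /* If VSC_PAD were 0x40 and the current pitch 0x440, the usable stream
    * size 0x440 - 0x40 = 0x400 doubles to 0x800, giving a new pitch of
    * 0x800 + 0x40 = 0x840; the pad region stays a constant-size tail.
    */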
+
+static void
+update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+{
+   const struct tu_framebuffer *fb = cmd->state.framebuffer;
 
    tu_cs_emit_regs(cs,
-                   A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
-                                     .height = tiling->tile0.extent.height),
-                   A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
-                                         .bo_offset = 32 * cmd->vsc_data_pitch));
+                   A6XX_VSC_BIN_SIZE(.width = fb->tile0.width,
+                                     .height = fb->tile0.height));
 
    tu_cs_emit_regs(cs,
-                   A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
-                                      .ny = tiling->tile_count.height));
+                   A6XX_VSC_BIN_COUNT(.nx = fb->tile_count.width,
+                                      .ny = fb->tile_count.height));
 
    tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
-   for (unsigned i = 0; i < 32; i++)
-      tu_cs_emit(cs, tiling->pipe_config[i]);
+   tu_cs_emit_array(cs, fb->pipe_config, 32);
 
    tu_cs_emit_regs(cs,
-                   A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
-                   A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
-                   A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));
+                   A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
+                   A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - VSC_PAD));
 
    tu_cs_emit_regs(cs,
-                   A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
-                   A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
-                   A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
+                   A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
+                   A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - VSC_PAD));
 }
 
 static void
 emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
-   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+   const struct tu_framebuffer *fb = cmd->state.framebuffer;
    const uint32_t used_pipe_count =
-      tiling->pipe_count.width * tiling->pipe_count.height;
+      fb->pipe_count.width * fb->pipe_count.height;
 
-   /* Clear vsc_scratch: */
-   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
-   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
-   tu_cs_emit(cs, 0x0);
-
-   /* Check for overflow, write vsc_scratch if detected: */
    for (int i = 0; i < used_pipe_count; i++) {
       tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
       tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
             CP_COND_WRITE5_0_WRITE_MEMORY);
-      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
+      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
       tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
-      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
+      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - VSC_PAD));
       tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
-      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
-      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));
+      tu_cs_emit_qw(cs, global_iova(cmd, vsc_draw_overflow));
+      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_draw_strm_pitch));
 
       tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
       tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
             CP_COND_WRITE5_0_WRITE_MEMORY);
-      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
+      tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
       tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
-      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
+      tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - VSC_PAD));
       tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
-      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
-      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
+      tu_cs_emit_qw(cs, global_iova(cmd, vsc_prim_overflow));
+      tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_prim_strm_pitch));
    }
 
    tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
-
-   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
-
-   tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
-   tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(OVERFLOW_FLAG_REG) |
-         CP_MEM_TO_REG_0_CNT(1 - 1));
-   tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
-
-   /*
-    * This is a bit awkward, we really want a way to invert the
-    * CP_REG_TEST/CP_COND_REG_EXEC logic, so that we can conditionally
-    * execute cmds to use hwbinning when a bit is *not* set.  This
-    * dance is to invert OVERFLOW_FLAG_REG
-    *
-    * A CP_NOP packet is used to skip executing the 'else' clause
-    * if (b0 set)..
-    */
-
-   /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
-   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
-   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
-         A6XX_CP_REG_TEST_0_BIT(0) |
-         A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
-
-   tu_cs_reserve(cs, 3 + 7);
-   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
-   tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
-   tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(7));
-
-   /* if (b0 set) */ {
-      /*
-       * On overflow, mirror the value to control->vsc_overflow
-       * which CPU is checking to detect overflow (see
-       * check_vsc_overflow())
-       */
-      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
-      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(OVERFLOW_FLAG_REG) |
-            CP_REG_TO_MEM_0_CNT(0));
-      tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_overflow));
-
-      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
-      tu_cs_emit(cs, 0x0);
-
-      tu_cs_emit_pkt7(cs, CP_NOP, 2);  /* skip 'else' when 'if' is taken */
-   } /* else */ {
-      tu_cs_emit_pkt4(cs, OVERFLOW_FLAG_REG, 1);
-      tu_cs_emit(cs, 0x1);
-   }
 }
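
The CP conditionally writes the current pitch into the vsc_draw_overflow /
vsc_prim_overflow global slots whenever some pipe's stream size reaches
pitch - VSC_PAD. Once the submit completes, the driver can read those slots
back to decide whether the streams need to grow. A rough sketch of that check
(the tu6_global struct name, its CPU mapping, and the doubling policy are
assumptions for illustration, not part of this change):

   /* Sketch only: assumes a CPU-mapped global buffer whose
    * vsc_draw_overflow/vsc_prim_overflow slots are the ones targeted by
    * the CP_COND_WRITE5 packets above.
    */
   static void
   check_vsc_overflow(struct tu_cmd_buffer *cmd, struct tu6_global *global)
   {
      if (global->vsc_draw_overflow == cmd->vsc_draw_strm_pitch)
         cmd->vsc_draw_strm_pitch *= 2;   /* assumed resize policy */
      if (global->vsc_prim_overflow == cmd->vsc_prim_strm_pitch)
         cmd->vsc_prim_strm_pitch *= 2;   /* assumed resize policy */
   }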
 
 static void
 tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
    struct tu_physical_device *phys_dev = cmd->device->physical_device;
-   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
-   uint32_t x1 = tiling->tile0.offset.x;
-   uint32_t y1 = tiling->tile0.offset.y;
-   uint32_t x2 = tiling->render_area.offset.x + tiling->render_area.extent.width - 1;
-   uint32_t y2 = tiling->render_area.offset.y + tiling->render_area.extent.height - 1;
+   const struct tu_framebuffer *fb = cmd->state.framebuffer;
 
-   tu6_emit_window_scissor(cmd, cs, x1, y1, x2, y2);
+   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
 
    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
@@ -1376,8 +1021,15 @@ tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
    tu_cs_emit(cs, UNK_2D);
 
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
-   tu6_cache_flush(cmd, cs);
+   /* This flush is probably required because the VSC, which produces the
+    * visibility stream, is a client of UCHE, whereas the CP needs to read the
+    * visibility stream (without caching) to do draw skipping. The
+    * WFI+WAIT_FOR_ME combination guarantees that the binning commands
+    * submitted are finished before reading the VSC regs (in
+    * emit_vsc_overflow_test) or the VSC draw stream buffer directly
+    * (implicitly as part of draws).
+    */
+   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);
 
    tu_cs_emit_wfi(cs);
 
@@ -1390,56 +1042,110 @@ tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 
    tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
    tu_cs_emit(cs, 0x0);
+}
 
-   tu_cs_emit_wfi(cs);
+static struct tu_draw_state
+tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
+                          const struct tu_subpass *subpass,
+                          bool gmem)
+{
+   /* note: we could probably emit input attachments just once for the whole
+    * renderpass, which would avoid emitting both the sysmem and gmem versions
+    *
+    * we emit two texture descriptors for each input, as a workaround for
+    * d24s8, which can be sampled as both float (depth) and integer (stencil);
+    * tu_shader lowers uint input attachment loads to use the 2nd descriptor
+    * in the pair
+    * TODO: a smarter workaround
+    */
 
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_CCU_CNTL(.unknown = phys_dev->magic.RB_CCU_CNTL_gmem));
+   if (!subpass->input_count)
+      return (struct tu_draw_state) {};
 
-   cmd->wait_for_idle = false;
-}
+   struct tu_cs_memory texture;
+   VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
+                                 A6XX_TEX_CONST_DWORDS, &texture);
+   assert(result == VK_SUCCESS);
 
-static void
-tu_emit_sysmem_clear_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
-                                uint32_t a,
-                                const VkRenderPassBeginInfo *info)
-{
-   const struct tu_framebuffer *fb = cmd->state.framebuffer;
-   const struct tu_image_view *iview = fb->attachments[a].attachment;
-   const struct tu_render_pass_attachment *attachment =
-      &cmd->state.pass->attachments[a];
-   unsigned clear_mask = 0;
+   for (unsigned i = 0; i < subpass->input_count * 2; i++) {
+      uint32_t a = subpass->input_attachments[i / 2].attachment;
+      if (a == VK_ATTACHMENT_UNUSED)
+         continue;
 
-   /* note: this means it isn't used by any subpass and shouldn't be cleared anyway */
-   if (attachment->gmem_offset < 0)
-      return;
+      struct tu_image_view *iview =
+         cmd->state.framebuffer->attachments[a].attachment;
+      const struct tu_render_pass_attachment *att =
+         &cmd->state.pass->attachments[a];
+      uint32_t *dst = &texture.map[A6XX_TEX_CONST_DWORDS * i];
 
-   if (attachment->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
-      clear_mask = 0xf;
-   }
+      memcpy(dst, iview->descriptor, A6XX_TEX_CONST_DWORDS * 4);
+
+      if (i % 2 == 1 && att->format == VK_FORMAT_D24_UNORM_S8_UINT) {
+         /* note: this works because the spec says fb and input attachments
+          * must use the identity swizzle
+          */
+         dst[0] &= ~(A6XX_TEX_CONST_0_FMT__MASK |
+            A6XX_TEX_CONST_0_SWIZ_X__MASK | A6XX_TEX_CONST_0_SWIZ_Y__MASK |
+            A6XX_TEX_CONST_0_SWIZ_Z__MASK | A6XX_TEX_CONST_0_SWIZ_W__MASK);
+         dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_S8Z24_UINT) |
+            A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_Y) |
+            A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
+            A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
+            A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
+      }
+
+      if (!gmem)
+         continue;
 
-   if (vk_format_has_stencil(iview->vk_format)) {
-      clear_mask &= 0x1;
-      if (attachment->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
-         clear_mask |= 0x2;
-      if (clear_mask != 0x3)
-         tu_finishme("depth/stencil only load op");
+      /* patched for gmem */
+      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
+      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
+      dst[2] =
+         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
+         A6XX_TEX_CONST_2_PITCH(cmd->state.framebuffer->tile0.width * att->cpp);
+      dst[3] = 0;
+      dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
+      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
+      for (unsigned j = 6; j < A6XX_TEX_CONST_DWORDS; j++)
+         dst[j] = 0;
    }
 
-   if (!clear_mask)
-      return;
+   struct tu_cs cs;
+   struct tu_draw_state ds = tu_cs_draw_state(&cmd->sub_cs, &cs, 9);
+
+   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
+   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
+                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+                  CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+                  CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
+                  CP_LOAD_STATE6_0_NUM_UNIT(subpass->input_count * 2));
+   tu_cs_emit_qw(&cs, texture.iova);
+
+   tu_cs_emit_pkt4(&cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
+   tu_cs_emit_qw(&cs, texture.iova);
+
+   tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));
 
-   tu_clear_sysmem_attachment(cmd, cs, a,
-                              &info->pClearValues[a], &(struct VkClearRect) {
-      .rect = info->renderArea,
-      .baseArrayLayer = iview->base_layer,
-      .layerCount = iview->layer_count,
-   });
+   assert(cs.cur == cs.end); /* validate draw state size */
+
+   return ds;
+}
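
Since descriptors are emitted in float/uint pairs, the shader-side lowering
only has to adjust the texture slot index for stencil loads. A sketch of the
pairing convention described in the comment above (the helper is
illustrative, not the actual tu_shader lowering):

   /* Input attachment `att` occupies texture slot 2*att (float/depth view)
    * and slot 2*att + 1 (uint/stencil view, patched to FMT6_S8Z24_UINT for
    * d24s8 above).
    */
   static unsigned
   input_attachment_tex_slot(unsigned att, bool is_uint_load)
   {
      return att * 2 + (is_uint_load ? 1 : 0);
   }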
+
+static void
+tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *subpass)
+{
+   struct tu_cs *cs = &cmd->draw_cs;
+
+   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
+                         tu_emit_input_attachments(cmd, subpass, true));
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,
+                         tu_emit_input_attachments(cmd, subpass, false));
 }
 
 static void
-tu_emit_load_clear(struct tu_cmd_buffer *cmd,
-                   const VkRenderPassBeginInfo *info)
+tu_emit_renderpass_begin(struct tu_cmd_buffer *cmd,
+                         const VkRenderPassBeginInfo *info)
 {
    struct tu_cs *cs = &cmd->draw_cs;
 
@@ -1448,43 +1154,35 @@ tu_emit_load_clear(struct tu_cmd_buffer *cmd,
    tu6_emit_blit_scissor(cmd, cs, true);
 
    for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
-      tu6_emit_load_attachment(cmd, cs, i);
+      tu_load_gmem_attachment(cmd, cs, i, false);
 
    tu6_emit_blit_scissor(cmd, cs, false);
 
    for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
-      tu6_emit_clear_attachment(cmd, cs, i, info);
+      tu_clear_gmem_attachment(cmd, cs, i, info);
 
    tu_cond_exec_end(cs);
 
-   /* invalidate because reading input attachments will cache GMEM and
-    * the cache isn't updated when GMEM is written
-    * TODO: is there a no-cache bit for textures?
-    */
-   if (cmd->state.subpass->input_count)
-      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
-
    tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
 
    for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
-      tu_emit_sysmem_clear_attachment(cmd, cs, i, info);
+      tu_clear_sysmem_attachment(cmd, cs, i, info);
 
    tu_cond_exec_end(cs);
 }
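
tu_emit_renderpass_begin records both the gmem and sysmem variants into the
same command stream and lets the CP pick one at execution time. The
bracketing pattern, reduced to its skeleton (assuming the gmem half is opened
with CP_COND_EXEC_0_RENDER_MODE_GMEM just above this hunk, mirroring the
sysmem half visible here):

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
   /* ... gmem loads and clears ... */
   tu_cond_exec_end(cs);

   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
   /* ... sysmem clears ... */
   tu_cond_exec_end(cs);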
 
 static void
-tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
-                        const struct VkRect2D *renderArea)
+tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
    const struct tu_framebuffer *fb = cmd->state.framebuffer;
 
    assert(fb->width > 0 && fb->height > 0);
-   tu6_emit_window_scissor(cmd, cs, 0, 0, fb->width - 1, fb->height - 1);
-   tu6_emit_window_offset(cmd, cs, 0, 0);
+   tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
+   tu6_emit_window_offset(cs, 0, 0);
 
    tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
 
-   tu6_emit_lrz_flush(cmd, cs);
+   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
 
    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
@@ -1492,17 +1190,10 @@ tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    tu_cs_emit(cs, 0x0);
 
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
-   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
-   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
-
-   tu6_emit_wfi(cmd, cs);
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_CCU_CNTL(0x10000000));
+   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
 
    /* enable stream-out, with sysmem there is only one pass: */
-   tu_cs_emit_regs(cs,
-                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));
+   tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
 
    tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
    tu_cs_emit(cs, 0x1);
@@ -1519,58 +1210,38 @@ tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    /* Do any resolves of the last subpass. These are handled in the
     * tile_store_ib in the gmem path.
     */
-
-   const struct tu_subpass *subpass = cmd->state.subpass;
-   if (subpass->resolve_attachments) {
-      for (unsigned i = 0; i < subpass->color_count; i++) {
-         uint32_t a = subpass->resolve_attachments[i].attachment;
-         if (a != VK_ATTACHMENT_UNUSED)
-            tu6_emit_sysmem_resolve(cmd, cs, a,
-                                    subpass->color_attachments[i].attachment);
-      }
-   }
+   tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);
 
    tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
 
    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    tu_cs_emit(cs, 0x0);
 
-   tu6_emit_lrz_flush(cmd, cs);
-
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
 
    tu_cs_sanity_check(cs);
 }
 
-
 static void
 tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
    struct tu_physical_device *phys_dev = cmd->device->physical_device;
 
-   tu6_emit_lrz_flush(cmd, cs);
+   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
 
    /* lrz clear? */
 
-   tu6_emit_cache_flush(cmd, cs);
-
    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
    tu_cs_emit(cs, 0x0);
 
-   /* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
-   tu6_emit_wfi(cmd, cs);
-   tu_cs_emit_regs(cs,
-                   A6XX_RB_CCU_CNTL(phys_dev->magic.RB_CCU_CNTL_gmem));
+   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
 
-   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+   const struct tu_framebuffer *fb = cmd->state.framebuffer;
    if (use_hw_binning(cmd)) {
       /* enable stream-out during binning pass: */
-      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
+      tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
 
-      tu6_emit_bin_size(cs,
-                        tiling->tile0.extent.width,
-                        tiling->tile0.extent.height,
+      tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
                         A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
 
       tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
@@ -1578,11 +1249,9 @@ tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
       tu6_emit_binning_pass(cmd, cs);
 
       /* and disable stream-out for draw pass: */
-      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));
+      tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
 
-      tu6_emit_bin_size(cs,
-                        tiling->tile0.extent.width,
-                        tiling->tile0.extent.height,
+      tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
                         A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
 
       tu_cs_emit_regs(cs,
@@ -1596,42 +1265,22 @@ tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
       tu_cs_emit(cs, 0x1);
    } else {
      /* no binning pass, so enable stream-out for draw pass: */
-      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
+      tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
 
-      tu6_emit_bin_size(cs,
-                        tiling->tile0.extent.width,
-                        tiling->tile0.extent.height,
-                        0x6000000);
+      tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
    }
 
    tu_cs_sanity_check(cs);
 }
 
 static void
-tu6_render_tile(struct tu_cmd_buffer *cmd,
-                struct tu_cs *cs,
-                const struct tu_tile *tile)
+tu6_render_tile(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 {
-   tu6_emit_tile_select(cmd, cs, tile);
-
    tu_cs_emit_call(cs, &cmd->draw_cs);
-   cmd->wait_for_idle = true;
 
    if (use_hw_binning(cmd)) {
-      tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
-      tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
-                     A6XX_CP_REG_TEST_0_BIT(0) |
-                     A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
-
-      tu_cs_reserve(cs, 3 + 2);
-      tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
-      tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
-      tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(2));
-
-      /* if (no overflow) */ {
-         tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
-         tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
-      }
+      tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
+      tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
    }
 
    tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);
@@ -1647,9 +1296,9 @@ tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_regs(cs,
                    A6XX_GRAS_LRZ_CNTL(0));
 
-   tu6_emit_lrz_flush(cmd, cs);
+   tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
 
-   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
+   tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);
 
    tu_cs_sanity_check(cs);
 }
@@ -1657,15 +1306,24 @@ tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 static void
 tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
 {
-   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
+   const struct tu_framebuffer *fb = cmd->state.framebuffer;
 
    tu6_tile_render_begin(cmd, &cmd->cs);
 
-   for (uint32_t y = 0; y < tiling->tile_count.height; y++) {
-      for (uint32_t x = 0; x < tiling->tile_count.width; x++) {
-         struct tu_tile tile;
-         tu_tiling_config_get_tile(tiling, cmd->device, x, y, &tile);
-         tu6_render_tile(cmd, &cmd->cs, &tile);
+   uint32_t pipe = 0;
+   for (uint32_t py = 0; py < fb->pipe_count.height; py++) {
+      for (uint32_t px = 0; px < fb->pipe_count.width; px++, pipe++) {
+         uint32_t tx1 = px * fb->pipe0.width;
+         uint32_t ty1 = py * fb->pipe0.height;
+         uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);
+         uint32_t ty2 = MIN2(ty1 + fb->pipe0.height, fb->tile_count.height);
+         uint32_t slot = 0;
+         for (uint32_t ty = ty1; ty < ty2; ty++) {
+            for (uint32_t tx = tx1; tx < tx2; tx++, slot++) {
+               tu6_emit_tile_select(cmd, &cmd->cs, tx, ty, pipe, slot);
+               tu6_render_tile(cmd, &cmd->cs);
+            }
+         }
       }
    }
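
The visibility streams are per-pipe, which is why `slot` restarts at zero for
each pipe and why the MIN2 clamps shrink edge pipes that hang off the end of
the tile grid. A sketch of the inverse mapping implied by this loop (the
helper is illustrative):

   /* Map an absolute tile coordinate to the (pipe, slot) pair the loop
    * above would select for it. Slots are row-major within a pipe, using
    * the pipe's clamped width.
    */
   static void
   tile_to_pipe_slot(const struct tu_framebuffer *fb,
                     uint32_t tx, uint32_t ty,
                     uint32_t *pipe, uint32_t *slot)
   {
      uint32_t px = tx / fb->pipe0.width;
      uint32_t py = ty / fb->pipe0.height;
      uint32_t tx1 = px * fb->pipe0.width;
      uint32_t ty1 = py * fb->pipe0.height;
      uint32_t w = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width) - tx1;

      *pipe = py * fb->pipe_count.width + px;
      *slot = (ty - ty1) * w + (tx - tx1);
   }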
 
@@ -1675,12 +1333,9 @@ tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
 static void
 tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
 {
-   const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
-   tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
+   tu6_sysmem_render_begin(cmd, &cmd->cs);
 
    tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
-   cmd->wait_for_idle = true;
 
    tu6_sysmem_render_end(cmd, &cmd->cs);
 }
@@ -1688,7 +1343,7 @@ tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
 static void
 tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
 {
-   const uint32_t tile_store_space = 32 + 23 * cmd->state.pass->attachment_count;
+   const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
    struct tu_cs sub_cs;
 
    VkResult result =
@@ -1704,162 +1359,6 @@ tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
    cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
 }
 
-static void
-tu_cmd_update_tiling_config(struct tu_cmd_buffer *cmd,
-                            const VkRect2D *render_area)
-{
-   const struct tu_device *dev = cmd->device;
-   struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-
-   tiling->render_area = *render_area;
-   tiling->force_sysmem = force_sysmem(cmd, render_area);
-
-   tu_tiling_config_update_tile_layout(tiling, dev, cmd->state.pass->gmem_pixels);
-   tu_tiling_config_update_pipe_layout(tiling, dev);
-   tu_tiling_config_update_pipes(tiling, dev);
-}
-
-const struct tu_dynamic_state default_dynamic_state = {
-   .viewport =
-     {
-       .count = 0,
-     },
-   .scissor =
-     {
-       .count = 0,
-     },
-   .line_width = 1.0f,
-   .depth_bias =
-     {
-       .bias = 0.0f,
-       .clamp = 0.0f,
-       .slope = 0.0f,
-     },
-   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
-   .depth_bounds =
-     {
-       .min = 0.0f,
-       .max = 1.0f,
-     },
-   .stencil_compare_mask =
-     {
-       .front = ~0u,
-       .back = ~0u,
-     },
-   .stencil_write_mask =
-     {
-       .front = ~0u,
-       .back = ~0u,
-     },
-   .stencil_reference =
-     {
-       .front = 0u,
-       .back = 0u,
-     },
-};
-
-static void UNUSED /* FINISHME */
-tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
-                      const struct tu_dynamic_state *src)
-{
-   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
-   uint32_t copy_mask = src->mask;
-   uint32_t dest_mask = 0;
-
-   tu_use_args(cmd_buffer); /* FINISHME */
-
-   /* Make sure to copy the number of viewports/scissors because they can
-    * only be specified at pipeline creation time.
-    */
-   dest->viewport.count = src->viewport.count;
-   dest->scissor.count = src->scissor.count;
-   dest->discard_rectangle.count = src->discard_rectangle.count;
-
-   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
-      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
-                 src->viewport.count * sizeof(VkViewport))) {
-         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
-                      src->viewport.count);
-         dest_mask |= TU_DYNAMIC_VIEWPORT;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_SCISSOR) {
-      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
-                 src->scissor.count * sizeof(VkRect2D))) {
-         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
-                      src->scissor.count);
-         dest_mask |= TU_DYNAMIC_SCISSOR;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
-      if (dest->line_width != src->line_width) {
-         dest->line_width = src->line_width;
-         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
-      if (memcmp(&dest->depth_bias, &src->depth_bias,
-                 sizeof(src->depth_bias))) {
-         dest->depth_bias = src->depth_bias;
-         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
-      if (memcmp(&dest->blend_constants, &src->blend_constants,
-                 sizeof(src->blend_constants))) {
-         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
-         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
-      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
-                 sizeof(src->depth_bounds))) {
-         dest->depth_bounds = src->depth_bounds;
-         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
-      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
-                 sizeof(src->stencil_compare_mask))) {
-         dest->stencil_compare_mask = src->stencil_compare_mask;
-         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
-      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
-                 sizeof(src->stencil_write_mask))) {
-         dest->stencil_write_mask = src->stencil_write_mask;
-         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
-      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
-                 sizeof(src->stencil_reference))) {
-         dest->stencil_reference = src->stencil_reference;
-         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
-      }
-   }
-
-   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
-      if (memcmp(&dest->discard_rectangle.rectangles,
-                 &src->discard_rectangle.rectangles,
-                 src->discard_rectangle.count * sizeof(VkRect2D))) {
-         typed_memcpy(dest->discard_rectangle.rectangles,
-                      src->discard_rectangle.rectangles,
-                      src->discard_rectangle.count);
-         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
-      }
-   }
-}
-
 static VkResult
 tu_create_cmd_buffer(struct tu_device *device,
                      struct tu_cmd_pool *pool,
@@ -1867,12 +1366,12 @@ tu_create_cmd_buffer(struct tu_device *device,
                      VkCommandBuffer *pCommandBuffer)
 {
    struct tu_cmd_buffer *cmd_buffer;
-   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
-                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+   cmd_buffer = vk_object_zalloc(&device->vk, NULL, sizeof(*cmd_buffer),
+                                 VK_OBJECT_TYPE_COMMAND_BUFFER);
    if (cmd_buffer == NULL)
       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
    cmd_buffer->device = device;
    cmd_buffer->pool = pool;
    cmd_buffer->level = level;
@@ -1899,47 +1398,26 @@ tu_create_cmd_buffer(struct tu_device *device,
 
    list_inithead(&cmd_buffer->upload.list);
 
-   VkResult result = tu_bo_init_new(device, &cmd_buffer->scratch_bo, 0x1000);
-   if (result != VK_SUCCESS)
-      goto fail_scratch_bo;
-
-   /* TODO: resize on overflow */
-   cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
-   cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
-   cmd_buffer->vsc_data = device->vsc_data;
-   cmd_buffer->vsc_data2 = device->vsc_data2;
-
    return VK_SUCCESS;
-
-fail_scratch_bo:
-   list_del(&cmd_buffer->pool_link);
-   return result;
 }
 
 static void
 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
 {
-   tu_bo_finish(cmd_buffer->device, &cmd_buffer->scratch_bo);
-
    list_del(&cmd_buffer->pool_link);
 
-   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
-      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
-
    tu_cs_finish(&cmd_buffer->cs);
    tu_cs_finish(&cmd_buffer->draw_cs);
    tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
    tu_cs_finish(&cmd_buffer->sub_cs);
 
    tu_bo_list_destroy(&cmd_buffer->bo_list);
-   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+   vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
 }
 
 static VkResult
 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
 {
-   cmd_buffer->wait_for_idle = true;
-
    cmd_buffer->record_result = VK_SUCCESS;
 
    tu_bo_list_reset(&cmd_buffer->bo_list);
@@ -1948,10 +1426,8 @@ tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
    tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
    tu_cs_reset(&cmd_buffer->sub_cs);
 
-   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
-      cmd_buffer->descriptors[i].valid = 0;
-      cmd_buffer->descriptors[i].push_dirty = false;
-   }
+   for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
+      memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));
 
    cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
 
@@ -1979,7 +1455,6 @@ tu_AllocateCommandBuffers(VkDevice _device,
          list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
 
          result = tu_reset_cmd_buffer(cmd_buffer);
-         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
          cmd_buffer->level = pAllocateInfo->level;
 
          pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
@@ -2039,6 +1514,16 @@ tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
    return tu_reset_cmd_buffer(cmd_buffer);
 }
 
+/* Initialize the cache, assuming all necessary flushes have happened but *not*
+ * invalidations.
+ */
+static void
+tu_cache_init(struct tu_cache_state *cache)
+{
+   cache->flush_bits = 0;
+   cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
+}
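
In this scheme flush_bits are emitted at the next flush point, while
pending_flush_bits only turn into real cache operations once a later access
needs them. Seeding pending_flush_bits with all invalidates encodes "the
caches may hold stale data from before this command buffer, but nothing in it
has written them yet". A small illustration of the promotion step
(tu_flush_for_access below does the same thing with masks rather than a
single bit):

   /* Promote a pending invalidate into an actual one when a read that
    * depends on it shows up.
    */
   static void
   demand_flush_bit(struct tu_cache_state *cache, enum tu_cmd_flush_bits bit)
   {
      cache->flush_bits |= cache->pending_flush_bits & bit;
      cache->pending_flush_bits &= ~bit;
   }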
+
 VkResult
 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                       const VkCommandBufferBeginInfo *pBeginInfo)
@@ -2056,14 +1541,16 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
    }
 
    memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
+   cmd_buffer->state.index_size = 0xff; /* dirty restart index */
+
+   tu_cache_init(&cmd_buffer->state.cache);
+   tu_cache_init(&cmd_buffer->state.renderpass_cache);
    cmd_buffer->usage_flags = pBeginInfo->flags;
 
    tu_cs_begin(&cmd_buffer->cs);
    tu_cs_begin(&cmd_buffer->draw_cs);
    tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
 
-   cmd_buffer->scratch_seqno = 0;
-
    /* setup initial configuration into command buffer */
    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
       switch (cmd_buffer->queue_family_index) {
@@ -2073,11 +1560,18 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
       default:
          break;
       }
-   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
-              (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
-      assert(pBeginInfo->pInheritanceInfo);
-      cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
-      cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
+   } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+      if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+         assert(pBeginInfo->pInheritanceInfo);
+         cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
+         cmd_buffer->state.subpass =
+            &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
+      } else {
+         /* When executing in the middle of another command buffer, the CCU
+          * state is unknown.
+          */
+         cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
+      }
    }
 
    cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
@@ -2085,6 +1579,10 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
    return VK_SUCCESS;
 }
 
+/* Sets vertex buffers to HW binding points.  We emit VBs in SDS (so that bin
+ * rendering can skip over unused state), so we need to collect all the
+ * bindings together into a single state emit at draw time.
+ */
 void
 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                         uint32_t firstBinding,
@@ -2097,12 +1595,14 @@ tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
    assert(firstBinding + bindingCount <= MAX_VBS);
 
    for (uint32_t i = 0; i < bindingCount; i++) {
-      cmd->state.vb.buffers[firstBinding + i] =
-         tu_buffer_from_handle(pBuffers[i]);
+      struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);
+
+      cmd->state.vb.buffers[firstBinding + i] = buf;
       cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
+
+      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
    }
 
-   /* VB states depend on VkPipelineVertexInputStateCreateInfo */
    cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
 }
 
@@ -2115,23 +1615,41 @@ tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    TU_FROM_HANDLE(tu_buffer, buf, buffer);
 
-   /* initialize/update the restart index */
-   if (!cmd->state.index_buffer || cmd->state.index_type != indexType) {
-      struct tu_cs *draw_cs = &cmd->draw_cs;
 
-      tu6_emit_restart_index(
-         draw_cs, indexType == VK_INDEX_TYPE_UINT32 ? 0xffffffff : 0xffff);
 
-      tu_cs_sanity_check(draw_cs);
+   uint32_t index_size, index_shift, restart_index;
+
+   switch (indexType) {
+   case VK_INDEX_TYPE_UINT16:
+      index_size = INDEX4_SIZE_16_BIT;
+      index_shift = 1;
+      restart_index = 0xffff;
+      break;
+   case VK_INDEX_TYPE_UINT32:
+      index_size = INDEX4_SIZE_32_BIT;
+      index_shift = 2;
+      restart_index = 0xffffffff;
+      break;
+   case VK_INDEX_TYPE_UINT8_EXT:
+      index_size = INDEX4_SIZE_8_BIT;
+      index_shift = 0;
+      restart_index = 0xff;
+      break;
+   default:
+      unreachable("invalid VkIndexType");
    }
 
-   /* track the BO */
-   if (cmd->state.index_buffer != buf)
-      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
+   /* initialize/update the restart index */
+   if (cmd->state.index_size != index_size)
+      tu_cs_emit_regs(&cmd->draw_cs, A6XX_PC_RESTART_INDEX(restart_index));
+
+   assert(buf->size >= offset);
 
-   cmd->state.index_buffer = buf;
-   cmd->state.index_offset = offset;
-   cmd->state.index_type = indexType;
+   cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
+   cmd->state.max_index_count = (buf->size - offset) >> index_shift;
+   cmd->state.index_size = index_size;
+
+   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
 }
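
A worked example of the bookkeeping above, assuming a 64-byte buffer bound at
offset 4 with VK_INDEX_TYPE_UINT16 (values are illustrative):

   /* index_shift = 1 for 16-bit indices, so: */
   uint32_t max_index_count = (64 - 4) >> 1;   /* 30 indices */
   /* index_va points 4 bytes past the BO start, and the 0xffff restart
    * index is only re-emitted because index_size changed (state.index_size
    * is seeded with 0xff in tu_BeginCommandBuffer to force the first emit).
    */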
 
 void
@@ -2144,30 +1662,116 @@ tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                          uint32_t dynamicOffsetCount,
                          const uint32_t *pDynamicOffsets)
 {
-   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
    unsigned dyn_idx = 0;
 
    struct tu_descriptor_state *descriptors_state =
-      tu_get_descriptors_state(cmd_buffer, pipelineBindPoint);
+      tu_get_descriptors_state(cmd, pipelineBindPoint);
 
    for (unsigned i = 0; i < descriptorSetCount; ++i) {
       unsigned idx = i + firstSet;
       TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
 
       descriptors_state->sets[idx] = set;
-      descriptors_state->valid |= (1u << idx);
 
       for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
-         unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
+         /* update the contents of the dynamic descriptor set */
+         unsigned src_idx = j;
+         unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
          assert(dyn_idx < dynamicOffsetCount);
 
-         descriptors_state->dynamic_buffers[idx] =
-         set->dynamic_descriptors[j].va + pDynamicOffsets[dyn_idx];
+         uint32_t *dst =
+            &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
+         uint32_t *src =
+            &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
+         uint32_t offset = pDynamicOffsets[dyn_idx];
+
+         /* Patch the storage/uniform descriptors right away. */
+         if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
+            /* Note: we can assume here that the addition won't roll over and
+             * change the SIZE field.
+             */
+            uint64_t va = src[0] | ((uint64_t)src[1] << 32);
+            va += offset;
+            dst[0] = va;
+            dst[1] = va >> 32;
+         } else {
+            memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
+            /* Note: A6XX_IBO_5_DEPTH is always 0 */
+            uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
+            va += offset;
+            dst[4] = va;
+            dst[5] = va >> 32;
+         }
+      }
+
+      for (unsigned j = 0; j < set->layout->buffer_count; ++j) {
+         if (set->buffers[j]) {
+            tu_bo_list_add(&cmd->bo_list, set->buffers[j],
+                           MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
+         }
+      }
+
+      if (set->size > 0) {
+         tu_bo_list_add(&cmd->bo_list, &set->pool->bo,
+                        MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
       }
    }
+   assert(dyn_idx == dynamicOffsetCount);
+
+   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_invalidate_value;
+   uint64_t addr[MAX_SETS + 1] = {};
+   struct tu_cs *cs, state_cs;
+
+   for (uint32_t i = 0; i < MAX_SETS; i++) {
+      struct tu_descriptor_set *set = descriptors_state->sets[i];
+      if (set)
+         addr[i] = set->va | 3;
+   }
+
+   if (layout->dynamic_offset_count) {
+      /* allocate and fill out dynamic descriptor set */
+      struct tu_cs_memory dynamic_desc_set;
+      VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
+                                    A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
+      assert(result == VK_SUCCESS);
+
+      memcpy(dynamic_desc_set.map, descriptors_state->dynamic_descriptors,
+             layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
+      addr[MAX_SETS] = dynamic_desc_set.iova | 3;
+   }
+
+   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
+      sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
+      hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
+      hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(0x1f);
+
+      cmd->state.desc_sets = tu_cs_draw_state(&cmd->sub_cs, &state_cs, 24);
+      cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
+      cs = &state_cs;
+   } else {
+      assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);
+
+      sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
+      hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
+      hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(0x1f);
+
+      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
+      cs = &cmd->cs;
+   }
+
+   tu_cs_emit_pkt4(cs, sp_bindless_base_reg, 10);
+   tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
+   tu_cs_emit_pkt4(cs, hlsq_bindless_base_reg, 10);
+   tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
+   tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(.dword = hlsq_invalidate_value));
 
-   cmd_buffer->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
+   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
+      assert(cs->cur == cs->end); /* validate draw state size */
+      tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
+      tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
+   }
 }
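
The two 10-dword register writes imply MAX_SETS + 1 == 5 qword base pointers:
one per bindable set plus a trailing slot for the dynamic descriptor set
assembled above. In outline (set_va[] is illustrative, and the meaning of the
low bits OR'd into each address is an assumption here; they appear to be a
descriptor-size field expected by the bindless hardware):

   uint64_t addr[5] = {};                     /* MAX_SETS (4) + 1 dynamic */
   for (uint32_t i = 0; i < 4; i++)
      addr[i] = set_va[i] ? (set_va[i] | 3) : 0;
   addr[4] = dynamic_desc_set_iova | 3;
   /* emitted twice, once to SP_BINDLESS_BASE and once to
    * HLSQ_BINDLESS_BASE, then invalidated via HLSQ_INVALIDATE_CMD
    */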
 
 void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
@@ -2178,33 +1782,86 @@ void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
                                            const VkDeviceSize *pSizes)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   assert(firstBinding + bindingCount <= IR3_MAX_SO_BUFFERS);
+   struct tu_cs *cs = &cmd->draw_cs;
+
+   /* using COND_REG_EXEC for xfb commands matches the blob behavior;
+    * presumably there isn't any benefit to using a draw state when the
+    * condition is (SYSMEM | BINNING)
+    */
+   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
+                          CP_COND_REG_EXEC_0_SYSMEM |
+                          CP_COND_REG_EXEC_0_BINNING);
 
    for (uint32_t i = 0; i < bindingCount; i++) {
-      uint32_t idx = firstBinding + i;
       TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
+      uint64_t iova = buf->bo->iova + pOffsets[i];
+      uint32_t size = buf->bo->size - pOffsets[i];
+      uint32_t idx = i + firstBinding;
 
-      if (pOffsets[i] != 0)
-         cmd->state.streamout_reset |= 1 << idx;
+      if (pSizes && pSizes[i] != VK_WHOLE_SIZE)
+         size = pSizes[i];
 
-      cmd->state.streamout_buf.buffers[idx] = buf;
-      cmd->state.streamout_buf.offsets[idx] = pOffsets[i];
-      cmd->state.streamout_buf.sizes[idx] = pSizes[i];
+      /* BUFFER_BASE is 32-byte aligned, add remaining offset to BUFFER_OFFSET */
+      uint32_t offset = iova & 0x1f;
+      iova &= ~(uint64_t) 0x1f;
 
-      cmd->state.streamout_enabled |= 1 << idx;
+      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE(idx), 3);
+      tu_cs_emit_qw(cs, iova);
+      tu_cs_emit(cs, size + offset);
+
+      cmd->state.streamout_offset[idx] = offset;
+
+      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
    }
 
-   cmd->state.dirty |= TU_CMD_DIRTY_STREAMOUT_BUFFERS;
+   tu_cond_exec_end(cs);
 }
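
A worked example of the BASE/OFFSET split above: for iova = 0x100001f4 the
low five bits give offset = 0x14, BUFFER_BASE is programmed with the aligned
0x100001e0, and the size written to the register grows by the same 0x14 so
the end of the buffer stays put. The saved streamout_offset is added back
into VPC_SO_BUFFER_OFFSET in tu_CmdBeginTransformFeedbackEXT below.

   /* Example: iova = 0x100001f4 (illustrative) */
   uint32_t offset = iova & 0x1f;        /* 0x14 */
   iova &= ~(uint64_t) 0x1f;             /* 0x100001e0, 32-byte aligned */
   uint32_t reg_size = size + offset;    /* end address unchanged */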
 
-void tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
-                                       uint32_t firstCounterBuffer,
-                                       uint32_t counterBufferCount,
-                                       const VkBuffer *pCounterBuffers,
-                                       const VkDeviceSize *pCounterBufferOffsets)
+void
+tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
+                                uint32_t firstCounterBuffer,
+                                uint32_t counterBufferCount,
+                                const VkBuffer *pCounterBuffers,
+                                const VkDeviceSize *pCounterBufferOffsets)
 {
-   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
-   /* TODO do something with counter buffer? */
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs *cs = &cmd->draw_cs;
+
+   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
+                          CP_COND_REG_EXEC_0_SYSMEM |
+                          CP_COND_REG_EXEC_0_BINNING);
+
+   /* TODO: only update offset for active buffers */
+   for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++)
+      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, cmd->state.streamout_offset[i]));
+
+   for (uint32_t i = 0; i < counterBufferCount; i++) {
+      uint32_t idx = firstCounterBuffer + i;
+      uint32_t offset = cmd->state.streamout_offset[idx];
+
+      if (!pCounterBuffers[i])
+         continue;
+
+      TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
+
+      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
+
+      tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
+      tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
+                     CP_MEM_TO_REG_0_UNK31 |
+                     CP_MEM_TO_REG_0_CNT(1));
+      tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
+
+      if (offset) {
+         tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
+         tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
+                        CP_REG_RMW_0_SRC1_ADD);
+         tu_cs_emit_qw(cs, 0xffffffff);
+         tu_cs_emit_qw(cs, offset);
+      }
+   }
+
+   tu_cond_exec_end(cs);
 }
 
 void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
@@ -2213,11 +1870,58 @@ void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
                                        const VkBuffer *pCounterBuffers,
                                        const VkDeviceSize *pCounterBufferOffsets)
 {
-   assert(firstCounterBuffer + counterBufferCount <= IR3_MAX_SO_BUFFERS);
-   /* TODO do something with counter buffer? */
-
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   cmd->state.streamout_enabled = 0;
+   struct tu_cs *cs = &cmd->draw_cs;
+
+   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
+                          CP_COND_REG_EXEC_0_SYSMEM |
+                          CP_COND_REG_EXEC_0_BINNING);
+
+   /* TODO: only flush buffers that need to be flushed */
+   for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
+      /* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
+      tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
+      tu_cs_emit_qw(cs, global_iova(cmd, flush_base[i]));
+      tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
+   }
+
+   for (uint32_t i = 0; i < counterBufferCount; i++) {
+      uint32_t idx = firstCounterBuffer + i;
+      uint32_t offset = cmd->state.streamout_offset[idx];
+
+      if (!pCounterBuffers[i])
+         continue;
+
+      TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
+
+      tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_WRITE);
+
+      /* VPC_SO_FLUSH_BASE holds a dword count, but the counter buffer stores bytes */
+      tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
+      tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
+                     CP_MEM_TO_REG_0_SHIFT_BY_2 |
+                     0x40000 | /* ??? */
+                     CP_MEM_TO_REG_0_UNK31 |
+                     CP_MEM_TO_REG_0_CNT(1));
+      tu_cs_emit_qw(cs, global_iova(cmd, flush_base[idx]));
+
+      if (offset) {
+         tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
+         tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
+                        CP_REG_RMW_0_SRC1_ADD);
+         tu_cs_emit_qw(cs, 0xffffffff);
+         tu_cs_emit_qw(cs, -offset);
+      }
+
+      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
+      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
+                     CP_REG_TO_MEM_0_CNT(1));
+      tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
+   }
+
+   tu_cond_exec_end(cs);
+
+   cmd->state.xfb_used = true;
 }
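
The counter readback above has two quirks worth spelling out: FLUSH_SO_n
stores a dword count, so CP_MEM_TO_REG_0_SHIFT_BY_2 scales it to bytes on the
way into the scratch register, and the 32-byte alignment offset folded into
VPC_SO_BUFFER_OFFSET at bind time is subtracted back out (the RMW add of
-offset) before the value lands in the app's counter buffer. In C terms
(variable names are illustrative):

   uint32_t counter_bytes = (flush_base_dwords << 2) - streamout_offset;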
 
 void
@@ -2230,28 +1934,49 @@ tu_CmdPushConstants(VkCommandBuffer commandBuffer,
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    memcpy((void*) cmd->push_constants + offset, pValues, size);
-   cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
+   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
 }
 
-VkResult
-tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
+/* Flush everything which has been made available but we haven't actually
+ * flushed yet.
+ */
+static void
+tu_flush_all_pending(struct tu_cache_state *cache)
 {
-   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
+   cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
+   cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
+}
 
-   if (cmd_buffer->scratch_seqno) {
-      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
-                     MSM_SUBMIT_BO_WRITE);
-   }
+VkResult
+tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
+{
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
 
-   if (cmd_buffer->use_vsc_data) {
-      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
-                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
-      tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
-                     MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
+   /* We currently flush CCU at the end of the command buffer, like
+    * what the blob does. There's implicit synchronization around every
+    * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
+    * know yet if this command buffer will be the last in the submit so we
+    * have to defensively flush everything else.
+    *
+    * TODO: We could definitely do better than this, since these flushes
+    * aren't required by Vulkan, but we'd need kernel support to do that.
+    * Ideally, we'd like the kernel to flush everything afterwards, so that we
+    * wouldn't have to do any flushes here, and when submitting multiple
+    * command buffers there wouldn't be any unnecessary flushes in between.
+    */
+   if (cmd_buffer->state.pass) {
+      tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
+      tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
+   } else {
+      tu_flush_all_pending(&cmd_buffer->state.cache);
+      cmd_buffer->state.cache.flush_bits |=
+         TU_CMD_FLAG_CCU_FLUSH_COLOR |
+         TU_CMD_FLAG_CCU_FLUSH_DEPTH;
+      tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
    }
 
-   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->border_color,
-                  MSM_SUBMIT_BO_READ);
+   tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->device->global_bo,
+                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
 
    for (uint32_t i = 0; i < cmd_buffer->draw_cs.bo_count; i++) {
       tu_bo_list_add(&cmd_buffer->bo_list, cmd_buffer->draw_cs.bos[i],
@@ -2277,6 +2002,20 @@ tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
    return cmd_buffer->record_result;
 }
 
+static struct tu_cs
+tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
+{
+   struct tu_cs cs;
+
+   assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
+   cmd->state.dynamic_state[id] = tu_cs_draw_state(&cmd->sub_cs, &cs, size);
+
+   tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
+   tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);
+
+   return cs;
+}
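
Every vkCmdSet* entry point below follows the same pattern: carve a
fixed-size draw state out of sub_cs, re-point the per-ID draw state group at
it, and emit the registers into the returned CS. The size argument must match
the dwords emitted exactly (hence the asserts against cs.cur == cs.end
elsewhere). For example, mirroring tu_CmdSetDepthBounds below:

   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3);
   tu_cs_emit_regs(&cs,
                   A6XX_RB_Z_BOUNDS_MIN(0.0f),
                   A6XX_RB_Z_BOUNDS_MAX(1.0f));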
+
 void
 tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                    VkPipelineBindPoint pipelineBindPoint,
@@ -2285,26 +2024,55 @@ tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
 
-   switch (pipelineBindPoint) {
-   case VK_PIPELINE_BIND_POINT_GRAPHICS:
-      cmd->state.pipeline = pipeline;
-      cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE;
-      break;
-   case VK_PIPELINE_BIND_POINT_COMPUTE:
-      cmd->state.compute_pipeline = pipeline;
-      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
-      break;
-   default:
-      unreachable("unrecognized pipeline bind point");
-      break;
-   }
-
-   tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
-                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
    for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
       tu_bo_list_add(&cmd->bo_list, pipeline->cs.bos[i],
                      MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
    }
+
+   if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
+      cmd->state.compute_pipeline = pipeline;
+      tu_cs_emit_state_ib(&cmd->cs, pipeline->program.state);
+      return;
+   }
+
+   assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);
+
+   cmd->state.pipeline = pipeline;
+   cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
+
+   struct tu_cs *cs = &cmd->draw_cs;
+   uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);
+   uint32_t i;
+
+   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
+   for_each_bit(i, mask)
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);
+
+   /* If the new pipeline requires more VBs than we had previously set up, we
+    * need to re-emit them in SDS.  If it requires the same set or fewer, we
+    * can just re-use the old SDS.
+    */
+   if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
+      cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
+
+   /* the dynamic linewidth state depends on the pipeline state's gras_su_cntl,
+    * so the dynamic state ib must be updated when the pipeline changes
+    */
+   if (pipeline->dynamic_state_mask & BIT(VK_DYNAMIC_STATE_LINE_WIDTH)) {
+      struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);
+
+      cmd->state.dynamic_gras_su_cntl &= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+      cmd->state.dynamic_gras_su_cntl |= pipeline->gras_su_cntl;
+
+      tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
+   }
 }
 
 void
@@ -2314,12 +2082,11 @@ tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                   const VkViewport *pViewports)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   struct tu_cs *draw_cs = &cmd->draw_cs;
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 18);
 
    assert(firstViewport == 0 && viewportCount == 1);
-   tu6_emit_viewport(draw_cs, pViewports);
 
-   tu_cs_sanity_check(draw_cs);
+   tu6_emit_viewport(&cs, pViewports);
 }
 
 void
@@ -2329,23 +2096,23 @@ tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                  const VkRect2D *pScissors)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   struct tu_cs *draw_cs = &cmd->draw_cs;
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_SCISSOR, 3);
 
    assert(firstScissor == 0 && scissorCount == 1);
-   tu6_emit_scissor(draw_cs, pScissors);
 
-   tu_cs_sanity_check(draw_cs);
+   tu6_emit_scissor(&cs, pScissors);
 }
 
 void
 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_LINE_WIDTH, 2);
 
-   cmd->state.dynamic.line_width = lineWidth;
+   cmd->state.dynamic_gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+   cmd->state.dynamic_gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(lineWidth / 2.0f);
 
-   /* line width depends on VkPipelineRasterizationStateCreateInfo */
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
+   tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.dynamic_gras_su_cntl));
 }
 
 void
@@ -2355,12 +2122,9 @@ tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                    float depthBiasSlopeFactor)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   struct tu_cs *draw_cs = &cmd->draw_cs;
-
-   tu6_emit_depth_bias(draw_cs, depthBiasConstantFactor, depthBiasClamp,
-                       depthBiasSlopeFactor);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BIAS, 4);
 
-   tu_cs_sanity_check(draw_cs);
+   tu6_emit_depth_bias(&cs, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
 }
 
 void
@@ -2368,11 +2132,10 @@ tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                         const float blendConstants[4])
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   struct tu_cs *draw_cs = &cmd->draw_cs;
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5);
 
-   tu6_emit_blend_constants(draw_cs, blendConstants);
-
-   tu_cs_sanity_check(draw_cs);
+   tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
+   tu_cs_emit_array(&cs, (const uint32_t *) blendConstants, 4);
 }
 
 void
@@ -2380,6 +2143,21 @@ tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                      float minDepthBounds,
                      float maxDepthBounds)
 {
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3);
+
+   tu_cs_emit_regs(&cs,
+                   A6XX_RB_Z_BOUNDS_MIN(minDepthBounds),
+                   A6XX_RB_Z_BOUNDS_MAX(maxDepthBounds));
+}
+
+static void
+update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
+{
+   if (face & VK_STENCIL_FACE_FRONT_BIT)
+      *value = (*value & 0xff00) | (mask & 0xff);
+   if (face & VK_STENCIL_FACE_BACK_BIT)
+      *value = (*value & 0xff) | (mask & 0xff) << 8;
 }
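
Worked example: front state lives in the low byte and back state in the high
byte, so with *value == 0x55aa (back = 0x55, front = 0xaa) a front-only
update with mask = 0xff yields 0x55ff. The packed dword feeds
RB_STENCILMASK / RB_STENCILWRMASK / RB_STENCILREF directly:

   uint32_t v = 0x55aa;
   update_stencil_mask(&v, VK_STENCIL_FACE_FRONT_BIT, 0xff);
   /* v == 0x55ff: only the front (low) byte changed */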
 
 void
@@ -2388,14 +2166,11 @@ tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                             uint32_t compareMask)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2);
 
-   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
-      cmd->state.dynamic.stencil_compare_mask.front = compareMask;
-   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
-      cmd->state.dynamic.stencil_compare_mask.back = compareMask;
+   update_stencil_mask(&cmd->state.dynamic_stencil_mask, faceMask, compareMask);
 
-   /* the front/back compare masks must be updated together */
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
+   tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.dword = cmd->state.dynamic_stencil_mask));
 }
 
 void
@@ -2404,14 +2179,11 @@ tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                           uint32_t writeMask)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2);
 
-   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
-      cmd->state.dynamic.stencil_write_mask.front = writeMask;
-   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
-      cmd->state.dynamic.stencil_write_mask.back = writeMask;
+   update_stencil_mask(&cmd->state.dynamic_stencil_wrmask, faceMask, writeMask);
 
-   /* the front/back write masks must be updated together */
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
+   tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = cmd->state.dynamic_stencil_wrmask));
 }
 
 void
@@ -2420,16 +2192,268 @@ tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                           uint32_t reference)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2);
 
-   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
-      cmd->state.dynamic.stencil_reference.front = reference;
-   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
-      cmd->state.dynamic.stencil_reference.back = reference;
+   update_stencil_mask(&cmd->state.dynamic_stencil_ref, faceMask, reference);
 
-   /* the front/back references must be updated together */
-   cmd->state.dirty |= TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+   tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.dword = cmd->state.dynamic_stencil_ref));
 }
 
+void
+tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
+                            const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
+{
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS, 9);
+
+   assert(pSampleLocationsInfo);
+
+   tu6_emit_sample_locations(&cs, pSampleLocationsInfo);
+}
+
+static void
+tu_flush_for_access(struct tu_cache_state *cache,
+                    enum tu_cmd_access_mask src_mask,
+                    enum tu_cmd_access_mask dst_mask)
+{
+   enum tu_cmd_flush_bits flush_bits = 0;
+
+   if (src_mask & TU_ACCESS_HOST_WRITE) {
+      /* Host writes are always visible to CP, so only invalidate GPU caches */
+      cache->pending_flush_bits |= TU_CMD_FLAG_GPU_INVALIDATE;
+   }
+
+   if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
+      /* Invalidate CP and 2D engine (make it do WFI + WFM if necessary) as
+       * well.
+       */
+      cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
+   }
+
+   if (src_mask & TU_ACCESS_CP_WRITE) {
+      /* Flush the CP write queue. However, a WFI shouldn't be necessary as
+       * WAIT_MEM_WRITES should cover it.
+       */
+      cache->pending_flush_bits |=
+         TU_CMD_FLAG_WAIT_MEM_WRITES |
+         TU_CMD_FLAG_GPU_INVALIDATE |
+         TU_CMD_FLAG_WAIT_FOR_ME;
+   }
+
+#define SRC_FLUSH(domain, flush, invalidate) \
+   if (src_mask & TU_ACCESS_##domain##_WRITE) {                      \
+      cache->pending_flush_bits |= TU_CMD_FLAG_##flush |             \
+         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate);   \
+   }
+
+   SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
+   SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+   SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef SRC_FLUSH
+
+#define SRC_INCOHERENT_FLUSH(domain, flush, invalidate)              \
+   if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) {           \
+      flush_bits |= TU_CMD_FLAG_##flush;                             \
+      cache->pending_flush_bits |=                                   \
+         (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate);   \
+   }
+
+   SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+   SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef SRC_INCOHERENT_FLUSH
+
+   /* Treat host & sysmem write accesses the same, since the kernel implicitly
+    * drains the queue before signalling completion to the host.
+    */
+   if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE |
+                   TU_ACCESS_HOST_READ | TU_ACCESS_HOST_WRITE)) {
+      flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
+   }
+
+#define DST_FLUSH(domain, flush, invalidate) \
+   if (dst_mask & (TU_ACCESS_##domain##_READ |                 \
+                   TU_ACCESS_##domain##_WRITE)) {              \
+      flush_bits |= cache->pending_flush_bits &                \
+         (TU_CMD_FLAG_##invalidate |                           \
+          (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush));     \
+   }
+
+   DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
+   DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+   DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef DST_FLUSH
+
+#define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
+   if (dst_mask & (TU_ACCESS_##domain##_INCOHERENT_READ |      \
+                   TU_ACCESS_##domain##_INCOHERENT_WRITE)) {   \
+      flush_bits |= TU_CMD_FLAG_##invalidate |                 \
+          (cache->pending_flush_bits &                         \
+           (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush));    \
+   }
+
+   DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+   DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef DST_INCOHERENT_FLUSH
+
+   if (dst_mask & TU_ACCESS_WFI_READ) {
+      flush_bits |= cache->pending_flush_bits &
+         (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_IDLE);
+   }
+
+   if (dst_mask & TU_ACCESS_WFM_READ) {
+      flush_bits |= cache->pending_flush_bits &
+         (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_ME);
+   }
+
+   cache->flush_bits |= flush_bits;
+   cache->pending_flush_bits &= ~flush_bits;
+}
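+/* Example: a UCHE write followed by an incoherent CCU color read leaves
+ * CACHE_FLUSH (and the non-UCHE invalidates) pending after the src pass;
+ * the dst pass then pulls CACHE_FLUSH into flush_bits and adds
+ * CCU_INVALIDATE_COLOR, so one flush+invalidate pair covers the hazard.
+ */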
+
+static enum tu_cmd_access_mask
+vk2tu_access(VkAccessFlags flags, bool gmem)
+{
+   enum tu_cmd_access_mask mask = 0;
+
+   /* If the GPU writes a buffer that is then read by an indirect draw
+    * command, we theoretically need to emit a WFI to wait for any cache
+    * flushes, and then a WAIT_FOR_ME to wait on the CP for the WFI to
+    * complete. Waiting for the WFI to complete is performed as part of the
+    * draw by the firmware, so we just need to execute the WFI.
+    *
+    * Transform feedback counters are read via CP_MEM_TO_REG, which implicitly
+    * does CP_WAIT_FOR_ME, but we still need a WFI if the GPU writes it.
+    */
+   if (flags &
+       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT |
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      mask |= TU_ACCESS_WFI_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
+        VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
+        VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP */
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      mask |= TU_ACCESS_SYSMEM_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT |
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      mask |= TU_ACCESS_CP_WRITE;
+   }
+
+   if (flags &
+       (VK_ACCESS_HOST_READ_BIT |
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      mask |= TU_ACCESS_HOST_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_HOST_WRITE_BIT |
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      mask |= TU_ACCESS_HOST_WRITE;
+   }
+
+   if (flags &
+       (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
+        VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
+        VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
+        /* TODO: Is there a no-cache bit for textures so that we can ignore
+         * these?
+         */
+        VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
+        VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      mask |= TU_ACCESS_UCHE_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
+        VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      mask |= TU_ACCESS_UCHE_WRITE;
+   }
+
+   /* When using GMEM, the CCU is always flushed automatically to GMEM, and
+    * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
+    * previous writes in sysmem mode when transitioning to GMEM. Therefore we
+    * can ignore CCU and pretend that color attachments and transfers use
+    * sysmem directly.
+    */
+
+   if (flags &
+       (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+        VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      if (gmem)
+         mask |= TU_ACCESS_SYSMEM_READ;
+      else
+         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      if (gmem)
+         mask |= TU_ACCESS_SYSMEM_READ;
+      else
+         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      if (gmem) {
+         mask |= TU_ACCESS_SYSMEM_WRITE;
+      } else {
+         mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+      }
+   }
+
+   if (flags &
+       (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      if (gmem) {
+         mask |= TU_ACCESS_SYSMEM_WRITE;
+      } else {
+         mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
+      }
+   }
+
+   /* When the dst access is a transfer read/write, it seems we sometimes need
+    * to insert a WFI after any flushes, to guarantee that the flushes finish
+    * before the 2D engine starts. However the opposite (i.e. a WFI after
+    * CP_BLIT and before any subsequent flush) does not seem to be needed, and
+    * the blob doesn't emit such a WFI.
+    */
+
+   if (flags &
+       (VK_ACCESS_TRANSFER_WRITE_BIT |
+        VK_ACCESS_MEMORY_WRITE_BIT)) {
+      if (gmem) {
+         mask |= TU_ACCESS_SYSMEM_WRITE;
+      } else {
+         mask |= TU_ACCESS_CCU_COLOR_WRITE;
+      }
+      mask |= TU_ACCESS_WFI_READ;
+   }
+
+   if (flags &
+       (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
+        VK_ACCESS_MEMORY_READ_BIT)) {
+      mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
+   }
+
+   return mask;
+}
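+/* E.g. a sysmem barrier from VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT to
+ * VK_ACCESS_SHADER_READ_BIT maps to CCU_COLOR_INCOHERENT_WRITE ->
+ * UCHE_READ, which tu_flush_for_access() resolves to a CCU color flush
+ * plus a UCHE cache invalidate.
+ */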
+
 void
 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                       uint32_t commandBufferCount,
@@ -2440,6 +2464,15 @@ tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
 
    assert(commandBufferCount > 0);
 
+   /* Emit any pending flushes. */
+   if (cmd->state.pass) {
+      tu_flush_all_pending(&cmd->state.renderpass_cache);
+      tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
+   } else {
+      tu_flush_all_pending(&cmd->state.cache);
+      tu_emit_cache_flush(cmd, &cmd->cs);
+   }
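+   /* (the secondary's commands were recorded without knowledge of the
+    * primary's pending flushes, so those flushes cannot be deferred past
+    * the call)
+    */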
+
    for (uint32_t i = 0; i < commandBufferCount; i++) {
       TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
 
@@ -2465,6 +2498,9 @@ tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
             cmd->record_result = result;
             break;
          }
+
+         if (secondary->has_tess)
+            cmd->has_tess = true;
       } else {
          assert(tu_cs_is_empty(&secondary->draw_cs));
          assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
@@ -2474,10 +2510,23 @@ tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                            MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
          }
 
-         tu_cs_emit_call(&cmd->cs, &secondary->cs);
+         tu_cs_add_entries(&cmd->cs, &secondary->cs);
       }
+
+      cmd->state.index_size = secondary->state.index_size; /* for restart index update */
    }
    cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
+
+   /* After executing secondary command buffers, there may have been arbitrary
+    * flushes executed, so when we encounter a pipeline barrier with a
+    * srcMask, we have to assume that we need to invalidate. Therefore we need
+    * to re-initialize the cache with all pending invalidate bits set.
+    */
+   if (cmd->state.pass) {
+      tu_cache_init(&cmd->state.renderpass_cache);
+   } else {
+      tu_cache_init(&cmd->state.cache);
+   }
 }
 
 VkResult
@@ -2489,15 +2538,15 @@ tu_CreateCommandPool(VkDevice _device,
    TU_FROM_HANDLE(tu_device, device, _device);
    struct tu_cmd_pool *pool;
 
-   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
-                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
+                          VK_OBJECT_TYPE_COMMAND_POOL);
    if (pool == NULL)
       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
    if (pAllocator)
       pool->alloc = *pAllocator;
    else
-      pool->alloc = device->alloc;
+      pool->alloc = device->vk.alloc;
 
    list_inithead(&pool->cmd_buffers);
    list_inithead(&pool->free_cmd_buffers);
@@ -2532,7 +2581,7 @@ tu_DestroyCommandPool(VkDevice _device,
       tu_cmd_buffer_destroy(cmd_buffer);
    }
 
-   vk_free2(&device->alloc, pAllocator, pool);
+   vk_object_free(&device->vk, pAllocator, pool);
 }
 
 VkResult
@@ -2571,6 +2620,29 @@ tu_TrimCommandPool(VkDevice device,
    }
 }
 
+static void
+tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
+                   const struct tu_subpass_barrier *barrier,
+                   bool external)
+{
+   /* Note: we don't know until the end of the subpass whether we'll use
+    * sysmem, so assume sysmem here to be safe.
+    */
+   struct tu_cache_state *cache =
+      external ? &cmd_buffer->state.cache : &cmd_buffer->state.renderpass_cache;
+   enum tu_cmd_access_mask src_flags =
+      vk2tu_access(barrier->src_access_mask, false);
+   enum tu_cmd_access_mask dst_flags =
+      vk2tu_access(barrier->dst_access_mask, false);
+
+   if (barrier->incoherent_ccu_color)
+      src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+   if (barrier->incoherent_ccu_depth)
+      src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
+
+   tu_flush_for_access(cache, src_flags, dst_flags);
+}
+
 void
 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                       const VkRenderPassBeginInfo *pRenderPassBegin,
@@ -2583,26 +2655,35 @@ tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
    cmd->state.pass = pass;
    cmd->state.subpass = pass->subpasses;
    cmd->state.framebuffer = fb;
+   cmd->state.render_area = pRenderPassBegin->renderArea;
 
-   tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
    tu_cmd_prepare_tile_store_ib(cmd);
 
-   tu_emit_load_clear(cmd, pRenderPassBegin);
+   /* Note: because this is external, any flushes will happen before draw_cs
+    * gets called. However, deferred flushes may still have to happen later
+    * as part of the subpass.
+    */
+   tu_subpass_barrier(cmd, &pass->subpasses[0].start_barrier, true);
+   cmd->state.renderpass_cache.pending_flush_bits =
+      cmd->state.cache.pending_flush_bits;
+   cmd->state.renderpass_cache.flush_bits = 0;
+
+   tu_emit_renderpass_begin(cmd, pRenderPassBegin);
 
    tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
    tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
-   tu6_emit_msaa(cmd, cmd->state.subpass, &cmd->draw_cs);
+   tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
    tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
 
-   /* note: use_hw_binning only checks tiling config */
-   if (use_hw_binning(cmd))
-      cmd->use_vsc_data = true;
+   tu_set_input_attachments(cmd, cmd->state.subpass);
 
    for (uint32_t i = 0; i < fb->attachment_count; ++i) {
       const struct tu_image_view *iview = fb->attachments[i].attachment;
       tu_bo_list_add(&cmd->bo_list, iview->image->bo,
                      MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
    }
+
+   cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE;
 }
 
 void
@@ -2622,53 +2703,50 @@ tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
    struct tu_cs *cs = &cmd->draw_cs;
 
    const struct tu_subpass *subpass = cmd->state.subpass++;
-   /* TODO:
-    * if msaa samples change between subpasses,
-    * attachment store is broken for some attachments
-    */
+
+   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
+
    if (subpass->resolve_attachments) {
       tu6_emit_blit_scissor(cmd, cs, true);
+
       for (unsigned i = 0; i < subpass->color_count; i++) {
          uint32_t a = subpass->resolve_attachments[i].attachment;
-         if (a != VK_ATTACHMENT_UNUSED) {
-            tu6_emit_resolve(cmd, cs, a,
-                             subpass->color_attachments[i].attachment);
-         }
+         if (a == VK_ATTACHMENT_UNUSED)
+            continue;
+
+         tu_store_gmem_attachment(cmd, cs, a,
+                                  subpass->color_attachments[i].attachment);
+
+         if (pass->attachments[a].gmem_offset < 0)
+            continue;
+
+         /* TODO:
+          * check if the resolved attachment is needed by later subpasses;
+          * if it is, we should do a GMEM->GMEM resolve instead of
+          * GMEM->MEM->GMEM.
+          */
+         tu_finishme("missing GMEM->GMEM resolve path\n");
+         tu_load_gmem_attachment(cmd, cs, a, true);
       }
    }
 
-   /* invalidate because reading input attachments will cache GMEM and
-    * the cache isn't updated when GMEM is written
-    * TODO: is there a no-cache bit for textures?
-    */
-   if (cmd->state.subpass->input_count)
-      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+   tu_cond_exec_end(cs);
+
+   tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
+
+   tu6_emit_sysmem_resolves(cmd, cs, subpass);
+
+   tu_cond_exec_end(cs);
+
+   /* Handle dependencies for the next subpass */
+   tu_subpass_barrier(cmd, &cmd->state.subpass->start_barrier, false);
 
    /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
    tu6_emit_zs(cmd, cmd->state.subpass, cs);
    tu6_emit_mrt(cmd, cmd->state.subpass, cs);
-   tu6_emit_msaa(cmd, cmd->state.subpass, cs);
+   tu6_emit_msaa(cs, cmd->state.subpass->samples);
    tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
 
-   /* Emit flushes so that input attachments will read the correct value. This
-    * is for sysmem only, although it shouldn't do much harm on gmem.
-    */
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
-   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-
-   /* TODO:
-    * since we don't know how to do GMEM->GMEM resolve,
-    * resolve attachments are resolved to memory then loaded to GMEM again if needed
-    */
-   if (subpass->resolve_attachments) {
-      for (unsigned i = 0; i < subpass->color_count; i++) {
-         uint32_t a = subpass->resolve_attachments[i].attachment;
-         if (a != VK_ATTACHMENT_UNUSED && pass->attachments[a].gmem_offset >= 0) {
-               tu_finishme("missing GMEM->GMEM resolve, performance will suffer\n");
-               tu6_emit_predicated_blit(cmd, cs, a, a, false);
-         }
-      }
-   }
+   tu_set_input_attachments(cmd, cmd->state.subpass);
 }
 
 void
@@ -2679,267 +2757,6 @@ tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
    tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
 }
 
-struct tu_draw_info
-{
-   /**
-    * Number of vertices.
-    */
-   uint32_t count;
-
-   /**
-    * Index of the first vertex.
-    */
-   int32_t vertex_offset;
-
-   /**
-    * First instance id.
-    */
-   uint32_t first_instance;
-
-   /**
-    * Number of instances.
-    */
-   uint32_t instance_count;
-
-   /**
-    * First index (indexed draws only).
-    */
-   uint32_t first_index;
-
-   /**
-    * Whether it's an indexed draw.
-    */
-   bool indexed;
-
-   /**
-    * Indirect draw parameters resource.
-    */
-   struct tu_buffer *indirect;
-   uint64_t indirect_offset;
-   uint32_t stride;
-
-   /**
-    * Draw count parameters resource.
-    */
-   struct tu_buffer *count_buffer;
-   uint64_t count_buffer_offset;
-
-   /**
-    * Stream output parameters resource.
-    */
-   struct tu_buffer *streamout_buffer;
-   uint64_t streamout_buffer_offset;
-};
-
-#define ENABLE_ALL (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
-#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)
-
-enum tu_draw_state_group_id
-{
-   TU_DRAW_STATE_PROGRAM,
-   TU_DRAW_STATE_PROGRAM_BINNING,
-   TU_DRAW_STATE_VI,
-   TU_DRAW_STATE_VI_BINNING,
-   TU_DRAW_STATE_VP,
-   TU_DRAW_STATE_RAST,
-   TU_DRAW_STATE_DS,
-   TU_DRAW_STATE_BLEND,
-   TU_DRAW_STATE_VS_CONST,
-   TU_DRAW_STATE_FS_CONST,
-   TU_DRAW_STATE_VS_TEX,
-   TU_DRAW_STATE_FS_TEX_SYSMEM,
-   TU_DRAW_STATE_FS_TEX_GMEM,
-   TU_DRAW_STATE_FS_IBO,
-   TU_DRAW_STATE_VS_PARAMS,
-
-   TU_DRAW_STATE_COUNT,
-};
-
-struct tu_draw_state_group
-{
-   enum tu_draw_state_group_id id;
-   uint32_t enable_mask;
-   struct tu_cs_entry ib;
-};
-
-const static void *
-sampler_ptr(struct tu_descriptor_state *descriptors_state,
-            const struct tu_descriptor_map *map, unsigned i,
-            unsigned array_index)
-{
-   assert(descriptors_state->valid & (1 << map->set[i]));
-
-   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
-   assert(map->binding[i] < set->layout->binding_count);
-
-   const struct tu_descriptor_set_binding_layout *layout =
-      &set->layout->binding[map->binding[i]];
-
-   if (layout->immutable_samplers_offset) {
-      const uint32_t *immutable_samplers =
-         tu_immutable_samplers(set->layout, layout);
-
-      return &immutable_samplers[array_index * A6XX_TEX_SAMP_DWORDS];
-   }
-
-   switch (layout->type) {
-   case VK_DESCRIPTOR_TYPE_SAMPLER:
-      return &set->mapped_ptr[layout->offset / 4];
-   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-      return &set->mapped_ptr[layout->offset / 4 + A6XX_TEX_CONST_DWORDS +
-                              array_index * (A6XX_TEX_CONST_DWORDS + A6XX_TEX_SAMP_DWORDS)];
-   default:
-      unreachable("unimplemented descriptor type");
-      break;
-   }
-}
-
-static void
-write_tex_const(struct tu_cmd_buffer *cmd,
-                uint32_t *dst,
-                struct tu_descriptor_state *descriptors_state,
-                const struct tu_descriptor_map *map,
-                unsigned i, unsigned array_index, bool is_sysmem)
-{
-   assert(descriptors_state->valid & (1 << map->set[i]));
-
-   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
-   assert(map->binding[i] < set->layout->binding_count);
-
-   const struct tu_descriptor_set_binding_layout *layout =
-      &set->layout->binding[map->binding[i]];
-
-   switch (layout->type) {
-   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
-   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
-   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
-      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
-                                   array_index * A6XX_TEX_CONST_DWORDS],
-             A6XX_TEX_CONST_DWORDS * 4);
-      break;
-   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-      memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
-                                   array_index *
-                                   (A6XX_TEX_CONST_DWORDS +
-                                    A6XX_TEX_SAMP_DWORDS)],
-             A6XX_TEX_CONST_DWORDS * 4);
-      break;
-   default:
-      unreachable("unimplemented descriptor type");
-      break;
-   }
-
-   if (layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT && !is_sysmem) {
-      const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
-      uint32_t a = cmd->state.subpass->input_attachments[map->value[i] +
-                                                         array_index].attachment;
-      const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
-
-      assert(att->gmem_offset >= 0);
-
-      dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
-      dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
-      dst[2] &= ~(A6XX_TEX_CONST_2_TYPE__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
-      dst[2] |=
-         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
-         A6XX_TEX_CONST_2_PITCH(tiling->tile0.extent.width * att->cpp);
-      dst[3] = 0;
-      dst[4] = cmd->device->physical_device->gmem_base + att->gmem_offset;
-      dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
-      for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
-         dst[i] = 0;
-
-      if (cmd->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
-         tu_finishme("patch input attachment pitch for secondary cmd buffer");
-   }
-}
-
-static void
-write_image_ibo(struct tu_cmd_buffer *cmd,
-                uint32_t *dst,
-                struct tu_descriptor_state *descriptors_state,
-                const struct tu_descriptor_map *map,
-                unsigned i, unsigned array_index)
-{
-   assert(descriptors_state->valid & (1 << map->set[i]));
-
-   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
-   assert(map->binding[i] < set->layout->binding_count);
-
-   const struct tu_descriptor_set_binding_layout *layout =
-      &set->layout->binding[map->binding[i]];
-
-   assert(layout->type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
-
-   memcpy(dst, &set->mapped_ptr[layout->offset / 4 +
-                                (array_index * 2 + 1) * A6XX_TEX_CONST_DWORDS],
-          A6XX_TEX_CONST_DWORDS * 4);
-}
-
-static uint64_t
-buffer_ptr(struct tu_descriptor_state *descriptors_state,
-           const struct tu_descriptor_map *map,
-           unsigned i, unsigned array_index)
-{
-   assert(descriptors_state->valid & (1 << map->set[i]));
-
-   struct tu_descriptor_set *set = descriptors_state->sets[map->set[i]];
-   assert(map->binding[i] < set->layout->binding_count);
-
-   const struct tu_descriptor_set_binding_layout *layout =
-      &set->layout->binding[map->binding[i]];
-
-   switch (layout->type) {
-   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
-   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
-      return descriptors_state->dynamic_buffers[layout->dynamic_offset_offset +
-                                                array_index];
-   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
-   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
-      return (uint64_t) set->mapped_ptr[layout->offset / 4 + array_index * 2 + 1] << 32 |
-                        set->mapped_ptr[layout->offset / 4 + array_index * 2];
-   default:
-      unreachable("unimplemented descriptor type");
-      break;
-   }
-}
-
-static inline uint32_t
-tu6_stage2opcode(gl_shader_stage type)
-{
-   switch (type) {
-   case MESA_SHADER_VERTEX:
-   case MESA_SHADER_TESS_CTRL:
-   case MESA_SHADER_TESS_EVAL:
-   case MESA_SHADER_GEOMETRY:
-      return CP_LOAD_STATE6_GEOM;
-   case MESA_SHADER_FRAGMENT:
-   case MESA_SHADER_COMPUTE:
-   case MESA_SHADER_KERNEL:
-      return CP_LOAD_STATE6_FRAG;
-   default:
-      unreachable("bad shader type");
-   }
-}
-
-static inline enum a6xx_state_block
-tu6_stage2shadersb(gl_shader_stage type)
-{
-   switch (type) {
-   case MESA_SHADER_VERTEX:
-      return SB6_VS_SHADER;
-   case MESA_SHADER_FRAGMENT:
-      return SB6_FS_SHADER;
-   case MESA_SHADER_COMPUTE:
-   case MESA_SHADER_KERNEL:
-      return SB6_CS_SHADER;
-   default:
-      unreachable("bad shader type");
-      return ~0;
-   }
-}
-
 static void
 tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
                      struct tu_descriptor_state *descriptors_state,
@@ -2948,107 +2765,63 @@ tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
 {
    const struct tu_program_descriptor_linkage *link =
       &pipeline->program.link[type];
-   const struct ir3_ubo_analysis_state *state = &link->ubo_state;
-
-   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
-      if (state->range[i].start < state->range[i].end) {
-         uint32_t size = state->range[i].end - state->range[i].start;
-         uint32_t offset = state->range[i].start;
-
-         /* and even if the start of the const buffer is before
-          * first_immediate, the end may not be:
-          */
-         size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
-
-         if (size == 0)
-            continue;
-
-         /* things should be aligned to vec4: */
-         debug_assert((state->range[i].offset % 16) == 0);
-         debug_assert((size % 16) == 0);
-         debug_assert((offset % 16) == 0);
-
-         if (i == 0) {
-            /* push constants */
-            tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (size / 4));
-            tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
-                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
-                  CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
-                  CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
-                  CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
-            tu_cs_emit(cs, 0);
-            tu_cs_emit(cs, 0);
-            for (unsigned i = 0; i < size / 4; i++)
-               tu_cs_emit(cs, push_constants[i + offset / 4]);
-            continue;
-         }
+   const struct ir3_ubo_analysis_state *state = &link->const_state.ubo_state;
 
-         /* Look through the UBO map to find our UBO index, and get the VA for
-          * that UBO.
-          */
-         uint64_t va = 0;
-         uint32_t ubo_idx = i - 1;
-         uint32_t ubo_map_base = 0;
-         for (int j = 0; j < link->ubo_map.num; j++) {
-            if (ubo_idx >= ubo_map_base &&
-                ubo_idx < ubo_map_base + link->ubo_map.array_size[j]) {
-               va = buffer_ptr(descriptors_state, &link->ubo_map, j,
-                               ubo_idx - ubo_map_base);
-               break;
-            }
-            ubo_map_base += link->ubo_map.array_size[j];
-         }
-         assert(va);
-
-         tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
-         tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
-               CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
-               CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
-               CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
-               CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
-         tu_cs_emit_qw(cs, va + offset);
-      }
+   if (link->push_consts.count > 0) {
+      unsigned num_units = link->push_consts.count;
+      unsigned offset = link->push_consts.lo;
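+      /* the push constant offset and count are in units of vec4s */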
+      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
+      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
+            CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+            CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+            CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+            CP_LOAD_STATE6_0_NUM_UNIT(num_units));
+      tu_cs_emit(cs, 0);
+      tu_cs_emit(cs, 0);
+      for (unsigned i = 0; i < num_units * 4; i++)
+         tu_cs_emit(cs, push_constants[i + offset * 4]);
    }
-}
-
-static void
-tu6_emit_ubos(struct tu_cs *cs, const struct tu_pipeline *pipeline,
-              struct tu_descriptor_state *descriptors_state,
-              gl_shader_stage type)
-{
-   const struct tu_program_descriptor_linkage *link =
-      &pipeline->program.link[type];
 
-   uint32_t num = MIN2(link->ubo_map.num_desc, link->const_state.num_ubos);
-   uint32_t anum = align(num, 2);
+   for (uint32_t i = 0; i < state->num_enabled; i++) {
+      uint32_t size = state->range[i].end - state->range[i].start;
+      uint32_t offset = state->range[i].start;
 
-   if (!num)
-      return;
+      /* and even if the start of the const buffer is before
+       * first_immediate, the end may not be:
+       */
+      size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
 
-   tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + (2 * anum));
-   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(link->const_state.offsets.ubo) |
-         CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
-         CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
-         CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
-         CP_LOAD_STATE6_0_NUM_UNIT(anum/2));
-   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
-   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
-
-   unsigned emitted = 0;
-   for (unsigned i = 0; emitted < num && i < link->ubo_map.num; i++) {
-      for (unsigned j = 0; emitted < num && j < link->ubo_map.array_size[i]; j++) {
-         tu_cs_emit_qw(cs, buffer_ptr(descriptors_state, &link->ubo_map, i, j));
-         emitted++;
-      }
-   }
+      if (size == 0)
+         continue;
 
-   for (; emitted < anum; emitted++) {
-      tu_cs_emit(cs, 0xffffffff);
-      tu_cs_emit(cs, 0xffffffff);
-   }
-}
+      /* things should be aligned to vec4: */
+      debug_assert((state->range[i].offset % 16) == 0);
+      debug_assert((size % 16) == 0);
+      debug_assert((offset % 16) == 0);
 
-static struct tu_cs_entry
+      /* Dig out the descriptor from the descriptor state and read the VA from
+       * it.
+       */
+      assert(state->range[i].ubo.bindless);
+      uint32_t *base = state->range[i].ubo.bindless_base == MAX_SETS ?
+         descriptors_state->dynamic_descriptors :
+         descriptors_state->sets[state->range[i].ubo.bindless_base]->mapped_ptr;
+      unsigned block = state->range[i].ubo.block;
+      uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
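+      /* the UBO descriptor holds a 64-bit base address split across dwords
+       * 0 and 1; the bits of dword 1 above BASE_HI hold the UBO size
+       */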
+      uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);
+      assert(va);
+
+      tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
+      tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
+            CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+            CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+            CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
+            CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
+      tu_cs_emit_qw(cs, va + offset);
+   }
+}
+
+static struct tu_draw_state
 tu6_emit_consts(struct tu_cmd_buffer *cmd,
                 const struct tu_pipeline *pipeline,
                 struct tu_descriptor_state *descriptors_state,
@@ -3058,726 +2831,372 @@ tu6_emit_consts(struct tu_cmd_buffer *cmd,
    tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
 
    tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
-   tu6_emit_ubos(&cs, pipeline, descriptors_state, type);
-
-   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
-}
-
-static VkResult
-tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
-                   const struct tu_draw_info *draw,
-                   struct tu_cs_entry *entry)
-{
-   /* TODO: fill out more than just base instance */
-   const struct tu_program_descriptor_linkage *link =
-      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
-   const struct ir3_const_state *const_state = &link->const_state;
-   struct tu_cs cs;
-
-   if (const_state->offsets.driver_param >= link->constlen) {
-      *entry = (struct tu_cs_entry) {};
-      return VK_SUCCESS;
-   }
-
-   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 8, &cs);
-   if (result != VK_SUCCESS)
-      return result;
-
-   tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
-   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(const_state->offsets.driver_param) |
-         CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
-         CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
-         CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
-         CP_LOAD_STATE6_0_NUM_UNIT(1));
-   tu_cs_emit(&cs, 0);
-   tu_cs_emit(&cs, 0);
-
-   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
-
-   tu_cs_emit(&cs, 0);
-   tu_cs_emit(&cs, 0);
-   tu_cs_emit(&cs, draw->first_instance);
-   tu_cs_emit(&cs, 0);
-
-   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
-   return VK_SUCCESS;
-}
-
-static VkResult
-tu6_emit_textures(struct tu_cmd_buffer *cmd,
-                  const struct tu_pipeline *pipeline,
-                  struct tu_descriptor_state *descriptors_state,
-                  gl_shader_stage type,
-                  struct tu_cs_entry *entry,
-                  bool is_sysmem)
-{
-   struct tu_cs *draw_state = &cmd->sub_cs;
-   const struct tu_program_descriptor_linkage *link =
-      &pipeline->program.link[type];
-   VkResult result;
-
-   if (link->texture_map.num_desc == 0 && link->sampler_map.num_desc == 0) {
-      *entry = (struct tu_cs_entry) {};
-      return VK_SUCCESS;
-   }
-
-   /* allocate and fill texture state */
-   struct ts_cs_memory tex_const;
-   result = tu_cs_alloc(draw_state, link->texture_map.num_desc,
-                        A6XX_TEX_CONST_DWORDS, &tex_const);
-   if (result != VK_SUCCESS)
-      return result;
-
-   int tex_index = 0;
-   for (unsigned i = 0; i < link->texture_map.num; i++) {
-      for (int j = 0; j < link->texture_map.array_size[i]; j++) {
-         write_tex_const(cmd,
-                         &tex_const.map[A6XX_TEX_CONST_DWORDS * tex_index++],
-                         descriptors_state, &link->texture_map, i, j,
-                         is_sysmem);
-      }
-   }
-
-   /* allocate and fill sampler state */
-   struct ts_cs_memory tex_samp = { 0 };
-   if (link->sampler_map.num_desc) {
-      result = tu_cs_alloc(draw_state, link->sampler_map.num_desc,
-                           A6XX_TEX_SAMP_DWORDS, &tex_samp);
-      if (result != VK_SUCCESS)
-         return result;
-
-      int sampler_index = 0;
-      for (unsigned i = 0; i < link->sampler_map.num; i++) {
-         for (int j = 0; j < link->sampler_map.array_size[i]; j++) {
-            const uint32_t *sampler = sampler_ptr(descriptors_state,
-                                                  &link->sampler_map,
-                                                  i, j);
-            memcpy(&tex_samp.map[A6XX_TEX_SAMP_DWORDS * sampler_index++],
-                   sampler, A6XX_TEX_SAMP_DWORDS * 4);
-         }
-      }
-   }
-
-   unsigned tex_samp_reg, tex_const_reg, tex_count_reg;
-   enum a6xx_state_block sb;
-
-   switch (type) {
-   case MESA_SHADER_VERTEX:
-      sb = SB6_VS_TEX;
-      tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
-      tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
-      tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
-      break;
-   case MESA_SHADER_FRAGMENT:
-      sb = SB6_FS_TEX;
-      tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
-      tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
-      tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
-      break;
-   case MESA_SHADER_COMPUTE:
-      sb = SB6_CS_TEX;
-      tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
-      tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
-      tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
-      break;
-   default:
-      unreachable("bad state block");
-   }
-
-   struct tu_cs cs;
-   result = tu_cs_begin_sub_stream(draw_state, 16, &cs);
-   if (result != VK_SUCCESS)
-      return result;
-
-   if (link->sampler_map.num_desc) {
-      /* output sampler state: */
-      tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
-      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
-                 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
-                 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
-                 CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
-                 CP_LOAD_STATE6_0_NUM_UNIT(link->sampler_map.num_desc));
-      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
-
-      tu_cs_emit_pkt4(&cs, tex_samp_reg, 2);
-      tu_cs_emit_qw(&cs, tex_samp.iova); /* SRC_ADDR_LO/HI */
-   }
-
-   /* emit texture state: */
-   tu_cs_emit_pkt7(&cs, tu6_stage2opcode(type), 3);
-   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
-      CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
-      CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
-      CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
-      CP_LOAD_STATE6_0_NUM_UNIT(link->texture_map.num_desc));
-   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
 
-   tu_cs_emit_pkt4(&cs, tex_const_reg, 2);
-   tu_cs_emit_qw(&cs, tex_const.iova); /* SRC_ADDR_LO/HI */
-
-   tu_cs_emit_pkt4(&cs, tex_count_reg, 1);
-   tu_cs_emit(&cs, link->texture_map.num_desc);
-
-   *entry = tu_cs_end_sub_stream(draw_state, &cs);
-   return VK_SUCCESS;
+   return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
 }
 
-static VkResult
-tu6_emit_ibo(struct tu_cmd_buffer *cmd,
-             const struct tu_pipeline *pipeline,
-             struct tu_descriptor_state *descriptors_state,
-             gl_shader_stage type,
-             struct tu_cs_entry *entry)
-{
-   struct tu_cs *draw_state = &cmd->sub_cs;
-   const struct tu_program_descriptor_linkage *link =
-      &pipeline->program.link[type];
-   VkResult result;
-
-   unsigned num_desc = link->ssbo_map.num_desc + link->image_map.num_desc;
-
-   if (num_desc == 0) {
-      *entry = (struct tu_cs_entry) {};
-      return VK_SUCCESS;
-   }
-
-   struct ts_cs_memory ibo_const;
-   result = tu_cs_alloc(draw_state, num_desc,
-                        A6XX_TEX_CONST_DWORDS, &ibo_const);
-   if (result != VK_SUCCESS)
-      return result;
-
-   int ssbo_index = 0;
-   for (unsigned i = 0; i < link->ssbo_map.num; i++) {
-      for (int j = 0; j < link->ssbo_map.array_size[i]; j++) {
-         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
-
-         uint64_t va = buffer_ptr(descriptors_state, &link->ssbo_map, i, j);
-         /* We don't expose robustBufferAccess, so leave the size unlimited. */
-         uint32_t sz = MAX_STORAGE_BUFFER_RANGE / 4;
-
-         dst[0] = A6XX_IBO_0_FMT(FMT6_32_UINT);
-         dst[1] = A6XX_IBO_1_WIDTH(sz & MASK(15)) |
-                  A6XX_IBO_1_HEIGHT(sz >> 15);
-         dst[2] = A6XX_IBO_2_UNK4 |
-                  A6XX_IBO_2_UNK31 |
-                  A6XX_IBO_2_TYPE(A6XX_TEX_1D);
-         dst[3] = 0;
-         dst[4] = va;
-         dst[5] = va >> 32;
-         for (int i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
-            dst[i] = 0;
-
-         ssbo_index++;
-      }
-   }
+static struct tu_draw_state
+tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
+                        const struct tu_pipeline *pipeline)
+{
+   struct tu_cs cs;
+   tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);
 
-   for (unsigned i = 0; i < link->image_map.num; i++) {
-      for (int j = 0; j < link->image_map.array_size[i]; j++) {
-         uint32_t *dst = &ibo_const.map[A6XX_TEX_CONST_DWORDS * ssbo_index];
+   int binding;
+   for_each_bit(binding, pipeline->vi.bindings_used) {
+      const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
+      const VkDeviceSize offset = buf->bo_offset +
+         cmd->state.vb.offsets[binding];
 
-         write_image_ibo(cmd, dst,
-                         descriptors_state, &link->image_map, i, j);
+      tu_cs_emit_regs(&cs,
+                      A6XX_VFD_FETCH_BASE(binding, .bo = buf->bo, .bo_offset = offset),
+                      A6XX_VFD_FETCH_SIZE(binding, buf->size - offset));
 
-         ssbo_index++;
-      }
    }
 
-   assert(ssbo_index == num_desc);
+   cmd->vertex_bindings_set = pipeline->vi.bindings_used;
 
-   struct tu_cs cs;
-   result = tu_cs_begin_sub_stream(draw_state, 7, &cs);
-   if (result != VK_SUCCESS)
-      return result;
+   return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
+}
 
-   uint32_t opcode, ibo_addr_reg;
-   enum a6xx_state_block sb;
-   enum a6xx_state_type st;
+static uint64_t
+get_tess_param_bo_size(const struct tu_pipeline *pipeline,
+                       uint32_t draw_count)
+{
+   /* TODO: For indirect draws, we can't compute the BO size ahead of time.
+    * Still not sure what to do here, so just allocate a reasonably large
+    * BO and hope for the best for now. */
+   if (!draw_count)
+      draw_count = 2048;
+
+   /* the tess param BO is pipeline->tess.param_stride bytes per patch,
+    * which includes both the per-vertex outputs and the per-patch outputs;
+    * build_primitive_map in ir3 calculates this stride
+    */
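+   /* patch primtypes encode the patch size as an offset from DI_PT_PATCHES0;
+    * rounding num_patches down is fine since incomplete patches are discarded
+    */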
+   uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
+   uint32_t num_patches = draw_count / verts_per_patch;
+   return num_patches * pipeline->tess.param_stride;
+}
 
-   switch (type) {
-   case MESA_SHADER_FRAGMENT:
-      opcode = CP_LOAD_STATE6;
-      st = ST6_SHADER;
-      sb = SB6_IBO;
-      ibo_addr_reg = REG_A6XX_SP_IBO_LO;
+static uint64_t
+get_tess_factor_bo_size(const struct tu_pipeline *pipeline,
+                        uint32_t draw_count)
+{
+   /* TODO: For indirect draws, we can't compute the BO size ahead of time.
+    * Still not sure what to do here, so just allocate a reasonably large
+    * BO and hope for the best for now. */
+   if (!draw_count)
+      draw_count = 2048;
+
+   /* Each distinct patch gets its own tess factor output. */
+   uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
+   uint32_t num_patches = draw_count / verts_per_patch;
+   uint32_t factor_stride;
+   switch (pipeline->tess.patch_type) {
+   case IR3_TESS_ISOLINES:
+      factor_stride = 12;
+      break;
+   case IR3_TESS_TRIANGLES:
+      factor_stride = 20;
       break;
-   case MESA_SHADER_COMPUTE:
-      opcode = CP_LOAD_STATE6_FRAG;
-      st = ST6_IBO;
-      sb = SB6_CS_SHADER;
-      ibo_addr_reg = REG_A6XX_SP_CS_IBO_LO;
+   case IR3_TESS_QUADS:
+      factor_stride = 28;
       break;
    default:
-      unreachable("unsupported stage for ibos");
+      unreachable("bad tessmode");
    }
-
-   /* emit texture state: */
-   tu_cs_emit_pkt7(&cs, opcode, 3);
-   tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
-              CP_LOAD_STATE6_0_STATE_TYPE(st) |
-              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
-              CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
-              CP_LOAD_STATE6_0_NUM_UNIT(num_desc));
-   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
-
-   tu_cs_emit_pkt4(&cs, ibo_addr_reg, 2);
-   tu_cs_emit_qw(&cs, ibo_const.iova); /* SRC_ADDR_LO/HI */
-
-   *entry = tu_cs_end_sub_stream(draw_state, &cs);
-   return VK_SUCCESS;
+   return factor_stride * num_patches;
 }
 
-static void
-tu6_emit_streamout(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+static VkResult
+tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
+                     uint32_t draw_count,
+                     const struct tu_pipeline *pipeline,
+                     struct tu_draw_state *state,
+                     uint64_t *factor_iova)
 {
-   struct tu_streamout_state *tf = &cmd->state.pipeline->streamout;
-
-   for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
-      struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
-      if (!buf)
-         continue;
-
-      uint32_t offset;
-      offset = cmd->state.streamout_buf.offsets[i];
-
-      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_BASE(i, .bo = buf->bo,
-                                                     .bo_offset = buf->bo_offset));
-      tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_SIZE(i, buf->size));
-
-      if (cmd->state.streamout_reset & (1 << i)) {
-         offset *= tf->stride[i];
-
-         tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, offset));
-         cmd->state.streamout_reset &= ~(1  << i);
-      } else {
-         tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
-         tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
-                        CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
-                        CP_MEM_TO_REG_0_CNT(0));
-         tu_cs_emit_qw(cs, cmd->scratch_bo.iova +
-                           ctrl_offset(flush_base[i].offset));
-      }
+   struct tu_cs cs;
+   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 16, &cs);
+   if (result != VK_SUCCESS)
+      return result;
 
-      tu_cs_emit_regs(cs, A6XX_VPC_SO_FLUSH_BASE(i, .bo = &cmd->scratch_bo,
-                                                    .bo_offset =
-                                                       ctrl_offset(flush_base[i])));
-   }
+   uint64_t tess_factor_size = get_tess_factor_bo_size(pipeline, draw_count);
+   uint64_t tess_param_size = get_tess_param_bo_size(pipeline, draw_count);
+   uint64_t tess_bo_size = tess_factor_size + tess_param_size;
+   if (tess_bo_size > 0) {
+      struct tu_bo *tess_bo;
+      result = tu_get_scratch_bo(cmd->device, tess_bo_size, &tess_bo);
+      if (result != VK_SUCCESS)
+         return result;
 
-   if (cmd->state.streamout_enabled) {
-      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 12 + (2 * tf->prog_count));
-      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
-      tu_cs_emit(cs, tf->vpc_so_buf_cntl);
-      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(0));
-      tu_cs_emit(cs, tf->ncomp[0]);
-      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(1));
-      tu_cs_emit(cs, tf->ncomp[1]);
-      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(2));
-      tu_cs_emit(cs, tf->ncomp[2]);
-      tu_cs_emit(cs, REG_A6XX_VPC_SO_NCOMP(3));
-      tu_cs_emit(cs, tf->ncomp[3]);
-      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
-      tu_cs_emit(cs, A6XX_VPC_SO_CNTL_ENABLE);
-      for (unsigned i = 0; i < tf->prog_count; i++) {
-         tu_cs_emit(cs, REG_A6XX_VPC_SO_PROG);
-         tu_cs_emit(cs, tf->prog[i]);
-      }
-   } else {
-      tu_cs_emit_pkt7(cs, CP_CONTEXT_REG_BUNCH, 4);
-      tu_cs_emit(cs, REG_A6XX_VPC_SO_CNTL);
-      tu_cs_emit(cs, 0);
-      tu_cs_emit(cs, REG_A6XX_VPC_SO_BUF_CNTL);
-      tu_cs_emit(cs, 0);
-   }
+      tu_bo_list_add(&cmd->bo_list, tess_bo,
+            MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
+      uint64_t tess_factor_iova = tess_bo->iova;
+      uint64_t tess_param_iova = tess_factor_iova + tess_factor_size;
+
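+      /* emit one vec4 of constants to each of HS and DS: the tess param BO
+       * address in .xy and the tess factor BO address in .zw
+       */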
+      tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.hs_bo_regid) |
+            CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+            CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+            CP_LOAD_STATE6_0_STATE_BLOCK(SB6_HS_SHADER) |
+            CP_LOAD_STATE6_0_NUM_UNIT(1));
+      tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
+      tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
+      tu_cs_emit_qw(&cs, tess_param_iova);
+      tu_cs_emit_qw(&cs, tess_factor_iova);
+
+      tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.ds_bo_regid) |
+            CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+            CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+            CP_LOAD_STATE6_0_STATE_BLOCK(SB6_DS_SHADER) |
+            CP_LOAD_STATE6_0_NUM_UNIT(1));
+      tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
+      tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
+      tu_cs_emit_qw(&cs, tess_param_iova);
+      tu_cs_emit_qw(&cs, tess_factor_iova);
+
+      *factor_iova = tess_factor_iova;
+   }
+   *state = tu_cs_end_draw_state(&cmd->sub_cs, &cs);
+   return VK_SUCCESS;
 }
 
 static VkResult
-tu6_bind_draw_states(struct tu_cmd_buffer *cmd,
-                     struct tu_cs *cs,
-                     const struct tu_draw_info *draw)
+tu6_draw_common(struct tu_cmd_buffer *cmd,
+                struct tu_cs *cs,
+                bool indexed,
+                /* note: draw_count is 0 for indirect */
+                uint32_t draw_count)
 {
    const struct tu_pipeline *pipeline = cmd->state.pipeline;
-   const struct tu_dynamic_state *dynamic = &cmd->state.dynamic;
-   struct tu_draw_state_group draw_state_groups[TU_DRAW_STATE_COUNT];
-   uint32_t draw_state_group_count = 0;
    VkResult result;
 
    struct tu_descriptor_state *descriptors_state =
       &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
 
-   /* TODO lrz */
-
-   tu_cs_emit_regs(cs,
-                   A6XX_PC_PRIMITIVE_CNTL_0(.primitive_restart =
-                                            pipeline->ia.primitive_restart && draw->indexed));
-
-   if (cmd->state.dirty &
-          (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DYNAMIC_LINE_WIDTH) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_LINE_WIDTH)) {
-      tu6_emit_gras_su_cntl(cs, pipeline->rast.gras_su_cntl,
-                            dynamic->line_width);
-   }
-
-   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_COMPARE_MASK)) {
-      tu6_emit_stencil_compare_mask(cs, dynamic->stencil_compare_mask.front,
-                                    dynamic->stencil_compare_mask.back);
-   }
-
-   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_WRITE_MASK)) {
-      tu6_emit_stencil_write_mask(cs, dynamic->stencil_write_mask.front,
-                                  dynamic->stencil_write_mask.back);
-   }
-
-   if ((cmd->state.dirty & TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE) &&
-       (pipeline->dynamic_state.mask & TU_DYNAMIC_STENCIL_REFERENCE)) {
-      tu6_emit_stencil_reference(cs, dynamic->stencil_reference.front,
-                                 dynamic->stencil_reference.back);
-   }
-
-   if (cmd->state.dirty &
-       (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
-      for (uint32_t i = 0; i < pipeline->vi.count; i++) {
-         const uint32_t binding = pipeline->vi.bindings[i];
-         const uint32_t stride = pipeline->vi.strides[i];
-         const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
-         const VkDeviceSize offset = buf->bo_offset +
-                                     cmd->state.vb.offsets[binding] +
-                                     pipeline->vi.offsets[i];
-         const VkDeviceSize size =
-            offset < buf->bo->size ? buf->bo->size - offset : 0;
-
-         tu_cs_emit_regs(cs,
-                         A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
-                         A6XX_VFD_FETCH_SIZE(i, size),
-                         A6XX_VFD_FETCH_STRIDE(i, stride));
-      }
-   }
+   tu_emit_cache_flush_renderpass(cmd, cs);
 
-   if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_PROGRAM,
-            .enable_mask = ENABLE_DRAW,
-            .ib = pipeline->program.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_PROGRAM_BINNING,
-            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
-            .ib = pipeline->program.binning_state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_VI,
-            .enable_mask = ENABLE_DRAW,
-            .ib = pipeline->vi.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_VI_BINNING,
-            .enable_mask = CP_SET_DRAW_STATE__0_BINNING,
-            .ib = pipeline->vi.binning_state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_VP,
-            .enable_mask = ENABLE_ALL,
-            .ib = pipeline->vp.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_RAST,
-            .enable_mask = ENABLE_ALL,
-            .ib = pipeline->rast.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_DS,
-            .enable_mask = ENABLE_ALL,
-            .ib = pipeline->ds.state_ib,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_BLEND,
-            .enable_mask = ENABLE_ALL,
-            .ib = pipeline->blend.state_ib,
-         };
-   }
-
-   if (cmd->state.dirty &
-         (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_PUSH_CONSTANTS)) {
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_VS_CONST,
-            .enable_mask = ENABLE_ALL,
-            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX)
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_FS_CONST,
-            .enable_mask = ENABLE_DRAW,
-            .ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT)
-         };
-   }
-
-   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
-      tu6_emit_streamout(cmd, cs);
-
-   if (cmd->state.dirty &
-         (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_DESCRIPTOR_SETS)) {
-      struct tu_cs_entry vs_tex, fs_tex_sysmem, fs_tex_gmem, fs_ibo;
+   /* TODO lrz */
 
-      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
-                                 MESA_SHADER_VERTEX, &vs_tex, false);
+   tu_cs_emit_regs(cs, A6XX_PC_PRIMITIVE_CNTL_0(
+         .primitive_restart =
+               pipeline->ia.primitive_restart && indexed,
+         .tess_upper_left_domain_origin =
+               pipeline->tess.upper_left_domain_origin));
+
+   if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
+      cmd->state.shader_const[MESA_SHADER_VERTEX] =
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
+      cmd->state.shader_const[MESA_SHADER_TESS_CTRL] =
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_CTRL);
+      cmd->state.shader_const[MESA_SHADER_TESS_EVAL] =
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_EVAL);
+      cmd->state.shader_const[MESA_SHADER_GEOMETRY] =
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
+      cmd->state.shader_const[MESA_SHADER_FRAGMENT] =
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
+   }
+
+   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
+      cmd->state.vertex_buffers = tu6_emit_vertex_buffers(cmd, pipeline);
+
+   bool has_tess =
+         pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
+   struct tu_draw_state tess_consts = {};
+   if (has_tess) {
+      uint64_t tess_factor_iova = 0;
+
+      cmd->has_tess = true;
+      result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts, &tess_factor_iova);
       if (result != VK_SUCCESS)
          return result;
 
-      /* TODO: we could emit just one texture descriptor draw state when there
-       * are no input attachments, which is the most common case. We could
-       * also split out the sampler state, which doesn't change even for input
-       * attachments.
+      /* this sequence matches what the blob does before every tess draw:
+       * PC_TESSFACTOR_ADDR_LO is a non-context register and needs a WFI
+       * before it is written
        */
-      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
-                                 MESA_SHADER_FRAGMENT, &fs_tex_sysmem, true);
-      if (result != VK_SUCCESS)
-         return result;
-
-      result = tu6_emit_textures(cmd, pipeline, descriptors_state,
-                                 MESA_SHADER_FRAGMENT, &fs_tex_gmem, false);
-      if (result != VK_SUCCESS)
-         return result;
+      tu_cs_emit_wfi(cs);
 
-      result = tu6_emit_ibo(cmd, pipeline, descriptors_state,
-                            MESA_SHADER_FRAGMENT, &fs_ibo);
-      if (result != VK_SUCCESS)
-         return result;
+      tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
+      tu_cs_emit_qw(cs, tess_factor_iova);
 
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_VS_TEX,
-            .enable_mask = ENABLE_ALL,
-            .ib = vs_tex,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_FS_TEX_GMEM,
-            .enable_mask = CP_SET_DRAW_STATE__0_GMEM,
-            .ib = fs_tex_gmem,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_FS_TEX_SYSMEM,
-            .enable_mask = CP_SET_DRAW_STATE__0_SYSMEM,
-            .ib = fs_tex_sysmem,
-         };
-      draw_state_groups[draw_state_group_count++] =
-         (struct tu_draw_state_group) {
-            .id = TU_DRAW_STATE_FS_IBO,
-            .enable_mask = ENABLE_DRAW,
-            .ib = fs_ibo,
-         };
+      tu_cs_emit_pkt7(cs, CP_SET_SUBDRAW_SIZE, 1);
+      tu_cs_emit(cs, draw_count);
    }
 
-   struct tu_cs_entry vs_params;
-   result = tu6_emit_vs_params(cmd, draw, &vs_params);
-   if (result != VK_SUCCESS)
-      return result;
-
-   draw_state_groups[draw_state_group_count++] =
-      (struct tu_draw_state_group) {
-         .id = TU_DRAW_STATE_VS_PARAMS,
-         .enable_mask = ENABLE_ALL,
-         .ib = vs_params,
-      };
-
-   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_group_count);
-   for (uint32_t i = 0; i < draw_state_group_count; i++) {
-      const struct tu_draw_state_group *group = &draw_state_groups[i];
-      debug_assert((group->enable_mask & ~ENABLE_ALL) == 0);
-      uint32_t cp_set_draw_state =
-         CP_SET_DRAW_STATE__0_COUNT(group->ib.size / 4) |
-         group->enable_mask |
-         CP_SET_DRAW_STATE__0_GROUP_ID(group->id);
-      uint64_t iova;
-      if (group->ib.size) {
-         iova = group->ib.bo->iova + group->ib.offset;
-      } else {
-         cp_set_draw_state |= CP_SET_DRAW_STATE__0_DISABLE;
-         iova = 0;
+   /* for the first draw in a renderpass, re-emit all the draw states
+    *
+    * and if a draw-state disabling path (CmdClearAttachments 3D fallback) was
+    * used, then draw states must be re-emitted. note however that this only
+    * happens in the sysmem path, so it can be skipped for the gmem path (TODO)
+    *
+    * the two input attachment states are excluded because a secondary command
+    * buffer doesn't have a state ib to restore them, and not re-emitting them
+    * is OK since CmdClearAttachments won't disable/overwrite them
+    */
+   if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
+      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));
+
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
+
+      for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i,
+                               ((pipeline->dynamic_state_mask & BIT(i)) ?
+                                cmd->state.dynamic_state[i] :
+                                pipeline->dynamic_state[i]));
       }
+   } else {
 
-      tu_cs_emit(cs, cp_set_draw_state);
-      tu_cs_emit_qw(cs, iova);
+      /* emit only the draw states that were just updated
+       * note: eventually we want to avoid having to emit anything here
+       */
+      uint32_t draw_state_count =
+         has_tess +
+         ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 5 : 0) +
+         ((cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD) ? 1 : 0) +
+         ((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
+         1; /* vs_params */
+
+      tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);
+
+      /* We may need to re-emit tess consts if the current draw call is
+       * sufficiently larger than the last draw call. */
+      if (has_tess)
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
+      if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
+      }
+      if (cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD)
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
+      if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
+         tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
    }
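+
+   /* Sketch for reference (inferred from the open-coded path this change
+    * removes): each group written by tu_cs_emit_draw_state() is three dwords,
+    * a header plus the 64-bit state address, roughly:
+    *
+    *    tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
+    *                   enable_mask | CP_SET_DRAW_STATE__0_GROUP_ID(id));
+    *    tu_cs_emit_qw(cs, state.iova);
+    *
+    * which is why both CP_SET_DRAW_STATE packets above are sized as
+    * 3 * <group count>.
+    */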
 
    tu_cs_sanity_check(cs);
 
-   /* track BOs */
-   if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
-      for (uint32_t i = 0; i < MAX_VBS; i++) {
-         const struct tu_buffer *buf = cmd->state.vb.buffers[i];
-         if (buf)
-            tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
-      }
-   }
-   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
-      unsigned i;
-      for_each_bit(i, descriptors_state->valid) {
-         struct tu_descriptor_set *set = descriptors_state->sets[i];
-         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
-            if (set->descriptors[j]) {
-               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
-                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
-            }
-      }
-   }
-   if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS) {
-      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
-         const struct tu_buffer *buf = cmd->state.streamout_buf.buffers[i];
-         if (buf) {
-            tu_bo_list_add(&cmd->bo_list, buf->bo,
-                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
-         }
-      }
-   }
-
-   /* Fragment shader state overwrites compute shader state, so flag the
-    * compute pipeline for re-emit.
+   /* There are too many graphics dirty bits to list here, so just list the
+    * bits to preserve instead. The only things not emitted here are
+    * compute-related state.
     */
-   cmd->state.dirty = TU_CMD_DIRTY_COMPUTE_PIPELINE;
+   cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
    return VK_SUCCESS;
 }
 
-static void
-tu6_emit_draw_indirect(struct tu_cmd_buffer *cmd,
-                     struct tu_cs *cs,
-                     const struct tu_draw_info *draw)
+static uint32_t
+tu_draw_initiator(struct tu_cmd_buffer *cmd, enum pc_di_src_sel src_sel)
 {
-   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
-   bool has_gs = cmd->state.pipeline->active_stages &
-                 VK_SHADER_STAGE_GEOMETRY_BIT;
-
-   tu_cs_emit_regs(cs,
-                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
-                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
-
-   if (draw->indexed) {
-      const enum a4xx_index_size index_size =
-         tu6_index_size(cmd->state.index_type);
-      const uint32_t index_bytes =
-         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
-      const struct tu_buffer *index_buf = cmd->state.index_buffer;
-      unsigned max_indicies =
-         (index_buf->size - cmd->state.index_offset) / index_bytes;
-
-      const uint32_t cp_draw_indx =
-         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
-         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
-         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
-         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
-         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
-
-      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_INDIRECT, 6);
-      tu_cs_emit(cs, cp_draw_indx);
-      tu_cs_emit_qw(cs, index_buf->bo->iova + cmd->state.index_offset);
-      tu_cs_emit(cs, A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(max_indicies));
-      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
-   } else {
-      const uint32_t cp_draw_indx =
-         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
-         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
-         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
-         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
-
-      tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT, 3);
-      tu_cs_emit(cs, cp_draw_indx);
-      tu_cs_emit_qw(cs, draw->indirect->bo->iova + draw->indirect_offset);
+   const struct tu_pipeline *pipeline = cmd->state.pipeline;
+   uint32_t initiator =
+      CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(pipeline->ia.primtype) |
+      CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(src_sel) |
+      CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd->state.index_size) |
+      CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY);
+
+   if (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
+      initiator |= CP_DRAW_INDX_OFFSET_0_GS_ENABLE;
+
+   switch (pipeline->tess.patch_type) {
+   case IR3_TESS_TRIANGLES:
+      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_TRIANGLES) |
+                   CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
+      break;
+   case IR3_TESS_ISOLINES:
+      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_ISOLINES) |
+                   CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
+      break;
+   case IR3_TESS_NONE:
+      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS);
+      break;
+   case IR3_TESS_QUADS:
+      initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS) |
+                   CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
+      break;
    }
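+
+   /* note: with IR3_TESS_NONE the PATCH_TYPE field is still programmed (to
+    * TESS_QUADS) but TESS_ENABLE is left unset, so the value is presumably
+    * ignored by the hardware.
+    */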
-
-   tu_bo_list_add(&cmd->bo_list, draw->indirect->bo, MSM_SUBMIT_BO_READ);
+   return initiator;
 }
 
-static void
-tu6_emit_draw_direct(struct tu_cmd_buffer *cmd,
-                     struct tu_cs *cs,
-                     const struct tu_draw_info *draw)
+static uint32_t
+vs_params_offset(struct tu_cmd_buffer *cmd)
 {
+   const struct tu_program_descriptor_linkage *link =
+      &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
+   const struct ir3_const_state *const_state = &link->const_state;
 
-   const enum pc_di_primtype primtype = cmd->state.pipeline->ia.primtype;
-   bool has_gs = cmd->state.pipeline->active_stages &
-                 VK_SHADER_STAGE_GEOMETRY_BIT;
+   if (const_state->offsets.driver_param >= link->constlen)
+      return 0;
 
-   tu_cs_emit_regs(cs,
-                   A6XX_VFD_INDEX_OFFSET(draw->vertex_offset),
-                   A6XX_VFD_INSTANCE_START_OFFSET(draw->first_instance));
-
-   /* TODO hw binning */
-   if (draw->indexed) {
-      const enum a4xx_index_size index_size =
-         tu6_index_size(cmd->state.index_type);
-      const uint32_t index_bytes =
-         (cmd->state.index_type == VK_INDEX_TYPE_UINT32) ? 4 : 2;
-      const struct tu_buffer *buf = cmd->state.index_buffer;
-      const VkDeviceSize offset = buf->bo_offset + cmd->state.index_offset +
-                                  index_bytes * draw->first_index;
-      const uint32_t size = index_bytes * draw->count;
-
-      const uint32_t cp_draw_indx =
-         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
-         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
-         CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(index_size) |
-         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
-         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
-
-      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
-      tu_cs_emit(cs, cp_draw_indx);
-      tu_cs_emit(cs, draw->instance_count);
-      tu_cs_emit(cs, draw->count);
-      tu_cs_emit(cs, 0x0); /* XXX */
-      tu_cs_emit_qw(cs, buf->bo->iova + offset);
-      tu_cs_emit(cs, size);
-   } else {
-      const uint32_t cp_draw_indx =
-         CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
-         CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
-         CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
-         COND(has_gs, CP_DRAW_INDX_OFFSET_0_GS_ENABLE) | 0x2000;
-
-      tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
-      tu_cs_emit(cs, cp_draw_indx);
-      tu_cs_emit(cs, draw->instance_count);
-      tu_cs_emit(cs, draw->count);
-   }
+   /* this layout is required by CP_DRAW_INDIRECT_MULTI */
+   STATIC_ASSERT(IR3_DP_DRAWID == 0);
+   STATIC_ASSERT(IR3_DP_VTXID_BASE == 1);
+   STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
+
+   /* 0 means disabled for CP_DRAW_INDIRECT_MULTI */
+   assert(const_state->offsets.driver_param != 0);
+
+   return const_state->offsets.driver_param;
 }
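+
+/* Editorial note (an inference from the uses below): this offset does double
+ * duty. The direct-draw path writes vertex_offset/first_instance at
+ * DST_OFF(offset) via CP_LOAD_STATE6_GEOM, and the indirect paths pass the
+ * same value as A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF so the CP can patch
+ * those constants per draw; 0 disables that patching, hence the assert that
+ * driver_param != 0.
+ */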
 
-static void
-tu_draw(struct tu_cmd_buffer *cmd, const struct tu_draw_info *draw)
+static struct tu_draw_state
+tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
+                   uint32_t vertex_offset,
+                   uint32_t first_instance)
 {
-   struct tu_cs *cs = &cmd->draw_cs;
-   VkResult result;
+   uint32_t offset = vs_params_offset(cmd);
 
-   result = tu6_bind_draw_states(cmd, cs, draw);
+   struct tu_cs cs;
+   VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 3 + (offset ? 8 : 0), &cs);
    if (result != VK_SUCCESS) {
       cmd->record_result = result;
-      return;
+      return (struct tu_draw_state) {};
    }
 
-   if (draw->indirect)
-      tu6_emit_draw_indirect(cmd, cs, draw);
-   else
-      tu6_emit_draw_direct(cmd, cs, draw);
+   /* TODO: don't make a new draw state when it doesn't change */
 
-   if (cmd->state.streamout_enabled) {
-      for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
-         if (cmd->state.streamout_enabled & (1 << i))
-            tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
-      }
-   }
+   tu_cs_emit_regs(&cs,
+                   A6XX_VFD_INDEX_OFFSET(vertex_offset),
+                   A6XX_VFD_INSTANCE_START_OFFSET(first_instance));
 
-   cmd->wait_for_idle = true;
+   if (offset) {
+      tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
+      tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
+            CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+            CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
+            CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
+            CP_LOAD_STATE6_0_NUM_UNIT(1));
+      tu_cs_emit(&cs, 0);
+      tu_cs_emit(&cs, 0);
 
-   tu_cs_sanity_check(cs);
+      tu_cs_emit(&cs, 0);
+      tu_cs_emit(&cs, vertex_offset);
+      tu_cs_emit(&cs, first_instance);
+      tu_cs_emit(&cs, 0);
+   }
+
+   struct tu_cs_entry entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+   return (struct tu_draw_state) {entry.bo->iova + entry.offset, entry.size / 4};
 }
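+
+/* The returned state is bound as TU_DRAW_STATE_VS_PARAMS by the draw-state
+ * emission above; the empty state set by the indirect draw paths below
+ * presumably leaves the group disabled (the removed open-coded path used
+ * CP_SET_DRAW_STATE__0_DISABLE for zero-sized IBs).
+ */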
 
 void
@@ -3787,15 +3206,17 @@ tu_CmdDraw(VkCommandBuffer commandBuffer,
            uint32_t firstVertex,
            uint32_t firstInstance)
 {
-   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
-   struct tu_draw_info info = {};
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs *cs = &cmd->draw_cs;
 
-   info.count = vertexCount;
-   info.instance_count = instanceCount;
-   info.first_instance = firstInstance;
-   info.vertex_offset = firstVertex;
+   cmd->state.vs_params = tu6_emit_vs_params(cmd, firstVertex, firstInstance);
 
-   tu_draw(cmd_buffer, &info);
+   tu6_draw_common(cmd, cs, false, vertexCount);
+
+   tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
+   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
+   tu_cs_emit(cs, instanceCount);
+   tu_cs_emit(cs, vertexCount);
 }
 
 void
@@ -3806,17 +3227,34 @@ tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                   int32_t vertexOffset,
                   uint32_t firstInstance)
 {
-   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
-   struct tu_draw_info info = {};
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   struct tu_cs *cs = &cmd->draw_cs;
 
-   info.indexed = true;
-   info.count = indexCount;
-   info.instance_count = instanceCount;
-   info.first_index = firstIndex;
-   info.vertex_offset = vertexOffset;
-   info.first_instance = firstInstance;
+   cmd->state.vs_params = tu6_emit_vs_params(cmd, vertexOffset, firstInstance);
 
-   tu_draw(cmd_buffer, &info);
+   tu6_draw_common(cmd, cs, true, indexCount);
+
+   tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
+   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
+   tu_cs_emit(cs, instanceCount);
+   tu_cs_emit(cs, indexCount);
+   tu_cs_emit(cs, firstIndex);
+   tu_cs_emit_qw(cs, cmd->state.index_va);
+   tu_cs_emit(cs, cmd->state.max_index_count);
+}
+
+/* Various firmware bugs/inconsistencies mean that some indirect draw opcodes
+ * do not wait for WFI's to complete before executing. Add a WAIT_FOR_ME if
+ * one is pending before emitting these opcodes. This may result in a few
+ * extra WAIT_FOR_ME's with these opcodes, but the alternative would add
+ * unnecessary WAIT_FOR_ME's before draw opcodes that don't need them.
+ */
+static void
+draw_wfm(struct tu_cmd_buffer *cmd)
+{
+   cmd->state.renderpass_cache.flush_bits |=
+      cmd->state.renderpass_cache.pending_flush_bits & TU_CMD_FLAG_WAIT_FOR_ME;
+   cmd->state.renderpass_cache.pending_flush_bits &= ~TU_CMD_FLAG_WAIT_FOR_ME;
 }
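+
+/* Usage sketch (assuming tu6_draw_common() emits the renderpass cache
+ * flushes): promoting the pending WAIT_FOR_ME into flush_bits means the next
+ * draw emits CP_WAIT_FOR_ME before its indirect draw packet, e.g.:
+ *
+ *    draw_wfm(cmd);
+ *    tu6_draw_common(cmd, cs, false, 0);
+ *    tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 6);
+ */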
 
 void
@@ -3826,16 +3264,33 @@ tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                    uint32_t drawCount,
                    uint32_t stride)
 {
-   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
-   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
-   struct tu_draw_info info = {};
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
+   struct tu_cs *cs = &cmd->draw_cs;
 
-   info.count = drawCount;
-   info.indirect = buffer;
-   info.indirect_offset = offset;
-   info.stride = stride;
+   cmd->state.vs_params = (struct tu_draw_state) {};
+
+   /* The latest known a630_sqe.fw fails to wait for WFI before reading the
+    * indirect buffer when using CP_DRAW_INDIRECT_MULTI, so we have to fall
+    * back to CP_WAIT_FOR_ME, except on a650, which has fixed firmware.
+    *
+    * TODO: There may be a newer a630_sqe.fw released in the future which
+    * fixes this; if so, we should detect it and avoid this workaround.
+    */
+   if (cmd->device->physical_device->gpu_id != 650)
+      draw_wfm(cmd);
+
+   tu6_draw_common(cmd, cs, false, 0);
 
-   tu_draw(cmd_buffer, &info);
+   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 6);
+   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
+   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_NORMAL) |
+                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
+   tu_cs_emit(cs, drawCount);
+   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
+   tu_cs_emit(cs, stride);
+
+   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
 }
 
 void
@@ -3845,17 +3300,101 @@ tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                           uint32_t drawCount,
                           uint32_t stride)
 {
-   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
-   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
-   struct tu_draw_info info = {};
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
+   struct tu_cs *cs = &cmd->draw_cs;
 
-   info.indexed = true;
-   info.count = drawCount;
-   info.indirect = buffer;
-   info.indirect_offset = offset;
-   info.stride = stride;
+   cmd->state.vs_params = (struct tu_draw_state) {};
+
+   if (cmd->device->physical_device->gpu_id != 650)
+      draw_wfm(cmd);
 
-   tu_draw(cmd_buffer, &info);
+   tu6_draw_common(cmd, cs, true, 0);
+
+   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 9);
+   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
+   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDEXED) |
+                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
+   tu_cs_emit(cs, drawCount);
+   tu_cs_emit_qw(cs, cmd->state.index_va);
+   tu_cs_emit(cs, cmd->state.max_index_count);
+   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
+   tu_cs_emit(cs, stride);
+
+   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+}
+
+void
+tu_CmdDrawIndirectCount(VkCommandBuffer commandBuffer,
+                        VkBuffer _buffer,
+                        VkDeviceSize offset,
+                        VkBuffer countBuffer,
+                        VkDeviceSize countBufferOffset,
+                        uint32_t drawCount,
+                        uint32_t stride)
+{
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
+   TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
+   struct tu_cs *cs = &cmd->draw_cs;
+
+   cmd->state.vs_params = (struct tu_draw_state) {};
+
+   /* It turns out that the firmware we have for a650 only partially fixed the
+    * problem with CP_DRAW_INDIRECT_MULTI not waiting for WFI's to complete
+    * before reading indirect parameters. It waits for WFI's before reading
+    * the draw parameters, but after reading the indirect count :(.
+    */
+   draw_wfm(cmd);
+
+   tu6_draw_common(cmd, cs, false, 0);
+
+   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 8);
+   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
+   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT) |
+                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
+   tu_cs_emit(cs, drawCount);
+   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
+   tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
+   tu_cs_emit(cs, stride);
+
+   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+   tu_bo_list_add(&cmd->bo_list, count_buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+}
+
+void
+tu_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer,
+                               VkBuffer _buffer,
+                               VkDeviceSize offset,
+                               VkBuffer countBuffer,
+                               VkDeviceSize countBufferOffset,
+                               uint32_t drawCount,
+                               uint32_t stride)
+{
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   TU_FROM_HANDLE(tu_buffer, buf, _buffer);
+   TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
+   struct tu_cs *cs = &cmd->draw_cs;
+
+   cmd->state.vs_params = (struct tu_draw_state) {};
+
+   draw_wfm(cmd);
+
+   tu6_draw_common(cmd, cs, true, 0);
+
+   tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 11);
+   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
+   tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT_INDEXED) |
+                  A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
+   tu_cs_emit(cs, drawCount);
+   tu_cs_emit_qw(cs, cmd->state.index_va);
+   tu_cs_emit(cs, cmd->state.max_index_count);
+   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
+   tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
+   tu_cs_emit(cs, stride);
+
+   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+   tu_bo_list_add(&cmd->bo_list, count_buf->bo, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
 }
 
 void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
@@ -3866,18 +3405,29 @@ void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
                                     uint32_t counterOffset,
                                     uint32_t vertexStride)
 {
-   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
-   TU_FROM_HANDLE(tu_buffer, buffer, _counterBuffer);
+   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+   TU_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
+   struct tu_cs *cs = &cmd->draw_cs;
 
-   struct tu_draw_info info = {};
+   /* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
+    * Plus, for the common case where the counter buffer is written by
+    * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
+    * complete, which means we need a WAIT_FOR_ME anyway.
+    */
+   draw_wfm(cmd);
+
+   cmd->state.vs_params = tu6_emit_vs_params(cmd, 0, firstInstance);
 
-   info.instance_count = instanceCount;
-   info.first_instance = firstInstance;
-   info.streamout_buffer = buffer;
-   info.streamout_buffer_offset = counterBufferOffset;
-   info.stride = vertexStride;
+   tu6_draw_common(cmd, cs, false, 0);
 
-   tu_draw(cmd_buffer, &info);
+   tu_cs_emit_pkt7(cs, CP_DRAW_AUTO, 6);
+   tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_XFB));
+   tu_cs_emit(cs, instanceCount);
+   tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + counterBufferOffset);
+   tu_cs_emit(cs, counterOffset);
+   tu_cs_emit(cs, vertexStride);
+
+   tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
 }
 
 struct tu_dispatch_info
@@ -3954,55 +3504,22 @@ tu_dispatch(struct tu_cmd_buffer *cmd,
    struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
    struct tu_descriptor_state *descriptors_state =
       &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
-   VkResult result;
 
-   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
-      tu_cs_emit_ib(cs, &pipeline->program.state_ib);
-
-   struct tu_cs_entry ib;
+   /* TODO: We could probably flush less if we add a compute_flush_bits
+    * bitfield.
+    */
+   tu_emit_cache_flush(cmd, cs);
 
-   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
-   if (ib.size)
-      tu_cs_emit_ib(cs, &ib);
+   /* note: no reason to have this in a separate IB */
+   tu_cs_emit_state_ib(cs,
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE));
 
    tu_emit_compute_driver_params(cs, pipeline, info);
 
-   result = tu6_emit_textures(cmd, pipeline, descriptors_state,
-                              MESA_SHADER_COMPUTE, &ib, false);
-   if (result != VK_SUCCESS) {
-      cmd->record_result = result;
-      return;
-   }
-
-   if (ib.size)
-      tu_cs_emit_ib(cs, &ib);
-
-   result = tu6_emit_ibo(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE, &ib);
-   if (result != VK_SUCCESS) {
-      cmd->record_result = result;
-      return;
-   }
-
-   if (ib.size)
-      tu_cs_emit_ib(cs, &ib);
-
-   /* track BOs */
-   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
-      unsigned i;
-      for_each_bit(i, descriptors_state->valid) {
-         struct tu_descriptor_set *set = descriptors_state->sets[i];
-         for (unsigned j = 0; j < set->layout->buffer_count; ++j)
-            if (set->descriptors[j]) {
-               tu_bo_list_add(&cmd->bo_list, set->descriptors[j],
-                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
-            }
-      }
-   }
+   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD)
+      tu_cs_emit_state_ib(cs, pipeline->load_state);
 
-   /* Compute shader state overwrites fragment shader state, so we flag the
-    * graphics pipeline for re-emit.
-    */
-   cmd->state.dirty = TU_CMD_DIRTY_PIPELINE;
+   cmd->state.dirty &= ~TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
 
    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
@@ -4048,8 +3565,6 @@ tu_dispatch(struct tu_cmd_buffer *cmd,
    }
 
    tu_cs_emit_wfi(cs);
-
-   tu6_emit_cache_flush(cmd, cs);
 }
 
 void
@@ -4118,6 +3633,10 @@ tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
    tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
    tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
 
+   cmd_buffer->state.cache.pending_flush_bits |=
+      cmd_buffer->state.renderpass_cache.pending_flush_bits;
+   tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);
+
    cmd_buffer->state.pass = NULL;
    cmd_buffer->state.subpass = NULL;
    cmd_buffer->state.framebuffer = NULL;
@@ -4138,7 +3657,7 @@ struct tu_barrier_info
 };
 
 static void
-tu_barrier(struct tu_cmd_buffer *cmd_buffer,
+tu_barrier(struct tu_cmd_buffer *cmd,
            uint32_t memoryBarrierCount,
            const VkMemoryBarrier *pMemoryBarriers,
            uint32_t bufferMemoryBarrierCount,
@@ -4147,13 +3666,75 @@ tu_barrier(struct tu_cmd_buffer *cmd_buffer,
            const VkImageMemoryBarrier *pImageMemoryBarriers,
            const struct tu_barrier_info *info)
 {
+   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
+   VkAccessFlags srcAccessMask = 0;
+   VkAccessFlags dstAccessMask = 0;
+
+   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
+      srcAccessMask |= pMemoryBarriers[i].srcAccessMask;
+      dstAccessMask |= pMemoryBarriers[i].dstAccessMask;
+   }
+
+   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
+      srcAccessMask |= pBufferMemoryBarriers[i].srcAccessMask;
+      dstAccessMask |= pBufferMemoryBarriers[i].dstAccessMask;
+   }
+
+   enum tu_cmd_access_mask src_flags = 0;
+   enum tu_cmd_access_mask dst_flags = 0;
+
+   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
+      TU_FROM_HANDLE(tu_image, image, pImageMemoryBarriers[i].image);
+      VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
+      /* For non-linear images, PREINITIALIZED is the same as UNDEFINED */
+      if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
+          (image->tiling != VK_IMAGE_TILING_LINEAR &&
+           old_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
+         /* The underlying memory for this image may have been used earlier
+          * within the same queue submission for a different image, which
+          * means that there may be old, stale cache entries which are in the
+          * "wrong" location, which could cause problems later after writing
+          * to the image. We don't want these entries being flushed later and
+          * overwriting the actual image, so we need to flush the CCU.
+          */
+         src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+      }
+      srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
+      dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
+   }
+
+   /* Inside a renderpass, we don't know yet whether we'll be using sysmem,
+    * so we have to use the sysmem flushes.
+    */
+   bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
+      !cmd->state.pass;
+   src_flags |= vk2tu_access(srcAccessMask, gmem);
+   dst_flags |= vk2tu_access(dstAccessMask, gmem);
+
+   struct tu_cache_state *cache =
+      cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
+   tu_flush_for_access(cache, src_flags, dst_flags);
+
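+   /* Poll each event BO until it reads back the signaled value written by
+    * write_event() (1 for SetEvent), presumably re-checking after
+    * DELAY_LOOP_CYCLES on each miss.
+    */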
+   for (uint32_t i = 0; i < info->eventCount; i++) {
+      TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);
+
+      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
+
+      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
+      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
+                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
+      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
+      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
+      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
+      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
+   }
 }
 
 void
 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                       VkPipelineStageFlags srcStageMask,
-                      VkPipelineStageFlags destStageMask,
-                      VkBool32 byRegion,
+                      VkPipelineStageFlags dstStageMask,
+                      VkDependencyFlags dependencyFlags,
                       uint32_t memoryBarrierCount,
                       const VkMemoryBarrier *pMemoryBarriers,
                       uint32_t bufferMemoryBarrierCount,
@@ -4174,17 +3755,36 @@ tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
 }
 
 static void
-write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
+write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
+            VkPipelineStageFlags stageMask, unsigned value)
 {
    struct tu_cs *cs = &cmd->cs;
 
-   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
+   /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
+   assert(!cmd->state.pass);
 
-   /* TODO: any flush required before/after ? */
+   tu_emit_cache_flush(cmd, cs);
 
-   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
-   tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
-   tu_cs_emit(cs, value);
+   tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
+
+   /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
+    * read by the CP, so the draw indirect stage counts as top-of-pipe too.
+    */
+   VkPipelineStageFlags top_of_pipe_flags =
+      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
+      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+
+   if (!(stageMask & ~top_of_pipe_flags)) {
+      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
+      tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
+      tu_cs_emit(cs, value);
+   } else {
+      /* Use a RB_DONE_TS event to wait for everything to complete. */
+      tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
+      tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
+      tu_cs_emit_qw(cs, event->bo.iova);
+      tu_cs_emit(cs, value);
+   }
 }
 
 void
@@ -4195,7 +3795,7 @@ tu_CmdSetEvent(VkCommandBuffer commandBuffer,
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    TU_FROM_HANDLE(tu_event, event, _event);
 
-   write_event(cmd, event, 1);
+   write_event(cmd, event, stageMask, 1);
 }
 
 void
@@ -4206,7 +3806,7 @@ tu_CmdResetEvent(VkCommandBuffer commandBuffer,
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
    TU_FROM_HANDLE(tu_event, event, _event);
 
-   write_event(cmd, event, 0);
+   write_event(cmd, event, stageMask, 0);
 }
 
 void
@@ -4223,23 +3823,15 @@ tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                  const VkImageMemoryBarrier *pImageMemoryBarriers)
 {
    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
-   struct tu_cs *cs = &cmd->cs;
-
-   /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */
-
-   for (uint32_t i = 0; i < eventCount; i++) {
-      TU_FROM_HANDLE(tu_event, event, pEvents[i]);
+   struct tu_barrier_info info;
 
-      tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
+   info.eventCount = eventCount;
+   info.pEvents = pEvents;
+   info.srcStageMask = 0;
 
-      tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
-      tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
-                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
-      tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
-      tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
-      tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
-      tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
-   }
+   tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
+              bufferMemoryBarrierCount, pBufferMemoryBarriers,
+              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
 }
 
 void