freedreno/regs: update a6xx VPC regs
diff --git a/src/freedreno/vulkan/tu_cmd_buffer.c b/src/freedreno/vulkan/tu_cmd_buffer.c
index 62751ba4ccb695916336c78d3ec0fce7d8aa5aa5..c7c6df49a3fb1562e27a7ecc9071db246b80b788 100644
--- a/src/freedreno/vulkan/tu_cmd_buffer.c
+++ b/src/freedreno/vulkan/tu_cmd_buffer.c
@@ -458,8 +458,8 @@ tu6_emit_window_scissor(struct tu_cs *cs,
                    A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
 
    tu_cs_emit_regs(cs,
-                   A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
-                   A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
+                   A6XX_GRAS_2D_RESOLVE_CNTL_1(.x = x1, .y = y1),
+                   A6XX_GRAS_2D_RESOLVE_CNTL_2(.x = x2, .y = y2));
 }
 
 void
@@ -512,6 +512,19 @@ tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
       break;
    }
 
+   /* We need to reload the descriptors every time the descriptor sets
+    * change. However, the commands we send only depend on the pipeline
+    * because the whole point is to cache descriptors which are used by the
+    * pipeline. There's a problem here, in that the firmware has an
+    * "optimization" which skips executing groups that are set to the same
+    * value as the last draw. This means that if the descriptor sets change
+    * but not the pipeline, we'd try to re-execute the same buffer which
+    * the firmware would ignore and we wouldn't pre-load the new
+    * descriptors. Set the DIRTY bit to avoid this optimization.
+    */
+   if (id == TU_DRAW_STATE_DESC_SETS_LOAD)
+      enable_mask |= CP_SET_DRAW_STATE__0_DIRTY;
+
    tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
                   enable_mask |
                   CP_SET_DRAW_STATE__0_GROUP_ID(id) |
@@ -519,16 +532,6 @@ tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
    tu_cs_emit_qw(cs, state.iova);
 }
 
-/* note: get rid of this eventually */
-static void
-tu_cs_emit_sds_ib(struct tu_cs *cs, uint32_t id, struct tu_cs_entry entry)
-{
-   tu_cs_emit_draw_state(cs, id, (struct tu_draw_state) {
-      .iova = entry.size ? entry.bo->iova + entry.offset : 0,
-      .size = entry.size / 4,
-   });
-}
-
 static bool
 use_hw_binning(struct tu_cmd_buffer *cmd)
 {
@@ -589,8 +592,7 @@ tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
    tu6_emit_window_scissor(cs, x1, y1, x2, y2);
    tu6_emit_window_offset(cs, x1, y1);
 
-   tu_cs_emit_regs(cs,
-                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));
+   tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
 
    if (use_hw_binning(cmd)) {
       tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
@@ -719,7 +721,19 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
 
    tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
+   tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
+         .vs_state = true,
+         .hs_state = true,
+         .ds_state = true,
+         .gs_state = true,
+         .fs_state = true,
+         .cs_state = true,
+         .gfx_ibo = true,
+         .cs_ibo = true,
+         .gfx_shared_const = true,
+         .cs_shared_const = true,
+         .gfx_bindless = 0x1f,
+         .cs_bindless = 0x1f));
 
    tu_cs_emit_regs(cs,
                    A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
@@ -739,14 +753,13 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BB11, 0);
+   tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_SHARED_CONSTS, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
    tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AB00, 0x5);
-   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_GS_SIV_CNTL, 0x0000ffff);
 
    /* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
    tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
@@ -769,15 +782,12 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236,
-                        A6XX_VPC_UNKNOWN_9236_POINT_COORD_INVERT(0));
+   tu_cs_emit_regs(cs, A6XX_VPC_POINT_COORD_INVERT(false));
    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
 
-   tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
-                        A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
+   tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
 
    tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
@@ -791,7 +801,6 @@ tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);
 
    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
-   tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_809B, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
@@ -1010,10 +1019,9 @@ tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_cs_emit(cs, 0x0);
 }
 
-static void
+static struct tu_draw_state
 tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
                           const struct tu_subpass *subpass,
-                          struct tu_cs_entry *ib,
                           bool gmem)
 {
    /* note: we can probably emit input attachments just once for the whole
@@ -1027,7 +1035,7 @@ tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
     */
 
    if (!subpass->input_count)
-      return;
+      return (struct tu_draw_state) {};
 
    struct tu_cs_memory texture;
    VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
@@ -1078,7 +1086,7 @@ tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
    }
 
    struct tu_cs cs;
-   tu_cs_begin_sub_stream(&cmd->sub_cs, 9, &cs);
+   struct tu_draw_state ds = tu_cs_draw_state(&cmd->sub_cs, &cs, 9);
 
    tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
    tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
@@ -1093,7 +1101,9 @@ tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
 
    tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));
 
-   *ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+   assert(cs.cur == cs.end); /* validate draw state size */
+
+   return ds;
 }
 
 static void
@@ -1101,12 +1111,11 @@ tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *sub
 {
    struct tu_cs *cs = &cmd->draw_cs;
 
-   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_gmem_ib, true);
-   tu_emit_input_attachments(cmd, subpass, &cmd->state.ia_sysmem_ib, false);
-
    tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
-   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM, cmd->state.ia_gmem_ib);
-   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM, cmd->state.ia_sysmem_ib);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
+                         tu_emit_input_attachments(cmd, subpass, true));
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,
+                         tu_emit_input_attachments(cmd, subpass, false));
 }
 
 static void
@@ -1159,8 +1168,7 @@ tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
 
    /* enable stream-out, with sysmem there is only one pass: */
-   tu_cs_emit_regs(cs,
-                   A6XX_VPC_SO_OVERRIDE(.so_disable = false));
+   tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
 
    tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
    tu_cs_emit(cs, 0x1);
@@ -1206,7 +1214,7 @@ tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
    const struct tu_framebuffer *fb = cmd->state.framebuffer;
    if (use_hw_binning(cmd)) {
       /* enable stream-out during binning pass: */
-      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
+      tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
 
       tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
                         A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
@@ -1216,7 +1224,7 @@ tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
       tu6_emit_binning_pass(cmd, cs);
 
       /* and disable stream-out for draw pass: */
-      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=true));
+      tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
 
       tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
                         A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
@@ -1232,7 +1240,7 @@ tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
       tu_cs_emit(cs, 0x1);
    } else {
       /* no binning pass, so enable stream-out for draw pass: */
-      tu_cs_emit_regs(cs, A6XX_VPC_SO_OVERRIDE(.so_disable=false));
+      tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
 
       tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
    }
@@ -1333,12 +1341,12 @@ tu_create_cmd_buffer(struct tu_device *device,
                      VkCommandBuffer *pCommandBuffer)
 {
    struct tu_cmd_buffer *cmd_buffer;
-   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
-                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+   cmd_buffer = vk_object_zalloc(&device->vk, NULL, sizeof(*cmd_buffer),
+                                 VK_OBJECT_TYPE_COMMAND_BUFFER);
    if (cmd_buffer == NULL)
       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
    cmd_buffer->device = device;
    cmd_buffer->pool = pool;
    cmd_buffer->level = level;
@@ -1379,7 +1387,7 @@ tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
    tu_cs_finish(&cmd_buffer->sub_cs);
 
    tu_bo_list_destroy(&cmd_buffer->bo_list);
-   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+   vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
 }
 
 static VkResult
@@ -1422,7 +1430,6 @@ tu_AllocateCommandBuffers(VkDevice _device,
          list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
 
          result = tu_reset_cmd_buffer(cmd_buffer);
-         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
          cmd_buffer->level = pAllocateInfo->level;
 
          pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
@@ -1688,9 +1695,9 @@ tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
    }
    assert(dyn_idx == dynamicOffsetCount);
 
-   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_update_value;
+   uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_invalidate_value;
    uint64_t addr[MAX_SETS + 1] = {};
-   struct tu_cs cs;
+   struct tu_cs *cs, state_cs;
 
    for (uint32_t i = 0; i < MAX_SETS; i++) {
       struct tu_descriptor_set *set = descriptors_state->sets[i];
@@ -1713,37 +1720,32 @@ tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
       sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
       hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
-      hlsq_update_value = 0x7c000;
+      hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(0x1f);
 
-      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS | TU_CMD_DIRTY_SHADER_CONSTS;
+      cmd->state.desc_sets = tu_cs_draw_state(&cmd->sub_cs, &state_cs, 24);
+      cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
+      cs = &state_cs;
    } else {
       assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);
 
       sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
       hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
-      hlsq_update_value = 0x3e00;
+      hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(0x1f);
 
-      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS;
+      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
+      cs = &cmd->cs;
    }
 
-   tu_cs_begin_sub_stream(&cmd->sub_cs, 24, &cs);
-
-   tu_cs_emit_pkt4(&cs, sp_bindless_base_reg, 10);
-   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
-   tu_cs_emit_pkt4(&cs, hlsq_bindless_base_reg, 10);
-   tu_cs_emit_array(&cs, (const uint32_t*) addr, 10);
-   tu_cs_emit_regs(&cs, A6XX_HLSQ_UPDATE_CNTL(.dword = hlsq_update_value));
+   tu_cs_emit_pkt4(cs, sp_bindless_base_reg, 10);
+   tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
+   tu_cs_emit_pkt4(cs, hlsq_bindless_base_reg, 10);
+   tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
+   tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(.dword = hlsq_invalidate_value));
 
-   struct tu_cs_entry ib = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
+      assert(cs->cur == cs->end); /* validate draw state size */
       tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
-      tu_cs_emit_sds_ib(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, ib);
-      cmd->state.desc_sets_ib = ib;
-   } else {
-      /* note: for compute we could emit directly, instead of a CP_INDIRECT
-       * however, the blob uses draw states for compute
-       */
-      tu_cs_emit_ib(&cmd->cs, &ib);
+      tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
    }
 }
 
@@ -1978,18 +1980,10 @@ tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
 static struct tu_cs
 tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
 {
-   struct tu_cs_memory memory;
    struct tu_cs cs;
 
-   /* TODO: share this logic with tu_pipeline_static_state */
-   tu_cs_alloc(&cmd->sub_cs, size, 1, &memory);
-   tu_cs_init_external(&cs, memory.map, memory.map + size);
-   tu_cs_begin(&cs);
-   tu_cs_reserve_space(&cs, size);
-
    assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
-   cmd->state.dynamic_state[id].iova = memory.iova;
-   cmd->state.dynamic_state[id].size = size;
+   cmd->state.dynamic_state[id] = tu_cs_draw_state(&cmd->sub_cs, &cs, size);
 
    tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
    tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);
@@ -2012,28 +2006,27 @@ tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
 
    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
       cmd->state.compute_pipeline = pipeline;
-      cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_PIPELINE;
+      tu_cs_emit_state_ib(&cmd->cs, pipeline->program.state);
       return;
    }
 
    assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);
 
    cmd->state.pipeline = pipeline;
-   cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
+   cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
 
    struct tu_cs *cs = &cmd->draw_cs;
    uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);
    uint32_t i;
 
    tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (7 + util_bitcount(mask)));
-   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
-   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
-   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
-   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
-   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
-   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
-   tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
-
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
+   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
    for_each_bit(i, mask)
       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);
 
@@ -2044,10 +2037,6 @@ tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
    if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
       cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
 
-   /* If the pipeline needs a dynamic descriptor, re-emit descriptor sets */
-   if (pipeline->layout->dynamic_offset_count)
-      cmd->state.dirty |= TU_CMD_DIRTY_DESCRIPTOR_SETS;
-
    /* dynamic linewidth state depends on the pipeline state's gras_su_cntl,
     * so the dynamic state ib must be updated when the pipeline changes
     */
@@ -2481,15 +2470,15 @@ tu_CreateCommandPool(VkDevice _device,
    TU_FROM_HANDLE(tu_device, device, _device);
    struct tu_cmd_pool *pool;
 
-   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
-                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
+                          VK_OBJECT_TYPE_COMMAND_POOL);
    if (pool == NULL)
       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
    if (pAllocator)
       pool->alloc = *pAllocator;
    else
-      pool->alloc = device->alloc;
+      pool->alloc = device->vk.alloc;
 
    list_inithead(&pool->cmd_buffers);
    list_inithead(&pool->free_cmd_buffers);
@@ -2524,7 +2513,7 @@ tu_DestroyCommandPool(VkDevice _device,
       tu_cmd_buffer_destroy(cmd_buffer);
    }
 
-   vk_free2(&device->alloc, pAllocator, pool);
+   vk_object_free(&device->vk, pAllocator, pool);
 }
 
 VkResult
@@ -2764,7 +2753,7 @@ tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
    }
 }
 
-static struct tu_cs_entry
+static struct tu_draw_state
 tu6_emit_consts(struct tu_cmd_buffer *cmd,
                 const struct tu_pipeline *pipeline,
                 struct tu_descriptor_state *descriptors_state,
@@ -2775,10 +2764,10 @@ tu6_emit_consts(struct tu_cmd_buffer *cmd,
 
    tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
 
-   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+   return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
 }
 
-static struct tu_cs_entry
+static struct tu_draw_state
 tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
                         const struct tu_pipeline *pipeline)
 {
@@ -2799,7 +2788,7 @@ tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
 
    cmd->vertex_bindings_set = pipeline->vi.bindings_used;
 
-   return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+   return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
 }
 
 static uint64_t
@@ -2855,7 +2844,7 @@ static VkResult
 tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
                      uint32_t draw_count,
                      const struct tu_pipeline *pipeline,
-                     struct tu_cs_entry *entry)
+                     struct tu_draw_state *state)
 {
    struct tu_cs cs;
    VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 20, &cs);
@@ -2907,7 +2896,7 @@ tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
        * but it requires a bit more indirection (SS6_INDIRECT for consts). */
       tu_cs_emit_wfi(&cs);
    }
-   *entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+   *state = tu_cs_end_draw_state(&cmd->sub_cs, &cs);
    return VK_SUCCESS;
 }
 
@@ -2935,52 +2924,24 @@ tu6_draw_common(struct tu_cmd_buffer *cmd,
                pipeline->tess.upper_left_domain_origin));
 
    if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
-      cmd->state.shader_const_ib[MESA_SHADER_VERTEX] =
+      cmd->state.shader_const[MESA_SHADER_VERTEX] =
          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
-      cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL] =
+      cmd->state.shader_const[MESA_SHADER_TESS_CTRL] =
          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_CTRL);
-      cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL] =
+      cmd->state.shader_const[MESA_SHADER_TESS_EVAL] =
          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_EVAL);
-      cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY] =
+      cmd->state.shader_const[MESA_SHADER_GEOMETRY] =
          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
-      cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT] =
+      cmd->state.shader_const[MESA_SHADER_FRAGMENT] =
          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
    }
 
-   if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
-      /* We need to reload the descriptors every time the descriptor sets
-       * change. However, the commands we send only depend on the pipeline
-       * because the whole point is to cache descriptors which are used by the
-       * pipeline. There's a problem here, in that the firmware has an
-       * "optimization" which skips executing groups that are set to the same
-       * value as the last draw. This means that if the descriptor sets change
-       * but not the pipeline, we'd try to re-execute the same buffer which
-       * the firmware would ignore and we wouldn't pre-load the new
-       * descriptors. The blob seems to re-emit the LOAD_STATE group whenever
-       * the descriptor sets change, which we emulate here by copying the
-       * pre-prepared buffer.
-       */
-      const struct tu_cs_entry *load_entry = &pipeline->load_state.state_ib;
-      if (load_entry->size > 0) {
-         struct tu_cs load_cs;
-         result = tu_cs_begin_sub_stream(&cmd->sub_cs, load_entry->size, &load_cs);
-         if (result != VK_SUCCESS)
-            return result;
-         tu_cs_emit_array(&load_cs,
-                          (uint32_t *)((char  *)load_entry->bo->map + load_entry->offset),
-                          load_entry->size / 4);
-         cmd->state.desc_sets_load_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &load_cs);
-      } else {
-         cmd->state.desc_sets_load_ib.size = 0;
-      }
-   }
-
    if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
-      cmd->state.vertex_buffers_ib = tu6_emit_vertex_buffers(cmd, pipeline);
+      cmd->state.vertex_buffers = tu6_emit_vertex_buffers(cmd, pipeline);
 
    bool has_tess =
          pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
-   struct tu_cs_entry tess_consts = {};
+   struct tu_draw_state tess_consts = {};
    if (has_tess) {
       cmd->has_tess = true;
       result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts);
@@ -3001,22 +2962,22 @@ tu6_draw_common(struct tu_cmd_buffer *cmd,
    if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
       tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));
 
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state_ib);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state_ib);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI, pipeline->vi.state_ib);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state_ib);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_RAST, pipeline->rast.state_ib);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS, pipeline->ds.state_ib);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_BLEND, pipeline->blend.state_ib);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets_ib);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
-      tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS, pipeline->ds_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
+      tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
 
       for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
@@ -3033,7 +2994,7 @@ tu6_draw_common(struct tu_cmd_buffer *cmd,
       uint32_t draw_state_count =
          has_tess +
          ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 5 : 0) +
-         ((cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) ? 1 : 0) +
+         ((cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD) ? 1 : 0) +
          ((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
          1; /* vs_params */
 
@@ -3042,18 +3003,18 @@ tu6_draw_common(struct tu_cmd_buffer *cmd,
          /* We may need to re-emit tess consts if the current draw call is
           * sufficiently larger than the last draw call. */
          if (has_tess)
-            tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_TESS, tess_consts);
+            tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
          if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
-            tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const_ib[MESA_SHADER_VERTEX]);
-            tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_CTRL]);
-            tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const_ib[MESA_SHADER_TESS_EVAL]);
-            tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const_ib[MESA_SHADER_GEOMETRY]);
-            tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const_ib[MESA_SHADER_FRAGMENT]);
+            tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
+            tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
+            tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
+            tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
+            tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
          }
-         if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS)
-            tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_DESC_SETS_LOAD, cmd->state.desc_sets_load_ib);
+         if (cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD)
+            tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
          if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
-            tu_cs_emit_sds_ib(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers_ib);
+            tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
    }
 
@@ -3063,7 +3024,7 @@ tu6_draw_common(struct tu_cmd_buffer *cmd,
     * bits to preserve instead. The only things not emitted here are
     * compute-related state.
     */
-   cmd->state.dirty &= (TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
+   cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
    return VK_SUCCESS;
 }
 
@@ -3382,24 +3343,16 @@ tu_dispatch(struct tu_cmd_buffer *cmd,
     */
    tu_emit_cache_flush(cmd, cs);
 
-   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
-      tu_cs_emit_ib(cs, &pipeline->program.state_ib);
-
-   struct tu_cs_entry ib;
-
-   ib = tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE);
-   if (ib.size)
-      tu_cs_emit_ib(cs, &ib);
+   /* note: no reason to have this in a separate IB */
+   tu_cs_emit_state_ib(cs,
+         tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE));
 
    tu_emit_compute_driver_params(cs, pipeline, info);
 
-   if ((cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) &&
-       pipeline->load_state.state_ib.size > 0) {
-      tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
-   }
+   if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD)
+      tu_cs_emit_state_ib(cs, pipeline->load_state);
 
-   cmd->state.dirty &=
-      ~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
+   cmd->state.dirty &= ~TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
 
    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));