anv: Use isl_tiling_flags in anv_image_create_info
diff --git a/src/vulkan/anv_cmd_buffer.c b/src/vulkan/anv_cmd_buffer.c
index 28a3af7a9b8bc7cc4ec7ad4cef62e5b853719017..ee437aa6330e15060b90b3a485c20a87f2315080 100644
 
 /** \file anv_cmd_buffer.c
  *
- * This file contains functions related to anv_cmd_buffer as a data
- * structure.  This involves everything required to create and destroy
- * the actual batch buffers as well as link them together and handle
- * relocations and surface state.  It specifically does *not* contain any
- * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
+ * This file contains everything required for emitting commands into a
+ * command buffer.  This includes implementations of most of the vkCmd*
+ * entrypoints.  This file is concerned entirely with state emission and
+ * not with the command buffer data structure itself.  As far as this file
+ * is concerned, most of anv_cmd_buffer is magic.
  */
 
-/*-----------------------------------------------------------------------*
- * Functions related to anv_reloc_list
- *-----------------------------------------------------------------------*/
+/* TODO: These are taken from GLES.  We should check the Vulkan spec */
+const struct anv_dynamic_state default_dynamic_state = {
+   .viewport = {
+      .count = 0,
+   },
+   .scissor = {
+      .count = 0,
+   },
+   .line_width = 1.0f,
+   .depth_bias = {
+      .bias = 0.0f,
+      .clamp = 0.0f,
+      .slope = 0.0f,
+   },
+   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
+   .depth_bounds = {
+      .min = 0.0f,
+      .max = 1.0f,
+   },
+   .stencil_compare_mask = {
+      .front = ~0u,
+      .back = ~0u,
+   },
+   .stencil_write_mask = {
+      .front = ~0u,
+      .back = ~0u,
+   },
+   .stencil_reference = {
+      .front = 0u,
+      .back = 0u,
+   },
+};
 
-static VkResult
-anv_reloc_list_init_clone(struct anv_reloc_list *list,
-                          struct anv_device *device,
-                          const struct anv_reloc_list *other_list)
-{
-   if (other_list) {
-      list->num_relocs = other_list->num_relocs;
-      list->array_length = other_list->array_length;
-   } else {
-      list->num_relocs = 0;
-      list->array_length = 256;
+void
+anv_dynamic_state_copy(struct anv_dynamic_state *dest,
+                       const struct anv_dynamic_state *src,
+                       uint32_t copy_mask)
+{
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+      dest->viewport.count = src->viewport.count;
+      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
+                   src->viewport.count);
    }
 
-   list->relocs =
-      anv_device_alloc(device, list->array_length * sizeof(*list->relocs), 8,
-                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+      dest->scissor.count = src->scissor.count;
+      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
+                   src->scissor.count);
+   }
 
-   if (list->relocs == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
+      dest->line_width = src->line_width;
 
-   list->reloc_bos =
-      anv_device_alloc(device, list->array_length * sizeof(*list->reloc_bos), 8,
-                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
+      dest->depth_bias = src->depth_bias;
 
-   if (list->reloc_bos == NULL) {
-      anv_device_free(device, list->relocs);
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-   }
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
+      typed_memcpy(dest->blend_constants, src->blend_constants, 4);
 
-   if (other_list) {
-      memcpy(list->relocs, other_list->relocs,
-             list->array_length * sizeof(*list->relocs));
-      memcpy(list->reloc_bos, other_list->reloc_bos,
-             list->array_length * sizeof(*list->reloc_bos));
-   }
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
+      dest->depth_bounds = src->depth_bounds;
 
-   return VK_SUCCESS;
-}
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
+      dest->stencil_compare_mask = src->stencil_compare_mask;
 
-VkResult
-anv_reloc_list_init(struct anv_reloc_list *list, struct anv_device *device)
-{
-   return anv_reloc_list_init_clone(list, device, NULL);
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
+      dest->stencil_write_mask = src->stencil_write_mask;
+
+   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
+      dest->stencil_reference = src->stencil_reference;
 }
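
Editor's note: copy_mask is a bitmask built from VK_DYNAMIC_STATE_* enum
values, matching the (1 << VK_DYNAMIC_STATE_*) tests above. A minimal sketch
of a caller, modeled on the anv_CmdBindPipeline hunk later in this patch:

    /* Sketch: copy only the viewport and scissor state baked into the
     * pipeline; all names below are taken from this patch. */
    uint32_t copy_mask = (1 << VK_DYNAMIC_STATE_VIEWPORT) |
                         (1 << VK_DYNAMIC_STATE_SCISSOR);
    anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                           &pipeline->dynamic_state, copy_mask);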
 
-void
-anv_reloc_list_finish(struct anv_reloc_list *list, struct anv_device *device)
+static void
+anv_cmd_state_init(struct anv_cmd_state *state)
 {
-   anv_device_free(device, list->relocs);
-   anv_device_free(device, list->reloc_bos);
+   memset(&state->descriptors, 0, sizeof(state->descriptors));
+   memset(&state->push_constants, 0, sizeof(state->push_constants));
+
+   state->dirty = ~0;
+   state->vb_dirty = 0;
+   state->descriptors_dirty = 0;
+   state->push_constants_dirty = 0;
+   state->pipeline = NULL;
+   state->restart_index = UINT32_MAX;
+   state->dynamic = default_dynamic_state;
+
+   state->gen7.index_buffer = NULL;
 }
 
 static VkResult
-anv_reloc_list_grow(struct anv_reloc_list *list, struct anv_device *device,
-                    size_t num_additional_relocs)
+anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
+                                          gl_shader_stage stage, uint32_t size)
 {
-   if (list->num_relocs + num_additional_relocs <= list->array_length)
-      return VK_SUCCESS;
+   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];
 
-   size_t new_length = list->array_length * 2;
-   while (new_length < list->num_relocs + num_additional_relocs)
-      new_length *= 2;
+   if (*ptr == NULL) {
+      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (*ptr == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   } else if ((*ptr)->size < size) {
+      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (*ptr == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+   (*ptr)->size = size;
 
-   struct drm_i915_gem_relocation_entry *new_relocs =
-      anv_device_alloc(device, new_length * sizeof(*list->relocs), 8,
-                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);
-   if (new_relocs == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   return VK_SUCCESS;
+}
+
+#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
+   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
+      (offsetof(struct anv_push_constants, field) + \
+       sizeof(cmd_buffer->state.push_constants[0]->field)))
+
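Editor's note: the macro sizes the allocation so it reaches the end of the
named field. One invocation expands roughly as follows (illustrative
expansion, not part of the patch):

    /* anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, dynamic)
     * becomes: */
    anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage,
       offsetof(struct anv_push_constants, dynamic) +
       sizeof(cmd_buffer->state.push_constants[0]->dynamic));
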
+static VkResult anv_create_cmd_buffer(
+    struct anv_device *                         device,
+    struct anv_cmd_pool *                       pool,
+    VkCommandBufferLevel                        level,
+    VkCommandBuffer*                            pCommandBuffer)
+{
+   struct anv_cmd_buffer *cmd_buffer;
+   VkResult result;
 
-   struct anv_bo **new_reloc_bos =
-      anv_device_alloc(device, new_length * sizeof(*list->reloc_bos), 8,
-                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);
-   if (new_relocs == NULL) {
-      anv_device_free(device, new_relocs);
+   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
+                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (cmd_buffer == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-   }
 
-   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
-   memcpy(new_reloc_bos, list->reloc_bos,
-          list->num_relocs * sizeof(*list->reloc_bos));
+   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+   cmd_buffer->device = device;
+   cmd_buffer->pool = pool;
+
+   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
+   if (result != VK_SUCCESS)
+      goto fail;
 
-   anv_device_free(device, list->relocs);
-   anv_device_free(device, list->reloc_bos);
+   anv_state_stream_init(&cmd_buffer->surface_state_stream,
+                         &device->surface_state_block_pool);
+   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
+                         &device->dynamic_state_block_pool);
 
-   list->array_length = new_length;
-   list->relocs = new_relocs;
-   list->reloc_bos = new_reloc_bos;
+   cmd_buffer->level = level;
+   cmd_buffer->usage_flags = 0;
+
+   anv_cmd_state_init(&cmd_buffer->state);
+
+   if (pool) {
+      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+   } else {
+      /* Init the pool_link so we can safely call list_del when we destroy
+       * the command buffer
+       */
+      list_inithead(&cmd_buffer->pool_link);
+   }
+
+   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
 
    return VK_SUCCESS;
+
+ fail:
+   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
+
+   return result;
 }
 
-uint64_t
-anv_reloc_list_add(struct anv_reloc_list *list, struct anv_device *device,
-                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
+VkResult anv_AllocateCommandBuffers(
+    VkDevice                                    _device,
+    const VkCommandBufferAllocateInfo*          pAllocateInfo,
+    VkCommandBuffer*                            pCommandBuffers)
 {
-   struct drm_i915_gem_relocation_entry *entry;
-   int index;
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);
+
+   VkResult result = VK_SUCCESS;
+   uint32_t i;
 
-   anv_reloc_list_grow(list, device, 1);
-   /* TODO: Handle failure */
+   for (i = 0; i < pAllocateInfo->bufferCount; i++) {
+      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
+                                     &pCommandBuffers[i]);
+      if (result != VK_SUCCESS)
+         break;
+   }
 
-   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
-   index = list->num_relocs++;
-   list->reloc_bos[index] = target_bo;
-   entry = &list->relocs[index];
-   entry->target_handle = target_bo->gem_handle;
-   entry->delta = delta;
-   entry->offset = offset;
-   entry->presumed_offset = target_bo->offset;
-   entry->read_domains = 0;
-   entry->write_domain = 0;
+   if (result != VK_SUCCESS)
+      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
+                             i, pCommandBuffers);
 
-   return target_bo->offset + delta;
+   return result;
 }
 
 static void
-anv_reloc_list_append(struct anv_reloc_list *list, struct anv_device *device,
-                      struct anv_reloc_list *other, uint32_t offset)
+anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
 {
-   anv_reloc_list_grow(list, device, other->num_relocs);
-   /* TODO: Handle failure */
+   list_del(&cmd_buffer->pool_link);
 
-   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
-          other->num_relocs * sizeof(other->relocs[0]));
-   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
-          other->num_relocs * sizeof(other->reloc_bos[0]));
+   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);
 
-   for (uint32_t i = 0; i < other->num_relocs; i++)
-      list->relocs[i + list->num_relocs].offset += offset;
+   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
+   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
 
-   list->num_relocs += other->num_relocs;
+   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
 }
 
-/*-----------------------------------------------------------------------*
- * Functions related to anv_batch
- *-----------------------------------------------------------------------*/
+void anv_FreeCommandBuffers(
+    VkDevice                                    device,
+    VkCommandPool                               commandPool,
+    uint32_t                                    commandBufferCount,
+    const VkCommandBuffer*                      pCommandBuffers)
+{
+   for (uint32_t i = 0; i < commandBufferCount; i++) {
+      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
+
+      anv_cmd_buffer_destroy(cmd_buffer);
+   }
+}
 
-void *
-anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
+VkResult anv_ResetCommandBuffer(
+    VkCommandBuffer                             commandBuffer,
+    VkCommandBufferResetFlags                   flags)
 {
-   if (batch->next + num_dwords * 4 > batch->end)
-      batch->extend_cb(batch, batch->user_data);
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 
-   void *p = batch->next;
+   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
 
-   batch->next += num_dwords * 4;
-   assert(batch->next <= batch->end);
+   anv_cmd_state_init(&cmd_buffer->state);
 
-   return p;
+   return VK_SUCCESS;
 }
 
-uint64_t
-anv_batch_emit_reloc(struct anv_batch *batch,
-                     void *location, struct anv_bo *bo, uint32_t delta)
-{
-   return anv_reloc_list_add(batch->relocs, batch->device,
-                             location - batch->start, bo, delta);
+void
+anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
+{
+   switch (cmd_buffer->device->info.gen) {
+   case 7:
+      /* Ivybridge and Haswell currently share the gen7 path. */
+      return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
+   case 8:
+      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
+   case 9:
+      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
+   default:
+      unreachable("unsupported gen\n");
+   }
 }
 
-void
-anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
+VkResult anv_BeginCommandBuffer(
+    VkCommandBuffer                             commandBuffer,
+    const VkCommandBufferBeginInfo*             pBeginInfo)
 {
-   uint32_t size, offset;
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 
-   size = other->next - other->start;
-   assert(size % 4 == 0);
+   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
 
-   if (batch->next + size > batch->end)
-      batch->extend_cb(batch, batch->user_data);
+   cmd_buffer->usage_flags = pBeginInfo->flags;
 
-   assert(batch->next + size <= batch->end);
+   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+      cmd_buffer->state.framebuffer =
+         anv_framebuffer_from_handle(pBeginInfo->framebuffer);
+      cmd_buffer->state.pass =
+         anv_render_pass_from_handle(pBeginInfo->renderPass);
 
-   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
-   memcpy(batch->next, other->start, size);
+      struct anv_subpass *subpass =
+         &cmd_buffer->state.pass->subpasses[pBeginInfo->subpass];
 
-   offset = batch->next - batch->start;
-   anv_reloc_list_append(batch->relocs, batch->device,
-                         other->relocs, offset);
+      anv_cmd_buffer_begin_subpass(cmd_buffer, subpass);
+   }
 
-   batch->next += size;
-}
+   anv_cmd_buffer_emit_state_base_address(cmd_buffer);
+   cmd_buffer->state.current_pipeline = UINT32_MAX;
 
-/*-----------------------------------------------------------------------*
- * Functions related to anv_batch_bo
- *-----------------------------------------------------------------------*/
+   return VK_SUCCESS;
+}
 
-static VkResult
-anv_batch_bo_create(struct anv_device *device, struct anv_batch_bo **bbo_out)
+VkResult anv_EndCommandBuffer(
+    VkCommandBuffer                             commandBuffer)
 {
-   VkResult result;
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   struct anv_device *device = cmd_buffer->device;
 
-   struct anv_batch_bo *bbo =
-      anv_device_alloc(device, sizeof(*bbo), 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
-   if (bbo == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   anv_cmd_buffer_end_batch_buffer(cmd_buffer);
 
-   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bbo->bo);
-   if (result != VK_SUCCESS)
-      goto fail_alloc;
+   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+      /* The algorithm used to compute the validate list is not threadsafe as
+       * it uses the bo->index field.  We have to lock the device around it.
+       * Fortunately, the chances for contention here are probably very low.
+       */
+      pthread_mutex_lock(&device->mutex);
+      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
+      pthread_mutex_unlock(&device->mutex);
+   }
 
-   result = anv_reloc_list_init(&bbo->relocs, device);
-   if (result != VK_SUCCESS)
-      goto fail_bo_alloc;
+   return VK_SUCCESS;
+}
 
-   *bbo_out = bbo;
+void anv_CmdBindPipeline(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineBindPoint                         pipelineBindPoint,
+    VkPipeline                                  _pipeline)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
 
-   return VK_SUCCESS;
+   switch (pipelineBindPoint) {
+   case VK_PIPELINE_BIND_POINT_COMPUTE:
+      cmd_buffer->state.compute_pipeline = pipeline;
+      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
+      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
+      break;
 
- fail_bo_alloc:
-   anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
- fail_alloc:
-   anv_device_free(device, bbo);
+   case VK_PIPELINE_BIND_POINT_GRAPHICS:
+      cmd_buffer->state.pipeline = pipeline;
+      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
+      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
+      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
+
+      /* Apply the dynamic state from the pipeline */
+      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
+      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
+                             &pipeline->dynamic_state,
+                             pipeline->dynamic_state_mask);
+      break;
 
-   return result;
+   default:
+      assert(!"invalid bind point");
+      break;
+   }
 }
 
-static VkResult
-anv_batch_bo_clone(struct anv_device *device,
-                   const struct anv_batch_bo *other_bbo,
-                   struct anv_batch_bo **bbo_out)
+void anv_CmdSetViewport(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    viewportCount,
+    const VkViewport*                           pViewports)
 {
-   VkResult result;
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 
-   struct anv_batch_bo *bbo =
-      anv_device_alloc(device, sizeof(*bbo), 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
-   if (bbo == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   cmd_buffer->state.dynamic.viewport.count = viewportCount;
+   memcpy(cmd_buffer->state.dynamic.viewport.viewports,
+          pViewports, viewportCount * sizeof(*pViewports));
 
-   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bbo->bo);
-   if (result != VK_SUCCESS)
-      goto fail_alloc;
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
+}
 
-   result = anv_reloc_list_init_clone(&bbo->relocs, device, &other_bbo->relocs);
-   if (result != VK_SUCCESS)
-      goto fail_bo_alloc;
+void anv_CmdSetScissor(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    scissorCount,
+    const VkRect2D*                             pScissors)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 
-   bbo->length = other_bbo->length;
-   memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
+   cmd_buffer->state.dynamic.scissor.count = scissorCount;
+   memcpy(cmd_buffer->state.dynamic.scissor.scissors,
+          pScissors, scissorCount * sizeof(*pScissors));
 
-   *bbo_out = bbo;
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
+}
 
-   return VK_SUCCESS;
+void anv_CmdSetLineWidth(
+    VkCommandBuffer                             commandBuffer,
+    float                                       lineWidth)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   cmd_buffer->state.dynamic.line_width = lineWidth;
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
+}
+
+void anv_CmdSetDepthBias(
+    VkCommandBuffer                             commandBuffer,
+    float                                       depthBiasConstantFactor,
+    float                                       depthBiasClamp,
+    float                                       depthBiasSlopeFactor)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 
- fail_bo_alloc:
-   anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
- fail_alloc:
-   anv_device_free(device, bbo);
+   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
+   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
+   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
 
-   return result;
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
 }
 
-static void
-anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
-                   size_t batch_padding)
+void anv_CmdSetBlendConstants(
+    VkCommandBuffer                             commandBuffer,
+    const float                                 blendConstants[4])
 {
-   batch->next = batch->start = bbo->bo.map;
-   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
-   batch->relocs = &bbo->relocs;
-   bbo->relocs.num_relocs = 0;
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   memcpy(cmd_buffer->state.dynamic.blend_constants,
+          blendConstants, sizeof(float) * 4);
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
 }
 
-static void
-anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
-                      size_t batch_padding)
+void anv_CmdSetDepthBounds(
+    VkCommandBuffer                             commandBuffer,
+    float                                       minDepthBounds,
+    float                                       maxDepthBounds)
 {
-   batch->start = bbo->bo.map;
-   batch->next = bbo->bo.map + bbo->length;
-   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
-   batch->relocs = &bbo->relocs;
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
+   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
 }
 
-static void
-anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
+void anv_CmdSetStencilCompareMask(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    uint32_t                                    compareMask)
 {
-   assert(batch->start == bbo->bo.map);
-   bbo->length = batch->next - batch->start;
-   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
+      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
+   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
+      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
 }
 
-static void
-anv_batch_bo_destroy(struct anv_batch_bo *bbo, struct anv_device *device)
+void anv_CmdSetStencilWriteMask(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    uint32_t                                    writeMask)
 {
-   anv_reloc_list_finish(&bbo->relocs, device);
-   anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
-   anv_device_free(device, bbo);
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
+      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
+   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
+      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
+
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
 }
 
-static VkResult
-anv_batch_bo_list_clone(const struct list_head *list, struct anv_device *device,
-                        struct list_head *new_list)
+void anv_CmdSetStencilReference(
+    VkCommandBuffer                             commandBuffer,
+    VkStencilFaceFlags                          faceMask,
+    uint32_t                                    reference)
 {
-   VkResult result = VK_SUCCESS;
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
 
-   list_inithead(new_list);
+   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
+      cmd_buffer->state.dynamic.stencil_reference.front = reference;
+   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
+      cmd_buffer->state.dynamic.stencil_reference.back = reference;
 
-   struct anv_batch_bo *prev_bbo = NULL;
-   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
-      struct anv_batch_bo *new_bbo;
-      result = anv_batch_bo_clone(device, bbo, &new_bbo);
-      if (result != VK_SUCCESS)
-         break;
-      list_addtail(&new_bbo->link, new_list);
-
-      if (prev_bbo) {
-         /* As we clone this list of batch_bo's, they chain one to the
-          * other using MI_BATCH_BUFFER_START commands.  We need to fix up
-          * those relocations as we go.  Fortunately, this is pretty easy
-          * as it will always be the last relocation in the list.
-          */
-         uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
-         assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
-         prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
+   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+}
+
+void anv_CmdBindDescriptorSets(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineBindPoint                         pipelineBindPoint,
+    VkPipelineLayout                            _layout,
+    uint32_t                                    firstSet,
+    uint32_t                                    descriptorSetCount,
+    const VkDescriptorSet*                      pDescriptorSets,
+    uint32_t                                    dynamicOffsetCount,
+    const uint32_t*                             pDynamicOffsets)
+{
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
+   struct anv_descriptor_set_layout *set_layout;
+
+   assert(firstSet + descriptorSetCount <= MAX_SETS);
+
+   uint32_t dynamic_slot = 0;
+   for (uint32_t i = 0; i < descriptorSetCount; i++) {
+      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
+      set_layout = layout->set[firstSet + i].layout;
+
+      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
+         cmd_buffer->state.descriptors[firstSet + i] = set;
+         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
       }
 
-      prev_bbo = new_bbo;
-   }
+      if (set_layout->dynamic_offset_count > 0) {
+         anv_foreach_stage(s, set_layout->shader_stages) {
+            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);
 
-   if (result != VK_SUCCESS) {
-      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
-         anv_batch_bo_destroy(bbo, device);
-   }
+            struct anv_push_constants *push =
+               cmd_buffer->state.push_constants[s];
 
-   return result;
-}
+            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
+            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
+            struct anv_descriptor *desc = set->descriptors;
 
-/*-----------------------------------------------------------------------*
- * Functions related to anv_batch_bo
- *-----------------------------------------------------------------------*/
+            for (unsigned b = 0; b < set_layout->binding_count; b++) {
+               if (set_layout->binding[b].dynamic_offset_index < 0)
+                  continue;
 
-static inline struct anv_batch_bo *
-anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
-{
-   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
+               unsigned array_size = set_layout->binding[b].array_size;
+               for (unsigned j = 0; j < array_size; j++) {
+                  push->dynamic[d].offset = *(offsets++);
+                  push->dynamic[d].range = (desc++)->range;
+                  d++;
+               }
+            }
+         }
+         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
+      }
+   }
 }
 
-static inline struct anv_batch_bo *
-anv_cmd_buffer_current_surface_bbo(struct anv_cmd_buffer *cmd_buffer)
+void anv_CmdBindVertexBuffers(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    startBinding,
+    uint32_t                                    bindingCount,
+    const VkBuffer*                             pBuffers,
+    const VkDeviceSize*                         pOffsets)
 {
-   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->surface_bos.prev, link);
-}
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
 
-struct anv_bo *
-anv_cmd_buffer_current_surface_bo(struct anv_cmd_buffer *cmd_buffer)
-{
-   return &anv_cmd_buffer_current_surface_bbo(cmd_buffer)->bo;
+   /* We have to defer setting up the vertex buffers since we need the
+    * buffer stride from the pipeline.
+    */
+
+   assert(startBinding + bindingCount <= MAX_VBS);
+   for (uint32_t i = 0; i < bindingCount; i++) {
+      vb[startBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
+      vb[startBinding + i].offset = pOffsets[i];
+      cmd_buffer->state.vb_dirty |= 1 << (startBinding + i);
+   }
 }
 
-struct anv_reloc_list *
-anv_cmd_buffer_current_surface_relocs(struct anv_cmd_buffer *cmd_buffer)
+static void
+add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
+                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
 {
-   return &anv_cmd_buffer_current_surface_bbo(cmd_buffer)->relocs;
+   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
+    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
+    * the initial state to set the high bits to 0. */
+
+   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;
+
+   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
+                      state.offset + dword * 4, bo, offset);
 }
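
Editor's note: a worked example of the offset math, assuming a surface
state allocated at offset 0x100:

    /* gen7:  address in dword 1 -> reloc patches byte 0x100 + 1*4 = 0x104.
     * gen8+: address in dwords 8-9 -> reloc patches byte 0x100 + 8*4 = 0x120;
     * dword 9 (the upper address bits) keeps the zero written when the
     * surface state was first filled out. */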
 
 static void
-cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
-                             struct anv_batch_bo *bbo)
-{
-   struct anv_batch *batch = &cmd_buffer->batch;
-   struct anv_batch_bo *current_bbo =
-      anv_cmd_buffer_current_batch_bo(cmd_buffer);
+fill_descriptor_buffer_surface_state(struct anv_device *device, void *state,
+                                     gl_shader_stage stage,
+                                     VkDescriptorType type,
+                                     uint32_t offset, uint32_t range)
+{
+   VkFormat format;
+   uint32_t stride;
+
+   switch (type) {
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      if (device->instance->physicalDevice.compiler->scalar_stage[stage]) {
+         stride = 4;
+      } else {
+         stride = 16;
+      }
+      format = VK_FORMAT_R32G32B32A32_SFLOAT;
+      break;
 
-   /* We set the end of the batch a little short so we would be sure we
-    * have room for the chaining command.  Since we're about to emit the
-    * chaining command, let's set it back where it should go.
-    */
-   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
-   assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+      stride = 1;
+      format = VK_FORMAT_UNDEFINED;
+      break;
 
-   anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_START,
-      GEN8_MI_BATCH_BUFFER_START_header,
-      ._2ndLevelBatchBuffer = _1stlevelbatch,
-      .AddressSpaceIndicator = ASI_PPGTT,
-      .BatchBufferStartAddress = { &bbo->bo, 0 },
-   );
+   default:
+      unreachable("Invalid descriptor type");
+   }
 
-   anv_batch_bo_finish(current_bbo, batch);
+   anv_fill_buffer_surface_state(device, state,
+                                 anv_format_for_vk_format(format),
+                                 offset, range, stride);
 }
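
Editor's note on the stride values (explanation, not part of the patch):
uniform buffers are exposed as R32G32B32A32_SFLOAT surfaces, and the stride
reflects how the compiled shader indexes them; scalar stages address
constants in 4-byte units while vec4 stages fetch 16-byte vectors. Raw
storage buffers use an untyped, byte-addressed surface, hence
VK_FORMAT_UNDEFINED with a stride of 1.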
 
-static VkResult
-anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
-{
-   struct anv_cmd_buffer *cmd_buffer = _data;
-   struct anv_batch_bo *new_bbo;
+VkResult
+anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
+                                  gl_shader_stage stage,
+                                  struct anv_state *bt_state)
+{
+   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   struct anv_subpass *subpass = cmd_buffer->state.subpass;
+   struct anv_pipeline_layout *layout;
+   uint32_t color_count, bias, state_offset;
+
+   if (stage == MESA_SHADER_COMPUTE)
+      layout = cmd_buffer->state.compute_pipeline->layout;
+   else
+      layout = cmd_buffer->state.pipeline->layout;
+
+   if (stage == MESA_SHADER_FRAGMENT) {
+      bias = MAX_RTS;
+      color_count = subpass->color_count;
+   } else {
+      bias = 0;
+      color_count = 0;
+   }
 
-   VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
-   if (result != VK_SUCCESS)
-      return result;
+   /* This is a little awkward: layout can be NULL but we still have to
+    * allocate and set a binding table for the PS stage for render
+    * targets. */
+   uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;
 
-   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
-   if (seen_bbo == NULL) {
-      anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-   }
-   *seen_bbo = new_bbo;
+   if (color_count + surface_count == 0)
+      return VK_SUCCESS;
 
-   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
+   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
+                                                  bias + surface_count,
+                                                  &state_offset);
+   uint32_t *bt_map = bt_state->map;
 
-   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
+   if (bt_state->map == NULL)
+      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
 
-   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
+   for (uint32_t a = 0; a < color_count; a++) {
+      const struct anv_image_view *iview =
+         fb->attachments[subpass->color_attachments[a]];
 
-   return VK_SUCCESS;
-}
+      bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
+      add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
+                              iview->bo, iview->offset);
+   }
 
-struct anv_state
-anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer,
-                                   uint32_t size, uint32_t alignment)
-{
-   struct anv_bo *surface_bo =
-      anv_cmd_buffer_current_surface_bo(cmd_buffer);
-   struct anv_state state;
+   if (layout == NULL)
+      goto out;
 
-   state.offset = align_u32(cmd_buffer->surface_next, alignment);
-   if (state.offset + size > surface_bo->size)
-      return (struct anv_state) { 0 };
+   for (uint32_t s = 0; s < layout->stage[stage].surface_count; s++) {
+      struct anv_pipeline_binding *binding =
+         &layout->stage[stage].surface_to_descriptor[s];
+      struct anv_descriptor_set *set =
+         cmd_buffer->state.descriptors[binding->set];
+      struct anv_descriptor *desc = &set->descriptors[binding->offset];
 
-   state.map = surface_bo->map + state.offset;
-   state.alloc_size = size;
-   cmd_buffer->surface_next = state.offset + size;
+      struct anv_state surface_state;
+      struct anv_bo *bo;
+      uint32_t bo_offset;
 
-   assert(state.offset + size <= surface_bo->size);
+      switch (desc->type) {
+      case VK_DESCRIPTOR_TYPE_SAMPLER:
+         /* Nothing for us to do here */
+         continue;
 
-   return state;
-}
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
+         bo = desc->buffer->bo;
+         bo_offset = desc->buffer->offset + desc->offset;
 
-struct anv_state
-anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
-                                   uint32_t size, uint32_t alignment)
-{
-   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
-                                 size, alignment);
-}
+         surface_state =
+            anv_cmd_buffer_alloc_surface_state(cmd_buffer);
 
-VkResult
-anv_cmd_buffer_new_surface_state_bo(struct anv_cmd_buffer *cmd_buffer)
-{
-   struct anv_batch_bo *new_bbo, *old_bbo =
-      anv_cmd_buffer_current_surface_bbo(cmd_buffer);
+         fill_descriptor_buffer_surface_state(cmd_buffer->device,
+                                              surface_state.map,
+                                              stage, desc->type,
+                                              bo_offset, desc->range);
 
-   /* Finish off the old buffer */
-   old_bbo->length = cmd_buffer->surface_next;
+         if (!cmd_buffer->device->info.has_llc)
+            anv_state_clflush(surface_state);
 
-   VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
-   if (result != VK_SUCCESS)
-      return result;
+         break;
+      }
 
-   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
-   if (seen_bbo == NULL) {
-      anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-   }
-   *seen_bbo = new_bbo;
+      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+         surface_state = desc->image_view->nonrt_surface_state;
+         bo = desc->image_view->bo;
+         bo_offset = desc->image_view->offset;
+         break;
+
+      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+         assert(!"Unsupported descriptor type");
+         break;
+
+      default:
+         assert(!"Invalid descriptor type");
+         continue;
+      }
 
-   cmd_buffer->surface_next = 1;
+      bt_map[bias + s] = surface_state.offset + state_offset;
+      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
+   }
 
-   list_addtail(&new_bbo->link, &cmd_buffer->surface_bos);
+ out:
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(*bt_state);
 
    return VK_SUCCESS;
 }
 
 VkResult
-anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
+anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
+                             gl_shader_stage stage, struct anv_state *state)
 {
-   struct anv_batch_bo *batch_bo, *surface_bbo;
-   struct anv_device *device = cmd_buffer->device;
-   VkResult result;
+   struct anv_pipeline_layout *layout;
+   uint32_t sampler_count;
 
-   list_inithead(&cmd_buffer->batch_bos);
-   list_inithead(&cmd_buffer->surface_bos);
+   if (stage == MESA_SHADER_COMPUTE)
+      layout = cmd_buffer->state.compute_pipeline->layout;
+   else
+      layout = cmd_buffer->state.pipeline->layout;
 
-   result = anv_batch_bo_create(device, &batch_bo);
-   if (result != VK_SUCCESS)
-      return result;
-
-   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
+   sampler_count = layout ? layout->stage[stage].sampler_count : 0;
+   if (sampler_count == 0)
+      return VK_SUCCESS;
 
-   cmd_buffer->batch.device = device;
-   cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
-   cmd_buffer->batch.user_data = cmd_buffer;
+   uint32_t size = sampler_count * 16;
+   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
 
-   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
-                      GEN8_MI_BATCH_BUFFER_START_length * 4);
+   if (state->map == NULL)
+      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
 
-   result = anv_batch_bo_create(device, &surface_bbo);
-   if (result != VK_SUCCESS)
-      goto fail_batch_bo;
+   for (uint32_t s = 0; s < layout->stage[stage].sampler_count; s++) {
+      struct anv_pipeline_binding *binding =
+         &layout->stage[stage].sampler_to_descriptor[s];
+      struct anv_descriptor_set *set =
+         cmd_buffer->state.descriptors[binding->set];
+      struct anv_descriptor *desc = &set->descriptors[binding->offset];
 
-   list_addtail(&surface_bbo->link, &cmd_buffer->surface_bos);
+      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
+          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+         continue;
 
-   int success = anv_vector_init(&cmd_buffer->seen_bbos,
-                                 sizeof(struct anv_bo *),
-                                 8 * sizeof(struct anv_bo *));
-   if (!success)
-      goto fail_surface_bo;
+      struct anv_sampler *sampler = desc->sampler;
 
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = surface_bbo;
+      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
+       * happens to be zero.
+       */
+      if (sampler == NULL)
+         continue;
 
-   /* Start surface_next at 1 so surface offset 0 is invalid. */
-   cmd_buffer->surface_next = 1;
+      memcpy(state->map + (s * 16),
+             sampler->state, sizeof(sampler->state));
+   }
 
-   cmd_buffer->execbuf2.objects = NULL;
-   cmd_buffer->execbuf2.bos = NULL;
-   cmd_buffer->execbuf2.array_length = 0;
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(*state);
 
    return VK_SUCCESS;
-
- fail_surface_bo:
-   anv_batch_bo_destroy(surface_bbo, device);
- fail_batch_bo:
-   anv_batch_bo_destroy(batch_bo, device);
-
-   return result;
 }
 
-void
-anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
+struct anv_state
+anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
+                            const void *data, uint32_t size, uint32_t alignment)
 {
-   struct anv_device *device = cmd_buffer->device;
+   struct anv_state state;
 
-   anv_vector_finish(&cmd_buffer->seen_bbos);
+   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
+   memcpy(state.map, data, size);
 
-   /* Destroy all of the batch buffers */
-   list_for_each_entry_safe(struct anv_batch_bo, bbo,
-                            &cmd_buffer->batch_bos, link) {
-      anv_batch_bo_destroy(bbo, device);
-   }
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(state);
 
-   /* Destroy all of the surface state buffers */
-   list_for_each_entry_safe(struct anv_batch_bo, bbo,
-                            &cmd_buffer->surface_bos, link) {
-      anv_batch_bo_destroy(bbo, device);
-   }
+   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
 
-   anv_device_free(device, cmd_buffer->execbuf2.objects);
-   anv_device_free(device, cmd_buffer->execbuf2.bos);
+   return state;
 }
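
Editor's note: a hypothetical caller, uploading the four blend-constant
floats to dynamic state (a sketch; the real callers live in the
genN_cmd_buffer.c files):

    struct anv_state color_state =
       anv_cmd_buffer_emit_dynamic(cmd_buffer,
                                   cmd_buffer->state.dynamic.blend_constants,
                                   4 * sizeof(float), 32 /* alignment */);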
 
-void
-anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
+struct anv_state
+anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
+                             uint32_t *a, uint32_t *b,
+                             uint32_t dwords, uint32_t alignment)
 {
-   struct anv_device *device = cmd_buffer->device;
-
-   /* Delete all but the first batch bo */
-   assert(!list_empty(&cmd_buffer->batch_bos));
-   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
-      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
-      list_del(&bbo->link);
-      anv_batch_bo_destroy(bbo, device);
-   }
-   assert(!list_empty(&cmd_buffer->batch_bos));
-
-   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
-                      &cmd_buffer->batch,
-                      GEN8_MI_BATCH_BUFFER_START_length * 4);
-
-   /* Delete all but the first batch bo */
-   assert(!list_empty(&cmd_buffer->batch_bos));
-   while (cmd_buffer->surface_bos.next != cmd_buffer->surface_bos.prev) {
-      struct anv_batch_bo *bbo = anv_cmd_buffer_current_surface_bbo(cmd_buffer);
-      list_del(&bbo->link);
-      anv_batch_bo_destroy(bbo, device);
-   }
-   assert(!list_empty(&cmd_buffer->batch_bos));
+   struct anv_state state;
+   uint32_t *p;
 
-   anv_cmd_buffer_current_surface_bbo(cmd_buffer)->relocs.num_relocs = 0;
+   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+                                              dwords * 4, alignment);
+   p = state.map;
+   for (uint32_t i = 0; i < dwords; i++)
+      p[i] = a[i] | b[i];
 
-   cmd_buffer->surface_next = 1;
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(state);
 
-   /* Reset the list of seen buffers */
-   cmd_buffer->seen_bbos.head = 0;
-   cmd_buffer->seen_bbos.tail = 0;
+   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
 
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
-      anv_cmd_buffer_current_batch_bo(cmd_buffer);
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
-      anv_cmd_buffer_current_surface_bbo(cmd_buffer);
+   return state;
 }
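
Editor's note: this helper ORs together two partially packed command
streams, the usual trick for merging pipeline-static fields with
dynamically packed ones. A sketch with hypothetical arrays:

    /* Each array was packed with a disjoint subset of the same packet's
     * fields; OR-merging them yields the complete packet in dynamic state. */
    uint32_t pipeline_dwords[4], dynamic_dwords[4];
    /* ... pack disjoint halves of the command into each array ... */
    struct anv_state merged =
       anv_cmd_buffer_merge_dynamic(cmd_buffer, pipeline_dwords,
                                    dynamic_dwords, 4, 64);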
 
 void
-anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
+anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
+                             struct anv_subpass *subpass)
 {
-   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
-   struct anv_batch_bo *surface_bbo =
-      anv_cmd_buffer_current_surface_bbo(cmd_buffer);
+   switch (cmd_buffer->device->info.gen) {
+   case 7:
+      gen7_cmd_buffer_begin_subpass(cmd_buffer, subpass);
+      break;
+   case 8:
+      gen8_cmd_buffer_begin_subpass(cmd_buffer, subpass);
+      break;
+   case 9:
+      gen9_cmd_buffer_begin_subpass(cmd_buffer, subpass);
+      break;
+   default:
+      unreachable("unsupported gen\n");
+   }
+}
 
-   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
-      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END);
+void anv_CmdSetEvent(
+    VkCommandBuffer                             commandBuffer,
+    VkEvent                                     event,
+    VkPipelineStageFlags                        stageMask)
+{
+   stub();
+}
 
-      /* Round batch up to an even number of dwords. */
-      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
-         anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP);
+void anv_CmdResetEvent(
+    VkCommandBuffer                             commandBuffer,
+    VkEvent                                     event,
+    VkPipelineStageFlags                        stageMask)
+{
+   stub();
+}
 
-      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
-   } else {
-      /* If this is a secondary command buffer, we need to determine the
-       * mode in which it will be executed with vkExecuteCommands.  We
-       * determine this statically here so that this stays in sync with the
-       * actual ExecuteCommands implementation.
-       */
-      if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
-          (anv_cmd_buffer_current_batch_bo(cmd_buffer)->length <
-           ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
-         /* If the secondary has exactly one batch buffer in its list *and*
-          * that batch buffer is less than half of the maximum size, we're
-          * probably better of simply copying it into our batch.
-          */
-         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
-      } else if (cmd_buffer->opt_flags &
-                 VK_CMD_BUFFER_OPTIMIZE_NO_SIMULTANEOUS_USE_BIT) {
-         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
-
-         /* For chaining mode, we need to increment the number of
-          * relocations.  This is because, when we chain, we need to add
-          * an MI_BATCH_BUFFER_START command.  Adding this command will
-          * also add a relocation.  In order to handle theis we'll
-          * increment it here and decrement it right before adding the
-          * MI_BATCH_BUFFER_START command.
-          */
-         anv_cmd_buffer_current_batch_bo(cmd_buffer)->relocs.num_relocs++;
-      } else {
-         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
-      }
+void anv_CmdWaitEvents(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    eventCount,
+    const VkEvent*                              pEvents,
+    VkPipelineStageFlags                        srcStageMask,
+    VkPipelineStageFlags                        destStageMask,
+    uint32_t                                    memBarrierCount,
+    const void* const*                          ppMemBarriers)
+{
+   stub();
+}
+
+struct anv_state
+anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
+                              gl_shader_stage stage)
+{
+   struct anv_push_constants *data =
+      cmd_buffer->state.push_constants[stage];
+   struct brw_stage_prog_data *prog_data =
+      cmd_buffer->state.pipeline->prog_data[stage];
+
+   /* If we don't actually have any push constants, bail. */
+   if (data == NULL || prog_data->nr_params == 0)
+      return (struct anv_state) { .offset = 0 };
+
+   struct anv_state state =
+      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+                                         prog_data->nr_params * sizeof(float),
+                                         32 /* bottom 5 bits MBZ */);
+
+   /* Walk through the param array and fill the buffer with data */
+   uint32_t *u32_map = state.map;
+   for (unsigned i = 0; i < prog_data->nr_params; i++) {
+      uint32_t offset = (uintptr_t)prog_data->param[i];
+      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
    }
 
-   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
+   if (!cmd_buffer->device->info.has_llc)
+      anv_state_clflush(state);
 
-   surface_bbo->length = cmd_buffer->surface_next;
+   return state;
 }
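
Editor's note: each prog_data->param[i] entry is reinterpreted as a byte
offset into struct anv_push_constants. A worked example with a hypothetical
offset: if param[2] holds 8, the third uploaded dword is the 32-bit value
found 8 bytes into the stage's push-constant block.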
 
-static inline VkResult
-anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
-                             struct list_head *list)
+void anv_CmdPushConstants(
+    VkCommandBuffer                             commandBuffer,
+    VkPipelineLayout                            layout,
+    VkShaderStageFlags                          stageFlags,
+    uint32_t                                    offset,
+    uint32_t                                    size,
+    const void*                                 pValues)
 {
-   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
-      struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
-      if (bbo_ptr == NULL)
-         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+   anv_foreach_stage(stage, stageFlags) {
+      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);
 
-      *bbo_ptr = bbo;
+      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
+             pValues, size);
    }
 
-   return VK_SUCCESS;
+   cmd_buffer->state.push_constants_dirty |= stageFlags;
 }
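
Editor's note: from the API side this path is fed by vkCmdPushConstants; a
minimal sketch of a matching call (application code, not from the patch):

    float consts[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
    vkCmdPushConstants(commandBuffer, layout, VK_SHADER_STAGE_VERTEX_BIT,
                       0 /* offset */, sizeof(consts), consts);

The driver copies the bytes into the vertex stage's client_data at the
given offset and marks that stage's push constants dirty.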
 
-void
-anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
-                             struct anv_cmd_buffer *secondary)
+void anv_CmdExecuteCommands(
+    VkCommandBuffer                             commandBuffer,
+    uint32_t                                    commandBuffersCount,
+    const VkCommandBuffer*                      pCmdBuffers)
 {
-   switch (secondary->exec_mode) {
-   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
-      anv_batch_emit_batch(&primary->batch, &secondary->batch);
-      break;
-   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
-      struct anv_batch_bo *first_bbo =
-         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
-      struct anv_batch_bo *last_bbo =
-         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
-
-      anv_batch_emit(&primary->batch, GEN8_MI_BATCH_BUFFER_START,
-         GEN8_MI_BATCH_BUFFER_START_header,
-         ._2ndLevelBatchBuffer = _1stlevelbatch,
-         .AddressSpaceIndicator = ASI_PPGTT,
-         .BatchBufferStartAddress = { &first_bbo->bo, 0 },
-      );
-
-      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
-      assert(primary->batch.start == this_bbo->bo.map);
-      uint32_t offset = primary->batch.next - primary->batch.start;
-
-      struct GEN8_MI_BATCH_BUFFER_START ret = {
-         GEN8_MI_BATCH_BUFFER_START_header,
-         ._2ndLevelBatchBuffer = _1stlevelbatch,
-         .AddressSpaceIndicator = ASI_PPGTT,
-         .BatchBufferStartAddress = { &this_bbo->bo, offset },
-      };
-      last_bbo->relocs.num_relocs++;
-      GEN8_MI_BATCH_BUFFER_START_pack(&secondary->batch,
-                                      last_bbo->bo.map + last_bbo->length,
-                                      &ret);
-
-      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
-      break;
-   }
-   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
-      struct list_head copy_list;
-      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
-                                                secondary->device,
-                                                &copy_list);
-      if (result != VK_SUCCESS)
-         return; /* FIXME */
-
-      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
+   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
 
-      struct anv_batch_bo *first_bbo =
-         list_first_entry(&copy_list, struct anv_batch_bo, link);
-      struct anv_batch_bo *last_bbo =
-         list_last_entry(&copy_list, struct anv_batch_bo, link);
+   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
 
-      cmd_buffer_chain_to_batch_bo(primary, first_bbo);
+   anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);
 
-      list_splicetail(&copy_list, &primary->batch_bos);
+   for (uint32_t i = 0; i < commandBuffersCount; i++) {
+      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
 
-      anv_batch_bo_continue(last_bbo, &primary->batch,
-                            GEN8_MI_BATCH_BUFFER_START_length * 4);
+      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
 
-      anv_cmd_buffer_emit_state_base_address(primary);
-      break;
-   }
-   default:
-      assert(!"Invalid execution mode");
+      anv_cmd_buffer_add_secondary(primary, secondary);
    }
-
-   /* Mark the surface buffer from the secondary as seen */
-   anv_cmd_buffer_add_seen_bbos(primary, &secondary->surface_bos);
 }
 
-static VkResult
-anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
-                      struct anv_bo *bo,
-                      struct anv_reloc_list *relocs)
+VkResult anv_CreateCommandPool(
+    VkDevice                                    _device,
+    const VkCommandPoolCreateInfo*              pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
+    VkCommandPool*                              pCmdPool)
 {
-   struct drm_i915_gem_exec_object2 *obj = NULL;
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_cmd_pool *pool;
 
-   if (bo->index < cmd_buffer->execbuf2.bo_count &&
-       cmd_buffer->execbuf2.bos[bo->index] == bo)
-      obj = &cmd_buffer->execbuf2.objects[bo->index];
+   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (pool == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   if (obj == NULL) {
-      /* We've never seen this one before.  Add it to the list and assign
-       * an id that we can use later.
-       */
-      if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
-         uint32_t new_len = cmd_buffer->execbuf2.objects ?
-                            cmd_buffer->execbuf2.array_length * 2 : 64;
-
-         struct drm_i915_gem_exec_object2 *new_objects =
-            anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_objects),
-                             8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
-         if (new_objects == NULL)
-            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
-         struct anv_bo **new_bos =
-            anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_bos),
-                             8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
-         if (new_bos == NULL) {
-            anv_device_free(cmd_buffer->device, new_objects);
-            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-         }
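+   /* Remember the allocator given at pool creation; objects allocated from
+    * this pool will use it.  If none was provided, fall back to the
+    * device's allocator.
+    */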
+   if (pAllocator)
+      pool->alloc = *pAllocator;
+   else
+      pool->alloc = device->alloc;
 
-         if (cmd_buffer->execbuf2.objects) {
-            memcpy(new_objects, cmd_buffer->execbuf2.objects,
-                   cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
-            memcpy(new_bos, cmd_buffer->execbuf2.bos,
-                   cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
-         }
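+   /* Track every command buffer allocated from this pool so that reset and
+    * destroy can walk them.
+    */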
+   list_inithead(&pool->cmd_buffers);
 
-         cmd_buffer->execbuf2.objects = new_objects;
-         cmd_buffer->execbuf2.bos = new_bos;
-         cmd_buffer->execbuf2.array_length = new_len;
-      }
+   *pCmdPool = anv_cmd_pool_to_handle(pool);
 
-      assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);
+   return VK_SUCCESS;
+}
 
-      bo->index = cmd_buffer->execbuf2.bo_count++;
-      obj = &cmd_buffer->execbuf2.objects[bo->index];
-      cmd_buffer->execbuf2.bos[bo->index] = bo;
+void anv_DestroyCommandPool(
+    VkDevice                                    _device,
+    VkCommandPool                               commandPool,
+    const VkAllocationCallbacks*                pAllocator)
+{
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
 
-      obj->handle = bo->gem_handle;
-      obj->relocation_count = 0;
-      obj->relocs_ptr = 0;
-      obj->alignment = 0;
-      obj->offset = bo->offset;
-      obj->flags = 0;
-      obj->rsvd1 = 0;
-      obj->rsvd2 = 0;
-   }
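+   /* Destroying the pool implicitly frees any command buffers still
+    * allocated from it; the reset path below does that walk.
+    */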
+   anv_ResetCommandPool(_device, commandPool, 0);
 
-   if (relocs != NULL && obj->relocation_count == 0) {
-      /* This is the first time we've seen a list of relocations for this
-       * BO.  Set the relocation pointers on the object, then walk the list
-       * and add each relocation's target BO.
-       */
-      obj->relocation_count = relocs->num_relocs;
-      obj->relocs_ptr = (uintptr_t) relocs->relocs;
+   anv_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult anv_ResetCommandPool(
+    VkDevice                                    device,
+    VkCommandPool                               commandPool,
+    VkCommandPoolResetFlags                     flags)
+{
+   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
 
-      for (size_t i = 0; i < relocs->num_relocs; i++)
-         anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
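+   /* Use the _safe iterator: destroying a command buffer unlinks it from
+    * pool->cmd_buffers while we walk the list.
+    */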
+   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
+                            &pool->cmd_buffers, pool_link) {
+      anv_cmd_buffer_destroy(cmd_buffer);
    }
 
    return VK_SUCCESS;
 }
 
-static void
-anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
-                              struct anv_reloc_list *list)
+/**
+ * Return NULL if the current subpass has no depth-stencil attachment.
+ */
+const struct anv_image_view *
+anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_bo *bo;
+   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
+   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
 
-   /* If the kernel supports I915_EXEC_NO_RELOC, it compares the offset in
-    * struct drm_i915_gem_exec_object2 against each BO's current offset and,
-    * if no BO has moved, skips relocation processing altogether.  If
-    * I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
-    * offset, so we can set it either way.  For that to work, we need to
-    * make sure all relocs use the same presumed offset.
-    */
+   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
+      return NULL;
 
-   for (size_t i = 0; i < list->num_relocs; i++) {
-      bo = list->reloc_bos[i];
-      if (bo->offset != list->relocs[i].presumed_offset)
-         cmd_buffer->execbuf2.need_reloc = true;
+   const struct anv_image_view *iview =
+      fb->attachments[subpass->depth_stencil_attachment];
 
-      list->relocs[i].target_handle = bo->index;
-   }
-}
-
-void
-anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
-{
-   struct anv_batch *batch = &cmd_buffer->batch;
-
-   cmd_buffer->execbuf2.bo_count = 0;
-   cmd_buffer->execbuf2.need_reloc = false;
-
-   /* First, we walk over all of the bos we've seen and add them and their
-    * relocations to the validate list.
-    */
-   struct anv_batch_bo **bbo;
-   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
-      anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
-
-   struct anv_batch_bo *first_batch_bo =
-      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
-
-   /* The kernel requires that the last entry in the validation list be the
-    * batch buffer to execute.  We can simply swap the element
-    * corresponding to the first batch_bo in the chain with the last
-    * element in the list.
-    */
-   if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
-      uint32_t idx = first_batch_bo->bo.index;
-
-      struct drm_i915_gem_exec_object2 tmp_obj =
-         cmd_buffer->execbuf2.objects[idx];
-      assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);
-
-      cmd_buffer->execbuf2.objects[idx] =
-         cmd_buffer->execbuf2.objects[cmd_buffer->execbuf2.bo_count - 1];
-      cmd_buffer->execbuf2.bos[idx] =
-         cmd_buffer->execbuf2.bos[cmd_buffer->execbuf2.bo_count - 1];
-      cmd_buffer->execbuf2.bos[idx]->index = idx;
-
-      cmd_buffer->execbuf2.objects[cmd_buffer->execbuf2.bo_count - 1] = tmp_obj;
-      cmd_buffer->execbuf2.bos[cmd_buffer->execbuf2.bo_count - 1] =
-         &first_batch_bo->bo;
-      first_batch_bo->bo.index = cmd_buffer->execbuf2.bo_count - 1;
-   }
+   assert(anv_format_is_depth_or_stencil(iview->format));
 
-   /* Now we go through and fix up all of the relocation lists to point to
-    * the correct indices in the object array.  We have to do this after we
-    * reorder the list above as some of the indices may have changed.
-    */
-   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
-      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
-
-   cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
-      .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
-      .buffer_count = cmd_buffer->execbuf2.bo_count,
-      .batch_start_offset = 0,
-      .batch_len = batch->next - batch->start,
-      .cliprects_ptr = 0,
-      .num_cliprects = 0,
-      .DR1 = 0,
-      .DR4 = 0,
-      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
-      .rsvd1 = cmd_buffer->device->context_id,
-      .rsvd2 = 0,
-   };
-
-   if (!cmd_buffer->execbuf2.need_reloc)
-      cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
+   return iview;
 }
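+
+/* Hypothetical usage sketch (illustrative only, not part of this change):
+ * state-emission code can use the helper above to decide whether the
+ * current subpass needs any depth/stencil setup:
+ *
+ *    const struct anv_image_view *iview =
+ *       anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
+ *    if (iview == NULL) {
+ *       // No depth/stencil attachment; skip depth/stencil emission.
+ *    }
+ */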