intel/genxml,isl: Add gen12 stencil buffer changes
[mesa.git] / src / intel / vulkan / anv_batch_chain.c
index 034f3fda24a8f1fd106ec403012ecf2b8f4430ec..3b1f713ef5b1ae170be27ba4bc6a07d24fb8c9a5 100644
 
 #include "anv_private.h"
 
-#include "genxml/gen7_pack.h"
 #include "genxml/gen8_pack.h"
 
+#include "util/debug.h"
+
 /** \file anv_batch_chain.c
  *
  * This file contains functions related to anv_cmd_buffer as a data
@@ -59,18 +60,26 @@ anv_reloc_list_init_clone(struct anv_reloc_list *list,
    }
 
    list->relocs =
-      anv_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
+      vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 
    if (list->relocs == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    list->reloc_bos =
-      anv_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
+      vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 
    if (list->reloc_bos == NULL) {
-      anv_free(alloc, list->relocs);
+      vk_free(alloc, list->relocs);
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
+
+   list->deps = _mesa_pointer_set_create(NULL);
+
+   if (!list->deps) {
+      vk_free(alloc, list->relocs);
+      vk_free(alloc, list->reloc_bos);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
    }
 
@@ -79,6 +88,9 @@ anv_reloc_list_init_clone(struct anv_reloc_list *list,
              list->array_length * sizeof(*list->relocs));
       memcpy(list->reloc_bos, other_list->reloc_bos,
              list->array_length * sizeof(*list->reloc_bos));
+      set_foreach(other_list->deps, entry) {
+         _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
+      }
    }
 
    return VK_SUCCESS;
@@ -95,8 +107,9 @@ void
 anv_reloc_list_finish(struct anv_reloc_list *list,
                       const VkAllocationCallbacks *alloc)
 {
-   anv_free(alloc, list->relocs);
-   anv_free(alloc, list->reloc_bos);
+   vk_free(alloc, list->relocs);
+   vk_free(alloc, list->reloc_bos);
+   _mesa_set_destroy(list->deps, NULL);
 }
 
 static VkResult
@@ -112,16 +125,16 @@ anv_reloc_list_grow(struct anv_reloc_list *list,
       new_length *= 2;
 
    struct drm_i915_gem_relocation_entry *new_relocs =
-      anv_alloc(alloc, new_length * sizeof(*list->relocs), 8,
+      vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (new_relocs == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    struct anv_bo **new_reloc_bos =
-      anv_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
+      vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-   if (new_relocs == NULL) {
-      anv_free(alloc, new_relocs);
+   if (new_reloc_bos == NULL) {
+      vk_free(alloc, new_relocs);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
    }
 
@@ -129,8 +142,8 @@ anv_reloc_list_grow(struct anv_reloc_list *list,
    memcpy(new_reloc_bos, list->reloc_bos,
           list->num_relocs * sizeof(*list->reloc_bos));
 
-   anv_free(alloc, list->relocs);
-   anv_free(alloc, list->reloc_bos);
+   vk_free(alloc, list->relocs);
+   vk_free(alloc, list->reloc_bos);
 
    list->array_length = new_length;
    list->relocs = new_relocs;
@@ -139,7 +152,7 @@ anv_reloc_list_grow(struct anv_reloc_list *list,
    return VK_SUCCESS;
 }
 
-uint64_t
+VkResult
 anv_reloc_list_add(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
@@ -147,11 +160,14 @@ anv_reloc_list_add(struct anv_reloc_list *list,
    struct drm_i915_gem_relocation_entry *entry;
    int index;
 
-   const uint32_t domain =
-      target_bo->is_winsys_bo ? I915_GEM_DOMAIN_RENDER : 0;
+   if (target_bo->flags & EXEC_OBJECT_PINNED) {
+      _mesa_set_add(list->deps, target_bo);
+      return VK_SUCCESS;
+   }
 
-   anv_reloc_list_grow(list, alloc, 1);
-   /* TODO: Handle failure */
+   VkResult result = anv_reloc_list_grow(list, alloc, 1);
+   if (result != VK_SUCCESS)
+      return result;
 
    /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
    index = list->num_relocs++;
@@ -161,20 +177,21 @@ anv_reloc_list_add(struct anv_reloc_list *list,
    entry->delta = delta;
    entry->offset = offset;
    entry->presumed_offset = target_bo->offset;
-   entry->read_domains = domain;
-   entry->write_domain = domain;
+   entry->read_domains = 0;
+   entry->write_domain = 0;
    VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
 
-   return target_bo->offset + delta;
+   return VK_SUCCESS;
 }
 
-static void
+static VkResult
 anv_reloc_list_append(struct anv_reloc_list *list,
                       const VkAllocationCallbacks *alloc,
                       struct anv_reloc_list *other, uint32_t offset)
 {
-   anv_reloc_list_grow(list, alloc, other->num_relocs);
-   /* TODO: Handle failure */
+   VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
+   if (result != VK_SUCCESS)
+      return result;
 
    memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
           other->num_relocs * sizeof(other->relocs[0]));
@@ -185,6 +202,12 @@ anv_reloc_list_append(struct anv_reloc_list *list,
       list->relocs[i + list->num_relocs].offset += offset;
 
    list->num_relocs += other->num_relocs;
+
+   set_foreach(other->deps, entry) {
+      _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
+   }
+
+   return VK_SUCCESS;
 }
 
 /*-----------------------------------------------------------------------*
@@ -194,8 +217,13 @@ anv_reloc_list_append(struct anv_reloc_list *list,
 void *
 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
 {
-   if (batch->next + num_dwords * 4 > batch->end)
-      batch->extend_cb(batch, batch->user_data);
+   if (batch->next + num_dwords * 4 > batch->end) {
+      VkResult result = batch->extend_cb(batch, batch->user_data);
+      if (result != VK_SUCCESS) {
+         anv_batch_set_error(batch, result);
+         return NULL;
+      }
+   }
 
    void *p = batch->next;
 
@@ -209,8 +237,14 @@ uint64_t
 anv_batch_emit_reloc(struct anv_batch *batch,
                      void *location, struct anv_bo *bo, uint32_t delta)
 {
-   return anv_reloc_list_add(batch->relocs, batch->alloc,
-                             location - batch->start, bo, delta);
+   VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
+                                        location - batch->start, bo, delta);
+   if (result != VK_SUCCESS) {
+      anv_batch_set_error(batch, result);
+      return 0;
+   }
+
+   return bo->offset + delta;
 }
 
 void
@@ -221,8 +255,13 @@ anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
    size = other->next - other->start;
    assert(size % 4 == 0);
 
-   if (batch->next + size > batch->end)
-      batch->extend_cb(batch, batch->user_data);
+   if (batch->next + size > batch->end) {
+      VkResult result = batch->extend_cb(batch, batch->user_data);
+      if (result != VK_SUCCESS) {
+         anv_batch_set_error(batch, result);
+         return;
+      }
+   }
 
    assert(batch->next + size <= batch->end);
 
@@ -230,8 +269,12 @@ anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
    memcpy(batch->next, other->start, size);
 
    offset = batch->next - batch->start;
-   anv_reloc_list_append(batch->relocs, batch->alloc,
-                         other->relocs, offset);
+   VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
+                                           other->relocs, offset);
+   if (result != VK_SUCCESS) {
+      anv_batch_set_error(batch, result);
+      return;
+   }
 
    batch->next += size;
 }
@@ -246,7 +289,7 @@ anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
 {
    VkResult result;
 
-   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
+   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (bbo == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -267,7 +310,7 @@ anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
  fail_bo_alloc:
    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
  fail_alloc:
-   anv_free(&cmd_buffer->pool->alloc, bbo);
+   vk_free(&cmd_buffer->pool->alloc, bbo);
 
    return result;
 }
@@ -279,7 +322,7 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
 {
    VkResult result;
 
-   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
+   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (bbo == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -297,8 +340,6 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
    bbo->length = other_bbo->length;
    memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
 
-   bbo->last_ss_pool_bo_offset = other_bbo->last_ss_pool_bo_offset;
-
    *bbo_out = bbo;
 
    return VK_SUCCESS;
@@ -306,7 +347,7 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
  fail_bo_alloc:
    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
  fail_alloc:
-   anv_free(&cmd_buffer->pool->alloc, bbo);
+   vk_free(&cmd_buffer->pool->alloc, bbo);
 
    return result;
 }
@@ -318,8 +359,8 @@ anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
    batch->next = batch->start = bbo->bo.map;
    batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
    batch->relocs = &bbo->relocs;
-   bbo->last_ss_pool_bo_offset = 0;
    bbo->relocs.num_relocs = 0;
+   _mesa_set_clear(bbo->relocs.deps, NULL);
 }
 
 static void
@@ -371,13 +412,46 @@ anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
    return VK_SUCCESS;
 }
 
+static void
+anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
+                  struct anv_batch_bo *prev_bbo,
+                  struct anv_batch_bo *next_bbo,
+                  uint32_t next_bbo_offset)
+{
+   const uint32_t bb_start_offset =
+      prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
+   ASSERTED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;
+
+   /* Make sure we're looking at a MI_BATCH_BUFFER_START */
+   assert(((*bb_start >> 29) & 0x07) == 0);
+   assert(((*bb_start >> 23) & 0x3f) == 49);
+
+   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+      assert(prev_bbo->bo.flags & EXEC_OBJECT_PINNED);
+      assert(next_bbo->bo.flags & EXEC_OBJECT_PINNED);
+
+      write_reloc(cmd_buffer->device,
+                  prev_bbo->bo.map + bb_start_offset + 4,
+                  next_bbo->bo.offset + next_bbo_offset, true);
+   } else {
+      uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
+      assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
+
+      prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
+      prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
+
+      /* Use a bogus presumed offset to force a relocation */
+      prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
+   }
+}
+
 static void
 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                      struct anv_cmd_buffer *cmd_buffer)
 {
    anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
-   anv_free(&cmd_buffer->pool->alloc, bbo);
+   vk_free(&cmd_buffer->pool->alloc, bbo);
 }
 
 static VkResult
@@ -397,16 +471,8 @@ anv_batch_bo_list_clone(const struct list_head *list,
          break;
       list_addtail(&new_bbo->link, new_list);
 
-      if (prev_bbo) {
-         /* As we clone this list of batch_bo's, they chain one to the
-          * other using MI_BATCH_BUFFER_START commands.  We need to fix up
-          * those relocations as we go.  Fortunately, this is pretty easy
-          * as it will always be the last relocation in the list.
-          */
-         uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
-         assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
-         prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
-      }
+      if (prev_bbo)
+         anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
 
       prev_bbo = new_bbo;
    }
@@ -423,7 +489,7 @@ anv_batch_bo_list_clone(const struct list_head *list,
  * Functions related to anv_batch_bo
  *-----------------------------------------------------------------------*/
 
-static inline struct anv_batch_bo *
+static struct anv_batch_bo *
 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
 {
    return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
@@ -432,9 +498,10 @@ anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
 struct anv_address
 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
 {
+   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
    return (struct anv_address) {
-      .bo = &cmd_buffer->device->surface_state_block_pool.bo,
-      .offset = *(int32_t *)anv_vector_head(&cmd_buffer->bt_blocks),
+      .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
+      .offset = bt_block->offset,
    };
 }
 
@@ -450,17 +517,21 @@ emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
     * gens.
     */
 
+#define GEN7_MI_BATCH_BUFFER_START_length      2
+#define GEN7_MI_BATCH_BUFFER_START_length_bias      2
+
    const uint32_t gen7_length =
       GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
    const uint32_t gen8_length =
       GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
 
-   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START,
-      .DWordLength = cmd_buffer->device->info.gen < 8 ?
-                     gen7_length : gen8_length,
-      ._2ndLevelBatchBuffer = _1stlevelbatch,
-      .AddressSpaceIndicator = ASI_PPGTT,
-      .BatchBufferStartAddress = { bo, offset });
+   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
+      bbs.DWordLength               = cmd_buffer->device->info.gen < 8 ?
+                                      gen7_length : gen8_length;
+      bbs.SecondLevelBatchBuffer    = Firstlevelbatch;
+      bbs.AddressSpaceIndicator     = ASI_PPGTT;
+      bbs.BatchBufferStartAddress   = (struct anv_address) { bo, offset };
+   }
 }
 
 static void
@@ -493,7 +564,7 @@ anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
    if (result != VK_SUCCESS)
       return result;
 
-   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
+   struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
    if (seen_bbo == NULL) {
       anv_batch_bo_destroy(new_bbo, cmd_buffer);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -521,27 +592,105 @@ anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
    return VK_SUCCESS;
 }
 
+/** Allocate a binding table
+ *
+ * This function allocates a binding table.  This is a bit more complicated
+ * than one would think due to a combination of Vulkan driver design and some
+ * unfortunate hardware restrictions.
+ *
+ * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
+ * the binding table pointer which means that all binding tables need to live
+ * in the bottom 64k of surface state base address.  The way the GL driver has
+ * classically dealt with this restriction is to emit all surface states
+ * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
+ * isn't really an option in Vulkan for a couple of reasons:
+ *
+ *  1) In Vulkan, we have growing (or chaining) batches so surface states have
+ *     to live in their own buffer and we have to be able to re-emit
+ *     STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
+ *     order to avoid emitting STATE_BASE_ADDRESS any more often than needed
+ *     (it's not that hard to hit 64k of just binding tables), we allocate
+ *     surface state objects up-front when VkImageView is created.  In order
+ *     for this to work, surface state objects need to be allocated from a
+ *     global buffer.
+ *
+ *  2) We tried to design the surface state system in such a way that it's
+ *     already ready for bindless texturing.  The way bindless texturing works
+ *     on our hardware is that you have a big pool of surface state objects
+ *     (with its own state base address) and the bindless handles are simply
+ *     offsets into that pool.  With the architecture we chose, we already
+ *     have that pool and it's exactly the same pool that we use for regular
+ *     surface states so we should already be ready for bindless.
+ *
+ *  3) For render targets, we need to be able to fill out the surface states
+ *     later in vkBeginRenderPass so that we can assign clear colors
+ *     correctly.  One way to do this would be to just create the surface
+ *     state data and then repeatedly copy it into the surface state BO every
+ *     time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
+ *     rather annoying; it's much simpler to just allocate them up-front and
+ *     re-use them for the entire render pass.
+ *
+ * While none of these are technically blockers for emitting state on the fly
+ * like we do in GL, having a single surface state pool simplifies things
+ * greatly.  Unfortunately, it comes at a cost...
+ *
+ * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
+ * place the binding tables just anywhere in surface state base address.
+ * Because 64k isn't a whole lot of space, we can't simply restrict the
+ * surface state buffer to 64k; we have to be more clever.  The solution we've
+ * chosen is to have a block pool with a maximum size of 2G that starts at
+ * zero and grows in both directions.  All surface states are allocated from
+ * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
+ * binding tables from the bottom of the pool (negative offsets).  Every time
+ * we allocate a new binding table block, we set surface state base address to
+ * point to the bottom of the binding table block.  This way all of the
+ * binding tables in the block are in the bottom 64k of surface state base
+ * address.  When we fill out the binding table, we add the distance between
+ * the bottom of our binding table block and zero of the block pool to the
+ * surface state offsets so that they are correct relative to our new surface
+ * state base address at the bottom of the binding table block.
+ *
+ * \see adjust_relocations_from_state_pool()
+ * \see adjust_relocations_to_state_pool()
+ *
+ * \param[in]  entries        The number of surface state entries the binding
+ *                            table should be able to hold.
+ *
+ * \param[out] state_offset   The offset from surface state base address
+ *                            where the surface states live.  This must be
+ *                            added to the surface state offset when it is
+ *                            written into the binding table entry.
+ *
+ * \return                    An anv_state representing the binding table
+ */
 struct anv_state
 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                    uint32_t entries, uint32_t *state_offset)
 {
-   struct anv_block_pool *block_pool =
-       &cmd_buffer->device->surface_state_block_pool;
-   int32_t *bt_block = anv_vector_head(&cmd_buffer->bt_blocks);
+   struct anv_device *device = cmd_buffer->device;
+   struct anv_state_pool *state_pool = &device->surface_state_pool;
+   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
    struct anv_state state;
 
    state.alloc_size = align_u32(entries * 4, 32);
 
-   if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
+   if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
       return (struct anv_state) { 0 };
 
    state.offset = cmd_buffer->bt_next;
-   state.map = block_pool->map + *bt_block + state.offset;
+   state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
+                                  bt_block->offset + state.offset);
 
    cmd_buffer->bt_next += state.alloc_size;
 
-   assert(*bt_block < 0);
-   *state_offset = -(*bt_block);
+   if (device->instance->physicalDevice.use_softpin) {
+      assert(bt_block->offset >= 0);
+      *state_offset = device->surface_state_pool.block_pool.start_address -
+         device->binding_table_pool.block_pool.start_address - bt_block->offset;
+   } else {
+      assert(bt_block->offset < 0);
+      *state_offset = -bt_block->offset;
+   }
 
    return state;
 }
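
[Editor's note: a minimal standalone sketch, not driver code, of the offset arithmetic the comment above describes. Binding-table blocks are allocated at negative offsets below the pool center, surface states at positive offsets above it, and each binding-table entry must store the surface state offset plus the distance from the bottom of the binding-table block back to pool offset zero (the value returned through *state_offset on the relocation path). The names bt_entry_value and bt_block_offset are made up for this illustration.]

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the bidirectional pool: offsets are relative to the pool
     * center (offset 0).  Surface states grow upward (positive offsets) and
     * binding-table blocks grow downward (negative offsets).
     */
    static uint32_t
    bt_entry_value(int32_t bt_block_offset, /* bottom of the BT block, < 0 */
                   uint32_t ss_offset)      /* surface state offset, >= 0  */
    {
       assert(bt_block_offset < 0);

       /* Surface state base address is re-programmed to the bottom of the
        * binding-table block, so every entry is biased by the distance from
        * that bottom back up to pool offset zero.
        */
       uint32_t state_offset = (uint32_t)-bt_block_offset;
       return ss_offset + state_offset;
    }

    int main(void)
    {
       /* A BT block allocated 64k below center, a surface state at +4096 */
       printf("binding table entry = 0x%x\n", bt_entry_value(-65536, 4096));
       return 0;
    }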
@@ -549,7 +698,9 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
 struct anv_state
 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
 {
-   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
+   struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
+   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
+                                 isl_dev->ss.size, isl_dev->ss.align);
 }
 
 struct anv_state
@@ -563,14 +714,13 @@ anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
 VkResult
 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_block_pool *block_pool =
-       &cmd_buffer->device->surface_state_block_pool;
-
-   int32_t *offset = anv_vector_add(&cmd_buffer->bt_blocks);
-   if (offset == NULL)
+   struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
+   if (bt_block == NULL) {
+      anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   }
 
-   *offset = anv_block_pool_alloc_back(block_pool);
+   *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
    cmd_buffer->bt_next = 0;
 
    return VK_SUCCESS;
@@ -602,16 +752,18 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
    anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                       GEN8_MI_BATCH_BUFFER_START_length * 4);
 
-   int success = anv_vector_init(&cmd_buffer->seen_bbos,
+   int success = u_vector_init(&cmd_buffer->seen_bbos,
                                  sizeof(struct anv_bo *),
                                  8 * sizeof(struct anv_bo *));
    if (!success)
       goto fail_batch_bo;
 
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
+   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
 
-   success = anv_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
-                             8 * sizeof(int32_t));
+   /* u_vector requires power-of-two size elements */
+   unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
+   success = u_vector_init(&cmd_buffer->bt_block_states,
+                           pow2_state_size, 8 * pow2_state_size);
    if (!success)
       goto fail_seen_bbos;
 
@@ -619,19 +771,18 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
                                 &cmd_buffer->pool->alloc);
    if (result != VK_SUCCESS)
       goto fail_bt_blocks;
+   cmd_buffer->last_ss_pool_center = 0;
 
-   anv_cmd_buffer_new_binding_table_block(cmd_buffer);
-
-   cmd_buffer->execbuf2.objects = NULL;
-   cmd_buffer->execbuf2.bos = NULL;
-   cmd_buffer->execbuf2.array_length = 0;
+   result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
+   if (result != VK_SUCCESS)
+      goto fail_bt_blocks;
 
    return VK_SUCCESS;
 
  fail_bt_blocks:
-   anv_vector_finish(&cmd_buffer->bt_blocks);
+   u_vector_finish(&cmd_buffer->bt_block_states);
  fail_seen_bbos:
-   anv_vector_finish(&cmd_buffer->seen_bbos);
+   u_vector_finish(&cmd_buffer->seen_bbos);
  fail_batch_bo:
    anv_batch_bo_destroy(batch_bo, cmd_buffer);
 
@@ -641,25 +792,20 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
 void
 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
 {
-   int32_t *bt_block;
-   anv_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
-      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
-                          *bt_block);
-   }
-   anv_vector_finish(&cmd_buffer->bt_blocks);
+   struct anv_state *bt_block;
+   u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
+      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
+   u_vector_finish(&cmd_buffer->bt_block_states);
 
    anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
 
-   anv_vector_finish(&cmd_buffer->seen_bbos);
+   u_vector_finish(&cmd_buffer->seen_bbos);
 
    /* Destroy all of the batch buffers */
    list_for_each_entry_safe(struct anv_batch_bo, bbo,
                             &cmd_buffer->batch_bos, link) {
       anv_batch_bo_destroy(bbo, cmd_buffer);
    }
-
-   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.objects);
-   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.bos);
 }
 
 void
@@ -678,21 +824,22 @@ anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
                       &cmd_buffer->batch,
                       GEN8_MI_BATCH_BUFFER_START_length * 4);
 
-   while (anv_vector_length(&cmd_buffer->bt_blocks) > 1) {
-      int32_t *bt_block = anv_vector_remove(&cmd_buffer->bt_blocks);
-      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
-                          *bt_block);
+   while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
+      struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
+      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
    }
-   assert(anv_vector_length(&cmd_buffer->bt_blocks) == 1);
+   assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
    cmd_buffer->bt_next = 0;
 
    cmd_buffer->surface_relocs.num_relocs = 0;
+   _mesa_set_clear(cmd_buffer->surface_relocs.deps, NULL);
+   cmd_buffer->last_ss_pool_center = 0;
 
    /* Reset the list of seen buffers */
    cmd_buffer->seen_bbos.head = 0;
    cmd_buffer->seen_bbos.tail = 0;
 
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
+   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
       anv_cmd_buffer_current_batch_bo(cmd_buffer);
 }
 
@@ -711,27 +858,25 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
       cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
       assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
 
-      anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);
+      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
 
       /* Round batch up to an even number of dwords. */
       if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
-         anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP);
+         anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
 
       cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
-   }
-
-   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
-
-   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+   } else {
+      assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
       /* If this is a secondary command buffer, we need to determine the
        * mode in which it will be executed with vkExecuteCommands.  We
        * determine this statically here so that this stays in sync with the
        * actual ExecuteCommands implementation.
        */
+      const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
       if (!cmd_buffer->device->can_chain_batches) {
          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
       } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
-          (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
+                 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
          /* If the secondary has exactly one batch buffer in its list *and*
           * that batch buffer is less than half of the maximum size, we're
           * probably better off simply copying it into our batch.
@@ -741,25 +886,36 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
                    VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
 
-         /* When we chain, we need to add an MI_BATCH_BUFFER_START command
-          * with its relocation.  In order to handle this we'll increment here
-          * so we can unconditionally decrement right before adding the
-          * MI_BATCH_BUFFER_START command.
+         /* In order to chain, we need this command buffer to contain an
+          * MI_BATCH_BUFFER_START which will jump back to the calling batch.
+          * It doesn't matter where it points now so long as it has a valid
+          * relocation.  We'll adjust it later as part of the chaining
+          * process.
+          *
+          * We set the end of the batch a little short so we could be sure we
+          * have room for the chaining command.  Since we're about to emit the
+          * chaining command, let's set it back where it should go.
           */
-         batch_bo->relocs.num_relocs++;
-         cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
+         cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
+         assert(cmd_buffer->batch.start == batch_bo->bo.map);
+         assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
+
+         emit_batch_buffer_start(cmd_buffer, &batch_bo->bo, 0);
+         assert(cmd_buffer->batch.start == batch_bo->bo.map);
       } else {
          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
       }
    }
+
+   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
 }
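
[Editor's note: for reference, a hedged sketch of the header dword that the chaining path above emits as a placeholder and that anv_batch_bo_link() later sanity-checks. MI commands carry command type 0 in bits 31:29 and MI_BATCH_BUFFER_START uses opcode 49 (0x31) in bits 28:23; the gen8 DWordLength of 1 follows from the command length (3 dwords) minus the length bias (2) computed earlier in this diff. The helper name below is made up for illustration.]

    #include <assert.h>
    #include <stdint.h>

    /* Build the first dword of an MI_BATCH_BUFFER_START the way the asserts
     * in anv_batch_bo_link() expect to find it (gen8+ layout).
     */
    static uint32_t
    mi_batch_buffer_start_header(void)
    {
       const uint32_t command_type = 0;   /* bits 31:29, MI command     */
       const uint32_t opcode       = 49;  /* bits 28:23, 0x31           */
       const uint32_t dword_length = 1;   /* 3 dwords total, bias of 2  */
       return (command_type << 29) | (opcode << 23) | dword_length;
    }

    int main(void)
    {
       uint32_t bb_start = mi_batch_buffer_start_header();
       assert(((bb_start >> 29) & 0x07) == 0);   /* command type: MI           */
       assert(((bb_start >> 23) & 0x3f) == 49);  /* opcode: BATCH_BUFFER_START */
       return 0;
    }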
 
-static inline VkResult
+static VkResult
 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                              struct list_head *list)
 {
    list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
-      struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
+      struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
       if (bbo_ptr == NULL)
          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -776,7 +932,6 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
    switch (secondary->exec_mode) {
    case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
       anv_batch_emit_batch(&primary->batch, &secondary->batch);
-      anv_cmd_buffer_emit_state_base_address(primary);
       break;
    case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
       struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
@@ -784,7 +939,6 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
       anv_batch_bo_grow(primary, bbo, &primary->batch, length,
                         GEN8_MI_BATCH_BUFFER_START_length * 4);
       anv_batch_emit_batch(&primary->batch, &secondary->batch);
-      anv_cmd_buffer_emit_state_base_address(primary);
       break;
    }
    case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
@@ -798,35 +952,13 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
       struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
       assert(primary->batch.start == this_bbo->bo.map);
       uint32_t offset = primary->batch.next - primary->batch.start;
-      const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;
 
-      /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
-       * can emit a new command and relocation for the current splice.  In
-       * order to handle the initial-use case, we incremented next and
-       * num_relocs in end_batch_buffer() so we can alyways just subtract
-       * here.
+      /* Make the tail of the secondary point back to right after the
+       * MI_BATCH_BUFFER_START in the primary batch.
        */
-      last_bbo->relocs.num_relocs--;
-      secondary->batch.next -= inst_size;
-      emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
-      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
+      anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
 
-      /* After patching up the secondary buffer, we need to clflush the
-       * modified instruction in case we're on a !llc platform. We use a
-       * little loop to handle the case where the instruction crosses a cache
-       * line boundary.
-       */
-      if (!primary->device->info.has_llc) {
-         void *inst = secondary->batch.next - inst_size;
-         void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
-         __builtin_ia32_mfence();
-         while (p < secondary->batch.next) {
-            __builtin_ia32_clflush(p);
-            p += CACHELINE_SIZE;
-         }
-      }
-
-      anv_cmd_buffer_emit_state_base_address(primary);
+      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
       break;
    }
    case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
@@ -850,8 +982,6 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
 
       anv_batch_bo_continue(last_bbo, &primary->batch,
                             GEN8_MI_BATCH_BUFFER_START_length * 4);
-
-      anv_cmd_buffer_emit_state_base_address(primary);
       break;
    }
    default:
@@ -862,165 +992,249 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                          &secondary->surface_relocs, 0);
 }
 
+struct anv_execbuf {
+   struct drm_i915_gem_execbuffer2           execbuf;
+
+   struct drm_i915_gem_exec_object2 *        objects;
+   uint32_t                                  bo_count;
+   struct anv_bo **                          bos;
+
+   /* Allocated length of the 'objects' and 'bos' arrays */
+   uint32_t                                  array_length;
+
+   bool                                      has_relocs;
+
+   uint32_t                                  fence_count;
+   uint32_t                                  fence_array_length;
+   struct drm_i915_gem_exec_fence *          fences;
+   struct anv_syncobj **                     syncobjs;
+};
+
+static void
+anv_execbuf_init(struct anv_execbuf *exec)
+{
+   memset(exec, 0, sizeof(*exec));
+}
+
+static void
+anv_execbuf_finish(struct anv_execbuf *exec,
+                   const VkAllocationCallbacks *alloc)
+{
+   vk_free(alloc, exec->objects);
+   vk_free(alloc, exec->bos);
+   vk_free(alloc, exec->fences);
+   vk_free(alloc, exec->syncobjs);
+}
+
+static int
+_compare_bo_handles(const void *_bo1, const void *_bo2)
+{
+   struct anv_bo * const *bo1 = _bo1;
+   struct anv_bo * const *bo2 = _bo2;
+
+   return (*bo1)->gem_handle - (*bo2)->gem_handle;
+}
+
+static VkResult
+anv_execbuf_add_bo_set(struct anv_execbuf *exec,
+                       struct set *deps,
+                       uint32_t extra_flags,
+                       const VkAllocationCallbacks *alloc);
+
 static VkResult
-anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
-                      struct anv_bo *bo,
-                      struct anv_reloc_list *relocs)
+anv_execbuf_add_bo(struct anv_execbuf *exec,
+                   struct anv_bo *bo,
+                   struct anv_reloc_list *relocs,
+                   uint32_t extra_flags,
+                   const VkAllocationCallbacks *alloc)
 {
    struct drm_i915_gem_exec_object2 *obj = NULL;
 
-   if (bo->index < cmd_buffer->execbuf2.bo_count &&
-       cmd_buffer->execbuf2.bos[bo->index] == bo)
-      obj = &cmd_buffer->execbuf2.objects[bo->index];
+   if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
+      obj = &exec->objects[bo->index];
 
    if (obj == NULL) {
       /* We've never seen this one before.  Add it to the list and assign
        * an id that we can use later.
        */
-      if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
-         uint32_t new_len = cmd_buffer->execbuf2.objects ?
-                            cmd_buffer->execbuf2.array_length * 2 : 64;
+      if (exec->bo_count >= exec->array_length) {
+         uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
 
          struct drm_i915_gem_exec_object2 *new_objects =
-            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_objects),
-                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+            vk_alloc(alloc, new_len * sizeof(*new_objects),
+                     8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
          if (new_objects == NULL)
             return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
          struct anv_bo **new_bos =
-            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_bos),
-                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-         if (new_objects == NULL) {
-            anv_free(&cmd_buffer->pool->alloc, new_objects);
+            vk_alloc(alloc, new_len * sizeof(*new_bos),
+                      8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+         if (new_bos == NULL) {
+            vk_free(alloc, new_objects);
             return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
          }
 
-         if (cmd_buffer->execbuf2.objects) {
-            memcpy(new_objects, cmd_buffer->execbuf2.objects,
-                   cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
-            memcpy(new_bos, cmd_buffer->execbuf2.bos,
-                   cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
+         if (exec->objects) {
+            memcpy(new_objects, exec->objects,
+                   exec->bo_count * sizeof(*new_objects));
+            memcpy(new_bos, exec->bos,
+                   exec->bo_count * sizeof(*new_bos));
          }
 
-         cmd_buffer->execbuf2.objects = new_objects;
-         cmd_buffer->execbuf2.bos = new_bos;
-         cmd_buffer->execbuf2.array_length = new_len;
+         vk_free(alloc, exec->objects);
+         vk_free(alloc, exec->bos);
+
+         exec->objects = new_objects;
+         exec->bos = new_bos;
+         exec->array_length = new_len;
       }
 
-      assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);
+      assert(exec->bo_count < exec->array_length);
 
-      bo->index = cmd_buffer->execbuf2.bo_count++;
-      obj = &cmd_buffer->execbuf2.objects[bo->index];
-      cmd_buffer->execbuf2.bos[bo->index] = bo;
+      bo->index = exec->bo_count++;
+      obj = &exec->objects[bo->index];
+      exec->bos[bo->index] = bo;
 
       obj->handle = bo->gem_handle;
       obj->relocation_count = 0;
       obj->relocs_ptr = 0;
       obj->alignment = 0;
       obj->offset = bo->offset;
-      obj->flags = bo->is_winsys_bo ? EXEC_OBJECT_WRITE : 0;
+      obj->flags = (bo->flags & ~ANV_BO_FLAG_MASK) | extra_flags;
       obj->rsvd1 = 0;
       obj->rsvd2 = 0;
    }
 
-   if (relocs != NULL && obj->relocation_count == 0) {
-      /* This is the first time we've ever seen a list of relocations for
-       * this BO.  Go ahead and set the relocations and then walk the list
-       * of relocations and add them all.
-       */
-      obj->relocation_count = relocs->num_relocs;
-      obj->relocs_ptr = (uintptr_t) relocs->relocs;
+   if (relocs != NULL) {
+      assert(obj->relocation_count == 0);
+
+      if (relocs->num_relocs > 0) {
+         /* This is the first time we've ever seen a list of relocations for
+          * this BO.  Go ahead and set the relocations and then walk the list
+          * of relocations and add them all.
+          */
+         exec->has_relocs = true;
+         obj->relocation_count = relocs->num_relocs;
+         obj->relocs_ptr = (uintptr_t) relocs->relocs;
+
+         for (size_t i = 0; i < relocs->num_relocs; i++) {
+            VkResult result;
 
-      for (size_t i = 0; i < relocs->num_relocs; i++) {
-         /* A quick sanity check on relocations */
-         assert(relocs->relocs[i].offset < bo->size);
-         anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
+            /* A quick sanity check on relocations */
+            assert(relocs->relocs[i].offset < bo->size);
+            result = anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
+                                        extra_flags, alloc);
+
+            if (result != VK_SUCCESS)
+               return result;
+         }
       }
+
+      return anv_execbuf_add_bo_set(exec, relocs->deps, extra_flags, alloc);
    }
 
    return VK_SUCCESS;
 }
 
-static void
-anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
-                              struct anv_reloc_list *list)
+/* Add BO dependencies to execbuf */
+static VkResult
+anv_execbuf_add_bo_set(struct anv_execbuf *exec,
+                       struct set *deps,
+                       uint32_t extra_flags,
+                       const VkAllocationCallbacks *alloc)
 {
-   struct anv_bo *bo;
+   if (!deps || deps->entries <= 0)
+      return VK_SUCCESS;
 
-   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
-    * struct drm_i915_gem_exec_object2 against the bos current offset and if
-    * all bos haven't moved it will skip relocation processing alltogether.
-    * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
-    * value of offset so we can set it either way.  For that to work we need
-    * to make sure all relocs use the same presumed offset.
-    */
+   const uint32_t entries = deps->entries;
+   struct anv_bo **bos =
+      vk_alloc(alloc, entries * sizeof(*bos),
+               8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+   if (bos == NULL)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   for (size_t i = 0; i < list->num_relocs; i++) {
-      bo = list->reloc_bos[i];
-      if (bo->offset != list->relocs[i].presumed_offset)
-         cmd_buffer->execbuf2.need_reloc = true;
+   struct anv_bo **bo = bos;
+   set_foreach(deps, entry) {
+      *bo++ = (void *)entry->key;
+   }
+
+   qsort(bos, entries, sizeof(struct anv_bo*), _compare_bo_handles);
 
-      list->relocs[i].target_handle = bo->index;
+   VkResult result = VK_SUCCESS;
+   for (bo = bos; bo < bos + entries; bo++) {
+      result = anv_execbuf_add_bo(exec, *bo, NULL, extra_flags, alloc);
+      if (result != VK_SUCCESS)
+         break;
    }
+
+   vk_free(alloc, bos);
+
+   return result;
 }
 
-static uint64_t
-read_reloc(const struct anv_device *device, const void *p)
+static VkResult
+anv_execbuf_add_syncobj(struct anv_execbuf *exec,
+                        uint32_t handle, uint32_t flags,
+                        const VkAllocationCallbacks *alloc)
 {
-   if (device->info.gen >= 8)
-      return *(uint64_t *)p;
-   else
-      return *(uint32_t *)p;
+   assert(flags != 0);
+
+   if (exec->fence_count >= exec->fence_array_length) {
+      uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
+
+      exec->fences = vk_realloc(alloc, exec->fences,
+                                new_len * sizeof(*exec->fences),
+                                8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+      if (exec->fences == NULL)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+      exec->fence_array_length = new_len;
+   }
+
+   exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
+      .handle = handle,
+      .flags = flags,
+   };
+
+   exec->fence_count++;
+
+   return VK_SUCCESS;
 }
 
 static void
-write_reloc(const struct anv_device *device, void *p, uint64_t v)
+anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
+                              struct anv_reloc_list *list)
 {
-   if (device->info.gen >= 8)
-      *(uint64_t *)p = v;
-   else
-      *(uint32_t *)p = v;
+   for (size_t i = 0; i < list->num_relocs; i++)
+      list->relocs[i].target_handle = list->reloc_bos[i]->index;
 }
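
[Editor's note: the inline read_reloc()/write_reloc() helpers are removed here, while the rest of this diff calls a four-argument write_reloc() that now lives elsewhere. A rough standalone sketch of its shape, based on the removed implementation just above; the flush argument is an assumption, simplified here to an unconditional clflush as would be useful on non-LLC platforms. write_reloc_sketch is a made-up name.]

    #include <stdint.h>

    /* Simplified stand-in for write_reloc(device, p, v, flush): gen8+
     * addresses are 64-bit, earlier gens 32-bit.  flush is assumed to force
     * a cacheline flush so the write is visible without LLC coherency.
     */
    static void
    write_reloc_sketch(int gen, void *p, uint64_t v, int flush)
    {
       if (gen >= 8)
          *(uint64_t *)p = v;
       else
          *(uint32_t *)p = (uint32_t)v;

       if (flush)
          __builtin_ia32_clflush(p);
    }

    int main(void)
    {
       uint64_t slot = 0;
       write_reloc_sketch(9, &slot, 0xdeadbeef00ull, 1);
       return slot != 0xdeadbeef00ull;
    }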
 
 static void
-adjust_relocations_from_block_pool(struct anv_block_pool *pool,
-                                   struct anv_reloc_list *relocs)
+adjust_relocations_from_state_pool(struct anv_state_pool *pool,
+                                   struct anv_reloc_list *relocs,
+                                   uint32_t last_pool_center_bo_offset)
 {
-   for (size_t i = 0; i < relocs->num_relocs; i++) {
-      /* In general, we don't know how stale the relocated value is.  It
-       * may have been used last time or it may not.  Since we don't want
-       * to stomp it while the GPU may be accessing it, we haven't updated
-       * it anywhere else in the code.  Instead, we just set the presumed
-       * offset to what it is now based on the delta and the data in the
-       * block pool.  Then the kernel will update it for us if needed.
-       */
-      assert(relocs->relocs[i].offset < pool->state.end);
-      const void *p = pool->map + relocs->relocs[i].offset;
-
-      /* We're reading back the relocated value from potentially incoherent
-       * memory here. However, any change to the value will be from the kernel
-       * writing out relocations, which will keep the CPU cache up to date.
-       */
-      relocs->relocs[i].presumed_offset =
-         read_reloc(pool->device, p) - relocs->relocs[i].delta;
+   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
+   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
 
+   for (size_t i = 0; i < relocs->num_relocs; i++) {
       /* All of the relocations from this block pool to other BO's should
        * have been emitted relative to the surface block pool center.  We
        * need to add the center offset to make them relative to the
        * beginning of the actual GEM bo.
        */
-      relocs->relocs[i].offset += pool->center_bo_offset;
+      relocs->relocs[i].offset += delta;
    }
 }
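
[Editor's note: a small illustration with toy values, not driver code, of the adjustment performed above and in adjust_relocations_to_state_pool() below. Relocation offsets are recorded relative to the pool center; as the pool grows downward the center moves further from the start of the GEM BO, so on each submission only the delta since the last recorded center needs to be added. adjust_offset is a made-up name.]

    #include <assert.h>
    #include <stdint.h>

    /* Relocation offset bookkeeping for a pool whose center moves as the
     * pool grows downward.  reloc_offset starts out relative to the center;
     * after each call it is relative to the current start of the GEM BO.
     */
    static void
    adjust_offset(uint32_t *reloc_offset,
                  uint32_t center_bo_offset,
                  uint32_t *last_center_bo_offset)
    {
       assert(*last_center_bo_offset <= center_bo_offset);
       *reloc_offset += center_bo_offset - *last_center_bo_offset;
       *last_center_bo_offset = center_bo_offset;
    }

    int main(void)
    {
       uint32_t reloc_offset = 256;  /* emitted relative to the pool center */
       uint32_t last_center = 0;

       adjust_offset(&reloc_offset, 4096, &last_center);  /* first submit   */
       assert(reloc_offset == 256 + 4096);

       adjust_offset(&reloc_offset, 8192, &last_center);  /* pool grew down */
       assert(reloc_offset == 256 + 8192);
       return 0;
    }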
 
 static void
-adjust_relocations_to_block_pool(struct anv_block_pool *pool,
+adjust_relocations_to_state_pool(struct anv_state_pool *pool,
                                  struct anv_bo *from_bo,
                                  struct anv_reloc_list *relocs,
-                                 uint32_t *last_pool_center_bo_offset)
+                                 uint32_t last_pool_center_bo_offset)
 {
-   assert(*last_pool_center_bo_offset <= pool->center_bo_offset);
-   uint32_t delta = pool->center_bo_offset - *last_pool_center_bo_offset;
+   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
+   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
 
    /* When we initially emit relocations into a block pool, we don't
     * actually know what the final center_bo_offset will be so we just emit
@@ -1029,7 +1243,7 @@ adjust_relocations_to_block_pool(struct anv_block_pool *pool,
     * relocations that point to the pool bo with the correct offset.
     */
    for (size_t i = 0; i < relocs->num_relocs; i++) {
-      if (relocs->reloc_bos[i] == &pool->bo) {
+      if (relocs->reloc_bos[i] == pool->block_pool.bo) {
          /* Adjust the delta value in the relocation to correctly
           * correspond to the new delta.  Initially, this value may have
           * been negative (if treated as unsigned), but we trust in
@@ -1043,39 +1257,208 @@ adjust_relocations_to_block_pool(struct anv_block_pool *pool,
           * use by the GPU at the moment.
           */
          assert(relocs->relocs[i].offset < from_bo->size);
-         write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
+         write_reloc(pool->block_pool.device,
+                     from_bo->map + relocs->relocs[i].offset,
                      relocs->relocs[i].presumed_offset +
-                     relocs->relocs[i].delta);
+                     relocs->relocs[i].delta, false);
       }
    }
+}
 
-   *last_pool_center_bo_offset = pool->center_bo_offset;
+static void
+anv_reloc_list_apply(struct anv_device *device,
+                     struct anv_reloc_list *list,
+                     struct anv_bo *bo,
+                     bool always_relocate)
+{
+   for (size_t i = 0; i < list->num_relocs; i++) {
+      struct anv_bo *target_bo = list->reloc_bos[i];
+      if (list->relocs[i].presumed_offset == target_bo->offset &&
+          !always_relocate)
+         continue;
+
+      void *p = bo->map + list->relocs[i].offset;
+      write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
+      list->relocs[i].presumed_offset = target_bo->offset;
+   }
 }
 
-void
-anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
+/**
+ * This function applies the relocation for a command buffer and writes the
+ * actual addresses into the buffers as per what we were told by the kernel on
+ * the previous execbuf2 call.  This should be safe to do because, for each
+ * relocated address, we have two cases:
+ *
+ *  1) The target BO is inactive (as seen by the kernel).  In this case, it is
+ *     not in use by the GPU so updating the address is 100% ok.  It won't be
+ *     in-use by the GPU (from our context) again until the next execbuf2
+ *     happens.  If the kernel decides to move it in the next execbuf2, it
+ *     will have to do the relocations itself, but that's ok because it should
+ *     have all of the information needed to do so.
+ *
+ *  2) The target BO is active (as seen by the kernel).  In this case, it
+ *     hasn't moved since the last execbuffer2 call because GTT shuffling
+ *     *only* happens when the BO is idle. (From our perspective, it only
+ *     happens inside the execbuffer2 ioctl, but the shuffling may be
+ *     triggered by another ioctl; with full-ppgtt this is limited to
+ *     execbuffer2 ioctls on the same context, or to memory pressure.)  Since the
+ *     target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
+ *     address and the relocated value we are writing into the BO will be the
+ *     same as the value that is already there.
+ *
+ *     There is also a possibility that the target BO is active but the exact
+ *     RENDER_SURFACE_STATE object we are writing the relocation into isn't in
+ *     use.  In this case, the address currently in the RENDER_SURFACE_STATE
+ *     may be stale but it's still safe to write the relocation because that
+ *     particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
+ *     won't be until the next execbuf2 call.
+ *
+ * By doing relocations on the CPU, we can tell the kernel that it doesn't
+ * need to bother.  We want to do this because the surface state buffer is
+ * used by every command buffer so, if the kernel does the relocations, it
+ * will always be busy and the kernel will always stall.  This is also
+ * probably the fastest mechanism for doing relocations since the kernel would
+ * have to make a full copy of all the relocation lists.
+ */
+static bool
+relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
+                    struct anv_execbuf *exec)
+{
+   if (!exec->has_relocs)
+      return true;
+
+   static int userspace_relocs = -1;
+   if (userspace_relocs < 0)
+      userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
+   if (!userspace_relocs)
+      return false;
+
+   /* First, we have to check to see whether or not we can even do the
+    * relocation.  New buffers which have never been submitted to the kernel
+    * don't have a valid offset so we need to let the kernel do relocations so
+    * that we can get offsets for them.  On future execbuf2 calls, those
+    * buffers will have offsets and we will be able to skip relocating.
+    * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
+    */
+   for (uint32_t i = 0; i < exec->bo_count; i++) {
+      if (exec->bos[i]->offset == (uint64_t)-1)
+         return false;
+   }
+
+   /* Since surface states are shared between command buffers and we don't
+    * know what order they will be submitted to the kernel, we don't know
+    * what address is actually written in the surface state object at any
+    * given time.  The only option is to always relocate them.
+    */
+   anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
+                        cmd_buffer->device->surface_state_pool.block_pool.bo,
+                        true /* always relocate surface states */);
+
+   /* Since we own all of the batch buffers, we know what values are stored
+    * in the relocated addresses and only have to update them if the offsets
+    * have changed.
+    */
+   struct anv_batch_bo **bbo;
+   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+      anv_reloc_list_apply(cmd_buffer->device,
+                           &(*bbo)->relocs, &(*bbo)->bo, false);
+   }
+
+   for (uint32_t i = 0; i < exec->bo_count; i++)
+      exec->objects[i].offset = exec->bos[i]->offset;
+
+   return true;
+}
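
[Editor's note: to make the case analysis above concrete, a minimal standalone sketch of the skip-or-patch decision it justifies: if a target BO's current offset still matches the presumed offset recorded in the relocation, nothing needs to be written; otherwise the address is patched on the CPU and the presumed offset updated. Setting ANV_USERSPACE_RELOCS=false, checked via env_var_as_boolean() above, falls back to kernel-side relocations. The toy_bo/toy_reloc types and apply_reloc helper are made up for this illustration.]

    #include <stdint.h>
    #include <stdio.h>

    struct toy_bo    { uint64_t offset; uint64_t map[16]; };
    struct toy_reloc { uint32_t offset; uint32_t delta; uint64_t presumed_offset; };

    /* CPU-side relocation in the spirit of relocate_cmd_buffer(): skip BOs
     * that have not moved since the presumed offset was recorded, otherwise
     * patch the address in place and remember the new offset.
     */
    static void
    apply_reloc(struct toy_bo *batch, struct toy_reloc *r, struct toy_bo *target)
    {
       if (r->presumed_offset == target->offset)
          return;  /* address already correct, kernel can skip it too */

       batch->map[r->offset / sizeof(uint64_t)] = target->offset + r->delta;
       r->presumed_offset = target->offset;
    }

    int main(void)
    {
       struct toy_bo batch = { .offset = 0x100000 };
       struct toy_bo target = { .offset = 0x200000 };
       struct toy_reloc r = { .offset = 8, .delta = 64, .presumed_offset = 0 };

       apply_reloc(&batch, &r, &target);
       printf("patched address = 0x%llx\n",
              (unsigned long long)batch.map[1]);
       return 0;
    }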
+
+static VkResult
+setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
+                             struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_batch *batch = &cmd_buffer->batch;
-   struct anv_block_pool *ss_pool =
-      &cmd_buffer->device->surface_state_block_pool;
+   struct anv_state_pool *ss_pool =
+      &cmd_buffer->device->surface_state_pool;
 
-   cmd_buffer->execbuf2.bo_count = 0;
-   cmd_buffer->execbuf2.need_reloc = false;
+   adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
+                                      cmd_buffer->last_ss_pool_center);
+   VkResult result;
+   struct anv_bo *bo;
+   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+      anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
+         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
+                                     &cmd_buffer->device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+      }
+      /* Add surface dependencies (BOs) to the execbuf */
+      anv_execbuf_add_bo_set(execbuf, cmd_buffer->surface_relocs.deps, 0,
+                             &cmd_buffer->device->alloc);
+
+      /* Add the BOs for all memory objects */
+      list_for_each_entry(struct anv_device_memory, mem,
+                          &cmd_buffer->device->memory_objects, link) {
+         result = anv_execbuf_add_bo(execbuf, mem->bo, NULL, 0,
+                                     &cmd_buffer->device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+      }
 
-   adjust_relocations_from_block_pool(ss_pool, &cmd_buffer->surface_relocs);
-   anv_cmd_buffer_add_bo(cmd_buffer, &ss_pool->bo, &cmd_buffer->surface_relocs);
+      struct anv_block_pool *pool;
+      pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
+      anv_block_pool_foreach_bo(bo, pool) {
+         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
+                                     &cmd_buffer->device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+      }
+
+      pool = &cmd_buffer->device->instruction_state_pool.block_pool;
+      anv_block_pool_foreach_bo(bo, pool) {
+         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
+                                     &cmd_buffer->device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+      }
+
+      pool = &cmd_buffer->device->binding_table_pool.block_pool;
+      anv_block_pool_foreach_bo(bo, pool) {
+         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
+                                     &cmd_buffer->device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+      }
+   } else {
+      /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
+       * will get added automatically by processing relocations on the batch
+       * buffer.  We have to add the surface state BO manually because it has
+       * relocations of its own that we need to be sure are processsed.
+       */
+      result = anv_execbuf_add_bo(execbuf, ss_pool->block_pool.bo,
+                                  &cmd_buffer->surface_relocs, 0,
+                                  &cmd_buffer->device->alloc);
+      if (result != VK_SUCCESS)
+         return result;
+   }
 
    /* First, we walk over all of the bos we've seen and add them and their
     * relocations to the validate list.
     */
    struct anv_batch_bo **bbo;
-   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
-      adjust_relocations_to_block_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
-                                       &(*bbo)->last_ss_pool_bo_offset);
+   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+      adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
+                                       cmd_buffer->last_ss_pool_center);
 
-      anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
+      result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
+                                  &cmd_buffer->device->alloc);
+      if (result != VK_SUCCESS)
+         return result;
    }
 
+   /* Now that we've adjusted all of the surface state relocations, we need to
+    * record the surface state pool center so future executions of the command
+    * buffer can adjust correctly.
+    */
+   cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
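+   /* (adjust_relocations_from_state_pool() at the top of this function
+    * consumes this value on the next submission, so the surface state
+    * relocations can be adjusted for any growth of the pool since this
+    * execbuf.)
+    */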
+
    struct anv_batch_bo *first_batch_bo =
       list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
 
@@ -1084,55 +1467,360 @@ anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
     * corresponding to the first batch_bo in the chain with the last
     * element in the list.
     */
-   if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
+   if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
       uint32_t idx = first_batch_bo->bo.index;
-      uint32_t last_idx = cmd_buffer->execbuf2.bo_count - 1;
+      uint32_t last_idx = execbuf->bo_count - 1;
 
-      struct drm_i915_gem_exec_object2 tmp_obj =
-         cmd_buffer->execbuf2.objects[idx];
-      assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);
+      struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
+      assert(execbuf->bos[idx] == &first_batch_bo->bo);
 
-      cmd_buffer->execbuf2.objects[idx] = cmd_buffer->execbuf2.objects[last_idx];
-      cmd_buffer->execbuf2.bos[idx] = cmd_buffer->execbuf2.bos[last_idx];
-      cmd_buffer->execbuf2.bos[idx]->index = idx;
+      execbuf->objects[idx] = execbuf->objects[last_idx];
+      execbuf->bos[idx] = execbuf->bos[last_idx];
+      execbuf->bos[idx]->index = idx;
 
-      cmd_buffer->execbuf2.objects[last_idx] = tmp_obj;
-      cmd_buffer->execbuf2.bos[last_idx] = &first_batch_bo->bo;
+      execbuf->objects[last_idx] = tmp_obj;
+      execbuf->bos[last_idx] = &first_batch_bo->bo;
       first_batch_bo->bo.index = last_idx;
    }
 
+   /* If we are pinning our BOs, we shouldn't have to relocate anything */
+   if (cmd_buffer->device->instance->physicalDevice.use_softpin)
+      assert(!execbuf->has_relocs);
+
    /* Now we go through and fixup all of the relocation lists to point to
     * the correct indices in the object array.  We have to do this after we
     * reorder the list above as some of the indices may have changed.
     */
-   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
-      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
+   if (execbuf->has_relocs) {
+      u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
+         anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
 
-   anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
+      anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
+   }
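+
+   /* Batch BO maps are not coherent with the GPU on non-LLC platforms, so
+    * the command packets written into the batch BOs while recording have to
+    * be flushed out of the CPU caches before submission; the mfence orders
+    * those stores ahead of the clflushes.
+    */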
 
    if (!cmd_buffer->device->info.has_llc) {
       __builtin_ia32_mfence();
-      anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+      u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
          for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
             __builtin_ia32_clflush((*bbo)->bo.map + i);
       }
    }
 
-   cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
-      .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
-      .buffer_count = cmd_buffer->execbuf2.bo_count,
+   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
+      .buffers_ptr = (uintptr_t) execbuf->objects,
+      .buffer_count = execbuf->bo_count,
       .batch_start_offset = 0,
       .batch_len = batch->next - batch->start,
       .cliprects_ptr = 0,
       .num_cliprects = 0,
       .DR1 = 0,
       .DR4 = 0,
-      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
-               I915_EXEC_CONSTANTS_REL_GENERAL,
+      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
       .rsvd1 = cmd_buffer->device->context_id,
       .rsvd2 = 0,
    };
 
-   if (!cmd_buffer->execbuf2.need_reloc)
-      cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
+   if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
+      /* If we were able to successfully relocate everything, tell the kernel
+       * that it can skip doing relocations.  The requirements for using
+       * NO_RELOC are:
+       *
+       *  1) The addresses written in the objects must match the corresponding
+       *     reloc.presumed_offset which in turn must match the corresponding
+       *     execobject.offset.
+       *
+       *  2) To avoid stalling, execobject.offset should match the current
+       *     address of that object within the active context.
+       *
+       * In order to satisfy all of the invariants that make userspace
+       * relocations safe (see relocate_cmd_buffer()), we need to
+       * further ensure that the addresses we use match those used by the
+       * kernel for the most recent execbuf2.
+       *
+       * The kernel may still choose to do relocations anyway if something has
+       * moved in the GTT. In this case, the relocation list still needs to be
+       * valid.  All relocations on the batch buffers are already valid and
+       * kept up-to-date.  For surface state relocations, by applying the
+       * relocations in relocate_cmd_buffer, we ensured that the address in
+       * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
+       * safe for the kernel to relocate them as needed.
+       */
+      execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
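+
+      /* In sketch form: for each relocation r attached to bos[i],
+       * relocate_cmd_buffer() has arranged that
+       *
+       *    *(uint64_t *)(bos[i]->map + r.offset) == r.presumed_offset + r.delta
+       *    r.presumed_offset                     == bos[r.target_handle]->offset
+       *
+       * (with HANDLE_LUT, target_handle is an index into the object list).
+       * That covers requirement 1); requirement 2) is covered by reusing the
+       * offsets from the most recent execbuf2.
+       */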
+   } else {
+      /* In the case where we fall back to doing kernel relocations, we need
+       * to ensure that the relocation list is valid.  All relocations on the
+       * batch buffers are already valid and kept up-to-date.  Since surface
+       * states are shared between command buffers and we don't know what
+       * order they will be submitted to the kernel, we don't know what
+       * address is actually written in the surface state object at any given
+       * time.  The only option is to set a bogus presumed offset and let the
+       * kernel relocate them.
+       */
+      for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
+         cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
+   }
+
+   return VK_SUCCESS;
+}
+
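+/* Used when a submission carries no command buffer (a fence- or
+ * semaphore-only vkQueueSubmit): the kernel still needs a batch to execute
+ * before it signals anything, so the device's trivial batch is submitted,
+ * which is just MI_BATCH_BUFFER_END followed by a NOOP.
+ */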
+static VkResult
+setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
+{
+   VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
+                                        NULL, 0, &device->alloc);
+   if (result != VK_SUCCESS)
+      return result;
+
+   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
+      .buffers_ptr = (uintptr_t) execbuf->objects,
+      .buffer_count = execbuf->bo_count,
+      .batch_start_offset = 0,
+      .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
+      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+      .rsvd1 = device->context_id,
+      .rsvd2 = 0,
+   };
+
+   return VK_SUCCESS;
+}
+
+VkResult
+anv_cmd_buffer_execbuf(struct anv_device *device,
+                       struct anv_cmd_buffer *cmd_buffer,
+                       const VkSemaphore *in_semaphores,
+                       uint32_t num_in_semaphores,
+                       const VkSemaphore *out_semaphores,
+                       uint32_t num_out_semaphores,
+                       VkFence _fence)
+{
+   ANV_FROM_HANDLE(anv_fence, fence, _fence);
+   UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+
+   struct anv_execbuf execbuf;
+   anv_execbuf_init(&execbuf);
+
+   int in_fence = -1;
+   VkResult result = VK_SUCCESS;
+   for (uint32_t i = 0; i < num_in_semaphores; i++) {
+      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
+      struct anv_semaphore_impl *impl =
+         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
+         &semaphore->temporary : &semaphore->permanent;
+
+      switch (impl->type) {
+      case ANV_SEMAPHORE_TYPE_BO:
+         assert(!pdevice->has_syncobj);
+         result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
+                                     0, &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
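+      /* execbuf2 accepts at most one input fence fd, so when several sync
+       * file semaphores are waited on in a single submit they are merged
+       * into one fd (anv_gem_sync_file_merge presumably wraps SYNC_IOC_MERGE)
+       * and the originals are closed along the way.
+       */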
+      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
+         assert(!pdevice->has_syncobj);
+         if (in_fence == -1) {
+            in_fence = impl->fd;
+         } else {
+            int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
+            if (merge == -1)
+               return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+
+            close(impl->fd);
+            close(in_fence);
+            in_fence = merge;
+         }
+
+         impl->fd = -1;
+         break;
+
+      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
+         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
+                                          I915_EXEC_FENCE_WAIT,
+                                          &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
+      default:
+         break;
+      }
+   }
+
+   bool need_out_fence = false;
+   for (uint32_t i = 0; i < num_out_semaphores; i++) {
+      ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
+
+      /* Under most circumstances, out fences won't be temporary.  However,
+       * the spec does allow it for opaque_fd.  From the Vulkan 1.0.53 spec:
+       *
+       *    "If the import is temporary, the implementation must restore the
+       *    semaphore to its prior permanent state after submitting the next
+       *    semaphore wait operation."
+       *
+       * The spec says nothing whatsoever about signal operations on
+       * temporarily imported semaphores, so it appears they are allowed.
+       * There are also CTS tests that require this to work.
+       */
+      struct anv_semaphore_impl *impl =
+         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
+         &semaphore->temporary : &semaphore->permanent;
+
+      switch (impl->type) {
+      case ANV_SEMAPHORE_TYPE_BO:
+         assert(!pdevice->has_syncobj);
+         result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
+                                     EXEC_OBJECT_WRITE, &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
+      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
+         assert(!pdevice->has_syncobj);
+         need_out_fence = true;
+         break;
+
+      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
+         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
+                                          I915_EXEC_FENCE_SIGNAL,
+                                          &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
+      default:
+         break;
+      }
+   }
+
+   if (fence) {
+      /* Under most circumstances, out fences won't be temporary.  However,
+       * the spec does allow it for opaque_fd.  From the Vulkan 1.0.53 spec:
+       *
+       *    "If the import is temporary, the implementation must restore the
+       *    semaphore to its prior permanent state after submitting the next
+       *    semaphore wait operation."
+       *
+       * The quoted text is about semaphores, but the same reasoning applies
+       * to fences: the spec says nothing whatsoever about signal operations
+       * on temporarily imported payloads, so it appears they are allowed.
+       * There are also CTS tests that require this to work.
+       */
+      struct anv_fence_impl *impl =
+         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
+         &fence->temporary : &fence->permanent;
+
+      switch (impl->type) {
+      case ANV_FENCE_TYPE_BO:
+         assert(!pdevice->has_syncobj_wait);
+         result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
+                                     EXEC_OBJECT_WRITE, &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
+      case ANV_FENCE_TYPE_SYNCOBJ:
+         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
+                                          I915_EXEC_FENCE_SIGNAL,
+                                          &device->alloc);
+         if (result != VK_SUCCESS)
+            return result;
+         break;
+
+      default:
+         unreachable("Invalid fence type");
+      }
+   }
+
+   if (cmd_buffer) {
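+      /* With INTEL_DEBUG=bat the batch is decoded and printed before it is
+       * submitted; cmd_buffer_being_decoded presumably lets the decoder's
+       * BO-lookup callback resolve addresses that live in this command
+       * buffer's other BOs.
+       */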
+      if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
+         struct anv_batch_bo **bo = u_vector_head(&cmd_buffer->seen_bbos);
+
+         device->cmd_buffer_being_decoded = cmd_buffer;
+         gen_print_batch(&device->decoder_ctx, (*bo)->bo.map,
+                         (*bo)->bo.size, (*bo)->bo.offset, false);
+         device->cmd_buffer_being_decoded = NULL;
+      }
+
+      result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
+   } else {
+      result = setup_empty_execbuf(&execbuf, device);
+   }
+
+   if (result != VK_SUCCESS)
+      return result;
+
+   if (execbuf.fence_count > 0) {
+      assert(device->instance->physicalDevice.has_syncobj);
+      execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
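+      /* The kernel reuses the otherwise-unused cliprects fields to carry the
+       * drm_i915_gem_exec_fence array when I915_EXEC_FENCE_ARRAY is set.
+       */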
+      execbuf.execbuf.num_cliprects = execbuf.fence_count;
+      execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
+   }
+
+   if (in_fence != -1) {
+      execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
+      execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
+   }
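+
+   /* execbuf2 has no dedicated fence-fd fields: with I915_EXEC_FENCE_IN the
+    * kernel reads the input sync file fd from the low 32 bits of rsvd2, and
+    * with I915_EXEC_FENCE_OUT it returns a new sync file fd in the high 32
+    * bits, which is read back further down.
+    */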
+
+   if (need_out_fence)
+      execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
+
+   result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
+
+   /* Execbuf does not consume the in_fence.  It's our job to close it. */
+   if (in_fence != -1)
+      close(in_fence);
+
+   for (uint32_t i = 0; i < num_in_semaphores; i++) {
+      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
+      /* From the Vulkan 1.0.53 spec:
+       *
+       *    "If the import is temporary, the implementation must restore the
+       *    semaphore to its prior permanent state after submitting the next
+       *    semaphore wait operation."
+       *
+       * This has to happen after the execbuf in case we close any syncobjs in
+       * the process.
+       */
+      anv_semaphore_reset_temporary(device, semaphore);
+   }
+
+   if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
+      assert(!pdevice->has_syncobj_wait);
+      /* BO fences can't be shared, so they can't be temporary. */
+      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
+
+      /* Once the execbuf has returned, we need to set the fence state to
+       * SUBMITTED.  We can't do this before calling execbuf because
+       * anv_GetFenceStatus does take the global device lock before checking
+       * fence->state.
+       *
+       * We set the fence state to SUBMITTED regardless of whether or not the
+       * execbuf succeeds because we need to ensure that vkWaitForFences() and
+       * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
+       * VK_SUCCESS) in a finite amount of time even if execbuf fails.
+       */
+      fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
+   }
+
+   if (result == VK_SUCCESS && need_out_fence) {
+      assert(!pdevice->has_syncobj_wait);
+      int out_fence = execbuf.execbuf.rsvd2 >> 32;
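+
+      /* Each sync file out semaphore takes its own dup() of this fd so it
+       * owns an independently closeable handle; the fd returned by the
+       * kernel is closed once the copies have been handed out.
+       */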
+      for (uint32_t i = 0; i < num_out_semaphores; i++) {
+         ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
+         /* Out fences can't have temporary state because that would imply
+          * that we imported a sync file and are trying to signal it.
+          */
+         assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
+         struct anv_semaphore_impl *impl = &semaphore->permanent;
+
+         if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
+            assert(impl->fd == -1);
+            impl->fd = dup(out_fence);
+         }
+      }
+      close(out_fence);
+   }
+
+   anv_execbuf_finish(&execbuf, &device->alloc);
+
+   return result;
 }