* Functions related to anv_reloc_list
*-----------------------------------------------------------------------*/
+VkResult
+anv_reloc_list_init(struct anv_reloc_list *list,
+ const VkAllocationCallbacks *alloc)
+{
+ memset(list, 0, sizeof(*list));
+ return VK_SUCCESS;
+}
+
static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
const VkAllocationCallbacks *alloc,
const struct anv_reloc_list *other_list)
{
- if (other_list) {
- list->num_relocs = other_list->num_relocs;
- list->array_length = other_list->array_length;
- } else {
- list->num_relocs = 0;
- list->array_length = 256;
- }
-
- list->relocs =
- vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-
- if (list->relocs == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- list->reloc_bos =
- vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ list->num_relocs = other_list->num_relocs;
+ list->array_length = other_list->array_length;
+
+ if (list->num_relocs > 0) {
+ list->relocs =
+ vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (list->relocs == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- if (list->reloc_bos == NULL) {
- vk_free(alloc, list->relocs);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
+ list->reloc_bos =
+ vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (list->reloc_bos == NULL) {
+ vk_free(alloc, list->relocs);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
- if (other_list) {
memcpy(list->relocs, other_list->relocs,
list->array_length * sizeof(*list->relocs));
memcpy(list->reloc_bos, other_list->reloc_bos,
list->array_length * sizeof(*list->reloc_bos));
+ } else {
+ list->relocs = NULL;
+ list->reloc_bos = NULL;
}
- return VK_SUCCESS;
-}
+ list->dep_words = other_list->dep_words;
-VkResult
-anv_reloc_list_init(struct anv_reloc_list *list,
- const VkAllocationCallbacks *alloc)
-{
- return anv_reloc_list_init_clone(list, alloc, NULL);
+ if (list->dep_words > 0) {
+ list->deps =
+ vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (list->deps == NULL) {
+         vk_free(alloc, list->reloc_bos);
+         vk_free(alloc, list->relocs);
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+      memcpy(list->deps, other_list->deps,
+             list->dep_words * sizeof(BITSET_WORD));
+ } else {
+ list->deps = NULL;
+ }
+
+ return VK_SUCCESS;
}
void
{
vk_free(alloc, list->relocs);
vk_free(alloc, list->reloc_bos);
+ vk_free(alloc, list->deps);
}
static VkResult
if (list->num_relocs + num_additional_relocs <= list->array_length)
return VK_SUCCESS;
- size_t new_length = list->array_length * 2;
+ size_t new_length = MAX2(16, list->array_length * 2);
while (new_length < list->num_relocs + num_additional_relocs)
new_length *= 2;
struct drm_i915_gem_relocation_entry *new_relocs =
- vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ vk_realloc(alloc, list->relocs,
+ new_length * sizeof(*list->relocs), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (new_relocs == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ list->relocs = new_relocs;
struct anv_bo **new_reloc_bos =
- vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (new_reloc_bos == NULL) {
- vk_free(alloc, new_relocs);
+ vk_realloc(alloc, list->reloc_bos,
+ new_length * sizeof(*list->reloc_bos), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (new_reloc_bos == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
+ list->reloc_bos = new_reloc_bos;
- memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
- memcpy(new_reloc_bos, list->reloc_bos,
- list->num_relocs * sizeof(*list->reloc_bos));
+ list->array_length = new_length;
- vk_free(alloc, list->relocs);
- vk_free(alloc, list->reloc_bos);
+ return VK_SUCCESS;
+}
- list->array_length = new_length;
- list->relocs = new_relocs;
- list->reloc_bos = new_reloc_bos;
+static VkResult
+anv_reloc_list_grow_deps(struct anv_reloc_list *list,
+ const VkAllocationCallbacks *alloc,
+ uint32_t min_num_words)
+{
+ if (min_num_words <= list->dep_words)
+ return VK_SUCCESS;
+
+ uint32_t new_length = MAX2(32, list->dep_words * 2);
+ while (new_length < min_num_words)
+ new_length *= 2;
+
+ BITSET_WORD *new_deps =
+ vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (new_deps == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ list->deps = new_deps;
+
+ /* Zero out the new data */
+ memset(list->deps + list->dep_words, 0,
+ (new_length - list->dep_words) * sizeof(BITSET_WORD));
+ list->dep_words = new_length;
return VK_SUCCESS;
}
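/* Editor's sketch (not part of the patch): with softpin, a dependency is
 * tracked as one bit per GEM handle.  Handle h lives in word
 * h / BITSET_WORDBITS at bit h % BITSET_WORDBITS, which is why the caller
 * (anv_reloc_list_add, below) grows the list to
 * (handle / BITSET_WORDBITS) + 1 words before calling BITSET_SET.
 * A minimal stand-alone version, assuming 32-bit words as in Mesa's
 * util/bitset.h:
 */
#include <stdint.h>

#define SKETCH_WORDBITS 32u

static inline void
sketch_mark_dep(uint32_t *deps, uint32_t gem_handle)
{
   deps[gem_handle / SKETCH_WORDBITS] |= 1u << (gem_handle % SKETCH_WORDBITS);
}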
+#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
+
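/* Editor's note: READ_ONCE forces exactly one volatile load of the bo's
 * offset.  Another thread may update anv_bo::offset (e.g. after an
 * execbuf), so anv_reloc_list_add below snapshots it once and uses the
 * same value for both *address_u64_out and entry->presumed_offset.
 * A hedged sketch of the pattern, assuming an aligned 64-bit field:
 */
#include <stdint.h>

static inline uint64_t
sketch_snapshot_u64(const uint64_t *p)
{
   uint64_t snap = *(volatile const uint64_t *)p; /* single load */
   return snap; /* callers reuse the snapshot, never re-read *p */
}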
VkResult
anv_reloc_list_add(struct anv_reloc_list *list,
const VkAllocationCallbacks *alloc,
- uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
+ uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
+ uint64_t *address_u64_out)
{
struct drm_i915_gem_relocation_entry *entry;
int index;
+ struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
+ uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
+ if (address_u64_out)
+ *address_u64_out = target_bo_offset + delta;
+
+ if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
+ assert(!target_bo->is_wrapper);
+ uint32_t idx = unwrapped_target_bo->gem_handle;
+      VkResult result = anv_reloc_list_grow_deps(list, alloc,
+                                                 (idx / BITSET_WORDBITS) + 1);
+      if (result != VK_SUCCESS)
+         return result;
+ BITSET_SET(list->deps, unwrapped_target_bo->gem_handle);
+ return VK_SUCCESS;
+ }
+
VkResult result = anv_reloc_list_grow(list, alloc, 1);
if (result != VK_SUCCESS)
return result;
index = list->num_relocs++;
list->reloc_bos[index] = target_bo;
entry = &list->relocs[index];
- entry->target_handle = target_bo->gem_handle;
+ entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
entry->delta = delta;
entry->offset = offset;
- entry->presumed_offset = target_bo->offset;
+ entry->presumed_offset = target_bo_offset;
entry->read_domains = 0;
entry->write_domain = 0;
VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
return VK_SUCCESS;
}
+static void
+anv_reloc_list_clear(struct anv_reloc_list *list)
+{
+ list->num_relocs = 0;
+ if (list->dep_words > 0)
+ memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
+}
+
static VkResult
anv_reloc_list_append(struct anv_reloc_list *list,
const VkAllocationCallbacks *alloc,
if (result != VK_SUCCESS)
return result;
- memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
- other->num_relocs * sizeof(other->relocs[0]));
- memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
- other->num_relocs * sizeof(other->reloc_bos[0]));
+ if (other->num_relocs > 0) {
+ memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
+ other->num_relocs * sizeof(other->relocs[0]));
+ memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
+ other->num_relocs * sizeof(other->reloc_bos[0]));
- for (uint32_t i = 0; i < other->num_relocs; i++)
- list->relocs[i + list->num_relocs].offset += offset;
+ for (uint32_t i = 0; i < other->num_relocs; i++)
+ list->relocs[i + list->num_relocs].offset += offset;
+
+ list->num_relocs += other->num_relocs;
+ }
+
+   result = anv_reloc_list_grow_deps(list, alloc, other->dep_words);
+   if (result != VK_SUCCESS)
+      return result;
+ for (uint32_t w = 0; w < other->dep_words; w++)
+ list->deps[w] |= other->deps[w];
- list->num_relocs += other->num_relocs;
return VK_SUCCESS;
}
anv_batch_emit_reloc(struct anv_batch *batch,
void *location, struct anv_bo *bo, uint32_t delta)
{
+ uint64_t address_u64 = 0;
VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
- location - batch->start, bo, delta);
+ location - batch->start, bo, delta,
+ &address_u64);
if (result != VK_SUCCESS) {
anv_batch_set_error(batch, result);
return 0;
}
- return bo->offset + delta;
+ return address_u64;
}
void
if (bbo == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
- ANV_CMD_BUFFER_BATCH_SIZE);
+ result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
+ ANV_CMD_BUFFER_BATCH_SIZE, &bbo->bo);
if (result != VK_SUCCESS)
goto fail_alloc;
return VK_SUCCESS;
fail_bo_alloc:
- anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+ anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
fail_alloc:
vk_free(&cmd_buffer->pool->alloc, bbo);
if (bbo == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
- other_bbo->bo.size);
+ result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
+ other_bbo->bo->size, &bbo->bo);
if (result != VK_SUCCESS)
goto fail_alloc;
goto fail_bo_alloc;
bbo->length = other_bbo->length;
- memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
-
+ memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
*bbo_out = bbo;
return VK_SUCCESS;
fail_bo_alloc:
- anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+ anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
fail_alloc:
vk_free(&cmd_buffer->pool->alloc, bbo);
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
size_t batch_padding)
{
- batch->next = batch->start = bbo->bo.map;
- batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
+ batch->next = batch->start = bbo->bo->map;
+ batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
batch->relocs = &bbo->relocs;
- bbo->relocs.num_relocs = 0;
+ anv_reloc_list_clear(&bbo->relocs);
}
static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
size_t batch_padding)
{
- batch->start = bbo->bo.map;
- batch->next = bbo->bo.map + bbo->length;
- batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
+ batch->start = bbo->bo->map;
+ batch->next = bbo->bo->map + bbo->length;
+ batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
batch->relocs = &bbo->relocs;
}
static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
- assert(batch->start == bbo->bo.map);
+ assert(batch->start == bbo->bo->map);
bbo->length = batch->next - batch->start;
VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}
struct anv_batch *batch, size_t aditional,
size_t batch_padding)
{
- assert(batch->start == bbo->bo.map);
+ assert(batch->start == bbo->bo->map);
bbo->length = batch->next - batch->start;
- size_t new_size = bbo->bo.size;
+ size_t new_size = bbo->bo->size;
while (new_size <= bbo->length + aditional + batch_padding)
new_size *= 2;
- if (new_size == bbo->bo.size)
+ if (new_size == bbo->bo->size)
return VK_SUCCESS;
- struct anv_bo new_bo;
+ struct anv_bo *new_bo;
VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
- &new_bo, new_size);
+ new_size, &new_bo);
if (result != VK_SUCCESS)
return result;
- memcpy(new_bo.map, bbo->bo.map, bbo->length);
+ memcpy(new_bo->map, bbo->bo->map, bbo->length);
- anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+ anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
bbo->bo = new_bo;
anv_batch_bo_continue(bbo, batch, batch_padding);
return VK_SUCCESS;
}
+static void
+anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_batch_bo *prev_bbo,
+ struct anv_batch_bo *next_bbo,
+ uint32_t next_bbo_offset)
+{
+ const uint32_t bb_start_offset =
+ prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
+ ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
+
+ /* Make sure we're looking at a MI_BATCH_BUFFER_START */
+ assert(((*bb_start >> 29) & 0x07) == 0);
+ assert(((*bb_start >> 23) & 0x3f) == 49);
+
+ if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
+ assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
+
+ write_reloc(cmd_buffer->device,
+ prev_bbo->bo->map + bb_start_offset + 4,
+ next_bbo->bo->offset + next_bbo_offset, true);
+ } else {
+ uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
+ assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
+
+ prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
+ prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
+
+ /* Use a bogus presumed offset to force a relocation */
+ prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
+ }
+}
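/* Editor's sketch: the two asserts in anv_batch_bo_link decode DWord 0 of
 * the command being patched.  On these gens, bits 31:29 hold the command
 * type (0 = MI) and bits 28:23 the MI opcode; MI_BATCH_BUFFER_START is
 * opcode 49 (0x31).  Stand-alone check, assuming those field positions:
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool
sketch_is_mi_batch_buffer_start(uint32_t dw0)
{
   return ((dw0 >> 29) & 0x7) == 0 &&   /* command type: MI */
          ((dw0 >> 23) & 0x3f) == 49;   /* MI opcode: BATCH_BUFFER_START */
}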
+
static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
struct anv_cmd_buffer *cmd_buffer)
{
anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
- anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+ anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
vk_free(&cmd_buffer->pool->alloc, bbo);
}
break;
list_addtail(&new_bbo->link, new_list);
- if (prev_bbo) {
- /* As we clone this list of batch_bo's, they chain one to the
- * other using MI_BATCH_BUFFER_START commands. We need to fix up
- * those relocations as we go. Fortunately, this is pretty easy
- * as it will always be the last relocation in the list.
- */
- uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
- assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
- prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
- }
+ if (prev_bbo)
+ anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
prev_bbo = new_bbo;
}
if (result != VK_SUCCESS) {
- list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
+ list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
+ list_del(&bbo->link);
anv_batch_bo_destroy(bbo, cmd_buffer);
+ }
}
return result;
{
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
return (struct anv_address) {
- .bo = &cmd_buffer->device->surface_state_pool.block_pool.bo,
+ .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
.offset = bt_block->offset,
};
}
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
gen7_length : gen8_length;
- bbs._2ndLevelBatchBuffer = _1stlevelbatch;
+ bbs.SecondLevelBatchBuffer = Firstlevelbatch;
bbs.AddressSpaceIndicator = ASI_PPGTT;
bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
}
* chaining command, let's set it back where it should go.
*/
batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
- assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
+ assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
- emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
+ emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
anv_batch_bo_finish(current_bbo, batch);
}
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
uint32_t entries, uint32_t *state_offset)
{
- struct anv_state_pool *state_pool = &cmd_buffer->device->surface_state_pool;
+ struct anv_device *device = cmd_buffer->device;
+ struct anv_state_pool *state_pool = &device->surface_state_pool;
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
struct anv_state state;
return (struct anv_state) { 0 };
state.offset = cmd_buffer->bt_next;
- state.map = state_pool->block_pool.map + bt_block->offset + state.offset;
+ state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
+ bt_block->offset + state.offset);
cmd_buffer->bt_next += state.alloc_size;
- assert(bt_block->offset < 0);
- *state_offset = -bt_block->offset;
+ if (device->instance->physicalDevice.use_softpin) {
+ assert(bt_block->offset >= 0);
+ *state_offset = device->surface_state_pool.block_pool.start_address -
+ device->binding_table_pool.block_pool.start_address - bt_block->offset;
+ } else {
+ assert(bt_block->offset < 0);
+ *state_offset = -bt_block->offset;
+ }
return state;
}
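/* Editor's worked example (hypothetical addresses): with softpin, Surface
 * State Base Address points at this bt_block, while surface states live in
 * the separate surface-state pool.  The computed *state_offset is exactly
 * what must be added to a surface state's pool-relative offset so that the
 * binding-table entry resolves to the right address:
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
   const uint64_t bt_pool_start = 0x100000;  /* hypothetical */
   const uint64_t ss_pool_start = 0x180000;  /* hypothetical */
   const uint64_t bt_block_offset = 0x2000;
   const uint64_t ss_state_offset = 0x40;    /* offset within SS pool */

   uint64_t state_offset = ss_pool_start - bt_pool_start - bt_block_offset;
   uint64_t surface_base = bt_pool_start + bt_block_offset;

   /* value written into the binding table entry */
   uint64_t entry = ss_state_offset + state_offset;
   assert(surface_base + entry == ss_pool_start + ss_state_offset);
   return 0;
}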
VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
- struct anv_state_pool *state_pool = &cmd_buffer->device->surface_state_pool;
-
struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
if (bt_block == NULL) {
anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- *bt_block = anv_state_pool_alloc_back(state_pool);
+ *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
cmd_buffer->bt_next = 0;
return VK_SUCCESS;
{
struct anv_state *bt_block;
u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
- anv_state_pool_free(&cmd_buffer->device->surface_state_pool, *bt_block);
+ anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
u_vector_finish(&cmd_buffer->bt_block_states);
anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
/* Destroy all of the batch buffers */
list_for_each_entry_safe(struct anv_batch_bo, bbo,
&cmd_buffer->batch_bos, link) {
+ list_del(&bbo->link);
anv_batch_bo_destroy(bbo, cmd_buffer);
}
}
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
/* Delete all but the first batch bo */
- assert(!list_empty(&cmd_buffer->batch_bos));
+ assert(!list_is_empty(&cmd_buffer->batch_bos));
while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
list_del(&bbo->link);
anv_batch_bo_destroy(bbo, cmd_buffer);
}
- assert(!list_empty(&cmd_buffer->batch_bos));
+ assert(!list_is_empty(&cmd_buffer->batch_bos));
anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
&cmd_buffer->batch,
while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
- anv_state_pool_free(&cmd_buffer->device->surface_state_pool, *bt_block);
+ anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
}
assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
cmd_buffer->bt_next = 0;
- cmd_buffer->surface_relocs.num_relocs = 0;
+ anv_reloc_list_clear(&cmd_buffer->surface_relocs);
cmd_buffer->last_ss_pool_center = 0;
/* Reset the list of seen buffers */
* with our BATCH_BUFFER_END in another BO.
*/
cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
- assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
+ assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
- }
-
- anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
-
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+ } else {
+ assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
/* If this is a secondary command buffer, we need to determine the
* mode in which it will be executed with vkExecuteCommands. We
* determine this statically here so that this stays in sync with the
* actual ExecuteCommands implementation.
*/
+ const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
if (!cmd_buffer->device->can_chain_batches) {
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
} else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
- (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
+ (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
/* If the secondary has exactly one batch buffer in its list *and*
* that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
- /* When we chain, we need to add an MI_BATCH_BUFFER_START command
- * with its relocation. In order to handle this we'll increment here
- * so we can unconditionally decrement right before adding the
- * MI_BATCH_BUFFER_START command.
+ /* In order to chain, we need this command buffer to contain an
+ * MI_BATCH_BUFFER_START which will jump back to the calling batch.
+       * It doesn't matter where it points now so long as it has a valid
+ * relocation. We'll adjust it later as part of the chaining
+ * process.
+ *
+       * We set the end of the batch a little short so we would be sure we
+       * had room for the chaining command. Since we're about to emit the
+ * chaining command, let's set it back where it should go.
*/
- batch_bo->relocs.num_relocs++;
- cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
+ cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
+ assert(cmd_buffer->batch.start == batch_bo->bo->map);
+ assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
+
+ emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
+ assert(cmd_buffer->batch.start == batch_bo->bo->map);
} else {
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
}
}
+
+ anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
}
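/* Editor's note on the chaining handshake above (illustrative): the
 * secondary now ends with a self-referencing MI_BATCH_BUFFER_START that
 * merely guarantees a patchable jump exists.  At vkCmdExecuteCommands
 * time, anv_batch_bo_link() repoints it:
 *
 *   primary:   ... MI_BATCH_BUFFER_START --> secondary's first bbo
 *              <resume point>
 *   secondary: ... MI_BATCH_BUFFER_START --> primary's <resume point>
 */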
static VkResult
struct anv_batch_bo *last_bbo =
list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
- emit_batch_buffer_start(primary, &first_bbo->bo, 0);
+ emit_batch_buffer_start(primary, first_bbo->bo, 0);
struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
- assert(primary->batch.start == this_bbo->bo.map);
+ assert(primary->batch.start == this_bbo->bo->map);
uint32_t offset = primary->batch.next - primary->batch.start;
- const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;
- /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
- * can emit a new command and relocation for the current splice. In
- * order to handle the initial-use case, we incremented next and
- * num_relocs in end_batch_buffer() so we can alyways just subtract
- * here.
+ /* Make the tail of the secondary point back to right after the
+ * MI_BATCH_BUFFER_START in the primary batch.
*/
- last_bbo->relocs.num_relocs--;
- secondary->batch.next -= inst_size;
- emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
- anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
+ anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
- /* After patching up the secondary buffer, we need to clflush the
- * modified instruction in case we're on a !llc platform. We use a
- * little loop to handle the case where the instruction crosses a cache
- * line boundary.
- */
- if (!primary->device->info.has_llc) {
- void *inst = secondary->batch.next - inst_size;
- void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
- __builtin_ia32_mfence();
- while (p < secondary->batch.next) {
- __builtin_ia32_clflush(p);
- p += CACHELINE_SIZE;
- }
- }
+ anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
break;
}
case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
/* Allocated length of the 'objects' and 'bos' arrays */
uint32_t array_length;
+ bool has_relocs;
+
+ const VkAllocationCallbacks * alloc;
+ VkSystemAllocationScope alloc_scope;
uint32_t fence_count;
uint32_t fence_array_length;
struct drm_i915_gem_exec_fence * fences;
}
static void
-anv_execbuf_finish(struct anv_execbuf *exec,
- const VkAllocationCallbacks *alloc)
+anv_execbuf_finish(struct anv_execbuf *exec)
{
- vk_free(alloc, exec->objects);
- vk_free(alloc, exec->bos);
- vk_free(alloc, exec->fences);
- vk_free(alloc, exec->syncobjs);
+ vk_free(exec->alloc, exec->objects);
+ vk_free(exec->alloc, exec->bos);
+ vk_free(exec->alloc, exec->fences);
+ vk_free(exec->alloc, exec->syncobjs);
}
static VkResult
-anv_execbuf_add_bo(struct anv_execbuf *exec,
+anv_execbuf_add_bo_bitset(struct anv_device *device,
+ struct anv_execbuf *exec,
+ uint32_t dep_words,
+ BITSET_WORD *deps,
+ uint32_t extra_flags);
+
+static VkResult
+anv_execbuf_add_bo(struct anv_device *device,
+ struct anv_execbuf *exec,
struct anv_bo *bo,
struct anv_reloc_list *relocs,
- uint32_t extra_flags,
- const VkAllocationCallbacks *alloc)
+ uint32_t extra_flags)
{
struct drm_i915_gem_exec_object2 *obj = NULL;
+ bo = anv_bo_unwrap(bo);
+
if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
obj = &exec->objects[bo->index];
uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
struct drm_i915_gem_exec_object2 *new_objects =
- vk_alloc(alloc, new_len * sizeof(*new_objects),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
if (new_objects == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
struct anv_bo **new_bos =
- vk_alloc(alloc, new_len * sizeof(*new_bos),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
if (new_bos == NULL) {
- vk_free(alloc, new_objects);
+ vk_free(exec->alloc, new_objects);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
exec->bo_count * sizeof(*new_bos));
}
- vk_free(alloc, exec->objects);
- vk_free(alloc, exec->bos);
+ vk_free(exec->alloc, exec->objects);
+ vk_free(exec->alloc, exec->bos);
exec->objects = new_objects;
exec->bos = new_bos;
obj->rsvd2 = 0;
}
- if (relocs != NULL && obj->relocation_count == 0) {
- /* This is the first time we've ever seen a list of relocations for
- * this BO. Go ahead and set the relocations and then walk the list
- * of relocations and add them all.
- */
- obj->relocation_count = relocs->num_relocs;
- obj->relocs_ptr = (uintptr_t) relocs->relocs;
+ if (relocs != NULL) {
+ assert(obj->relocation_count == 0);
+
+ if (relocs->num_relocs > 0) {
+ /* This is the first time we've ever seen a list of relocations for
+ * this BO. Go ahead and set the relocations and then walk the list
+ * of relocations and add them all.
+ */
+ exec->has_relocs = true;
+ obj->relocation_count = relocs->num_relocs;
+ obj->relocs_ptr = (uintptr_t) relocs->relocs;
+
+ for (size_t i = 0; i < relocs->num_relocs; i++) {
+ VkResult result;
+
+ /* A quick sanity check on relocations */
+ assert(relocs->relocs[i].offset < bo->size);
+ result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
+ NULL, extra_flags);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+ }
- for (size_t i = 0; i < relocs->num_relocs; i++) {
- VkResult result;
+ return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
+ relocs->deps, extra_flags);
+ }
- /* A quick sanity check on relocations */
- assert(relocs->relocs[i].offset < bo->size);
- result = anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
- extra_flags, alloc);
+ return VK_SUCCESS;
+}
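/* Editor's sketch (assuming the i915 uAPI in <drm/i915_drm.h>): each
 * validated BO becomes one drm_i915_gem_exec_object2, and the first time a
 * BO with relocations is seen, the kernel is pointed directly at the
 * anv_reloc_list storage instead of a copy:
 */
#include <stdint.h>
#include <drm/i915_drm.h>

static void
sketch_attach_relocs(struct drm_i915_gem_exec_object2 *obj,
                     struct drm_i915_gem_relocation_entry *relocs,
                     uint32_t num_relocs)
{
   obj->relocation_count = num_relocs;
   obj->relocs_ptr = (uintptr_t)relocs; /* kernel reads the array in place */
}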
+/* Add BO dependencies to execbuf */
+static VkResult
+anv_execbuf_add_bo_bitset(struct anv_device *device,
+ struct anv_execbuf *exec,
+ uint32_t dep_words,
+ BITSET_WORD *deps,
+ uint32_t extra_flags)
+{
+ for (uint32_t w = 0; w < dep_words; w++) {
+ BITSET_WORD mask = deps[w];
+ while (mask) {
+ int i = u_bit_scan(&mask);
+ uint32_t gem_handle = w * BITSET_WORDBITS + i;
+ struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
+ assert(bo->refcount > 0);
+ VkResult result =
+ anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
if (result != VK_SUCCESS)
return result;
}
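/* Editor's note: u_bit_scan() (util/bitscan.h) returns the index of the
 * lowest set bit and clears it from *mask, so the while (mask) loop above
 * visits every dependency bit exactly once.  Equivalent stand-alone sketch
 * using GCC/Clang builtins:
 */
static inline int
sketch_bit_scan(unsigned *mask)
{
   int i = __builtin_ffs((int)*mask) - 1; /* 0-based index of lowest set bit */
   *mask &= *mask - 1;                    /* clear that bit */
   return i;
}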
static VkResult
anv_execbuf_add_syncobj(struct anv_execbuf *exec,
- uint32_t handle, uint32_t flags,
- const VkAllocationCallbacks *alloc)
+ uint32_t handle, uint32_t flags)
{
assert(flags != 0);
if (exec->fence_count >= exec->fence_array_length) {
uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
- exec->fences = vk_realloc(alloc, exec->fences,
+ exec->fences = vk_realloc(exec->alloc, exec->fences,
new_len * sizeof(*exec->fences),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ 8, exec->alloc_scope);
if (exec->fences == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
struct anv_reloc_list *list)
{
for (size_t i = 0; i < list->num_relocs; i++)
- list->relocs[i].target_handle = list->reloc_bos[i]->index;
+ list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
}
static void
struct anv_reloc_list *relocs,
uint32_t last_pool_center_bo_offset)
{
+ assert(!from_bo->is_wrapper);
assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
* relocations that point to the pool bo with the correct offset.
*/
for (size_t i = 0; i < relocs->num_relocs; i++) {
- if (relocs->reloc_bos[i] == &pool->block_pool.bo) {
+ if (relocs->reloc_bos[i] == pool->block_pool.bo) {
/* Adjust the delta value in the relocation to correctly
* correspond to the new delta. Initially, this value may have
* been negative (if treated as unsigned), but we trust in
struct anv_bo *bo,
bool always_relocate)
{
+ bo = anv_bo_unwrap(bo);
+
for (size_t i = 0; i < list->num_relocs; i++) {
- struct anv_bo *target_bo = list->reloc_bos[i];
+ struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
if (list->relocs[i].presumed_offset == target_bo->offset &&
!always_relocate)
continue;
relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
struct anv_execbuf *exec)
{
+ if (!exec->has_relocs)
+ return true;
+
static int userspace_relocs = -1;
if (userspace_relocs < 0)
userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
* Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
*/
for (uint32_t i = 0; i < exec->bo_count; i++) {
+ assert(!exec->bos[i]->is_wrapper);
if (exec->bos[i]->offset == (uint64_t)-1)
return false;
}
* what address is actually written in the surface state object at any
* given time. The only option is to always relocate them.
*/
+ struct anv_bo *surface_state_bo =
+ anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
- &cmd_buffer->device->surface_state_pool.block_pool.bo,
+ surface_state_bo,
true /* always relocate surface states */);
/* Since we own all of the batch buffers, we know what values are stored
struct anv_batch_bo **bbo;
u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
anv_reloc_list_apply(cmd_buffer->device,
- &(*bbo)->relocs, &(*bbo)->bo, false);
+ &(*bbo)->relocs, (*bbo)->bo, false);
}
for (uint32_t i = 0; i < exec->bo_count; i++)
adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
cmd_buffer->last_ss_pool_center);
- VkResult result = anv_execbuf_add_bo(execbuf, &ss_pool->block_pool.bo,
- &cmd_buffer->surface_relocs, 0,
- &cmd_buffer->device->alloc);
- if (result != VK_SUCCESS)
- return result;
+ VkResult result;
+ if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
+ result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
+ bo, NULL, 0);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+      /* Add surface dependencies (BOs) to the execbuf */
+      result = anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
+                                         cmd_buffer->surface_relocs.dep_words,
+                                         cmd_buffer->surface_relocs.deps, 0);
+      if (result != VK_SUCCESS)
+         return result;
+
+ /* Add the BOs for all memory objects */
+ list_for_each_entry(struct anv_device_memory, mem,
+ &cmd_buffer->device->memory_objects, link) {
+ result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
+ mem->bo, NULL, 0);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ struct anv_block_pool *pool;
+ pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
+ anv_block_pool_foreach_bo(bo, pool) {
+ result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
+ bo, NULL, 0);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ pool = &cmd_buffer->device->instruction_state_pool.block_pool;
+ anv_block_pool_foreach_bo(bo, pool) {
+ result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
+ bo, NULL, 0);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ pool = &cmd_buffer->device->binding_table_pool.block_pool;
+ anv_block_pool_foreach_bo(bo, pool) {
+ result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
+ bo, NULL, 0);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+ } else {
+ /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
+ * will get added automatically by processing relocations on the batch
+ * buffer. We have to add the surface state BO manually because it has
+       * relocations of its own that we need to be sure are processed.
+ */
+ result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
+ ss_pool->block_pool.bo,
+ &cmd_buffer->surface_relocs, 0);
+ if (result != VK_SUCCESS)
+ return result;
+ }
/* First, we walk over all of the bos we've seen and add them and their
* relocations to the validate list.
*/
struct anv_batch_bo **bbo;
u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
- adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
+ adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
cmd_buffer->last_ss_pool_center);
- result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
- &cmd_buffer->device->alloc);
+ result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
+ (*bbo)->bo, &(*bbo)->relocs, 0);
if (result != VK_SUCCESS)
return result;
}
* corresponding to the first batch_bo in the chain with the last
* element in the list.
*/
- if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
- uint32_t idx = first_batch_bo->bo.index;
+ if (first_batch_bo->bo->index != execbuf->bo_count - 1) {
+ uint32_t idx = first_batch_bo->bo->index;
uint32_t last_idx = execbuf->bo_count - 1;
struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
- assert(execbuf->bos[idx] == &first_batch_bo->bo);
+ assert(execbuf->bos[idx] == first_batch_bo->bo);
execbuf->objects[idx] = execbuf->objects[last_idx];
execbuf->bos[idx] = execbuf->bos[last_idx];
execbuf->bos[idx]->index = idx;
execbuf->objects[last_idx] = tmp_obj;
- execbuf->bos[last_idx] = &first_batch_bo->bo;
- first_batch_bo->bo.index = last_idx;
+ execbuf->bos[last_idx] = first_batch_bo->bo;
+ first_batch_bo->bo->index = last_idx;
}
+ /* If we are pinning our BOs, we shouldn't have to relocate anything */
+ if (cmd_buffer->device->instance->physicalDevice.use_softpin)
+ assert(!execbuf->has_relocs);
+
/* Now we go through and fixup all of the relocation lists to point to
* the correct indices in the object array. We have to do this after we
* reorder the list above as some of the indices may have changed.
*/
- u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
- anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
+ if (execbuf->has_relocs) {
+ u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
+ anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
- anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
+ anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
+ }
if (!cmd_buffer->device->info.has_llc) {
__builtin_ia32_mfence();
u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
- __builtin_ia32_clflush((*bbo)->bo.map + i);
+ __builtin_ia32_clflush((*bbo)->bo->map + i);
}
}
static VkResult
setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
{
- VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
- NULL, 0, &device->alloc);
+ VkResult result = anv_execbuf_add_bo(device, execbuf,
+ device->trivial_batch_bo,
+ NULL, 0);
if (result != VK_SUCCESS)
return result;
VkFence _fence)
{
ANV_FROM_HANDLE(anv_fence, fence, _fence);
+ UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
struct anv_execbuf execbuf;
anv_execbuf_init(&execbuf);
+ execbuf.alloc = &device->alloc;
+ execbuf.alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND;
int in_fence = -1;
VkResult result = VK_SUCCESS;
switch (impl->type) {
case ANV_SEMAPHORE_TYPE_BO:
- result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
- 0, &device->alloc);
+ assert(!pdevice->has_syncobj);
+ result = anv_execbuf_add_bo(device, &execbuf, impl->bo, NULL, 0);
if (result != VK_SUCCESS)
return result;
break;
case ANV_SEMAPHORE_TYPE_SYNC_FILE:
+ assert(!pdevice->has_syncobj);
if (in_fence == -1) {
in_fence = impl->fd;
+ if (in_fence == -1)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ impl->fd = -1;
} else {
int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
if (merge == -1)
close(impl->fd);
close(in_fence);
+ impl->fd = -1;
in_fence = merge;
}
-
- impl->fd = -1;
break;
case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
- I915_EXEC_FENCE_WAIT,
- &device->alloc);
+ I915_EXEC_FENCE_WAIT);
if (result != VK_SUCCESS)
return result;
break;
switch (impl->type) {
case ANV_SEMAPHORE_TYPE_BO:
- result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
- EXEC_OBJECT_WRITE, &device->alloc);
+ assert(!pdevice->has_syncobj);
+ result = anv_execbuf_add_bo(device, &execbuf, impl->bo, NULL,
+ EXEC_OBJECT_WRITE);
if (result != VK_SUCCESS)
return result;
break;
case ANV_SEMAPHORE_TYPE_SYNC_FILE:
+ assert(!pdevice->has_syncobj);
need_out_fence = true;
break;
case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
- I915_EXEC_FENCE_SIGNAL,
- &device->alloc);
+ I915_EXEC_FENCE_SIGNAL);
if (result != VK_SUCCESS)
return result;
break;
switch (impl->type) {
case ANV_FENCE_TYPE_BO:
- result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
- EXEC_OBJECT_WRITE, &device->alloc);
+ assert(!pdevice->has_syncobj_wait);
+ result = anv_execbuf_add_bo(device, &execbuf, impl->bo.bo, NULL,
+ EXEC_OBJECT_WRITE);
if (result != VK_SUCCESS)
return result;
break;
case ANV_FENCE_TYPE_SYNCOBJ:
result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
- I915_EXEC_FENCE_SIGNAL,
- &device->alloc);
+ I915_EXEC_FENCE_SIGNAL);
if (result != VK_SUCCESS)
return result;
break;
}
}
- if (cmd_buffer)
+ if (cmd_buffer) {
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
+ struct anv_batch_bo **bo = u_vector_tail(&cmd_buffer->seen_bbos);
+
+ device->cmd_buffer_being_decoded = cmd_buffer;
+ gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
+ (*bo)->bo->size, (*bo)->bo->offset, false);
+ device->cmd_buffer_being_decoded = NULL;
+ }
+
result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
- else
+ } else {
result = setup_empty_execbuf(&execbuf, device);
+ }
if (result != VK_SUCCESS)
return result;
}
if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
+ assert(!pdevice->has_syncobj_wait);
/* BO fences can't be shared, so they can't be temporary. */
assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
}
if (result == VK_SUCCESS && need_out_fence) {
+ assert(!pdevice->has_syncobj_wait);
int out_fence = execbuf.execbuf.rsvd2 >> 32;
for (uint32_t i = 0; i < num_out_semaphores; i++) {
ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
close(out_fence);
}
- anv_execbuf_finish(&execbuf, &device->alloc);
+ anv_execbuf_finish(&execbuf);
return result;
}