#include "anv_private.h"
#include "genxml/gen8_pack.h"
+#include "genxml/genX_bits.h"
+#include "perf/gen_perf.h"
#include "util/debug.h"
return address_u64;
}
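+/* Map a CPU pointer into the batch back to its GPU address, relative to
+ * the batch's start_addr. batch_location may point anywhere from just past
+ * batch->start up to and including batch->next.
+ */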
+struct anv_address
+anv_batch_address(struct anv_batch *batch, void *batch_location)
+{
+ assert(batch->start < batch_location);
+
+ /* Allow a jump at the current location of the batch. */
+ assert(batch->next >= batch_location);
+
+ return anv_address_add(batch->start_addr, batch_location - batch->start);
+}
+
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
size_t batch_padding)
{
- batch->next = batch->start = bbo->bo->map;
- batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
+ anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
+ bbo->bo->map, bbo->bo->size - batch_padding);
batch->relocs = &bbo->relocs;
anv_reloc_list_clear(&bbo->relocs);
}
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
size_t batch_padding)
{
+ batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
batch->start = bbo->bo->map;
batch->next = bbo->bo->map + bbo->length;
batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
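+ /* The chained instruction must be an MI command (type 0) with opcode 49
+ * (0x31), i.e. MI_BATCH_BUFFER_START.
+ */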
assert(((*bb_start >> 29) & 0x07) == 0);
assert(((*bb_start >> 23) & 0x3f) == 49);
- if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ if (cmd_buffer->device->physical->use_softpin) {
assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
}
if (result != VK_SUCCESS) {
- list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
+ list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
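+ /* Unlink each bbo before destroying it so new_list never holds a freed
+ * entry.
+ */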
+ list_del(&bbo->link);
anv_batch_bo_destroy(bbo, cmd_buffer);
+ }
}
return result;
struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
+ struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
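+ /* States allocated from this pool have offsets that include
+ * pool->start_offset; subtract it to get the offset from the BO's base.
+ */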
return (struct anv_address) {
- .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
- .offset = bt_block->offset,
+ .bo = pool->block_pool.bo,
+ .offset = bt_block->offset - pool->start_offset,
};
}
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
uint32_t entries, uint32_t *state_offset)
{
- struct anv_device *device = cmd_buffer->device;
- struct anv_state_pool *state_pool = &device->surface_state_pool;
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
- struct anv_state state;
- state.alloc_size = align_u32(entries * 4, 32);
+ uint32_t bt_size = align_u32(entries * 4, 32);
- if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
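+ /* Suballocate from the rolling bt_next state; its alloc_size tracks the
+ * space remaining in the current binding table block.
+ */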
+ struct anv_state state = cmd_buffer->bt_next;
+ if (bt_size > state.alloc_size)
return (struct anv_state) { 0 };
- state.offset = cmd_buffer->bt_next;
- state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
- bt_block->offset + state.offset);
+ state.alloc_size = bt_size;
+ cmd_buffer->bt_next.offset += bt_size;
+ cmd_buffer->bt_next.map += bt_size;
+ cmd_buffer->bt_next.alloc_size -= bt_size;
- cmd_buffer->bt_next += state.alloc_size;
-
- if (device->instance->physicalDevice.use_softpin) {
- assert(bt_block->offset >= 0);
- *state_offset = device->surface_state_pool.block_pool.start_address -
- device->binding_table_pool.block_pool.start_address - bt_block->offset;
- } else {
- assert(bt_block->offset < 0);
- *state_offset = -bt_block->offset;
- }
+ assert(bt_block->offset < 0);
+ *state_offset = -bt_block->offset;
return state;
}
}
*bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
- cmd_buffer->bt_next = 0;
+
+ /* bt_next is a rolling state: we update it as we suballocate from it,
+ * and its offset is relative to the start of the binding table block.
+ */
+ cmd_buffer->bt_next = *bt_block;
+ cmd_buffer->bt_next.offset = 0;
return VK_SUCCESS;
}
/* Destroy all of the batch buffers */
list_for_each_entry_safe(struct anv_batch_bo, bbo,
&cmd_buffer->batch_bos, link) {
+ list_del(&bbo->link);
anv_batch_bo_destroy(bbo, cmd_buffer);
}
}
anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
}
assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
- cmd_buffer->bt_next = 0;
+ cmd_buffer->bt_next =
+ *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
+ cmd_buffer->bt_next.offset = 0;
anv_reloc_list_clear(&cmd_buffer->surface_relocs);
cmd_buffer->last_ss_pool_center = 0;
const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
if (!cmd_buffer->device->can_chain_batches) {
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
+ } else if (cmd_buffer->device->physical->use_call_secondary) {
+ cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
+ /* If the secondary command buffer begins & ends in the same BO and
+ * its length is less than the length of the CS prefetch, add some NOOP
+ * instructions so the last MI_BATCH_BUFFER_START is outside the CS
+ * prefetch.
+ */
+ if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
+ int32_t batch_len =
+ cmd_buffer->batch.next - cmd_buffer->batch.start;
+
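+ /* MI_NOOP is one DWord; pad until at least 512 bytes (the assumed CS
+ * prefetch size) have been emitted.
+ */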
+ for (int32_t i = 0; i < (512 - batch_len); i += 4)
+ anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
+ }
+
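+ /* Emit the return MI_BATCH_BUFFER_START with a placeholder address;
+ * jump_addr points at its address field so the primary can patch in the
+ * real return address at record time.
+ */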
+ void *jump_addr =
+ anv_batch_emitn(&cmd_buffer->batch,
+ GEN8_MI_BATCH_BUFFER_START_length,
+ GEN8_MI_BATCH_BUFFER_START,
+ .AddressSpaceIndicator = ASI_PPGTT,
+ .SecondLevelBatchBuffer = Firstlevelbatch) +
+ (GEN8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
+ cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
} else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
(length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
/* If the secondary has exactly one batch buffer in its list *and*
GEN8_MI_BATCH_BUFFER_START_length * 4);
break;
}
+ case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
+ struct anv_batch_bo *first_bbo =
+ list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
+
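+ /* Emit an MI_STORE_DATA_IMM whose immediate we patch below with the
+ * primary's address right after the call; when executed it writes that
+ * return address into the secondary's final MI_BATCH_BUFFER_START.
+ */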
+ uint64_t *write_return_addr =
+ anv_batch_emitn(&primary->batch,
+ GEN8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
+ GEN8_MI_STORE_DATA_IMM,
+ .Address = secondary->return_addr)
+ + (GEN8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
+
+ emit_batch_buffer_start(primary, first_bbo->bo, 0);
+
+ *write_return_addr =
+ anv_address_physical(anv_batch_address(&primary->batch,
+ primary->batch.next));
+
+ anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
+ break;
+ }
default:
assert(!"Invalid execution mode");
}
bool has_relocs;
- uint32_t fence_count;
- uint32_t fence_array_length;
- struct drm_i915_gem_exec_fence * fences;
- struct anv_syncobj ** syncobjs;
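+ /* Allocator (and scope) with which the objects/bos arrays are grown. */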
+ const VkAllocationCallbacks * alloc;
+ VkSystemAllocationScope alloc_scope;
+
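+ /* Perf query pass to configure before submission, or negative if none. */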
+ int perf_query_pass;
};
static void
}
static void
-anv_execbuf_finish(struct anv_execbuf *exec,
- const VkAllocationCallbacks *alloc)
+anv_execbuf_finish(struct anv_execbuf *exec)
{
- vk_free(alloc, exec->objects);
- vk_free(alloc, exec->bos);
- vk_free(alloc, exec->fences);
- vk_free(alloc, exec->syncobjs);
+ vk_free(exec->alloc, exec->objects);
+ vk_free(exec->alloc, exec->bos);
}
static VkResult
struct anv_execbuf *exec,
uint32_t dep_words,
BITSET_WORD *deps,
- uint32_t extra_flags,
- const VkAllocationCallbacks *alloc);
+ uint32_t extra_flags);
static VkResult
anv_execbuf_add_bo(struct anv_device *device,
struct anv_execbuf *exec,
struct anv_bo *bo,
struct anv_reloc_list *relocs,
- uint32_t extra_flags,
- const VkAllocationCallbacks *alloc)
+ uint32_t extra_flags)
{
struct drm_i915_gem_exec_object2 *obj = NULL;
uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
struct drm_i915_gem_exec_object2 *new_objects =
- vk_alloc(alloc, new_len * sizeof(*new_objects),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
if (new_objects == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
struct anv_bo **new_bos =
- vk_alloc(alloc, new_len * sizeof(*new_bos),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
if (new_bos == NULL) {
- vk_free(alloc, new_objects);
+ vk_free(exec->alloc, new_objects);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
exec->bo_count * sizeof(*new_bos));
}
- vk_free(alloc, exec->objects);
- vk_free(alloc, exec->bos);
+ vk_free(exec->alloc, exec->objects);
+ vk_free(exec->alloc, exec->bos);
exec->objects = new_objects;
exec->bos = new_bos;
obj->rsvd2 = 0;
}
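+ /* BOs we write (e.g. signaled fence BOs) must keep implicit sync, so
+ * make sure EXEC_OBJECT_ASYNC is not set on them.
+ */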
+ if (extra_flags & EXEC_OBJECT_WRITE) {
+ obj->flags |= EXEC_OBJECT_WRITE;
+ obj->flags &= ~EXEC_OBJECT_ASYNC;
+ }
+
if (relocs != NULL) {
assert(obj->relocation_count == 0);
/* A quick sanity check on relocations */
assert(relocs->relocs[i].offset < bo->size);
result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
- NULL, extra_flags, alloc);
-
+ NULL, extra_flags);
if (result != VK_SUCCESS)
return result;
}
}
return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
- relocs->deps, extra_flags, alloc);
+ relocs->deps, extra_flags);
}
return VK_SUCCESS;
struct anv_execbuf *exec,
uint32_t dep_words,
BITSET_WORD *deps,
- uint32_t extra_flags,
- const VkAllocationCallbacks *alloc)
+ uint32_t extra_flags)
{
for (uint32_t w = 0; w < dep_words; w++) {
BITSET_WORD mask = deps[w];
uint32_t gem_handle = w * BITSET_WORDBITS + i;
struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
assert(bo->refcount > 0);
- VkResult result = anv_execbuf_add_bo(device, exec,
- bo, NULL, extra_flags, alloc);
+ VkResult result =
+ anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
if (result != VK_SUCCESS)
return result;
}
return VK_SUCCESS;
}
-static VkResult
-anv_execbuf_add_syncobj(struct anv_execbuf *exec,
- uint32_t handle, uint32_t flags,
- const VkAllocationCallbacks *alloc)
-{
- assert(flags != 0);
-
- if (exec->fence_count >= exec->fence_array_length) {
- uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
-
- exec->fences = vk_realloc(alloc, exec->fences,
- new_len * sizeof(*exec->fences),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
- if (exec->fences == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- exec->fence_array_length = new_len;
- }
-
- exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
- .handle = handle,
- .flags = flags,
- };
-
- exec->fence_count++;
-
- return VK_SUCCESS;
-}
-
static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
struct anv_reloc_list *list)
adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
cmd_buffer->last_ss_pool_center);
VkResult result;
- if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ if (cmd_buffer->device->physical->use_softpin) {
anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
- bo, NULL, 0,
- &cmd_buffer->device->alloc);
+ bo, NULL, 0);
if (result != VK_SUCCESS)
return result;
}
/* Add surface dependencies (BOs) to the execbuf */
anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
cmd_buffer->surface_relocs.dep_words,
- cmd_buffer->surface_relocs.deps,
- 0, &cmd_buffer->device->alloc);
+ cmd_buffer->surface_relocs.deps, 0);
/* Add the BOs for all memory objects */
list_for_each_entry(struct anv_device_memory, mem,
&cmd_buffer->device->memory_objects, link) {
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
- mem->bo, NULL, 0,
- &cmd_buffer->device->alloc);
+ mem->bo, NULL, 0);
if (result != VK_SUCCESS)
return result;
}
pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
anv_block_pool_foreach_bo(bo, pool) {
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
- bo, NULL, 0,
- &cmd_buffer->device->alloc);
+ bo, NULL, 0);
if (result != VK_SUCCESS)
return result;
}
pool = &cmd_buffer->device->instruction_state_pool.block_pool;
anv_block_pool_foreach_bo(bo, pool) {
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
- bo, NULL, 0,
- &cmd_buffer->device->alloc);
+ bo, NULL, 0);
if (result != VK_SUCCESS)
return result;
}
pool = &cmd_buffer->device->binding_table_pool.block_pool;
anv_block_pool_foreach_bo(bo, pool) {
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
- bo, NULL, 0,
- &cmd_buffer->device->alloc);
+ bo, NULL, 0);
if (result != VK_SUCCESS)
return result;
}
*/
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
ss_pool->block_pool.bo,
- &cmd_buffer->surface_relocs, 0,
- &cmd_buffer->device->alloc);
+ &cmd_buffer->surface_relocs, 0);
if (result != VK_SUCCESS)
return result;
}
cmd_buffer->last_ss_pool_center);
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
- (*bbo)->bo, &(*bbo)->relocs, 0,
- &cmd_buffer->device->alloc);
+ (*bbo)->bo, &(*bbo)->relocs, 0);
if (result != VK_SUCCESS)
return result;
}
}
/* If we are pinning our BOs, we shouldn't have to relocate anything */
- if (cmd_buffer->device->instance->physicalDevice.use_softpin)
+ if (cmd_buffer->device->physical->use_softpin)
assert(!execbuf->has_relocs);
/* Now we go through and fixup all of the relocation lists to point to
{
VkResult result = anv_execbuf_add_bo(device, execbuf,
device->trivial_batch_bo,
- NULL, 0, &device->alloc);
+ NULL, 0);
if (result != VK_SUCCESS)
return result;
.buffer_count = execbuf->bo_count,
.batch_start_offset = 0,
.batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
- .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+ .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
.rsvd1 = device->context_id,
.rsvd2 = 0,
};
return VK_SUCCESS;
}
+/* We lock around execbuf for three main reasons:
+ *
+ * 1) When a block pool is resized, we create a new gem handle with a
+ * different size and, in the case of surface states, possibly a different
+ * center offset but we re-use the same anv_bo struct when we do so. If
+ * this happens in the middle of setting up an execbuf, we could end up
+ * with our list of BOs out of sync with our list of gem handles.
+ *
+ * 2) The algorithm we use for building the list of unique buffers isn't
+ * thread-safe. While the client is supposed to synchronize around
+ * QueueSubmit, this would be extremely difficult to debug if it ever came
+ * up in the wild due to a broken app. It's better to play it safe and
+ * just lock around QueueSubmit.
+ *
+ * 3) The anv_queue_execbuf_locked function may perform relocations in
+ * userspace. Because the surface state buffer is shared between
+ * batches, we can't afford to have that happen from multiple
+ * threads at the same time. Even though the user is supposed to ensure
+ * this doesn't happen, we play it safe as in (2) above.
+ *
+ * Since the only other things that ever take the device lock, such as
+ * block pool resizes, happen only rarely, the lock will almost never be
+ * contended, so taking it isn't really an expensive operation here.
+ */
VkResult
-anv_cmd_buffer_execbuf(struct anv_device *device,
- struct anv_cmd_buffer *cmd_buffer,
- const VkSemaphore *in_semaphores,
- uint32_t num_in_semaphores,
- const VkSemaphore *out_semaphores,
- uint32_t num_out_semaphores,
- VkFence _fence)
+anv_queue_execbuf_locked(struct anv_queue *queue,
+ struct anv_queue_submit *submit)
{
- ANV_FROM_HANDLE(anv_fence, fence, _fence);
- UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
-
+ struct anv_device *device = queue->device;
struct anv_execbuf execbuf;
anv_execbuf_init(&execbuf);
+ execbuf.alloc = submit->alloc;
+ execbuf.alloc_scope = submit->alloc_scope;
+ execbuf.perf_query_pass = submit->perf_query_pass;
- int in_fence = -1;
- VkResult result = VK_SUCCESS;
- for (uint32_t i = 0; i < num_in_semaphores; i++) {
- ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
- struct anv_semaphore_impl *impl =
- semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
- &semaphore->temporary : &semaphore->permanent;
-
- switch (impl->type) {
- case ANV_SEMAPHORE_TYPE_BO:
- assert(!pdevice->has_syncobj);
- result = anv_execbuf_add_bo(device, &execbuf, impl->bo, NULL,
- 0, &device->alloc);
- if (result != VK_SUCCESS)
- return result;
- break;
-
- case ANV_SEMAPHORE_TYPE_SYNC_FILE:
- assert(!pdevice->has_syncobj);
- if (in_fence == -1) {
- in_fence = impl->fd;
- if (in_fence == -1)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- impl->fd = -1;
- } else {
- int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
- if (merge == -1)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
-
- close(impl->fd);
- close(in_fence);
- impl->fd = -1;
- in_fence = merge;
- }
- break;
-
- case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
- result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
- I915_EXEC_FENCE_WAIT,
- &device->alloc);
- if (result != VK_SUCCESS)
- return result;
- break;
-
- default:
- break;
- }
- }
-
- bool need_out_fence = false;
- for (uint32_t i = 0; i < num_out_semaphores; i++) {
- ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
-
- /* Under most circumstances, out fences won't be temporary. However,
- * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
- *
- * "If the import is temporary, the implementation must restore the
- * semaphore to its prior permanent state after submitting the next
- * semaphore wait operation."
- *
- * The spec says nothing whatsoever about signal operations on
- * temporarily imported semaphores so it appears they are allowed.
- * There are also CTS tests that require this to work.
- */
- struct anv_semaphore_impl *impl =
- semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
- &semaphore->temporary : &semaphore->permanent;
-
- switch (impl->type) {
- case ANV_SEMAPHORE_TYPE_BO:
- assert(!pdevice->has_syncobj);
- result = anv_execbuf_add_bo(device, &execbuf, impl->bo, NULL,
- EXEC_OBJECT_WRITE, &device->alloc);
- if (result != VK_SUCCESS)
- return result;
- break;
-
- case ANV_SEMAPHORE_TYPE_SYNC_FILE:
- assert(!pdevice->has_syncobj);
- need_out_fence = true;
- break;
+ /* Always add the workaround BO as it includes a driver identifier for the
+ * error_state.
+ */
+ VkResult result =
+ anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
+ if (result != VK_SUCCESS)
+ goto error;
- case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
- result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
- I915_EXEC_FENCE_SIGNAL,
- &device->alloc);
- if (result != VK_SUCCESS)
- return result;
- break;
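+ /* Each fence_bos entry packs a "signaled" flag into the pointer's low
+ * bit; BOs to be signaled are written by the GPU and therefore need
+ * EXEC_OBJECT_WRITE.
+ */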
+ for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
+ int signaled;
+ struct anv_bo *bo = anv_unpack_ptr(submit->fence_bos[i], 1, &signaled);
- default:
- break;
- }
+ result = anv_execbuf_add_bo(device, &execbuf, bo, NULL,
+ signaled ? EXEC_OBJECT_WRITE : 0);
+ if (result != VK_SUCCESS)
+ goto error;
}
- if (fence) {
- /* Under most circumstances, out fences won't be temporary. However,
- * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
- *
- * "If the import is temporary, the implementation must restore the
- * semaphore to its prior permanent state after submitting the next
- * semaphore wait operation."
- *
- * The spec says nothing whatsoever about signal operations on
- * temporarily imported semaphores so it appears they are allowed.
- * There are also CTS tests that require this to work.
- */
- struct anv_fence_impl *impl =
- fence->temporary.type != ANV_FENCE_TYPE_NONE ?
- &fence->temporary : &fence->permanent;
-
- switch (impl->type) {
- case ANV_FENCE_TYPE_BO:
- assert(!pdevice->has_syncobj_wait);
- result = anv_execbuf_add_bo(device, &execbuf, impl->bo.bo, NULL,
- EXEC_OBJECT_WRITE, &device->alloc);
- if (result != VK_SUCCESS)
- return result;
- break;
-
- case ANV_FENCE_TYPE_SYNCOBJ:
- result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
- I915_EXEC_FENCE_SIGNAL,
- &device->alloc);
- if (result != VK_SUCCESS)
- return result;
- break;
-
- default:
- unreachable("Invalid fence type");
- }
+ if (submit->cmd_buffer) {
+ result = setup_execbuf_for_cmd_buffer(&execbuf, submit->cmd_buffer);
+ } else if (submit->simple_bo) {
+ result = anv_execbuf_add_bo(device, &execbuf, submit->simple_bo, NULL, 0);
+ if (result != VK_SUCCESS)
+ goto error;
+
+ execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
+ .buffers_ptr = (uintptr_t) execbuf.objects,
+ .buffer_count = execbuf.bo_count,
+ .batch_start_offset = 0,
+ .batch_len = submit->simple_bo_size,
+ .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
+ .rsvd1 = device->context_id,
+ .rsvd2 = 0,
+ };
+ } else {
+ result = setup_empty_execbuf(&execbuf, queue->device);
}
- if (cmd_buffer) {
- if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
- struct anv_batch_bo **bo = u_vector_tail(&cmd_buffer->seen_bbos);
+ if (result != VK_SUCCESS)
+ goto error;
+
+ const bool has_perf_query =
+ submit->perf_query_pass >= 0 &&
+ submit->cmd_buffer &&
+ submit->cmd_buffer->perf_query_pool;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
+ if (submit->cmd_buffer) {
+ if (has_perf_query) {
+ struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
+ struct anv_bo *pass_batch_bo = query_pool->bo;
+ uint64_t pass_batch_offset =
+ khr_perf_query_preamble_offset(query_pool,
+ submit->perf_query_pass);
+
+ gen_print_batch(&device->decoder_ctx,
+ pass_batch_bo->map + pass_batch_offset, 64,
+ pass_batch_bo->offset + pass_batch_offset, false);
+ }
- device->cmd_buffer_being_decoded = cmd_buffer;
+ struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);
+ device->cmd_buffer_being_decoded = submit->cmd_buffer;
gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
(*bo)->bo->size, (*bo)->bo->offset, false);
device->cmd_buffer_being_decoded = NULL;
+ } else if (submit->simple_bo) {
+ gen_print_batch(&device->decoder_ctx, submit->simple_bo->map,
+ submit->simple_bo->size, submit->simple_bo->offset, false);
+ } else {
+ gen_print_batch(&device->decoder_ctx,
+ device->trivial_batch_bo->map,
+ device->trivial_batch_bo->size,
+ device->trivial_batch_bo->offset, false);
}
-
- result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
- } else {
- result = setup_empty_execbuf(&execbuf, device);
}
- if (result != VK_SUCCESS)
- return result;
-
- if (execbuf.fence_count > 0) {
- assert(device->instance->physicalDevice.has_syncobj);
+ if (submit->fence_count > 0) {
+ assert(device->physical->has_syncobj);
execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
- execbuf.execbuf.num_cliprects = execbuf.fence_count;
- execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
+ execbuf.execbuf.num_cliprects = submit->fence_count;
+ execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
}
- if (in_fence != -1) {
+ if (submit->in_fence != -1) {
execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
- execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
+ execbuf.execbuf.rsvd2 |= (uint32_t)submit->in_fence;
}
- if (need_out_fence)
+ if (submit->need_out_fence)
execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
- result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
-
- /* Execbuf does not consume the in_fence. It's our job to close it. */
- if (in_fence != -1)
- close(in_fence);
+ if (has_perf_query) {
+ struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
+ assert(submit->perf_query_pass < query_pool->n_passes);
+ struct gen_perf_query_info *query_info =
+ query_pool->pass_query[submit->perf_query_pass];
- for (uint32_t i = 0; i < num_in_semaphores; i++) {
- ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
- /* From the Vulkan 1.0.53 spec:
- *
- * "If the import is temporary, the implementation must restore the
- * semaphore to its prior permanent state after submitting the next
- * semaphore wait operation."
- *
- * This has to happen after the execbuf in case we close any syncobjs in
- * the process.
+ /* Some performance queries use just the pipeline statistics HW and no
+ * OA at all; in that case there is no need to reconfigure.
*/
- anv_semaphore_reset_temporary(device, semaphore);
+ if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
+ (query_info->kind == GEN_PERF_QUERY_TYPE_OA ||
+ query_info->kind == GEN_PERF_QUERY_TYPE_RAW)) {
+ int ret = gen_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
+ (void *)(uintptr_t) query_info->oa_metrics_set_id);
+ if (ret < 0) {
+ result = anv_device_set_lost(device,
+ "i915-perf config failed: %s",
+ strerror(errno));
+ }
+ }
+
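+ /* Submit the query pool's preamble for this pass as its own small batch
+ * ahead of the main execbuf.
+ */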
+ struct anv_bo *pass_batch_bo = query_pool->bo;
+
+ struct drm_i915_gem_exec_object2 query_pass_object = {
+ .handle = pass_batch_bo->gem_handle,
+ .offset = pass_batch_bo->offset,
+ .flags = pass_batch_bo->flags,
+ };
+ struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
+ .buffers_ptr = (uintptr_t) &query_pass_object,
+ .buffer_count = 1,
+ .batch_start_offset = khr_perf_query_preamble_offset(query_pool,
+ submit->perf_query_pass),
+ .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+ .rsvd1 = device->context_id,
+ };
+
+ int ret = queue->device->no_hw ? 0 :
+ anv_gem_execbuffer(queue->device, &query_pass_execbuf);
+ if (ret)
+ result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
}
- if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
- assert(!pdevice->has_syncobj_wait);
- /* BO fences can't be shared, so they can't be temporary. */
- assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
+ int ret = queue->device->no_hw ? 0 :
+ anv_gem_execbuffer(queue->device, &execbuf.execbuf);
+ if (ret)
+ result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
- /* Once the execbuf has returned, we need to set the fence state to
- * SUBMITTED. We can't do this before calling execbuf because
- * anv_GetFenceStatus does take the global device lock before checking
- * fence->state.
- *
- * We set the fence state to SUBMITTED regardless of whether or not the
- * execbuf succeeds because we need to ensure that vkWaitForFences() and
- * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
- * VK_SUCCESS) in a finite amount of time even if execbuf fails.
- */
- fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
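+ /* Read back the offsets the kernel placed the BOs at; pinned BOs must
+ * not have moved.
+ */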
+ struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
+ for (uint32_t k = 0; k < execbuf.bo_count; k++) {
+ if (execbuf.bos[k]->flags & EXEC_OBJECT_PINNED)
+ assert(execbuf.bos[k]->offset == objects[k].offset);
+ execbuf.bos[k]->offset = objects[k].offset;
}
- if (result == VK_SUCCESS && need_out_fence) {
- assert(!pdevice->has_syncobj_wait);
- int out_fence = execbuf.execbuf.rsvd2 >> 32;
- for (uint32_t i = 0; i < num_out_semaphores; i++) {
- ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
- /* Out fences can't have temporary state because that would imply
- * that we imported a sync file and are trying to signal it.
- */
- assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
- struct anv_semaphore_impl *impl = &semaphore->permanent;
+ if (result == VK_SUCCESS && submit->need_out_fence)
+ submit->out_fence = execbuf.execbuf.rsvd2 >> 32;
- if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
- assert(impl->fd == -1);
- impl->fd = dup(out_fence);
- }
- }
- close(out_fence);
- }
+ error:
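+ /* Broadcast on both success and failure so threads waiting on this
+ * queue (e.g. in fence waits) re-check their state.
+ */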
+ pthread_cond_broadcast(&device->queue_submit);
- anv_execbuf_finish(&execbuf, &device->alloc);
+ anv_execbuf_finish(&execbuf);
return result;
}