#include "anv_private.h"
#include "genxml/gen8_pack.h"
+#include "genxml/genX_bits.h"
+#include "perf/gen_perf.h"
#include "util/debug.h"
return address_u64;
}
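+/* Compute the anv_address of a location inside the batch, relative to the
+ * batch's start address.
+ */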
+struct anv_address
+anv_batch_address(struct anv_batch *batch, void *batch_location)
+{
+ assert(batch->start <= batch_location);
+
+ /* Allow a jump at the current location of the batch. */
+ assert(batch->next >= batch_location);
+
+ return anv_address_add(batch->start_addr, batch_location - batch->start);
+}
+
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
size_t batch_padding)
{
- batch->next = batch->start = bbo->bo->map;
- batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
+ anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
+ bbo->bo->map, bbo->bo->size - batch_padding);
batch->relocs = &bbo->relocs;
anv_reloc_list_clear(&bbo->relocs);
}
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
size_t batch_padding)
{
+ batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
batch->start = bbo->bo->map;
batch->next = bbo->bo->map + bbo->length;
batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
assert(((*bb_start >> 29) & 0x07) == 0);
assert(((*bb_start >> 23) & 0x3f) == 49);
- if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ if (cmd_buffer->device->physical->use_softpin) {
assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
+ struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
return (struct anv_address) {
- .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
- .offset = bt_block->offset,
+ .bo = pool->block_pool.bo,
+ .offset = bt_block->offset - pool->start_offset,
};
}
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
uint32_t entries, uint32_t *state_offset)
{
- struct anv_device *device = cmd_buffer->device;
- struct anv_state_pool *state_pool = &device->surface_state_pool;
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
- struct anv_state state;
- state.alloc_size = align_u32(entries * 4, 32);
+ uint32_t bt_size = align_u32(entries * 4, 32);
- if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
+ struct anv_state state = cmd_buffer->bt_next;
+ if (bt_size > state.alloc_size)
return (struct anv_state) { 0 };
- state.offset = cmd_buffer->bt_next;
- state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
- bt_block->offset + state.offset);
+ state.alloc_size = bt_size;
+ cmd_buffer->bt_next.offset += bt_size;
+ cmd_buffer->bt_next.map += bt_size;
+ cmd_buffer->bt_next.alloc_size -= bt_size;
- cmd_buffer->bt_next += state.alloc_size;
-
- if (device->instance->physicalDevice.use_softpin) {
- assert(bt_block->offset >= 0);
- *state_offset = device->surface_state_pool.block_pool.start_address -
- device->binding_table_pool.block_pool.start_address - bt_block->offset;
- } else {
- assert(bt_block->offset < 0);
- *state_offset = -bt_block->offset;
- }
+ assert(bt_block->offset < 0);
+ *state_offset = -bt_block->offset;
return state;
}
}
*bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
- cmd_buffer->bt_next = 0;
+
+ /* The bt_next state is a rolling state (we update it as we suballocate
+ * from it) which is relative to the start of the binding table block.
+ */
+ cmd_buffer->bt_next = *bt_block;
+ cmd_buffer->bt_next.offset = 0;
return VK_SUCCESS;
}
anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
}
assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
- cmd_buffer->bt_next = 0;
+ cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
+ cmd_buffer->bt_next.offset = 0;
anv_reloc_list_clear(&cmd_buffer->surface_relocs);
cmd_buffer->last_ss_pool_center = 0;
const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
if (!cmd_buffer->device->can_chain_batches) {
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
+ } else if (cmd_buffer->device->physical->use_call_secondary) {
+ cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
+ /* If the secondary command buffer begins & ends in the same BO and
+ * its length is less than the length of the CS prefetch, add some NOOP
+ * instructions so the last MI_BATCH_BUFFER_START is outside the CS
+ * prefetch.
+ */
+ if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
+ int32_t batch_len =
+ cmd_buffer->batch.next - cmd_buffer->batch.start;
+
+ for (int32_t i = 0; i < (512 - batch_len); i += 4)
+ anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
+ }
+
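+ /* Emit the jump back into the primary. The address field of this
+ * MI_BATCH_BUFFER_START isn't known yet; jump_addr points at that field
+ * so the primary can fill it in with an MI_STORE_DATA_IMM when it calls
+ * this secondary (see ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN).
+ */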
+ void *jump_addr =
+ anv_batch_emitn(&cmd_buffer->batch,
+ GEN8_MI_BATCH_BUFFER_START_length,
+ GEN8_MI_BATCH_BUFFER_START,
+ .AddressSpaceIndicator = ASI_PPGTT,
+ .SecondLevelBatchBuffer = Firstlevelbatch) +
+ (GEN8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
+ cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
+
+ /* The emit above may have caused us to chain batch buffers which
+ * would mean that batch_bo is no longer valid.
+ */
+ batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
} else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
(length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
/* If the secondary has exactly one batch buffer in its list *and*
GEN8_MI_BATCH_BUFFER_START_length * 4);
break;
}
+ case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
+ struct anv_batch_bo *first_bbo =
+ list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
+
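+ /* Emit an MI_STORE_DATA_IMM that writes, at execution time, the return
+ * address into the secondary's trailing MI_BATCH_BUFFER_START. The
+ * immediate data is patched below once the return point is known.
+ */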
+ uint64_t *write_return_addr =
+ anv_batch_emitn(&primary->batch,
+ GEN8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
+ GEN8_MI_STORE_DATA_IMM,
+ .Address = secondary->return_addr)
+ + (GEN8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
+
+ emit_batch_buffer_start(primary, first_bbo->bo, 0);
+
+ *write_return_addr =
+ anv_address_physical(anv_batch_address(&primary->batch,
+ primary->batch.next));
+
+ anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
+ break;
+ }
default:
assert(!"Invalid execution mode");
}
struct anv_execbuf {
struct drm_i915_gem_execbuffer2 execbuf;
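+ /* Timeline fence extension, chained onto the execbuf through
+ * anv_execbuf_add_ext() when the device uses thread submission.
+ */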
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+
struct drm_i915_gem_exec_object2 * objects;
uint32_t bo_count;
struct anv_bo ** bos;
const VkAllocationCallbacks * alloc;
VkSystemAllocationScope alloc_scope;
+
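+ /* Index of the perf query pass to configure, negative when unused. */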
+ int perf_query_pass;
};
static void
vk_free(exec->alloc, exec->bos);
}
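+/* Append an i915_user_extension to the execbuf's extension chain. When
+ * I915_EXEC_USE_EXTENSIONS is set, the kernel reinterprets cliprects_ptr as
+ * the head of that chain.
+ */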
+static void
+anv_execbuf_add_ext(struct anv_execbuf *exec,
+ uint32_t ext_name,
+ struct i915_user_extension *ext)
+{
+ __u64 *iter = &exec->execbuf.cliprects_ptr;
+
+ exec->execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
+
+ while (*iter != 0) {
+ iter = (__u64 *) &((struct i915_user_extension *)(uintptr_t)*iter)->next_extension;
+ }
+
+ ext->name = ext_name;
+
+ *iter = (uintptr_t) ext;
+}
+
static VkResult
anv_execbuf_add_bo_bitset(struct anv_device *device,
struct anv_execbuf *exec,
obj->rsvd2 = 0;
}
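+ /* GPU-written objects need implicit synchronization, so clear
+ * EXEC_OBJECT_ASYNC whenever we mark an object for write.
+ */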
+ if (extra_flags & EXEC_OBJECT_WRITE) {
+ obj->flags |= EXEC_OBJECT_WRITE;
+ obj->flags &= ~EXEC_OBJECT_ASYNC;
+ }
+
if (relocs != NULL) {
assert(obj->relocation_count == 0);
adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
cmd_buffer->last_ss_pool_center);
VkResult result;
- if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ if (cmd_buffer->device->physical->use_softpin) {
anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
bo, NULL, 0);
}
/* If we are pinning our BOs, we shouldn't have to relocate anything */
- if (cmd_buffer->device->instance->physicalDevice.use_softpin)
+ if (cmd_buffer->device->physical->use_softpin)
assert(!execbuf->has_relocs);
/* Now we go through and fixup all of the relocation lists to point to
.buffer_count = execbuf->bo_count,
.batch_start_offset = 0,
.batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
- .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+ .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
.rsvd1 = device->context_id,
.rsvd2 = 0,
};
return VK_SUCCESS;
}
+/* We lock around execbuf for three main reasons:
+ *
+ * 1) When a block pool is resized, we create a new gem handle with a
+ * different size and, in the case of surface states, possibly a different
+ * center offset, but we re-use the same anv_bo struct when we do so. If
+ * this happens in the middle of setting up an execbuf, we could end up
+ * with our list of BOs out of sync with our list of gem handles.
+ *
+ * 2) The algorithm we use for building the list of unique buffers isn't
+ * thread-safe. While the client is supposed to synchronize around
+ * QueueSubmit, this would be extremely difficult to debug if it ever came
+ * up in the wild due to a broken app. It's better to play it safe and
+ * just lock around QueueSubmit.
+ *
+ * 3) The anv_cmd_buffer_execbuf function may perform relocations in
+ * userspace. Because the surface state buffer is shared between
+ * batches, we can't afford to have that happen from multiple
+ * threads at the same time. Even though the user is supposed to ensure
+ * this doesn't happen, we play it safe as in (2) above.
+ *
+ * Since the only other things that ever take the device lock, such as
+ * block pool resize, only rarely happen, this will almost never be
+ * contended, so taking a lock isn't really an expensive operation in
+ * this case.
+ */
VkResult
-anv_queue_execbuf(struct anv_queue *queue,
- struct anv_queue_submit *submit)
+anv_queue_execbuf_locked(struct anv_queue *queue,
+ struct anv_queue_submit *submit)
{
struct anv_device *device = queue->device;
struct anv_execbuf execbuf;
anv_execbuf_init(&execbuf);
execbuf.alloc = submit->alloc;
execbuf.alloc_scope = submit->alloc_scope;
+ execbuf.perf_query_pass = submit->perf_query_pass;
- VkResult result;
-
- /* We lock around execbuf for three main reasons:
- *
- * 1) When a block pool is resized, we create a new gem handle with a
- * different size and, in the case of surface states, possibly a
- * different center offset but we re-use the same anv_bo struct when
- * we do so. If this happens in the middle of setting up an execbuf,
- * we could end up with our list of BOs out of sync with our list of
- * gem handles.
- *
- * 2) The algorithm we use for building the list of unique buffers isn't
- * thread-safe. While the client is supposed to syncronize around
- * QueueSubmit, this would be extremely difficult to debug if it ever
- * came up in the wild due to a broken app. It's better to play it
- * safe and just lock around QueueSubmit.
- *
- * 3) The anv_cmd_buffer_execbuf function may perform relocations in
- * userspace. Due to the fact that the surface state buffer is shared
- * between batches, we can't afford to have that happen from multiple
- * threads at the same time. Even though the user is supposed to
- * ensure this doesn't happen, we play it safe as in (2) above.
- *
- * Since the only other things that ever take the device lock such as block
- * pool resize only rarely happen, this will almost never be contended so
- * taking a lock isn't really an expensive operation in this case.
+ /* Always add the workaround BO as it includes a driver identifier for the
+ * error_state.
*/
- pthread_mutex_lock(&device->mutex);
+ VkResult result =
+ anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
+ if (result != VK_SUCCESS)
+ goto error;
for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
int signaled;
.buffer_count = execbuf.bo_count,
.batch_start_offset = 0,
.batch_len = submit->simple_bo_size,
- .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+ .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
.rsvd1 = device->context_id,
.rsvd2 = 0,
};
if (result != VK_SUCCESS)
goto error;
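+ /* This is a perf query pass when a pass index was given and the command
+ * buffer recorded performance queries; in that case a small preamble
+ * batch stored in the query pool BO is submitted first (see below).
+ */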
+ const bool has_perf_query =
+ submit->perf_query_pass >= 0 &&
+ submit->cmd_buffer &&
+ submit->cmd_buffer->perf_query_pool;
+
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
if (submit->cmd_buffer) {
- struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);
+ if (has_perf_query) {
+ struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
+ struct anv_bo *pass_batch_bo = query_pool->bo;
+ uint64_t pass_batch_offset =
+ khr_perf_query_preamble_offset(query_pool,
+ submit->perf_query_pass);
+
+ gen_print_batch(&device->decoder_ctx,
+ pass_batch_bo->map + pass_batch_offset, 64,
+ pass_batch_bo->offset + pass_batch_offset, false);
+ }
+ struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);
device->cmd_buffer_being_decoded = submit->cmd_buffer;
gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
(*bo)->bo->size, (*bo)->bo->offset, false);
}
if (submit->fence_count > 0) {
- assert(device->instance->physicalDevice.has_syncobj);
- execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
- execbuf.execbuf.num_cliprects = submit->fence_count;
- execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
+ assert(device->physical->has_syncobj);
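+ /* With the submission thread, fences are timeline syncobjs with
+ * associated values and must be passed through the timeline-fences
+ * execbuf extension rather than I915_EXEC_FENCE_ARRAY.
+ */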
+ if (device->has_thread_submit) {
+ execbuf.timeline_fences.fence_count = submit->fence_count;
+ execbuf.timeline_fences.handles_ptr = (uintptr_t)submit->fences;
+ execbuf.timeline_fences.values_ptr = (uintptr_t)submit->fence_values;
+ anv_execbuf_add_ext(&execbuf,
+ DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
+ &execbuf.timeline_fences.base);
+ } else {
+ execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
+ execbuf.execbuf.num_cliprects = submit->fence_count;
+ execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
+ }
}
if (submit->in_fence != -1) {
+ assert(!device->has_thread_submit);
execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
execbuf.execbuf.rsvd2 |= (uint32_t)submit->in_fence;
}
- if (submit->need_out_fence)
+ if (submit->need_out_fence) {
+ assert(!device->has_thread_submit);
execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
+ }
+
+ if (has_perf_query) {
+ struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
+ assert(submit->perf_query_pass < query_pool->n_passes);
+ struct gen_perf_query_info *query_info =
+ query_pool->pass_query[submit->perf_query_pass];
+
+ /* Some performance queries just use the pipeline statistics HW, in which
+ * case there is no need for OA and nothing to reconfigure.
+ */
+ if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
+ (query_info->kind == GEN_PERF_QUERY_TYPE_OA ||
+ query_info->kind == GEN_PERF_QUERY_TYPE_RAW)) {
+ int ret = gen_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
+ (void *)(uintptr_t) query_info->oa_metrics_set_id);
+ if (ret < 0) {
+ result = anv_device_set_lost(device,
+ "i915-perf config failed: %s",
+ strerror(errno));
+ }
+ }
+
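+ /* Submit the preamble batch for this pass, stored in the query pool BO,
+ * on its own ahead of the main execbuf.
+ */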
+ struct anv_bo *pass_batch_bo = query_pool->bo;
+
+ struct drm_i915_gem_exec_object2 query_pass_object = {
+ .handle = pass_batch_bo->gem_handle,
+ .offset = pass_batch_bo->offset,
+ .flags = pass_batch_bo->flags,
+ };
+ struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
+ .buffers_ptr = (uintptr_t) &query_pass_object,
+ .buffer_count = 1,
+ .batch_start_offset = khr_perf_query_preamble_offset(query_pool,
+ submit->perf_query_pass),
+ .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
+ .rsvd1 = device->context_id,
+ };
+
+ int ret = queue->device->no_hw ? 0 :
+ anv_gem_execbuffer(queue->device, &query_pass_execbuf);
+ if (ret)
+ result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
+ }
int ret = queue->device->no_hw ? 0 :
anv_gem_execbuffer(queue->device, &execbuf.execbuf);
- if (ret) {
- result = anv_queue_set_lost(queue,
- "execbuf2 failed: %s",
- strerror(ret));
- }
+ if (ret)
+ result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
for (uint32_t k = 0; k < execbuf.bo_count; k++) {
error:
pthread_cond_broadcast(&device->queue_submit);
- pthread_mutex_unlock(&queue->device->mutex);
anv_execbuf_finish(&execbuf);