X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fintel%2Fvulkan%2Fanv_batch_chain.c;h=f5859460664091659fdbac9d4109e59c71141fc7;hb=23a36c28118f6967654e963fc37c6b4c3b37db1b;hp=d24dd06d7eb4014635fc298ac41ac5b77da60cda;hpb=9851c8285f7bf70a6cb4bede2ee94110c14acc19;p=mesa.git

diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index d24dd06d7eb..f5859460664 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -29,9 +29,10 @@
 
 #include "anv_private.h"
 
-#include "genxml/gen7_pack.h"
 #include "genxml/gen8_pack.h"
 
+#include "util/debug.h"
+
 /** \file anv_batch_chain.c
  *
  * This file contains functions related to anv_cmd_buffer as a data
@@ -59,18 +60,18 @@ anv_reloc_list_init_clone(struct anv_reloc_list *list,
    }
 
    list->relocs =
-      anv_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
+      vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 
    if (list->relocs == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    list->reloc_bos =
-      anv_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
+      vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 
    if (list->reloc_bos == NULL) {
-      anv_free(alloc, list->relocs);
+      vk_free(alloc, list->relocs);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
    }
 
@@ -95,8 +96,8 @@ void
 anv_reloc_list_finish(struct anv_reloc_list *list,
                       const VkAllocationCallbacks *alloc)
 {
-   anv_free(alloc, list->relocs);
-   anv_free(alloc, list->reloc_bos);
+   vk_free(alloc, list->relocs);
+   vk_free(alloc, list->reloc_bos);
 }
 
 static VkResult
@@ -112,16 +113,16 @@ anv_reloc_list_grow(struct anv_reloc_list *list,
       new_length *= 2;
 
    struct drm_i915_gem_relocation_entry *new_relocs =
-      anv_alloc(alloc, new_length * sizeof(*list->relocs), 8,
+      vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (new_relocs == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    struct anv_bo **new_reloc_bos =
-      anv_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
+      vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-   if (new_relocs == NULL) {
-      anv_free(alloc, new_relocs);
+   if (new_reloc_bos == NULL) {
+      vk_free(alloc, new_relocs);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
    }
 
@@ -129,8 +130,8 @@ anv_reloc_list_grow(struct anv_reloc_list *list,
    memcpy(new_reloc_bos, list->reloc_bos,
           list->num_relocs * sizeof(*list->reloc_bos));
 
-   anv_free(alloc, list->relocs);
-   anv_free(alloc, list->reloc_bos);
+   vk_free(alloc, list->relocs);
+   vk_free(alloc, list->reloc_bos);
 
    list->array_length = new_length;
    list->relocs = new_relocs;
@@ -246,12 +247,13 @@ anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
 {
    VkResult result;
 
-   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
+   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (bbo == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
+                              ANV_CMD_BUFFER_BATCH_SIZE);
    if (result != VK_SUCCESS)
       goto fail_alloc;
 
@@ -266,7 +268,7 @@ anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
 fail_bo_alloc:
    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
-   anv_free(&cmd_buffer->pool->alloc, bbo);
+   vk_free(&cmd_buffer->pool->alloc, bbo);
 
    return result;
 }
 
@@ -278,12 +280,13 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
 {
    VkResult result;
 
-   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
+   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (bbo == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
+                              other_bbo->bo.size);
    if (result != VK_SUCCESS)
       goto fail_alloc;
 
@@ -295,8 +298,6 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
    bbo->length = other_bbo->length;
    memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
 
-   bbo->last_ss_pool_bo_offset = other_bbo->last_ss_pool_bo_offset;
-
    *bbo_out = bbo;
 
    return VK_SUCCESS;
@@ -304,7 +305,7 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
 fail_bo_alloc:
    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
-   anv_free(&cmd_buffer->pool->alloc, bbo);
+   vk_free(&cmd_buffer->pool->alloc, bbo);
 
    return result;
 }
@@ -316,7 +317,6 @@ anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
    batch->next = batch->start = bbo->bo.map;
    batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
    batch->relocs = &bbo->relocs;
-   bbo->last_ss_pool_bo_offset = 0;
    bbo->relocs.num_relocs = 0;
 }
 
@@ -338,13 +338,44 @@ anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
    VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
 }
 
+static VkResult
+anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
+                  struct anv_batch *batch, size_t additional,
+                  size_t batch_padding)
+{
+   assert(batch->start == bbo->bo.map);
+   bbo->length = batch->next - batch->start;
+
+   size_t new_size = bbo->bo.size;
+   while (new_size <= bbo->length + additional + batch_padding)
+      new_size *= 2;
+
+   if (new_size == bbo->bo.size)
+      return VK_SUCCESS;
+
+   struct anv_bo new_bo;
+   VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
+                                       &new_bo, new_size);
+   if (result != VK_SUCCESS)
+      return result;
+
+   memcpy(new_bo.map, bbo->bo.map, bbo->length);
+
+   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
+
+   bbo->bo = new_bo;
+   anv_batch_bo_continue(bbo, batch, batch_padding);
+
+   return VK_SUCCESS;
+}
+
 static void
 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                      struct anv_cmd_buffer *cmd_buffer)
 {
    anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
-   anv_free(&cmd_buffer->pool->alloc, bbo);
+   vk_free(&cmd_buffer->pool->alloc, bbo);
 }
 
 static VkResult
@@ -401,7 +432,7 @@ anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
 {
    return (struct anv_address) {
       .bo = &cmd_buffer->device->surface_state_block_pool.bo,
-      .offset = *(int32_t *)anv_vector_head(&cmd_buffer->bt_blocks),
+      .offset = *(int32_t *)u_vector_head(&cmd_buffer->bt_blocks),
    };
 }
 
@@ -417,17 +448,21 @@ emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
     * gens.
     */
 
+#define GEN7_MI_BATCH_BUFFER_START_length 2
+#define GEN7_MI_BATCH_BUFFER_START_length_bias 2
+
    const uint32_t gen7_length =
       GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
    const uint32_t gen8_length =
       GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
 
-   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START,
-      .DWordLength = cmd_buffer->device->info.gen < 8 ?
-                     gen7_length : gen8_length,
-      ._2ndLevelBatchBuffer = _1stlevelbatch,
-      .AddressSpaceIndicator = ASI_PPGTT,
-      .BatchBufferStartAddress = { bo, offset });
+   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
+      bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
+                        gen7_length : gen8_length;
+      bbs._2ndLevelBatchBuffer = _1stlevelbatch;
+      bbs.AddressSpaceIndicator = ASI_PPGTT;
+      bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
+   }
 }
 
 static void
@@ -460,7 +495,7 @@ anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
    if (result != VK_SUCCESS)
       return result;
 
-   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
+   struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
    if (seen_bbo == NULL) {
       anv_batch_bo_destroy(new_bbo, cmd_buffer);
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -476,13 +511,96 @@ anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
    return VK_SUCCESS;
 }
 
+static VkResult
+anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
+{
+   struct anv_cmd_buffer *cmd_buffer = _data;
+   struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
+
+   VkResult result =
+      anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
+                        GEN8_MI_BATCH_BUFFER_START_length * 4);
+   return result;
+}
+
+/** Allocate a binding table
+ *
+ * This function allocates a binding table.  This is a bit more complicated
+ * than one would think due to a combination of Vulkan driver design and some
+ * unfortunate hardware restrictions.
+ *
+ * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
+ * the binding table pointer which means that all binding tables need to live
+ * in the bottom 64k of surface state base address.  The way the GL driver has
+ * classically dealt with this restriction is to emit all surface states
+ * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
+ * isn't really an option in Vulkan for a couple of reasons:
+ *
+ * 1) In Vulkan, we have growing (or chaining) batches so surface states have
+ *    to live in their own buffer and we have to be able to re-emit
+ *    STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
+ *    order to avoid emitting STATE_BASE_ADDRESS any more often than needed
+ *    (it's not that hard to hit 64k of just binding tables), we allocate
+ *    surface state objects up-front when VkImageView is created.  In order
+ *    for this to work, surface state objects need to be allocated from a
+ *    global buffer.
+ *
+ * 2) We tried to design the surface state system in such a way that it's
+ *    already ready for bindless texturing.  The way bindless texturing works
+ *    on our hardware is that you have a big pool of surface state objects
+ *    (with its own state base address) and the bindless handles are simply
+ *    offsets into that pool.  With the architecture we chose, we already
+ *    have that pool and it's exactly the same pool that we use for regular
+ *    surface states so we should already be ready for bindless.
+ *
+ * 3) For render targets, we need to be able to fill out the surface states
+ *    later in vkBeginRenderPass so that we can assign clear colors
+ *    correctly.  One way to do this would be to just create the surface
+ *    state data and then repeatedly copy it into the surface state BO every
+ *    time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
+ *    rather annoying, and it's much simpler to allocate them up-front and
+ *    re-use them for the entire render pass.
+ *
+ * While none of these are technically blockers for emitting state on the fly
+ * like we do in GL, the ability to have a single surface state pool
+ * simplifies things greatly.  Unfortunately, it comes at a cost...
+ *
+ * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
+ * place the binding tables just anywhere in surface state base address.
+ * Because 64k isn't a whole lot of space, we can't simply restrict the
+ * surface state buffer to 64k; we have to be more clever.  The solution we've
+ * chosen is to have a block pool with a maximum size of 2G that starts at
+ * zero and grows in both directions.  All surface states are allocated from
+ * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
+ * binding tables from the bottom of the pool (negative offsets).  Every time
+ * we allocate a new binding table block, we set surface state base address to
+ * point to the bottom of the binding table block.  This way all of the
+ * binding tables in the block are in the bottom 64k of surface state base
+ * address.  When we fill out the binding table, we add the distance between
+ * the bottom of our binding table block and zero of the block pool to the
+ * surface state offsets so that they are correct relative to our new surface
+ * state base address at the bottom of the binding table block.
+ *
+ * \see adjust_relocations_from_state_pool()
+ * \see adjust_relocations_to_state_pool()
+ *
+ * \param[in] entries The number of surface state entries the binding
+ *                    table should be able to hold.
+ *
+ * \param[out] state_offset The offset from surface state base address
+ *                          where the surface states live.  This must be
+ *                          added to the surface state offset when it is
+ *                          written into the binding table entry.
+ *
+ * \return An anv_state representing the binding table
+ */
 struct anv_state
 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                    uint32_t entries, uint32_t *state_offset)
 {
    struct anv_block_pool *block_pool =
       &cmd_buffer->device->surface_state_block_pool;
-   int32_t *bt_block = anv_vector_head(&cmd_buffer->bt_blocks);
+   int32_t *bt_block = u_vector_head(&cmd_buffer->bt_blocks);
    struct anv_state state;
 
    state.alloc_size = align_u32(entries * 4, 32);
@@ -504,7 +622,9 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
 struct anv_state
 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
 {
-   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
+   struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
+   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
+                                 isl_dev->ss.size, isl_dev->ss.align);
 }
 
 struct anv_state
@@ -521,7 +641,7 @@ anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
    struct anv_block_pool *block_pool =
       &cmd_buffer->device->surface_state_block_pool;
 
-   int32_t *offset = anv_vector_add(&cmd_buffer->bt_blocks);
+   int32_t *offset = u_vector_add(&cmd_buffer->bt_blocks);
    if (offset == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -546,21 +666,26 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
    list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
 
    cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
-   cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
    cmd_buffer->batch.user_data = cmd_buffer;
 
+   if (cmd_buffer->device->can_chain_batches) {
+      cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
+   } else {
+      cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
+   }
+
    anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                       GEN8_MI_BATCH_BUFFER_START_length * 4);
 
-   int success = anv_vector_init(&cmd_buffer->seen_bbos,
+   int success = u_vector_init(&cmd_buffer->seen_bbos,
                                  sizeof(struct anv_bo *),
                                  8 * sizeof(struct anv_bo *));
    if (!success)
       goto fail_batch_bo;
 
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
+   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
 
-   success = anv_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
+   success = u_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
                              8 * sizeof(int32_t));
    if (!success)
       goto fail_seen_bbos;
@@ -569,19 +694,16 @@ anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
                                 &cmd_buffer->pool->alloc);
    if (result != VK_SUCCESS)
       goto fail_bt_blocks;
+   cmd_buffer->last_ss_pool_center = 0;
 
    anv_cmd_buffer_new_binding_table_block(cmd_buffer);
 
-   cmd_buffer->execbuf2.objects = NULL;
-   cmd_buffer->execbuf2.bos = NULL;
-   cmd_buffer->execbuf2.array_length = 0;
-
    return VK_SUCCESS;
 
 fail_bt_blocks:
-   anv_vector_finish(&cmd_buffer->bt_blocks);
+   u_vector_finish(&cmd_buffer->bt_blocks);
 fail_seen_bbos:
-   anv_vector_finish(&cmd_buffer->seen_bbos);
+   u_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
    anv_batch_bo_destroy(batch_bo, cmd_buffer);
 
@@ -592,24 +714,21 @@ void
 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
 {
    int32_t *bt_block;
-   anv_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
+   u_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                          *bt_block);
    }
-   anv_vector_finish(&cmd_buffer->bt_blocks);
+   u_vector_finish(&cmd_buffer->bt_blocks);
 
    anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
 
-   anv_vector_finish(&cmd_buffer->seen_bbos);
+   u_vector_finish(&cmd_buffer->seen_bbos);
 
    /* Destroy all of the batch buffers */
    list_for_each_entry_safe(struct anv_batch_bo, bbo,
                             &cmd_buffer->batch_bos, link) {
       anv_batch_bo_destroy(bbo, cmd_buffer);
    }
-
-   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.objects);
-   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.bos);
 }
 
 void
@@ -628,21 +747,22 @@ anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
                       &cmd_buffer->batch,
                       GEN8_MI_BATCH_BUFFER_START_length * 4);
 
-   while (anv_vector_length(&cmd_buffer->bt_blocks) > 1) {
-      int32_t *bt_block = anv_vector_remove(&cmd_buffer->bt_blocks);
+   while (u_vector_length(&cmd_buffer->bt_blocks) > 1) {
+      int32_t *bt_block = u_vector_remove(&cmd_buffer->bt_blocks);
       anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                           *bt_block);
    }
-   assert(anv_vector_length(&cmd_buffer->bt_blocks) == 1);
+   assert(u_vector_length(&cmd_buffer->bt_blocks) == 1);
    cmd_buffer->bt_next = 0;
 
    cmd_buffer->surface_relocs.num_relocs = 0;
+   cmd_buffer->last_ss_pool_center = 0;
 
    /* Reset the list of seen buffers */
    cmd_buffer->seen_bbos.head = 0;
    cmd_buffer->seen_bbos.tail = 0;
 
-   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
+   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
       anv_cmd_buffer_current_batch_bo(cmd_buffer);
 }
 
@@ -661,11 +781,11 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
       cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
       assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
 
-      anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);
+      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
 
       /* Round batch up to an even number of dwords. */
       if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
-         anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP);
+         anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
 
       cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
    }
@@ -678,7 +798,9 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
        * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
-      if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
+      if (!cmd_buffer->device->can_chain_batches) {
+         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
+      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
           (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
          /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
@@ -707,7 +829,7 @@ anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                              struct list_head *list)
 {
    list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
-      struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
+      struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
       if (bbo_ptr == NULL)
          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -724,8 +846,15 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
    switch (secondary->exec_mode) {
    case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
       anv_batch_emit_batch(&primary->batch, &secondary->batch);
-      anv_cmd_buffer_emit_state_base_address(primary);
       break;
+   case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
+      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
+      unsigned length = secondary->batch.end - secondary->batch.start;
+      anv_batch_bo_grow(primary, bbo, &primary->batch, length,
+                        GEN8_MI_BATCH_BUFFER_START_length * 4);
+      anv_batch_emit_batch(&primary->batch, &secondary->batch);
+      break;
+   }
    case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
       struct anv_batch_bo *first_bbo =
          list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
@@ -764,8 +893,6 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
             p += CACHELINE_SIZE;
          }
       }
-
-      anv_cmd_buffer_emit_state_base_address(primary);
       break;
    }
    case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
@@ -789,8 +916,6 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
 
       anv_batch_bo_continue(last_bbo, &primary->batch,
                             GEN8_MI_BATCH_BUFFER_START_length * 4);
-
-      anv_cmd_buffer_emit_state_base_address(primary);
       break;
    }
    default:
@@ -801,56 +926,83 @@ anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                           &secondary->surface_relocs, 0);
 }
 
+struct anv_execbuf {
+   struct drm_i915_gem_execbuffer2           execbuf;
+
+   struct drm_i915_gem_exec_object2 *        objects;
+   uint32_t                                  bo_count;
+   struct anv_bo **                          bos;
+
+   /* Allocated length of the 'objects' and 'bos' arrays */
+   uint32_t                                  array_length;
+};
+
+static void
+anv_execbuf_init(struct anv_execbuf *exec)
+{
+   memset(exec, 0, sizeof(*exec));
+}
+
+static void
+anv_execbuf_finish(struct anv_execbuf *exec,
+                   const VkAllocationCallbacks *alloc)
+{
+   vk_free(alloc, exec->objects);
+   vk_free(alloc, exec->bos);
+}
+
 static VkResult
-anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
-                      struct anv_bo *bo,
-                      struct anv_reloc_list *relocs)
+anv_execbuf_add_bo(struct anv_execbuf *exec,
+                   struct anv_bo *bo,
+                   struct anv_reloc_list *relocs,
+                   const VkAllocationCallbacks *alloc)
 {
    struct drm_i915_gem_exec_object2 *obj = NULL;
 
-   if (bo->index < cmd_buffer->execbuf2.bo_count &&
-       cmd_buffer->execbuf2.bos[bo->index] == bo)
-      obj = &cmd_buffer->execbuf2.objects[bo->index];
+   if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
+      obj = &exec->objects[bo->index];
 
    if (obj == NULL) {
       /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
-      if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
-         uint32_t new_len = cmd_buffer->execbuf2.objects ?
-                           cmd_buffer->execbuf2.array_length * 2 : 64;
+      if (exec->bo_count >= exec->array_length) {
+         uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
 
         struct drm_i915_gem_exec_object2 *new_objects =
-            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_objects),
-                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+            vk_alloc(alloc, new_len * sizeof(*new_objects),
+                     8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
         struct anv_bo **new_bos =
-            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_bos),
-                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-         if (new_objects == NULL) {
-            anv_free(&cmd_buffer->pool->alloc, new_objects);
+            vk_alloc(alloc, new_len * sizeof(*new_bos),
+                     8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+         if (new_bos == NULL) {
+            vk_free(alloc, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
 
-         if (cmd_buffer->execbuf2.objects) {
-            memcpy(new_objects, cmd_buffer->execbuf2.objects,
-                   cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
-            memcpy(new_bos, cmd_buffer->execbuf2.bos,
-                   cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
+         if (exec->objects) {
+            memcpy(new_objects, exec->objects,
+                   exec->bo_count * sizeof(*new_objects));
+            memcpy(new_bos, exec->bos,
+                   exec->bo_count * sizeof(*new_bos));
         }
 
-         cmd_buffer->execbuf2.objects = new_objects;
-         cmd_buffer->execbuf2.bos = new_bos;
-         cmd_buffer->execbuf2.array_length = new_len;
+         vk_free(alloc, exec->objects);
+         vk_free(alloc, exec->bos);
+
+         exec->objects = new_objects;
+         exec->bos = new_bos;
+         exec->array_length = new_len;
      }
 
-      assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);
+      assert(exec->bo_count < exec->array_length);
 
-      bo->index = cmd_buffer->execbuf2.bo_count++;
-      obj = &cmd_buffer->execbuf2.objects[bo->index];
-      cmd_buffer->execbuf2.bos[bo->index] = bo;
+      bo->index = exec->bo_count++;
+      obj = &exec->objects[bo->index];
+      exec->bos[bo->index] = bo;
 
       obj->handle = bo->gem_handle;
       obj->relocation_count = 0;
@@ -873,7 +1025,7 @@ anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
      for (size_t i = 0; i < relocs->num_relocs; i++) {
         /* A quick sanity check on relocations */
        assert(relocs->relocs[i].offset < bo->size);
-         anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
+         anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL, alloc);
      }
   }
 
@@ -884,82 +1036,62 @@ static void
 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                               struct anv_reloc_list *list)
 {
-   struct anv_bo *bo;
-
-   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
-    * struct drm_i915_gem_exec_object2 against the bos current offset and if
-    * all bos haven't moved it will skip relocation processing alltogether.
-    * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
-    * value of offset so we can set it either way.  For that to work we need
-    * to make sure all relocs use the same presumed offset.
- */ - - for (size_t i = 0; i < list->num_relocs; i++) { - bo = list->reloc_bos[i]; - if (bo->offset != list->relocs[i].presumed_offset) - cmd_buffer->execbuf2.need_reloc = true; - - list->relocs[i].target_handle = bo->index; - } -} - -static uint64_t -read_reloc(const struct anv_device *device, const void *p) -{ - if (device->info.gen >= 8) - return *(uint64_t *)p; - else - return *(uint32_t *)p; + for (size_t i = 0; i < list->num_relocs; i++) + list->relocs[i].target_handle = list->reloc_bos[i]->index; } static void -write_reloc(const struct anv_device *device, void *p, uint64_t v) +write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush) { - if (device->info.gen >= 8) - *(uint64_t *)p = v; - else + unsigned reloc_size = 0; + if (device->info.gen >= 8) { + /* From the Broadwell PRM Vol. 2a, MI_LOAD_REGISTER_MEM::MemoryAddress: + * + * "This field specifies the address of the memory location where the + * register value specified in the DWord above will read from. The + * address specifies the DWord location of the data. Range = + * GraphicsVirtualAddress[63:2] for a DWord register GraphicsAddress + * [63:48] are ignored by the HW and assumed to be in correct + * canonical form [63:48] == [47]." + */ + const int shift = 63 - 47; + reloc_size = sizeof(uint64_t); + *(uint64_t *)p = (((int64_t)v) << shift) >> shift; + } else { + reloc_size = sizeof(uint32_t); *(uint32_t *)p = v; + } + + if (flush && !device->info.has_llc) + anv_clflush_range(p, reloc_size); } static void -adjust_relocations_from_block_pool(struct anv_block_pool *pool, - struct anv_reloc_list *relocs) +adjust_relocations_from_state_pool(struct anv_block_pool *pool, + struct anv_reloc_list *relocs, + uint32_t last_pool_center_bo_offset) { - for (size_t i = 0; i < relocs->num_relocs; i++) { - /* In general, we don't know how stale the relocated value is. It - * may have been used last time or it may not. Since we don't want - * to stomp it while the GPU may be accessing it, we haven't updated - * it anywhere else in the code. Instead, we just set the presumed - * offset to what it is now based on the delta and the data in the - * block pool. Then the kernel will update it for us if needed. - */ - assert(relocs->relocs[i].offset < pool->state.end); - const void *p = pool->map + relocs->relocs[i].offset; - - /* We're reading back the relocated value from potentially incoherent - * memory here. However, any change to the value will be from the kernel - * writing out relocations, which will keep the CPU cache up to date. - */ - relocs->relocs[i].presumed_offset = - read_reloc(pool->device, p) - relocs->relocs[i].delta; + assert(last_pool_center_bo_offset <= pool->center_bo_offset); + uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset; + for (size_t i = 0; i < relocs->num_relocs; i++) { /* All of the relocations from this block pool to other BO's should * have been emitted relative to the surface block pool center. We * need to add the center offset to make them relative to the * beginning of the actual GEM bo. 
       */
-      relocs->relocs[i].offset += pool->center_bo_offset;
+      relocs->relocs[i].offset += delta;
    }
 }
 
 static void
-adjust_relocations_to_block_pool(struct anv_block_pool *pool,
+adjust_relocations_to_state_pool(struct anv_block_pool *pool,
                                  struct anv_bo *from_bo,
                                  struct anv_reloc_list *relocs,
-                                 uint32_t *last_pool_center_bo_offset)
+                                 uint32_t last_pool_center_bo_offset)
 {
-   assert(*last_pool_center_bo_offset <= pool->center_bo_offset);
-   uint32_t delta = pool->center_bo_offset - *last_pool_center_bo_offset;
+   assert(last_pool_center_bo_offset <= pool->center_bo_offset);
+   uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
 
    /* When we initially emit relocations into a block pool, we don't
    * actually know what the final center_bo_offset will be so we just emit
@@ -984,37 +1116,147 @@ adjust_relocations_to_block_pool(struct anv_block_pool *pool,
       assert(relocs->relocs[i].offset < from_bo->size);
       write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
                   relocs->relocs[i].presumed_offset +
-                  relocs->relocs[i].delta);
+                  relocs->relocs[i].delta, false);
    }
+}
 
-   *last_pool_center_bo_offset = pool->center_bo_offset;
+static void
+anv_reloc_list_apply(struct anv_device *device,
+                     struct anv_reloc_list *list,
+                     struct anv_bo *bo,
+                     bool always_relocate)
+{
+   for (size_t i = 0; i < list->num_relocs; i++) {
+      struct anv_bo *target_bo = list->reloc_bos[i];
+      if (list->relocs[i].presumed_offset == target_bo->offset &&
+          !always_relocate)
+         continue;
+
+      void *p = bo->map + list->relocs[i].offset;
+      write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
+      list->relocs[i].presumed_offset = target_bo->offset;
+   }
 }
 
-void
-anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
+/**
+ * This function applies the relocations for a command buffer and writes the
+ * actual addresses into the buffers as per what we were told by the kernel on
+ * the previous execbuf2 call.  This should be safe to do because, for each
+ * relocated address, we have two cases:
+ *
+ * 1) The target BO is inactive (as seen by the kernel).  In this case, it is
+ *    not in use by the GPU so updating the address is 100% ok.  It won't be
+ *    in-use by the GPU (from our context) again until the next execbuf2
+ *    happens.  If the kernel decides to move it in the next execbuf2, it
+ *    will have to do the relocations itself, but that's ok because it should
+ *    have all of the information needed to do so.
+ *
+ * 2) The target BO is active (as seen by the kernel).  In this case, it
+ *    hasn't moved since the last execbuffer2 call because GTT shuffling
+ *    *only* happens when the BO is idle.  (From our perspective, it only
+ *    happens inside the execbuffer2 ioctl, but the shuffling may be
+ *    triggered by another ioctl; with full-ppgtt this is limited to
+ *    execbuffer2 ioctls on the same context, or to memory pressure.)  Since
+ *    the target BO hasn't moved, our anv_bo::offset exactly matches the
+ *    BO's GTT address and the relocated value we are writing into the BO
+ *    will be the same as the value that is already there.
+ *
+ *    There is also a possibility that the target BO is active but the exact
+ *    RENDER_SURFACE_STATE object we are writing the relocation into isn't in
+ *    use.  In this case, the address currently in the RENDER_SURFACE_STATE
+ *    may be stale but it's still safe to write the relocation because that
+ *    particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
+ *    won't be until the next execbuf2 call.
+ *
+ * By doing relocations on the CPU, we can tell the kernel that it doesn't
+ * need to bother.  We want to do this because the surface state buffer is
+ * used by every command buffer so, if the kernel does the relocations, it
+ * will always be busy and the kernel will always stall.  This is also
+ * probably the fastest mechanism for doing relocations since the kernel
+ * would have to make a full copy of all the relocation lists.
+ */
+static bool
+relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
+                    struct anv_execbuf *exec)
+{
+   static int userspace_relocs = -1;
+   if (userspace_relocs < 0)
+      userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
+   if (!userspace_relocs)
+      return false;
+
+   /* First, we have to check to see whether or not we can even do the
+    * relocation.  New buffers which have never been submitted to the kernel
+    * don't have a valid offset so we need to let the kernel do relocations so
+    * that we can get offsets for them.  On future execbuf2 calls, those
+    * buffers will have offsets and we will be able to skip relocating.
+    * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
+    */
+   for (uint32_t i = 0; i < exec->bo_count; i++) {
+      if (exec->bos[i]->offset == (uint64_t)-1)
+         return false;
+   }
+
+   /* Since surface states are shared between command buffers and we don't
+    * know what order they will be submitted to the kernel, we don't know
+    * what address is actually written in the surface state object at any
+    * given time.  The only option is to always relocate them.
+    */
+   anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
+                        &cmd_buffer->device->surface_state_block_pool.bo,
+                        true /* always relocate surface states */);
+
+   /* Since we own all of the batch buffers, we know what values are stored
+    * in the relocated addresses and only have to update them if the offsets
+    * have changed.
+    */
+   struct anv_batch_bo **bbo;
+   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+      anv_reloc_list_apply(cmd_buffer->device,
+                           &(*bbo)->relocs, &(*bbo)->bo, false);
+   }
+
+   for (uint32_t i = 0; i < exec->bo_count; i++)
+      exec->objects[i].offset = exec->bos[i]->offset;
+
+   return true;
+}
+
+VkResult
+anv_cmd_buffer_execbuf(struct anv_device *device,
+                       struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_batch *batch = &cmd_buffer->batch;
    struct anv_block_pool *ss_pool =
       &cmd_buffer->device->surface_state_block_pool;
 
-   cmd_buffer->execbuf2.bo_count = 0;
-   cmd_buffer->execbuf2.need_reloc = false;
+   struct anv_execbuf execbuf;
+   anv_execbuf_init(&execbuf);
 
-   adjust_relocations_from_block_pool(ss_pool, &cmd_buffer->surface_relocs);
-   anv_cmd_buffer_add_bo(cmd_buffer, &ss_pool->bo, &cmd_buffer->surface_relocs);
+   adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
+                                      cmd_buffer->last_ss_pool_center);
+   anv_execbuf_add_bo(&execbuf, &ss_pool->bo, &cmd_buffer->surface_relocs,
+                      &cmd_buffer->pool->alloc);
 
    /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
    struct anv_batch_bo **bbo;
-   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
-      adjust_relocations_to_block_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
-                                       &(*bbo)->last_ss_pool_bo_offset);
+   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+      adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
+                                       cmd_buffer->last_ss_pool_center);
 
-      anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
+      anv_execbuf_add_bo(&execbuf, &(*bbo)->bo, &(*bbo)->relocs,
+                         &cmd_buffer->pool->alloc);
    }
 
+   /* Now that we've adjusted all of the surface state relocations, we need to
+    * record the surface state pool center so future executions of the command
+    * buffer can adjust correctly.
+    */
+   cmd_buffer->last_ss_pool_center = ss_pool->center_bo_offset;
+
    struct anv_batch_bo *first_batch_bo =
       list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
 
@@ -1023,20 +1265,19 @@ anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
     * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
-   if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
+   if (first_batch_bo->bo.index != execbuf.bo_count - 1) {
      uint32_t idx = first_batch_bo->bo.index;
-      uint32_t last_idx = cmd_buffer->execbuf2.bo_count - 1;
+      uint32_t last_idx = execbuf.bo_count - 1;
 
-      struct drm_i915_gem_exec_object2 tmp_obj =
-         cmd_buffer->execbuf2.objects[idx];
-      assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);
+      struct drm_i915_gem_exec_object2 tmp_obj = execbuf.objects[idx];
+      assert(execbuf.bos[idx] == &first_batch_bo->bo);
 
-      cmd_buffer->execbuf2.objects[idx] = cmd_buffer->execbuf2.objects[last_idx];
-      cmd_buffer->execbuf2.bos[idx] = cmd_buffer->execbuf2.bos[last_idx];
-      cmd_buffer->execbuf2.bos[idx]->index = idx;
+      execbuf.objects[idx] = execbuf.objects[last_idx];
+      execbuf.bos[idx] = execbuf.bos[last_idx];
+      execbuf.bos[idx]->index = idx;
 
-      cmd_buffer->execbuf2.objects[last_idx] = tmp_obj;
-      cmd_buffer->execbuf2.bos[last_idx] = &first_batch_bo->bo;
+      execbuf.objects[last_idx] = tmp_obj;
+      execbuf.bos[last_idx] = &first_batch_bo->bo;
      first_batch_bo->bo.index = last_idx;
    }
 
@@ -1044,22 +1285,22 @@
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
-   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
+   u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
    anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
 
    if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_mfence();
-      anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
+      u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
        for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
          __builtin_ia32_clflush((*bbo)->bo.map + i);
     }
   }
 
-   cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
-      .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
-      .buffer_count = cmd_buffer->execbuf2.bo_count,
+   execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
+      .buffers_ptr = (uintptr_t) execbuf.objects,
+      .buffer_count = execbuf.bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
@@ -1072,6 +1313,49 @@
      .rsvd2 = 0,
    };
 
-   if (!cmd_buffer->execbuf2.need_reloc)
-      cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
+   if (relocate_cmd_buffer(cmd_buffer, &execbuf)) {
+      /* If we were able to successfully relocate everything, tell the kernel
+       * that it can skip doing relocations.  The requirements for using
+       * NO_RELOC are:
+       *
+       * 1) The addresses written in the objects must match the corresponding
+       *    reloc.presumed_offset which in turn must match the corresponding
+       *    execobject.offset.
+       *
+       * 2) To avoid stalling, execobject.offset should match the current
+       *    address of that object within the active context.
+       *
+       * In order to satisfy all of the invariants that make userspace
+       * relocations safe (see relocate_cmd_buffer()), we need to further
+       * ensure that the addresses we use match those used by the kernel
+       * for the most recent execbuf2.
+       *
+       * The kernel may still choose to do relocations anyway if something has
+       * moved in the GTT.  In this case, the relocation list still needs to
+       * be valid.  All relocations on the batch buffers are already valid and
+       * kept up-to-date.  For surface state relocations, by applying the
+       * relocations in relocate_cmd_buffer, we ensured that the address in
+       * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
+       * safe for the kernel to relocate them as needed.
+       */
+      execbuf.execbuf.flags |= I915_EXEC_NO_RELOC;
+   } else {
+      /* In the case where we fall back to doing kernel relocations, we need
+       * to ensure that the relocation list is valid.  All relocations on the
+       * batch buffers are already valid and kept up-to-date.  Since surface
+       * states are shared between command buffers and we don't know what
+       * order they will be submitted to the kernel, we don't know what
+       * address is actually written in the surface state object at any given
+       * time.  The only option is to set a bogus presumed offset and let the
+       * kernel relocate them.
+       */
+      for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
+         cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
+   }
+
+   VkResult result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
+
+   anv_execbuf_finish(&execbuf, &cmd_buffer->pool->alloc);
+
+   return result;
 }
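
A note on how anv_cmd_buffer_grow_batch() gets invoked: anv_batch_emit() reserves dwords through a helper that calls batch->extend_cb whenever the reservation would overrun batch->end. The sketch below shows the rough shape of that caller. It is an illustration reconstructed from the callback contract visible in this patch, not the actual anv_batch.c source, and the error handling in particular is simplified.

/* Illustrative sketch only -- the real helper lives in anv_batch.c.  It
 * shows why the grow/chain callbacks may remap bbo->bo and move
 * batch->next: the emit path only reads batch->next after the callback
 * returns.
 */
static void *
sketch_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end) {
      /* Either anv_cmd_buffer_chain_batch() (new BO plus an
       * MI_BATCH_BUFFER_START) or anv_cmd_buffer_grow_batch() (memcpy into
       * a bigger BO), depending on device->can_chain_batches.
       */
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS)
         return NULL;   /* simplified; real code reports the error */
   }

   void *p = batch->next;          /* may point into a brand-new mapping */
   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);
   return p;
}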
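The two-sided pool arithmetic described above anv_cmd_buffer_alloc_binding_table() is easier to see with concrete numbers. The values below are invented for illustration; only the relationships between them come from the patch.

/* Worked example of the binding-table offset math (invented numbers).
 *
 * Say the current binding-table block was carved from the bottom of the
 * pool at pool offset -32k.  STATE_BASE_ADDRESS is programmed to the
 * bottom of that block, so every binding table inside the (< 64k) block
 * sits within the bottom 64k of surface state base address.
 */
int32_t bt_block = -32 * 1024;       /* *u_vector_head(&bt_blocks)        */
uint32_t state_offset = -bt_block;   /* what alloc_binding_table returns  */

/* A surface state allocated at pool offset +8k is 8k - (-32k) = 40k past
 * the new base, so 40k is the value written into the binding table entry:
 */
uint32_t ss_pool_offset = 8 * 1024;
uint32_t bt_entry = ss_pool_offset + state_offset;   /* 40960 */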
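The shift pair in write_reloc() is plain 48-to-64-bit sign extension, producing the "canonical form" the PRM quote asks for. A quick worked value (made up) with bit 47 set:

/* Sign-extend bit 47 into bits [63:48] so the address is canonical. */
uint64_t v = 0x0000a000deadb000ull;            /* bit 47 == 1 */
const int shift = 63 - 47;                     /* 16          */
uint64_t canonical = ((int64_t)v << shift) >> shift;
/* canonical == 0xffffa000deadb000: bits [63:48] now replicate bit 47.
 * For an address with bit 47 clear, the round trip is a no-op.
 */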
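Finally, anv_device_execbuf(), called at the end of anv_cmd_buffer_execbuf(), is defined elsewhere in the tree. The sketch below is an approximation of its obligations, assuming anv's usual thin anv_gem_execbuffer() ioctl wrapper, not the actual implementation: it submits the batch and then copies the kernel-assigned offset of every object back into the corresponding anv_bo, which is what lets relocate_cmd_buffer() take the NO_RELOC fast path on the next submission.

/* Approximate sketch; the real implementation lives in anv_device.c. */
VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = anv_gem_execbuffer(device, execbuf);  /* thin ioctl wrapper */
   if (ret != 0)
      return vk_error(VK_ERROR_DEVICE_LOST);       /* real error unknown */

   /* Record the kernel-assigned GTT address of every BO so that the next
    * submission can compare presumed offsets against real ones.
    */
   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t i = 0; i < execbuf->buffer_count; i++)
      execbuf_bos[i]->offset = objects[i].offset;

   return VK_SUCCESS;
}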