pool->start_address = gen_canonical_address(start_address);
pool->map = NULL;
- /* This pointer will always point to the first BO in the list */
- pool->bo = &pool->bos[0];
-
- anv_bo_init(pool->bo, 0, 0);
-
if (!(pool->bo_flags & EXEC_OBJECT_PINNED)) {
/* Just make it 2GB up-front. The Linux kernel won't actually back it
* with pages until we either map and fault on one of them or we use
* userptr and send a chunk of it off to the GPU.
*/
pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
if (pool->fd == -1)
return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+
+ anv_bo_init(&pool->wrapper_bo, 0, 0);
+ pool->wrapper_bo.is_wrapper = true;
+ pool->bo = &pool->wrapper_bo;
} else {
+ /* This pointer will always point to the first BO in the list */
+ anv_bo_init(&pool->bos[0], 0, 0);
+ pool->bo = &pool->bos[0];
+
pool->fd = -1;
}
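For orientation, here is a minimal standalone sketch of the pointer wiring these two branches set up. model_bo and model_pool are invented stand-ins for the anv types, not driver code:

/* Hypothetical, simplified model of the two init paths above (not anv code). */
#include <stdbool.h>

struct model_bo {
   bool is_wrapper;
   void *map;                /* for a wrapper: the wrapped model_bo */
};

struct model_pool {
   struct model_bo wrapper_bo;
   struct model_bo bos[4];
   struct model_bo *bo;      /* the BO handed out to relocation lists */
};

static void
model_pool_init(struct model_pool *pool, bool softpin)
{
   if (!softpin) {
      /* Non-softpin: hand out a stable wrapper; the real backing BO can
       * be swapped behind it as the pool grows. */
      pool->wrapper_bo = (struct model_bo) { .is_wrapper = true };
      pool->bo = &pool->wrapper_bo;
   } else {
      /* Softpin: the first real BO is used directly; wrappers are not
       * allowed with softpin. */
      pool->bos[0] = (struct model_bo) { 0 };
      pool->bo = &pool->bos[0];
   }
}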
* it. Simply "allocate" it from our array if we didn't do it before.
* The offset doesn't matter since we are not pinning the BO anyway.
*/
- if (pool->nbos == 0)
+ if (pool->nbos == 0) {
+ pool->wrapper_bo.map = &pool->bos[0];
pool->nbos++;
- bo = pool->bo;
+ }
+ bo = pool->wrapper_bo.map;
bo_size = size;
bo_offset = 0;
}
result = anv_block_pool_expand_range(pool, center_bo_offset, size);
- pool->bo->flags = pool->bo_flags;
-
done:
pthread_mutex_unlock(&pool->device->mutex);
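Continuing the hypothetical model above: growing the pool only re-points wrapper_bo.map, which is exactly why anything that captured pool->bo earlier keeps working:

/* Hypothetical continuation of the model_pool sketch: growing installs a
 * fresh real BO behind the wrapper.  A relocation list that stashed
 * pool->bo still holds &pool->wrapper_bo, so it transparently resolves
 * to the new backing BO after the swap. */
static struct model_bo *
model_pool_grow(struct model_pool *pool, unsigned nbos)
{
   struct model_bo *fresh = &pool->bos[nbos]; /* stand-in for the grown BO */
   *fresh = (struct model_bo) { 0 };
   pool->wrapper_bo.map = fresh;              /* the only pointer that moves */
   return fresh;
}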
struct drm_i915_gem_relocation_entry *entry;
int index;
- uint64_t target_bo_offset = READ_ONCE(target_bo->offset);
+ struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
+ uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
if (address_u64_out)
*address_u64_out = target_bo_offset + delta;
- if (target_bo->flags & EXEC_OBJECT_PINNED) {
+ if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
if (list->deps == NULL) {
list->deps = _mesa_pointer_set_create(NULL);
if (unlikely(list->deps == NULL))
{
struct drm_i915_gem_exec_object2 *obj = NULL;
+ bo = anv_bo_unwrap(bo);
+
if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
obj = &exec->objects[bo->index];
struct anv_reloc_list *list)
{
for (size_t i = 0; i < list->num_relocs; i++)
- list->relocs[i].target_handle = list->reloc_bos[i]->index;
+ list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
}
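The hunks above all follow one rule: the kernel's execbuf object array only ever contains real BOs, so anything keyed on anv_bo::index must resolve wrappers first. A sketch of that rule, assuming the anv_bo_unwrap() helper added later in this patch; reloc_target_handle is an invented name:

/* Sketch only: relies on the anv definitions from this patch; the helper
 * name is hypothetical. */
static uint32_t
reloc_target_handle(struct anv_bo *target_bo)
{
   /* A wrapper BO never appears in the kernel's object list, so its
    * index field is meaningless; unwrap before reading it. */
   return anv_bo_unwrap(target_bo)->index;
}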
static void
struct anv_reloc_list *relocs,
uint32_t last_pool_center_bo_offset)
{
+ assert(!from_bo->is_wrapper);
assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
struct anv_bo *bo,
bool always_relocate)
{
+ bo = anv_bo_unwrap(bo);
+
for (size_t i = 0; i < list->num_relocs; i++) {
- struct anv_bo *target_bo = list->reloc_bos[i];
+ struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
if (list->relocs[i].presumed_offset == target_bo->offset &&
!always_relocate)
continue;
* Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
*/
for (uint32_t i = 0; i < exec->bo_count; i++) {
+ assert(!exec->bos[i]->is_wrapper);
if (exec->bos[i]->offset == (uint64_t)-1)
return false;
}
* what address is actually written in the surface state object at any
* given time. The only option is to always relocate them.
*/
+ struct anv_bo *surface_state_bo =
+ anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
- cmd_buffer->device->surface_state_pool.block_pool.bo,
+ surface_state_bo,
true /* always relocate surface states */);
/* Since we own all of the batch buffers, we know what values are stored
uint64_t offset;
uint64_t size;
+
+ /* Map for internally mapped BOs.
+ *
+ * If anv_bo::is_wrapper is set, map points to the wrapped BO.
+ */
void *map;
/** Flags to pass to the kernel through drm_i915_gem_exec_object2::flags */
/** True if this BO may be shared with other processes */
bool is_external:1;
+
+ /** True if this BO is a wrapper
+ *
+ * When set to true, none of the fields in this BO are meaningful except
+ * for anv_bo::is_wrapper and anv_bo::map, which points to the actual BO.
+ * See also anv_bo_unwrap(). Wrapper BOs are not allowed when use_softpin
+ * is set in the physical device.
+ */
+ bool is_wrapper:1;
};
static inline void
bo->map = NULL;
bo->flags = 0;
bo->is_external = false;
+ bo->is_wrapper = false;
+}
+
+static inline struct anv_bo *
+anv_bo_unwrap(struct anv_bo *bo)
+{
+ while (bo->is_wrapper)
+ bo = bo->map;
+ return bo;
}
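A usage sketch for anv_bo_unwrap(): it chases map links until it reaches a real BO, so it is safe to call on wrappers and non-wrappers alike. Illustrative fragment only, assuming <assert.h> and the definitions above:

struct anv_bo real, wrapper;
anv_bo_init(&real, 42 /* gem_handle */, 4096 /* size */);
anv_bo_init(&wrapper, 0, 0);
wrapper.is_wrapper = true;
wrapper.map = &real;                      /* wrapper forwards to the real BO */

assert(anv_bo_unwrap(&wrapper) == &real); /* one hop resolves the wrapper */
assert(anv_bo_unwrap(&real) == &real);    /* no-op on a real BO */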
/* Represents a lock-free linked list of "free" things. This is used by
uint64_t bo_flags;
+ /* Wrapper BO for use in relocation lists. This BO is simply a wrapper
+ * around the actual BO, so that we can grow the pool even after the
+ * wrapper BO has been put in a relocation list. This is only used in
+ * the non-softpin case.
+ */
+ struct anv_bo wrapper_bo;
+
struct anv_bo bos[ANV_MAX_BLOCK_POOL_BOS];
struct anv_bo *bo;
uint32_t nbos;
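Putting the hypothetical model together, a tiny end-to-end check of why a captured pool->bo survives growth in the non-softpin case (assumes the model_pool_init and model_pool_grow sketches above):

#include <assert.h>

static struct model_bo *
model_unwrap(struct model_bo *bo)
{
   while (bo->is_wrapper)
      bo = bo->map;
   return bo;
}

int main(void)
{
   struct model_pool pool;
   model_pool_init(&pool, false /* softpin */);

   struct model_bo *captured = pool.bo;      /* as a relocation list would */

   struct model_bo *first = model_pool_grow(&pool, 0);
   assert(model_unwrap(captured) == first);  /* resolves to the real BO */

   struct model_bo *second = model_pool_grow(&pool, 1);
   assert(model_unwrap(captured) == second); /* still valid after growth */

   return 0;
}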