struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);
anv_gem_munmap(link_copy.bo.map, link_copy.bo.size);
+ anv_vma_free(pool->device, &link_copy.bo);
anv_gem_close(pool->device, link_copy.bo.gem_handle);
link = link_copy.next;
}
new_bo.flags = pool->bo_flags;
+ if (!anv_vma_alloc(pool->device, &new_bo))
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
assert(new_bo.size == pow2_size);
new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
if (new_bo.map == MAP_FAILED) {
anv_gem_close(pool->device, new_bo.gem_handle);
+ anv_vma_free(pool->device, &new_bo);
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
}
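/* Not part of the patch -- a minimal sketch of the anv_vma_alloc()/anv_vma_free()
 * helpers used above, assuming the device keeps a util_vma_heap (src/util/vma.h)
 * guarded by a pthread mutex. The field names vma_lo/vma_mutex, the single heap
 * and the 4096-byte alignment are assumptions for illustration only; the real
 * allocator may carve up the address space differently. */
bool
anv_vma_alloc(struct anv_device *device, struct anv_bo *bo)
{
   /* BOs that are not soft-pinned keep offset == 0 and are still relocated by
    * the kernel, so the helper is a no-op for them. */
   if (!(bo->flags & EXEC_OBJECT_PINNED))
      return true;

   pthread_mutex_lock(&device->vma_mutex);
   bo->offset = util_vma_heap_alloc(&device->vma_lo, bo->size, 4096);
   pthread_mutex_unlock(&device->vma_mutex);

   /* 0 means the heap had no room, i.e. VK_ERROR_OUT_OF_DEVICE_MEMORY above. */
   return bo->offset != 0;
}

void
anv_vma_free(struct anv_device *device, struct anv_bo *bo)
{
   if (!(bo->flags & EXEC_OBJECT_PINNED))
      return;

   pthread_mutex_lock(&device->vma_mutex);
   util_vma_heap_free(&device->vma_lo, bo->offset, bo->size);
   pthread_mutex_unlock(&device->vma_mutex);

   bo->offset = 0;
}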
assert(((*bb_start >> 29) & 0x07) == 0);
assert(((*bb_start >> 23) & 0x3f) == 49);
- uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
- assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
+ if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+ assert(prev_bbo->bo.flags & EXEC_OBJECT_PINNED);
+ assert(next_bbo->bo.flags & EXEC_OBJECT_PINNED);
- prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
- prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
+ write_reloc(cmd_buffer->device,
+ prev_bbo->bo.map + bb_start_offset + 4,
+ next_bbo->bo.offset + next_bbo_offset, true);
+ } else {
+ uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
+ assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
+
+ prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
+ prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
- /* Use a bogus presumed offset to force a relocation */
- prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
+ /* Use a bogus presumed offset to force a relocation */
+ prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
+ }
}
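/* Not part of the patch -- roughly what write_reloc() (anv_private.h) does, so the
 * soft-pin branch above is easier to follow: with both batch BOs pinned, the final
 * GPU address of the next batch is known up front and can be written straight into
 * the MI_BATCH_BUFFER_START packet in the mapped batch, instead of recording a
 * relocation for the kernel to patch. This sketch omits the canonical-address
 * handling of the real helper; gen_flush_range() is the clflush helper from
 * src/intel/common. */
static inline void
write_reloc_sketch(const struct anv_device *device, void *p, uint64_t v, bool flush)
{
   unsigned size;
   if (device->info.gen >= 8) {
      /* gen8+: 64-bit address field */
      size = sizeof(uint64_t);
      *(uint64_t *)p = v;
   } else {
      size = sizeof(uint32_t);
      *(uint32_t *)p = v;
   }

   /* Without LLC the CPU write has to be flushed before the GPU can see it. */
   if (flush && !device->info.has_llc)
      gen_flush_range(p, size);
}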
if (device->instance->physicalDevice.has_exec_async)
device->trivial_batch_bo.flags |= EXEC_OBJECT_ASYNC;
+ if (device->instance->physicalDevice.use_softpin)
+ device->trivial_batch_bo.flags |= EXEC_OBJECT_PINNED;
+
+ anv_vma_alloc(device, &device->trivial_batch_bo);
+
void *map = anv_gem_mmap(device, device->trivial_batch_bo.gem_handle,
0, 4096, 0);
uint64_t bo_flags =
(physical_device->supports_48bit_addresses ? EXEC_OBJECT_SUPPORTS_48B_ADDRESS : 0) |
(physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
- (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0);
+ (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0) |
+ (physical_device->use_softpin ? EXEC_OBJECT_PINNED : 0);
anv_bo_pool_init(&device->batch_bo_pool, device, bo_flags);
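/* Not part of the patch -- a hypothetical usage check showing what the new bo_flags
 * buy us: under use_softpin every BO handed out by batch_bo_pool carries
 * EXEC_OBJECT_PINNED and a fixed GPU address picked by anv_vma_alloc(), which
 * execbuf will honor instead of relocating. */
struct anv_bo bo;
VkResult r = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, 4096);
if (r == VK_SUCCESS && (bo.flags & EXEC_OBJECT_PINNED))
   assert(bo.offset != 0);   /* address came straight out of the VMA heap */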
if (result != VK_SUCCESS)
goto fail_batch_bo_pool;
- if (physical_device->use_softpin)
- bo_flags |= EXEC_OBJECT_PINNED;
- else
+ if (!physical_device->use_softpin)
bo_flags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
result = anv_state_pool_init(&device->dynamic_state_pool, device,
anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
anv_gem_close(device, device->workaround_bo.gem_handle);
+ anv_vma_free(device, &device->trivial_batch_bo);
anv_gem_close(device, device->trivial_batch_bo.gem_handle);
if (device->info.gen >= 10)
anv_gem_close(device, device->hiz_clear_bo.gem_handle);
exec2_objects[0].relocs_ptr = 0;
exec2_objects[0].alignment = 0;
exec2_objects[0].offset = bo.offset;
- exec2_objects[0].flags = 0;
+ exec2_objects[0].flags = bo.flags;
exec2_objects[0].rsvd1 = 0;
exec2_objects[0].rsvd2 = 0;
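/* Not part of the patch -- why flags must now be bo.flags: when EXEC_OBJECT_PINNED
 * is among them, i915 no longer picks (and reports back) a GTT address for the BO
 * but binds it at exactly .offset, the address handed out by anv_vma_alloc(), and
 * skips relocation processing. A minimal, hypothetical single-BO submission: */
struct drm_i915_gem_exec_object2 exec_obj = {
   .handle = bo.gem_handle,
   .relocation_count = 0,
   .offset = bo.offset,   /* honored verbatim because of EXEC_OBJECT_PINNED */
   .flags = bo.flags,
};
struct drm_i915_gem_execbuffer2 execbuf = {
   .buffers_ptr = (uintptr_t)&exec_obj,
   .buffer_count = 1,
   .batch_len = batch_size,   /* hypothetical: size of the batch in bytes */
   .flags = I915_EXEC_RENDER,
};
anv_gem_execbuffer(device, &execbuf);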