if (pool->use_softpin) {
uint32_t new_bo_size = size - pool->size;
struct anv_bo *new_bo;
+ assert(center_bo_offset == 0);
VkResult result = anv_device_alloc_bo(pool->device, new_bo_size,
bo_alloc_flags |
ANV_BO_ALLOC_FIXED_ADDRESS |
ANV_BO_ALLOC_MAPPED |
ANV_BO_ALLOC_SNOOPED,
+ pool->start_address + pool->size,
&new_bo);
if (result != VK_SUCCESS)
return result;
- assert(center_bo_offset == 0);
-
- new_bo->offset = pool->start_address + pool->size;
pool->bos[pool->nbos++] = new_bo;
/* This pointer will always point to the first BO in the list */
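
Under softpin the pool can only grow upward: the new BO is pinned at the first address past the current range, so the pool's GPU virtual addresses stay contiguous and nothing already handed out ever moves. That is also why center_bo_offset must be zero here. A minimal sketch of the placement math, using toy names rather than the driver's real structs:

#include <stdint.h>

struct toy_pool {
   uint64_t start_address;   /* GPU VA where the pool was pinned */
   uint64_t size;            /* bytes currently backed by BOs */
};

/* The next BO must land exactly at the end of the current range so the
 * pool's [start_address, start_address + size) span grows without holes. */
static uint64_t
toy_pool_next_bo_address(const struct toy_pool *pool)
{
   return pool->start_address + pool->size;
}
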
pow2_size,
ANV_BO_ALLOC_MAPPED |
ANV_BO_ALLOC_SNOOPED,
+ 0 /* explicit_address */,
&bo);
if (result != VK_SUCCESS)
return result;
* so nothing will ever touch the top page.
*/
VkResult result = anv_device_alloc_bo(device, size,
- ANV_BO_ALLOC_32BIT_ADDRESS, &bo);
+ ANV_BO_ALLOC_32BIT_ADDRESS,
+ 0 /* explicit_address */,
+ &bo);
if (result != VK_SUCCESS)
return NULL; /* TODO */
anv_device_alloc_bo(struct anv_device *device,
uint64_t size,
enum anv_bo_alloc_flags alloc_flags,
+ uint64_t explicit_address,
struct anv_bo **bo_out)
{
const uint32_t bo_flags =
if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) {
new_bo.has_fixed_address = true;
+ new_bo.offset = explicit_address;
} else {
+ assert(explicit_address == 0);
if (!anv_vma_alloc(device, &new_bo)) {
if (new_bo.map)
anv_gem_munmap(new_bo.map, size);
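
The hunk above is the heart of the change: with ANV_BO_ALLOC_FIXED_ADDRESS the caller's explicit_address becomes the BO's offset directly and anv_vma_alloc is never consulted, which is why fixed-address BOs live outside every VMA heap; every other caller must pass 0. A small self-contained sketch of that contract (the toy_* names are illustrative, not driver code):

#include <stdbool.h>
#include <stdint.h>

#define TOY_BO_ALLOC_FIXED_ADDRESS (1u << 5) /* mirrors ANV_BO_ALLOC_FIXED_ADDRESS */

/* explicit_address is only meaningful together with the fixed-address
 * flag; any other caller must pass 0 and let the VMA allocator choose. */
static bool
toy_alloc_args_valid(uint32_t alloc_flags, uint64_t explicit_address)
{
   if (alloc_flags & TOY_BO_ALLOC_FIXED_ADDRESS)
      return true;               /* caller-chosen GPU VA, any value */
   return explicit_address == 0; /* address comes from the VMA heap */
}
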
descriptor_bo_size,
ANV_BO_ALLOC_MAPPED |
ANV_BO_ALLOC_SNOOPED,
+ 0 /* explicit_address */,
&pool->bo);
if (result != VK_SUCCESS) {
vk_free2(&device->alloc, pAllocator, pool);
{
VkResult result = anv_device_alloc_bo(device, 4096,
ANV_BO_ALLOC_MAPPED,
+ 0 /* explicit_address */,
&device->trivial_batch_bo);
if (result != VK_SUCCESS)
return result;
{
VkResult result = anv_device_alloc_bo(device, 4096,
ANV_BO_ALLOC_MAPPED,
+ 0 /* explicit_address */,
&device->hiz_clear_bo);
if (result != VK_SUCCESS)
return result;
goto fail_binding_table_pool;
}
- result = anv_device_alloc_bo(device, 4096, 0, &device->workaround_bo);
+ result = anv_device_alloc_bo(device, 4096, 0 /* flags */,
+ 0 /* explicit_address */,
+ &device->workaround_bo);
if (result != VK_SUCCESS)
goto fail_surface_aux_map_pool;
alloc_flags |= ANV_BO_ALLOC_EXTERNAL;
result = anv_device_alloc_bo(device, pAllocateInfo->allocationSize,
- alloc_flags, &mem->bo);
+ alloc_flags, 0 /* explicit_address */,
+ &mem->bo);
if (result != VK_SUCCESS)
goto fail;
/** Specifies that the BO should be captured in error states */
ANV_BO_ALLOC_CAPTURE = (1 << 4),
- /** Specifies that the BO will have an address assigned by the caller */
+ /** Specifies that the BO will have an address assigned by the caller
+ *
+ * Such BOs do not exist in any VMA heap.
+ */
ANV_BO_ALLOC_FIXED_ADDRESS = (1 << 5),
/** Enables implicit synchronization on the BO
VkResult anv_device_alloc_bo(struct anv_device *device, uint64_t size,
enum anv_bo_alloc_flags alloc_flags,
+ uint64_t explicit_address,
struct anv_bo **bo);
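
For call sites, the updated prototype splits into two styles, as the hunks throughout this patch show. A usage sketch (assumes the anv_private.h context, error handling elided; `addr` stands for whatever GPU VA the caller reserved):

struct anv_bo *bo;

/* Ordinary allocation: the VMA heaps pick the address, so pass 0. */
VkResult result = anv_device_alloc_bo(device, 4096,
                                      ANV_BO_ALLOC_MAPPED,
                                      0 /* explicit_address */,
                                      &bo);

/* Pinned allocation: the caller supplies the GPU VA, and the BO is
 * kept out of the VMA heaps entirely. */
result = anv_device_alloc_bo(device, 4096,
                             ANV_BO_ALLOC_FIXED_ADDRESS,
                             addr /* caller-chosen GPU VA */,
                             &bo);
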
VkResult anv_device_import_bo_from_host_ptr(struct anv_device *device,
void *host_ptr, uint32_t size,
result = anv_device_alloc_bo(device, 4096,
ANV_BO_ALLOC_EXTERNAL |
ANV_BO_ALLOC_IMPLICIT_SYNC,
+ 0 /* explicit_address */,
&(*point)->bo);
if (result != VK_SUCCESS)
vk_free(&device->alloc, *point);
result = anv_device_alloc_bo(device, 4096,
ANV_BO_ALLOC_EXTERNAL |
ANV_BO_ALLOC_IMPLICIT_SYNC,
+ 0 /* explicit_address */,
&sync_bo);
if (result != VK_SUCCESS)
goto err_free_submit;
anv_device_alloc_bo(device, 4096,
ANV_BO_ALLOC_EXTERNAL |
ANV_BO_ALLOC_IMPLICIT_SYNC,
+ 0 /* explicit_address */,
&impl->bo);
/* If we're going to use this as a fence, we need to *not* have the
* EXEC_OBJECT_ASYNC bit set.
result = anv_device_alloc_bo(device, size,
ANV_BO_ALLOC_MAPPED |
ANV_BO_ALLOC_SNOOPED,
+ 0 /* explicit_address */,
&pool->bo);
if (result != VK_SUCCESS)
goto fail;