for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
for (unsigned i = 0; i < 16; i++) {
struct anv_scratch_bo *bo = &pool->bos[i][s];
- if (bo->exists > 0)
+ if (bo->exists > 0) {
+ anv_vma_free(device, &bo->bo);
anv_gem_close(device, bo->bo.gem_handle);
+ }
}
}
}
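
For context, a minimal sketch of what the vma_free side could look like, assuming the device tracks its softpin GPU virtual address space behind a mutex. The types and names here (sketch_device, sketch_bo, vma_available) are illustrative stand-ins, not the driver's actual internals; the next_addr field is used by the matching alloc sketch further down.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SK_EXEC_OBJECT_PINNED (1ull << 4)  /* EXEC_OBJECT_PINNED in i915_drm.h */

    struct sketch_bo {
       uint64_t offset;   /* GPU virtual address while pinned */
       uint64_t size;
       uint64_t flags;
    };

    struct sketch_device {
       pthread_mutex_t vma_mutex;
       uint64_t next_addr;      /* bump pointer, used by the alloc sketch below */
       uint64_t vma_available;  /* bytes of GPU VA still free */
    };

    static void
    sketch_vma_free(struct sketch_device *device, struct sketch_bo *bo)
    {
       /* BOs that were never softpinned have no address to give back. */
       if (!(bo->flags & SK_EXEC_OBJECT_PINNED))
          return;

       pthread_mutex_lock(&device->vma_mutex);
       /* A real allocator would return [offset, offset + size) to a heap
        * and coalesce; this sketch only tracks the free byte count. */
       device->vma_available += bo->size;
       pthread_mutex_unlock(&device->vma_mutex);

       bo->offset = 0;
    }
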
if (device->instance->physicalDevice.has_exec_async)
bo->bo.flags |= EXEC_OBJECT_ASYNC;
+ if (device->instance->physicalDevice.use_softpin)
+ bo->bo.flags |= EXEC_OBJECT_PINNED;
+
+ anv_vma_alloc(device, &bo->bo);
+
/* Set the exists flag last because it may be read by other threads */
__sync_synchronize();
bo->exists = true;
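
The barrier comment above is the writer half of a lock-free publication scheme: all of the BO's fields must be globally visible before exists is set, so a reader that observes exists == true can use the BO without taking a lock. A self-contained sketch of both halves of that pattern (names illustrative, not the driver's code):

    #include <pthread.h>
    #include <stdbool.h>

    struct published_bo {
       int gem_handle;   /* stand-in for the real BO fields */
       bool exists;      /* publication flag, always written last */
    };

    static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int
    get_or_create(struct published_bo *bo)
    {
       /* Fast path: the writer's full barrier guarantees that if exists
        * is visible, the payload fields are visible too. */
       if (bo->exists)
          return bo->gem_handle;

       pthread_mutex_lock(&alloc_mutex);
       if (!bo->exists) {              /* re-check under the lock */
          bo->gem_handle = 42;         /* ...real allocation goes here... */
          __sync_synchronize();        /* order payload before the flag */
          bo->exists = true;
       }
       pthread_mutex_unlock(&alloc_mutex);

       return bo->gem_handle;
    }

Strictly speaking, C11 acquire/release atomics express this ordering more portably; the pattern above leans on the full barrier plus the flag being written last, which is the same assumption the driver's comment makes.
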
if (result != VK_SUCCESS)
goto fail_binding_table_pool;
+ if (physical_device->use_softpin)
+ device->workaround_bo.flags |= EXEC_OBJECT_PINNED;
+
+ if (!anv_vma_alloc(device, &device->workaround_bo))
+ goto fail_workaround_bo;
+
anv_device_init_trivial_batch(device);
if (device->info.gen >= 10)
anv_scratch_pool_finish(device, &device->scratch_pool);
anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
+ anv_vma_free(device, &device->workaround_bo);
anv_gem_close(device, device->workaround_bo.gem_handle);
anv_vma_free(device, &device->trivial_batch_bo);
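
Note the symmetry in both directions here: the workaround BO path checks anv_vma_alloc's result and unwinds on failure, and teardown frees the VMA before closing the GEM handle. Continuing the earlier sketch, the allocation side might look like this bump-style carve-out of the free VA space (again illustrative, not the driver's real heap):

    static bool
    sketch_vma_alloc(struct sketch_device *device, struct sketch_bo *bo)
    {
       /* Non-pinned BOs keep letting the kernel pick an address. */
       if (!(bo->flags & SK_EXEC_OBJECT_PINNED))
          return true;

       pthread_mutex_lock(&device->vma_mutex);

       bool ok = device->vma_available >= bo->size;
       if (ok) {
          device->vma_available -= bo->size;
          bo->offset = device->next_addr;   /* the address we will pin to */
          device->next_addr += bo->size;
       }

       pthread_mutex_unlock(&device->vma_mutex);
       return ok;   /* false = VA space exhausted */
    }

Returning false on exhaustion is what gives the `if (!anv_vma_alloc(...)) goto fail_workaround_bo;` check above something to act on; the scratch-pool path earlier ignores the result.
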
if (pdevice->supports_48bit_addresses)
pool->bo.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ if (pdevice->use_softpin)
+ pool->bo.flags |= EXEC_OBJECT_PINNED;
+
if (pdevice->has_exec_async)
pool->bo.flags |= EXEC_OBJECT_ASYNC;
+ anv_vma_alloc(device, &pool->bo);
+
/* For query pools, we set the caching mode to I915_CACHING_CACHED. On LLC
* platforms, this does nothing. On non-LLC platforms, this means snooping
* which comes at a slight cost. However, the buffers aren't big, won't be
return;
anv_gem_munmap(pool->bo.map, pool->bo.size);
+ anv_vma_free(device, &pool->bo);
anv_gem_close(device, pool->bo.gem_handle);
vk_free2(&device->alloc, pAllocator, pool);
}
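
As for what EXEC_OBJECT_PINNED actually buys: at execbuf time the kernel treats the offset field of each exec object as a fixed input address rather than something it is free to relocate, so a softpinning driver fills in the address it reserved via anv_vma_alloc and skips relocations entirely. A sketch of filling one exec object with the i915 uapi struct (error handling omitted):

    #include <stdint.h>
    #include <string.h>
    #include <drm/i915_drm.h>

    static void
    fill_exec_object(struct drm_i915_gem_exec_object2 *obj,
                     uint32_t gem_handle, uint64_t pinned_addr, uint64_t flags)
    {
       memset(obj, 0, sizeof(*obj));
       obj->handle = gem_handle;
       obj->flags  = flags;         /* EXEC_OBJECT_PINNED among others */
       obj->offset = pinned_addr;   /* input, not output, when pinned */
    }
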