RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
list_inithead(&pool->free_cmd_buffers);
radv_cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult radv_ResetCommandPool(
size += ycbcr_sampler_count * sizeof(struct radv_sampler_ycbcr_conversion) + (max_binding + 1) * sizeof(uint32_t);
}
- set_layout = vk_zalloc2(&device->alloc, pAllocator, size, 8,
+ set_layout = vk_zalloc2(&device->vk.alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set_layout)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(pCreateInfo->pBindings,
pCreateInfo->bindingCount);
if (!bindings) {
- vk_free2(&device->alloc, pAllocator, set_layout);
+ vk_free2(&device->vk.alloc, pAllocator, set_layout);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (!set_layout)
return;
- vk_free2(&device->alloc, pAllocator, set_layout);
+ vk_free2(&device->vk.alloc, pAllocator, set_layout);
}
void radv_GetDescriptorSetLayoutSupport(VkDevice device,
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
- layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
+ layout = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*layout), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (layout == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!pipeline_layout)
return;
- vk_free2(&device->alloc, pAllocator, pipeline_layout);
+ vk_free2(&device->vk.alloc, pAllocator, pipeline_layout);
}
#define EMPTY 1
set = (struct radv_descriptor_set*)pool->host_memory_ptr;
pool->host_memory_ptr += mem_size;
} else {
- set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
+ set = vk_alloc2(&device->vk.alloc, NULL, mem_size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set)
set->size = layout_size;
if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
- vk_free2(&device->alloc, NULL, set);
+ vk_free2(&device->vk.alloc, NULL, set);
return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
}
if (pool->size - offset < layout_size) {
- vk_free2(&device->alloc, NULL, set);
+ vk_free2(&device->vk.alloc, NULL, set);
return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
set->bo = pool->bo;
}
}
}
- vk_free2(&device->alloc, NULL, set);
+ vk_free2(&device->vk.alloc, NULL, set);
}
VkResult radv_CreateDescriptorPool(
size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
}
- pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
+ pool = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!pool)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pool->bo)
device->ws->buffer_destroy(pool->bo);
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult radv_ResetDescriptorPool(
struct radv_descriptor_update_template *templ;
uint32_t i;
- templ = vk_alloc2(&device->alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ templ = vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!templ)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!templ)
return;
- vk_free2(&device->alloc, pAllocator, templ);
+ vk_free2(&device->vk.alloc, pAllocator, templ);
}
void radv_update_descriptor_set_with_template(struct radv_device *device,
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_sampler_ycbcr_conversion *conversion = NULL;
- conversion = vk_zalloc2(&device->alloc, pAllocator, sizeof(*conversion), 8,
+ conversion = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*conversion), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (conversion == NULL)
RADV_FROM_HANDLE(radv_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);
if (ycbcr_conversion)
- vk_free2(&device->alloc, pAllocator, ycbcr_conversion);
+ vk_free2(&device->vk.alloc, pAllocator, ycbcr_conversion);
}
struct radv_pipeline *pipeline;
bool sc_read = true;
- pipeline = vk_zalloc2(&device->alloc, NULL, sizeof(*pipeline), 8,
+ pipeline = vk_zalloc2(&device->vk.alloc, NULL, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
pipeline->device = device;
free((void *) pStages[i]);
}
- vk_free(&device->alloc, pipeline);
+ vk_free(&device->vk.alloc, pipeline);
sc_type = RADV_SC_TYPE_COMPILE_PIPELINE_FINISHED;
write(fd_secure_output, &sc_type, sizeof(sc_type));
static VkResult fork_secure_compile_idle_device(struct radv_device *device)
{
- device->sc_state = vk_zalloc(&device->alloc,
+ device->sc_state = vk_zalloc(&device->vk.alloc,
sizeof(struct radv_secure_compile_state),
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
}
}
- device->sc_state->secure_compile_processes = vk_zalloc(&device->alloc,
+ device->sc_state->secure_compile_processes = vk_zalloc(&device->vk.alloc,
sizeof(struct radv_secure_compile_process) * sc_threads, 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device)
return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_device_init(&device->vk, pCreateInfo,
+ &physical_device->instance->alloc, pAllocator);
+
device->instance = physical_device->instance;
device->physical_device = physical_device;
device->ws = physical_device->ws;
- if (pAllocator)
- device->alloc = *pAllocator;
- else
- device->alloc = physical_device->instance->alloc;
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
int index = radv_get_device_extension_index(ext_name);
if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
- vk_free(&device->alloc, device);
+ vk_free(&device->vk.alloc, device);
return vk_error(physical_device->instance, VK_ERROR_EXTENSION_NOT_PRESENT);
}
assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
- device->queues[qfi] = vk_alloc(&device->alloc,
+ device->queues[qfi] = vk_alloc(&device->vk.alloc,
queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device->queues[qfi]) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
for (unsigned q = 0; q < device->queue_count[i]; q++)
radv_queue_finish(&device->queues[i][q]);
if (device->queue_count[i])
- vk_free(&device->alloc, device->queues[i]);
+ vk_free(&device->vk.alloc, device->queues[i]);
}
- vk_free(&device->alloc, device);
+ vk_free(&device->vk.alloc, device);
return result;
}
for (unsigned q = 0; q < device->queue_count[i]; q++)
radv_queue_finish(&device->queues[i][q]);
if (device->queue_count[i])
- vk_free(&device->alloc, device->queues[i]);
+ vk_free(&device->vk.alloc, device->queues[i]);
if (device->empty_cs[i])
device->ws->cs_destroy(device->empty_cs[i]);
}
if (device->sc_state) {
free(device->sc_state->uid);
- vk_free(&device->alloc, device->sc_state->secure_compile_processes);
+ vk_free(&device->vk.alloc, device->sc_state->secure_compile_processes);
}
- vk_free(&device->alloc, device->sc_state);
- vk_free(&device->alloc, device);
+ vk_free(&device->vk.alloc, device->sc_state);
+ vk_free(&device->vk.alloc, device);
}
VkResult radv_EnumerateInstanceLayerProperties(
mem->bo = NULL;
}
- vk_free2(&device->alloc, pAllocator, mem);
+ vk_free2(&device->vk.alloc, pAllocator, mem);
}
static VkResult radv_alloc_memory(struct radv_device *device,
return VK_SUCCESS;
}
- mem = vk_zalloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
+ mem = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*mem), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (mem == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
VkExternalFenceHandleTypeFlags handleTypes =
export ? export->handleTypes : 0;
- struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
+ struct radv_fence *fence = vk_alloc2(&device->vk.alloc, pAllocator,
sizeof(*fence), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (device->always_use_syncobj || handleTypes) {
int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
if (ret) {
- vk_free2(&device->alloc, pAllocator, fence);
+ vk_free2(&device->vk.alloc, pAllocator, fence);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
} else {
fence->fence = device->ws->create_fence();
if (!fence->fence) {
- vk_free2(&device->alloc, pAllocator, fence);
+ vk_free2(&device->vk.alloc, pAllocator, fence);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
fence->syncobj = 0;
device->ws->destroy_fence(fence->fence);
if (fence->fence_wsi)
fence->fence_wsi->destroy(fence->fence_wsi);
- vk_free2(&device->alloc, pAllocator, fence);
+ vk_free2(&device->vk.alloc, pAllocator, fence);
}
uint64_t initial_value = 0;
VkSemaphoreTypeKHR type = radv_get_semaphore_type(pCreateInfo->pNext, &initial_value);
- struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
+ struct radv_semaphore *sem = vk_alloc2(&device->vk.alloc, pAllocator,
sizeof(*sem), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!sem)
assert (device->physical_device->rad_info.has_syncobj);
int ret = device->ws->create_syncobj(device->ws, &sem->permanent.syncobj);
if (ret) {
- vk_free2(&device->alloc, pAllocator, sem);
+ vk_free2(&device->vk.alloc, pAllocator, sem);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
sem->permanent.kind = RADV_SEMAPHORE_SYNCOBJ;
} else {
sem->permanent.ws_sem = device->ws->create_sem(device->ws);
if (!sem->permanent.ws_sem) {
- vk_free2(&device->alloc, pAllocator, sem);
+ vk_free2(&device->vk.alloc, pAllocator, sem);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
sem->permanent.kind = RADV_SEMAPHORE_WINSYS;
radv_destroy_semaphore_part(device, &sem->temporary);
radv_destroy_semaphore_part(device, &sem->permanent);
- vk_free2(&device->alloc, pAllocator, sem);
+ vk_free2(&device->vk.alloc, pAllocator, sem);
}
VkResult
VkEvent* pEvent)
{
RADV_FROM_HANDLE(radv_device, device, _device);
- struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
+ struct radv_event *event = vk_alloc2(&device->vk.alloc, pAllocator,
sizeof(*event), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING,
RADV_BO_PRIORITY_FENCE);
if (!event->bo) {
- vk_free2(&device->alloc, pAllocator, event);
+ vk_free2(&device->vk.alloc, pAllocator, event);
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
if (!event)
return;
device->ws->buffer_destroy(event->bo);
- vk_free2(&device->alloc, pAllocator, event);
+ vk_free2(&device->vk.alloc, pAllocator, event);
}
VkResult radv_GetEventStatus(
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
- buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
+ buffer = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4096, 0, RADEON_FLAG_VIRTUAL,
RADV_BO_PRIORITY_VIRTUAL);
if (!buffer->bo) {
- vk_free2(&device->alloc, pAllocator, buffer);
+ vk_free2(&device->vk.alloc, pAllocator, buffer);
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
}
if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
device->ws->buffer_destroy(buffer->bo);
- vk_free2(&device->alloc, pAllocator, buffer);
+ vk_free2(&device->vk.alloc, pAllocator, buffer);
}
VkDeviceAddress radv_GetBufferDeviceAddress(
size_t size = sizeof(*framebuffer);
if (!imageless_create_info)
size += sizeof(struct radv_image_view*) * pCreateInfo->attachmentCount;
- framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
+ framebuffer = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (framebuffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!fb)
return;
- vk_free2(&device->alloc, pAllocator, fb);
+ vk_free2(&device->vk.alloc, pAllocator, fb);
}
static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
- sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
+ sampler = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*sampler), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!sampler)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!sampler)
return;
- vk_free2(&device->alloc, pAllocator, sampler);
+ vk_free2(&device->vk.alloc, pAllocator, sampler);
}
/* vk_icd.h does not declare this function, so we declare it here to
radv_assert(pCreateInfo->extent.height > 0);
radv_assert(pCreateInfo->extent.depth > 0);
- image = vk_zalloc2(&device->alloc, alloc, image_struct_size, 8,
+ image = vk_zalloc2(&device->vk.alloc, alloc, image_struct_size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!image)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
image->bo = device->ws->buffer_create(device->ws, image->size, image->alignment,
0, RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL);
if (!image->bo) {
- vk_free2(&device->alloc, alloc, image);
+ vk_free2(&device->vk.alloc, alloc, image);
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
}
if (image->owned_memory != VK_NULL_HANDLE)
radv_FreeMemory(_device, image->owned_memory, pAllocator);
- vk_free2(&device->alloc, pAllocator, image);
+ vk_free2(&device->vk.alloc, pAllocator, image);
}
void radv_GetImageSubresourceLayout(
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_image_view *view;
- view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
+ view = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (view == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!iview)
return;
- vk_free2(&device->alloc, pAllocator, iview);
+ vk_free2(&device->vk.alloc, pAllocator, iview);
}
void radv_buffer_view_init(struct radv_buffer_view *view,
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_buffer_view *view;
- view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
+ view = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!view)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!view)
return;
- vk_free2(&device->alloc, pAllocator, view);
+ vk_free2(&device->vk.alloc, pAllocator, view);
}
VkSystemAllocationScope allocationScope)
{
struct radv_device *device = _device;
- return device->alloc.pfnAllocation(device->alloc.pUserData, size, alignment,
+ return device->vk.alloc.pfnAllocation(device->vk.alloc.pUserData, size, alignment,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
}
VkSystemAllocationScope allocationScope)
{
struct radv_device *device = _device;
- return device->alloc.pfnReallocation(device->alloc.pUserData, original,
+ return device->vk.alloc.pfnReallocation(device->vk.alloc.pUserData, original,
size, alignment,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
}
meta_free(void* _device, void *data)
{
struct radv_device *device = _device;
- return device->alloc.pfnFree(device->alloc.pUserData, data);
+ return device->vk.alloc.pfnFree(device->vk.alloc.pUserData, data);
}
static bool
attachments_offset = size;
size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
- pass = vk_alloc2(&device->alloc, pAllocator, size, 8,
+ pass = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (subpass_attachment_count) {
pass->subpass_attachments =
- vk_alloc2(&device->alloc, pAllocator,
+ vk_alloc2(&device->vk.alloc, pAllocator,
subpass_attachment_count * sizeof(struct radv_subpass_attachment), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass->subpass_attachments == NULL) {
- vk_free2(&device->alloc, pAllocator, pass);
+ vk_free2(&device->vk.alloc, pAllocator, pass);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
} else
attachments_offset = size;
size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
- pass = vk_alloc2(&device->alloc, pAllocator, size, 8,
+ pass = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (subpass_attachment_count) {
pass->subpass_attachments =
- vk_alloc2(&device->alloc, pAllocator,
+ vk_alloc2(&device->vk.alloc, pAllocator,
subpass_attachment_count * sizeof(struct radv_subpass_attachment), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass->subpass_attachments == NULL) {
- vk_free2(&device->alloc, pAllocator, pass);
+ vk_free2(&device->vk.alloc, pAllocator, pass);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
} else
if (!_pass)
return;
- vk_free2(&device->alloc, pAllocator, pass->subpass_attachments);
- vk_free2(&device->alloc, pAllocator, pass);
+ vk_free2(&device->vk.alloc, pAllocator, pass->subpass_attachments);
+ vk_free2(&device->vk.alloc, pAllocator, pass);
}
void radv_GetRenderAreaGranularity(
if(pipeline->cs.buf)
free(pipeline->cs.buf);
- vk_free2(&device->alloc, allocator, pipeline);
+ vk_free2(&device->vk.alloc, allocator, pipeline);
}
void radv_DestroyPipeline(
struct radv_pipeline *pipeline;
VkResult result;
- pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
+ pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
struct radv_pipeline *pipeline;
VkResult result;
- pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
+ pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
- cache = vk_alloc2(&device->alloc, pAllocator,
+ cache = vk_alloc2(&device->vk.alloc, pAllocator,
sizeof(*cache), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cache == NULL)
if (pAllocator)
cache->alloc = *pAllocator;
else
- cache->alloc = device->alloc;
+ cache->alloc = device->vk.alloc;
radv_pipeline_cache_init(cache, device);
return;
radv_pipeline_cache_finish(cache);
- vk_free2(&device->alloc, pAllocator, cache);
+ vk_free2(&device->vk.alloc, pAllocator, cache);
}
VkResult radv_GetPipelineCacheData(
#include "util/xmlconfig.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
+#include "vk_object.h"
#include "radv_radeon_winsys.h"
#include "ac_binary.h"
};
struct radv_device {
- VK_LOADER_DATA _loader_data;
-
- VkAllocationCallbacks alloc;
+ struct vk_device vk;
struct radv_instance * instance;
struct radeon_winsys *ws;
VkQueryPool* pQueryPool)
{
RADV_FROM_HANDLE(radv_device, device, _device);
- struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
+ struct radv_query_pool *pool = vk_alloc2(&device->vk.alloc, pAllocator,
sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
RADV_BO_PRIORITY_QUERY_POOL);
if (!pool->bo) {
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
if (!pool->ptr) {
device->ws->buffer_destroy(pool->bo);
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
return;
device->ws->buffer_destroy(pool->bo);
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult radv_GetQueryPoolResults(
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
- module = vk_alloc2(&device->alloc, pAllocator,
+ module = vk_alloc2(&device->vk.alloc, pAllocator,
sizeof(*module) + pCreateInfo->codeSize, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (module == NULL)
if (!module)
return;
- vk_free2(&device->alloc, pAllocator, module);
+ vk_free2(&device->vk.alloc, pAllocator, module);
}
void
if (pAllocator)
alloc = pAllocator;
else
- alloc = &device->alloc;
+ alloc = &device->vk.alloc;
return wsi_common_create_swapchain(&device->physical_device->wsi_device,
radv_device_to_handle(device),
if (pAllocator)
alloc = pAllocator;
else
- alloc = &device->alloc;
+ alloc = &device->vk.alloc;
wsi_common_destroy_swapchain(_device, swapchain, alloc);
}
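
Taken together, the hunks above implement a single pattern: radv_device now embeds struct vk_device as its first member, radv_CreateDevice initializes it with vk_device_init(), and every allocation that used to read device->alloc now reads device->vk.alloc. Below is a minimal sketch of that pattern under stated assumptions: my_device, my_CreateDevice and my_DestroyDevice are hypothetical stand-ins, not RADV code; vk_device_init, vk_zalloc2 and vk_free are used as they appear in the diff and come from Mesa's vk_object.h / vk_alloc.h.

/* Sketch only: hypothetical driver illustrating the vk_device base-object
 * pattern from the diff above. Assumes Mesa's vk_object.h (vk_device_init)
 * and vk_alloc.h (vk_zalloc2, vk_free). */
#include "vk_object.h"
#include "vk_alloc.h"

struct my_device {
	struct vk_device vk;   /* base object; vk.alloc holds the resolved device allocator */
	/* driver-private state would follow here */
};

static VkResult
my_CreateDevice(const VkDeviceCreateInfo *pCreateInfo,
		const VkAllocationCallbacks *instance_alloc,
		const VkAllocationCallbacks *pAllocator,
		struct my_device **out_device)
{
	/* Allocate the device itself from the instance allocator, using the
	 * per-device allocator instead when the application provides one. */
	struct my_device *device =
		vk_zalloc2(instance_alloc, pAllocator, sizeof(*device), 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
	if (!device)
		return VK_ERROR_OUT_OF_HOST_MEMORY;

	/* Same call the diff adds to radv_CreateDevice: the base struct stores
	 * the resolved allocator, so the hand-copied device->alloc field and
	 * the loaderMagic setup are no longer needed. */
	vk_device_init(&device->vk, pCreateInfo, instance_alloc, pAllocator);

	*out_device = device;
	return VK_SUCCESS;
}

static void
my_DestroyDevice(struct my_device *device)
{
	/* Mirrors radv_DestroyDevice: all later allocations and frees go
	 * through device->vk.alloc. */
	vk_free(&device->vk.alloc, device);
}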