v2. Define new helper functions to avoid duplicating a pair of function calls.
v3. Move the new helper functions to vk_object.h and call them (see the sketch below).
v4. Merge the two commits that switch to the common base object type and struct into one.
Signed-off-by: Hyunjun Ko <zzoon@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5539>
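For reference, each allocation helper pairs a vk_alloc2/vk_zalloc2 call with
vk_object_base_init, and vk_object_free pairs vk_object_base_finish with
vk_free2: the "pair of function calls" that v2 refers to. A minimal sketch of
their shape (the canonical definitions live in src/vulkan/util/vk_object.h
and may differ in detail):

    /* Allocate a Vulkan object and initialize its common base. The helpers
     * assume struct vk_object_base is the first member of the object. */
    static inline void *
    vk_object_alloc(struct vk_device *device,
                    const VkAllocationCallbacks *alloc,
                    size_t size, VkObjectType obj_type)
    {
       void *ptr = vk_alloc2(&device->alloc, alloc, size, 8,
                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
       if (ptr == NULL)
          return NULL;
       vk_object_base_init(device, (struct vk_object_base *)ptr, obj_type);
       return ptr;
    }

    /* vk_object_zalloc is identical except that it zero-initializes the
     * allocation via vk_zalloc2. */

    /* Tear down the common base, then free the object. */
    static inline void
    vk_object_free(struct vk_device *device,
                   const VkAllocationCallbacks *alloc, void *data)
    {
       vk_object_base_finish((struct vk_object_base *)data);
       vk_free2(&device->alloc, alloc, data);
    }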
VkCommandBuffer *pCommandBuffer)
{
struct tu_cmd_buffer *cmd_buffer;
- cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+
+ cmd_buffer = vk_object_zalloc(&device->vk, NULL, sizeof(*cmd_buffer),
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
if (cmd_buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
tu_cs_finish(&cmd_buffer->sub_cs);
tu_bo_list_destroy(&cmd_buffer->bo_list);
- vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+ vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = tu_reset_cmd_buffer(cmd_buffer);
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->level = pAllocateInfo->level;
pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
+ VK_OBJECT_TYPE_COMMAND_POOL);
if (pool == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
list_inithead(&pool->free_cmd_buffers);
tu_cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_free(&device->vk, pAllocator, pool);
}
VkResult
immutable_sampler_count * sizeof(struct tu_sampler) +
ycbcr_sampler_count * sizeof(struct tu_sampler_ycbcr_conversion);
- set_layout = vk_zalloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ set_layout = vk_object_zalloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
if (!set_layout)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
VkDescriptorSetLayoutBinding *bindings = create_sorted_bindings(
pCreateInfo->pBindings, pCreateInfo->bindingCount);
if (!bindings) {
- vk_free2(&device->alloc, pAllocator, set_layout);
+ vk_object_free(&device->vk, pAllocator, set_layout);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (!set_layout)
return;
- vk_free2(&device->alloc, pAllocator, set_layout);
+ vk_object_free(&device->vk, pAllocator, set_layout);
}
void
assert(pCreateInfo->sType ==
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
- layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ layout = vk_object_alloc(&device->vk, pAllocator, sizeof(*layout),
+ VK_OBJECT_TYPE_PIPELINE_LAYOUT);
if (layout == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!pipeline_layout)
return;
- vk_free2(&device->alloc, pAllocator, pipeline_layout);
+
+ vk_object_free(&device->vk, pAllocator, pipeline_layout);
}
#define EMPTY 1
set = (struct tu_descriptor_set*)pool->host_memory_ptr;
pool->host_memory_ptr += mem_size;
} else {
- set = vk_alloc2(&device->alloc, NULL, mem_size, 8,
+ set = vk_alloc2(&device->vk.alloc, NULL, mem_size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set)
}
memset(set, 0, mem_size);
+ vk_object_base_init(&device->vk, &set->base, VK_OBJECT_TYPE_DESCRIPTOR_SET);
if (layout->dynamic_offset_count) {
set->dynamic_descriptors = (uint32_t *)((uint8_t*)set + dynamic_offset);
set->size = layout_size;
if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
- vk_free2(&device->alloc, NULL, set);
+ vk_object_free(&device->vk, NULL, set);
return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
}
if (pool->size - offset < layout_size) {
- vk_free2(&device->alloc, NULL, set);
+ vk_object_free(&device->vk, NULL, set);
return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
}
}
}
}
- vk_free2(&device->alloc, NULL, set);
+
+ vk_object_free(&device->vk, NULL, set);
}
VkResult
size += sizeof(struct tu_descriptor_pool_entry) * pCreateInfo->maxSets;
}
- pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pool = vk_object_zalloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_DESCRIPTOR_POOL);
if (!pool)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- memset(pool, 0, sizeof(*pool));
-
if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
pool->host_memory_base = (uint8_t*)pool + sizeof(struct tu_descriptor_pool);
pool->host_memory_ptr = pool->host_memory_base;
if (pool->size)
tu_bo_finish(device, &pool->bo);
- vk_free2(&device->alloc, pAllocator, pool);
+
+ vk_object_free(&device->vk, pAllocator, pool);
}
VkResult
sizeof(struct tu_descriptor_update_template_entry) * entry_count;
struct tu_descriptor_update_template *templ;
- templ = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ templ = vk_object_alloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE);
if (!templ)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!templ)
return;
- vk_free2(&device->alloc, pAllocator, templ);
+ vk_object_free(&device->vk, pAllocator, templ);
}
void
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_sampler_ycbcr_conversion *conversion;
- conversion = vk_alloc2(&device->alloc, pAllocator, sizeof(*conversion), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ conversion = vk_object_alloc(&device->vk, pAllocator, sizeof(*conversion),
+ VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION);
if (!conversion)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);
- if (ycbcr_conversion)
- vk_free2(&device->alloc, pAllocator, ycbcr_conversion);
+ if (!ycbcr_conversion)
+ return;
+
+ vk_object_free(&device->vk, pAllocator, ycbcr_conversion);
}
struct tu_descriptor_set_layout
{
+ struct vk_object_base base;
+
/* The create flags for this descriptor set layout */
VkDescriptorSetLayoutCreateFlags flags;
struct tu_pipeline_layout
{
+ struct vk_object_base base;
+
struct
{
struct tu_descriptor_set_layout *layout;
if (instance->debug_flags & TU_DEBUG_STARTUP)
tu_logi("Found compatible device '%s'.", path);
- device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
device->instance = instance;
assert(strlen(path) < ARRAY_SIZE(device->path));
strncpy(device->path, path, ARRAY_SIZE(device->path));
close(device->local_fd);
if (device->master_fd != -1)
close(device->master_fd);
+
+ vk_object_base_finish(&device->base);
}
static VKAPI_ATTR void *
instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+
if (!instance)
return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
- instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);
if (pAllocator)
instance->alloc = *pAllocator;
int index = tu_get_instance_extension_index(ext_name);
if (index < 0 || !tu_instance_extensions_supported.extensions[index]) {
+ vk_object_base_finish(&instance->base);
vk_free2(&default_alloc, pAllocator, instance);
return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
}
result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
if (result != VK_SUCCESS) {
+ vk_object_base_finish(&instance->base);
vk_free2(&default_alloc, pAllocator, instance);
return vk_error(instance, result);
}
vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
+ vk_object_base_finish(&instance->base);
vk_free(&instance->alloc, instance);
}
int idx,
VkDeviceQueueCreateFlags flags)
{
- queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(&device->vk, &queue->base, VK_OBJECT_TYPE_QUEUE);
+
queue->device = device;
queue->queue_family_index = queue_family_index;
queue->queue_idx = idx;
if (!device)
return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_device_init(&device->vk, pCreateInfo,
+ &physical_device->instance->alloc, pAllocator);
+
device->instance = physical_device->instance;
device->physical_device = physical_device;
device->_lost = false;
- if (pAllocator)
- device->alloc = *pAllocator;
- else
- device->alloc = physical_device->instance->alloc;
-
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
int index = tu_get_device_extension_index(ext_name);
if (index < 0 ||
!physical_device->supported_extensions.extensions[index]) {
- vk_free(&device->alloc, device);
+ vk_free(&device->vk.alloc, device);
return vk_error(physical_device->instance,
VK_ERROR_EXTENSION_NOT_PRESENT);
}
&pCreateInfo->pQueueCreateInfos[i];
uint32_t qfi = queue_create->queueFamilyIndex;
device->queues[qfi] = vk_alloc(
- &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
+ &device->vk.alloc, queue_create->queueCount * sizeof(struct tu_queue),
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device->queues[qfi]) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
for (unsigned q = 0; q < device->queue_count[i]; q++)
tu_queue_finish(&device->queues[i][q]);
if (device->queue_count[i])
- vk_free(&device->alloc, device->queues[i]);
+ vk_free(&device->vk.alloc, device->queues[i]);
}
- vk_free(&device->alloc, device);
+ vk_free(&device->vk.alloc, device);
return result;
}
for (unsigned q = 0; q < device->queue_count[i]; q++)
tu_queue_finish(&device->queues[i][q]);
if (device->queue_count[i])
- vk_free(&device->alloc, device->queues[i]);
+ vk_free(&device->vk.alloc, device->queues[i]);
}
for (unsigned i = 0; i < ARRAY_SIZE(device->scratch_bos); i++) {
VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
- vk_free(&device->alloc, device);
+ vk_free(&device->vk.alloc, device);
}
VkResult
return VK_SUCCESS;
}
- mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ mem = vk_object_alloc(&device->vk, pAllocator, sizeof(*mem),
+ VK_OBJECT_TYPE_DEVICE_MEMORY);
if (mem == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (result != VK_SUCCESS) {
- vk_free2(&device->alloc, pAllocator, mem);
+ vk_object_free(&device->vk, pAllocator, mem);
return result;
}
return;
tu_bo_finish(device, &mem->bo);
- vk_free2(&device->alloc, pAllocator, mem);
+ vk_object_free(&device->vk, pAllocator, mem);
}
VkResult
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_semaphore *sem =
- vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ vk_object_alloc(&device->vk, pAllocator, sizeof(*sem),
+ VK_OBJECT_TYPE_SEMAPHORE);
if (!sem)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (handleTypes) {
if (drmSyncobjCreate(device->physical_device->local_fd, 0, &sem->permanent.syncobj) < 0) {
- vk_free2(&device->alloc, pAllocator, sem);
+ vk_object_free(&device->vk, pAllocator, sem);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
tu_semaphore_part_destroy(device, &sem->permanent);
tu_semaphore_part_destroy(device, &sem->temporary);
- vk_free2(&device->alloc, pAllocator, sem);
+ vk_object_free(&device->vk, pAllocator, sem);
}
VkResult
VkEvent *pEvent)
{
TU_FROM_HANDLE(tu_device, device, _device);
- struct tu_event *event =
- vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ struct tu_event *event =
+ vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
+ VK_OBJECT_TYPE_EVENT);
if (!event)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
fail_map:
tu_bo_finish(device, &event->bo);
fail_alloc:
- vk_free2(&device->alloc, pAllocator, event);
+ vk_object_free(&device->vk, pAllocator, event);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
return;
tu_bo_finish(device, &event->bo);
- vk_free2(&device->alloc, pAllocator, event);
+ vk_object_free(&device->vk, pAllocator, event);
}
VkResult
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
- buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ buffer = vk_object_alloc(&device->vk, pAllocator, sizeof(*buffer),
+ VK_OBJECT_TYPE_BUFFER);
if (buffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!buffer)
return;
- vk_free2(&device->alloc, pAllocator, buffer);
+ vk_object_free(&device->vk, pAllocator, buffer);
}
VkResult
size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
pCreateInfo->attachmentCount;
- framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ framebuffer = vk_object_alloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_FRAMEBUFFER);
if (framebuffer == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!fb)
return;
- vk_free2(&device->alloc, pAllocator, fb);
+
+ vk_object_free(&device->vk, pAllocator, fb);
}
static void
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
- sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ sampler = vk_object_alloc(&device->vk, pAllocator, sizeof(*sampler),
+ VK_OBJECT_TYPE_SAMPLER);
if (!sampler)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!sampler)
return;
- vk_free2(&device->alloc, pAllocator, sampler);
+
+ vk_object_free(&device->vk, pAllocator, sampler);
}
/* vk_icd.h does not declare this function, so we declare it here to
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_fence *fence =
- vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-
+ vk_object_alloc(&device->vk, pAllocator, sizeof(*fence),
+ VK_OBJECT_TYPE_FENCE);
if (!fence)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
tu_fence_finish(fence);
- vk_free2(&device->alloc, pAllocator, fence);
+ vk_object_free(&device->vk, pAllocator, fence);
}
/**
struct pollfd stack_fds[8];
struct pollfd *fds = stack_fds;
if (fenceCount > ARRAY_SIZE(stack_fds)) {
- fds = vk_alloc(&device->alloc, sizeof(*fds) * fenceCount, 8,
+ fds = vk_alloc(&device->vk.alloc, sizeof(*fds) * fenceCount, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!fds)
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
if (fds != stack_fds)
- vk_free(&device->alloc, fds);
+ vk_free(&device->vk.alloc, fds);
if (result != VK_SUCCESS)
return result;
assert(pCreateInfo->extent.height > 0);
assert(pCreateInfo->extent.depth > 0);
- image = vk_zalloc2(&device->alloc, alloc, sizeof(*image), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ image = vk_object_zalloc(&device->vk, alloc, sizeof(*image),
+ VK_OBJECT_TYPE_IMAGE);
if (!image)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
return VK_SUCCESS;
invalid_layout:
- vk_free2(&device->alloc, alloc, image);
+ vk_object_free(&device->vk, alloc, image);
return vk_error(device->instance, VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT);
}
if (image->owned_memory != VK_NULL_HANDLE)
tu_FreeMemory(_device, image->owned_memory, pAllocator);
- vk_free2(&device->alloc, pAllocator, image);
+ vk_object_free(&device->vk, pAllocator, image);
}
void
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_image_view *view;
- view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ view = vk_object_alloc(&device->vk, pAllocator, sizeof(*view),
+ VK_OBJECT_TYPE_IMAGE_VIEW);
if (view == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!iview)
return;
- vk_free2(&device->alloc, pAllocator, iview);
+
+ vk_object_free(&device->vk, pAllocator, iview);
}
void
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_buffer_view *view;
- view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ view = vk_object_alloc(&device->vk, pAllocator, sizeof(*view),
+ VK_OBJECT_TYPE_BUFFER_VIEW);
if (!view)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!view)
return;
- vk_free2(&device->alloc, pAllocator, view);
+ vk_object_free(&device->vk, pAllocator, view);
}
attachments_offset = size;
size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
- pass = vk_zalloc2(&device->alloc, pAllocator, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pass = vk_object_zalloc(&device->vk, pAllocator, size,
+ VK_OBJECT_TYPE_RENDER_PASS);
if (pass == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (subpass_attachment_count) {
pass->subpass_attachments = vk_alloc2(
- &device->alloc, pAllocator,
+ &device->vk.alloc, pAllocator,
subpass_attachment_count * sizeof(struct tu_subpass_attachment), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pass->subpass_attachments == NULL) {
- vk_free2(&device->alloc, pAllocator, pass);
+ vk_object_free(&device->vk, pAllocator, pass);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
} else
if (!_pass)
return;
- vk_free2(&device->alloc, pAllocator, pass->subpass_attachments);
- vk_free2(&device->alloc, pAllocator, pass);
+ vk_free2(&device->vk.alloc, pAllocator, pass->subpass_attachments);
+ vk_object_free(&device->vk, pAllocator, pass);
}
void
{
VkResult result;
- *pipeline =
- vk_zalloc2(&builder->device->alloc, builder->alloc, sizeof(**pipeline),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ *pipeline = vk_object_zalloc(&builder->device->vk, builder->alloc,
+ sizeof(**pipeline), VK_OBJECT_TYPE_PIPELINE);
if (!*pipeline)
return VK_ERROR_OUT_OF_HOST_MEMORY;
/* compile and upload shaders */
result = tu_pipeline_builder_compile_shaders(builder, *pipeline);
if (result != VK_SUCCESS) {
- vk_free2(&builder->device->alloc, builder->alloc, *pipeline);
+ vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
return result;
}
result = tu_pipeline_allocate_cs(builder->device, *pipeline, builder, NULL);
if (result != VK_SUCCESS) {
- vk_free2(&builder->device->alloc, builder->alloc, *pipeline);
+ vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
return result;
}
*pPipeline = VK_NULL_HANDLE;
- pipeline =
- vk_zalloc2(&dev->alloc, pAllocator, sizeof(*pipeline), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pipeline = vk_object_zalloc(&dev->vk, pAllocator, sizeof(*pipeline),
+ VK_OBJECT_TYPE_PIPELINE);
if (!pipeline)
return VK_ERROR_OUT_OF_HOST_MEMORY;
if (shader)
tu_shader_destroy(dev, shader, pAllocator);
- vk_free2(&dev->alloc, pAllocator, pipeline);
+ vk_object_free(&dev->vk, pAllocator, pipeline);
return result;
}
return;
tu_pipeline_finish(pipeline, dev, pAllocator);
- vk_free2(&dev->alloc, pAllocator, pipeline);
+ vk_object_free(&dev->vk, pAllocator, pipeline);
}
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
- cache = vk_alloc2(&device->alloc, pAllocator, sizeof(*cache), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ cache = vk_object_alloc(&device->vk, pAllocator, sizeof(*cache),
+ VK_OBJECT_TYPE_PIPELINE_CACHE);
if (cache == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pAllocator)
cache->alloc = *pAllocator;
else
- cache->alloc = device->alloc;
+ cache->alloc = device->vk.alloc;
tu_pipeline_cache_init(cache, device);
return;
tu_pipeline_cache_finish(cache);
- vk_free2(&device->alloc, pAllocator, cache);
+ vk_object_free(&device->vk, pAllocator, cache);
}
VkResult
#include "util/macros.h"
#include "util/u_atomic.h"
#include "vk_alloc.h"
+#include "vk_object.h"
#include "vk_debug_report.h"
#include "wsi_common.h"
struct tu_physical_device
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct tu_instance *instance;
struct tu_instance
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
VkAllocationCallbacks alloc;
struct tu_pipeline_cache
{
+ struct vk_object_base base;
+
struct tu_device *device;
pthread_mutex_t mutex;
struct tu_fence
{
+ struct vk_object_base base;
struct wsi_fence *fence_wsi;
bool signaled;
int fd;
struct tu_queue
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
+
struct tu_device *device;
uint32_t queue_family_index;
int queue_idx;
struct tu_device
{
- VK_LOADER_DATA _loader_data;
-
- VkAllocationCallbacks alloc;
-
+ struct vk_device vk;
struct tu_instance *instance;
struct tu_queue *queues[TU_MAX_QUEUE_FAMILIES];
struct tu_device_memory
{
+ struct vk_object_base base;
+
struct tu_bo bo;
VkDeviceSize size;
struct tu_descriptor_set
{
+ struct vk_object_base base;
+
const struct tu_descriptor_set_layout *layout;
struct tu_descriptor_pool *pool;
uint32_t size;
struct tu_descriptor_pool
{
+ struct vk_object_base base;
+
struct tu_bo bo;
uint64_t current_offset;
uint64_t size;
struct tu_descriptor_update_template
{
+ struct vk_object_base base;
+
uint32_t entry_count;
struct tu_descriptor_update_template_entry entry[0];
};
struct tu_buffer
{
+ struct vk_object_base base;
+
VkDeviceSize size;
VkBufferUsageFlags usage;
struct tu_cmd_pool
{
+ struct vk_object_base base;
+
VkAllocationCallbacks alloc;
struct list_head cmd_buffers;
struct list_head free_cmd_buffers;
struct tu_cmd_buffer
{
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct tu_device *device;
struct tu_event
{
+ struct vk_object_base base;
struct tu_bo bo;
};
struct tu_shader_module
{
+ struct vk_object_base base;
+
unsigned char sha1[20];
uint32_t code_size;
struct tu_pipeline
{
+ struct vk_object_base base;
+
struct tu_cs cs;
struct tu_pipeline_layout *layout;
struct tu_image
{
+ struct vk_object_base base;
+
VkImageType type;
/* The original VkFormat provided by the client. This may not match any
* of the actual surface formats.
struct tu_image_view
{
+ struct vk_object_base base;
+
struct tu_image *image; /**< VkImageViewCreateInfo::image */
uint64_t base_addr;
};
struct tu_sampler_ycbcr_conversion {
+ struct vk_object_base base;
+
VkFormat format;
VkSamplerYcbcrModelConversion ycbcr_model;
VkSamplerYcbcrRange ycbcr_range;
};
struct tu_sampler {
+ struct vk_object_base base;
+
uint32_t descriptor[A6XX_TEX_SAMP_DWORDS];
struct tu_sampler_ycbcr_conversion *ycbcr_sampler;
};
struct tu_buffer_view
{
+ struct vk_object_base base;
+
uint32_t descriptor[A6XX_TEX_CONST_DWORDS];
struct tu_buffer *buffer;
struct tu_framebuffer
{
+ struct vk_object_base base;
+
uint32_t width;
uint32_t height;
uint32_t layers;
struct tu_render_pass
{
+ struct vk_object_base base;
+
uint32_t attachment_count;
uint32_t subpass_count;
uint32_t gmem_pixels;
struct tu_query_pool
{
+ struct vk_object_base base;
+
VkQueryType type;
uint32_t stride;
uint64_t size;
struct tu_semaphore
{
+ struct vk_object_base base;
+
struct tu_semaphore_part permanent;
struct tu_semaphore_part temporary;
};
}
struct tu_query_pool *pool =
- vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-
+ vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
+ VK_OBJECT_TYPE_QUERY_POOL);
if (!pool)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result = tu_bo_init_new(device, &pool->bo,
pCreateInfo->queryCount * slot_size);
if (result != VK_SUCCESS) {
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_free(&device->vk, pAllocator, pool);
return result;
}
result = tu_bo_map(device, &pool->bo);
if (result != VK_SUCCESS) {
tu_bo_finish(device, &pool->bo);
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_free(&device->vk, pAllocator, pool);
return result;
}
return;
tu_bo_finish(device, &pool->bo);
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_free(&device->vk, pAllocator, pool);
}
static uint32_t
struct tu_shader *shader;
shader = vk_zalloc2(
- &dev->alloc, alloc,
+ &dev->vk.alloc, alloc,
sizeof(*shader),
8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!shader)
}
if (!nir) {
- vk_free2(&dev->alloc, alloc, shader);
+ vk_free2(&dev->vk.alloc, alloc, shader);
return NULL;
}
{
ir3_shader_destroy(shader->ir3_shader);
- vk_free2(&dev->alloc, alloc, shader);
+ vk_free2(&dev->vk.alloc, alloc, shader);
}
VkResult
assert(pCreateInfo->flags == 0);
assert(pCreateInfo->codeSize % 4 == 0);
- module = vk_alloc2(&device->alloc, pAllocator,
- sizeof(*module) + pCreateInfo->codeSize, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ module = vk_object_alloc(&device->vk, pAllocator,
+ sizeof(*module) + pCreateInfo->codeSize,
+ VK_OBJECT_TYPE_SHADER_MODULE);
if (module == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (!module)
return;
- vk_free2(&device->alloc, pAllocator, module);
+ vk_object_free(&device->vk, pAllocator, module);
}
if (pAllocator)
alloc = pAllocator;
else
- alloc = &device->alloc;
+ alloc = &device->vk.alloc;
return wsi_common_create_swapchain(&device->physical_device->wsi_device,
tu_device_to_handle(device),
if (pAllocator)
alloc = pAllocator;
else
- alloc = &device->alloc;
+ alloc = &device->vk.alloc;
wsi_common_destroy_swapchain(_device, swapchain, alloc);
}