assert(_set < MAX_SETS);
- const struct anv_descriptor_set_layout *set_layout =
- layout->set[_set].layout;
+ struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
struct anv_push_descriptor_set *push_set =
anv_cmd_buffer_get_push_descriptor_set(cmd_buffer,
assert(_set < MAX_PUSH_DESCRIPTORS);
- const struct anv_descriptor_set_layout *set_layout =
- layout->set[_set].layout;
+ struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
struct anv_push_descriptor_set *push_set =
anv_cmd_buffer_get_push_descriptor_set(cmd_buffer,
struct anv_descriptor_set_binding_layout *bindings;
struct anv_sampler **samplers;
+ /* We need to allocate descriptor set layouts off the device allocator
+ * with DEVICE scope because they are reference counted and may not be
+ * destroyed when vkDestroyDescriptorSetLayout is called.
+ */
ANV_MULTIALLOC(ma);
anv_multialloc_add(&ma, &set_layout, 1);
anv_multialloc_add(&ma, &bindings, max_binding + 1);
anv_multialloc_add(&ma, &samplers, immutable_sampler_count);
- if (!anv_multialloc_alloc2(&ma, &device->alloc, pAllocator,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
+ if (!anv_multialloc_alloc(&ma, &device->alloc,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
memset(set_layout, 0, sizeof(*set_layout));
+ set_layout->ref_cnt = 1;
set_layout->binding_count = max_binding + 1;
for (uint32_t b = 0; b <= max_binding; b++) {
if (!set_layout)
return;
- vk_free2(&device->alloc, pAllocator, set_layout);
+ anv_descriptor_set_layout_unref(device, set_layout);
}
static void
ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
pCreateInfo->pSetLayouts[set]);
layout->set[set].layout = set_layout;
+ anv_descriptor_set_layout_ref(set_layout);
layout->set[set].dynamic_offset_start = dynamic_offset_count;
for (uint32_t b = 0; b < set_layout->binding_count; b++) {
if (!pipeline_layout)
return;
+ for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
+ anv_descriptor_set_layout_unref(device, pipeline_layout->set[i].layout);
+
vk_free2(&device->alloc, pAllocator, pipeline_layout);
}
VkResult
anv_descriptor_set_create(struct anv_device *device,
struct anv_descriptor_pool *pool,
- const struct anv_descriptor_set_layout *layout,
+ struct anv_descriptor_set_layout *layout,
struct anv_descriptor_set **out_set)
{
struct anv_descriptor_set *set;
}
}
- set->size = size;
set->layout = layout;
+ anv_descriptor_set_layout_ref(layout);
+
+ set->size = size;
set->buffer_views =
(struct anv_buffer_view *) &set->descriptors[layout->size];
set->buffer_count = layout->buffer_count;
struct anv_descriptor_pool *pool,
struct anv_descriptor_set *set)
{
+ anv_descriptor_set_layout_unref(device, set->layout);
+
/* Put the buffer view surface state back on the free list. */
for (uint32_t b = 0; b < set->buffer_count; b++) {
struct surface_state_free_list_entry *entry =
};
struct anv_descriptor_set_layout {
+ /* Descriptor set layouts can be destroyed at almost any time */
+ uint32_t ref_cnt;
+
/* Number of bindings in this descriptor set */
uint16_t binding_count;
struct anv_descriptor_set_binding_layout binding[0];
};
+static inline void
+anv_descriptor_set_layout_ref(struct anv_descriptor_set_layout *layout)
+{
+ assert(layout && layout->ref_cnt >= 1);
+ p_atomic_inc(&layout->ref_cnt);
+}
+
+static inline void
+anv_descriptor_set_layout_unref(struct anv_device *device,
+ struct anv_descriptor_set_layout *layout)
+{
+ assert(layout && layout->ref_cnt >= 1);
+ if (p_atomic_dec_zero(&layout->ref_cnt))
+ vk_free(&device->alloc, layout);
+}
+
struct anv_descriptor {
VkDescriptorType type;
};
struct anv_descriptor_set {
- const struct anv_descriptor_set_layout *layout;
+ struct anv_descriptor_set_layout *layout;
uint32_t size;
uint32_t buffer_count;
struct anv_buffer_view *buffer_views;
VkResult
anv_descriptor_set_create(struct anv_device *device,
struct anv_descriptor_pool *pool,
- const struct anv_descriptor_set_layout *layout,
+ struct anv_descriptor_set_layout *layout,
struct anv_descriptor_set **out_set);
void