diff --git a/src/intel/vulkan/anv_descriptor_set.c b/src/intel/vulkan/anv_descriptor_set.c
index 1ad89185dd7..8491436f14a 100644
--- a/src/intel/vulkan/anv_descriptor_set.c
+++ b/src/intel/vulkan/anv_descriptor_set.c
@@ -103,6 +103,16 @@ anv_descriptor_data_for_type(const struct anv_physical_device *device,
                  type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC))
       data |= ANV_DESCRIPTOR_ADDRESS_RANGE;
 
+   /* On Ivy Bridge and Bay Trail, we need to swizzle textures in the shader.
+    * We do not handle VK_DESCRIPTOR_TYPE_STORAGE_IMAGE and
+    * VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT because they must already
+    * have an identity swizzle.
+    */
+   if (device->info.gen == 7 && !device->info.is_haswell &&
+       (type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
+        type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER))
+      data |= ANV_DESCRIPTOR_TEXTURE_SWIZZLE;
+
    return data;
 }
 
@@ -123,9 +133,22 @@ anv_descriptor_data_size(enum anv_descriptor_data data)
    if (data & ANV_DESCRIPTOR_ADDRESS_RANGE)
       size += sizeof(struct anv_address_range_descriptor);
 
+   if (data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE)
+      size += sizeof(struct anv_texture_swizzle_descriptor);
+
    return size;
 }
 
+static bool
+anv_needs_descriptor_buffer(VkDescriptorType desc_type,
+                            enum anv_descriptor_data desc_data)
+{
+   if (desc_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT ||
+       anv_descriptor_data_size(desc_data) > 0)
+      return true;
+   return false;
+}
+
 /** Returns the size in bytes of each descriptor with the given layout */
 unsigned
 anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout)
@@ -226,6 +249,7 @@ void anv_GetDescriptorSetLayoutSupport(
       &device->instance->physicalDevice;
 
    uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
+   bool needs_descriptor_buffer = false;
 
    for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) {
       const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b];
@@ -233,11 +257,18 @@ void anv_GetDescriptorSetLayoutSupport(
       enum anv_descriptor_data desc_data =
          anv_descriptor_data_for_type(pdevice, binding->descriptorType);
 
+      if (anv_needs_descriptor_buffer(binding->descriptorType, desc_data))
+         needs_descriptor_buffer = true;
+
       switch (binding->descriptorType) {
       case VK_DESCRIPTOR_TYPE_SAMPLER:
          /* There is no real limit on samplers */
          break;
 
+      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
+         /* Inline uniforms don't use a binding */
+         break;
+
       case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
          if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
             break;
@@ -265,6 +296,11 @@ void anv_GetDescriptorSetLayoutSupport(
       }
    }
 
+   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+      if (needs_descriptor_buffer)
+         surface_count[s] += 1;
+   }
+
    bool supported = true;
    for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      /* Our maximum binding table size is 240 and we need to reserve 8 for
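The hunk above makes vkGetDescriptorSetLayoutSupport account for the descriptor buffer: any layout that needs one (inline uniform blocks, or any descriptor data with a non-zero size) now costs one extra binding-table surface in every shader stage, counted against the 240-entry table mentioned in the comment above. This limit is what an application sees through the core Vulkan 1.1 query; a minimal usage sketch, assuming only a valid VkDevice named device:

VkDescriptorSetLayoutBinding binding = {
   .binding = 0,
   .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
   .descriptorCount = 256,   /* deliberately large */
   .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
};
VkDescriptorSetLayoutCreateInfo layout_info = {
   .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
   .bindingCount = 1,
   .pBindings = &binding,
};
VkDescriptorSetLayoutSupport support = {
   .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT,
};
vkGetDescriptorSetLayoutSupport(device, &layout_info, &support);
if (!support.supported) {
   /* Shrink descriptorCount or split the binding across sets. */
}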
@@ -352,32 +388,31 @@ VkResult anv_CreateDescriptorSetLayout(
    for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
       const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
       uint32_t b = binding->binding;
-      /* We temporarily store the pointer to the binding in the
+      /* We temporarily store the pCreateInfo->pBindings[] index (plus one) in the
       * immutable_samplers pointer.  This provides us with a quick-and-dirty
       * way to sort the bindings by binding number.
       */
-      set_layout->binding[b].immutable_samplers = (void *)binding;
+      set_layout->binding[b].immutable_samplers = (void *)(uintptr_t)(j + 1);
    }
 
    const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *binding_flags_info =
       vk_find_struct_const(pCreateInfo->pNext,
                            DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT);
-   if (binding_flags_info)
-      assert(binding_flags_info->bindingCount == pCreateInfo->bindingCount);
 
    for (uint32_t b = 0; b <= max_binding; b++) {
-      const VkDescriptorSetLayoutBinding *binding =
-         (void *)set_layout->binding[b].immutable_samplers;
-
-      if (binding == NULL)
-         continue;
-
-      /* We temporarily stashed the pointer to the binding in the
-       * immutable_samplers pointer. Now that we've pulled it back out
-       * again, we reset immutable_samplers to NULL.
+      /* We stashed the pCreateInfo->pBindings[] index (plus one) in the
+       * immutable_samplers pointer.  Skip NULL entries (unused bindings),
+       * then recover the index and reset the pointer to NULL.
        */
+      if (set_layout->binding[b].immutable_samplers == NULL)
+         continue;
+      const uint32_t info_idx =
+         (uintptr_t)(void *)set_layout->binding[b].immutable_samplers - 1;
       set_layout->binding[b].immutable_samplers = NULL;
 
+      const VkDescriptorSetLayoutBinding *binding =
+         &pCreateInfo->pBindings[info_idx];
+
       if (binding->descriptorCount == 0)
          continue;
 
@@ -385,11 +420,10 @@ VkResult anv_CreateDescriptorSetLayout(
       set_layout->binding[b].type = binding->descriptorType;
 #endif
 
-      if (binding_flags_info) {
-         uint32_t binding_strct_idx = binding - pCreateInfo->pBindings;
-         assert(binding_strct_idx < binding_flags_info->bindingCount);
+      if (binding_flags_info && binding_flags_info->bindingCount > 0) {
+         assert(binding_flags_info->bindingCount == pCreateInfo->bindingCount);
          set_layout->binding[b].flags =
-            binding_flags_info->pBindingFlags[binding_strct_idx];
+            binding_flags_info->pBindingFlags[info_idx];
       }
 
       set_layout->binding[b].data =
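The rewrite above stores the pCreateInfo->pBindings[] index plus one instead of the binding pointer itself: NULL keeps meaning "no binding at this number", and the recovered info_idx indexes pBindingFlags[] directly, which is why the pointer subtraction in the second hunk could go away. A minimal sketch of the idiom (stash_index and unstash_index are illustrative names, not part of the patch):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Store index i as (i + 1) so that NULL stays free to mean "empty". */
static inline void
stash_index(void **slot, uint32_t i)
{
   *slot = (void *)(uintptr_t)(i + 1);
}

/* Returns false for an empty slot; otherwise recovers the index and
 * resets the slot so it can later hold a real pointer. */
static inline bool
unstash_index(void **slot, uint32_t *i)
{
   if (*slot == NULL)
      return false;
   *i = (uint32_t)((uintptr_t)*slot - 1);
   *slot = NULL;
   return true;
}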
@@ -678,10 +712,10 @@ VkResult anv_CreateDescriptorPool(
     * of them to 32B.
     */
    descriptor_bo_size += 32 * pCreateInfo->maxSets;
-   descriptor_bo_size = ALIGN(descriptor_bo_size, 4096);
    /* We align inline uniform blocks to 32B */
    if (inline_info)
       descriptor_bo_size += 32 * inline_info->maxInlineUniformBlockBindings;
+   descriptor_bo_size = ALIGN(descriptor_bo_size, 4096);
 
    const size_t pool_size =
       pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
@@ -747,20 +781,19 @@ void anv_DestroyDescriptorPool(
    if (!pool)
       return;
 
+   list_for_each_entry_safe(struct anv_descriptor_set, set,
+                            &pool->desc_sets, pool_link) {
+      anv_descriptor_set_layout_unref(device, set->layout);
+   }
+
    if (pool->bo.size) {
       anv_gem_munmap(pool->bo.map, pool->bo.size);
       anv_vma_free(device, &pool->bo);
       anv_gem_close(device, pool->bo.gem_handle);
+      util_vma_heap_finish(&pool->bo_heap);
    }
 
    anv_state_stream_finish(&pool->surface_state_stream);
 
-   list_for_each_entry_safe(struct anv_descriptor_set, set,
-                            &pool->desc_sets, pool_link) {
-      anv_descriptor_set_destroy(device, pool, set);
-   }
-
-   util_vma_heap_finish(&pool->bo_heap);
-
    vk_free2(&device->alloc, pAllocator, pool);
 }
 
@@ -774,8 +807,9 @@ VkResult anv_ResetDescriptorPool(
 
    list_for_each_entry_safe(struct anv_descriptor_set, set,
                             &pool->desc_sets, pool_link) {
-      anv_descriptor_set_destroy(device, pool, set);
+      anv_descriptor_set_layout_unref(device, set->layout);
    }
+   list_inithead(&pool->desc_sets);
 
    pool->next = 0;
    pool->free_list = EMPTY;
@@ -842,8 +876,6 @@ anv_descriptor_pool_free_set(struct anv_descriptor_pool *pool,
       entry->size = set->size;
       pool->free_list = (char *) entry - pool->data;
    }
-
-   list_del(&set->pool_link);
 }
 
 struct surface_state_free_list_entry {
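Reordering the ALIGN in the first hunk matters because the inline-uniform-block bytes used to be added after the page round-up, leaving the final BO size potentially unaligned. With illustrative numbers (not from the patch): per-layout descriptors summing to 700 bytes, maxSets = 100 and maxInlineUniformBlockBindings = 2:

   /* old order */ ALIGN(700 + 32 * 100, 4096) + 32 * 2 = 4096 + 64 = 4160   /* not page-aligned */
   /* new order */ ALIGN(700 + 32 * 100 + 32 * 2, 4096) = 4096               /* page-aligned */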
@@ -904,9 +936,9 @@ anv_descriptor_set_create(struct anv_device *device,
       /* Align the size to 32 so that alignment gaps don't cause extra holes
        * in the heap, which can lead to bad performance.
        */
+      uint32_t set_buffer_size = ALIGN(layout->descriptor_buffer_size, 32);
       uint64_t pool_vma_offset =
-         util_vma_heap_alloc(&pool->bo_heap,
-                             ALIGN(layout->descriptor_buffer_size, 32), 32);
+         util_vma_heap_alloc(&pool->bo_heap, set_buffer_size, 32);
       if (pool_vma_offset == 0) {
          anv_descriptor_pool_free_set(pool, set);
          return vk_error(VK_ERROR_FRAGMENTED_POOL);
@@ -914,7 +946,7 @@ anv_descriptor_set_create(struct anv_device *device,
       assert(pool_vma_offset >= POOL_HEAP_OFFSET &&
              pool_vma_offset - POOL_HEAP_OFFSET <= INT32_MAX);
       set->desc_mem.offset = pool_vma_offset - POOL_HEAP_OFFSET;
-      set->desc_mem.alloc_size = layout->descriptor_buffer_size;
+      set->desc_mem.alloc_size = set_buffer_size;
       set->desc_mem.map = pool->bo.map + set->desc_mem.offset;
 
       set->desc_surface_state = anv_descriptor_pool_alloc_state(pool);
@@ -973,6 +1005,8 @@ anv_descriptor_set_create(struct anv_device *device,
          anv_descriptor_pool_alloc_state(pool);
    }
 
+   list_addtail(&set->pool_link, &pool->desc_sets);
+
    *out_set = set;
 
    return VK_SUCCESS;
@@ -995,6 +1029,8 @@ anv_descriptor_set_destroy(struct anv_device *device,
    for (uint32_t b = 0; b < set->buffer_view_count; b++)
       anv_descriptor_pool_free_state(pool, set->buffer_views[b].surface_state);
 
+   list_del(&set->pool_link);
+
    anv_descriptor_pool_free_set(pool, set);
 }
 
@@ -1018,8 +1054,6 @@ VkResult anv_AllocateDescriptorSets(
       if (result != VK_SUCCESS)
          break;
 
-      list_addtail(&set->pool_link, &pool->desc_sets);
-
       pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
    }
 
@@ -1186,6 +1220,26 @@ anv_descriptor_set_write_image_view(struct anv_device *device,
 
       anv_descriptor_set_write_image_param(desc_map, image_param);
    }
+
+   if (image_view && (bind_layout->data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE)) {
+      assert(!(bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE));
+      assert(image_view);
+      struct anv_texture_swizzle_descriptor desc_data[3];
+      memset(desc_data, 0, sizeof(desc_data));
+
+      for (unsigned p = 0; p < image_view->n_planes; p++) {
+         desc_data[p] = (struct anv_texture_swizzle_descriptor) {
+            .swizzle = {
+               (uint8_t)image_view->planes[p].isl.swizzle.r,
+               (uint8_t)image_view->planes[p].isl.swizzle.g,
+               (uint8_t)image_view->planes[p].isl.swizzle.b,
+               (uint8_t)image_view->planes[p].isl.swizzle.a,
+            },
+         };
+      }
+      memcpy(desc_map, desc_data,
+             MAX2(1, bind_layout->max_plane_count) * sizeof(desc_data[0]));
+   }
 }
 
 void
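The final hunk writes the per-plane format swizzle into the descriptor buffer: on Ivy Bridge and Bay Trail the sampler cannot apply the view swizzle, so the compiled shader reads these bytes back and swizzles the sampled result itself (this is the ANV_DESCRIPTOR_TEXTURE_SWIZZLE data set up in the first hunk). The authoritative definition of anv_texture_swizzle_descriptor lives in anv_private.h; a shape consistent with the writes above would be:

struct anv_texture_swizzle_descriptor {
   /* ISL channel selects for r, g, b, a, one byte each, matching the
    * (uint8_t) casts in the loop above. */
   uint8_t swizzle[4];

   /* Padding to keep the descriptor size a multiple of 8 bytes. */
   uint32_t _pad;
};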