radv: drop tcs_out_layout
src/amd/vulkan/radv_descriptor_set.c
index ec7fd3d8cc80c48651a0558383689beadd4872c0..3d56f8c2176fbed6434e5c268aba2169526c6ddb 100644
 #include "radv_private.h"
 #include "sid.h"
 
+
+static bool has_equal_immutable_samplers(const VkSampler *samplers, uint32_t count)
+{
+       if (!samplers)
+               return false;
+       for(uint32_t i = 1; i < count; ++i) {
+               if (memcmp(radv_sampler_from_handle(samplers[0])->state,
+                          radv_sampler_from_handle(samplers[i])->state, 16)) {
+                       return false;
+               }
+       }
+       return true;
+}
+
 VkResult radv_CreateDescriptorSetLayout(
        VkDevice                                    _device,
        const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
@@ -66,6 +80,8 @@ VkResult radv_CreateDescriptorSetLayout(
 
        set_layout->binding_count = max_binding + 1;
        set_layout->shader_stages = 0;
+       set_layout->dynamic_shader_stages = 0;
+       set_layout->has_immutable_samplers = false;
        set_layout->size = 0;
 
        memset(set_layout->binding, 0, size - sizeof(struct radv_descriptor_set_layout));
@@ -121,7 +137,6 @@ VkResult radv_CreateDescriptorSetLayout(
                }
 
                set_layout->size = align(set_layout->size, alignment);
-               assert(binding->descriptorCount > 0);
                set_layout->binding[b].type = binding->descriptorType;
                set_layout->binding[b].array_size = binding->descriptorCount;
                set_layout->binding[b].offset = set_layout->size;
@@ -130,14 +145,13 @@ VkResult radv_CreateDescriptorSetLayout(
 
                if (binding->pImmutableSamplers) {
                        set_layout->binding[b].immutable_samplers_offset = samplers_offset;
-                       set_layout->binding[b].immutable_samplers_equal = true;
+                       set_layout->binding[b].immutable_samplers_equal =
+                               has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount);
+                       set_layout->has_immutable_samplers = true;
 
 
                        for (uint32_t i = 0; i < binding->descriptorCount; i++)
                                memcpy(samplers + 4 * i, &radv_sampler_from_handle(binding->pImmutableSamplers[i])->state, 16);
-                       for (uint32_t i = 1; i < binding->descriptorCount; i++)
-                               if (memcmp(samplers + 4 * i, samplers, 16) != 0)
-                                       set_layout->binding[b].immutable_samplers_equal = false;
 
                        /* Don't reserve space for the samplers if they're not accessed. */
                        if (set_layout->binding[b].immutable_samplers_equal) {
@@ -179,6 +193,69 @@ void radv_DestroyDescriptorSetLayout(
        vk_free2(&device->alloc, pAllocator, set_layout);
 }
 
+void radv_GetDescriptorSetLayoutSupport(VkDevice device,
+                                        const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+                                        VkDescriptorSetLayoutSupport* pSupport)
+{
+       bool supported = true;
+       uint64_t size = 0;
+       for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
+               const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[i];
+
+               if (binding->descriptorCount == 0)
+                       continue;
+
+               uint64_t descriptor_size = 0;
+               uint64_t descriptor_alignment = 1;
+               switch (binding->descriptorType) {
+               case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+               case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+                       break;
+               case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+               case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+               case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+               case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+                       descriptor_size = 16;
+                       descriptor_alignment = 16;
+                       break;
+               case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+               case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+               case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+                       descriptor_size = 64;
+                       descriptor_alignment = 32;
+                       break;
+               case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+                       if (!has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount)) {
+                               descriptor_size = 64;
+                       } else {
+                               descriptor_size = 96;
+                       }
+                       descriptor_alignment = 32;
+                       break;
+               case VK_DESCRIPTOR_TYPE_SAMPLER:
+                       if (!has_equal_immutable_samplers(binding->pImmutableSamplers, binding->descriptorCount)) {
+                               descriptor_size = 16;
+                               descriptor_alignment = 16;
+                       }
+                       break;
+               default:
+                       unreachable("unknown descriptor type\n");
+                       break;
+               }
+
+               if (size && !align_u64(size, descriptor_alignment)) {
+                       supported = false;
+               }
+               size = align_u64(size, descriptor_alignment);
+               if (descriptor_size && (UINT64_MAX - size) / descriptor_size < binding->descriptorCount) {
+                       supported = false;
+               }
+               size += binding->descriptorCount * descriptor_size;
+       }
+
+       pSupport->supported = supported;
+}
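
radv_GetDescriptorSetLayoutSupport() backs the vkGetDescriptorSetLayoutSupport entry point (VK_KHR_maintenance3, core in Vulkan 1.1). A minimal application-side sketch of the query, assuming a valid VkDevice; the binding contents are illustrative and not taken from the patch:

	VkDescriptorSetLayoutBinding binding = {
		.binding = 0,
		.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
		.descriptorCount = 65536, /* deliberately large */
		.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
	};
	VkDescriptorSetLayoutCreateInfo layout_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.bindingCount = 1,
		.pBindings = &binding,
	};
	VkDescriptorSetLayoutSupport support = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT,
	};
	vkGetDescriptorSetLayoutSupport(device, &layout_info, &support);
	if (!support.supported) {
		/* Shrink the binding or split it across multiple sets. */
	}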
+
 /*
  * Pipeline layouts.  These have nothing to do with the pipeline.  They are
  * just multiple descriptor set layouts pasted together
@@ -225,6 +302,7 @@ VkResult radv_CreatePipelineLayout(
 
        layout->dynamic_offset_count = dynamic_offset_count;
        layout->push_constant_size = 0;
+
        for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
                const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
                layout->push_constant_size = MAX2(layout->push_constant_size,
@@ -292,58 +370,69 @@ radv_descriptor_set_create(struct radv_device *device,
                uint32_t layout_size = align_u32(layout->size, 32);
                set->size = layout->size;
 
+               if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
+                       vk_free2(&device->alloc, NULL, set);
+                       return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
+               }
+
                /* try to allocate linearly first, so that we don't spend
                 * time looking for gaps if the app only allocates &
                 * resets via the pool. */
                if (pool->current_offset + layout_size <= pool->size) {
                        set->bo = pool->bo;
                        set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
-                       set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
+                       set->va = radv_buffer_get_va(set->bo) + pool->current_offset;
+                       if (!pool->host_memory_base) {
+                               pool->entries[pool->entry_count].offset = pool->current_offset;
+                               pool->entries[pool->entry_count].size = layout_size;
+                               pool->entries[pool->entry_count].set = set;
+                               pool->entry_count++;
+                       }
                        pool->current_offset += layout_size;
-                       list_addtail(&set->vram_list, &pool->vram_list);
                } else if (!pool->host_memory_base) {
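+                       /* Linear space is exhausted: do a first-fit scan over the
+                        * entries array (kept sorted by offset) and fail with
+                        * VK_ERROR_OUT_OF_POOL_MEMORY_KHR if no gap is large enough. */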
                        uint64_t offset = 0;
-                       struct list_head *prev = &pool->vram_list;
-                       struct radv_descriptor_set *cur;
+                       int index;
 
-                       assert(!pool->host_memory_base);
-                       LIST_FOR_EACH_ENTRY(cur, &pool->vram_list, vram_list) {
-                               uint64_t start = (uint8_t*)cur->mapped_ptr - pool->mapped_ptr;
-                               if (start - offset >= layout_size)
+                       for (index = 0; index < pool->entry_count; ++index) {
+                               if (pool->entries[index].offset - offset >= layout_size)
                                        break;
-
-                               offset = start + cur->size;
-                               prev = &cur->vram_list;
+                               offset = pool->entries[index].offset + pool->entries[index].size;
                        }
 
                        if (pool->size - offset < layout_size) {
-                               vk_free2(&device->alloc, NULL, set->dynamic_descriptors);
                                vk_free2(&device->alloc, NULL, set);
                                return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
                        }
                        set->bo = pool->bo;
                        set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
-                       set->va = device->ws->buffer_get_va(set->bo) + offset;
-                       list_add(&set->vram_list, prev);
+                       set->va = radv_buffer_get_va(set->bo) + offset;
+                       memmove(&pool->entries[index + 1], &pool->entries[index],
+                               sizeof(pool->entries[0]) * (pool->entry_count - index));
+                       pool->entries[index].offset = offset;
+                       pool->entries[index].size = layout_size;
+                       pool->entries[index].set = set;
+                       pool->entry_count++;
                } else
                        return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
        }
 
-       for (unsigned i = 0; i < layout->binding_count; ++i) {
-               if (!layout->binding[i].immutable_samplers_offset ||
-                   layout->binding[i].immutable_samplers_equal)
-                       continue;
+       if (layout->has_immutable_samplers) {
+               for (unsigned i = 0; i < layout->binding_count; ++i) {
+                       if (!layout->binding[i].immutable_samplers_offset ||
+                           layout->binding[i].immutable_samplers_equal)
+                               continue;
 
-               unsigned offset = layout->binding[i].offset / 4;
-               if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
-                       offset += 16;
+                       unsigned offset = layout->binding[i].offset / 4;
+                       if (layout->binding[i].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+                               offset += 16;
 
-               const uint32_t *samplers = (const uint32_t*)((const char*)layout + layout->binding[i].immutable_samplers_offset);
-               for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
-                       memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
-                       offset += layout->binding[i].size / 4;
-               }
+                       const uint32_t *samplers = (const uint32_t*)((const char*)layout + layout->binding[i].immutable_samplers_offset);
+                       for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
+                               memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
+                               offset += layout->binding[i].size / 4;
+                       }
 
+               }
        }
        *out_set = set;
        return VK_SUCCESS;
@@ -357,8 +446,17 @@ radv_descriptor_set_destroy(struct radv_device *device,
 {
        assert(!pool->host_memory_base);
 
-       if (free_bo && set->size)
-               list_del(&set->vram_list);
+       if (free_bo && set->size && !pool->host_memory_base) {
+               uint32_t offset = (uint8_t*)set->mapped_ptr - pool->mapped_ptr;
+               for (int i = 0; i < pool->entry_count; ++i) {
+                       if (pool->entries[i].offset == offset) {
+                               memmove(&pool->entries[i], &pool->entries[i+1],
+                                       sizeof(pool->entries[i]) * (pool->entry_count - i - 1));
+                               --pool->entry_count;
+                               break;
+                       }
+               }
+       }
        vk_free2(&device->alloc, NULL, set);
 }
 
@@ -410,6 +508,8 @@ VkResult radv_CreateDescriptorPool(
                host_size += sizeof(struct radeon_winsys_bo*) * bo_count;
                host_size += sizeof(struct radv_descriptor_range) * range_count;
                size += host_size;
+       } else {
+               size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
        }
 
        pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
@@ -426,13 +526,15 @@ VkResult radv_CreateDescriptorPool(
        }
 
        if (bo_size) {
-               pool->bo = device->ws->buffer_create(device->ws, bo_size,
-                                                       32, RADEON_DOMAIN_VRAM, 0);
+               pool->bo = device->ws->buffer_create(device->ws, bo_size, 32,
+                                                    RADEON_DOMAIN_VRAM,
+                                                    RADEON_FLAG_NO_INTERPROCESS_SHARING |
+                                                    RADEON_FLAG_READ_ONLY);
                pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
        }
        pool->size = bo_size;
+       pool->max_entry_count = pCreateInfo->maxSets;
 
-       list_inithead(&pool->vram_list);
        *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
        return VK_SUCCESS;
 }
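
Pools that do not use the host_memory_base path now track live sets in the entries array sized by maxSets, and radv_descriptor_set_create() returns VK_ERROR_OUT_OF_POOL_MEMORY_KHR once entry_count reaches max_entry_count. A minimal application-side sketch, assuming a valid VkDevice; the flag and counts are illustrative (the condition that selects the host-memory path lies outside this hunk):

	VkDescriptorPoolSize pool_sizes[] = {
		{ .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 64 },
		{ .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = 64 },
	};
	VkDescriptorPoolCreateInfo pool_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
		.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
		.maxSets = 32, /* becomes pool->max_entry_count */
		.poolSizeCount = 2,
		.pPoolSizes = pool_sizes,
	};
	VkDescriptorPool pool;
	vkCreateDescriptorPool(device, &pool_info, NULL, &pool);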
@@ -449,9 +551,8 @@ void radv_DestroyDescriptorPool(
                return;
 
        if (!pool->host_memory_base) {
-               list_for_each_entry_safe(struct radv_descriptor_set, set,
-                                        &pool->vram_list, vram_list) {
-                       radv_descriptor_set_destroy(device, pool, set, false);
+               for(int i = 0; i < pool->entry_count; ++i) {
+                       radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
                }
        }
 
@@ -469,14 +570,12 @@ VkResult radv_ResetDescriptorPool(
        RADV_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);
 
        if (!pool->host_memory_base) {
-               list_for_each_entry_safe(struct radv_descriptor_set, set,
-                                        &pool->vram_list, vram_list) {
-                       radv_descriptor_set_destroy(device, pool, set, false);
+               for(int i = 0; i < pool->entry_count; ++i) {
+                       radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
                }
+               pool->entry_count = 0;
        }
 
-       list_inithead(&pool->vram_list);
-
        pool->current_offset = 0;
        pool->host_memory_ptr = pool->host_memory_base;
 
@@ -493,7 +592,7 @@ VkResult radv_AllocateDescriptorSets(
 
        VkResult result = VK_SUCCESS;
        uint32_t i;
-       struct radv_descriptor_set *set;
+       struct radv_descriptor_set *set = NULL;
 
        /* allocate one descriptor set for each layout in pAllocateInfo */
        for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
@@ -544,7 +643,7 @@ static void write_texel_buffer_descriptor(struct radv_device *device,
        memcpy(dst, buffer_view->state, 4 * 4);
 
        if (cmd_buffer)
-               device->ws->cs_add_buffer(cmd_buffer->cs, buffer_view->bo, 7);
+               radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer_view->bo, 7);
        else
                *buffer_list = buffer_view->bo;
 }
@@ -556,7 +655,7 @@ static void write_buffer_descriptor(struct radv_device *device,
                                     const VkDescriptorBufferInfo *buffer_info)
 {
        RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
-       uint64_t va = device->ws->buffer_get_va(buffer->bo);
+       uint64_t va = radv_buffer_get_va(buffer->bo);
        uint32_t range = buffer_info->range;
 
        if (buffer_info->range == VK_WHOLE_SIZE)
@@ -574,7 +673,7 @@ static void write_buffer_descriptor(struct radv_device *device,
                S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
 
        if (cmd_buffer)
-               device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 7);
+               radv_cs_add_buffer(device->ws, cmd_buffer->cs, buffer->bo, 7);
        else
                *buffer_list = buffer->bo;
 }
@@ -585,7 +684,7 @@ static void write_dynamic_buffer_descriptor(struct radv_device *device,
                                             const VkDescriptorBufferInfo *buffer_info)
 {
        RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
-       uint64_t va = device->ws->buffer_get_va(buffer->bo);
+       uint64_t va = radv_buffer_get_va(buffer->bo);
        unsigned size = buffer_info->range;
 
        if (buffer_info->range == VK_WHOLE_SIZE)
@@ -603,14 +702,22 @@ write_image_descriptor(struct radv_device *device,
                       struct radv_cmd_buffer *cmd_buffer,
                       unsigned *dst,
                       struct radeon_winsys_bo **buffer_list,
+                      VkDescriptorType descriptor_type,
                       const VkDescriptorImageInfo *image_info)
 {
        RADV_FROM_HANDLE(radv_image_view, iview, image_info->imageView);
-       memcpy(dst, iview->descriptor, 8 * 4);
-       memcpy(dst + 8, iview->fmask_descriptor, 8 * 4);
+       uint32_t *descriptor;
+
+       if (descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
+               descriptor = iview->storage_descriptor;
+       } else {
+               descriptor = iview->descriptor;
+       }
+
+       memcpy(dst, descriptor, 16 * 4);
 
        if (cmd_buffer)
-               device->ws->cs_add_buffer(cmd_buffer->cs, iview->bo, 7);
+               radv_cs_add_buffer(device->ws, cmd_buffer->cs, iview->bo, 7);
        else
                *buffer_list = iview->bo;
 }
@@ -620,12 +727,13 @@ write_combined_image_sampler_descriptor(struct radv_device *device,
                                        struct radv_cmd_buffer *cmd_buffer,
                                        unsigned *dst,
                                        struct radeon_winsys_bo **buffer_list,
+                                       VkDescriptorType descriptor_type,
                                        const VkDescriptorImageInfo *image_info,
                                        bool has_sampler)
 {
        RADV_FROM_HANDLE(radv_sampler, sampler, image_info->sampler);
 
-       write_image_descriptor(device, cmd_buffer, dst, buffer_list, image_info);
+       write_image_descriptor(device, cmd_buffer, dst, buffer_list, descriptor_type, image_info);
        /* copy over sampler state */
        if (has_sampler)
                memcpy(dst + 16, sampler->state, 16);
@@ -696,10 +804,12 @@ void radv_update_descriptor_sets(
                        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                                write_image_descriptor(device, cmd_buffer, ptr, buffer_list,
+                                                      writeset->descriptorType,
                                                       writeset->pImageInfo + j);
                                break;
                        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                                write_combined_image_sampler_descriptor(device, cmd_buffer, ptr, buffer_list,
+                                                                       writeset->descriptorType,
                                                                        writeset->pImageInfo + j,
                                                                        !binding_layout->immutable_samplers_offset);
                                if (copy_immutable_samplers) {
@@ -725,8 +835,59 @@ void radv_update_descriptor_sets(
                }
 
        }
-       if (descriptorCopyCount)
-               radv_finishme("copy descriptors");
+
+       for (i = 0; i < descriptorCopyCount; i++) {
+               const VkCopyDescriptorSet *copyset = &pDescriptorCopies[i];
+               RADV_FROM_HANDLE(radv_descriptor_set, src_set,
+                                copyset->srcSet);
+               RADV_FROM_HANDLE(radv_descriptor_set, dst_set,
+                                copyset->dstSet);
+               const struct radv_descriptor_set_binding_layout *src_binding_layout =
+                       src_set->layout->binding + copyset->srcBinding;
+               const struct radv_descriptor_set_binding_layout *dst_binding_layout =
+                       dst_set->layout->binding + copyset->dstBinding;
+               uint32_t *src_ptr = src_set->mapped_ptr;
+               uint32_t *dst_ptr = dst_set->mapped_ptr;
+               struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
+               struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;
+
+               src_ptr += src_binding_layout->offset / 4;
+               dst_ptr += dst_binding_layout->offset / 4;
+
+               src_ptr += src_binding_layout->size * copyset->srcArrayElement / 4;
+               dst_ptr += dst_binding_layout->size * copyset->dstArrayElement / 4;
+
+               src_buffer_list += src_binding_layout->buffer_offset;
+               src_buffer_list += copyset->srcArrayElement;
+
+               dst_buffer_list += dst_binding_layout->buffer_offset;
+               dst_buffer_list += copyset->dstArrayElement;
+
+               for (j = 0; j < copyset->descriptorCount; ++j) {
+                       switch (src_binding_layout->type) {
+                       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+                       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
+                               unsigned src_idx = copyset->srcArrayElement + j;
+                               unsigned dst_idx = copyset->dstArrayElement + j;
+                               struct radv_descriptor_range *src_range, *dst_range;
+                               src_idx += src_binding_layout->dynamic_offset_offset;
+                               dst_idx += dst_binding_layout->dynamic_offset_offset;
+
+                               src_range = src_set->dynamic_descriptors + src_idx;
+                               dst_range = dst_set->dynamic_descriptors + dst_idx;
+                               *dst_range = *src_range;
+                               break;
+                       }
+                       default:
+                               memcpy(dst_ptr, src_ptr, src_binding_layout->size);
+                       }
+                       src_ptr += src_binding_layout->size / 4;
+                       dst_ptr += dst_binding_layout->size / 4;
+                       dst_buffer_list[j] = src_buffer_list[j];
+                       ++src_buffer_list;
+                       ++dst_buffer_list;
+               }
+       }
 }
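
Descriptor copies are now implemented instead of hitting the radv_finishme() stub: dynamic uniform/storage buffers copy their radv_descriptor_range, and every other type is a memcpy of the binding size plus the backing-buffer pointer. From the API side this services VkCopyDescriptorSet entries, for example (a minimal sketch; src_set and dst_set are previously allocated sets and the binding numbers are illustrative):

	VkCopyDescriptorSet copy = {
		.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET,
		.srcSet = src_set,
		.srcBinding = 0,
		.srcArrayElement = 0,
		.dstSet = dst_set,
		.dstBinding = 0,
		.dstArrayElement = 0,
		.descriptorCount = 1,
	};
	vkUpdateDescriptorSets(device, 0, NULL, 1, &copy);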
 
 void radv_UpdateDescriptorSets(
@@ -742,10 +903,10 @@ void radv_UpdateDescriptorSets(
                                    descriptorCopyCount, pDescriptorCopies);
 }
 
-VkResult radv_CreateDescriptorUpdateTemplateKHR(VkDevice _device,
-                                                const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
-                                                const VkAllocationCallbacks *pAllocator,
-                                                VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate)
+VkResult radv_CreateDescriptorUpdateTemplate(VkDevice _device,
+                                             const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
+                                             const VkAllocationCallbacks *pAllocator,
+                                             VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate)
 {
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_set_layout, set_layout, pCreateInfo->descriptorSetLayout);
@@ -760,6 +921,7 @@ VkResult radv_CreateDescriptorUpdateTemplateKHR(VkDevice _device,
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
        templ->entry_count = entry_count;
+       templ->bind_point = pCreateInfo->pipelineBindPoint;
 
        for (i = 0; i < entry_count; i++) {
                const VkDescriptorUpdateTemplateEntryKHR *entry = &pCreateInfo->pDescriptorUpdateEntries[i];
@@ -814,9 +976,9 @@ VkResult radv_CreateDescriptorUpdateTemplateKHR(VkDevice _device,
        return VK_SUCCESS;
 }
 
-void radv_DestroyDescriptorUpdateTemplateKHR(VkDevice _device,
-                                             VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
-                                             const VkAllocationCallbacks *pAllocator)
+void radv_DestroyDescriptorUpdateTemplate(VkDevice _device,
+                                          VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+                                          const VkAllocationCallbacks *pAllocator)
 {
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
@@ -866,10 +1028,12 @@ void radv_update_descriptor_set_with_template(struct radv_device *device,
                        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                                write_image_descriptor(device, cmd_buffer, pDst, buffer_list,
+                                                      templ->entry[i].descriptor_type,
                                                       (struct VkDescriptorImageInfo *) pSrc);
                                break;
                        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                                write_combined_image_sampler_descriptor(device, cmd_buffer, pDst, buffer_list,
+                                                                       templ->entry[i].descriptor_type,
                                                                        (struct VkDescriptorImageInfo *) pSrc,
                                                                        templ->entry[i].has_sampler);
                                if (templ->entry[i].immutable_samplers)
@@ -893,13 +1057,31 @@ void radv_update_descriptor_set_with_template(struct radv_device *device,
        }
 }
 
-void radv_UpdateDescriptorSetWithTemplateKHR(VkDevice _device,
-                                             VkDescriptorSet descriptorSet,
-                                             VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
-                                             const void *pData)
+void radv_UpdateDescriptorSetWithTemplate(VkDevice _device,
+                                          VkDescriptorSet descriptorSet,
+                                          VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+                                          const void *pData)
 {
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_descriptor_set, set, descriptorSet);
 
        radv_update_descriptor_set_with_template(device, NULL, set, descriptorUpdateTemplate, pData);
 }
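
The update-template entry points drop their KHR suffixes for the Vulkan 1.1 core names, and the template now records pCreateInfo->pipelineBindPoint in templ->bind_point. A minimal application-side sketch of creating and using a template, assuming <vulkan/vulkan.h>, <stddef.h>, and a valid device, set layout, pipeline layout, buffer, and descriptor set; the host-side struct and binding are illustrative:

	struct frame_data {
		VkDescriptorBufferInfo ubo;
	};

	VkDescriptorUpdateTemplateEntry entry = {
		.dstBinding = 0,
		.dstArrayElement = 0,
		.descriptorCount = 1,
		.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
		.offset = offsetof(struct frame_data, ubo),
		.stride = sizeof(VkDescriptorBufferInfo),
	};
	VkDescriptorUpdateTemplateCreateInfo template_info = {
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,
		.descriptorUpdateEntryCount = 1,
		.pDescriptorUpdateEntries = &entry,
		.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
		.descriptorSetLayout = set_layout,
		.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS, /* stored in templ->bind_point */
		.pipelineLayout = pipeline_layout,
		.set = 0,
	};
	VkDescriptorUpdateTemplate templ;
	vkCreateDescriptorUpdateTemplate(device, &template_info, NULL, &templ);

	struct frame_data data = {
		.ubo = { .buffer = buffer, .offset = 0, .range = VK_WHOLE_SIZE },
	};
	vkUpdateDescriptorSetWithTemplate(device, set, templ, &data);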
+
+
+VkResult radv_CreateSamplerYcbcrConversion(VkDevice device,
+                                          const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+                                          const VkAllocationCallbacks* pAllocator,
+                                          VkSamplerYcbcrConversion* pYcbcrConversion)
+{
+       *pYcbcrConversion = VK_NULL_HANDLE;
+       return VK_SUCCESS;
+}
+
+
+void radv_DestroySamplerYcbcrConversion(VkDevice device,
+                                       VkSamplerYcbcrConversion ycbcrConversion,
+                                       const VkAllocationCallbacks* pAllocator)
+{
+       /* Do nothing. */
+}