diff --git a/src/intel/vulkan/anv_descriptor_set.c b/src/intel/vulkan/anv_descriptor_set.c
index 6644d62b81e..8491436f14a 100644
--- a/src/intel/vulkan/anv_descriptor_set.c
+++ b/src/intel/vulkan/anv_descriptor_set.c
@@ -45,15 +45,24 @@ anv_descriptor_data_for_type(const struct anv_physical_device *device,
    switch (type) {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
       data = ANV_DESCRIPTOR_SAMPLER_STATE;
+      if (device->has_bindless_samplers)
+         data |= ANV_DESCRIPTOR_SAMPLED_IMAGE;
       break;
 
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
       data = ANV_DESCRIPTOR_SURFACE_STATE |
              ANV_DESCRIPTOR_SAMPLER_STATE;
+      if (device->has_bindless_images || device->has_bindless_samplers)
+         data |= ANV_DESCRIPTOR_SAMPLED_IMAGE;
       break;
 
    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+      data = ANV_DESCRIPTOR_SURFACE_STATE;
+      if (device->has_bindless_images)
+         data |= ANV_DESCRIPTOR_SAMPLED_IMAGE;
+      break;
+
+   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
       data = ANV_DESCRIPTOR_SURFACE_STATE;
       break;
 
@@ -63,6 +72,8 @@ anv_descriptor_data_for_type(const struct anv_physical_device *device,
       data = ANV_DESCRIPTOR_SURFACE_STATE;
       if (device->info.gen < 9)
          data |= ANV_DESCRIPTOR_IMAGE_PARAM;
+      if (device->has_bindless_images)
+         data |= ANV_DESCRIPTOR_STORAGE_IMAGE;
       break;
 
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
@@ -84,13 +95,58 @@ anv_descriptor_data_for_type(const struct anv_physical_device *device,
       unreachable("Unsupported descriptor type");
    }
 
+   /* On gen8 and above when we have softpin enabled, we also need to push
+    * SSBO address ranges so that we can use A64 messages in the shader.
+    */
+   if (device->has_a64_buffer_access &&
+       (type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
+        type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC))
+      data |= ANV_DESCRIPTOR_ADDRESS_RANGE;
+
+   /* On Ivy Bridge and Bay Trail, we need to swizzle textures in the shader.
+    * Do not handle VK_DESCRIPTOR_TYPE_STORAGE_IMAGE and
+    * VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT because they already must
+    * have identity swizzle.
+ */ + if (device->info.gen == 7 && !device->info.is_haswell && + (type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE || + type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)) + data |= ANV_DESCRIPTOR_TEXTURE_SWIZZLE; + return data; } static unsigned anv_descriptor_data_size(enum anv_descriptor_data data) { - return 0; + unsigned size = 0; + + if (data & ANV_DESCRIPTOR_SAMPLED_IMAGE) + size += sizeof(struct anv_sampled_image_descriptor); + + if (data & ANV_DESCRIPTOR_STORAGE_IMAGE) + size += sizeof(struct anv_storage_image_descriptor); + + if (data & ANV_DESCRIPTOR_IMAGE_PARAM) + size += BRW_IMAGE_PARAM_SIZE * 4; + + if (data & ANV_DESCRIPTOR_ADDRESS_RANGE) + size += sizeof(struct anv_address_range_descriptor); + + if (data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE) + size += sizeof(struct anv_texture_swizzle_descriptor); + + return size; +} + +static bool +anv_needs_descriptor_buffer(VkDescriptorType desc_type, + enum anv_descriptor_data desc_data) +{ + if (desc_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT || + anv_descriptor_data_size(desc_data) > 0) + return true; + return false; } /** Returns the size in bytes of each descriptor with the given layout */ @@ -102,7 +158,17 @@ anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout) return layout->array_size; } - return anv_descriptor_data_size(layout->data); + unsigned size = anv_descriptor_data_size(layout->data); + + /* For multi-planar bindings, we make every descriptor consume the maximum + * number of planes so we don't have to bother with walking arrays and + * adding things up every time. Fortunately, YCbCr samplers aren't all + * that common and likely won't be in the middle of big arrays. + */ + if (layout->max_plane_count > 1) + size *= layout->max_plane_count; + + return size; } /** Returns the size in bytes of each descriptor of the given type @@ -116,26 +182,97 @@ unsigned anv_descriptor_type_size(const struct anv_physical_device *pdevice, VkDescriptorType type) { - assert(type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT); + assert(type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT && + type != VK_DESCRIPTOR_TYPE_SAMPLER && + type != VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE && + type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER); + return anv_descriptor_data_size(anv_descriptor_data_for_type(pdevice, type)); } +static bool +anv_descriptor_data_supports_bindless(const struct anv_physical_device *pdevice, + enum anv_descriptor_data data, + bool sampler) +{ + if (data & ANV_DESCRIPTOR_ADDRESS_RANGE) { + assert(pdevice->has_a64_buffer_access); + return true; + } + + if (data & ANV_DESCRIPTOR_SAMPLED_IMAGE) { + assert(pdevice->has_bindless_images || pdevice->has_bindless_samplers); + return sampler ? 
pdevice->has_bindless_samplers : + pdevice->has_bindless_images; + } + + if (data & ANV_DESCRIPTOR_STORAGE_IMAGE) { + assert(pdevice->has_bindless_images); + return true; + } + + return false; +} + +bool +anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice, + const struct anv_descriptor_set_binding_layout *binding, + bool sampler) +{ + return anv_descriptor_data_supports_bindless(pdevice, binding->data, + sampler); +} + +bool +anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice, + const struct anv_descriptor_set_binding_layout *binding, + bool sampler) +{ + if (pdevice->always_use_bindless) + return anv_descriptor_supports_bindless(pdevice, binding, sampler); + + static const VkDescriptorBindingFlagBitsEXT flags_requiring_bindless = + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT | + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT; + + return (binding->flags & flags_requiring_bindless) != 0; +} + void anv_GetDescriptorSetLayoutSupport( - VkDevice device, + VkDevice _device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport) { + ANV_FROM_HANDLE(anv_device, device, _device); + const struct anv_physical_device *pdevice = + &device->instance->physicalDevice; + uint32_t surface_count[MESA_SHADER_STAGES] = { 0, }; + bool needs_descriptor_buffer = false; for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) { const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b]; + enum anv_descriptor_data desc_data = + anv_descriptor_data_for_type(pdevice, binding->descriptorType); + + if (anv_needs_descriptor_buffer(binding->descriptorType, desc_data)) + needs_descriptor_buffer = true; + switch (binding->descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: /* There is no real limit on samplers */ break; + case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: + /* Inline uniforms don't use a binding */ + break; + case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: + if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false)) + break; + if (binding->pImmutableSamplers) { for (uint32_t i = 0; i < binding->descriptorCount; i++) { ANV_FROM_HANDLE(anv_sampler, sampler, @@ -150,18 +287,26 @@ void anv_GetDescriptorSetLayoutSupport( break; default: + if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false)) + break; + anv_foreach_stage(s, binding->stageFlags) surface_count[s] += binding->descriptorCount; break; } } + for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) { + if (needs_descriptor_buffer) + surface_count[s] += 1; + } + bool supported = true; for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) { - /* Our maximum binding table size is 250 and we need to reserve 8 for - * render targets. 240 is a nice round number. + /* Our maximum binding table size is 240 and we need to reserve 8 for + * render targets. 
*/ - if (surface_count[s] >= 240) + if (surface_count[s] >= MAX_BINDING_TABLE_SIZE - MAX_RTS) supported = false; } @@ -226,7 +371,9 @@ VkResult anv_CreateDescriptorSetLayout( /* Initialize all binding_layout entries to -1 */ memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b])); + set_layout->binding[b].flags = 0; set_layout->binding[b].data = 0; + set_layout->binding[b].max_plane_count = 0; set_layout->binding[b].array_size = 0; set_layout->binding[b].immutable_samplers = NULL; } @@ -241,32 +388,44 @@ VkResult anv_CreateDescriptorSetLayout( for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) { const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j]; uint32_t b = binding->binding; - /* We temporarily store the pointer to the binding in the + /* We temporarily store pCreateInfo->pBindings[] index (plus one) in the * immutable_samplers pointer. This provides us with a quick-and-dirty * way to sort the bindings by binding number. */ - set_layout->binding[b].immutable_samplers = (void *)binding; + set_layout->binding[b].immutable_samplers = (void *)(uintptr_t)(j + 1); } - for (uint32_t b = 0; b <= max_binding; b++) { - const VkDescriptorSetLayoutBinding *binding = - (void *)set_layout->binding[b].immutable_samplers; - - if (binding == NULL) - continue; + const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT *binding_flags_info = + vk_find_struct_const(pCreateInfo->pNext, + DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT); - /* We temporarily stashed the pointer to the binding in the - * immutable_samplers pointer. Now that we've pulled it back out - * again, we reset immutable_samplers to NULL. + for (uint32_t b = 0; b <= max_binding; b++) { + /* We stashed the pCreateInfo->pBindings[] index (plus one) in the + * immutable_samplers pointer. Check for NULL (empty binding) and then + * reset it and compute the index. 
*/ + if (set_layout->binding[b].immutable_samplers == NULL) + continue; + const uint32_t info_idx = + (uintptr_t)(void *)set_layout->binding[b].immutable_samplers - 1; set_layout->binding[b].immutable_samplers = NULL; + const VkDescriptorSetLayoutBinding *binding = + &pCreateInfo->pBindings[info_idx]; + if (binding->descriptorCount == 0) continue; #ifndef NDEBUG set_layout->binding[b].type = binding->descriptorType; #endif + + if (binding_flags_info && binding_flags_info->bindingCount > 0) { + assert(binding_flags_info->bindingCount == pCreateInfo->bindingCount); + set_layout->binding[b].flags = + binding_flags_info->pBindingFlags[info_idx]; + } + set_layout->binding[b].data = anv_descriptor_data_for_type(&device->instance->physicalDevice, binding->descriptorType); @@ -282,15 +441,26 @@ VkResult anv_CreateDescriptorSetLayout( switch (binding->descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: + set_layout->binding[b].max_plane_count = 1; if (binding->pImmutableSamplers) { set_layout->binding[b].immutable_samplers = samplers; samplers += binding->descriptorCount; - for (uint32_t i = 0; i < binding->descriptorCount; i++) - set_layout->binding[b].immutable_samplers[i] = - anv_sampler_from_handle(binding->pImmutableSamplers[i]); + for (uint32_t i = 0; i < binding->descriptorCount; i++) { + ANV_FROM_HANDLE(anv_sampler, sampler, + binding->pImmutableSamplers[i]); + + set_layout->binding[b].immutable_samplers[i] = sampler; + if (set_layout->binding[b].max_plane_count < sampler->n_planes) + set_layout->binding[b].max_plane_count = sampler->n_planes; + } } break; + + case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: + set_layout->binding[b].max_plane_count = 1; + break; + default: break; } @@ -364,7 +534,9 @@ static void sha1_update_descriptor_set_binding_layout(struct mesa_sha1 *ctx, const struct anv_descriptor_set_binding_layout *layout) { + SHA1_UPDATE_VALUE(ctx, layout->flags); SHA1_UPDATE_VALUE(ctx, layout->data); + SHA1_UPDATE_VALUE(ctx, layout->max_plane_count); SHA1_UPDATE_VALUE(ctx, layout->array_size); SHA1_UPDATE_VALUE(ctx, layout->descriptor_index); SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_index); @@ -511,6 +683,13 @@ VkResult anv_CreateDescriptorPool( unsigned desc_data_size = anv_descriptor_data_size(desc_data) * pCreateInfo->pPoolSizes[i].descriptorCount; + /* Combined image sampler descriptors can take up to 3 slots if they + * hold a YCbCr image. + */ + if (pCreateInfo->pPoolSizes[i].type == + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) + desc_data_size *= 3; + if (pCreateInfo->pPoolSizes[i].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { /* Inline uniform blocks are specified to use the descriptor array @@ -533,10 +712,10 @@ VkResult anv_CreateDescriptorPool( * of them to 32B. 
*/ descriptor_bo_size += 32 * pCreateInfo->maxSets; - descriptor_bo_size = ALIGN(descriptor_bo_size, 4096); /* We align inline uniform blocks to 32B */ if (inline_info) descriptor_bo_size += 32 * inline_info->maxInlineUniformBlockBindings; + descriptor_bo_size = ALIGN(descriptor_bo_size, 4096); const size_t pool_size = pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) + @@ -584,6 +763,8 @@ VkResult anv_CreateDescriptorPool( &device->surface_state_pool, 4096); pool->surface_state_free_list = NULL; + list_inithead(&pool->desc_sets); + *pDescriptorPool = anv_descriptor_pool_to_handle(pool); return VK_SUCCESS; @@ -600,12 +781,19 @@ void anv_DestroyDescriptorPool( if (!pool) return; + list_for_each_entry_safe(struct anv_descriptor_set, set, + &pool->desc_sets, pool_link) { + anv_descriptor_set_layout_unref(device, set->layout); + } + if (pool->bo.size) { anv_gem_munmap(pool->bo.map, pool->bo.size); anv_vma_free(device, &pool->bo); anv_gem_close(device, pool->bo.gem_handle); + util_vma_heap_finish(&pool->bo_heap); } anv_state_stream_finish(&pool->surface_state_stream); + vk_free2(&device->alloc, pAllocator, pool); } @@ -617,6 +805,12 @@ VkResult anv_ResetDescriptorPool( ANV_FROM_HANDLE(anv_device, device, _device); ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool); + list_for_each_entry_safe(struct anv_descriptor_set, set, + &pool->desc_sets, pool_link) { + anv_descriptor_set_layout_unref(device, set->layout); + } + list_inithead(&pool->desc_sets); + pool->next = 0; pool->free_list = EMPTY; @@ -742,9 +936,9 @@ anv_descriptor_set_create(struct anv_device *device, /* Align the size to 32 so that alignment gaps don't cause extra holes * in the heap which can lead to bad performance. */ + uint32_t set_buffer_size = ALIGN(layout->descriptor_buffer_size, 32); uint64_t pool_vma_offset = - util_vma_heap_alloc(&pool->bo_heap, - ALIGN(layout->descriptor_buffer_size, 32), 32); + util_vma_heap_alloc(&pool->bo_heap, set_buffer_size, 32); if (pool_vma_offset == 0) { anv_descriptor_pool_free_set(pool, set); return vk_error(VK_ERROR_FRAGMENTED_POOL); @@ -752,7 +946,7 @@ anv_descriptor_set_create(struct anv_device *device, assert(pool_vma_offset >= POOL_HEAP_OFFSET && pool_vma_offset - POOL_HEAP_OFFSET <= INT32_MAX); set->desc_mem.offset = pool_vma_offset - POOL_HEAP_OFFSET; - set->desc_mem.alloc_size = layout->descriptor_buffer_size; + set->desc_mem.alloc_size = set_buffer_size; set->desc_mem.map = pool->bo.map + set->desc_mem.offset; set->desc_surface_state = anv_descriptor_pool_alloc_state(pool); @@ -791,11 +985,15 @@ anv_descriptor_set_create(struct anv_device *device, * UpdateDescriptorSets if needed. However, if the descriptor * set has an immutable sampler, UpdateDescriptorSets may never * touch it, so we need to make sure it's 100% valid now. + * + * We don't need to actually provide a sampler because the helper + * will always write in the immutable sampler regardless of what + * is in the sampler parameter. 
             */
-            desc[i] = (struct anv_descriptor) {
-               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
-               .sampler = layout->binding[b].immutable_samplers[i],
-            };
+            struct VkDescriptorImageInfo info = { };
+            anv_descriptor_set_write_image_view(device, set, &info,
+                                                VK_DESCRIPTOR_TYPE_SAMPLER,
+                                                b, i);
          }
       }
       desc += layout->binding[b].array_size;
@@ -807,6 +1005,8 @@
          anv_descriptor_pool_alloc_state(pool);
    }
 
+   list_addtail(&set->pool_link, &pool->desc_sets);
+
    *out_set = set;
 
    return VK_SUCCESS;
@@ -829,6 +1029,8 @@
    for (uint32_t b = 0; b < set->buffer_view_count; b++)
       anv_descriptor_pool_free_state(pool, set->buffer_views[b].surface_state);
 
+   list_del(&set->pool_link);
+
    anv_descriptor_pool_free_set(pool, set);
 }
 
@@ -883,6 +1085,35 @@ VkResult anv_FreeDescriptorSets(
    return VK_SUCCESS;
 }
 
+static void
+anv_descriptor_set_write_image_param(uint32_t *param_desc_map,
+                                     const struct brw_image_param *param)
+{
+#define WRITE_PARAM_FIELD(field, FIELD) \
+   for (unsigned i = 0; i < ARRAY_SIZE(param->field); i++) \
+      param_desc_map[BRW_IMAGE_PARAM_##FIELD##_OFFSET + i] = param->field[i]
+
+   WRITE_PARAM_FIELD(offset, OFFSET);
+   WRITE_PARAM_FIELD(size, SIZE);
+   WRITE_PARAM_FIELD(stride, STRIDE);
+   WRITE_PARAM_FIELD(tiling, TILING);
+   WRITE_PARAM_FIELD(swizzling, SWIZZLING);
+
+#undef WRITE_PARAM_FIELD
+}
+
+static uint32_t
+anv_surface_state_to_handle(struct anv_state state)
+{
+   /* Bits 31:12 of the bindless surface offset in the extended message
+    * descriptor are bits 25:6 of the byte-based address.
+    */
+   assert(state.offset >= 0);
+   uint32_t offset = state.offset;
+   assert((offset & 0x3f) == 0 && offset < (1 << 26));
+   return offset << 6;
+}
+
 void
 anv_descriptor_set_write_image_view(struct anv_device *device,
                                     struct anv_descriptor_set *set,
@@ -898,7 +1129,11 @@
    struct anv_image_view *image_view = NULL;
    struct anv_sampler *sampler = NULL;
 
-   assert(type == bind_layout->type);
+   /* We get called with just VK_DESCRIPTOR_TYPE_SAMPLER as part of descriptor
+    * set initialization to set the bindless samplers.
+    */
+   assert(type == bind_layout->type ||
+          type == VK_DESCRIPTOR_TYPE_SAMPLER);
 
    switch (type) {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
@@ -933,6 +1168,76 @@
       .image_view = image_view,
       .sampler = sampler,
    };
+
+   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
+                    element * anv_descriptor_size(bind_layout);
+
+   if (bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
+      struct anv_sampled_image_descriptor desc_data[3];
+      memset(desc_data, 0, sizeof(desc_data));
+
+      if (image_view) {
+         for (unsigned p = 0; p < image_view->n_planes; p++) {
+            struct anv_surface_state sstate =
+               (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
+               image_view->planes[p].general_sampler_surface_state :
+               image_view->planes[p].optimal_sampler_surface_state;
+            desc_data[p].image = anv_surface_state_to_handle(sstate.state);
+         }
+      }
+
+      if (sampler) {
+         for (unsigned p = 0; p < sampler->n_planes; p++)
+            desc_data[p].sampler = sampler->bindless_state.offset + p * 32;
+      }
+
+      /* We may have max_plane_count == 0 if this isn't a sampled image, but
+       * it can be no more than the size of our array of handles.
+       */
+      assert(bind_layout->max_plane_count <= ARRAY_SIZE(desc_data));
+      memcpy(desc_map, desc_data,
+             MAX2(1, bind_layout->max_plane_count) * sizeof(desc_data[0]));
+   }
+
+   if (bind_layout->data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
+      assert(!(bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM));
+      assert(image_view->n_planes == 1);
+      struct anv_storage_image_descriptor desc_data = {
+         .read_write = anv_surface_state_to_handle(
+                       image_view->planes[0].storage_surface_state.state),
+         .write_only = anv_surface_state_to_handle(
+                       image_view->planes[0].writeonly_storage_surface_state.state),
+      };
+      memcpy(desc_map, &desc_data, sizeof(desc_data));
+   }
+
+   if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
+      /* Storage images can only ever have one plane */
+      assert(image_view->n_planes == 1);
+      const struct brw_image_param *image_param =
+         &image_view->planes[0].storage_image_param;
+
+      anv_descriptor_set_write_image_param(desc_map, image_param);
+   }
+
+   if (image_view && (bind_layout->data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE)) {
+      assert(!(bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE));
+      struct anv_texture_swizzle_descriptor desc_data[3];
+      memset(desc_data, 0, sizeof(desc_data));
+
+      for (unsigned p = 0; p < image_view->n_planes; p++) {
+         desc_data[p] = (struct anv_texture_swizzle_descriptor) {
+            .swizzle = {
+               (uint8_t)image_view->planes[p].isl.swizzle.r,
+               (uint8_t)image_view->planes[p].isl.swizzle.g,
+               (uint8_t)image_view->planes[p].isl.swizzle.b,
+               (uint8_t)image_view->planes[p].isl.swizzle.a,
+            },
+         };
+      }
+      memcpy(desc_map, desc_data,
+             MAX2(1, bind_layout->max_plane_count) * sizeof(desc_data[0]));
+   }
 }
 
 void
@@ -954,6 +1259,32 @@ anv_descriptor_set_write_buffer_view(struct anv_device *device,
       .type = type,
       .buffer_view = buffer_view,
    };
+
+   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
+                    element * anv_descriptor_size(bind_layout);
+
+   if (bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
+      struct anv_sampled_image_descriptor desc_data = {
+         .image = anv_surface_state_to_handle(buffer_view->surface_state),
+      };
+      memcpy(desc_map, &desc_data, sizeof(desc_data));
+   }
+
+   if (bind_layout->data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
+      assert(!(bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM));
+      struct anv_storage_image_descriptor desc_data = {
+         .read_write = anv_surface_state_to_handle(
+                       buffer_view->storage_surface_state),
+         .write_only = anv_surface_state_to_handle(
+                       buffer_view->writeonly_storage_surface_state),
+      };
+      memcpy(desc_map, &desc_data, sizeof(desc_data));
+   }
+
+   if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
+      anv_descriptor_set_write_image_param(desc_map,
+                                           &buffer_view->storage_image_param);
+   }
 }
 
 void
@@ -974,6 +1305,9 @@ anv_descriptor_set_write_buffer(struct anv_device *device,
 
    assert(type == bind_layout->type);
 
+   struct anv_address bind_addr = anv_address_add(buffer->address, offset);
+   uint64_t bind_range = anv_buffer_get_range(buffer, offset, range);
+
    if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
        type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
       *desc = (struct anv_descriptor) {
@@ -988,8 +1322,8 @@
       &set->buffer_views[bind_layout->buffer_view_index + element];
 
    bview->format = anv_isl_format_for_descriptor_type(type);
-   bview->range = anv_buffer_get_range(buffer, offset, range);
-   bview->address = anv_address_add(buffer->address, offset);
+   bview->range = bind_range;
+   bview->address = bind_addr;
 
   /* If we're writing descriptors through a push command, we need to
    * allocate the surface state from the command buffer. Otherwise it will
@@ -999,14 +1333,24 @@
       bview->surface_state = anv_state_stream_alloc(alloc_stream, 64, 64);
 
    anv_fill_buffer_surface_state(device, bview->surface_state,
-                                 bview->format,
-                                 bview->address, bview->range, 1);
+                                 bview->format, bind_addr, bind_range, 1);
 
       *desc = (struct anv_descriptor) {
         .type = type,
         .buffer_view = bview,
      };
   }
+
+   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
+                    element * anv_descriptor_size(bind_layout);
+
+   if (bind_layout->data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
+      struct anv_address_range_descriptor desc = {
+         .address = anv_address_physical(bind_addr),
+         .range = bind_range,
+      };
+      memcpy(desc_map, &desc, sizeof(desc));
+   }
 }
 
 void
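
Editor's note on the descriptor-buffer encoding: the hunks above pack small fixed-size records into a flat descriptor buffer and turn a 64-byte-aligned surface-state offset into a bindless handle by shifting it left 6 bits. The standalone C sketch below illustrates both rules. It is not part of the patch: the four record layouts mirror the fields the patch itself reads and writes (the real definitions live in anv_private.h; the exact field types are inferred from usage), and the enum values and demo main() are invented for the example.

/* Standalone illustration -- not mesa code. Build with: cc -std=c99 demo.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in flag bits for enum anv_descriptor_data; the real values differ. */
enum anv_descriptor_data {
   ANV_DESCRIPTOR_SAMPLED_IMAGE   = 1 << 0,
   ANV_DESCRIPTOR_STORAGE_IMAGE   = 1 << 1,
   ANV_DESCRIPTOR_ADDRESS_RANGE   = 1 << 2,
   ANV_DESCRIPTOR_TEXTURE_SWIZZLE = 1 << 3,
};

/* Record layouts matching what anv_descriptor_set_write_image_view() and
 * anv_descriptor_set_write_buffer() memcpy() into the descriptor buffer. */
struct anv_sampled_image_descriptor   { uint32_t image, sampler; };
struct anv_storage_image_descriptor   { uint32_t read_write, write_only; };
struct anv_address_range_descriptor   { uint64_t address, range; };
struct anv_texture_swizzle_descriptor { uint8_t swizzle[4]; };

/* Mirrors anv_descriptor_data_size(): each flag contributes one record, so a
 * descriptor's buffer footprint is the sum over its set flags (multiplied by
 * max_plane_count for multi-planar sampled images). */
static unsigned
descriptor_data_size(enum anv_descriptor_data data)
{
   unsigned size = 0;
   if (data & ANV_DESCRIPTOR_SAMPLED_IMAGE)
      size += sizeof(struct anv_sampled_image_descriptor);
   if (data & ANV_DESCRIPTOR_STORAGE_IMAGE)
      size += sizeof(struct anv_storage_image_descriptor);
   if (data & ANV_DESCRIPTOR_ADDRESS_RANGE)
      size += sizeof(struct anv_address_range_descriptor);
   if (data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE)
      size += sizeof(struct anv_texture_swizzle_descriptor);
   return size;
}

/* Mirrors anv_surface_state_to_handle(): surface states sit on 64B boundaries
 * within a 64MB pool, so bits 25:6 of the byte offset become bits 31:12 of
 * the bindless handle. */
static uint32_t
surface_state_to_handle(uint32_t byte_offset)
{
   assert((byte_offset & 0x3f) == 0 && byte_offset < (1u << 26));
   return byte_offset << 6;
}

int
main(void)
{
   /* A combined image+sampler holding a 3-plane YCbCr image: one
    * anv_sampled_image_descriptor per plane. */
   unsigned per_plane = descriptor_data_size(ANV_DESCRIPTOR_SAMPLED_IMAGE);
   printf("YCbCr combined descriptor: %u bytes\n", per_plane * 3);   /* 24 */

   /* An SSBO on a softpin-capable part: one A64 address+range record. */
   printf("SSBO descriptor: %u bytes\n",
          descriptor_data_size(ANV_DESCRIPTOR_ADDRESS_RANGE));        /* 16 */

   /* A surface state at byte offset 0x1240 (64B aligned). */
   printf("bindless handle: 0x%08x\n", surface_state_to_handle(0x1240));
   return 0;
}

The 3-plane case is also why anv_descriptor_size() multiplies by max_plane_count and why anv_CreateDescriptorPool() triples the allowance for VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER in the hunks above.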