if (cmd_buffer->upload.upload_bo)
cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
+ free(cmd_buffer->push_descriptors.set.mapped_ptr);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
MESA_SHADER_COMPUTE);
}
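+/* Upload the CPU-side copy of the push descriptor set into the command
+ * buffer's upload BO and record its GPU virtual address, so it can be
+ * emitted like any other descriptor set afterwards.
+ */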
+static void
+radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_descriptor_set *set = &cmd_buffer->push_descriptors.set;
+ uint32_t *ptr = NULL;
+ unsigned bo_offset;
+
+ if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
+ &bo_offset,
+ (void**) &ptr))
+ return;
+
+ set->va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ set->va += bo_offset;
+
+ memcpy(ptr, set->mapped_ptr, set->size);
+}
+
static void
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
struct radv_pipeline *pipeline,
if (!cmd_buffer->state.descriptors_dirty)
return;
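+ /* Push descriptors live in host memory until they are flushed, so
+ * upload them first to make set->va valid before the sets are
+ * emitted below.
+ */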
+ if (cmd_buffer->state.push_descriptors_dirty)
+ radv_flush_push_descriptors(cmd_buffer);
+
for (i = 0; i < MAX_SETS; i++) {
if (!(cmd_buffer->state.descriptors_dirty & (1 << i)))
continue;
radv_emit_descriptor_set_userdata(cmd_buffer, pipeline, stages, set, i);
}
cmd_buffer->state.descriptors_dirty = 0;
+ cmd_buffer->state.push_descriptors_dirty = false;
}
static void
assert(cmd_buffer->cs->cdw <= cdw_max);
}
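+/* Make sure the host storage backing the push descriptor set is large
+ * enough. The allocation at least doubles each time and starts at 1024
+ * bytes; the cap assumes at most MAX_PUSH_DESCRIPTORS descriptors of at
+ * most 96 bytes each (the largest descriptor type).
+ */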
+static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_descriptor_set *set,
+ struct radv_descriptor_set_layout *layout)
+{
+ set->size = layout->size;
+ set->layout = layout;
+
+ if (cmd_buffer->push_descriptors.capacity < set->size) {
+ size_t new_size = MAX2(set->size, 1024);
+ new_size = MAX2(new_size, 2 * cmd_buffer->push_descriptors.capacity);
+ new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
+
+ free(set->mapped_ptr);
+ set->mapped_ptr = malloc(new_size);
+
+ if (!set->mapped_ptr) {
+ cmd_buffer->push_descriptors.capacity = 0;
+ cmd_buffer->record_fail = true;
+ return false;
+ }
+
+ cmd_buffer->push_descriptors.capacity = new_size;
+ }
+
+ return true;
+}
+
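+/* VK_KHR_push_descriptor entry point: descriptors are written directly
+ * into the command buffer's embedded set instead of a pool allocation,
+ * then the set is marked dirty so the next flush uploads and binds it.
+ */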
+void radv_CmdPushDescriptorSetKHR(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout _layout,
+ uint32_t set,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
+ struct radv_descriptor_set *push_set = &cmd_buffer->push_descriptors.set;
+
+ assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
+
+ if (!radv_init_push_descriptor_set(cmd_buffer, push_set, layout->set[set].layout))
+ return;
+
+ radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
+ radv_descriptor_set_to_handle(push_set),
+ descriptorWriteCount, pDescriptorWrites, 0, NULL);
+
+ cmd_buffer->state.descriptors[set] = push_set;
+ cmd_buffer->state.descriptors_dirty |= (1 << set);
+ cmd_buffer->state.push_descriptors_dirty = true;
+}
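+
+/* Application-side sketch (illustrative only, not part of this patch):
+ * no VkDescriptorSet needs to be allocated if the set's layout was
+ * created with VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR:
+ *
+ *   VkWriteDescriptorSet write = {
+ *      .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ *      .dstBinding = 0,
+ *      .descriptorCount = 1,
+ *      .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ *      .pBufferInfo = &buffer_info,
+ *   };
+ *   vkCmdPushDescriptorSetKHR(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ *                             pipeline_layout, 0, 1, &write);
+ */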
+
void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
VkPipelineLayout layout,
VkShaderStageFlags stageFlags,
if (!set_layout)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ set_layout->flags = pCreateInfo->flags;
+
/* We just allocate all the samplers at the end of the struct */
uint32_t *samplers = (uint32_t*)&set_layout->binding[max_binding + 1];
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
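+ /* The spec does not allow dynamic buffer descriptors in layouts
+ * created with the push descriptor flag.
+ */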
+ assert(!(pCreateInfo->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
set_layout->binding[b].dynamic_offset_count = 1;
set_layout->dynamic_shader_stages |= binding->stageFlags;
set_layout->binding[b].size = 0;
RADV_FROM_HANDLE(radv_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]);
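+ /* Push descriptor set layouts cannot be used to allocate sets from
+ * a pool.
+ */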
+ assert(!(layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
+
result = radv_descriptor_set_create(device, pool, NULL, layout, &set);
if (result != VK_SUCCESS)
break;
set->layout->binding + writeset->dstBinding;
uint32_t *ptr = set->mapped_ptr;
struct radeon_winsys_bo **buffer_list = set->descriptors;
+ /* Immutable samplers are not copied into push descriptors when they are
+ * allocated, so if we are writing push descriptors we have to copy the
+ * immutable samplers into them now.
+ */
+ const bool copy_immutable_samplers = cmd_buffer &&
+ binding_layout->immutable_samplers && !binding_layout->immutable_samplers_equal;
ptr += binding_layout->offset / 4;
ptr += binding_layout->size * writeset->dstArrayElement / 4;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
unsigned idx = writeset->dstArrayElement + j;
idx += binding_layout->dynamic_offset_offset;
+ assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
buffer_list, writeset->pBufferInfo + j);
break;
write_combined_image_sampler_descriptor(device, cmd_buffer, ptr, buffer_list,
writeset->pImageInfo + j,
!binding_layout->immutable_samplers);
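+ /* In a combined image+sampler descriptor the sampler occupies the
+ * 4 dwords following the 16-dword image part, hence ptr + 16.
+ */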
+ if (copy_immutable_samplers) {
+ const unsigned idx = writeset->dstArrayElement + j;
+ memcpy(ptr + 16, binding_layout->immutable_samplers + 4 * idx, 16);
+ }
break;
case VK_DESCRIPTOR_TYPE_SAMPLER:
- if (!binding_layout->immutable_samplers)
+ if (!binding_layout->immutable_samplers) {
write_sampler_descriptor(device, ptr,
writeset->pImageInfo + j);
+ } else if (copy_immutable_samplers) {
+ unsigned idx = writeset->dstArrayElement + j;
+ memcpy(ptr, binding_layout->immutable_samplers + 4 * idx, 16);
+ }
break;
default:
unreachable("unimplemented descriptor type");
};
struct radv_descriptor_set_layout {
+ /* The create flags for this descriptor set layout */
+ VkDescriptorSetLayoutCreateFlags flags;
+
/* Number of bindings in this descriptor set */
uint16_t binding_count;
.extensionName = VK_KHR_MAINTENANCE1_EXTENSION_NAME,
.specVersion = 1,
},
+ {
+ .extensionName = VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME,
+ .specVersion = 1,
+ },
{
.extensionName = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
.specVersion = 1,
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties2KHR *pProperties)
{
- return radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
+ radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
+
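+ /* Walk the pNext chain and fill in the extension structs we know. */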
+ vk_foreach_struct(ext, pProperties->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
+ VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
+ (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
+ properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
+ break;
+ }
+ default:
+ break;
+ }
+ }
}
static void radv_get_physical_device_queue_family_properties(
'VK_KHR_get_physical_device_properties2',
'VK_KHR_incremental_present',
'VK_KHR_maintenance1',
+ 'VK_KHR_push_descriptor',
'VK_KHR_sampler_mirror_clamp_to_edge',
'VK_KHR_shader_draw_parameters',
'VK_KHR_surface',
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS 16
#define MAX_PUSH_CONSTANTS_SIZE 128
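+/* Reported as maxPushDescriptors in
+ * VkPhysicalDevicePushDescriptorPropertiesKHR. */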
+#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_SAMPLES_LOG2 4
#define NUM_META_FS_KEYS 11
struct radeon_winsys_bo *descriptors[0];
};
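+/* Host-side shadow of the push descriptor set: set.mapped_ptr points at
+ * malloc'ed storage of `capacity` bytes rather than at a pool BO mapping.
+ */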
+struct radv_push_descriptor_set
+{
+ struct radv_descriptor_set set;
+ uint32_t capacity;
+};
+
struct radv_descriptor_pool {
struct radeon_winsys_bo *bo;
uint8_t *mapped_ptr;
uint32_t vb_dirty;
radv_cmd_dirty_mask_t dirty;
bool vertex_descriptors_dirty;
+ bool push_descriptors_dirty;
struct radv_pipeline * pipeline;
struct radv_pipeline * emitted_pipeline;
uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
VkShaderStageFlags push_constant_stages;
+ struct radv_push_descriptor_set push_descriptors;
struct radv_cmd_buffer_upload upload;