struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
                                 ? cmd_buffer->state.compute_pipeline
                                 : cmd_buffer->state.pipeline;
+ VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
+                                  VK_PIPELINE_BIND_POINT_COMPUTE :
+                                  VK_PIPELINE_BIND_POINT_GRAPHICS;
+ struct radv_descriptor_state *descriptors_state =
+         radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_pipeline_layout *layout = pipeline->layout;
struct radv_shader_variant *shader, *prev_shader;
unsigned offset;
return;
memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
- memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
+ memcpy((char*)ptr + layout->push_constant_size,
+        descriptors_state->dynamic_buffers,
         16 * layout->dynamic_offset_count);
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
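For reference, radv_get_descriptors_state() itself is not part of this excerpt. Assuming the command buffer simply keeps one radv_descriptor_state per pipeline bind point (the "descriptors" array member below is a guess, not taken from this patch), the helper would be little more than a lookup:

static struct radv_descriptor_state *
radv_get_descriptors_state(struct radv_cmd_buffer *cmd_buffer,
                           VkPipelineBindPoint bind_point)
{
        /* Only graphics (0) and compute (1) carry descriptor state. */
        assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ||
               bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);

        /* Hypothetical per-bind-point array in struct radv_cmd_buffer. */
        return &cmd_buffer->descriptors[bind_point];
}

The net effect is that graphics and compute stop sharing a single set of dynamic buffer descriptors: each bind point now resolves its own state through this lookup, as the descriptor-set binding hunk below does with the caller's pipelineBindPoint.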
unsigned dyn_idx = 0;
const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
+ struct radv_descriptor_state *descriptors_state =
+         radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet;
for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
- uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
+ uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
assert(dyn_idx < dynamicOffsetCount);
struct radv_descriptor_range *range = set->dynamic_descriptors + j;
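The remainder of this loop body is not shown. It presumably packs a 4-dword buffer descriptor for each dynamic binding into the slot that dst points at, which is what makes the 16 * layout->dynamic_offset_count upload in the first hunk line up. A rough sketch only, assuming range carries the buffer's base address and size (range->va and range->size are guessed member names) and folding in the caller's pDynamicOffsets entry; the hardware-specific format word in dst[3] is omitted:

uint64_t va = range->va + pDynamicOffsets[dyn_idx];  /* apply the dynamic offset */

dst[0] = va;                              /* buffer address, low 32 bits */
dst[1] = va >> 32;                        /* address high bits (plus HW fields) */
dst[2] = no_dynamic_bounds ? 0xffffffffu  /* RADV_DEBUG_NO_DYNAMIC_BOUNDS: unbounded */
                           : range->size; /* otherwise the declared range */
/* dst[3] would hold the hardware format/swizzle dword. */

The struct definitions are updated to match: dynamic_buffers is added to radv_descriptor_state and dropped from radv_cmd_buffer.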
uint32_t valid;
struct radv_push_descriptor_set push_set;
bool push_dirty;
+ uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
};
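Each slot in the new dynamic_buffers member is 4 dwords (16 bytes), matching both the idx * 4 indexing and the 16 * layout->dynamic_offset_count copy above. A hypothetical compile-time check, not part of the patch, that would keep those constants in sync:

/* Not in the patch: one dynamic buffer descriptor is 4 dwords (16 bytes). */
_Static_assert(sizeof(((struct radv_descriptor_state *)0)->dynamic_buffers) ==
               16 * MAX_DYNAMIC_BUFFERS,
               "dynamic_buffers must hold 16 bytes per dynamic buffer");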
struct radv_cmd_buffer {
uint32_t queue_family_index;
uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
- uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
VkShaderStageFlags push_constant_stages;
struct radv_descriptor_set meta_push_descriptors;