switch (cmd_buffer->device->info.gen) {
case 7:
if (cmd_buffer->device->info.is_haswell)
- return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
+ return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
else
return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
case 8:
assert(firstSet + descriptorSetCount < MAX_SETS);
- uint32_t dynamic_slot = 0;
for (uint32_t i = 0; i < descriptorSetCount; i++) {
ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
set_layout = layout->set[firstSet + i].layout;
cmd_buffer->state.push_constants[s];
unsigned d = layout->set[firstSet + i].dynamic_offset_start;
- const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
+ const uint32_t *offsets = pDynamicOffsets;
struct anv_descriptor *desc = set->descriptors;
for (unsigned b = 0; b < set_layout->binding_count; b++) {
unsigned array_size = set_layout->binding[b].array_size;
for (unsigned j = 0; j < array_size; j++) {
- uint32_t range = 0;
- if (desc->buffer_view)
- range = desc->buffer_view->range;
push->dynamic[d].offset = *(offsets++);
- push->dynamic[d].range = range;
+ push->dynamic[d].range = (desc->buffer_view) ?
+ desc->buffer_view->range : 0;
desc++;
d++;
}
{
struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
struct anv_subpass *subpass = cmd_buffer->state.subpass;
- struct anv_pipeline_bind_map *map;
+ struct anv_pipeline *pipeline;
uint32_t bias, state_offset;
switch (stage) {
case MESA_SHADER_COMPUTE:
- map = &cmd_buffer->state.compute_pipeline->bindings[stage];
+ pipeline = cmd_buffer->state.compute_pipeline;
bias = 1;
break;
default:
- map = &cmd_buffer->state.pipeline->bindings[stage];
+ pipeline = cmd_buffer->state.pipeline;
bias = 0;
break;
}
+ if (!anv_pipeline_has_stage(pipeline, stage)) {
+ *bt_state = (struct anv_state) { 0, };
+ return VK_SUCCESS;
+ }
+
+ struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
if (bias + map->surface_count == 0) {
*bt_state = (struct anv_state) { 0, };
return VK_SUCCESS;
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage, struct anv_state *state)
{
- struct anv_pipeline_bind_map *map;
+ struct anv_pipeline *pipeline;
if (stage == MESA_SHADER_COMPUTE)
- map = &cmd_buffer->state.compute_pipeline->bindings[stage];
+ pipeline = cmd_buffer->state.compute_pipeline;
else
- map = &cmd_buffer->state.pipeline->bindings[stage];
+ pipeline = cmd_buffer->state.pipeline;
+ if (!anv_pipeline_has_stage(pipeline, stage)) {
+ *state = (struct anv_state) { 0, };
+ return VK_SUCCESS;
+ }
+
+ struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
if (map->sampler_count == 0) {
*state = (struct anv_state) { 0, };
return VK_SUCCESS;
return VK_SUCCESS;
}
+/* Flush dirty descriptor state: for each shader stage that is both marked
+ * dirty and active in the current graphics pipeline, (re-)emit its sampler
+ * state and its binding table.
+ *
+ * Returns the mask of stages that were emitted, so the caller knows which
+ * stage pointers need to be re-programmed.  NOTE(review): the return type
+ * is uint32_t but the value is a VkShaderStageFlags mask — confirm callers
+ * treat it as such.
+ */
+uint32_t
+anv_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
+{
+ VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
+ cmd_buffer->state.pipeline->active_stages;
+
+ VkResult result = VK_SUCCESS;
+ anv_foreach_stage(s, dirty) {
+ result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
+ &cmd_buffer->state.samplers[s]);
+ if (result != VK_SUCCESS)
+ break;
+ result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
+ &cmd_buffer->state.binding_tables[s]);
+ if (result != VK_SUCCESS)
+ break;
+ }
+
+ if (result != VK_SUCCESS) {
+ /* The only failure we expect on this path is running out of space in
+ * the current binding table block; grab a fresh block and start over.
+ */
+ assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+ result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
+ assert(result == VK_SUCCESS);
+
+ /* Re-emit state base addresses so we get the new surface state base
+ * address before we start emitting binding tables etc.
+ */
+ anv_cmd_buffer_emit_state_base_address(cmd_buffer);
+
+ /* Re-emit all active binding tables */
+ dirty |= cmd_buffer->state.pipeline->active_stages;
+ anv_foreach_stage(s, dirty) {
+ result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
+ &cmd_buffer->state.samplers[s]);
+ /* A second allocation failure right after a fresh block means the
+ * requirements exceed a whole block — treat it as fatal and bail. */
+ if (result != VK_SUCCESS)
+ return result;
+ result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
+ &cmd_buffer->state.binding_tables[s]);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+ }
+
+ /* Everything in `dirty` has now been (re-)emitted. */
+ cmd_buffer->state.descriptors_dirty &= ~dirty;
+
+ return dirty;
+}
+
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
const void *data, uint32_t size, uint32_t alignment)
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage)
{
+ /* If we don't have this stage, bail. */
+ if (!anv_pipeline_has_stage(cmd_buffer->state.pipeline, stage))
+ return (struct anv_state) { .offset = 0 };
+
struct anv_push_constants *data =
cmd_buffer->state.push_constants[stage];
const struct brw_stage_prog_data *prog_data =
- cmd_buffer->state.pipeline->prog_data[stage];
+ anv_shader_bin_get_prog_data(cmd_buffer->state.pipeline->shaders[stage]);
/* If we don't actually have any push constants, bail. */
- if (data == NULL || prog_data->nr_params == 0)
+ if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)
return (struct anv_state) { .offset = 0 };
struct anv_state state =