#include "anv_private.h"
+#include "vk_format_info.h"
+
/** \file anv_cmd_buffer.c
*
* This file contains all of the stuff for emitting commands into a command
* buffer.
*/
/* 0 isn't a valid config. This ensures that we always configure L3$. */
cmd_buffer->state.current_l3_config = 0;
- state->dirty = ~0;
+ state->dirty = 0;
state->vb_dirty = 0;
+ state->pending_pipe_bits = 0;
state->descriptors_dirty = 0;
state->push_constants_dirty = 0;
state->pipeline = NULL;
+ state->push_constant_stages = 0;
state->restart_index = UINT32_MAX;
state->dynamic = default_dynamic_state;
state->need_query_wa = true;
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
struct anv_render_pass_attachment *att = &pass->attachments[i];
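+ /* Derive the attachment's aspects from its VkFormat rather than from
+ * driver-private anv_format flags.
+ */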
+ VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
VkImageAspectFlags clear_aspects = 0;
- if (anv_format_is_color(att->format)) {
+ if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
/* color attachment */
if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
}
} else {
/* depthstencil attachment */
- if (att->format->has_depth &&
+ if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
}
- if (att->format->has_stencil &&
+ if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
}
}
}
-VkResult anv_ResetCommandBuffer(
- VkCommandBuffer commandBuffer,
- VkCommandBufferResetFlags flags)
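+/* Internal reset helper shared by vkResetCommandBuffer, vkResetCommandPool,
+ * and the implicit reset in vkBeginCommandBuffer.
+ */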
+static VkResult
+anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-
cmd_buffer->usage_flags = 0;
cmd_buffer->state.current_pipeline = UINT32_MAX;
anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
anv_cmd_state_reset(cmd_buffer);
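+
+ /* Recycle the state streams: return their blocks to the device's pools
+ * and start fresh streams for the reset command buffer.
+ */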
+ anv_state_stream_finish(&cmd_buffer->surface_state_stream);
+ anv_state_stream_init(&cmd_buffer->surface_state_stream,
+ &cmd_buffer->device->surface_state_block_pool);
+
+ anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
+ anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
+ &cmd_buffer->device->dynamic_state_block_pool);
return VK_SUCCESS;
}
+VkResult anv_ResetCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ VkCommandBufferResetFlags flags)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ return anv_cmd_buffer_reset(cmd_buffer);
+}
+
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
switch (cmd_buffer->device->info.gen) {
case 7:
if (cmd_buffer->device->info.is_haswell)
- return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
+ return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
else
return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
case 8:
* VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
* the command buffer in the recording state.
*/
- anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);
+ anv_cmd_buffer_reset(cmd_buffer);
cmd_buffer->usage_flags = pBeginInfo->flags;
assert(firstSet + descriptorSetCount <= MAX_SETS);
- uint32_t dynamic_slot = 0;
for (uint32_t i = 0; i < descriptorSetCount; i++) {
ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
set_layout = layout->set[firstSet + i].layout;
cmd_buffer->state.push_constants[s];
unsigned d = layout->set[firstSet + i].dynamic_offset_start;
- const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
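+ /* Dynamic offsets are consumed in order from pDynamicOffsets as we walk
+ * the set's dynamic descriptors.
+ */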
+ const uint32_t *offsets = pDynamicOffsets;
struct anv_descriptor *desc = set->descriptors;
for (unsigned b = 0; b < set_layout->binding_count; b++) {
unsigned array_size = set_layout->binding[b].array_size;
for (unsigned j = 0; j < array_size; j++) {
- uint32_t range = 0;
- if (desc->buffer_view)
- range = desc->buffer_view->range;
push->dynamic[d].offset = *(offsets++);
- push->dynamic[d].range = range;
+ push->dynamic[d].range = (desc->buffer_view) ?
+ desc->buffer_view->range : 0;
desc++;
d++;
}
state.offset + dword * 4, bo, offset);
}
-const struct anv_format *
-anv_format_for_descriptor_type(VkDescriptorType type)
+enum isl_format
+anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
switch (type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- return anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT);
+ return ISL_FORMAT_R32G32B32A32_FLOAT;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- return anv_format_for_vk_format(VK_FORMAT_UNDEFINED);
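+ /* Storage buffers are accessed with untyped (raw) messages, so they get
+ * ISL's RAW format.
+ */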
+ return ISL_FORMAT_RAW;
default:
unreachable("Invalid descriptor type");
}
}
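+
+/* Allocate a surface state for a null render target, dispatching on
+ * hardware generation like the other per-gen helpers in this file.
+ */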
+static struct anv_state
+anv_cmd_buffer_alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_framebuffer *fb)
+{
+ switch (cmd_buffer->device->info.gen) {
+ case 7:
+ if (cmd_buffer->device->info.is_haswell) {
+ return gen75_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+ } else {
+ return gen7_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+ }
+ case 8:
+ return gen8_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+ case 9:
+ return gen9_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+ default:
+ unreachable("Invalid hardware generation");
+ }
+}
+
VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage,
{
struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
struct anv_subpass *subpass = cmd_buffer->state.subpass;
- struct anv_pipeline_bind_map *map;
- uint32_t color_count, bias, state_offset;
+ struct anv_pipeline *pipeline;
+ uint32_t bias, state_offset;
switch (stage) {
- case MESA_SHADER_FRAGMENT:
- map = &cmd_buffer->state.pipeline->bindings[stage];
- bias = MAX_RTS;
- color_count = subpass->color_count;
- break;
case MESA_SHADER_COMPUTE:
- map = &cmd_buffer->state.compute_pipeline->bindings[stage];
+ pipeline = cmd_buffer->state.compute_pipeline;
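+ /* Slot 0 of the compute binding table is reserved for num_workgroups. */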
bias = 1;
- color_count = 0;
break;
default:
- map = &cmd_buffer->state.pipeline->bindings[stage];
+ pipeline = cmd_buffer->state.pipeline;
bias = 0;
- color_count = 0;
break;
}
- if (color_count + map->surface_count == 0) {
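+ /* Bail if the pipeline has no shader for this stage at all. */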
+ if (!anv_pipeline_has_stage(pipeline, stage)) {
+ *bt_state = (struct anv_state) { 0, };
+ return VK_SUCCESS;
+ }
+
+ struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
+ if (bias + map->surface_count == 0) {
*bt_state = (struct anv_state) { 0, };
return VK_SUCCESS;
}
if (bt_state->map == NULL)
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
- for (uint32_t a = 0; a < color_count; a++) {
- const struct anv_image_view *iview =
- fb->attachments[subpass->color_attachments[a]];
-
- assert(iview->color_rt_surface_state.alloc_size);
- bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
- add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
- iview->bo, iview->offset);
- }
-
if (stage == MESA_SHADER_COMPUTE &&
- cmd_buffer->state.compute_pipeline->cs_prog_data.uses_num_work_groups) {
+ get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;
surface_state =
anv_cmd_buffer_alloc_surface_state(cmd_buffer);
- const struct anv_format *format =
- anv_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+ const enum isl_format format =
+ anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
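+ /* The surface covers 12 bytes: the x, y and z workgroup counts. */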
anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
- format->isl_format, bo_offset, 12, 1);
+ format, bo_offset, 12, 1);
bt_map[0] = surface_state.offset + state_offset;
add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
uint32_t image = 0;
for (uint32_t s = 0; s < map->surface_count; s++) {
struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
- struct anv_descriptor_set *set =
- cmd_buffer->state.descriptors[binding->set];
- struct anv_descriptor *desc = &set->descriptors[binding->offset];
struct anv_state surface_state;
struct anv_bo *bo;
uint32_t bo_offset;
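+
+ /* Color attachments are now reached through the bind map using a sentinel
+ * descriptor set index instead of a separate render-target loop.
+ */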
+ if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
+ /* Color attachment binding */
+ assert(stage == MESA_SHADER_FRAGMENT);
+ assert(binding->binding == 0);
+ if (binding->index < subpass->color_count) {
+ const struct anv_image_view *iview =
+ fb->attachments[subpass->color_attachments[binding->index]];
+
+ assert(iview->color_rt_surface_state.alloc_size);
+ surface_state = iview->color_rt_surface_state;
+ add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
+ iview->bo, iview->offset);
+ } else {
+ /* Null render target */
+ surface_state =
+ anv_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
+ }
+
+ bt_map[bias + s] = surface_state.offset + state_offset;
+ continue;
+ }
+
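+ /* Resolve (set, binding, index) to a flat descriptor slot through the
+ * set layout.
+ */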
+ struct anv_descriptor_set *set =
+ cmd_buffer->state.descriptors[binding->set];
+ uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
+ struct anv_descriptor *desc = &set->descriptors[offset + binding->index];
+
switch (desc->type) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
/* Nothing for us to do here */
struct brw_image_param *image_param =
&cmd_buffer->state.push_constants[stage]->images[image++];
- anv_image_view_fill_image_param(cmd_buffer->device, desc->image_view,
- image_param);
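+ /* The image param is precomputed when the view is created; just copy it. */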
+ *image_param = desc->image_view->storage_image_param;
image_param->surface_idx = bias + s;
break;
}
struct brw_image_param *image_param =
&cmd_buffer->state.push_constants[stage]->images[image++];
- anv_buffer_view_fill_image_param(cmd_buffer->device, desc->buffer_view,
- image_param);
+ *image_param = desc->buffer_view->storage_image_param;
image_param->surface_idx = bias + s;
break;
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage, struct anv_state *state)
{
- struct anv_pipeline_bind_map *map;
+ struct anv_pipeline *pipeline;
if (stage == MESA_SHADER_COMPUTE)
- map = &cmd_buffer->state.compute_pipeline->bindings[stage];
+ pipeline = cmd_buffer->state.compute_pipeline;
else
- map = &cmd_buffer->state.pipeline->bindings[stage];
+ pipeline = cmd_buffer->state.pipeline;
+
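+ /* As with binding tables, stages with no shader have no samplers to emit. */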
+ if (!anv_pipeline_has_stage(pipeline, stage)) {
+ *state = (struct anv_state) { 0, };
+ return VK_SUCCESS;
+ }
+ struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
if (map->sampler_count == 0) {
*state = (struct anv_state) { 0, };
return VK_SUCCESS;
struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
struct anv_descriptor_set *set =
cmd_buffer->state.descriptors[binding->set];
- struct anv_descriptor *desc = &set->descriptors[binding->offset];
+ uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
+ struct anv_descriptor *desc = &set->descriptors[offset + binding->index];
if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
continue;
}
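+/* Emit samplers and binding tables for every dirty stage the current
+ * pipeline uses and return the mask of stages that were flushed. If the
+ * binding table block runs out of space, grab a new block and re-emit
+ * everything.
+ */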
+uint32_t
+anv_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
+{
+ VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
+ cmd_buffer->state.pipeline->active_stages;
+
+ VkResult result = VK_SUCCESS;
+ anv_foreach_stage(s, dirty) {
+ result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
+ &cmd_buffer->state.samplers[s]);
+ if (result != VK_SUCCESS)
+ break;
+ result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
+ &cmd_buffer->state.binding_tables[s]);
+ if (result != VK_SUCCESS)
+ break;
+ }
+
+ if (result != VK_SUCCESS) {
+ assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+ result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
+ assert(result == VK_SUCCESS);
+
+ /* Re-emit state base addresses so we get the new surface state base
+ * address before we start emitting binding tables etc.
+ */
+ anv_cmd_buffer_emit_state_base_address(cmd_buffer);
+
+ /* Re-emit all active binding tables */
+ dirty |= cmd_buffer->state.pipeline->active_stages;
+ anv_foreach_stage(s, dirty) {
+ result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
+ &cmd_buffer->state.samplers[s]);
+ if (result != VK_SUCCESS)
+ return result;
+ result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
+ &cmd_buffer->state.binding_tables[s]);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+ }
+
+ cmd_buffer->state.descriptors_dirty &= ~dirty;
+
+ return dirty;
+}
+
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
const void *data, uint32_t size, uint32_t alignment)
{
switch (cmd_buffer->device->info.gen) {
case 7:
- gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
+ if (cmd_buffer->device->info.is_haswell) {
+ gen75_cmd_buffer_set_subpass(cmd_buffer, subpass);
+ } else {
+ gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
+ }
break;
case 8:
gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage)
{
+ /* If we don't have this stage, bail. */
+ if (!anv_pipeline_has_stage(cmd_buffer->state.pipeline, stage))
+ return (struct anv_state) { .offset = 0 };
+
struct anv_push_constants *data =
cmd_buffer->state.push_constants[stage];
- struct brw_stage_prog_data *prog_data =
- cmd_buffer->state.pipeline->prog_data[stage];
+ const struct brw_stage_prog_data *prog_data =
+ anv_shader_bin_get_prog_data(cmd_buffer->state.pipeline->shaders[stage]);
/* If we don't actually have any push constants, bail. */
- if (data == NULL || prog_data->nr_params == 0)
+ if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)
return (struct anv_state) { .offset = 0 };
struct anv_state state =
struct anv_push_constants *data =
cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
- const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
+ const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
- const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
- const unsigned push_constant_data_size =
- (local_id_dwords + prog_data->nr_params) * 4;
- const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
- const unsigned param_aligned_count =
- reg_aligned_constant_size / sizeof(uint32_t);
-
/* If we don't actually have any push constants, bail. */
- if (reg_aligned_constant_size == 0)
+ if (cs_prog_data->push.total.size == 0)
return (struct anv_state) { .offset = 0 };
- const unsigned threads = pipeline->cs_thread_width_max;
- const unsigned total_push_constants_size =
- reg_aligned_constant_size * threads;
const unsigned push_constant_alignment =
cmd_buffer->device->info.gen < 8 ? 32 : 64;
const unsigned aligned_total_push_constants_size =
- ALIGN(total_push_constants_size, push_constant_alignment);
+ ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
struct anv_state state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
aligned_total_push_constants_size,
/* Walk through the param array and fill the buffer with data */
uint32_t *u32_map = state.map;
- brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
- reg_aligned_constant_size);
-
- /* Setup uniform data for the first thread */
- for (unsigned i = 0; i < prog_data->nr_params; i++) {
- uint32_t offset = (uintptr_t)prog_data->param[i];
- u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
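+ /* Cross-thread constants come first; they are uploaded once and shared
+ * by every thread.
+ */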
+ if (cs_prog_data->push.cross_thread.size > 0) {
+ assert(cs_prog_data->thread_local_id_index < 0 ||
+ cs_prog_data->thread_local_id_index >=
+ cs_prog_data->push.cross_thread.dwords);
+ for (unsigned i = 0;
+ i < cs_prog_data->push.cross_thread.dwords;
+ i++) {
+ uint32_t offset = (uintptr_t)prog_data->param[i];
+ u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
+ }
}
- /* Copy uniform data from the first thread to every other thread */
- const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
- for (unsigned t = 1; t < threads; t++) {
- memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
- &u32_map[local_id_dwords],
- uniform_data_size);
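+ /* Per-thread data follows: each thread gets its own copy of the remaining
+ * params, with the thread-local ID slot seeded with that thread's base
+ * invocation index (t * simd_size).
+ */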
+ if (cs_prog_data->push.per_thread.size > 0) {
+ for (unsigned t = 0; t < cs_prog_data->threads; t++) {
+ unsigned dst =
+ 8 * (cs_prog_data->push.per_thread.regs * t +
+ cs_prog_data->push.cross_thread.regs);
+ unsigned src = cs_prog_data->push.cross_thread.dwords;
+ for ( ; src < prog_data->nr_params; src++, dst++) {
+ if (src != cs_prog_data->thread_local_id_index) {
+ uint32_t offset = (uintptr_t)prog_data->param[src];
+ u32_map[dst] = *(uint32_t *)((uint8_t *)data + offset);
+ } else {
+ u32_map[dst] = t * cs_prog_data->simd_size;
+ }
+ }
+ }
}
if (!cmd_buffer->device->info.has_llc)
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
- anv_ResetCommandPool(_device, commandPool, 0);
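+ /* Destroying the pool frees its command buffers outright; we can no longer
+ * go through vkResetCommandPool, which must only reset them.
+ */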
+ list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
+ &pool->cmd_buffers, pool_link) {
+ anv_cmd_buffer_destroy(cmd_buffer);
+ }
anv_free2(&device->alloc, pAllocator, pool);
}
{
ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
- /* FIXME: vkResetCommandPool must not destroy its command buffers. The
- * Vulkan 1.0 spec requires that it only reset them:
- *
- * Resetting a command pool recycles all of the resources from all of
- * the command buffers allocated from the command pool back to the
- * command pool. All command buffers that have been allocated from the
- * command pool are put in the initial state.
- */
- list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
- &pool->cmd_buffers, pool_link) {
- anv_cmd_buffer_destroy(cmd_buffer);
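+ /* The spec requires that resetting a pool put all of its command buffers
+ * back in the initial state, not destroy them.
+ */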
+ list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
+ &pool->cmd_buffers, pool_link) {
+ anv_cmd_buffer_reset(cmd_buffer);
}
return VK_SUCCESS;