#include "anv_private.h"
#include "vk_format_info.h"
+#include "vk_util.h"
/** \file anv_cmd_buffer.c
*
.front = 0u,
.back = 0u,
},
+ .line_stipple = {
+ .factor = 0u,
+ .pattern = 0u,
+ },
};
-void
+/**
+ * Copy the dynamic state from src to dest based on the copy_mask.
+ *
+ * Avoid copying states that have not changed, except for VIEWPORT, SCISSOR
+ * and BLEND_CONSTANTS, which are always copied when they are in the
+ * copy_mask.
+ *
+ * Returns a mask of the states that actually changed.
+ */
+anv_cmd_dirty_mask_t
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
const struct anv_dynamic_state *src,
- uint32_t copy_mask)
+ anv_cmd_dirty_mask_t copy_mask)
{
- if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+ anv_cmd_dirty_mask_t changed = 0;
+
+ if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
dest->viewport.count = src->viewport.count;
typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
src->viewport.count);
+ changed |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+ if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
dest->scissor.count = src->scissor.count;
typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
src->scissor.count);
+ changed |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
- dest->line_width = src->line_width;
+ if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
+ typed_memcpy(dest->blend_constants, src->blend_constants, 4);
+ changed |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
+ }
- if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
- dest->depth_bias = src->depth_bias;
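+ /* Helper for the scalar state fields below: copy a field only when its
+ * value actually differs, and accumulate the matching dirty flag into
+ * `changed` so unchanged state is never reported as dirty.
+ */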
+#define ANV_CMP_COPY(field, flag) \
+ if (copy_mask & flag) { \
+ if (dest->field != src->field) { \
+ dest->field = src->field; \
+ changed |= flag; \
+ } \
+ }
- if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
- typed_memcpy(dest->blend_constants, src->blend_constants, 4);
+ ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH);
- if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
- dest->depth_bounds = src->depth_bounds;
+ ANV_CMP_COPY(depth_bias.bias, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
+ ANV_CMP_COPY(depth_bias.clamp, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
+ ANV_CMP_COPY(depth_bias.slope, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
- dest->stencil_compare_mask = src->stencil_compare_mask;
+ ANV_CMP_COPY(depth_bounds.min, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
+ ANV_CMP_COPY(depth_bounds.max, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
- dest->stencil_write_mask = src->stencil_write_mask;
+ ANV_CMP_COPY(stencil_compare_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
+ ANV_CMP_COPY(stencil_compare_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
- dest->stencil_reference = src->stencil_reference;
+ ANV_CMP_COPY(stencil_write_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
+ ANV_CMP_COPY(stencil_write_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
+
+ ANV_CMP_COPY(stencil_reference.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
+ ANV_CMP_COPY(stencil_reference.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
+
+ ANV_CMP_COPY(line_stipple.factor, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
+ ANV_CMP_COPY(line_stipple.pattern, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
+
+#undef ANV_CMP_COPY
+
+ return changed;
}
static void
memset(state, 0, sizeof(*state));
+ state->current_pipeline = UINT32_MAX;
state->restart_index = UINT32_MAX;
- state->dynamic = default_dynamic_state;
+ state->gfx.dynamic = default_dynamic_state;
+}
+
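+ /* Free the push descriptor sets lazily allocated for this bind point,
+ * dropping the descriptor set layout reference each one holds.
+ */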
+static void
+anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_cmd_pipeline_state *pipe_state)
+{
+ for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++) {
+ if (pipe_state->push_descriptors[i]) {
+ anv_descriptor_set_layout_unref(cmd_buffer->device,
+ pipe_state->push_descriptors[i]->set.layout);
+ vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
+ }
+ }
}
static void
{
struct anv_cmd_state *state = &cmd_buffer->state;
- for (uint32_t i = 0; i < ARRAY_SIZE(state->push_descriptors); i++)
- vk_free(&cmd_buffer->pool->alloc, state->push_descriptors[i]);
-
- for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
- vk_free(&cmd_buffer->pool->alloc, state->push_constants[i]);
+ anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
+ anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);
vk_free(&cmd_buffer->pool->alloc, state->attachments);
}
anv_cmd_state_init(cmd_buffer);
}
-VkResult
-anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
- gl_shader_stage stage, uint32_t size)
-{
- struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];
-
- if (*ptr == NULL) {
- *ptr = vk_alloc(&cmd_buffer->pool->alloc, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (*ptr == NULL) {
- anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
- } else if ((*ptr)->size < size) {
- *ptr = vk_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (*ptr == NULL) {
- anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
- }
- (*ptr)->size = size;
-
- return VK_SUCCESS;
-}
-
static VkResult anv_create_cmd_buffer(
struct anv_device * device,
struct anv_cmd_pool * pool,
if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &cmd_buffer->base,
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
+
cmd_buffer->batch.status = VK_SUCCESS;
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
anv_cmd_state_finish(cmd_buffer);
+ vk_object_base_finish(&cmd_buffer->base);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
cmd_buffer->usage_flags = 0;
- cmd_buffer->state.current_pipeline = UINT32_MAX;
+ cmd_buffer->perf_query_pool = NULL;
anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
anv_cmd_state_reset(cmd_buffer);
return anv_cmd_buffer_reset(cmd_buffer);
}
+#define anv_genX_call(devinfo, func, ...) \
+ switch ((devinfo)->gen) { \
+ case 7: \
+ if ((devinfo)->is_haswell) { \
+ gen75_##func(__VA_ARGS__); \
+ } else { \
+ gen7_##func(__VA_ARGS__); \
+ } \
+ break; \
+ case 8: \
+ gen8_##func(__VA_ARGS__); \
+ break; \
+ case 9: \
+ gen9_##func(__VA_ARGS__); \
+ break; \
+ case 10: \
+ gen10_##func(__VA_ARGS__); \
+ break; \
+ case 11: \
+ gen11_##func(__VA_ARGS__); \
+ break; \
+ case 12: \
+ gen12_##func(__VA_ARGS__); \
+ break; \
+ default: \
+ assert(!"Unknown hardware generation"); \
+ }
+
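+ /* anv_genX_call dispatches to the per-generation build of a function by
+ * pasting the gen prefix onto its name; on a gen9 part, for example, the
+ * call below resolves to gen9_cmd_buffer_emit_state_base_address().
+ */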
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
- switch (cmd_buffer->device->info.gen) {
- case 7:
- if (cmd_buffer->device->info.is_haswell)
- return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
- else
- return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
- case 8:
- return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
- case 9:
- return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
- case 10:
- return gen10_cmd_buffer_emit_state_base_address(cmd_buffer);
- default:
- unreachable("unsupported gen\n");
- }
+ anv_genX_call(&cmd_buffer->device->info,
+ cmd_buffer_emit_state_base_address,
+ cmd_buffer);
+}
+
+void
+anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_image *image,
+ VkImageAspectFlagBits aspect,
+ enum isl_aux_usage aux_usage,
+ uint32_t level,
+ uint32_t base_layer,
+ uint32_t layer_count)
+{
+ anv_genX_call(&cmd_buffer->device->info,
+ cmd_buffer_mark_image_written,
+ cmd_buffer, image, aspect, aux_usage,
+ level, base_layer, layer_count);
+}
+
+void
+anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
+{
+ anv_genX_call(&cmd_buffer->device->info,
+ cmd_emit_conditional_render_predicate,
+ cmd_buffer);
+}
+
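+ /* Copy src over dst only when the contents differ. Returns true when an
+ * update actually happened so the caller can flag dependent state dirty.
+ */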
+static bool
+mem_update(void *dst, const void *src, size_t size)
+{
+ if (memcmp(dst, src, size) == 0)
+ return false;
+
+ memcpy(dst, src, size);
+ return true;
+}
+
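+ /* Compare the SHA1s in the new pipeline's bind map against the ones the
+ * command buffer last saw: a changed surface or sampler layout dirties the
+ * stage's descriptors, while a changed push constant layout dirties its
+ * push constants.
+ */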
+static void
+set_dirty_for_bind_map(struct anv_cmd_buffer *cmd_buffer,
+ gl_shader_stage stage,
+ const struct anv_pipeline_bind_map *map)
+{
+ if (mem_update(cmd_buffer->state.surface_sha1s[stage],
+ map->surface_sha1, sizeof(map->surface_sha1)))
+ cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
+
+ if (mem_update(cmd_buffer->state.sampler_sha1s[stage],
+ map->sampler_sha1, sizeof(map->sampler_sha1)))
+ cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
+
+ if (mem_update(cmd_buffer->state.push_sha1s[stage],
+ map->push_sha1, sizeof(map->push_sha1)))
+ cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(stage);
}
void anv_CmdBindPipeline(
ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
switch (pipelineBindPoint) {
- case VK_PIPELINE_BIND_POINT_COMPUTE:
- cmd_buffer->state.compute_pipeline = pipeline;
- cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
- cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
- cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
+ case VK_PIPELINE_BIND_POINT_COMPUTE: {
+ struct anv_compute_pipeline *compute_pipeline =
+ anv_pipeline_to_compute(pipeline);
+ if (cmd_buffer->state.compute.pipeline == compute_pipeline)
+ return;
+
+ cmd_buffer->state.compute.pipeline = compute_pipeline;
+ cmd_buffer->state.compute.pipeline_dirty = true;
+ set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
+ &compute_pipeline->cs->bind_map);
break;
+ }
- case VK_PIPELINE_BIND_POINT_GRAPHICS:
- cmd_buffer->state.pipeline = pipeline;
- cmd_buffer->state.vb_dirty |= pipeline->vb_used;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
- cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
- cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
+ case VK_PIPELINE_BIND_POINT_GRAPHICS: {
+ struct anv_graphics_pipeline *gfx_pipeline =
+ anv_pipeline_to_graphics(pipeline);
+ if (cmd_buffer->state.gfx.pipeline == gfx_pipeline)
+ return;
+
+ cmd_buffer->state.gfx.pipeline = gfx_pipeline;
+ cmd_buffer->state.gfx.vb_dirty |= gfx_pipeline->vb_used;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
+
+ anv_foreach_stage(stage, gfx_pipeline->active_stages) {
+ set_dirty_for_bind_map(cmd_buffer, stage,
+ &gfx_pipeline->shaders[stage]->bind_map);
+ }
/* Apply the dynamic state from the pipeline */
- cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
- anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
- &pipeline->dynamic_state,
- pipeline->dynamic_state_mask);
+ cmd_buffer->state.gfx.dirty |=
+ anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
+ &gfx_pipeline->dynamic_state,
+ gfx_pipeline->dynamic_state_mask);
break;
+ }
default:
assert(!"invalid bind point");
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
const uint32_t total_count = firstViewport + viewportCount;
- if (cmd_buffer->state.dynamic.viewport.count < total_count)
- cmd_buffer->state.dynamic.viewport.count = total_count;
+ if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
+ cmd_buffer->state.gfx.dynamic.viewport.count = total_count;
- memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
+ memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
pViewports, viewportCount * sizeof(*pViewports));
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
const uint32_t total_count = firstScissor + scissorCount;
- if (cmd_buffer->state.dynamic.scissor.count < total_count)
- cmd_buffer->state.dynamic.scissor.count = total_count;
+ if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
+ cmd_buffer->state.gfx.dynamic.scissor.count = total_count;
- memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
+ memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
pScissors, scissorCount * sizeof(*pScissors));
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- cmd_buffer->state.dynamic.line_width = lineWidth;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
+ cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
- cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
- cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
+ cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
+ cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
+ cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void anv_CmdSetBlendConstants(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- memcpy(cmd_buffer->state.dynamic.blend_constants,
+ memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
blendConstants, sizeof(float) * 4);
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void anv_CmdSetDepthBounds(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
- cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
+ cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
+ cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void anv_CmdSetStencilCompareMask(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
- cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
+ cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
- cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
+ cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void anv_CmdSetStencilWriteMask(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
- cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
+ cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
- cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
+ cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void anv_CmdSetStencilReference(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
- cmd_buffer->state.dynamic.stencil_reference.front = reference;
+ cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
- cmd_buffer->state.dynamic.stencil_reference.back = reference;
+ cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;
+
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+}
+
+void anv_CmdSetLineStippleEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t lineStippleFactor,
+ uint16_t lineStipplePattern)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+ cmd_buffer->state.gfx.dynamic.line_stipple.factor = lineStippleFactor;
+ cmd_buffer->state.gfx.dynamic.line_stipple.pattern = lineStipplePattern;
+
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
+}
+
+static void
+anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point,
+ struct anv_pipeline_layout *layout,
+ uint32_t set_index,
+ struct anv_descriptor_set *set,
+ uint32_t *dynamic_offset_count,
+ const uint32_t **dynamic_offsets)
+{
+ struct anv_descriptor_set_layout *set_layout =
+ layout->set[set_index].layout;
+
+ VkShaderStageFlags stages = set_layout->shader_stages;
+ struct anv_cmd_pipeline_state *pipe_state;
+
+ switch (bind_point) {
+ case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ stages &= VK_SHADER_STAGE_ALL_GRAPHICS;
+ pipe_state = &cmd_buffer->state.gfx.base;
+ break;
+
+ case VK_PIPELINE_BIND_POINT_COMPUTE:
+ stages &= VK_SHADER_STAGE_COMPUTE_BIT;
+ pipe_state = &cmd_buffer->state.compute.base;
+ break;
+
+ default:
+ unreachable("invalid bind point");
+ }
+
+ VkShaderStageFlags dirty_stages = 0;
+ if (pipe_state->descriptors[set_index] != set) {
+ pipe_state->descriptors[set_index] = set;
+ dirty_stages |= stages;
+ }
+
+ /* If it's a push descriptor set, we have to flag things as dirty
+ * regardless of whether or not the CPU-side data structure changed as we
+ * may have edited it in-place.
+ */
+ if (set->pool == NULL)
+ dirty_stages |= stages;
+
+ if (dynamic_offsets) {
+ if (set_layout->dynamic_offset_count > 0) {
+ uint32_t dynamic_offset_start =
+ layout->set[set_index].dynamic_offset_start;
+
+ anv_foreach_stage(stage, stages) {
+ struct anv_push_constants *push =
+ &cmd_buffer->state.push_constants[stage];
+ uint32_t *push_offsets =
+ &push->dynamic_offsets[dynamic_offset_start];
+
+ /* Assert that everything is in range */
+ assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
+ assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
+ ARRAY_SIZE(push->dynamic_offsets));
+
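+ /* Only walk the dynamic buffers this stage actually uses, and mark the
+ * stage dirty only if one of its offsets really changed.
+ */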
+ unsigned mask = set_layout->stage_dynamic_offsets[stage];
+ STATIC_ASSERT(MAX_DYNAMIC_BUFFERS <= sizeof(mask) * 8);
+ while (mask) {
+ int i = u_bit_scan(&mask);
+ if (push_offsets[i] != (*dynamic_offsets)[i]) {
+ push_offsets[i] = (*dynamic_offsets)[i];
+ dirty_stages |= mesa_to_vk_shader_stage(stage);
+ }
+ }
+ }
+
+ *dynamic_offsets += set_layout->dynamic_offset_count;
+ *dynamic_offset_count -= set_layout->dynamic_offset_count;
+ }
+ }
+
+ cmd_buffer->state.descriptors_dirty |= dirty_stages;
+ cmd_buffer->state.push_constants_dirty |= dirty_stages;
}
void anv_CmdBindDescriptorSets(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
- struct anv_descriptor_set_layout *set_layout;
- assert(firstSet + descriptorSetCount < MAX_SETS);
+ assert(firstSet + descriptorSetCount <= MAX_SETS);
- uint32_t dynamic_slot = 0;
for (uint32_t i = 0; i < descriptorSetCount; i++) {
ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
- set_layout = layout->set[firstSet + i].layout;
-
- cmd_buffer->state.descriptors[firstSet + i] = set;
-
- if (set_layout->dynamic_offset_count > 0) {
- uint32_t dynamic_offset_start =
- layout->set[firstSet + i].dynamic_offset_start;
-
- /* Assert that everything is in range */
- assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
- ARRAY_SIZE(cmd_buffer->state.dynamic_offsets));
- assert(dynamic_slot + set_layout->dynamic_offset_count <=
- dynamicOffsetCount);
-
- typed_memcpy(&cmd_buffer->state.dynamic_offsets[dynamic_offset_start],
- &pDynamicOffsets[dynamic_slot],
- set_layout->dynamic_offset_count);
-
- dynamic_slot += set_layout->dynamic_offset_count;
- }
-
- cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
+ anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
+ layout, firstSet + i, set,
+ &dynamicOffsetCount,
+ &pDynamicOffsets);
}
}
for (uint32_t i = 0; i < bindingCount; i++) {
vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
vb[firstBinding + i].offset = pOffsets[i];
- cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
+ cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
+ }
+}
+
+void anv_CmdBindTransformFeedbackBuffersEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets,
+ const VkDeviceSize* pSizes)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;
+
+ /* We have to defer setting up the vertex buffers since we need the
+ * buffer stride from the pipeline.
+ */
+
+ assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
+ for (uint32_t i = 0; i < bindingCount; i++) {
+ if (pBuffers[i] == VK_NULL_HANDLE) {
+ xfb[firstBinding + i].buffer = NULL;
+ } else {
+ ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
+ xfb[firstBinding + i].buffer = buffer;
+ xfb[firstBinding + i].offset = pOffsets[i];
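+ /* pSizes is optional; a NULL array (or a VK_WHOLE_SIZE entry) binds the
+ * remainder of the buffer starting at offset.
+ */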
+ xfb[firstBinding + i].size =
+ anv_buffer_get_range(buffer, pOffsets[i],
+ pSizes ? pSizes[i] : VK_WHOLE_SIZE);
+ }
}
}
state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
memcpy(state.map, data, size);
- anv_state_flush(cmd_buffer->device, state);
-
VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
return state;
for (uint32_t i = 0; i < dwords; i++)
p[i] = a[i] | b[i];
- anv_state_flush(cmd_buffer->device, state);
-
VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
return state;
}
-static uint32_t
-anv_push_constant_value(struct anv_push_constants *data, uint32_t param)
-{
- if (BRW_PARAM_IS_BUILTIN(param)) {
- switch (param) {
- case BRW_PARAM_BUILTIN_ZERO:
- return 0;
- default:
- unreachable("Invalid param builtin");
- }
- } else {
- uint32_t offset = ANV_PARAM_PUSH_OFFSET(param);
- assert(offset % sizeof(uint32_t) == 0);
- if (offset < data->size)
- return *(uint32_t *)((uint8_t *)data + offset);
- else
- return 0;
- }
-}
-
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage)
{
- /* If we don't have this stage, bail. */
- if (!anv_pipeline_has_stage(cmd_buffer->state.pipeline, stage))
- return (struct anv_state) { .offset = 0 };
-
struct anv_push_constants *data =
- cmd_buffer->state.push_constants[stage];
- const struct brw_stage_prog_data *prog_data =
- cmd_buffer->state.pipeline->shaders[stage]->prog_data;
-
- /* If we don't actually have any push constants, bail. */
- if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)
- return (struct anv_state) { .offset = 0 };
+ &cmd_buffer->state.push_constants[stage];
struct anv_state state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
- prog_data->nr_params * sizeof(float),
+ sizeof(struct anv_push_constants),
32 /* bottom 5 bits MBZ */);
-
- /* Walk through the param array and fill the buffer with data */
- uint32_t *u32_map = state.map;
- for (unsigned i = 0; i < prog_data->nr_params; i++)
- u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);
-
- anv_state_flush(cmd_buffer->device, state);
+ memcpy(state.map, data, sizeof(struct anv_push_constants));
return state;
}
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_push_constants *data =
- cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ &cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
+ struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
- const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+ const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
- /* If we don't actually have any push constants, bail. */
- if (cs_prog_data->push.total.size == 0)
+ const uint32_t threads = anv_cs_threads(pipeline);
+ const unsigned total_push_constants_size =
+ brw_cs_push_const_total_size(cs_prog_data, threads);
+ if (total_push_constants_size == 0)
return (struct anv_state) { .offset = 0 };
const unsigned push_constant_alignment =
cmd_buffer->device->info.gen < 8 ? 32 : 64;
const unsigned aligned_total_push_constants_size =
- ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
+ ALIGN(total_push_constants_size, push_constant_alignment);
struct anv_state state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
aligned_total_push_constants_size,
push_constant_alignment);
- /* Walk through the param array and fill the buffer with data */
- uint32_t *u32_map = state.map;
+ void *dst = state.map;
+ const void *src = (char *)data + (range->start * 32);
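+ /* Push ranges are in units of 32-byte registers, so range->start * 32 is
+ * the byte offset of this range within the push constant data.
+ */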
if (cs_prog_data->push.cross_thread.size > 0) {
- for (unsigned i = 0;
- i < cs_prog_data->push.cross_thread.dwords;
- i++) {
- assert(prog_data->param[i] != BRW_PARAM_BUILTIN_SUBGROUP_ID);
- u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);
- }
+ memcpy(dst, src, cs_prog_data->push.cross_thread.size);
+ dst += cs_prog_data->push.cross_thread.size;
+ src += cs_prog_data->push.cross_thread.size;
}
if (cs_prog_data->push.per_thread.size > 0) {
- for (unsigned t = 0; t < cs_prog_data->threads; t++) {
- unsigned dst =
- 8 * (cs_prog_data->push.per_thread.regs * t +
- cs_prog_data->push.cross_thread.regs);
- unsigned src = cs_prog_data->push.cross_thread.dwords;
- for ( ; src < prog_data->nr_params; src++, dst++) {
- if (prog_data->param[src] == BRW_PARAM_BUILTIN_SUBGROUP_ID) {
- u32_map[dst] = t;
- } else {
- u32_map[dst] =
- anv_push_constant_value(data, prog_data->param[src]);
- }
- }
+ for (unsigned t = 0; t < threads; t++) {
+ memcpy(dst, src, cs_prog_data->push.per_thread.size);
+
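+ /* Rebase the offset of cs.subgroup_id in anv_push_constants onto the
+ * per-thread block just copied into dst, then patch in this thread's ID.
+ */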
+ uint32_t *subgroup_id = dst +
+ offsetof(struct anv_push_constants, cs.subgroup_id) -
+ (range->start * 32 + cs_prog_data->push.cross_thread.size);
+ *subgroup_id = t;
+
+ dst += cs_prog_data->push.per_thread.size;
}
}
- anv_state_flush(cmd_buffer->device, state);
-
return state;
}
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
anv_foreach_stage(stage, stageFlags) {
- VkResult result =
- anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
- stage, client_data);
- if (result != VK_SUCCESS)
- return;
-
- memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
+ memcpy(cmd_buffer->state.push_constants[stage].client_data + offset,
pValues, size);
}
ANV_FROM_HANDLE(anv_device, device, _device);
struct anv_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_COMMAND_POOL);
+
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
anv_cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_base_finish(&pool->base);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
return VK_SUCCESS;
}
-void anv_TrimCommandPoolKHR(
+void anv_TrimCommandPool(
VkDevice device,
VkCommandPool commandPool,
- VkCommandPoolTrimFlagsKHR flags)
+ VkCommandPoolTrimFlags flags)
{
/* Nothing for us to do here. Our pools stay pretty tidy. */
}
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
const struct anv_subpass *subpass = cmd_buffer->state.subpass;
- const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
- if (subpass->depth_stencil_attachment.attachment == VK_ATTACHMENT_UNUSED)
+ if (subpass->depth_stencil_attachment == NULL)
return NULL;
const struct anv_image_view *iview =
- fb->attachments[subpass->depth_stencil_attachment.attachment];
+ cmd_buffer->state.attachments[subpass->depth_stencil_attachment->attachment].image_view;
assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT));
return iview;
}
-static VkResult
-anv_cmd_buffer_ensure_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
- uint32_t set)
+static struct anv_descriptor_set *
+anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point,
+ struct anv_descriptor_set_layout *layout,
+ uint32_t _set)
{
+ struct anv_cmd_pipeline_state *pipe_state;
+ if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
+ pipe_state = &cmd_buffer->state.compute.base;
+ } else {
+ assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
+ pipe_state = &cmd_buffer->state.gfx.base;
+ }
+
struct anv_push_descriptor_set **push_set =
- &cmd_buffer->state.push_descriptors[set];
+ &pipe_state->push_descriptors[_set];
if (*push_set == NULL) {
- *push_set = vk_alloc(&cmd_buffer->pool->alloc,
- sizeof(struct anv_push_descriptor_set), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ *push_set = vk_zalloc(&cmd_buffer->pool->alloc,
+ sizeof(struct anv_push_descriptor_set), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (*push_set == NULL) {
anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return NULL;
}
}
- return VK_SUCCESS;
+ struct anv_descriptor_set *set = &(*push_set)->set;
+
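+ /* Swap in the new layout: drop the reference on the old one (if any) and
+ * take a reference on the new one so it lives as long as the set.
+ */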
+ if (set->layout != layout) {
+ if (set->layout)
+ anv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
+ anv_descriptor_set_layout_ref(layout);
+ set->layout = layout;
+ }
+ set->size = anv_descriptor_set_layout_size(layout);
+ set->buffer_view_count = layout->buffer_view_count;
+ set->buffer_views = (*push_set)->buffer_views;
+
+ if (layout->descriptor_buffer_size &&
+ ((*push_set)->set_used_on_gpu ||
+ set->desc_mem.alloc_size < layout->descriptor_buffer_size)) {
+ /* The previous buffer is either actively used by some GPU command (so
+ * we can't modify it) or is too small. Allocate a new one.
+ */
+ struct anv_state desc_mem =
+ anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
+ layout->descriptor_buffer_size, 32);
+ if (set->desc_mem.alloc_size) {
+ /* TODO: Do we really need to copy all the time? */
+ memcpy(desc_mem.map, set->desc_mem.map,
+ MIN2(desc_mem.alloc_size, set->desc_mem.alloc_size));
+ }
+ set->desc_mem = desc_mem;
+
+ struct anv_address addr = {
+ .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
+ .offset = set->desc_mem.offset,
+ };
+
+ const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
+ set->desc_surface_state =
+ anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
+ isl_dev->ss.size, isl_dev->ss.align);
+ anv_fill_buffer_surface_state(cmd_buffer->device,
+ set->desc_surface_state,
+ ISL_FORMAT_R32G32B32A32_FLOAT,
+ addr, layout->descriptor_buffer_size, 1);
+ }
+
+ return set;
}
void anv_CmdPushDescriptorSetKHR(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
- assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS ||
- pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);
assert(_set < MAX_SETS);
- const struct anv_descriptor_set_layout *set_layout =
- layout->set[_set].layout;
+ struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
- if (anv_cmd_buffer_ensure_push_descriptor_set(cmd_buffer, _set) != VK_SUCCESS)
+ struct anv_descriptor_set *set =
+ anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
+ set_layout, _set);
+ if (!set)
return;
- struct anv_push_descriptor_set *push_set =
- cmd_buffer->state.push_descriptors[_set];
- struct anv_descriptor_set *set = &push_set->set;
-
- set->layout = set_layout;
- set->size = anv_descriptor_set_layout_size(set_layout);
- set->buffer_count = set_layout->buffer_count;
- set->buffer_views = push_set->buffer_views;
/* Go through the user-supplied descriptors. */
for (uint32_t i = 0; i < descriptorWriteCount; i++) {
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- anv_descriptor_set_write_image_view(set, &cmd_buffer->device->info,
+ anv_descriptor_set_write_image_view(cmd_buffer->device, set,
write->pImageInfo + j,
write->descriptorType,
write->dstBinding,
ANV_FROM_HANDLE(anv_buffer_view, bview,
write->pTexelBufferView[j]);
- anv_descriptor_set_write_buffer_view(set,
+ anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
write->descriptorType,
bview,
write->dstBinding,
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- assert(write->pBufferInfo[j].buffer);
ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
- assert(buffer);
- anv_descriptor_set_write_buffer(set,
- cmd_buffer->device,
+ anv_descriptor_set_write_buffer(cmd_buffer->device, set,
&cmd_buffer->surface_state_stream,
write->descriptorType,
buffer,
}
}
- cmd_buffer->state.descriptors[_set] = set;
- cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
+ anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
+ layout, _set, set, NULL, NULL);
}
void anv_CmdPushDescriptorSetWithTemplateKHR(
VkCommandBuffer commandBuffer,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
VkPipelineLayout _layout,
uint32_t _set,
const void* pData)
assert(_set < MAX_PUSH_DESCRIPTORS);
- const struct anv_descriptor_set_layout *set_layout =
- layout->set[_set].layout;
+ struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
- if (anv_cmd_buffer_ensure_push_descriptor_set(cmd_buffer, _set) != VK_SUCCESS)
+ struct anv_descriptor_set *set =
+ anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
+ set_layout, _set);
+ if (!set)
return;
- struct anv_push_descriptor_set *push_set =
- cmd_buffer->state.push_descriptors[_set];
- struct anv_descriptor_set *set = &push_set->set;
- set->layout = set_layout;
- set->size = anv_descriptor_set_layout_size(set_layout);
- set->buffer_count = set_layout->buffer_count;
- set->buffer_views = push_set->buffer_views;
-
- anv_descriptor_set_write_template(set,
- cmd_buffer->device,
+ anv_descriptor_set_write_template(cmd_buffer->device, set,
&cmd_buffer->surface_state_stream,
template,
pData);
- cmd_buffer->state.descriptors[_set] = set;
- cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
+ anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
+ layout, _set, set, NULL, NULL);
+}
+
+void anv_CmdSetDeviceMask(
+ VkCommandBuffer commandBuffer,
+ uint32_t deviceMask)
+{
+ /* No-op */
}