.front = 0u,
.back = 0u,
},
+ .line_stipple = {
+ .factor = 0u,
+ .pattern = 0u,
+ },
};
-void
+/**
+ * Copy the dynamic state from src to dest based on the copy_mask.
+ *
+ * Avoid copying states that have not changed, except for VIEWPORT, SCISSOR,
+ * and BLEND_CONSTANTS, which are always copied (and reported as changed)
+ * whenever they appear in the copy_mask.
+ *
+ * Returns a mask of the states which changed.
+ */
+anv_cmd_dirty_mask_t
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
const struct anv_dynamic_state *src,
- uint32_t copy_mask)
+ anv_cmd_dirty_mask_t copy_mask)
{
- if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+ anv_cmd_dirty_mask_t changed = 0;
+
+ if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
dest->viewport.count = src->viewport.count;
typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
src->viewport.count);
+ changed |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+ if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
dest->scissor.count = src->scissor.count;
typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
src->scissor.count);
+ changed |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
- dest->line_width = src->line_width;
+ if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
+ typed_memcpy(dest->blend_constants, src->blend_constants, 4);
+ changed |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
+ }
+
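+/* Compare-and-copy a single scalar field, accumulating its dirty flag into
+ * `changed` only when the value actually differs.
+ */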
+#define ANV_CMP_COPY(field, flag) \
+ if (copy_mask & flag) { \
+ if (dest->field != src->field) { \
+ dest->field = src->field; \
+ changed |= flag; \
+ } \
+ }
- if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
- dest->depth_bias = src->depth_bias;
+ ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH);
- if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
- typed_memcpy(dest->blend_constants, src->blend_constants, 4);
+ ANV_CMP_COPY(depth_bias.bias, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
+ ANV_CMP_COPY(depth_bias.clamp, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
+ ANV_CMP_COPY(depth_bias.slope, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
+
+ ANV_CMP_COPY(depth_bounds.min, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
+ ANV_CMP_COPY(depth_bounds.max, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
+
+ ANV_CMP_COPY(stencil_compare_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
+ ANV_CMP_COPY(stencil_compare_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
+
+ ANV_CMP_COPY(stencil_write_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
+ ANV_CMP_COPY(stencil_write_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
- if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
- dest->depth_bounds = src->depth_bounds;
+ ANV_CMP_COPY(stencil_reference.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
+ ANV_CMP_COPY(stencil_reference.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
- dest->stencil_compare_mask = src->stencil_compare_mask;
+ ANV_CMP_COPY(line_stipple.factor, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
+ ANV_CMP_COPY(line_stipple.pattern, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
- dest->stencil_write_mask = src->stencil_write_mask;
+#undef ANV_CMP_COPY
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
- dest->stencil_reference = src->stencil_reference;
+ return changed;
}
static void
anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);
- for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
- vk_free(&cmd_buffer->pool->alloc, state->push_constants[i]);
-
vk_free(&cmd_buffer->pool->alloc, state->attachments);
}
anv_cmd_state_init(cmd_buffer);
}
-/**
- * This function updates the size of the push constant buffer we need to emit.
- * This is called in various parts of the driver to ensure that different
- * pieces of push constant data get emitted as needed. However, it is important
- * that we never shrink the size of the buffer. For example, a compute shader
- * dispatch will always call this for the base group id, which has an
- * offset in the push constant buffer that is smaller than the offset for
- * storage image data. If the compute shader has storage images, we will call
- * this again with a larger size during binding table emission. However,
- * if we dispatch the compute shader again without dirtying our descriptors,
- * we would still call this function with a smaller size for the base group
- * id, and not for the images, which would incorrectly shrink the size of the
- * push constant data we emit with that dispatch, making us drop the image data.
- */
-VkResult
-anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
- gl_shader_stage stage, uint32_t size)
-{
- struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];
-
- if (*ptr == NULL) {
- *ptr = vk_alloc(&cmd_buffer->pool->alloc, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (*ptr == NULL) {
- anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
- (*ptr)->size = size;
- } else if ((*ptr)->size < size) {
- *ptr = vk_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (*ptr == NULL) {
- anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
- (*ptr)->size = size;
- }
-
- return VK_SUCCESS;
-}
-
static VkResult anv_create_cmd_buffer(
struct anv_device * device,
struct anv_cmd_pool * pool,
if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &cmd_buffer->base,
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
+
cmd_buffer->batch.status = VK_SUCCESS;
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
anv_cmd_state_finish(cmd_buffer);
+ vk_object_base_finish(&cmd_buffer->base);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
cmd_buffer->usage_flags = 0;
+ cmd_buffer->perf_query_pool = NULL;
anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
anv_cmd_state_reset(cmd_buffer);
case 11: \
gen11_##func(__VA_ARGS__); \
break; \
+ case 12: \
+ gen12_##func(__VA_ARGS__); \
+ break; \
default: \
assert(!"Unknown hardware generation"); \
}
cmd_buffer);
}
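+/* memcpy() src over dst, returning true iff the contents actually changed. */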
+static bool
+mem_update(void *dst, const void *src, size_t size)
+{
+ if (memcmp(dst, src, size) == 0)
+ return false;
+
+ memcpy(dst, src, size);
+ return true;
+}
+
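+/* Compare the pipeline bind map's surface/sampler/push SHA-1s against the
+ * ones last seen on this stage; on any mismatch, flag the corresponding
+ * descriptor or push-constant state dirty so it gets re-emitted.
+ */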
+static void
+set_dirty_for_bind_map(struct anv_cmd_buffer *cmd_buffer,
+ gl_shader_stage stage,
+ const struct anv_pipeline_bind_map *map)
+{
+ if (mem_update(cmd_buffer->state.surface_sha1s[stage],
+ map->surface_sha1, sizeof(map->surface_sha1)))
+ cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
+
+ if (mem_update(cmd_buffer->state.sampler_sha1s[stage],
+ map->sampler_sha1, sizeof(map->sampler_sha1)))
+ cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
+
+ if (mem_update(cmd_buffer->state.push_sha1s[stage],
+ map->push_sha1, sizeof(map->push_sha1)))
+ cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(stage);
+}
+
void anv_CmdBindPipeline(
VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint,
ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
switch (pipelineBindPoint) {
- case VK_PIPELINE_BIND_POINT_COMPUTE:
- cmd_buffer->state.compute.base.pipeline = pipeline;
+ case VK_PIPELINE_BIND_POINT_COMPUTE: {
+ struct anv_compute_pipeline *compute_pipeline =
+ anv_pipeline_to_compute(pipeline);
+ if (cmd_buffer->state.compute.pipeline == compute_pipeline)
+ return;
+
+ cmd_buffer->state.compute.pipeline = compute_pipeline;
cmd_buffer->state.compute.pipeline_dirty = true;
- cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
- cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
+ set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
+ &compute_pipeline->cs->bind_map);
break;
+ }
- case VK_PIPELINE_BIND_POINT_GRAPHICS:
- cmd_buffer->state.gfx.base.pipeline = pipeline;
- cmd_buffer->state.gfx.vb_dirty |= pipeline->vb_used;
+ case VK_PIPELINE_BIND_POINT_GRAPHICS: {
+ struct anv_graphics_pipeline *gfx_pipeline =
+ anv_pipeline_to_graphics(pipeline);
+ if (cmd_buffer->state.gfx.pipeline == gfx_pipeline)
+ return;
+
+ cmd_buffer->state.gfx.pipeline = gfx_pipeline;
+ cmd_buffer->state.gfx.vb_dirty |= gfx_pipeline->vb_used;
cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
- cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
- cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
+
+ anv_foreach_stage(stage, gfx_pipeline->active_stages) {
+ set_dirty_for_bind_map(cmd_buffer, stage,
+ &gfx_pipeline->shaders[stage]->bind_map);
+ }
/* Apply the dynamic state from the pipeline */
- cmd_buffer->state.gfx.dirty |= pipeline->dynamic_state_mask;
- anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
- &pipeline->dynamic_state,
- pipeline->dynamic_state_mask);
+ cmd_buffer->state.gfx.dirty |=
+ anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
+ &gfx_pipeline->dynamic_state,
+ gfx_pipeline->dynamic_state_mask);
break;
+ }
default:
assert(!"invalid bind point");
cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
+void anv_CmdSetLineStippleEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t lineStippleFactor,
+ uint16_t lineStipplePattern)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ cmd_buffer->state.gfx.dynamic.line_stipple.factor = lineStippleFactor;
+ cmd_buffer->state.gfx.dynamic.line_stipple.pattern = lineStipplePattern;
+
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
+}
+
static void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
VkPipelineBindPoint bind_point,
struct anv_descriptor_set_layout *set_layout =
layout->set[set_index].layout;
+ VkShaderStageFlags stages = set_layout->shader_stages;
struct anv_cmd_pipeline_state *pipe_state;
- if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
- pipe_state = &cmd_buffer->state.compute.base;
- } else {
- assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
+
+ switch (bind_point) {
+ case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ stages &= VK_SHADER_STAGE_ALL_GRAPHICS;
pipe_state = &cmd_buffer->state.gfx.base;
+ break;
+
+ case VK_PIPELINE_BIND_POINT_COMPUTE:
+ stages &= VK_SHADER_STAGE_COMPUTE_BIT;
+ pipe_state = &cmd_buffer->state.compute.base;
+ break;
+
+ default:
+ unreachable("invalid bind point");
+ }
+
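+   /* Accumulate dirty stages rather than flagging unconditionally, so that
+    * re-binding an identical set does not trigger any re-emission.
+    */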
+ VkShaderStageFlags dirty_stages = 0;
+ if (pipe_state->descriptors[set_index] != set) {
+ pipe_state->descriptors[set_index] = set;
+ dirty_stages |= stages;
}
- pipe_state->descriptors[set_index] = set;
+
+   /* If it's a push descriptor set, we have to flag things as dirty
+    * regardless of whether the CPU-side data structure changed, since the
+    * set contents may have been edited in place.
+    */
+ if (set->pool == NULL)
+ dirty_stages |= stages;
if (dynamic_offsets) {
if (set_layout->dynamic_offset_count > 0) {
uint32_t dynamic_offset_start =
layout->set[set_index].dynamic_offset_start;
- /* Assert that everything is in range */
- assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
- assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
- ARRAY_SIZE(pipe_state->dynamic_offsets));
-
- typed_memcpy(&pipe_state->dynamic_offsets[dynamic_offset_start],
- *dynamic_offsets, set_layout->dynamic_offset_count);
+ anv_foreach_stage(stage, stages) {
+ struct anv_push_constants *push =
+ &cmd_buffer->state.push_constants[stage];
+ uint32_t *push_offsets =
+ &push->dynamic_offsets[dynamic_offset_start];
+
+ /* Assert that everything is in range */
+ assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
+ assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
+ ARRAY_SIZE(push->dynamic_offsets));
+
+ unsigned mask = set_layout->stage_dynamic_offsets[stage];
+ STATIC_ASSERT(MAX_DYNAMIC_BUFFERS <= sizeof(mask) * 8);
+ while (mask) {
+ int i = u_bit_scan(&mask);
+ if (push_offsets[i] != (*dynamic_offsets)[i]) {
+ push_offsets[i] = (*dynamic_offsets)[i];
+ dirty_stages |= mesa_to_vk_shader_stage(stage);
+ }
+ }
+ }
*dynamic_offsets += set_layout->dynamic_offset_count;
*dynamic_offset_count -= set_layout->dynamic_offset_count;
-
- if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
- cmd_buffer->state.push_constants_dirty |=
- VK_SHADER_STAGE_COMPUTE_BIT;
- } else {
- cmd_buffer->state.push_constants_dirty |=
- VK_SHADER_STAGE_ALL_GRAPHICS;
- }
}
}
- if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
- cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
- } else {
- assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
- cmd_buffer->state.descriptors_dirty |=
- set_layout->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
- }
-
- /* Pipeline layout objects are required to live at least while any command
- * buffers that use them are in recording state. We need to grab a reference
- * to the pipeline layout being bound here so we can compute correct dynamic
- * offsets for VK_DESCRIPTOR_TYPE_*_DYNAMIC in dynamic_offset_for_binding()
- * when we record draw commands that come after this.
- */
- pipe_state->layout = layout;
+ cmd_buffer->state.descriptors_dirty |= dirty_stages;
+ cmd_buffer->state.push_constants_dirty |= dirty_stages;
}
void anv_CmdBindDescriptorSets(
return state;
}
-static uint32_t
-anv_push_constant_value(const struct anv_cmd_pipeline_state *state,
- const struct anv_push_constants *data, uint32_t param)
-{
- if (BRW_PARAM_IS_BUILTIN(param)) {
- switch (param) {
- case BRW_PARAM_BUILTIN_ZERO:
- return 0;
- case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X:
- return data->base_work_group_id[0];
- case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y:
- return data->base_work_group_id[1];
- case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z:
- return data->base_work_group_id[2];
- default:
- unreachable("Invalid param builtin");
- }
- } else if (ANV_PARAM_IS_PUSH(param)) {
- uint32_t offset = ANV_PARAM_PUSH_OFFSET(param);
- assert(offset % sizeof(uint32_t) == 0);
- if (offset < data->size)
- return *(uint32_t *)((uint8_t *)data + offset);
- else
- return 0;
- } else if (ANV_PARAM_IS_DYN_OFFSET(param)) {
- unsigned idx = ANV_PARAM_DYN_OFFSET_IDX(param);
- assert(idx < MAX_DYNAMIC_BUFFERS);
- return state->dynamic_offsets[idx];
- }
-
- assert(!"Invalid param");
- return 0;
-}
-
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage)
{
- struct anv_cmd_pipeline_state *pipeline_state = &cmd_buffer->state.gfx.base;
- struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
-
- /* If we don't have this stage, bail. */
- if (!anv_pipeline_has_stage(pipeline, stage))
- return (struct anv_state) { .offset = 0 };
-
struct anv_push_constants *data =
- cmd_buffer->state.push_constants[stage];
- const struct brw_stage_prog_data *prog_data =
- pipeline->shaders[stage]->prog_data;
-
- /* If we don't actually have any push constants, bail. */
- if (prog_data == NULL || prog_data->nr_params == 0 || data == NULL)
- return (struct anv_state) { .offset = 0 };
+ &cmd_buffer->state.push_constants[stage];
struct anv_state state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
- prog_data->nr_params * sizeof(float),
+ sizeof(struct anv_push_constants),
32 /* bottom 5 bits MBZ */);
-
- /* Walk through the param array and fill the buffer with data */
- uint32_t *u32_map = state.map;
- for (unsigned i = 0; i < prog_data->nr_params; i++) {
- u32_map[i] = anv_push_constant_value(pipeline_state, data,
- prog_data->param[i]);
- }
+ memcpy(state.map, data, sizeof(struct anv_push_constants));
return state;
}
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
- struct anv_cmd_pipeline_state *pipeline_state = &cmd_buffer->state.compute.base;
struct anv_push_constants *data =
- cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
- struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
+ &cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
+ struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
- const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+ const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
- /* If we don't actually have any push constants, bail. */
- if (cs_prog_data->push.total.size == 0)
+ const struct anv_cs_parameters cs_params = anv_cs_parameters(pipeline);
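+   /* The total push size is the cross-thread block plus one per-thread block
+    * per HW thread, so it depends on the thread count.
+    */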
+ const unsigned total_push_constants_size =
+ brw_cs_push_const_total_size(cs_prog_data, cs_params.threads);
+ if (total_push_constants_size == 0)
return (struct anv_state) { .offset = 0 };
const unsigned push_constant_alignment =
cmd_buffer->device->info.gen < 8 ? 32 : 64;
const unsigned aligned_total_push_constants_size =
- ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
+ ALIGN(total_push_constants_size, push_constant_alignment);
struct anv_state state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
aligned_total_push_constants_size,
push_constant_alignment);
- /* Walk through the param array and fill the buffer with data */
- uint32_t *u32_map = state.map;
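+   /* Push ranges are in units of 32-byte registers, hence the byte offset. */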
+ void *dst = state.map;
+ const void *src = (char *)data + (range->start * 32);
if (cs_prog_data->push.cross_thread.size > 0) {
- for (unsigned i = 0;
- i < cs_prog_data->push.cross_thread.dwords;
- i++) {
- assert(prog_data->param[i] != BRW_PARAM_BUILTIN_SUBGROUP_ID);
- u32_map[i] = anv_push_constant_value(pipeline_state, data,
- prog_data->param[i]);
- }
+ memcpy(dst, src, cs_prog_data->push.cross_thread.size);
+ dst += cs_prog_data->push.cross_thread.size;
+ src += cs_prog_data->push.cross_thread.size;
}
if (cs_prog_data->push.per_thread.size > 0) {
- for (unsigned t = 0; t < cs_prog_data->threads; t++) {
- unsigned dst =
- 8 * (cs_prog_data->push.per_thread.regs * t +
- cs_prog_data->push.cross_thread.regs);
- unsigned src = cs_prog_data->push.cross_thread.dwords;
- for ( ; src < prog_data->nr_params; src++, dst++) {
- if (prog_data->param[src] == BRW_PARAM_BUILTIN_SUBGROUP_ID) {
- u32_map[dst] = t;
- } else {
- u32_map[dst] = anv_push_constant_value(pipeline_state, data,
- prog_data->param[src]);
- }
- }
+ for (unsigned t = 0; t < cs_params.threads; t++) {
+ memcpy(dst, src, cs_prog_data->push.per_thread.size);
+
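+         /* Patch this thread's subgroup ID into its copy of the
+          * per-thread block.
+          */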
+ uint32_t *subgroup_id = dst +
+ offsetof(struct anv_push_constants, cs.subgroup_id) -
+ (range->start * 32 + cs_prog_data->push.cross_thread.size);
+ *subgroup_id = t;
+
+ dst += cs_prog_data->push.per_thread.size;
}
}
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
anv_foreach_stage(stage, stageFlags) {
- VkResult result =
- anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
- stage, client_data);
- if (result != VK_SUCCESS)
- return;
-
- memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
+ memcpy(cmd_buffer->state.push_constants[stage].client_data + offset,
pValues, size);
}
ANV_FROM_HANDLE(anv_device, device, _device);
struct anv_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_COMMAND_POOL);
+
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
anv_cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_base_finish(&pool->base);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
const struct anv_subpass *subpass = cmd_buffer->state.subpass;
- const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
if (subpass->depth_stencil_attachment == NULL)
return NULL;
const struct anv_image_view *iview =
- fb->attachments[subpass->depth_stencil_attachment->attachment];
+ cmd_buffer->state.attachments[subpass->depth_stencil_attachment->attachment].image_view;
assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT));
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- assert(write->pBufferInfo[j].buffer);
ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
- assert(buffer);
anv_descriptor_set_write_buffer(cmd_buffer->device, set,
&cmd_buffer->surface_state_stream,
}
break;
- case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: {
- const VkWriteDescriptorSetInlineUniformBlockEXT *inline_write =
- vk_find_struct_const(write->pNext,
- WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);
- assert(inline_write->dataSize == write->descriptorCount);
- anv_descriptor_set_write_inline_uniform_data(cmd_buffer->device, set,
- write->dstBinding,
- inline_write->pData,
- write->dstArrayElement,
- inline_write->dataSize);
- break;
- }
-
default:
break;
}