if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &cmd_buffer->base,
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
+
cmd_buffer->batch.status = VK_SUCCESS;
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
anv_cmd_state_finish(cmd_buffer);
+ vk_object_base_finish(&cmd_buffer->base);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
cmd_buffer->usage_flags = 0;
+ cmd_buffer->perf_query_pool = NULL;
anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
anv_cmd_state_reset(cmd_buffer);
case 11: \
gen11_##func(__VA_ARGS__); \
break; \
+ case 12: \
+ gen12_##func(__VA_ARGS__); \
+ break; \
default: \
assert(!"Unknown hardware generation"); \
}
cmd_buffer);
}
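+/* Copy @src over @dst only when the contents differ; returns true if an
+ * update actually happened so callers can skip redundant dirty-flagging.
+ */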
+static bool
+mem_update(void *dst, const void *src, size_t size)
+{
+ if (memcmp(dst, src, size) == 0)
+ return false;
+
+ memcpy(dst, src, size);
+ return true;
+}
+
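+/* Compare the bind map's surface, sampler, and push SHA-1s against the ones
+ * last recorded on the command buffer and dirty only the state that actually
+ * changed, rather than unconditionally dirtying everything on pipeline bind.
+ */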
+static void
+set_dirty_for_bind_map(struct anv_cmd_buffer *cmd_buffer,
+ gl_shader_stage stage,
+ const struct anv_pipeline_bind_map *map)
+{
+ if (mem_update(cmd_buffer->state.surface_sha1s[stage],
+ map->surface_sha1, sizeof(map->surface_sha1)))
+ cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
+
+ if (mem_update(cmd_buffer->state.sampler_sha1s[stage],
+ map->sampler_sha1, sizeof(map->sampler_sha1)))
+ cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
+
+ if (mem_update(cmd_buffer->state.push_sha1s[stage],
+ map->push_sha1, sizeof(map->push_sha1)))
+ cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(stage);
+}
+
void anv_CmdBindPipeline(
VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint,
ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
switch (pipelineBindPoint) {
- case VK_PIPELINE_BIND_POINT_COMPUTE:
- cmd_buffer->state.compute.base.pipeline = pipeline;
+ case VK_PIPELINE_BIND_POINT_COMPUTE: {
+ struct anv_compute_pipeline *compute_pipeline =
+ anv_pipeline_to_compute(pipeline);
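+      /* Re-binding the already-bound pipeline is a no-op; bail before any
+       * state gets flagged dirty.
+       */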
+ if (cmd_buffer->state.compute.pipeline == compute_pipeline)
+ return;
+
+ cmd_buffer->state.compute.pipeline = compute_pipeline;
cmd_buffer->state.compute.pipeline_dirty = true;
- cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
- cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
+ set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
+ &compute_pipeline->cs->bind_map);
break;
+ }
- case VK_PIPELINE_BIND_POINT_GRAPHICS:
- cmd_buffer->state.gfx.base.pipeline = pipeline;
- cmd_buffer->state.gfx.vb_dirty |= pipeline->vb_used;
+ case VK_PIPELINE_BIND_POINT_GRAPHICS: {
+ struct anv_graphics_pipeline *gfx_pipeline =
+ anv_pipeline_to_graphics(pipeline);
+ if (cmd_buffer->state.gfx.pipeline == gfx_pipeline)
+ return;
+
+ cmd_buffer->state.gfx.pipeline = gfx_pipeline;
+ cmd_buffer->state.gfx.vb_dirty |= gfx_pipeline->vb_used;
cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
- cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
- cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
+
+ anv_foreach_stage(stage, gfx_pipeline->active_stages) {
+ set_dirty_for_bind_map(cmd_buffer, stage,
+ &gfx_pipeline->shaders[stage]->bind_map);
+ }
/* Apply the dynamic state from the pipeline */
cmd_buffer->state.gfx.dirty |=
anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
- &pipeline->dynamic_state,
- pipeline->dynamic_state_mask);
+ &gfx_pipeline->dynamic_state,
+ gfx_pipeline->dynamic_state_mask);
break;
+ }
default:
assert(!"invalid bind point");
struct anv_descriptor_set_layout *set_layout =
layout->set[set_index].layout;
+ VkShaderStageFlags stages = set_layout->shader_stages;
struct anv_cmd_pipeline_state *pipe_state;
- if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
- pipe_state = &cmd_buffer->state.compute.base;
- } else {
- assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
+
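+   /* Mask the set's stage flags down to the stages this bind point can
+    * actually use, so the other pipeline's stages are never dirtied here.
+    */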
+ switch (bind_point) {
+ case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ stages &= VK_SHADER_STAGE_ALL_GRAPHICS;
pipe_state = &cmd_buffer->state.gfx.base;
+ break;
+
+ case VK_PIPELINE_BIND_POINT_COMPUTE:
+ stages &= VK_SHADER_STAGE_COMPUTE_BIT;
+ pipe_state = &cmd_buffer->state.compute.base;
+ break;
+
+ default:
+ unreachable("invalid bind point");
+ }
+
+ VkShaderStageFlags dirty_stages = 0;
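+   /* Accumulate dirty stages and apply them once at the end, only for
+    * bindings and offsets that actually changed.
+    */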
+ if (pipe_state->descriptors[set_index] != set) {
+ pipe_state->descriptors[set_index] = set;
+ dirty_stages |= stages;
}
- pipe_state->descriptors[set_index] = set;
+
+   /* If it's a push descriptor set, we have to flag things as dirty
+    * regardless of whether the CPU-side data structure changed, since the
+    * contents may have been edited in place.
+    */
+ if (set->pool == NULL)
+ dirty_stages |= stages;
if (dynamic_offsets) {
if (set_layout->dynamic_offset_count > 0) {
uint32_t dynamic_offset_start =
layout->set[set_index].dynamic_offset_start;
- anv_foreach_stage(stage, set_layout->shader_stages) {
+ anv_foreach_stage(stage, stages) {
struct anv_push_constants *push =
&cmd_buffer->state.push_constants[stage];
+ uint32_t *push_offsets =
+ &push->dynamic_offsets[dynamic_offset_start];
/* Assert that everything is in range */
assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
ARRAY_SIZE(push->dynamic_offsets));
- typed_memcpy(&push->dynamic_offsets[dynamic_offset_start],
- *dynamic_offsets, set_layout->dynamic_offset_count);
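+            /* Walk only the dynamic offsets this stage uses, comparing
+             * before writing so unchanged offsets do not dirty the stage.
+             */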
+ unsigned mask = set_layout->stage_dynamic_offsets[stage];
+ STATIC_ASSERT(MAX_DYNAMIC_BUFFERS <= sizeof(mask) * 8);
+ while (mask) {
+ int i = u_bit_scan(&mask);
+ if (push_offsets[i] != (*dynamic_offsets)[i]) {
+ push_offsets[i] = (*dynamic_offsets)[i];
+ dirty_stages |= mesa_to_vk_shader_stage(stage);
+ }
+ }
}
*dynamic_offsets += set_layout->dynamic_offset_count;
*dynamic_offset_count -= set_layout->dynamic_offset_count;
-
- if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
- cmd_buffer->state.push_constants_dirty |=
- VK_SHADER_STAGE_COMPUTE_BIT;
- } else {
- cmd_buffer->state.push_constants_dirty |=
- VK_SHADER_STAGE_ALL_GRAPHICS;
- }
}
}
- if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
- cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
- } else {
- assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
- cmd_buffer->state.descriptors_dirty |=
- set_layout->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
- }
+ cmd_buffer->state.descriptors_dirty |= dirty_stages;
+ cmd_buffer->state.push_constants_dirty |= dirty_stages;
}
void anv_CmdBindDescriptorSets(
return state;
}
-static uint32_t
-anv_push_constant_value(const struct anv_cmd_pipeline_state *state,
- const struct anv_push_constants *data, uint32_t param)
-{
- if (BRW_PARAM_IS_BUILTIN(param)) {
- switch (param) {
- case BRW_PARAM_BUILTIN_ZERO:
- return 0;
- case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X:
- return data->cs.base_work_group_id[0];
- case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y:
- return data->cs.base_work_group_id[1];
- case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z:
- return data->cs.base_work_group_id[2];
- default:
- unreachable("Invalid param builtin");
- }
- } else if (ANV_PARAM_IS_PUSH(param)) {
- uint32_t offset = ANV_PARAM_PUSH_OFFSET(param);
- assert(offset % sizeof(uint32_t) == 0);
- if (offset < sizeof(data->client_data))
- return *(uint32_t *)((uint8_t *)data + offset);
- else
- return 0;
- } else if (ANV_PARAM_IS_DYN_OFFSET(param)) {
- unsigned idx = ANV_PARAM_DYN_OFFSET_IDX(param);
- assert(idx < MAX_DYNAMIC_BUFFERS);
- return data->dynamic_offsets[idx];
- }
-
- assert(!"Invalid param");
- return 0;
-}
-
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage)
{
- struct anv_cmd_pipeline_state *pipeline_state = &cmd_buffer->state.gfx.base;
- struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
-
- /* If we don't have this stage, bail. */
- if (!anv_pipeline_has_stage(pipeline, stage))
- return (struct anv_state) { .offset = 0 };
-
struct anv_push_constants *data =
&cmd_buffer->state.push_constants[stage];
- const struct brw_stage_prog_data *prog_data =
- pipeline->shaders[stage]->prog_data;
-
- /* If we don't actually have any push constants, bail. */
- if (prog_data == NULL || prog_data->nr_params == 0)
- return (struct anv_state) { .offset = 0 };
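+   /* Rather than gathering individual params (see the removed
+    * anv_push_constant_value above), the whole anv_push_constants struct
+    * is copied verbatim.
+    */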
struct anv_state state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
- prog_data->nr_params * sizeof(float),
+ sizeof(struct anv_push_constants),
32 /* bottom 5 bits MBZ */);
-
- /* Walk through the param array and fill the buffer with data */
- uint32_t *u32_map = state.map;
- for (unsigned i = 0; i < prog_data->nr_params; i++) {
- u32_map[i] = anv_push_constant_value(pipeline_state, data,
- prog_data->param[i]);
- }
+ memcpy(state.map, data, sizeof(struct anv_push_constants));
return state;
}
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
- struct anv_cmd_pipeline_state *pipeline_state = &cmd_buffer->state.compute.base;
struct anv_push_constants *data =
&cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
- struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
+ struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
- const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+ const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
- /* If we don't actually have any push constants, bail. */
- if (cs_prog_data->push.total.size == 0)
+ const uint32_t threads = anv_cs_threads(pipeline);
+ const unsigned total_push_constants_size =
+ brw_cs_push_const_total_size(cs_prog_data, threads);
+ if (total_push_constants_size == 0)
return (struct anv_state) { .offset = 0 };
const unsigned push_constant_alignment =
cmd_buffer->device->info.gen < 8 ? 32 : 64;
const unsigned aligned_total_push_constants_size =
- ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
+ ALIGN(total_push_constants_size, push_constant_alignment);
struct anv_state state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
aligned_total_push_constants_size,
push_constant_alignment);
- /* Walk through the param array and fill the buffer with data */
- uint32_t *u32_map = state.map;
+ void *dst = state.map;
+ const void *src = (char *)data + (range->start * 32);
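+
+   /* The cross-thread portion comes first and is shared by every thread. */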
if (cs_prog_data->push.cross_thread.size > 0) {
- for (unsigned i = 0;
- i < cs_prog_data->push.cross_thread.dwords;
- i++) {
- assert(prog_data->param[i] != BRW_PARAM_BUILTIN_SUBGROUP_ID);
- u32_map[i] = anv_push_constant_value(pipeline_state, data,
- prog_data->param[i]);
- }
+ memcpy(dst, src, cs_prog_data->push.cross_thread.size);
+ dst += cs_prog_data->push.cross_thread.size;
+ src += cs_prog_data->push.cross_thread.size;
}
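+   /* The per-thread portion is replicated once per thread, with each copy
+    * getting its own subgroup ID patched in below.
+    */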
if (cs_prog_data->push.per_thread.size > 0) {
- for (unsigned t = 0; t < cs_prog_data->threads; t++) {
- unsigned dst =
- 8 * (cs_prog_data->push.per_thread.regs * t +
- cs_prog_data->push.cross_thread.regs);
- unsigned src = cs_prog_data->push.cross_thread.dwords;
- for ( ; src < prog_data->nr_params; src++, dst++) {
- if (prog_data->param[src] == BRW_PARAM_BUILTIN_SUBGROUP_ID) {
- u32_map[dst] = t;
- } else {
- u32_map[dst] = anv_push_constant_value(pipeline_state, data,
- prog_data->param[src]);
- }
- }
+ for (unsigned t = 0; t < threads; t++) {
+ memcpy(dst, src, cs_prog_data->push.per_thread.size);
+
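+         /* subgroup_id lives inside anv_push_constants; find it within this
+          * thread's copy by taking its struct offset and subtracting the
+          * start of the pushed range (range->start is in 32-byte units) plus
+          * the cross-thread bytes that precede the per-thread data.
+          */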
+ uint32_t *subgroup_id = dst +
+ offsetof(struct anv_push_constants, cs.subgroup_id) -
+ (range->start * 32 + cs_prog_data->push.cross_thread.size);
+ *subgroup_id = t;
+
+ dst += cs_prog_data->push.per_thread.size;
}
}
ANV_FROM_HANDLE(anv_device, device, _device);
struct anv_cmd_pool *pool;
- pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_COMMAND_POOL);
+
if (pAllocator)
pool->alloc = *pAllocator;
else
- pool->alloc = device->alloc;
+ pool->alloc = device->vk.alloc;
list_inithead(&pool->cmd_buffers);
anv_cmd_buffer_destroy(cmd_buffer);
}
- vk_free2(&device->alloc, pAllocator, pool);
+ vk_object_base_finish(&pool->base);
+ vk_free2(&device->vk.alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- assert(write->pBufferInfo[j].buffer);
ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
- assert(buffer);
anv_descriptor_set_write_buffer(cmd_buffer->device, set,
&cmd_buffer->surface_state_stream,