static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
- VkShaderStage stage, uint32_t size)
+ gl_shader_stage stage, uint32_t size)
{
struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];
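+ /* Lazily allocate the per-stage push constant block from the command
+  * pool's allocator, growing it whenever a larger size is requested. */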
if (*ptr == NULL) {
- *ptr = anv_device_alloc(cmd_buffer->device, size, 8,
- VK_SYSTEM_ALLOC_TYPE_INTERNAL);
+ *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (*ptr == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- (*ptr)->size = size;
} else if ((*ptr)->size < size) {
- void *new_data = anv_device_alloc(cmd_buffer->device, size, 8,
- VK_SYSTEM_ALLOC_TYPE_INTERNAL);
- if (new_data == NULL)
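+ /* anv_realloc preserves the existing contents, so the manual
+  * copy-and-free sequence is no longer needed when growing the block. */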
+ *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (*ptr == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- memcpy(new_data, *ptr, (*ptr)->size);
- anv_device_free(cmd_buffer->device, *ptr);
-
- *ptr = new_data;
- (*ptr)->size = size;
}
+ (*ptr)->size = size;
return VK_SUCCESS;
}
(offsetof(struct anv_push_constants, field) + \
sizeof(cmd_buffer->state.push_constants[0]->field)))
-VkResult anv_CreateCommandBuffer(
- VkDevice _device,
- const VkCommandBufferCreateInfo* pCreateInfo,
+static VkResult anv_create_cmd_buffer(
+ struct anv_device * device,
+ struct anv_cmd_pool * pool,
+ VkCommandBufferLevel level,
VkCommandBuffer* pCommandBuffer)
{
- ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_cmd_pool, pool, pCreateInfo->commandPool);
struct anv_cmd_buffer *cmd_buffer;
VkResult result;
- cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
- VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+ cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
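+ /* Keep a back-pointer to the pool; its allocator services all
+  * per-command-buffer allocations from here on. */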
+ cmd_buffer->pool = pool;
result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
if (result != VK_SUCCESS)
anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
&device->dynamic_state_block_pool);
- cmd_buffer->level = pCreateInfo->level;
+ cmd_buffer->level = level;
cmd_buffer->usage_flags = 0;
anv_cmd_state_init(&cmd_buffer->state);
return VK_SUCCESS;
- fail:
- anv_device_free(device, cmd_buffer);
+ fail:
+ anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
return result;
}
-void anv_DestroyCommandBuffer(
+VkResult anv_AllocateCommandBuffers(
VkDevice _device,
- VkCommandBuffer _cmd_buffer)
+ const VkCommandBufferAllocateInfo* pAllocateInfo,
+ VkCommandBuffer* pCommandBuffers)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, _cmd_buffer);
+ ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);
+
+ VkResult result = VK_SUCCESS;
+ uint32_t i;
+
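+ /* Create the command buffers one at a time; on failure, free the i
+  * buffers already created before returning the error. */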
+ for (i = 0; i < pAllocateInfo->bufferCount; i++) {
+ result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
+ &pCommandBuffers[i]);
+ if (result != VK_SUCCESS)
+ break;
+ }
+
+ if (result != VK_SUCCESS)
+ anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
+ i, pCommandBuffers);
+
+ return result;
+}
+static void
+anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
+{
list_del(&cmd_buffer->pool_link);
anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);
anv_state_stream_finish(&cmd_buffer->surface_state_stream);
anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
- anv_device_free(device, cmd_buffer);
+
+ anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
+}
+
+void anv_FreeCommandBuffers(
+ VkDevice device,
+ VkCommandPool commandPool,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers)
+{
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
+
+ anv_cmd_buffer_destroy(cmd_buffer);
+ }
}
VkResult anv_ResetCommandBuffer(
}
if (set_layout->dynamic_offset_count > 0) {
- VkShaderStage s;
- for_each_bit(s, set_layout->shader_stages) {
+ anv_foreach_stage(s, set_layout->shader_stages) {
anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);
struct anv_push_constants *push =
unsigned array_size = set_layout->binding[b].array_size;
for (unsigned j = 0; j < array_size; j++) {
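+ /* A descriptor with no buffer view bound reports a zero range. */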
+ uint32_t range = 0;
+ if (desc->buffer_view)
+ range = desc->buffer_view->range;
push->dynamic[d].offset = *(offsets++);
- push->dynamic[d].range = (desc++)->range;
+ push->dynamic[d].range = range;
+ desc++;
d++;
}
}
const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;
- anv_reloc_list_add(&cmd_buffer->surface_relocs, cmd_buffer->device,
+ anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
state.offset + dword * 4, bo, offset);
}
-static void
-fill_descriptor_buffer_surface_state(struct anv_device *device, void *state,
- VkShaderStage stage, VkDescriptorType type,
- uint32_t offset, uint32_t range)
+const struct anv_format *
+anv_format_for_descriptor_type(VkDescriptorType type)
{
- VkFormat format;
- uint32_t stride;
-
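+ /* Uniform buffers are read through a typed RGBA32F surface; storage
+  * buffers use an untyped (RAW) surface. */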
switch (type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- if (anv_is_scalar_shader_stage(device->instance->physicalDevice.compiler,
- stage)) {
- stride = 4;
- } else {
- stride = 16;
- }
- format = VK_FORMAT_R32G32B32A32_SFLOAT;
- break;
+ return anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT);
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- stride = 1;
- format = VK_FORMAT_UNDEFINED;
- break;
+ return anv_format_for_vk_format(VK_FORMAT_UNDEFINED);
default:
unreachable("Invalid descriptor type");
}
-
- anv_fill_buffer_surface_state(device, state,
- anv_format_for_vk_format(format),
- offset, range, stride);
}
VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
- VkShaderStage stage, struct anv_state *bt_state)
+ gl_shader_stage stage,
+ struct anv_state *bt_state)
{
struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
struct anv_subpass *subpass = cmd_buffer->state.subpass;
struct anv_pipeline_layout *layout;
uint32_t color_count, bias, state_offset;
- if (stage == VK_SHADER_STAGE_COMPUTE)
- layout = cmd_buffer->state.compute_pipeline->layout;
- else
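+ /* 'bias' reserves the leading binding table slots: one per render
+  * target for fragment shaders, and slot 0 for gl_NumWorkGroups in
+  * compute shaders. */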
+ switch (stage) {
+ case MESA_SHADER_FRAGMENT:
layout = cmd_buffer->state.pipeline->layout;
-
- if (stage == VK_SHADER_STAGE_FRAGMENT) {
bias = MAX_RTS;
color_count = subpass->color_count;
- } else {
+ break;
+ case MESA_SHADER_COMPUTE:
+ layout = cmd_buffer->state.compute_pipeline->layout;
+ bias = 1;
+ color_count = 0;
+ break;
+ default:
+ layout = cmd_buffer->state.pipeline->layout;
bias = 0;
color_count = 0;
+ break;
}
/* This is a little awkward: layout can be NULL but we still have to
iview->bo, iview->offset);
}
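+ /* If the compute shader uses gl_NumWorkGroups, emit a surface for the
+  * buffer holding the three dispatch dimensions (12 bytes) into the
+  * reserved binding table slot 0. */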
+ if (stage == MESA_SHADER_COMPUTE &&
+ cmd_buffer->state.compute_pipeline->cs_prog_data.uses_num_work_groups) {
+ struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
+ uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;
+
+ struct anv_state surface_state =
+ anv_cmd_buffer_alloc_surface_state(cmd_buffer);
+
+ const struct anv_format *format =
+ anv_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+ anv_fill_buffer_surface_state(cmd_buffer->device, surface_state.map,
+ format->surface_format, bo_offset, 12, 1);
+
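+ /* Without LLC the GPU does not snoop the CPU cache, so flush the
+  * freshly written surface state explicitly. */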
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(surface_state);
+
+ bt_map[0] = surface_state.offset + state_offset;
+ add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
+ }
+
if (layout == NULL)
- return VK_SUCCESS;
+ goto out;
+ if (layout->stage[stage].image_count > 0) {
+ VkResult result =
+ anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
+ if (result != VK_SUCCESS)
+ return result;
+
+ cmd_buffer->state.push_constants_dirty |= 1 << stage;
+ }
+
+ uint32_t image = 0;
for (uint32_t s = 0; s < layout->stage[stage].surface_count; s++) {
struct anv_pipeline_binding *binding =
&layout->stage[stage].surface_to_descriptor[s];
/* Nothing for us to do here */
continue;
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
- bo = desc->buffer->bo;
- bo_offset = desc->buffer->offset + desc->offset;
-
- surface_state =
- anv_cmd_buffer_alloc_surface_state(cmd_buffer);
-
- fill_descriptor_buffer_surface_state(cmd_buffer->device,
- surface_state.map,
- stage, desc->type,
- bo_offset, desc->range);
- break;
- }
-
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
bo_offset = desc->image_view->offset;
break;
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
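+ /* Storage images need a surface state plus a brw_image_param entry in
+  * the push constants for image load/store lowering. */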
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
+ surface_state = desc->image_view->storage_surface_state;
+ bo = desc->image_view->bo;
+ bo_offset = desc->image_view->offset;
+
+ struct brw_image_param *image_param =
+ &cmd_buffer->state.push_constants[stage]->images[image++];
+
+ anv_image_view_fill_image_param(cmd_buffer->device, desc->image_view,
+ image_param);
+ image_param->surface_idx = bias + s;
+ break;
+ }
+
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ surface_state = desc->buffer_view->surface_state;
+ bo = desc->buffer_view->bo;
+ bo_offset = desc->buffer_view->offset;
+ break;
+
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
- assert(!"Unsupported descriptor type");
+ surface_state = desc->buffer_view->storage_surface_state;
+ bo = desc->buffer_view->bo;
+ bo_offset = desc->buffer_view->offset;
+
+ struct brw_image_param *image_param =
+ &cmd_buffer->state.push_constants[stage]->images[image++];
+
+ anv_buffer_view_fill_image_param(cmd_buffer->device, desc->buffer_view,
+ image_param);
+ image_param->surface_idx = bias + s;
break;
+ }
default:
bt_map[bias + s] = surface_state.offset + state_offset;
add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
}
+ assert(image == layout->stage[stage].image_count);
+
+ out:
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(*bt_state);
return VK_SUCCESS;
}
VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
- VkShaderStage stage, struct anv_state *state)
+ gl_shader_stage stage, struct anv_state *state)
{
struct anv_pipeline_layout *layout;
uint32_t sampler_count;
- if (stage == VK_SHADER_STAGE_COMPUTE)
+ if (stage == MESA_SHADER_COMPUTE)
layout = cmd_buffer->state.compute_pipeline->layout;
else
layout = cmd_buffer->state.pipeline->layout;
sampler->state, sizeof(sampler->state));
}
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(*state);
+
return VK_SUCCESS;
}
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
- uint32_t *a, uint32_t dwords, uint32_t alignment)
+ const void *data, uint32_t size, uint32_t alignment)
{
struct anv_state state;
- state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
- dwords * 4, alignment);
- memcpy(state.map, a, dwords * 4);
+ state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
+ memcpy(state.map, data, size);
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(state);
- VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, dwords * 4));
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
return state;
}
for (uint32_t i = 0; i < dwords; i++)
p[i] = a[i] | b[i];
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(state);
+
VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
return state;
}
}
-void anv_CmdSetEvent(
- VkCommandBuffer commandBuffer,
- VkEvent event,
- VkPipelineStageFlags stageMask)
-{
- stub();
-}
-
-void anv_CmdResetEvent(
- VkCommandBuffer commandBuffer,
- VkEvent event,
- VkPipelineStageFlags stageMask)
-{
- stub();
-}
-
-void anv_CmdWaitEvents(
- VkCommandBuffer commandBuffer,
- uint32_t eventCount,
- const VkEvent* pEvents,
- VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags destStageMask,
- uint32_t memBarrierCount,
- const void* const* ppMemBarriers)
-{
- stub();
-}
-
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
- VkShaderStage stage)
+ gl_shader_stage stage)
{
struct anv_push_constants *data =
cmd_buffer->state.push_constants[stage];
u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
}
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(state);
+
+ return state;
+}
+
+struct anv_state
+anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
+{
+ struct anv_push_constants *data =
+ cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
+ const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+
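+ /* Per-thread layout: the local invocation ID payload followed by the
+  * uniform values, padded out to a whole 32-byte register. */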
+ const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
+ const unsigned push_constant_data_size =
+ (local_id_dwords + prog_data->nr_params) * 4;
+ const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
+ const unsigned param_aligned_count =
+ reg_aligned_constant_size / sizeof(uint32_t);
+
+ /* If we don't actually have any push constants, bail. */
+ if (reg_aligned_constant_size == 0)
+ return (struct anv_state) { .offset = 0 };
+
+ const unsigned threads = pipeline->cs_thread_width_max;
+ const unsigned total_push_constants_size =
+ reg_aligned_constant_size * threads;
+ const unsigned push_constant_alignment =
+ cmd_buffer->device->info.gen < 8 ? 32 : 64;
+ const unsigned aligned_total_push_constants_size =
+ ALIGN(total_push_constants_size, push_constant_alignment);
+ struct anv_state state =
+ anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+ aligned_total_push_constants_size,
+ push_constant_alignment);
+
+ /* Walk through the param array and fill the buffer with data */
+ uint32_t *u32_map = state.map;
+
+ brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
+ reg_aligned_constant_size);
+
+ /* Setup uniform data for the first thread */
+ for (unsigned i = 0; i < prog_data->nr_params; i++) {
+ uint32_t offset = (uintptr_t)prog_data->param[i];
+ u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
+ }
+
+ /* Copy uniform data from the first thread to every other thread */
+ const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
+ for (unsigned t = 1; t < threads; t++) {
+ memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
+ &u32_map[local_id_dwords],
+ uniform_data_size);
+ }
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(state);
+
return state;
}
const void* pValues)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- VkShaderStage stage;
- for_each_bit(stage, stageFlags) {
+ anv_foreach_stage(stage, stageFlags) {
anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);
memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
VkResult anv_CreateCommandPool(
VkDevice _device,
const VkCommandPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
VkCommandPool* pCmdPool)
{
ANV_FROM_HANDLE(anv_device, device, _device);
struct anv_cmd_pool *pool;
- pool = anv_device_alloc(device, sizeof(*pool), 8,
- VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+ pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
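+ /* Use the caller's allocator if one was provided; otherwise inherit
+  * the device's allocator for everything created from this pool. */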
+ if (pAllocator)
+ pool->alloc = *pAllocator;
+ else
+ pool->alloc = device->alloc;
+
list_inithead(&pool->cmd_buffers);
*pCmdPool = anv_cmd_pool_to_handle(pool);
void anv_DestroyCommandPool(
VkDevice _device,
- VkCommandPool commandPool)
+ VkCommandPool commandPool,
+ const VkAllocationCallbacks* pAllocator)
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
anv_ResetCommandPool(_device, commandPool, 0);
- anv_device_free(device, pool);
+ anv_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
&pool->cmd_buffers, pool_link) {
- anv_DestroyCommandBuffer(device, anv_cmd_buffer_to_handle(cmd_buffer));
+ anv_cmd_buffer_destroy(cmd_buffer);
}
return VK_SUCCESS;