projects
/
mesa.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
nir: Properly preserve metadata in more cases
[mesa.git]
/
src
/
intel
/
vulkan
/
anv_cmd_buffer.c
diff --git a/src/intel/vulkan/anv_cmd_buffer.c b/src/intel/vulkan/anv_cmd_buffer.c
index 03af604de4c9b3b8e06d58205e4e4f7a0355d0e4..1ca33f206aa82f41cbb877af7bf976f98d8f3362 100644
(file)
--- a/src/intel/vulkan/anv_cmd_buffer.c
+++ b/src/intel/vulkan/anv_cmd_buffer.c
@@ -203,9 +203,11 @@
static VkResult anv_create_cmd_buffer(
if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &cmd_buffer->base,
+ VK_OBJECT_TYPE_COMMAND_BUFFER);
+
cmd_buffer->batch.status = VK_SUCCESS;
cmd_buffer->batch.status = VK_SUCCESS;
- cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
@@ -280,6 +282,7 @@
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
anv_cmd_state_finish(cmd_buffer);
anv_cmd_state_finish(cmd_buffer);
+ vk_object_base_finish(&cmd_buffer->base);
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
@@ -303,6 +306,7 @@
VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
cmd_buffer->usage_flags = 0;
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
cmd_buffer->usage_flags = 0;
+ cmd_buffer->perf_query_pool = NULL;
anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
anv_cmd_state_reset(cmd_buffer);
anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
anv_cmd_state_reset(cmd_buffer);
@@ -834,9 +838,9 @@
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
-   const uint32_t threads = anv_cs_threads(pipeline);
+   const struct anv_cs_parameters cs_params = anv_cs_parameters(pipeline);
const unsigned total_push_constants_size =
const unsigned total_push_constants_size =
-      brw_cs_push_const_total_size(cs_prog_data, threads);
+      brw_cs_push_const_total_size(cs_prog_data, cs_params.threads);
if (total_push_constants_size == 0)
return (struct anv_state) { .offset = 0 };
if (total_push_constants_size == 0)
return (struct anv_state) { .offset = 0 };
@@ -859,7 +863,7 @@
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
}
if (cs_prog_data->push.per_thread.size > 0) {
}
if (cs_prog_data->push.per_thread.size > 0) {
-      for (unsigned t = 0; t < threads; t++) {
+      for (unsigned t = 0; t < cs_params.threads; t++) {
memcpy(dst, src, cs_prog_data->push.per_thread.size);
uint32_t *subgroup_id = dst +
memcpy(dst, src, cs_prog_data->push.per_thread.size);
uint32_t *subgroup_id = dst +
@@ -906,6 +910,8 @@
VkResult anv_CreateCommandPool(
if (pool == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (pool == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_COMMAND_POOL);
+
if (pAllocator)
pool->alloc = *pAllocator;
else
if (pAllocator)
pool->alloc = *pAllocator;
else
@@ -934,6 +940,7 @@
void anv_DestroyCommandPool(
anv_cmd_buffer_destroy(cmd_buffer);
}
anv_cmd_buffer_destroy(cmd_buffer);
}
+ vk_object_base_finish(&pool->base);
vk_free2(&device->vk.alloc, pAllocator, pool);
}
vk_free2(&device->vk.alloc, pAllocator, pool);
}