   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+  vk_object_base_init(&device->vk, &cmd_buffer->base,
+                      VK_OBJECT_TYPE_COMMAND_BUFFER);
+
   cmd_buffer->batch.status = VK_SUCCESS;
-  cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;
   anv_cmd_state_finish(cmd_buffer);
+  vk_object_base_finish(&cmd_buffer->base);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
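/*
 * Context for the two hunks above: vk_object_base_init() subsumes the
 * hand-rolled "_loader_data.loaderMagic = ICD_LOADER_MAGIC" assignment it
 * replaces, stamping the loader magic and recording the VkObjectType for
 * the common Vulkan runtime. A minimal sketch of the init/finish pairing,
 * assuming mesa's vk_object/vk_alloc helpers; anv_widget is a made-up
 * object type for illustration, not a real anv struct:
 */
struct anv_widget {
   struct vk_object_base base;   /* conventionally the first member */
   /* ... driver-specific state ... */
};

static struct anv_widget *
anv_widget_create(struct anv_device *device)
{
   struct anv_widget *widget =
      vk_alloc(&device->vk.alloc, sizeof(*widget), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (widget == NULL)
      return NULL;

   vk_object_base_init(&device->vk, &widget->base, VK_OBJECT_TYPE_UNKNOWN);
   return widget;
}

static void
anv_widget_destroy(struct anv_device *device, struct anv_widget *widget)
{
   vk_object_base_finish(&widget->base);   /* finish before freeing */
   vk_free(&device->vk.alloc, widget);
}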
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
+  cmd_buffer->perf_query_pool = NULL;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);
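/*
 * The new line above clears the performance-query-pool pointer cached on
 * the command buffer, so a reset and recycled command buffer does not
 * carry a stale reference into its next recording. Illustrative sketch
 * only; the helper name below is hypothetical, not an anv entry point:
 */
static void
sketch_begin_perf_query(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_query_pool *pool)
{
   /* remembered until the command buffer is reset or destroyed */
   cmd_buffer->perf_query_pool = pool;
   /* ... emit the query-begin commands ... */
}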
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
-  const uint32_t threads = anv_cs_threads(pipeline);
+  const struct anv_cs_parameters cs_params = anv_cs_parameters(pipeline);
   const unsigned total_push_constants_size =
-     brw_cs_push_const_total_size(cs_prog_data, threads);
+     brw_cs_push_const_total_size(cs_prog_data, cs_params.threads);
   if (total_push_constants_size == 0)
      return (struct anv_state) { .offset = 0 };
}
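/*
 * The bare thread count is replaced by a parameter bundle so callers can
 * also see the SIMD width and workgroup size that produced it. Sketch of
 * the struct shape this hunk assumes (the exact fields and comments in
 * anv_private.h may differ):
 */
struct anv_cs_parameters_sketch {
   uint32_t group_size;   /* local_size_x * local_size_y * local_size_z */
   uint32_t simd_size;    /* SIMD width chosen for the compute kernel */
   uint32_t threads;      /* DIV_ROUND_UP(group_size, simd_size) */
};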
   if (cs_prog_data->push.per_thread.size > 0) {
-     for (unsigned t = 0; t < threads; t++) {
+     for (unsigned t = 0; t < cs_params.threads; t++) {
         memcpy(dst, src, cs_prog_data->push.per_thread.size);
         uint32_t *subgroup_id = dst +
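/*
 * What the loop above does, modeled in isolation: each of the
 * cs_params.threads hardware threads gets its own copy of the per-thread
 * push-constant block, with one dword patched to hold that thread's
 * subgroup ID. Self-contained sketch (offsets and sizes are parameters
 * here; the driver derives them from cs_prog_data and the push range):
 */
#include <stdint.h>
#include <string.h>

static void
fill_per_thread_push(uint8_t *dst, const uint8_t *src,
                     uint32_t per_thread_size,
                     uint32_t subgroup_id_offset,
                     uint32_t threads)
{
   for (uint32_t t = 0; t < threads; t++) {
      memcpy(dst, src, per_thread_size);       /* shared contents */
      uint32_t *id = (uint32_t *)(dst + subgroup_id_offset);
      *id = t;                                 /* unique subgroup ID */
      dst += per_thread_size;                  /* next thread's copy */
   }
}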
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+  vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_COMMAND_POOL);
+
   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      anv_cmd_buffer_destroy(cmd_buffer);
   }
+  vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}
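/*
 * Same init/finish pairing for the pool. Note the ordering:
 * vk_object_base_finish() runs while `pool` is still valid, then
 * vk_free2() frees it. vk_free2(parent, pAllocator, ptr) frees with
 * pAllocator when the application supplied one and falls back to the
 * parent (device) allocator otherwise, mirroring the create-time choice
 * of pool->alloc above. Condensed sketch of the destroy tail; the struct
 * name follows anv conventions and is shown only for shape:
 */
static void
sketch_destroy_pool(struct anv_device *device, struct anv_cmd_pool *pool,
                    const VkAllocationCallbacks *pAllocator)
{
   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}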