assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
- module = vk_alloc2(&device->alloc, pAllocator,
+ module = vk_alloc2(&device->vk.alloc, pAllocator,
sizeof(*module) + pCreateInfo->codeSize, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (module == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &module->base,
+ VK_OBJECT_TYPE_SHADER_MODULE);
module->size = pCreateInfo->codeSize;
memcpy(module->data, pCreateInfo->pCode, module->size);
if (!module)
return;
- vk_free2(&device->alloc, pAllocator, module);
+ vk_object_base_finish(&module->base);
+ vk_free2(&device->vk.alloc, pAllocator, module);
}
#define SPIR_V_MAGIC_NUMBER 0x07230203
struct nir_spirv_specialization *spec_entries = NULL;
if (spec_info && spec_info->mapEntryCount > 0) {
num_spec_entries = spec_info->mapEntryCount;
- spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
+ spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
for (uint32_t i = 0; i < num_spec_entries; i++) {
VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
const void *data = spec_info->pData + entry.offset;
assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
spec_entries[i].id = spec_info->pMapEntries[i].constantID;
- if (spec_info->dataSize == 8)
- spec_entries[i].data64 = *(const uint64_t *)data;
- else
- spec_entries[i].data32 = *(const uint32_t *)data;
+ switch (entry.size) {
+ case 8:
+ spec_entries[i].value.u64 = *(const uint64_t *)data;
+ break;
+ case 4:
+ spec_entries[i].value.u32 = *(const uint32_t *)data;
+ break;
+ case 2:
+ spec_entries[i].value.u16 = *(const uint16_t *)data;
+ break;
+ case 1:
+ spec_entries[i].value.u8 = *(const uint8_t *)data;
+ break;
+ default:
+ assert(!"Invalid spec constant size");
+ break;
+ }
}
}
NIR_PASS_V(nir, nir_split_per_member_structs);
NIR_PASS_V(nir, nir_remove_dead_variables,
- nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
+ nir_var_shader_in | nir_var_shader_out | nir_var_system_value,
+ NULL);
NIR_PASS_V(nir, nir_propagate_invariant);
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
return;
anv_reloc_list_finish(&pipeline->batch_relocs,
- pAllocator ? pAllocator : &device->alloc);
+ pAllocator ? pAllocator : &device->vk.alloc);
ralloc_free(pipeline->mem_ctx);
unreachable("invalid pipeline type");
}
- vk_free2(&device->alloc, pAllocator, pipeline);
+ vk_object_base_finish(&pipeline->base);
+ vk_free2(&device->vk.alloc, pAllocator, pipeline);
}
static const uint32_t vk_to_gen_primitive_type[] = {
nir_var_mem_shared, shared_type_info);
NIR_PASS_V(stage.nir, nir_lower_explicit_io,
nir_var_mem_shared, nir_address_format_32bit_offset);
+ NIR_PASS_V(stage.nir, brw_nir_lower_cs_intrinsics);
stage.num_stats = 1;
stage.code = brw_compile_cs(compiler, pipeline->base.device, mem_ctx,
return VK_SUCCESS;
}
+/* Compute the dispatch parameters for a compiled compute pipeline:
+ * the flattened workgroup size, the SIMD width chosen by the backend
+ * compiler for that size, and the number of HW threads needed per
+ * workgroup.
+ *
+ * NOTE(review): struct anv_cs_parameters is declared elsewhere in the
+ * file/header — returned by value, so presumably a small POD; confirm.
+ */
+struct anv_cs_parameters
+anv_cs_parameters(const struct anv_compute_pipeline *pipeline)
+{
+ const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
+
+ struct anv_cs_parameters cs_params = {};
+
+ /* Total invocations per workgroup = product of the local size dims. */
+ cs_params.group_size = cs_prog_data->local_size[0] *
+ cs_prog_data->local_size[1] *
+ cs_prog_data->local_size[2];
+ /* Let the backend pick the SIMD width for this group size and device. */
+ cs_params.simd_size =
+ brw_cs_simd_size_for_group_size(&pipeline->base.device->info,
+ cs_prog_data, cs_params.group_size);
+ /* HW threads required to cover the workgroup at the chosen width. */
+ cs_params.threads = DIV_ROUND_UP(cs_params.group_size, cs_params.simd_size);
+
+ return cs_params;
+}
+
/**
* Copy pipeline state not marked as dynamic.
* Dynamic state is pipeline state which hasn't been provided at pipeline
anv_pipeline_validate_create_info(pCreateInfo);
if (alloc == NULL)
- alloc = &device->alloc;
+ alloc = &device->vk.alloc;
+ vk_object_base_init(&device->vk, &pipeline->base.base,
+ VK_OBJECT_TYPE_PIPELINE);
pipeline->base.device = device;
pipeline->base.type = ANV_PIPELINE_GRAPHICS;
stat->value.u64 = exe->stats.instructions;
}
+ vk_outarray_append(&out, stat) {
+ WRITE_STR(stat->name, "SEND Count");
+ WRITE_STR(stat->description,
+ "Number of instructions in the final generated shader "
+ "executable which access external units such as the "
+ "constant cache or the sampler.");
+ stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ stat->value.u64 = exe->stats.sends;
+ }
+
vk_outarray_append(&out, stat) {
WRITE_STR(stat->name, "Loop Count");
WRITE_STR(stat->description,
"Number of bytes of workgroup shared memory used by this "
"compute shader including any padding.");
stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
- stat->value.u64 = prog_data->total_scratch;
+ stat->value.u64 = brw_cs_prog_data_const(prog_data)->slm_size;
}
}