nir_lower_divmod64 |
nir_lower_minmax64 |
nir_lower_iabs64,
+ .lower_doubles_options = nir_lower_drcp |
+ nir_lower_dsqrt |
+ nir_lower_drsq |
+ nir_lower_ddiv,
};
static const struct nir_shader_compiler_options nir_options_aco = {
nir_lower_imul_high64 |
nir_lower_imul_2x32_64 |
nir_lower_divmod64 |
- nir_lower_logic64 |
nir_lower_minmax64 |
nir_lower_iabs64,
+ .lower_doubles_options = nir_lower_drcp |
+ nir_lower_dsqrt |
+ nir_lower_drsq |
+ nir_lower_ddiv,
};
bool
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
assert(pCreateInfo->flags == 0);
- module = vk_alloc2(&device->alloc, pAllocator,
+ module = vk_alloc2(&device->vk.alloc, pAllocator,
sizeof(*module) + pCreateInfo->codeSize, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (module == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
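+ /* Initialize the common object base shared by all Vulkan objects
+ * (used for object-type tracking, e.g. by debug utils).
+ */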
+ vk_object_base_init(&device->vk, &module->base,
+ VK_OBJECT_TYPE_SHADER_MODULE);
+
module->nir = NULL;
module->size = pCreateInfo->codeSize;
memcpy(module->data, pCreateInfo->pCode, module->size);
if (!module)
return;
- vk_free2(&device->alloc, pAllocator, module);
+ vk_object_base_finish(&module->base);
+ vk_free2(&device->vk.alloc, pAllocator, module);
}
void
NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
NIR_PASS(progress, shader, nir_opt_dead_write_vars);
NIR_PASS(progress, shader, nir_remove_dead_variables,
- nir_var_function_temp | nir_var_shader_in | nir_var_shader_out);
+ nir_var_function_temp | nir_var_shader_in | nir_var_shader_out,
+ NULL);
NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS_V(shader, nir_lower_phis_to_scalar);
{
nir_shader *nir;
const nir_shader_compiler_options *nir_options =
- device->physical_device->use_aco ? &nir_options_aco :
- &nir_options_llvm;
+ device->physical_device->use_llvm ? &nir_options_llvm :
+ &nir_options_aco;
if (module->nir) {
/* Some things such as our meta clear/blit code will give us a NIR
struct nir_spirv_specialization *spec_entries = NULL;
if (spec_info && spec_info->mapEntryCount > 0) {
num_spec_entries = spec_info->mapEntryCount;
- spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
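+ /* Zero-initialize so the unused bytes of each value union below are
+ * well defined.
+ */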
+ spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
for (uint32_t i = 0; i < num_spec_entries; i++) {
VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
const void *data = spec_info->pData + entry.offset;
spec_entries[i].id = spec_info->pMapEntries[i].constantID;
switch (entry.size) {
case 8:
- spec_entries[i].data64 = *(const uint64_t *)data;
+ spec_entries[i].value.u64 = *(const uint64_t *)data;
break;
case 4:
- spec_entries[i].data32 = *(const uint32_t *)data;
+ spec_entries[i].value.u32 = *(const uint32_t *)data;
break;
case 2:
- spec_entries[i].data32 = *(const uint16_t *)data;
+ spec_entries[i].value.u16 = *(const uint16_t *)data;
break;
case 1:
- spec_entries[i].data32 = *(const uint8_t *)data;
+ spec_entries[i].value.u8 = *(const uint8_t *)data;
break;
default:
assert(!"Invalid spec constant size");
.caps = {
.amd_fragment_mask = true,
.amd_gcn_shader = true,
+ .amd_image_gather_bias_lod = true,
.amd_image_read_write_lod = true,
- .amd_shader_ballot = device->physical_device->use_shader_ballot,
+ .amd_shader_ballot = true,
.amd_shader_explicit_vertex_parameter = true,
.amd_trinary_minmax = true,
- .demote_to_helper_invocation = device->physical_device->use_aco,
+ .demote_to_helper_invocation = true,
.derivative_group = true,
.descriptor_array_dynamic_indexing = true,
.descriptor_array_non_uniform_indexing = true,
.device_group = true,
.draw_parameters = true,
.float_controls = true,
- .float16 = device->physical_device->rad_info.has_double_rate_fp16 && !device->physical_device->use_aco,
+ .float16 = device->physical_device->rad_info.has_packed_math_16bit,
+ .float32_atomic_add = true,
.float64 = true,
.geometry_streams = true,
.image_ms_array = true,
.image_read_without_format = true,
.image_write_without_format = true,
- .int8 = !device->physical_device->use_aco,
- .int16 = !device->physical_device->use_aco,
+ .int8 = true,
+ .int16 = true,
.int64 = true,
.int64_atomics = true,
+ .min_lod = true,
.multiview = true,
.physical_storage_buffer_address = true,
.post_depth_coverage = true,
.shader_clock = true,
.shader_viewport_index_layer = true,
.stencil_export = true,
- .storage_8bit = !device->physical_device->use_aco,
- .storage_16bit = !device->physical_device->use_aco,
+ .storage_8bit = true,
+ .storage_16bit = true,
.storage_image_ms = true,
.subgroup_arithmetic = true,
.subgroup_ballot = true,
NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
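+ /* Copy propagation cleans up the copies introduced by function
+ * inlining before the deref optimization below runs.
+ */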
+ NIR_PASS_V(nir, nir_copy_prop);
NIR_PASS_V(nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */
NIR_PASS_V(nir, nir_split_per_member_structs);
if (nir->info.stage == MESA_SHADER_FRAGMENT &&
- device->physical_device->use_aco)
+ !device->physical_device->use_llvm)
NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
if (nir->info.stage == MESA_SHADER_FRAGMENT)
NIR_PASS_V(nir, nir_lower_input_attachments, true);
NIR_PASS_V(nir, nir_remove_dead_variables,
- nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);
+ nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared,
+ NULL);
NIR_PASS_V(nir, nir_propagate_invariant);
NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
NIR_PASS_V(nir, nir_lower_discard_to_demote);
+
+ nir_lower_doubles_options lower_doubles =
+ nir->options->lower_doubles_options;
+
+ if (device->physical_device->rad_info.chip_class == GFX6) {
+ /* GFX6 doesn't support v_floor_f64, and the precision of
+ * v_fract_f64, which is used to implement the 64-bit floor,
+ * is lower than what Vulkan requires.
+ */
+ lower_doubles |= nir_lower_dfloor;
+ }
+
+ NIR_PASS_V(nir, nir_lower_doubles, NULL, lower_doubles);
}
/* Vulkan uses the separate-shader linking model */
nir_split_var_copies(nir);
nir_lower_global_vars_to_local(nir);
- nir_remove_dead_variables(nir, nir_var_function_temp);
+ nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
bool gfx7minus = device->physical_device->rad_info.chip_class <= GFX7;
nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
.subgroup_size = subgroup_size,
.lower_vote_eq_to_ballot = 1,
.lower_quad_broadcast_dynamic = 1,
.lower_quad_broadcast_dynamic_to_const = gfx7minus,
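+ /* Lower shuffles with suitable constant indices to the cheaper
+ * masked_swizzle_amd.
+ */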
+ .lower_shuffle_to_swizzle_amd = 1,
});
nir_lower_load_const_to_scalar(nir);
}
-void *
+static void *
radv_alloc_shader_memory(struct radv_device *device,
struct radv_shader_variant *shader)
{
}
offset = align_u64(s->bo_offset + s->code_size, 256);
}
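+ /* offset can point past the end of the slab; check that first so the
+ * unsigned subtraction below cannot wrap around.
+ */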
- if (slab->size - offset >= shader->code_size) {
+ if (offset <= slab->size && slab->size - offset >= shader->code_size) {
shader->bo = slab->bo;
shader->bo_offset = offset;
list_addtail(&shader->slab_list, &slab->shaders);
mtx_unlock(&device->shader_slab_mutex);
struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));
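+ /* A single shader can be bigger than the default slab size. */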
- slab->size = 256 * 1024;
+ slab->size = MAX2(256 * 1024, shader->code_size);
slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
RADEON_DOMAIN_VRAM,
RADEON_FLAG_NO_INTERPROCESS_SHARING |
(device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
0 : RADEON_FLAG_READ_ONLY),
RADV_BO_PRIORITY_SHADER);
+ if (!slab->bo) {
+ free(slab);
+ return NULL;
+ }
+
slab->ptr = (char*)device->ws->buffer_map(slab->bo);
+ if (!slab->ptr) {
+ device->ws->buffer_destroy(slab->bo);
+ free(slab);
+ return NULL;
+ }
+
list_inithead(&slab->shaders);
mtx_lock(&device->shader_slab_mutex);
return NULL;
}
- if (!ac_rtld_read_config(&rtld_binary, &config)) {
+ if (!ac_rtld_read_config(&device->physical_device->rad_info,
+ &rtld_binary, &config)) {
ac_rtld_close(&rtld_binary);
free(variant);
return NULL;
radv_postprocess_config(device->physical_device, &config, &binary->info,
binary->stage, &variant->config);
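+ /* Allocate slab memory for the shader code; fail cleanly if the
+ * allocation does not succeed.
+ */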
- if (radv_device_use_secure_compile(device->instance)) {
+ void *dest_ptr = radv_alloc_shader_memory(device, variant);
+ if (!dest_ptr) {
if (binary->type == RADV_BINARY_TYPE_RTLD)
ac_rtld_close(&rtld_binary);
-
- return variant;
+ free(variant);
+ return NULL;
}
- void *dest_ptr = radv_alloc_shader_memory(device, variant);
-
if (binary->type == RADV_BINARY_TYPE_RTLD) {
struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
struct ac_rtld_upload_info info = {
options->address32_hi = device->physical_device->rad_info.address32_hi;
options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
options->use_ngg_streamout = device->physical_device->use_ngg_streamout;
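+ /* Per-app workaround (driconf) that flushes NaN MRT outputs to zero. */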
+ options->enable_mrt_output_nan_fixup = device->instance->enable_mrt_output_nan_fixup;
struct radv_shader_args args = {};
args.options = options;
shader_count >= 2 ? shaders[shader_count - 2]->info.stage
: MESA_SHADER_VERTEX);
- if (!device->physical_device->use_aco ||
+ if (device->physical_device->use_llvm ||
options->dump_shader || options->record_ir)
ac_init_llvm_once();
- if (device->physical_device->use_aco) {
- aco_compile_shader(shader_count, shaders, &binary, &args);
- } else {
+ if (device->physical_device->use_llvm) {
llvm_compile_shader(device, shader_count, shaders, &binary, &args);
+ } else {
+ aco_compile_shader(shader_count, shaders, &binary, &args);
}
binary->info = *info;
if (key)
options.key = *key;
- options.explicit_scratch_args = device->physical_device->use_aco;
+ options.explicit_scratch_args = !device->physical_device->use_llvm;
options.robust_buffer_access = device->robust_buffer_access;
return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage, info,
{
struct radv_nir_compiler_options options = {0};
- options.explicit_scratch_args = device->physical_device->use_aco;
+ options.explicit_scratch_args = !device->physical_device->use_llvm;
options.key.has_multiview_view_index = multiview;
return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,