X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_device.c;h=31f486d3d75615531c9fc29a595f14ad2c757561;hp=cf41a03b254841a9782089546d8afa0ab1c5f11e;hb=cacb388b3b720b63d17e08aabf2bd86d116f4506;hpb=5bc18b79a4625ae39f1d44e8139017834a53736c diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c index cf41a03b254..31f486d3d75 100644 --- a/src/amd/vulkan/radv_device.c +++ b/src/amd/vulkan/radv_device.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -63,7 +64,7 @@ #include "util/timespec.h" #include "util/u_atomic.h" #include "compiler/glsl_types.h" -#include "util/xmlpool.h" +#include "util/driconf.h" static struct radv_timeline_point * radv_timeline_find_point_at_least_locked(struct radv_device *device, @@ -83,6 +84,25 @@ static void radv_destroy_semaphore_part(struct radv_device *device, struct radv_semaphore_part *part); +static VkResult +radv_create_pthread_cond(pthread_cond_t *cond); + +uint64_t radv_get_current_time(void) +{ + struct timespec tv; + clock_gettime(CLOCK_MONOTONIC, &tv); + return tv.tv_nsec + tv.tv_sec*1000000000ull; +} + +static uint64_t radv_get_absolute_timeout(uint64_t timeout) +{ + uint64_t current_time = radv_get_current_time(); + + timeout = MIN2(UINT64_MAX - current_time, timeout); + + return current_time + timeout; +} + static int radv_device_get_cache_uuid(enum radeon_family family, void *uuid) { @@ -162,24 +182,13 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device) unsigned type_count = 0; - if (device->rad_info.has_dedicated_vram) { - if (vram_index >= 0) { - device->memory_domains[type_count] = RADEON_DOMAIN_VRAM; - device->memory_flags[type_count] = RADEON_FLAG_NO_CPU_ACCESS; - device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { - .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, - .heapIndex = vram_index, - }; - } - } else { - if (visible_vram_index >= 0) { - device->memory_domains[type_count] = RADEON_DOMAIN_VRAM; - device->memory_flags[type_count] = RADEON_FLAG_NO_CPU_ACCESS; - device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { - .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, - .heapIndex = visible_vram_index, - }; - } + if (vram_index >= 0 || visible_vram_index >= 0) { + device->memory_domains[type_count] = RADEON_DOMAIN_VRAM; + device->memory_flags[type_count] = RADEON_FLAG_NO_CPU_ACCESS; + device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { + .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, + .heapIndex = vram_index >= 0 ? vram_index : visible_vram_index, + }; } if (gart_index >= 0) { @@ -241,7 +250,7 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device) static const char * radv_get_compiler_string(struct radv_physical_device *pdevice) { - if (pdevice->use_aco) { + if (!pdevice->use_llvm) { /* Some games like SotTR apply shader workarounds if the LLVM * version is too old or if the LLVM version string is * missing. This gives 2-5% performance with SotTR and ACO. @@ -349,7 +358,7 @@ radv_physical_device_try_create(struct radv_instance *instance, device->local_fd = fd; device->ws->query_info(device->ws, &device->rad_info); - device->use_aco = instance->perftest_flags & RADV_PERFTEST_ACO; + device->use_llvm = instance->debug_flags & RADV_DEBUG_LLVM; snprintf(device->name, sizeof(device->name), "AMD RADV %s (%s)", @@ -362,7 +371,7 @@ radv_physical_device_try_create(struct radv_instance *instance, } /* These flags affect shader compilation. 
*/
- uint64_t shader_env_flags = (device->use_aco ? 0x2 : 0);
+ uint64_t shader_env_flags = (device->use_llvm ? 0 : 0x2);

/* The gpu id is already embedded in the uuid so we just pass "radv"
* when creating the cache.
@@ -383,15 +392,12 @@ radv_physical_device_try_create(struct radv_instance *instance,
device->dcc_msaa_allowed =
(device->instance->perftest_flags & RADV_PERFTEST_DCC_MSAA);

- device->use_shader_ballot = (device->use_aco && device->rad_info.chip_class >= GFX8) ||
- (device->instance->perftest_flags & RADV_PERFTEST_SHADER_BALLOT);
-
device->use_ngg = device->rad_info.chip_class >= GFX10 &&
device->rad_info.family != CHIP_NAVI14 &&
!(device->instance->debug_flags & RADV_DEBUG_NO_NGG);

/* TODO: Implement NGG GS with ACO. */
- device->use_ngg_gs = device->use_ngg && !device->use_aco;
+ device->use_ngg_gs = device->use_ngg && device->use_llvm;
device->use_ngg_streamout = false;

/* Determine the number of threads per wave for all stages. */
@@ -443,7 +449,8 @@ fail_wsi:
fail_alloc:
vk_free(&instance->alloc, device);
fail_fd:
- close(fd);
+ if (fd != -1)
+ close(fd);
if (master_fd != -1)
close(master_fd);
return result;
@@ -511,12 +518,12 @@ static const struct debug_control radv_debug_options[] = {
{"checkir", RADV_DEBUG_CHECKIR},
{"nothreadllvm", RADV_DEBUG_NOTHREADLLVM},
{"nobinning", RADV_DEBUG_NOBINNING},
- {"noloadstoreopt", RADV_DEBUG_NO_LOAD_STORE_OPT},
{"nongg", RADV_DEBUG_NO_NGG},
- {"noshaderballot", RADV_DEBUG_NO_SHADER_BALLOT},
{"allentrypoints", RADV_DEBUG_ALL_ENTRYPOINTS},
{"metashaders", RADV_DEBUG_DUMP_META_SHADERS},
{"nomemorycache", RADV_DEBUG_NO_MEMORY_CACHE},
+ {"llvm", RADV_DEBUG_LLVM},
+ {"forcecompress", RADV_DEBUG_FORCE_COMPRESS},
{NULL, 0}
};

@@ -531,13 +538,11 @@ static const struct debug_control radv_perftest_options[] = {
{"localbos", RADV_PERFTEST_LOCAL_BOS},
{"dccmsaa", RADV_PERFTEST_DCC_MSAA},
{"bolist", RADV_PERFTEST_BO_LIST},
- {"shader_ballot", RADV_PERFTEST_SHADER_BALLOT},
{"tccompatcmask", RADV_PERFTEST_TC_COMPAT_CMASK},
{"cswave32", RADV_PERFTEST_CS_WAVE_32},
{"pswave32", RADV_PERFTEST_PS_WAVE_32},
{"gewave32", RADV_PERFTEST_GE_WAVE_32},
{"dfsm", RADV_PERFTEST_DFSM},
- {"aco", RADV_PERFTEST_ACO},
{NULL, 0}
};

@@ -553,41 +558,51 @@ radv_handle_per_app_options(struct radv_instance *instance,
const VkApplicationInfo *info)
{
const char *name = info ? info->pApplicationName : NULL;
-
- if (!name)
- return;
-
- if (!strcmp(name, "DOOM_VFR")) {
- /* Work around a Doom VFR game bug */
- instance->debug_flags |= RADV_DEBUG_NO_DYNAMIC_BOUNDS;
- } else if (!strcmp(name, "MonsterHunterWorld.exe")) {
- /* Workaround for a WaW hazard when LLVM moves/merges
- * load/store memory operations.
- * See https://reviews.llvm.org/D61313
- */
- if (LLVM_VERSION_MAJOR < 9)
- instance->debug_flags |= RADV_DEBUG_NO_LOAD_STORE_OPT;
- } else if (!strcmp(name, "Wolfenstein: Youngblood")) {
- if (!(instance->debug_flags & RADV_DEBUG_NO_SHADER_BALLOT) &&
- !(instance->perftest_flags & RADV_PERFTEST_ACO)) {
- /* Force enable VK_AMD_shader_ballot because it looks
- * safe and it gives a nice boost (+20% on Vega 56 at
- * this time). It also prevents corruption on LLVM.
+ const char *engine_name = info ? info->pEngineName : NULL;
+
+ if (name) {
+ if (!strcmp(name, "DOOM_VFR")) {
+ /* Work around a Doom VFR game bug */
+ instance->debug_flags |= RADV_DEBUG_NO_DYNAMIC_BOUNDS;
+ } else if (!strcmp(name, "Fledge")) {
+ /*
+ * Zero VRAM for "The Surge 2"
+ *
+ * This avoids a hang when rendering any level. Likely
+ * uninitialized data in an indirect draw.
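+ *
+ * RADV_DEBUG_ZERO_VRAM makes the winsys clear newly allocated
+ * device memory, so reads of memory the application never
+ * initialized see zeros instead of stale data.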
+ */ + instance->debug_flags |= RADV_DEBUG_ZERO_VRAM; + } else if (!strcmp(name, "No Man's Sky")) { + /* Work around a NMS game bug */ + instance->debug_flags |= RADV_DEBUG_DISCARD_TO_DEMOTE; + } else if (!strcmp(name, "DOOMEternal")) { + /* Zero VRAM for Doom Eternal to fix rendering issues. */ + instance->debug_flags |= RADV_DEBUG_ZERO_VRAM; + } else if (!strcmp(name, "Red Dead Redemption 2")) { + /* Work around a RDR2 game bug */ + instance->debug_flags |= RADV_DEBUG_DISCARD_TO_DEMOTE; + } + } + + if (engine_name) { + if (!strcmp(engine_name, "vkd3d")) { + /* Zero VRAM for all VKD3D (DX12->VK) games to fix + * rendering issues. */ - instance->perftest_flags |= RADV_PERFTEST_SHADER_BALLOT; + instance->debug_flags |= RADV_DEBUG_ZERO_VRAM; + } else if (!strcmp(engine_name, "Quantic Dream Engine")) { + /* Fix various artifacts in Detroit: Become Human */ + instance->debug_flags |= RADV_DEBUG_ZERO_VRAM | + RADV_DEBUG_DISCARD_TO_DEMOTE; } - } else if (!strcmp(name, "Fledge")) { - /* - * Zero VRAM for "The Surge 2" - * - * This avoid a hang when when rendering any level. Likely - * uninitialized data in an indirect draw. - */ - instance->debug_flags |= RADV_DEBUG_ZERO_VRAM; - } else if (!strcmp(name, "No Man's Sky")) { - /* Work around a NMS game bug */ - instance->debug_flags |= RADV_DEBUG_DISCARD_TO_DEMOTE; } + + instance->enable_mrt_output_nan_fixup = + driQueryOptionb(&instance->dri_options, + "radv_enable_mrt_output_nan_fixup"); + + if (driQueryOptionb(&instance->dri_options, "radv_no_dynamic_bounds")) + instance->debug_flags |= RADV_DEBUG_NO_DYNAMIC_BOUNDS; } static const char radv_dri_options_xml[] = @@ -596,7 +611,11 @@ DRI_CONF_BEGIN DRI_CONF_ADAPTIVE_SYNC("true") DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0) DRI_CONF_VK_X11_STRICT_IMAGE_COUNT("false") + DRI_CONF_VK_X11_ENSURE_MIN_IMAGE_COUNT("false") DRI_CONF_RADV_REPORT_LLVM9_VERSION_STRING("false") + DRI_CONF_RADV_ENABLE_MRT_OUTPUT_NAN_FIXUP("false") + DRI_CONF_RADV_NO_DYNAMIC_BOUNDS("false") + DRI_CONF_RADV_OVERRIDE_UNIFORM_OFFSET_ALIGNMENT(0) DRI_CONF_SECTION_END DRI_CONF_SECTION_DEBUG @@ -610,6 +629,8 @@ static void radv_init_dri_options(struct radv_instance *instance) driParseConfigFiles(&instance->dri_options, &instance->available_dri_options, 0, "radv", NULL, + instance->applicationName, + instance->applicationVersion, instance->engineName, instance->engineVersion); } @@ -637,6 +658,11 @@ VkResult radv_CreateInstance( if (pCreateInfo->pApplicationInfo) { const VkApplicationInfo *app = pCreateInfo->pApplicationInfo; + instance->applicationName = + vk_strdup(&instance->alloc, app->pApplicationName, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + instance->applicationVersion = app->applicationVersion; + instance->engineName = vk_strdup(&instance->alloc, app->pEngineName, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); @@ -647,24 +673,29 @@ VkResult radv_CreateInstance( if (instance->apiVersion == 0) instance->apiVersion = VK_API_VERSION_1_0; - /* Get secure compile thread count. 
NOTE: We cap this at 32 */
-#define MAX_SC_PROCS 32
- char *num_sc_threads = getenv("RADV_SECURE_COMPILE_THREADS");
- if (num_sc_threads)
- instance->num_sc_threads = MIN2(strtoul(num_sc_threads, NULL, 10), MAX_SC_PROCS);
-
instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
radv_debug_options);

- /* Disable memory cache when secure compile is set */
- if (radv_device_use_secure_compile(instance))
- instance->debug_flags |= RADV_DEBUG_NO_MEMORY_CACHE;
+ const char *radv_perftest_str = getenv("RADV_PERFTEST");
+ instance->perftest_flags = parse_debug_string(radv_perftest_str,
+ radv_perftest_options);

- instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
- radv_perftest_options);
-
- if (instance->perftest_flags & RADV_PERFTEST_ACO)
- fprintf(stderr, "WARNING: Experimental compiler backend enabled. Here be dragons! Incorrect rendering, GPU hangs and/or resets are likely\n");
+ if (radv_perftest_str) {
+ /* Output warnings for well-known RADV_PERFTEST options that no
+ * longer exist or are deprecated.
+ */
+ if (strstr(radv_perftest_str, "aco")) {
+ fprintf(stderr, "*******************************************************************************\n");
+ fprintf(stderr, "* WARNING: Unknown option RADV_PERFTEST='aco'. ACO is enabled by default now. *\n");
+ fprintf(stderr, "*******************************************************************************\n");
+ }
+ if (strstr(radv_perftest_str, "llvm")) {
+ fprintf(stderr, "*********************************************************************************\n");
+ fprintf(stderr, "* WARNING: Unknown option 'RADV_PERFTEST=llvm'. Did you mean 'RADV_DEBUG=llvm'? *\n");
+ fprintf(stderr, "*********************************************************************************\n");
+ abort();
+ }
+ }

if (instance->debug_flags & RADV_DEBUG_STARTUP)
radv_logi("Created an instance");
@@ -679,6 +710,7 @@ VkResult radv_CreateInstance(

if (idx >= RADV_INSTANCE_EXTENSION_COUNT ||
!radv_instance_extensions_supported.extensions[idx]) {
+ vk_object_base_finish(&instance->base);
vk_free2(&default_alloc, pAllocator, instance);
return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
}
@@ -735,6 +767,7 @@ VkResult radv_CreateInstance(

result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
if (result != VK_SUCCESS) {
+ vk_object_base_finish(&instance->base);
vk_free2(&default_alloc, pAllocator, instance);
return vk_error(instance, result);
}
@@ -766,6 +799,7 @@ void radv_DestroyInstance(
}

vk_free(&instance->alloc, instance->engineName);
+ vk_free(&instance->alloc, instance->applicationName);

VG(VALGRIND_DESTROY_MEMPOOL(instance));

@@ -940,7 +974,7 @@ void radv_GetPhysicalDeviceFeatures(
.shaderCullDistance = true,
.shaderFloat64 = true,
.shaderInt64 = true,
- .shaderInt16 = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8,
+ .shaderInt16 = true,
.sparseBinding = true,
.variableMultisampleRate = true,
.shaderResourceMinLod = true,
@@ -954,10 +988,10 @@ radv_get_physical_device_features_1_1(struct radv_physical_device *pdevice,
{
assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES);

- f->storageBuffer16BitAccess = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8;
- f->uniformAndStorageBuffer16BitAccess = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8;
- f->storagePushConstant16 = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8;
- f->storageInputOutput16 = pdevice->rad_info.has_double_rate_fp16 && (LLVM_VERSION_MAJOR >= 9 || pdevice->use_aco);
+
f->storageBuffer16BitAccess = true; + f->uniformAndStorageBuffer16BitAccess = true; + f->storagePushConstant16 = true; + f->storageInputOutput16 = pdevice->rad_info.has_packed_math_16bit && (LLVM_VERSION_MAJOR >= 9 || !pdevice->use_llvm); f->multiview = true; f->multiviewGeometryShader = true; f->multiviewTessellationShader = true; @@ -976,13 +1010,13 @@ radv_get_physical_device_features_1_2(struct radv_physical_device *pdevice, f->samplerMirrorClampToEdge = true; f->drawIndirectCount = true; - f->storageBuffer8BitAccess = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8; - f->uniformAndStorageBuffer8BitAccess = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8; - f->storagePushConstant8 = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8; - f->shaderBufferInt64Atomics = LLVM_VERSION_MAJOR >= 9; - f->shaderSharedInt64Atomics = LLVM_VERSION_MAJOR >= 9; - f->shaderFloat16 = pdevice->rad_info.has_double_rate_fp16 && !pdevice->use_aco; - f->shaderInt8 = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8; + f->storageBuffer8BitAccess = true; + f->uniformAndStorageBuffer8BitAccess = true; + f->storagePushConstant8 = true; + f->shaderBufferInt64Atomics = LLVM_VERSION_MAJOR >= 9 || !pdevice->use_llvm; + f->shaderSharedInt64Atomics = LLVM_VERSION_MAJOR >= 9 || !pdevice->use_llvm; + f->shaderFloat16 = pdevice->rad_info.has_packed_math_16bit; + f->shaderInt8 = true; f->descriptorIndexing = true; f->shaderInputAttachmentArrayDynamicIndexing = true; @@ -1010,15 +1044,15 @@ radv_get_physical_device_features_1_2(struct radv_physical_device *pdevice, f->scalarBlockLayout = pdevice->rad_info.chip_class >= GFX7; f->imagelessFramebuffer = true; f->uniformBufferStandardLayout = true; - f->shaderSubgroupExtendedTypes = !pdevice->use_aco; + f->shaderSubgroupExtendedTypes = true; f->separateDepthStencilLayouts = true; f->hostQueryReset = true; f->timelineSemaphore = pdevice->rad_info.has_syncobj_wait_for_submit; f->bufferDeviceAddress = true; f->bufferDeviceAddressCaptureReplay = false; f->bufferDeviceAddressMultiDevice = false; - f->vulkanMemoryModel = false; - f->vulkanMemoryModelDeviceScope = false; + f->vulkanMemoryModel = true; + f->vulkanMemoryModelDeviceScope = true; f->vulkanMemoryModelAvailabilityVisibilityChains = false; f->shaderOutputViewportIndex = true; f->shaderOutputLayer = true; @@ -1198,7 +1232,7 @@ void radv_GetPhysicalDeviceFeatures2( case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: { VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *features = (VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *)ext; - features->shaderDemoteToHelperInvocation = pdevice->use_aco; + features->shaderDemoteToHelperInvocation = LLVM_VERSION_MAJOR >= 9 || !pdevice->use_llvm; break; } case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: { @@ -1342,6 +1376,51 @@ void radv_GetPhysicalDeviceFeatures2( features-> pipelineCreationCacheControl = true; break; } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: { + VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *features = + (VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *)ext; + CORE_FEATURE(1, 2, vulkanMemoryModel); + CORE_FEATURE(1, 2, vulkanMemoryModelDeviceScope); + CORE_FEATURE(1, 2, vulkanMemoryModelAvailabilityVisibilityChains); + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: { + VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *features = + (VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *) 
ext;
+ features->extendedDynamicState = true;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT: {
+ VkPhysicalDeviceImageRobustnessFeaturesEXT *features =
+ (VkPhysicalDeviceImageRobustnessFeaturesEXT *)ext;
+ features->robustImageAccess = true;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: {
+ VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features =
+ (VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *)ext;
+ features->shaderBufferFloat32Atomics = true;
+ features->shaderBufferFloat32AtomicAdd = false;
+ features->shaderBufferFloat64Atomics = true;
+ features->shaderBufferFloat64AtomicAdd = false;
+ features->shaderSharedFloat32Atomics = true;
+ features->shaderSharedFloat32AtomicAdd = pdevice->rad_info.chip_class >= GFX8 &&
+ (!pdevice->use_llvm || LLVM_VERSION_MAJOR >= 10);
+ features->shaderSharedFloat64Atomics = true;
+ features->shaderSharedFloat64AtomicAdd = false;
+ features->shaderImageFloat32Atomics = true;
+ features->shaderImageFloat32AtomicAdd = false;
+ features->sparseImageFloat32Atomics = false;
+ features->sparseImageFloat32AtomicAdd = false;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: {
+ VkPhysicalDevice4444FormatsFeaturesEXT *features =
+ (VkPhysicalDevice4444FormatsFeaturesEXT *)ext;
+ features->formatA4R4G4B4 = true;
+ features->formatA4B4G4R4 = true;
+ break;
+ }
default:
break;
}
@@ -1367,6 +1446,21 @@ radv_max_descriptor_set_size()
64 /* storage image */);
}

+static uint32_t
+radv_uniform_buffer_offset_alignment(const struct radv_physical_device *pdevice)
+{
+ uint32_t uniform_offset_alignment = driQueryOptioni(&pdevice->instance->dri_options,
+ "radv_override_uniform_offset_alignment");
+ if (!util_is_power_of_two_or_zero(uniform_offset_alignment)) {
+ fprintf(stderr, "ERROR: invalid radv_override_uniform_offset_alignment setting %d: "
+ "not a power of two\n", uniform_offset_alignment);
+ uniform_offset_alignment = 0;
+ }
+
+ /* Take at least the hardware limit.
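+ * driQueryOptioni() returns 0 when no override is set, and invalid
+ * overrides are rejected above, so the MAX2() below yields the
+ * hardware minimum of 4 unless a valid larger alignment was
+ * requested. For example, forcing a 256-byte alignment from drirc:
+ *
+ *    <option name="radv_override_uniform_offset_alignment" value="256" />
+ *
+ * makes minUniformBufferOffsetAlignment report 256 instead of 4.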
*/ + return MAX2(uniform_offset_alignment, 4); +} + void radv_GetPhysicalDeviceProperties( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties) @@ -1449,7 +1543,7 @@ void radv_GetPhysicalDeviceProperties( .viewportSubPixelBits = 8, .minMemoryMapAlignment = 4096, /* A page */ .minTexelBufferOffsetAlignment = 4, - .minUniformBufferOffsetAlignment = 4, + .minUniformBufferOffsetAlignment = radv_uniform_buffer_offset_alignment(pdevice), .minStorageBufferOffsetAlignment = 4, .minTexelOffset = -32, .maxTexelOffset = 31, @@ -1478,8 +1572,8 @@ void radv_GetPhysicalDeviceProperties( .maxCullDistances = 8, .maxCombinedClipAndCullDistances = 8, .discreteQueuePriorities = 2, - .pointSizeRange = { 0.0, 8192.0 }, - .lineWidthRange = { 0.0, 8192.0 }, + .pointSizeRange = { 0.0, 8191.875 }, + .lineWidthRange = { 0.0, 8191.875 }, .pointSizeGranularity = (1.0 / 8.0), .lineWidthGranularity = (1.0 / 8.0), .strictLines = false, /* FINISHME */ @@ -1524,14 +1618,9 @@ radv_get_physical_device_properties_1_1(struct radv_physical_device *pdevice, VK_SUBGROUP_FEATURE_ARITHMETIC_BIT | VK_SUBGROUP_FEATURE_BALLOT_BIT | VK_SUBGROUP_FEATURE_CLUSTERED_BIT | - VK_SUBGROUP_FEATURE_QUAD_BIT; - - if (((pdevice->rad_info.chip_class == GFX6 || - pdevice->rad_info.chip_class == GFX7) && !pdevice->use_aco) || - pdevice->rad_info.chip_class >= GFX8) { - p->subgroupSupportedOperations |= VK_SUBGROUP_FEATURE_SHUFFLE_BIT | - VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT; - } + VK_SUBGROUP_FEATURE_QUAD_BIT | + VK_SUBGROUP_FEATURE_SHUFFLE_BIT | + VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT; p->subgroupQuadOperationsInAllStages = true; p->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES; @@ -1556,14 +1645,14 @@ radv_get_physical_device_properties_1_2(struct radv_physical_device *pdevice, p->conformanceVersion = (VkConformanceVersion) { .major = 1, .minor = 2, - .subminor = 0, + .subminor = 3, .patch = 0, }; /* On AMD hardware, denormals and rounding modes for fp16/fp64 are * controlled by the same config register. */ - if (pdevice->rad_info.has_double_rate_fp16) { + if (pdevice->rad_info.has_packed_math_16bit) { p->denormBehaviorIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR; p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR; } else { @@ -1571,30 +1660,29 @@ radv_get_physical_device_properties_1_2(struct radv_physical_device *pdevice, p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR; } - /* Do not allow both preserving and flushing denorms because different - * shaders in the same pipeline can have different settings and this - * won't work for merged shaders. To make it work, this requires LLVM + /* With LLVM, do not allow both preserving and flushing denorms because + * different shaders in the same pipeline can have different settings and + * this won't work for merged shaders. To make it work, this requires LLVM * support for changing the register. The same logic applies for the * rounding modes because they are configured with the same config - * register. TODO: we can enable a lot of these for ACO when it - * supports all stages. + * register. 
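+ *
+ * For example, on GFX9+ the vertex and tessellation control stages
+ * can be merged into a single hardware shader with one float-mode
+ * configuration, so two stages that disagree on denormal or rounding
+ * behavior cannot both be honored unless the compiler can switch the
+ * mode register mid-shader.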
*/ p->shaderDenormFlushToZeroFloat32 = true; - p->shaderDenormPreserveFloat32 = false; + p->shaderDenormPreserveFloat32 = !pdevice->use_llvm; p->shaderRoundingModeRTEFloat32 = true; - p->shaderRoundingModeRTZFloat32 = false; + p->shaderRoundingModeRTZFloat32 = !pdevice->use_llvm; p->shaderSignedZeroInfNanPreserveFloat32 = true; - p->shaderDenormFlushToZeroFloat16 = false; - p->shaderDenormPreserveFloat16 = pdevice->rad_info.has_double_rate_fp16; - p->shaderRoundingModeRTEFloat16 = pdevice->rad_info.has_double_rate_fp16; - p->shaderRoundingModeRTZFloat16 = false; - p->shaderSignedZeroInfNanPreserveFloat16 = pdevice->rad_info.has_double_rate_fp16; + p->shaderDenormFlushToZeroFloat16 = pdevice->rad_info.has_packed_math_16bit && !pdevice->use_llvm; + p->shaderDenormPreserveFloat16 = pdevice->rad_info.has_packed_math_16bit; + p->shaderRoundingModeRTEFloat16 = pdevice->rad_info.has_packed_math_16bit; + p->shaderRoundingModeRTZFloat16 = pdevice->rad_info.has_packed_math_16bit && !pdevice->use_llvm; + p->shaderSignedZeroInfNanPreserveFloat16 = pdevice->rad_info.has_packed_math_16bit; - p->shaderDenormFlushToZeroFloat64 = false; + p->shaderDenormFlushToZeroFloat64 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_llvm; p->shaderDenormPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8; p->shaderRoundingModeRTEFloat64 = pdevice->rad_info.chip_class >= GFX8; - p->shaderRoundingModeRTZFloat64 = false; + p->shaderRoundingModeRTZFloat64 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_llvm; p->shaderSignedZeroInfNanPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8; p->maxUpdateAfterBindDescriptorsInAllPools = UINT32_MAX / 64; @@ -1752,7 +1840,7 @@ void radv_GetPhysicalDeviceProperties2( properties->shaderArraysPerEngineCount = pdevice->rad_info.max_sh_per_se; properties->computeUnitsPerShaderArray = - pdevice->rad_info.num_good_cu_per_sh; + pdevice->rad_info.min_good_cu_per_sa; properties->simdPerComputeUnit = pdevice->rad_info.num_simd_per_compute_unit; properties->wavefrontsPerSimd = @@ -2242,13 +2330,27 @@ radv_queue_init(struct radv_device *device, struct radv_queue *queue, list_inithead(&queue->pending_submissions); pthread_mutex_init(&queue->pending_mutex, NULL); + pthread_mutex_init(&queue->thread_mutex, NULL); + queue->thread_submission = NULL; + queue->thread_running = queue->thread_exit = false; + result = radv_create_pthread_cond(&queue->thread_cond); + if (result != VK_SUCCESS) + return vk_error(device->instance, result); + return VK_SUCCESS; } static void radv_queue_finish(struct radv_queue *queue) { + if (queue->thread_running) { + p_atomic_set(&queue->thread_exit, true); + pthread_cond_broadcast(&queue->thread_cond); + pthread_join(queue->submission_thread, NULL); + } + pthread_cond_destroy(&queue->thread_cond); pthread_mutex_destroy(&queue->pending_mutex); + pthread_mutex_destroy(&queue->thread_mutex); if (queue->hw_ctx) queue->device->ws->ctx_destroy(queue->hw_ctx); @@ -2384,537 +2486,6 @@ radv_get_int_debug_option(const char *name, int default_value) return result; } -static int install_seccomp_filter() { - - struct sock_filter filter[] = { - /* Check arch is 64bit x86 */ - BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, arch))), - BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, AUDIT_ARCH_X86_64, 0, 12), - - /* Futex is required for mutex locks */ - #if defined __NR__newselect - BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))), - BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR__newselect, 11, 0), - #elif defined __NR_select - BPF_STMT(BPF_LD 
+ BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))), - BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_select, 11, 0), - #else - BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))), - BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_pselect6, 11, 0), - #endif - - /* Allow system exit calls for the forked process */ - BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))), - BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_exit_group, 9, 0), - - /* Allow system read calls */ - BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))), - BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_read, 7, 0), - - /* Allow system write calls */ - BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))), - BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_write, 5, 0), - - /* Allow system brk calls (we need this for malloc) */ - BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))), - BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_brk, 3, 0), - - /* Futex is required for mutex locks */ - BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))), - BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_futex, 1, 0), - - /* Return error if we hit a system call not on the whitelist */ - BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)), - - /* Allow whitelisted system calls */ - BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW), - }; - - struct sock_fprog prog = { - .len = (unsigned short)(sizeof(filter) / sizeof(filter[0])), - .filter = filter, - }; - - if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) - return -1; - - if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) - return -1; - - return 0; -} - -/* Helper function with timeout support for reading from the pipe between - * processes used for secure compile. - */ -bool radv_sc_read(int fd, void *buf, size_t size, bool timeout) -{ - fd_set fds; - struct timeval tv; - - FD_ZERO(&fds); - FD_SET(fd, &fds); - - while (true) { - /* We can't rely on the value of tv after calling select() so - * we must reset it on each iteration of the loop. - */ - tv.tv_sec = 5; - tv.tv_usec = 0; - - int rval = select(fd + 1, &fds, NULL, NULL, timeout ? 
&tv : NULL); - - if (rval == -1) { - /* select error */ - return false; - } else if (rval) { - ssize_t bytes_read = read(fd, buf, size); - if (bytes_read < 0) - return false; - - buf += bytes_read; - size -= bytes_read; - if (size == 0) - return true; - } else { - /* select timeout */ - return false; - } - } -} - -static bool radv_close_all_fds(const int *keep_fds, int keep_fd_count) -{ - DIR *d; - struct dirent *dir; - d = opendir("/proc/self/fd"); - if (!d) - return false; - int dir_fd = dirfd(d); - - while ((dir = readdir(d)) != NULL) { - if (dir->d_name[0] == '.') - continue; - - int fd = atoi(dir->d_name); - if (fd == dir_fd) - continue; - - bool keep = false; - for (int i = 0; !keep && i < keep_fd_count; ++i) - if (keep_fds[i] == fd) - keep = true; - - if (keep) - continue; - - close(fd); - } - closedir(d); - return true; -} - -static bool secure_compile_open_fifo_fds(struct radv_secure_compile_state *sc, - int *fd_server, int *fd_client, - unsigned process, bool make_fifo) -{ - bool result = false; - char *fifo_server_path = NULL; - char *fifo_client_path = NULL; - - if (asprintf(&fifo_server_path, "/tmp/radv_server_%s_%u", sc->uid, process) == -1) - goto open_fifo_exit; - - if (asprintf(&fifo_client_path, "/tmp/radv_client_%s_%u", sc->uid, process) == -1) - goto open_fifo_exit; - - if (make_fifo) { - int file1 = mkfifo(fifo_server_path, 0666); - if(file1 < 0) - goto open_fifo_exit; - - int file2 = mkfifo(fifo_client_path, 0666); - if(file2 < 0) - goto open_fifo_exit; - } - - *fd_server = open(fifo_server_path, O_RDWR); - if(*fd_server < 1) - goto open_fifo_exit; - - *fd_client = open(fifo_client_path, O_RDWR); - if(*fd_client < 1) { - close(*fd_server); - goto open_fifo_exit; - } - - result = true; - -open_fifo_exit: - free(fifo_server_path); - free(fifo_client_path); - - return result; -} - -static void run_secure_compile_device(struct radv_device *device, unsigned process, - int fd_idle_device_output) -{ - int fd_secure_input; - int fd_secure_output; - bool fifo_result = secure_compile_open_fifo_fds(device->sc_state, - &fd_secure_input, - &fd_secure_output, - process, false); - - enum radv_secure_compile_type sc_type; - - const int needed_fds[] = { - fd_secure_input, - fd_secure_output, - fd_idle_device_output, - }; - - if (!fifo_result || !radv_close_all_fds(needed_fds, ARRAY_SIZE(needed_fds)) || - install_seccomp_filter() == -1) { - sc_type = RADV_SC_TYPE_INIT_FAILURE; - } else { - sc_type = RADV_SC_TYPE_INIT_SUCCESS; - device->sc_state->secure_compile_processes[process].fd_secure_input = fd_secure_input; - device->sc_state->secure_compile_processes[process].fd_secure_output = fd_secure_output; - } - - write(fd_idle_device_output, &sc_type, sizeof(sc_type)); - - if (sc_type == RADV_SC_TYPE_INIT_FAILURE) - goto secure_compile_exit; - - while (true) { - radv_sc_read(fd_secure_input, &sc_type, sizeof(sc_type), false); - - if (sc_type == RADV_SC_TYPE_COMPILE_PIPELINE) { - struct radv_pipeline *pipeline; - bool sc_read = true; - - pipeline = vk_zalloc2(&device->vk.alloc, NULL, sizeof(*pipeline), 8, - VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); - - pipeline->device = device; - - /* Read pipeline layout */ - struct radv_pipeline_layout layout; - sc_read = radv_sc_read(fd_secure_input, &layout, sizeof(struct radv_pipeline_layout), true); - sc_read &= radv_sc_read(fd_secure_input, &layout.num_sets, sizeof(uint32_t), true); - if (!sc_read) - goto secure_compile_exit; - - for (uint32_t set = 0; set < layout.num_sets; set++) { - uint32_t layout_size; - sc_read &= radv_sc_read(fd_secure_input, 
&layout_size, sizeof(uint32_t), true); - if (!sc_read) - goto secure_compile_exit; - - layout.set[set].layout = malloc(layout_size); - layout.set[set].layout->layout_size = layout_size; - sc_read &= radv_sc_read(fd_secure_input, layout.set[set].layout, - layout.set[set].layout->layout_size, true); - } - - pipeline->layout = &layout; - - /* Read pipeline key */ - struct radv_pipeline_key key; - sc_read &= radv_sc_read(fd_secure_input, &key, sizeof(struct radv_pipeline_key), true); - - /* Read pipeline create flags */ - VkPipelineCreateFlags flags; - sc_read &= radv_sc_read(fd_secure_input, &flags, sizeof(VkPipelineCreateFlags), true); - - /* Read stage and shader information */ - uint32_t num_stages; - const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, }; - sc_read &= radv_sc_read(fd_secure_input, &num_stages, sizeof(uint32_t), true); - if (!sc_read) - goto secure_compile_exit; - - for (uint32_t i = 0; i < num_stages; i++) { - - /* Read stage */ - gl_shader_stage stage; - sc_read &= radv_sc_read(fd_secure_input, &stage, sizeof(gl_shader_stage), true); - - VkPipelineShaderStageCreateInfo *pStage = calloc(1, sizeof(VkPipelineShaderStageCreateInfo)); - - /* Read entry point name */ - size_t name_size; - sc_read &= radv_sc_read(fd_secure_input, &name_size, sizeof(size_t), true); - if (!sc_read) - goto secure_compile_exit; - - char *ep_name = malloc(name_size); - sc_read &= radv_sc_read(fd_secure_input, ep_name, name_size, true); - pStage->pName = ep_name; - - /* Read shader module */ - size_t module_size; - sc_read &= radv_sc_read(fd_secure_input, &module_size, sizeof(size_t), true); - if (!sc_read) - goto secure_compile_exit; - - struct radv_shader_module *module = malloc(module_size); - sc_read &= radv_sc_read(fd_secure_input, module, module_size, true); - pStage->module = radv_shader_module_to_handle(module); - - /* Read specialization info */ - bool has_spec_info; - sc_read &= radv_sc_read(fd_secure_input, &has_spec_info, sizeof(bool), true); - if (!sc_read) - goto secure_compile_exit; - - if (has_spec_info) { - VkSpecializationInfo *specInfo = malloc(sizeof(VkSpecializationInfo)); - pStage->pSpecializationInfo = specInfo; - - sc_read &= radv_sc_read(fd_secure_input, &specInfo->dataSize, sizeof(size_t), true); - if (!sc_read) - goto secure_compile_exit; - - void *si_data = malloc(specInfo->dataSize); - sc_read &= radv_sc_read(fd_secure_input, si_data, specInfo->dataSize, true); - specInfo->pData = si_data; - - sc_read &= radv_sc_read(fd_secure_input, &specInfo->mapEntryCount, sizeof(uint32_t), true); - if (!sc_read) - goto secure_compile_exit; - - VkSpecializationMapEntry *mapEntries = malloc(sizeof(VkSpecializationMapEntry) * specInfo->mapEntryCount); - for (uint32_t j = 0; j < specInfo->mapEntryCount; j++) { - sc_read &= radv_sc_read(fd_secure_input, &mapEntries[j], sizeof(VkSpecializationMapEntry), true); - if (!sc_read) - goto secure_compile_exit; - } - - specInfo->pMapEntries = mapEntries; - } - - pStages[stage] = pStage; - } - - /* Compile the shaders */ - VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 }; - - /* Not fully to spec but if we're doing sandboxed compilations already this doesn't matter. 
*/ - flags &= ~VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT; - - radv_create_shaders(pipeline, device, NULL, &key, pStages, flags, NULL, stage_feedbacks); - - /* free memory allocated above */ - for (uint32_t set = 0; set < layout.num_sets; set++) - free(layout.set[set].layout); - - for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) { - if (!pStages[i]) - continue; - - free((void *) pStages[i]->pName); - free(radv_shader_module_from_handle(pStages[i]->module)); - if (pStages[i]->pSpecializationInfo) { - free((void *) pStages[i]->pSpecializationInfo->pData); - free((void *) pStages[i]->pSpecializationInfo->pMapEntries); - free((void *) pStages[i]->pSpecializationInfo); - } - free((void *) pStages[i]); - } - - vk_free(&device->vk.alloc, pipeline); - - sc_type = RADV_SC_TYPE_COMPILE_PIPELINE_FINISHED; - write(fd_secure_output, &sc_type, sizeof(sc_type)); - - } else if (sc_type == RADV_SC_TYPE_DESTROY_DEVICE) { - goto secure_compile_exit; - } - } - -secure_compile_exit: - close(fd_secure_input); - close(fd_secure_output); - close(fd_idle_device_output); - _exit(0); -} - -static enum radv_secure_compile_type fork_secure_compile_device(struct radv_device *device, unsigned process) -{ - int fd_secure_input[2]; - int fd_secure_output[2]; - - /* create pipe descriptors (used to communicate between processes) */ - if (pipe(fd_secure_input) == -1 || pipe(fd_secure_output) == -1) - return RADV_SC_TYPE_INIT_FAILURE; - - - int sc_pid; - if ((sc_pid = fork()) == 0) { - device->sc_state->secure_compile_thread_counter = process; - run_secure_compile_device(device, process, fd_secure_output[1]); - } else { - if (sc_pid == -1) - return RADV_SC_TYPE_INIT_FAILURE; - - /* Read the init result returned from the secure process */ - enum radv_secure_compile_type sc_type; - bool sc_read = radv_sc_read(fd_secure_output[0], &sc_type, sizeof(sc_type), true); - - if (sc_type == RADV_SC_TYPE_INIT_FAILURE || !sc_read) { - close(fd_secure_input[0]); - close(fd_secure_input[1]); - close(fd_secure_output[1]); - close(fd_secure_output[0]); - int status; - waitpid(sc_pid, &status, 0); - - return RADV_SC_TYPE_INIT_FAILURE; - } else { - assert(sc_type == RADV_SC_TYPE_INIT_SUCCESS); - write(device->sc_state->secure_compile_processes[process].fd_secure_output, &sc_type, sizeof(sc_type)); - - close(fd_secure_input[0]); - close(fd_secure_input[1]); - close(fd_secure_output[1]); - close(fd_secure_output[0]); - - int status; - waitpid(sc_pid, &status, 0); - } - } - - return RADV_SC_TYPE_INIT_SUCCESS; -} - -/* Run a bare bones fork of a device that was forked right after its creation. - * This device will have low overhead when it is forked again before each - * pipeline compilation. This device sits idle and its only job is to fork - * itself. 
- */ -static void run_secure_compile_idle_device(struct radv_device *device, unsigned process, - int fd_secure_input, int fd_secure_output) -{ - enum radv_secure_compile_type sc_type = RADV_SC_TYPE_INIT_SUCCESS; - device->sc_state->secure_compile_processes[process].fd_secure_input = fd_secure_input; - device->sc_state->secure_compile_processes[process].fd_secure_output = fd_secure_output; - - write(fd_secure_output, &sc_type, sizeof(sc_type)); - - while (true) { - radv_sc_read(fd_secure_input, &sc_type, sizeof(sc_type), false); - - if (sc_type == RADV_SC_TYPE_FORK_DEVICE) { - sc_type = fork_secure_compile_device(device, process); - - if (sc_type == RADV_SC_TYPE_INIT_FAILURE) - goto secure_compile_exit; - - } else if (sc_type == RADV_SC_TYPE_DESTROY_DEVICE) { - goto secure_compile_exit; - } - } - -secure_compile_exit: - close(fd_secure_input); - close(fd_secure_output); - _exit(0); -} - -static void destroy_secure_compile_device(struct radv_device *device, unsigned process) -{ - int fd_secure_input = device->sc_state->secure_compile_processes[process].fd_secure_input; - - enum radv_secure_compile_type sc_type = RADV_SC_TYPE_DESTROY_DEVICE; - write(fd_secure_input, &sc_type, sizeof(sc_type)); - - close(device->sc_state->secure_compile_processes[process].fd_secure_input); - close(device->sc_state->secure_compile_processes[process].fd_secure_output); - - int status; - waitpid(device->sc_state->secure_compile_processes[process].sc_pid, &status, 0); -} - -static VkResult fork_secure_compile_idle_device(struct radv_device *device) -{ - device->sc_state = vk_zalloc(&device->vk.alloc, - sizeof(struct radv_secure_compile_state), - 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); - - mtx_init(&device->sc_state->secure_compile_mutex, mtx_plain); - - pid_t upid = getpid(); - time_t seconds = time(NULL); - - char *uid; - if (asprintf(&uid, "%ld_%ld", (long) upid, (long) seconds) == -1) - return VK_ERROR_INITIALIZATION_FAILED; - - device->sc_state->uid = uid; - - uint8_t sc_threads = device->instance->num_sc_threads; - int fd_secure_input[MAX_SC_PROCS][2]; - int fd_secure_output[MAX_SC_PROCS][2]; - - /* create pipe descriptors (used to communicate between processes) */ - for (unsigned i = 0; i < sc_threads; i++) { - if (pipe(fd_secure_input[i]) == -1 || - pipe(fd_secure_output[i]) == -1) { - return VK_ERROR_INITIALIZATION_FAILED; - } - } - - device->sc_state->secure_compile_processes = vk_zalloc(&device->vk.alloc, - sizeof(struct radv_secure_compile_process) * sc_threads, 8, - VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); - - for (unsigned process = 0; process < sc_threads; process++) { - if ((device->sc_state->secure_compile_processes[process].sc_pid = fork()) == 0) { - device->sc_state->secure_compile_thread_counter = process; - run_secure_compile_idle_device(device, process, fd_secure_input[process][0], fd_secure_output[process][1]); - } else { - if (device->sc_state->secure_compile_processes[process].sc_pid == -1) - return VK_ERROR_INITIALIZATION_FAILED; - - /* Read the init result returned from the secure process */ - enum radv_secure_compile_type sc_type; - bool sc_read = radv_sc_read(fd_secure_output[process][0], &sc_type, sizeof(sc_type), true); - - bool fifo_result; - if (sc_read && sc_type == RADV_SC_TYPE_INIT_SUCCESS) { - fifo_result = secure_compile_open_fifo_fds(device->sc_state, - &device->sc_state->secure_compile_processes[process].fd_server, - &device->sc_state->secure_compile_processes[process].fd_client, - process, true); - - device->sc_state->secure_compile_processes[process].fd_secure_input = 
fd_secure_input[process][1]; - device->sc_state->secure_compile_processes[process].fd_secure_output = fd_secure_output[process][0]; - } - - if (sc_type == RADV_SC_TYPE_INIT_FAILURE || !sc_read || !fifo_result) { - close(fd_secure_input[process][0]); - close(fd_secure_input[process][1]); - close(fd_secure_output[process][1]); - close(fd_secure_output[process][0]); - int status; - waitpid(device->sc_state->secure_compile_processes[process].sc_pid, &status, 0); - - /* Destroy any forks that were created sucessfully */ - for (unsigned i = 0; i < process; i++) { - destroy_secure_compile_device(device, i); - } - - return VK_ERROR_INITIALIZATION_FAILED; - } - } - } - return VK_SUCCESS; -} - static void radv_device_init_dispatch(struct radv_device *device) { @@ -3003,6 +2574,8 @@ static VkResult radv_device_init_border_color(struct radv_device *device) device->border_color_data.colors_gpu_ptr = device->ws->buffer_map(device->border_color_data.bo); + if (!device->border_color_data.colors_gpu_ptr) + return vk_error(device->physical_device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); pthread_mutex_init(&device->border_color_data.mutex, NULL); return VK_SUCCESS; @@ -3017,6 +2590,25 @@ static void radv_device_finish_border_color(struct radv_device *device) } } +VkResult +_radv_device_set_lost(struct radv_device *device, + const char *file, int line, + const char *msg, ...) +{ + VkResult err; + va_list ap; + + p_atomic_inc(&device->lost); + + va_start(ap, msg); + err = __vk_errorv(device->physical_device->instance, device, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, + VK_ERROR_DEVICE_LOST, file, line, msg, ap); + va_end(ap); + + return err; +} + VkResult radv_CreateDevice( VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, @@ -3225,10 +2817,22 @@ VkResult radv_CreateDevice( goto fail; } - /* Temporarily disable secure compile while we create meta shaders, etc */ - uint8_t sc_threads = device->instance->num_sc_threads; - if (sc_threads) - device->instance->num_sc_threads = 0; + if (getenv("RADV_TRAP_HANDLER")) { + /* TODO: Add support for more hardware. */ + assert(device->physical_device->rad_info.chip_class == GFX8); + + fprintf(stderr, "**********************************************************************\n"); + fprintf(stderr, "* WARNING: RADV_TRAP_HANDLER is experimental and only for debugging! *\n"); + fprintf(stderr, "**********************************************************************\n"); + + /* To get the disassembly of the faulty shaders, we have to + * keep some shader info around. 
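+ * radv_check_trap_handler() runs after each submission when a TMA
+ * buffer is present (see radv_queue_submit_deferred() below) and
+ * uses the retained info to print the disassembly of the shader
+ * that faulted.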
+ */ + keep_shader_info = true; + + if (!radv_trap_handler_init(device)) + goto fail; + } device->keep_shader_info = keep_shader_info; result = radv_device_init_meta(device); @@ -3246,6 +2850,9 @@ VkResult radv_CreateDevice( for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) { device->empty_cs[family] = device->ws->cs_create(device->ws, family); + if (!device->empty_cs[family]) + goto fail; + switch (family) { case RADV_QUEUE_GENERAL: radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0)); @@ -3257,7 +2864,10 @@ VkResult radv_CreateDevice( radeon_emit(device->empty_cs[family], 0); break; } - device->ws->cs_finalize(device->empty_cs[family]); + + result = device->ws->cs_finalize(device->empty_cs[family]); + if (result != VK_SUCCESS) + goto fail; } if (device->physical_device->rad_info.chip_class >= GFX7) @@ -3288,15 +2898,6 @@ VkResult radv_CreateDevice( 1 << util_logbase2(device->force_aniso)); } - /* Fork device for secure compile as required */ - device->instance->num_sc_threads = sc_threads; - if (radv_device_use_secure_compile(device->instance)) { - - result = fork_secure_compile_idle_device(device); - if (result != VK_SUCCESS) - goto fail_meta; - } - *pDevice = radv_device_to_handle(device); return VK_SUCCESS; @@ -3309,6 +2910,8 @@ fail: radv_thread_trace_finish(device); + radv_trap_handler_finish(device); + if (device->trace_bo) device->ws->buffer_destroy(device->trace_bo); @@ -3358,6 +2961,8 @@ void radv_DestroyDevice( VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache); radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL); + radv_trap_handler_finish(device); + radv_destroy_shader_slabs(device); pthread_cond_destroy(&device->timeline_cond); @@ -3365,17 +2970,6 @@ void radv_DestroyDevice( radv_thread_trace_finish(device); - if (radv_device_use_secure_compile(device->instance)) { - for (unsigned i = 0; i < device->instance->num_sc_threads; i++ ) { - destroy_secure_compile_device(device, i); - } - } - - if (device->sc_state) { - free(device->sc_state->uid); - vk_free(&device->vk.alloc, device->sc_state->secure_compile_processes); - } - vk_free(&device->vk.alloc, device->sc_state); vk_free(&device->vk.alloc, device); } @@ -3674,7 +3268,10 @@ radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buff } *max_offchip_buffers_p = max_offchip_buffers; - if (device->physical_device->rad_info.chip_class >= GFX7) { + if (device->physical_device->rad_info.chip_class >= GFX10_3) { + hs_offchip_param = S_03093C_OFFCHIP_BUFFERING_GFX103(max_offchip_buffers - 1) | + S_03093C_OFFCHIP_GRANULARITY_GFX103(offchip_granularity); + } else if (device->physical_device->rad_info.chip_class >= GFX7) { if (device->physical_device->rad_info.chip_class >= GFX8) --max_offchip_buffers; hs_offchip_param = @@ -3844,6 +3441,50 @@ radv_emit_global_shader_pointers(struct radv_queue *queue, } } +static void +radv_emit_trap_handler(struct radv_queue *queue, + struct radeon_cmdbuf *cs, + struct radeon_winsys_bo *tma_bo) +{ + struct radv_device *device = queue->device; + struct radeon_winsys_bo *tba_bo; + uint64_t tba_va, tma_va; + + if (!device->trap_handler_shader || !tma_bo) + return; + + tba_bo = device->trap_handler_shader->bo; + + tba_va = radv_buffer_get_va(tba_bo) + device->trap_handler_shader->bo_offset; + tma_va = radv_buffer_get_va(tma_bo); + + radv_cs_add_buffer(queue->device->ws, cs, tba_bo); + radv_cs_add_buffer(queue->device->ws, cs, tma_bo); + + if (queue->queue_family_index == RADV_QUEUE_GENERAL) { + uint32_t regs[] = 
{R_00B000_SPI_SHADER_TBA_LO_PS, + R_00B100_SPI_SHADER_TBA_LO_VS, + R_00B200_SPI_SHADER_TBA_LO_GS, + R_00B300_SPI_SHADER_TBA_LO_ES, + R_00B400_SPI_SHADER_TBA_LO_HS, + R_00B500_SPI_SHADER_TBA_LO_LS}; + + for (int i = 0; i < ARRAY_SIZE(regs); ++i) { + radeon_set_sh_reg_seq(cs, regs[i], 4); + radeon_emit(cs, tba_va >> 8); + radeon_emit(cs, tba_va >> 40); + radeon_emit(cs, tma_va >> 8); + radeon_emit(cs, tma_va >> 40); + } + } else { + radeon_set_sh_reg_seq(cs, R_00B838_COMPUTE_TBA_LO, 4); + radeon_emit(cs, tba_va >> 8); + radeon_emit(cs, tba_va >> 40); + radeon_emit(cs, tma_va >> 8); + radeon_emit(cs, tma_va >> 40); + } +} + static void radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue) { @@ -3866,8 +3507,7 @@ radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue) static void radv_init_compute_state(struct radeon_cmdbuf *cs, struct radv_queue *queue) { - struct radv_physical_device *physical_device = queue->device->physical_device; - si_emit_compute(physical_device, cs); + si_emit_compute(queue->device, cs); } static VkResult @@ -4085,6 +3725,8 @@ radv_get_preamble_cs(struct radv_queue *queue, if (descriptor_bo != queue->descriptor_bo) { uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo); + if (!map) + goto fail; if (scratch_bo) { uint64_t scratch_va = radv_buffer_get_va(scratch_bo); @@ -4147,6 +3789,7 @@ radv_get_preamble_cs(struct radv_queue *queue, compute_scratch_waves, compute_scratch_bo); radv_emit_graphics_scratch(queue, cs, scratch_size_per_wave, scratch_waves, scratch_bo); + radv_emit_trap_handler(queue, cs, queue->device->tma_bo); if (gds_bo) radv_cs_add_buffer(queue->device->ws, cs, gds_bo); @@ -4156,6 +3799,10 @@ radv_get_preamble_cs(struct radv_queue *queue, if (queue->device->trace_bo) radv_cs_add_buffer(queue->device->ws, cs, queue->device->trace_bo); + if (queue->device->border_color_data.bo) + radv_cs_add_buffer(queue->device->ws, cs, + queue->device->border_color_data.bo); + if (i == 0) { si_cs_emit_cache_flush(cs, queue->device->physical_device->rad_info.chip_class, @@ -4181,7 +3828,7 @@ radv_get_preamble_cs(struct radv_queue *queue, RADV_CMD_FLAG_START_PIPELINE_STATS, 0); } - if (!queue->device->ws->cs_finalize(cs)) + if (queue->device->ws->cs_finalize(cs) != VK_SUCCESS) goto fail; } @@ -4291,7 +3938,7 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, VkFence _fence, bool is_signal) { - int syncobj_idx = 0, sem_idx = 0; + int syncobj_idx = 0, non_reset_idx = 0, sem_idx = 0, timeline_idx = 0; if (num_sems == 0 && _fence == VK_NULL_HANDLE) return VK_SUCCESS; @@ -4300,6 +3947,7 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, switch(sems[i]->kind) { case RADV_SEMAPHORE_SYNCOBJ: counts->syncobj_count++; + counts->syncobj_reset_count++; break; case RADV_SEMAPHORE_WINSYS: counts->sem_count++; @@ -4309,19 +3957,29 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, case RADV_SEMAPHORE_TIMELINE: counts->syncobj_count++; break; + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: + counts->timeline_syncobj_count++; + break; } } if (_fence != VK_NULL_HANDLE) { RADV_FROM_HANDLE(radv_fence, fence, _fence); - if (fence->temp_syncobj || fence->syncobj) + + struct radv_fence_part *part = + fence->temporary.kind != RADV_FENCE_NONE ? 
+ &fence->temporary : &fence->permanent; + if (part->kind == RADV_FENCE_SYNCOBJ) counts->syncobj_count++; } - if (counts->syncobj_count) { - counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count); - if (!counts->syncobj) + if (counts->syncobj_count || counts->timeline_syncobj_count) { + counts->points = (uint64_t *)malloc( + sizeof(*counts->syncobj) * counts->syncobj_count + + (sizeof(*counts->syncobj) + sizeof(*counts->points)) * counts->timeline_syncobj_count); + if (!counts->points) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + counts->syncobj = (uint32_t*)(counts->points + counts->timeline_syncobj_count); } if (counts->sem_count) { @@ -4332,6 +3990,8 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, } } + non_reset_idx = counts->syncobj_reset_count; + for (uint32_t i = 0; i < num_sems; i++) { switch(sems[i]->kind) { case RADV_SEMAPHORE_NONE: @@ -4355,7 +4015,7 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, pthread_mutex_unlock(&sems[i]->timeline.mutex); if (point) { - counts->syncobj[syncobj_idx++] = point->syncobj; + counts->syncobj[non_reset_idx++] = point->syncobj; } else { /* Explicitly remove the semaphore so we might not find * a point later post-submit. */ @@ -4363,19 +4023,26 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, } break; } + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: + counts->syncobj[counts->syncobj_count + timeline_idx] = sems[i]->syncobj; + counts->points[timeline_idx] = timeline_values[i]; + ++timeline_idx; + break; } } if (_fence != VK_NULL_HANDLE) { RADV_FROM_HANDLE(radv_fence, fence, _fence); - if (fence->temp_syncobj) - counts->syncobj[syncobj_idx++] = fence->temp_syncobj; - else if (fence->syncobj) - counts->syncobj[syncobj_idx++] = fence->syncobj; + + struct radv_fence_part *part = + fence->temporary.kind != RADV_FENCE_NONE ? 
+ &fence->temporary : &fence->permanent; + if (part->kind == RADV_FENCE_SYNCOBJ) + counts->syncobj[non_reset_idx++] = part->syncobj; } - assert(syncobj_idx <= counts->syncobj_count); - counts->syncobj_count = syncobj_idx; + assert(MAX2(syncobj_idx, non_reset_idx) <= counts->syncobj_count); + counts->syncobj_count = MAX2(syncobj_idx, non_reset_idx); return VK_SUCCESS; } @@ -4383,9 +4050,9 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, static void radv_free_sem_info(struct radv_winsys_sem_info *sem_info) { - free(sem_info->wait.syncobj); + free(sem_info->wait.points); free(sem_info->wait.sem); - free(sem_info->signal.syncobj); + free(sem_info->signal.points); free(sem_info->signal.sem); } @@ -4455,15 +4122,19 @@ radv_finalize_timelines(struct radv_device *device, point->wait_count -= 2; radv_timeline_trigger_waiters_locked(&signal_sems[i]->timeline, processing_list); pthread_mutex_unlock(&signal_sems[i]->timeline.mutex); + } else if (signal_sems[i] && signal_sems[i]->kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ) { + signal_sems[i]->timeline_syncobj.max_point = + MAX2(signal_sems[i]->timeline_syncobj.max_point, signal_values[i]); } } } -static void +static VkResult radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferMemoryBindInfo *bind) { RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer); + VkResult result; for (uint32_t i = 0; i < bind->bindCount; ++i) { struct radv_device_memory *mem = NULL; @@ -4471,19 +4142,24 @@ radv_sparse_buffer_bind_memory(struct radv_device *device, if (bind->pBinds[i].memory != VK_NULL_HANDLE) mem = radv_device_memory_from_handle(bind->pBinds[i].memory); - device->ws->buffer_virtual_bind(buffer->bo, - bind->pBinds[i].resourceOffset, - bind->pBinds[i].size, - mem ? mem->bo : NULL, - bind->pBinds[i].memoryOffset); + result = device->ws->buffer_virtual_bind(buffer->bo, + bind->pBinds[i].resourceOffset, + bind->pBinds[i].size, + mem ? mem->bo : NULL, + bind->pBinds[i].memoryOffset); + if (result != VK_SUCCESS) + return result; } + + return VK_SUCCESS; } -static void +static VkResult radv_sparse_image_opaque_bind_memory(struct radv_device *device, const VkSparseImageOpaqueMemoryBindInfo *bind) { RADV_FROM_HANDLE(radv_image, image, bind->image); + VkResult result; for (uint32_t i = 0; i < bind->bindCount; ++i) { struct radv_device_memory *mem = NULL; @@ -4491,12 +4167,16 @@ radv_sparse_image_opaque_bind_memory(struct radv_device *device, if (bind->pBinds[i].memory != VK_NULL_HANDLE) mem = radv_device_memory_from_handle(bind->pBinds[i].memory); - device->ws->buffer_virtual_bind(image->bo, - bind->pBinds[i].resourceOffset, - bind->pBinds[i].size, - mem ? mem->bo : NULL, - bind->pBinds[i].memoryOffset); + result = device->ws->buffer_virtual_bind(image->bo, + bind->pBinds[i].resourceOffset, + bind->pBinds[i].size, + mem ? 
mem->bo : NULL, + bind->pBinds[i].memoryOffset); + if (result != VK_SUCCESS) + return result; } + + return VK_SUCCESS; } static VkResult @@ -4597,6 +4277,11 @@ struct radv_queue_submission { uint32_t signal_value_count; }; +static VkResult +radv_queue_trigger_submission(struct radv_deferred_queue_submission *submission, + uint32_t decrement, + struct list_head *processing_list); + static VkResult radv_create_deferred_submission(struct radv_queue *queue, const struct radv_queue_submission *submission, @@ -4692,7 +4377,7 @@ radv_create_deferred_submission(struct radv_queue *queue, return VK_SUCCESS; } -static void +static VkResult radv_queue_enqueue_submission(struct radv_deferred_queue_submission *submission, struct list_head *processing_list) { @@ -4723,9 +4408,7 @@ radv_queue_enqueue_submission(struct radv_deferred_queue_submission *submission, * submitted, but if the queue was empty, we decrement ourselves as there is no previous * submission. */ uint32_t decrement = submission->wait_semaphore_count - wait_cnt + (is_first ? 1 : 0); - if (__atomic_sub_fetch(&submission->submission_wait_count, decrement, __ATOMIC_ACQ_REL) == 0) { - list_addtail(&submission->processing_list, processing_list); - } + return radv_queue_trigger_submission(submission, decrement, processing_list); } static void @@ -4741,9 +4424,7 @@ radv_queue_submission_update_queue(struct radv_deferred_queue_submission *submis list_first_entry(&submission->queue->pending_submissions, struct radv_deferred_queue_submission, queue_pending_list); - if (p_atomic_dec_zero(&next_submission->submission_wait_count)) { - list_addtail(&next_submission->processing_list, processing_list); - } + radv_queue_trigger_submission(next_submission, 1, processing_list); } pthread_mutex_unlock(&submission->queue->pending_mutex); @@ -4758,17 +4439,33 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission, struct radv_queue *queue = submission->queue; struct radeon_winsys_ctx *ctx = queue->hw_ctx; uint32_t max_cs_submission = queue->device->trace_bo ? 1 : RADV_MAX_IBS_PER_SUBMIT; - struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL; + struct radeon_winsys_fence *base_fence = NULL; bool do_flush = submission->flush_caches || submission->wait_dst_stage_mask; bool can_patch = true; uint32_t advance; struct radv_winsys_sem_info sem_info; VkResult result; - int ret; struct radeon_cmdbuf *initial_preamble_cs = NULL; struct radeon_cmdbuf *initial_flush_preamble_cs = NULL; struct radeon_cmdbuf *continue_preamble_cs = NULL; + if (fence) { + /* Under most circumstances, out fences won't be temporary. + * However, the spec does allow it for opaque_fd. + * + * From the Vulkan 1.0.53 spec: + * + * "If the import is temporary, the implementation must + * restore the semaphore to its prior permanent state after + * submitting the next semaphore wait operation." + */ + struct radv_fence_part *part = + fence->temporary.kind != RADV_FENCE_NONE ? 
+ &fence->temporary : &fence->permanent; + if (part->kind == RADV_FENCE_WINSYS) + base_fence = part->fence; + } + result = radv_get_preambles(queue, submission->cmd_buffers, submission->cmd_buffer_count, &initial_preamble_cs, @@ -4790,27 +4487,27 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission, goto fail; for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) { - radv_sparse_buffer_bind_memory(queue->device, - submission->buffer_binds + i); + result = radv_sparse_buffer_bind_memory(queue->device, + submission->buffer_binds + i); + if (result != VK_SUCCESS) + goto fail; } for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) { - radv_sparse_image_opaque_bind_memory(queue->device, - submission->image_opaque_binds + i); + result = radv_sparse_image_opaque_bind_memory(queue->device, + submission->image_opaque_binds + i); + if (result != VK_SUCCESS) + goto fail; } if (!submission->cmd_buffer_count) { - ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, - &queue->device->empty_cs[queue->queue_family_index], - 1, NULL, NULL, - &sem_info, NULL, - false, base_fence); - if (ret) { - radv_loge("failed to submit CS\n"); - abort(); - } - - goto success; + result = queue->device->ws->cs_submit(ctx, queue->queue_idx, + &queue->device->empty_cs[queue->queue_family_index], + 1, NULL, NULL, + &sem_info, NULL, + false, base_fence); + if (result != VK_SUCCESS) + goto fail; } else { struct radeon_cmdbuf **cs_array = malloc(sizeof(struct radeon_cmdbuf *) * (submission->cmd_buffer_count)); @@ -4844,27 +4541,29 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission, bo_list = &queue->device->bo_list.list; } - ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j, - advance, initial_preamble, continue_preamble_cs, - &sem_info, bo_list, - can_patch, base_fence); + result = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j, + advance, initial_preamble, continue_preamble_cs, + &sem_info, bo_list, + can_patch, base_fence); if (unlikely(queue->device->use_global_bo_list)) pthread_mutex_unlock(&queue->device->bo_list.mutex); - if (ret) { - radv_loge("failed to submit CS\n"); - abort(); - } + if (result != VK_SUCCESS) + goto fail; + if (queue->device->trace_bo) { radv_check_gpu_hangs(queue, cs_array[j]); } + + if (queue->device->tma_bo) { + radv_check_trap_handler(queue); + } } free(cs_array); } -success: radv_free_temp_syncobjs(queue->device, submission->temporary_semaphore_part_count, submission->temporary_semaphore_parts); @@ -4885,11 +4584,22 @@ success: return VK_SUCCESS; fail: + if (result != VK_SUCCESS && result != VK_ERROR_DEVICE_LOST) { + /* When something bad happened during the submission, such as + * an out of memory issue, it might be hard to recover from + * this inconsistent state. To avoid this sort of problem, we + * assume that we are in a really bad situation and return + * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt + * to submit the same job again to this device. 
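+ *
+ * A rough sketch of the loss latch this relies on, assuming an
+ * atomic "lost" flag on the device (the field and logging details
+ * here are illustrative, not the exact implementation):
+ *
+ *    VkResult radv_device_set_lost(struct radv_device *device,
+ *                                  const char *msg)
+ *    {
+ *            p_atomic_set(&device->lost, 1);
+ *            fprintf(stderr, "radv: device lost: %s\n", msg);
+ *            return VK_ERROR_DEVICE_LOST;
+ *    }
+ *
+ * Entry points that touch the GPU then check radv_device_is_lost()
+ * first and bail out with VK_ERROR_DEVICE_LOST, as the queue
+ * functions below do.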
+ */ + result = radv_device_set_lost(queue->device, "vkQueueSubmit() failed"); + } + radv_free_temp_syncobjs(queue->device, submission->temporary_semaphore_part_count, submission->temporary_semaphore_parts); free(submission); - return VK_ERROR_DEVICE_LOST; + return result; } static VkResult @@ -4907,6 +4617,131 @@ radv_process_submissions(struct list_head *processing_list) return VK_SUCCESS; } +static VkResult +wait_for_submission_timelines_available(struct radv_deferred_queue_submission *submission, + uint64_t timeout) +{ + struct radv_device *device = submission->queue->device; + uint32_t syncobj_count = 0; + uint32_t syncobj_idx = 0; + + for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) { + if (submission->wait_semaphores[i]->kind != RADV_SEMAPHORE_TIMELINE_SYNCOBJ) + continue; + + if (submission->wait_semaphores[i]->timeline_syncobj.max_point >= submission->wait_values[i]) + continue; + ++syncobj_count; + } + + if (!syncobj_count) + return VK_SUCCESS; + + uint64_t *points = malloc((sizeof(uint64_t) + sizeof(uint32_t)) * syncobj_count); + if (!points) + return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + + uint32_t *syncobj = (uint32_t*)(points + syncobj_count); + + for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) { + if (submission->wait_semaphores[i]->kind != RADV_SEMAPHORE_TIMELINE_SYNCOBJ) + continue; + + if (submission->wait_semaphores[i]->timeline_syncobj.max_point >= submission->wait_values[i]) + continue; + + syncobj[syncobj_idx] = submission->wait_semaphores[i]->syncobj; + points[syncobj_idx] = submission->wait_values[i]; + ++syncobj_idx; + } + bool success = device->ws->wait_timeline_syncobj(device->ws, syncobj, points, syncobj_idx, true, true, timeout); + + free(points); + return success ? VK_SUCCESS : VK_TIMEOUT; +} + +static void* radv_queue_submission_thread_run(void *q) +{ + struct radv_queue *queue = q; + + pthread_mutex_lock(&queue->thread_mutex); + while (!p_atomic_read(&queue->thread_exit)) { + struct radv_deferred_queue_submission *submission = queue->thread_submission; + struct list_head processing_list; + VkResult result = VK_SUCCESS; + if (!submission) { + pthread_cond_wait(&queue->thread_cond, &queue->thread_mutex); + continue; + } + pthread_mutex_unlock(&queue->thread_mutex); + + /* Wait at most 5 seconds so we have a chance to notice shutdown when + * a semaphore never gets signaled. If it takes longer we just retry + * the wait next iteration. */ + result = wait_for_submission_timelines_available(submission, + radv_get_absolute_timeout(5000000000)); + if (result != VK_SUCCESS) { + pthread_mutex_lock(&queue->thread_mutex); + continue; + } + + /* The lock isn't held but nobody will add one until we finish + * the current submission. 
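+ *
+ * queue->thread_submission acts as a single-slot mailbox between
+ * radv_queue_trigger_submission() (the producer, below) and this
+ * loop. Roughly:
+ *
+ *    // producer, holding thread_mutex:
+ *    assert(queue->thread_submission == NULL);
+ *    queue->thread_submission = submission;
+ *    pthread_cond_signal(&queue->thread_cond);
+ *
+ *    // consumer (this loop), holding thread_mutex:
+ *    while (!queue->thread_submission)
+ *            pthread_cond_wait(&queue->thread_cond,
+ *                              &queue->thread_mutex);
+ *
+ * Only one submission can be parked here at a time because a
+ * submission with unfinished predecessors on the same queue is never
+ * handed to the thread.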
*/ + p_atomic_set(&queue->thread_submission, NULL); + + list_inithead(&processing_list); + list_addtail(&submission->processing_list, &processing_list); + result = radv_process_submissions(&processing_list); + + pthread_mutex_lock(&queue->thread_mutex); + } + pthread_mutex_unlock(&queue->thread_mutex); + return NULL; +} + +static VkResult +radv_queue_trigger_submission(struct radv_deferred_queue_submission *submission, + uint32_t decrement, + struct list_head *processing_list) +{ + struct radv_queue *queue = submission->queue; + int ret; + if (p_atomic_add_return(&submission->submission_wait_count, -decrement)) + return VK_SUCCESS; + + if (wait_for_submission_timelines_available(submission, radv_get_absolute_timeout(0)) == VK_SUCCESS) { + list_addtail(&submission->processing_list, processing_list); + return VK_SUCCESS; + } + + pthread_mutex_lock(&queue->thread_mutex); + + /* A submission can only be ready for the thread if it doesn't have + * any predecessors in the same queue, so there can only be one such + * submission at a time. */ + assert(queue->thread_submission == NULL); + + /* Only start the thread on demand to save resources for the many games + * which only use binary semaphores. */ + if (!queue->thread_running) { + ret = pthread_create(&queue->submission_thread, NULL, + radv_queue_submission_thread_run, queue); + if (ret) { + pthread_mutex_unlock(&queue->thread_mutex); + return vk_errorf(queue->device->instance, + VK_ERROR_DEVICE_LOST, + "Failed to start submission thread"); + } + queue->thread_running = true; + } + + queue->thread_submission = submission; + pthread_mutex_unlock(&queue->thread_mutex); + + pthread_cond_signal(&queue->thread_cond); + return VK_SUCCESS; +} + static VkResult radv_queue_submit(struct radv_queue *queue, const struct radv_queue_submission *submission) { @@ -4919,7 +4754,12 @@ static VkResult radv_queue_submit(struct radv_queue *queue, struct list_head processing_list; list_inithead(&processing_list); - radv_queue_enqueue_submission(deferred, &processing_list); + result = radv_queue_enqueue_submission(deferred, &processing_list); + if (result != VK_SUCCESS) { + /* If anything is in the list we leak. */ + assert(list_is_empty(&processing_list)); + return result; + } return radv_process_submissions(&processing_list); } @@ -4929,17 +4769,21 @@ radv_queue_internal_submit(struct radv_queue *queue, struct radeon_cmdbuf *cs) struct radeon_winsys_ctx *ctx = queue->hw_ctx; struct radv_winsys_sem_info sem_info; VkResult result; - int ret; result = radv_alloc_sem_info(queue->device, &sem_info, 0, NULL, 0, 0, 0, NULL, VK_NULL_HANDLE); if (result != VK_SUCCESS) return false; - ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, &cs, 1, NULL, - NULL, &sem_info, NULL, false, NULL); + result = queue->device->ws->cs_submit(ctx, queue->queue_idx, &cs, 1, + NULL, NULL, &sem_info, NULL, + false, NULL); radv_free_sem_info(&sem_info); - return !ret; + if (result != VK_SUCCESS) + return false; + + return true; + } /* Signals fence as soon as all the work currently put on queue is done. 
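 *
 * One way to implement such a helper is an empty submission that
 * carries nothing but the fence (a sketch, assuming the
 * radv_queue_submission and radv_queue_submit helpers above):
 *
 *    static VkResult radv_signal_fence(struct radv_queue *queue,
 *                                      VkFence fence)
 *    {
 *            struct radv_queue_submission submission = {
 *                    .fence = fence,
 *            };
 *            return radv_queue_submit(queue, &submission);
 *    }
 *
 * Queue ordering then guarantees the fence only signals once all
 * previously submitted work has completed.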
*/ @@ -4969,6 +4813,9 @@ VkResult radv_QueueSubmit( uint32_t fence_idx = 0; bool flushed_caches = false; + if (radv_device_is_lost(queue->device)) + return VK_ERROR_DEVICE_LOST; + if (fence != VK_NULL_HANDLE) { for (uint32_t i = 0; i < submitCount; ++i) if (radv_submit_has_effects(pSubmits + i)) @@ -5018,20 +4865,44 @@ VkResult radv_QueueSubmit( return VK_SUCCESS; } +static const char * +radv_get_queue_family_name(struct radv_queue *queue) +{ + switch (queue->queue_family_index) { + case RADV_QUEUE_GENERAL: + return "graphics"; + case RADV_QUEUE_COMPUTE: + return "compute"; + case RADV_QUEUE_TRANSFER: + return "transfer"; + default: + unreachable("Unknown queue family"); + } +} + VkResult radv_QueueWaitIdle( VkQueue _queue) { RADV_FROM_HANDLE(radv_queue, queue, _queue); + if (radv_device_is_lost(queue->device)) + return VK_ERROR_DEVICE_LOST; + pthread_mutex_lock(&queue->pending_mutex); while (!list_is_empty(&queue->pending_submissions)) { pthread_cond_wait(&queue->device->timeline_cond, &queue->pending_mutex); } pthread_mutex_unlock(&queue->pending_mutex); - queue->device->ws->ctx_wait_idle(queue->hw_ctx, - radv_queue_family_to_ring(queue->queue_family_index), - queue->queue_idx); + if (!queue->device->ws->ctx_wait_idle(queue->hw_ctx, + radv_queue_family_to_ring(queue->queue_family_index), + queue->queue_idx)) { + return radv_device_set_lost(queue->device, + "Failed to wait for a '%s' queue " + "to be idle. GPU hang ?", + radv_get_queue_family_name(queue)); + } + return VK_SUCCESS; } @@ -5042,7 +4913,11 @@ VkResult radv_DeviceWaitIdle( for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) { for (unsigned q = 0; q < device->queue_count[i]; q++) { - radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q])); + VkResult result = + radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q])); + + if (result != VK_SUCCESS) + return result; } } return VK_SUCCESS; @@ -5204,9 +5079,10 @@ bool radv_get_memory_fd(struct radv_device *device, } -static void radv_free_memory(struct radv_device *device, - const VkAllocationCallbacks* pAllocator, - struct radv_device_memory *mem) +void +radv_free_memory(struct radv_device *device, + const VkAllocationCallbacks* pAllocator, + struct radv_device_memory *mem) { if (mem == NULL) return; @@ -5323,6 +5199,26 @@ static VkResult radv_alloc_memory(struct radv_device *device, } else { close(import_info->fd); } + + if (mem->image && mem->image->plane_count == 1 && + !vk_format_is_depth_or_stencil(mem->image->vk_format)) { + struct radeon_bo_metadata metadata; + device->ws->buffer_get_metadata(mem->bo, &metadata); + + struct radv_image_create_info create_info = { + .no_metadata_planes = true, + .bo_metadata = &metadata + }; + + /* This gives a basic ability to import radeonsi images + * that don't have DCC. This is not guaranteed by any + * spec and can be removed after we support modifiers. 
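+ *
+ * For reference, such an import arrives through the standard
+ * VK_KHR_external_memory_fd path; an application-side sketch
+ * (local variable names are illustrative):
+ *
+ *    VkImportMemoryFdInfoKHR import = {
+ *            .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+ *            .handleType =
+ *                    VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ *            .fd = dmabuf_fd, // fd exported by radeonsi
+ *    };
+ *    VkMemoryAllocateInfo alloc = {
+ *            .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ *            .pNext = &import,
+ *            .allocationSize = size,
+ *            .memoryTypeIndex = mem_type_index,
+ *    };
+ *    vkAllocateMemory(device, &alloc, NULL, &mem);
+ *
+ * which lands in this import branch, where the image layout is
+ * rebuilt from the BO metadata.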
*/ + result = radv_image_create_layout(device, create_info, mem->image); + if (result != VK_SUCCESS) { + device->ws->buffer_destroy(mem->bo); + goto fail; + } + } } else if (host_ptr_info) { assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT); mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer, @@ -5671,6 +5567,9 @@ static bool radv_sparse_bind_has_effects(const VkBindSparseInfo *info) VkResult result; uint32_t fence_idx = 0; + if (radv_device_is_lost(queue->device)) + return VK_ERROR_DEVICE_LOST; + if (fence != VK_NULL_HANDLE) { for (uint32_t i = 0; i < bindInfoCount; ++i) if (radv_sparse_bind_has_effects(pBindInfo + i)) @@ -5714,6 +5613,41 @@ static bool radv_sparse_bind_has_effects(const VkBindSparseInfo *info) return VK_SUCCESS; } +static void +radv_destroy_fence_part(struct radv_device *device, + struct radv_fence_part *part) +{ + switch (part->kind) { + case RADV_FENCE_NONE: + break; + case RADV_FENCE_WINSYS: + device->ws->destroy_fence(part->fence); + break; + case RADV_FENCE_SYNCOBJ: + device->ws->destroy_syncobj(device->ws, part->syncobj); + break; + case RADV_FENCE_WSI: + part->fence_wsi->destroy(part->fence_wsi); + break; + default: + unreachable("Invalid fence type"); + } + + part->kind = RADV_FENCE_NONE; +} + +static void +radv_destroy_fence(struct radv_device *device, + const VkAllocationCallbacks *pAllocator, + struct radv_fence *fence) +{ + radv_destroy_fence_part(device, &fence->temporary); + radv_destroy_fence_part(device, &fence->permanent); + + vk_object_base_finish(&fence->base); + vk_free2(&device->vk.alloc, pAllocator, fence); +} + VkResult radv_CreateFence( VkDevice _device, const VkFenceCreateInfo* pCreateInfo, @@ -5725,37 +5659,39 @@ VkResult radv_CreateFence( vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO); VkExternalFenceHandleTypeFlags handleTypes = export ? 
export->handleTypes : 0;
+ struct radv_fence *fence;
- struct radv_fence *fence = vk_alloc2(&device->vk.alloc, pAllocator,
- sizeof(*fence), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-
+ fence = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*fence), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 if (!fence)
 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 vk_object_base_init(&device->vk, &fence->base, VK_OBJECT_TYPE_FENCE);
- fence->fence_wsi = NULL;
- fence->temp_syncobj = 0;
 if (device->always_use_syncobj || handleTypes) {
- int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
+ fence->permanent.kind = RADV_FENCE_SYNCOBJ;
+
+ bool create_signaled = false;
+ if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
+ create_signaled = true;
+
+ int ret = device->ws->create_syncobj(device->ws, create_signaled,
+ &fence->permanent.syncobj);
 if (ret) {
- vk_free2(&device->vk.alloc, pAllocator, fence);
+ radv_destroy_fence(device, pAllocator, fence);
 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 }
- if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
- device->ws->signal_syncobj(device->ws, fence->syncobj);
- }
- fence->fence = NULL;
 } else {
- fence->fence = device->ws->create_fence();
- if (!fence->fence) {
+ fence->permanent.kind = RADV_FENCE_WINSYS;
+
+ fence->permanent.fence = device->ws->create_fence();
+ if (!fence->permanent.fence) {
- vk_free2(&device->vk.alloc, pAllocator, fence);
+ radv_destroy_fence(device, pAllocator, fence);
 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 }
- fence->syncobj = 0;
 if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
- device->ws->signal_fence(fence->fence);
+ device->ws->signal_fence(fence->permanent.fence);
 }
 *pFence = radv_fence_to_handle(fence);
@@ -5763,6 +5699,7 @@ VkResult radv_CreateFence(
 return VK_SUCCESS;
 }
+
 void radv_DestroyFence(
 VkDevice _device,
 VkFence _fence,
@@ -5774,45 +5711,20 @@ void radv_DestroyFence(
 if (!fence)
 return;
- if (fence->temp_syncobj)
- device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
- if (fence->syncobj)
- device->ws->destroy_syncobj(device->ws, fence->syncobj);
- if (fence->fence)
- device->ws->destroy_fence(fence->fence);
- if (fence->fence_wsi)
- fence->fence_wsi->destroy(fence->fence_wsi);
-
- vk_object_base_finish(&fence->base);
- vk_free2(&device->vk.alloc, pAllocator, fence);
-}
-
-
-uint64_t radv_get_current_time(void)
-{
- struct timespec tv;
- clock_gettime(CLOCK_MONOTONIC, &tv);
- return tv.tv_nsec + tv.tv_sec*1000000000ull;
-}
-
-static uint64_t radv_get_absolute_timeout(uint64_t timeout)
-{
- uint64_t current_time = radv_get_current_time();
-
- timeout = MIN2(UINT64_MAX - current_time, timeout);
-
- return current_time + timeout;
+ radv_destroy_fence(device, pAllocator, fence);
 }
-
 static bool radv_all_fences_plain_and_submitted(struct radv_device *device,
 uint32_t fenceCount,
 const VkFence *pFences)
 {
 for (uint32_t i = 0; i < fenceCount; ++i) {
 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- if (fence->fence == NULL || fence->syncobj ||
- fence->temp_syncobj || fence->fence_wsi ||
- (!device->ws->is_fence_waitable(fence->fence)))
+
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ? 
+ &fence->temporary : &fence->permanent; + if (part->kind != RADV_FENCE_WINSYS || + !device->ws->is_fence_waitable(part->fence)) return false; } return true; @@ -5822,7 +5734,11 @@ static bool radv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences) { for (uint32_t i = 0; i < fenceCount; ++i) { RADV_FROM_HANDLE(radv_fence, fence, pFences[i]); - if (fence->syncobj == 0 && fence->temp_syncobj == 0) + + struct radv_fence_part *part = + fence->temporary.kind != RADV_FENCE_NONE ? + &fence->temporary : &fence->permanent; + if (part->kind != RADV_FENCE_SYNCOBJ) return false; } return true; @@ -5836,6 +5752,10 @@ VkResult radv_WaitForFences( uint64_t timeout) { RADV_FROM_HANDLE(radv_device, device, _device); + + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; + timeout = radv_get_absolute_timeout(timeout); if (device->always_use_syncobj && @@ -5847,7 +5767,13 @@ VkResult radv_WaitForFences( for (uint32_t i = 0; i < fenceCount; ++i) { RADV_FROM_HANDLE(radv_fence, fence, pFences[i]); - handles[i] = fence->temp_syncobj ? fence->temp_syncobj : fence->syncobj; + + struct radv_fence_part *part = + fence->temporary.kind != RADV_FENCE_NONE ? + &fence->temporary : &fence->permanent; + + assert(part->kind == RADV_FENCE_SYNCOBJ); + handles[i] = part->syncobj; } bool success = device->ws->wait_syncobj(device->ws, handles, fenceCount, waitAll, timeout); @@ -5867,12 +5793,17 @@ VkResult radv_WaitForFences( for (uint32_t i = 0; i < fenceCount; ++i) { RADV_FROM_HANDLE(radv_fence, fence, pFences[i]); - if (device->ws->fence_wait(device->ws, fence->fence, false, 0)) { + struct radv_fence_part *part = + fence->temporary.kind != RADV_FENCE_NONE ? + &fence->temporary : &fence->permanent; + assert(part->kind == RADV_FENCE_WINSYS); + + if (device->ws->fence_wait(device->ws, part->fence, false, 0)) { free(fences); return VK_SUCCESS; } - fences[wait_count++] = fence->fence; + fences[wait_count++] = part->fence; } bool success = device->ws->fences_wait(device->ws, fences, wait_count, @@ -5895,36 +5826,40 @@ VkResult radv_WaitForFences( RADV_FROM_HANDLE(radv_fence, fence, pFences[i]); bool expired = false; - if (fence->temp_syncobj) { - if (!device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, timeout)) - return VK_TIMEOUT; - continue; - } + struct radv_fence_part *part = + fence->temporary.kind != RADV_FENCE_NONE ? 
+ &fence->temporary : &fence->permanent;
- if (fence->syncobj) {
- if (!device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, timeout))
- return VK_TIMEOUT;
- continue;
- }
-
- if (fence->fence) {
- if (!device->ws->is_fence_waitable(fence->fence)) {
- while(!device->ws->is_fence_waitable(fence->fence) &&
+ switch (part->kind) {
+ case RADV_FENCE_NONE:
+ break;
+ case RADV_FENCE_WINSYS:
+ if (!device->ws->is_fence_waitable(part->fence)) {
+ while (!device->ws->is_fence_waitable(part->fence) &&
 radv_get_current_time() <= timeout)
 /* Do nothing */;
 }
 expired = device->ws->fence_wait(device->ws,
- fence->fence,
+ part->fence,
 true, timeout);
 if (!expired)
 return VK_TIMEOUT;
- }
-
- if (fence->fence_wsi) {
- VkResult result = fence->fence_wsi->wait(fence->fence_wsi, timeout);
+ break;
+ case RADV_FENCE_SYNCOBJ:
+ if (!device->ws->wait_syncobj(device->ws,
+ &part->syncobj, 1, true,
+ timeout))
+ return VK_TIMEOUT;
+ break;
+ case RADV_FENCE_WSI: {
+ VkResult result = part->fence_wsi->wait(part->fence_wsi, timeout);
 if (result != VK_SUCCESS)
 return result;
+ break;
+ }
+ default:
+ unreachable("Invalid fence type");
 }
 }
@@ -5939,18 +5874,29 @@ VkResult radv_ResetFences(VkDevice _device,
 for (unsigned i = 0; i < fenceCount; ++i) {
 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- if (fence->fence)
- device->ws->reset_fence(fence->fence);
-
- /* Per spec, we first restore the permanent payload, and then reset, so
- * having a temp syncobj should not skip resetting the permanent syncobj. */
- if (fence->temp_syncobj) {
- device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
- fence->temp_syncobj = 0;
- }
+ /* From the Vulkan 1.0.53 spec:
+ *
+ * "If any member of pFences currently has its payload
+ * imported with temporary permanence, that fence’s prior
+ * permanent payload is first restored. The remaining
+ * operations described therefore operate on the restored
+ * payload."
+ */
+ if (fence->temporary.kind != RADV_FENCE_NONE)
+ radv_destroy_fence_part(device, &fence->temporary);
+
+ struct radv_fence_part *part = &fence->permanent;
- if (fence->syncobj) {
- device->ws->reset_syncobj(device->ws, fence->syncobj);
+ switch (part->kind) {
+ case RADV_FENCE_WINSYS:
+ device->ws->reset_fence(part->fence);
+ break;
+ case RADV_FENCE_SYNCOBJ:
+ device->ws->reset_syncobj(device->ws, part->syncobj);
+ break;
+ default:
+ unreachable("Invalid fence type");
 }
 }
@@ -5962,29 +5908,40 @@ VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
 RADV_FROM_HANDLE(radv_device, device, _device);
 RADV_FROM_HANDLE(radv_fence, fence, _fence);
- if (fence->temp_syncobj) {
- bool success = device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, 0);
- return success ? VK_SUCCESS : VK_NOT_READY;
- }
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
- if (fence->syncobj) {
- bool success = device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, 0);
- return success ? 
VK_SUCCESS : VK_NOT_READY; - } + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; - if (fence->fence) { - if (!device->ws->fence_wait(device->ws, fence->fence, false, 0)) + switch (part->kind) { + case RADV_FENCE_NONE: + break; + case RADV_FENCE_WINSYS: + if (!device->ws->fence_wait(device->ws, part->fence, false, 0)) return VK_NOT_READY; + break; + case RADV_FENCE_SYNCOBJ: { + bool success = device->ws->wait_syncobj(device->ws, + &part->syncobj, 1, true, 0); + if (!success) + return VK_NOT_READY; + break; } - if (fence->fence_wsi) { - VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0); - + case RADV_FENCE_WSI: { + VkResult result = part->fence_wsi->wait(part->fence_wsi, 0); if (result != VK_SUCCESS) { if (result == VK_TIMEOUT) return VK_NOT_READY; return result; } + break; } + default: + unreachable("Invalid fence type"); + } + return VK_SUCCESS; } @@ -6067,6 +6024,7 @@ radv_timeline_add_point_locked(struct radv_device *device, struct radv_timeline_point *ret = NULL; struct radv_timeline_point *prev = NULL; + int r; if (p <= timeline->highest_signaled) return NULL; @@ -6083,7 +6041,11 @@ radv_timeline_add_point_locked(struct radv_device *device, if (list_is_empty(&timeline->free_points)) { ret = malloc(sizeof(struct radv_timeline_point)); - device->ws->create_syncobj(device->ws, &ret->syncobj); + r = device->ws->create_syncobj(device->ws, false, &ret->syncobj); + if (r) { + free(ret); + return NULL; + } } else { ret = list_first_entry(&timeline->free_points, struct radv_timeline_point, list); list_del(&ret->list); @@ -6104,31 +6066,35 @@ radv_timeline_add_point_locked(struct radv_device *device, static VkResult -radv_timeline_wait_locked(struct radv_device *device, - struct radv_timeline *timeline, - uint64_t value, - uint64_t abs_timeout) +radv_timeline_wait(struct radv_device *device, + struct radv_timeline *timeline, + uint64_t value, + uint64_t abs_timeout) { + pthread_mutex_lock(&timeline->mutex); + while(timeline->highest_submitted < value) { struct timespec abstime; timespec_from_nsec(&abstime, abs_timeout); pthread_cond_timedwait(&device->timeline_cond, &timeline->mutex, &abstime); - if (radv_get_current_time() >= abs_timeout && timeline->highest_submitted < value) + if (radv_get_current_time() >= abs_timeout && timeline->highest_submitted < value) { + pthread_mutex_unlock(&timeline->mutex); return VK_TIMEOUT; + } } struct radv_timeline_point *point = radv_timeline_find_point_at_least_locked(device, timeline, value); + pthread_mutex_unlock(&timeline->mutex); if (!point) return VK_SUCCESS; - pthread_mutex_unlock(&timeline->mutex); - bool success = device->ws->wait_syncobj(device->ws, &point->syncobj, 1, true, abs_timeout); pthread_mutex_lock(&timeline->mutex); point->wait_count--; + pthread_mutex_unlock(&timeline->mutex); return success ? 
VK_SUCCESS : VK_TIMEOUT; } @@ -6141,9 +6107,7 @@ radv_timeline_trigger_waiters_locked(struct radv_timeline *timeline, if (waiter->value > timeline->highest_submitted) continue; - if (p_atomic_dec_zero(&waiter->submission->submission_wait_count)) { - list_addtail(&waiter->submission->processing_list, processing_list); - } + radv_queue_trigger_submission(waiter->submission, 1, processing_list); list_del(&waiter->list); } } @@ -6162,6 +6126,7 @@ void radv_destroy_semaphore_part(struct radv_device *device, radv_destroy_timeline(device, &part->timeline); break; case RADV_SEMAPHORE_SYNCOBJ: + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: device->ws->destroy_syncobj(device->ws, part->syncobj); break; } @@ -6182,6 +6147,17 @@ radv_get_semaphore_type(const void *pNext, uint64_t *initial_value) return type_info->semaphoreType; } +static void +radv_destroy_semaphore(struct radv_device *device, + const VkAllocationCallbacks *pAllocator, + struct radv_semaphore *sem) +{ + radv_destroy_semaphore_part(device, &sem->temporary); + radv_destroy_semaphore_part(device, &sem->permanent); + vk_object_base_finish(&sem->base); + vk_free2(&device->vk.alloc, pAllocator, sem); +} + VkResult radv_CreateSemaphore( VkDevice _device, const VkSemaphoreCreateInfo* pCreateInfo, @@ -6208,21 +6184,32 @@ VkResult radv_CreateSemaphore( sem->temporary.kind = RADV_SEMAPHORE_NONE; sem->permanent.kind = RADV_SEMAPHORE_NONE; - if (type == VK_SEMAPHORE_TYPE_TIMELINE) { + if (type == VK_SEMAPHORE_TYPE_TIMELINE && + device->physical_device->rad_info.has_timeline_syncobj) { + int ret = device->ws->create_syncobj(device->ws, false, &sem->permanent.syncobj); + if (ret) { + radv_destroy_semaphore(device, pAllocator, sem); + return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + } + device->ws->signal_syncobj(device->ws, sem->permanent.syncobj, initial_value); + sem->permanent.timeline_syncobj.max_point = initial_value; + sem->permanent.kind = RADV_SEMAPHORE_TIMELINE_SYNCOBJ; + } else if (type == VK_SEMAPHORE_TYPE_TIMELINE) { radv_create_timeline(&sem->permanent.timeline, initial_value); sem->permanent.kind = RADV_SEMAPHORE_TIMELINE; } else if (device->always_use_syncobj || handleTypes) { assert (device->physical_device->rad_info.has_syncobj); - int ret = device->ws->create_syncobj(device->ws, &sem->permanent.syncobj); + int ret = device->ws->create_syncobj(device->ws, false, + &sem->permanent.syncobj); if (ret) { - vk_free2(&device->vk.alloc, pAllocator, sem); + radv_destroy_semaphore(device, pAllocator, sem); return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); } sem->permanent.kind = RADV_SEMAPHORE_SYNCOBJ; } else { sem->permanent.ws_sem = device->ws->create_sem(device->ws); if (!sem->permanent.ws_sem) { - vk_free2(&device->vk.alloc, pAllocator, sem); + radv_destroy_semaphore(device, pAllocator, sem); return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); } sem->permanent.kind = RADV_SEMAPHORE_WINSYS; @@ -6242,10 +6229,7 @@ void radv_DestroySemaphore( if (!_semaphore) return; - radv_destroy_semaphore_part(device, &sem->temporary); - radv_destroy_semaphore_part(device, &sem->permanent); - vk_object_base_finish(&sem->base); - vk_free2(&device->vk.alloc, pAllocator, sem); + radv_destroy_semaphore(device, pAllocator, sem); } VkResult @@ -6256,6 +6240,9 @@ radv_GetSemaphoreCounterValue(VkDevice _device, RADV_FROM_HANDLE(radv_device, device, _device); RADV_FROM_HANDLE(radv_semaphore, semaphore, _semaphore); + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; + struct radv_semaphore_part *part = 
semaphore->temporary.kind != RADV_SEMAPHORE_NONE ? &semaphore->temporary : &semaphore->permanent; @@ -6267,6 +6254,9 @@ radv_GetSemaphoreCounterValue(VkDevice _device, pthread_mutex_unlock(&part->timeline.mutex); return VK_SUCCESS; } + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: { + return device->ws->query_syncobj(device->ws, part->syncobj, pValue); + } case RADV_SEMAPHORE_NONE: case RADV_SEMAPHORE_SYNCOBJ: case RADV_SEMAPHORE_WINSYS: @@ -6285,9 +6275,7 @@ radv_wait_timelines(struct radv_device *device, for (;;) { for(uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) { RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]); - pthread_mutex_lock(&semaphore->permanent.timeline.mutex); - VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], 0); - pthread_mutex_unlock(&semaphore->permanent.timeline.mutex); + VkResult result = radv_timeline_wait(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], 0); if (result == VK_SUCCESS) return VK_SUCCESS; @@ -6299,9 +6287,7 @@ radv_wait_timelines(struct radv_device *device, for(uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) { RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]); - pthread_mutex_lock(&semaphore->permanent.timeline.mutex); - VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], abs_timeout); - pthread_mutex_unlock(&semaphore->permanent.timeline.mutex); + VkResult result = radv_timeline_wait(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], abs_timeout); if (result != VK_SUCCESS) return result; @@ -6314,8 +6300,33 @@ radv_WaitSemaphores(VkDevice _device, uint64_t timeout) { RADV_FROM_HANDLE(radv_device, device, _device); + + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; + uint64_t abs_timeout = radv_get_absolute_timeout(timeout); - return radv_wait_timelines(device, pWaitInfo, abs_timeout); + + if (radv_semaphore_from_handle(pWaitInfo->pSemaphores[0])->permanent.kind == RADV_SEMAPHORE_TIMELINE) + return radv_wait_timelines(device, pWaitInfo, abs_timeout); + + if (pWaitInfo->semaphoreCount > UINT32_MAX / sizeof(uint32_t)) + return vk_errorf(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY, "semaphoreCount integer overflow"); + + bool wait_all = !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR); + uint32_t *handles = malloc(sizeof(*handles) * pWaitInfo->semaphoreCount); + if (!handles) + return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + + for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) { + RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]); + handles[i] = semaphore->permanent.syncobj; + } + + bool success = device->ws->wait_timeline_syncobj(device->ws, handles, pWaitInfo->pValues, + pWaitInfo->semaphoreCount, wait_all, false, + abs_timeout); + free(handles); + return success ? VK_SUCCESS : VK_TIMEOUT; } VkResult @@ -6340,7 +6351,21 @@ radv_SignalSemaphore(VkDevice _device, radv_timeline_trigger_waiters_locked(&part->timeline, &processing_list); pthread_mutex_unlock(&part->timeline.mutex); - return radv_process_submissions(&processing_list); + VkResult result = radv_process_submissions(&processing_list); + + /* This needs to happen after radv_process_submissions, so + * that any submitted submissions that are now unblocked get + * processed before we wake the application. This way we + * ensure that any binary semaphores that are now unblocked + * are usable by the application. 
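+ *
+ * This is the host-side signal operation of
+ * VK_KHR_timeline_semaphore, i.e. what makes an application pattern
+ * like the following work (sketch):
+ *
+ *    VkSemaphoreSignalInfo info = {
+ *            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,
+ *            .semaphore = timeline,
+ *            .value = 42,
+ *    };
+ *    vkSignalSemaphore(device, &info);
+ *    // queued work waiting on values <= 42 can now run, and any
+ *    // binary semaphores it signals become waitable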
*/ + pthread_cond_broadcast(&device->timeline_cond); + + return result; + } + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: { + part->timeline_syncobj.max_point = MAX2(part->timeline_syncobj.max_point, pSignalInfo->value); + device->ws->signal_syncobj(device->ws, part->syncobj, pSignalInfo->value); + break; } case RADV_SEMAPHORE_NONE: case RADV_SEMAPHORE_SYNCOBJ: @@ -6350,7 +6375,16 @@ radv_SignalSemaphore(VkDevice _device, return VK_SUCCESS; } +static void radv_destroy_event(struct radv_device *device, + const VkAllocationCallbacks* pAllocator, + struct radv_event *event) +{ + if (event->bo) + device->ws->buffer_destroy(event->bo); + vk_object_base_finish(&event->base); + vk_free2(&device->vk.alloc, pAllocator, event); +} VkResult radv_CreateEvent( VkDevice _device, @@ -6373,11 +6407,15 @@ VkResult radv_CreateEvent( RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING, RADV_BO_PRIORITY_FENCE); if (!event->bo) { - vk_free2(&device->vk.alloc, pAllocator, event); + radv_destroy_event(device, pAllocator, event); return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); } event->map = (uint64_t*)device->ws->buffer_map(event->bo); + if (!event->map) { + radv_destroy_event(device, pAllocator, event); + return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); + } *pEvent = radv_event_to_handle(event); @@ -6394,17 +6432,20 @@ void radv_DestroyEvent( if (!event) return; - device->ws->buffer_destroy(event->bo); - vk_object_base_finish(&event->base); - vk_free2(&device->vk.alloc, pAllocator, event); + + radv_destroy_event(device, pAllocator, event); } VkResult radv_GetEventStatus( VkDevice _device, VkEvent _event) { + RADV_FROM_HANDLE(radv_device, device, _device); RADV_FROM_HANDLE(radv_event, event, _event); + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; + if (*event->map == 1) return VK_EVENT_SET; return VK_EVENT_RESET; @@ -6430,6 +6471,18 @@ VkResult radv_ResetEvent( return VK_SUCCESS; } +static void +radv_destroy_buffer(struct radv_device *device, + const VkAllocationCallbacks *pAllocator, + struct radv_buffer *buffer) +{ + if ((buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) && buffer->bo) + device->ws->buffer_destroy(buffer->bo); + + vk_object_base_finish(&buffer->base); + vk_free2(&device->vk.alloc, pAllocator, buffer); +} + VkResult radv_CreateBuffer( VkDevice _device, const VkBufferCreateInfo* pCreateInfo, @@ -6466,7 +6519,7 @@ VkResult radv_CreateBuffer( 4096, 0, RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL); if (!buffer->bo) { - vk_free2(&device->vk.alloc, pAllocator, buffer); + radv_destroy_buffer(device, pAllocator, buffer); return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); } } @@ -6487,11 +6540,7 @@ void radv_DestroyBuffer( if (!buffer) return; - if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) - device->ws->buffer_destroy(buffer->bo); - - vk_object_base_finish(&buffer->base); - vk_free2(&device->vk.alloc, pAllocator, buffer); + radv_destroy_buffer(device, pAllocator, buffer); } VkDeviceAddress radv_GetBufferDeviceAddress( @@ -6625,7 +6674,7 @@ radv_initialise_color_surface(struct radv_device *device, .pipe_aligned = 1, }; - if (iview->image->dcc_offset) + if (surf->dcc_offset) meta = surf->u.gfx9.dcc; cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(surf->u.gfx9.surf.swizzle_mode) | @@ -6671,11 +6720,11 @@ radv_initialise_color_surface(struct radv_device *device, /* CMASK variables */ va = radv_buffer_get_va(iview->bo) + iview->image->offset; - va += iview->image->cmask_offset; + va += 
surf->cmask_offset; cb->cb_color_cmask = va >> 8; va = radv_buffer_get_va(iview->bo) + iview->image->offset; - va += iview->image->dcc_offset; + va += surf->dcc_offset; if (radv_dcc_enabled(iview->image, iview->base_mip) && device->physical_device->rad_info.chip_class <= GFX8) @@ -6700,7 +6749,7 @@ radv_initialise_color_surface(struct radv_device *device, } if (radv_image_has_fmask(iview->image)) { - va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask_offset; + va = radv_buffer_get_va(iview->bo) + iview->image->offset + surf->fmask_offset; cb->cb_color_fmask = va >> 8; cb->cb_color_fmask |= surf->fmask_tile_swizzle; } else { @@ -6952,7 +7001,7 @@ radv_initialise_ds_surface(struct radv_device *device, /* Use all of the htile_buffer for depth if there's no stencil. */ ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1); va = radv_buffer_get_va(iview->bo) + iview->image->offset + - iview->image->htile_offset; + surf->htile_offset; ds->db_htile_data_base = va >> 8; ds->db_htile_surface = S_028ABC_FULL_CACHE(1) | S_028ABC_PIPE_ALIGNED(1); @@ -7020,7 +7069,7 @@ radv_initialise_ds_surface(struct radv_device *device, ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1); va = radv_buffer_get_va(iview->bo) + iview->image->offset + - iview->image->htile_offset; + surf->htile_offset; ds->db_htile_data_base = va >> 8; ds->db_htile_surface = S_028ABC_FULL_CACHE(1); @@ -7578,23 +7627,26 @@ static VkResult radv_import_sync_fd(struct radv_device *device, * leave a syncobj in an undetermined state in the fence. */ uint32_t syncobj_handle = *syncobj; if (!syncobj_handle) { - int ret = device->ws->create_syncobj(device->ws, &syncobj_handle); + bool create_signaled = fd == -1 ? true : false; + + int ret = device->ws->create_syncobj(device->ws, create_signaled, + &syncobj_handle); if (ret) { - return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE); + return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); } + } else { + if (fd == -1) + device->ws->signal_syncobj(device->ws, syncobj_handle, 0); } - if (fd == -1) { - device->ws->signal_syncobj(device->ws, syncobj_handle); - } else { + if (fd != -1) { int ret = device->ws->import_syncobj_from_sync_file(device->ws, syncobj_handle, fd); - if (ret != 0) - return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE); + if (ret) + return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE); + close(fd); } *syncobj = syncobj_handle; - if (fd != -1) - close(fd); return VK_SUCCESS; } @@ -7606,20 +7658,24 @@ VkResult radv_ImportSemaphoreFdKHR(VkDevice _device, RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore); VkResult result; struct radv_semaphore_part *dst = NULL; + bool timeline = sem->permanent.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ; if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) { + assert(!timeline); dst = &sem->temporary; } else { dst = &sem->permanent; } - uint32_t syncobj = dst->kind == RADV_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0; + uint32_t syncobj = (dst->kind == RADV_SEMAPHORE_SYNCOBJ || + dst->kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ) ? 
dst->syncobj : 0; switch(pImportSemaphoreFdInfo->handleType) { case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: result = radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, &syncobj); break; case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: + assert(!timeline); result = radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, &syncobj); break; default: @@ -7629,6 +7685,10 @@ VkResult radv_ImportSemaphoreFdKHR(VkDevice _device, if (result == VK_SUCCESS) { dst->syncobj = syncobj; dst->kind = RADV_SEMAPHORE_SYNCOBJ; + if (timeline) { + dst->kind = RADV_SEMAPHORE_TIMELINE_SYNCOBJ; + dst->timeline_syncobj.max_point = 0; + } } return result; @@ -7644,33 +7704,36 @@ VkResult radv_GetSemaphoreFdKHR(VkDevice _device, uint32_t syncobj_handle; if (sem->temporary.kind != RADV_SEMAPHORE_NONE) { - assert(sem->temporary.kind == RADV_SEMAPHORE_SYNCOBJ); + assert(sem->temporary.kind == RADV_SEMAPHORE_SYNCOBJ || + sem->temporary.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ); syncobj_handle = sem->temporary.syncobj; } else { - assert(sem->permanent.kind == RADV_SEMAPHORE_SYNCOBJ); + assert(sem->permanent.kind == RADV_SEMAPHORE_SYNCOBJ || + sem->permanent.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ); syncobj_handle = sem->permanent.syncobj; } switch(pGetFdInfo->handleType) { case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd); + if (ret) + return vk_error(device->instance, VK_ERROR_TOO_MANY_OBJECTS); break; case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd); - if (!ret) { - if (sem->temporary.kind != RADV_SEMAPHORE_NONE) { - radv_destroy_semaphore_part(device, &sem->temporary); - } else { - device->ws->reset_syncobj(device->ws, syncobj_handle); - } + if (ret) + return vk_error(device->instance, VK_ERROR_TOO_MANY_OBJECTS); + + if (sem->temporary.kind != RADV_SEMAPHORE_NONE) { + radv_destroy_semaphore_part(device, &sem->temporary); + } else { + device->ws->reset_syncobj(device->ws, syncobj_handle); } break; default: unreachable("Unhandled semaphore handle type"); } - if (ret) - return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE); return VK_SUCCESS; } @@ -7682,7 +7745,13 @@ void radv_GetPhysicalDeviceExternalSemaphoreProperties( RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice); VkSemaphoreTypeKHR type = radv_get_semaphore_type(pExternalSemaphoreInfo->pNext, NULL); - if (type == VK_SEMAPHORE_TYPE_TIMELINE) { + if (type == VK_SEMAPHORE_TYPE_TIMELINE && pdevice->rad_info.has_timeline_syncobj && + pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) { + pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT; + pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT; + pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT | + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT; + } else if (type == VK_SEMAPHORE_TYPE_TIMELINE) { pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0; pExternalSemaphoreProperties->compatibleHandleTypes = 0; pExternalSemaphoreProperties->externalSemaphoreFeatures = 0; @@ -7712,23 +7781,34 @@ VkResult radv_ImportFenceFdKHR(VkDevice _device, { RADV_FROM_HANDLE(radv_device, device, _device); RADV_FROM_HANDLE(radv_fence, fence, pImportFenceFdInfo->fence); - uint32_t *syncobj_dst = NULL; - + struct radv_fence_part *dst = NULL; + 
VkResult result; if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) { - syncobj_dst = &fence->temp_syncobj; + dst = &fence->temporary; } else { - syncobj_dst = &fence->syncobj; + dst = &fence->permanent; } + uint32_t syncobj = dst->kind == RADV_FENCE_SYNCOBJ ? dst->syncobj : 0; + switch(pImportFenceFdInfo->handleType) { case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: - return radv_import_opaque_fd(device, pImportFenceFdInfo->fd, syncobj_dst); + result = radv_import_opaque_fd(device, pImportFenceFdInfo->fd, &syncobj); + break; case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: - return radv_import_sync_fd(device, pImportFenceFdInfo->fd, syncobj_dst); + result = radv_import_sync_fd(device, pImportFenceFdInfo->fd, &syncobj); + break; default: unreachable("Unhandled fence handle type"); } + + if (result == VK_SUCCESS) { + dst->syncobj = syncobj; + dst->kind = RADV_FENCE_SYNCOBJ; + } + + return result; } VkResult radv_GetFenceFdKHR(VkDevice _device, @@ -7738,34 +7818,33 @@ VkResult radv_GetFenceFdKHR(VkDevice _device, RADV_FROM_HANDLE(radv_device, device, _device); RADV_FROM_HANDLE(radv_fence, fence, pGetFdInfo->fence); int ret; - uint32_t syncobj_handle; - if (fence->temp_syncobj) - syncobj_handle = fence->temp_syncobj; - else - syncobj_handle = fence->syncobj; + struct radv_fence_part *part = + fence->temporary.kind != RADV_FENCE_NONE ? + &fence->temporary : &fence->permanent; switch(pGetFdInfo->handleType) { case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: - ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd); + ret = device->ws->export_syncobj(device->ws, part->syncobj, pFd); + if (ret) + return vk_error(device->instance, VK_ERROR_TOO_MANY_OBJECTS); break; case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: - ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd); - if (!ret) { - if (fence->temp_syncobj) { - close (fence->temp_syncobj); - fence->temp_syncobj = 0; - } else { - device->ws->reset_syncobj(device->ws, syncobj_handle); - } + ret = device->ws->export_syncobj_to_sync_file(device->ws, + part->syncobj, pFd); + if (ret) + return vk_error(device->instance, VK_ERROR_TOO_MANY_OBJECTS); + + if (part == &fence->temporary) { + radv_destroy_fence_part(device, part); + } else { + device->ws->reset_syncobj(device->ws, part->syncobj); } break; default: unreachable("Unhandled fence handle type"); } - if (ret) - return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE); return VK_SUCCESS; }
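
For reference, the SYNC_FD export path above follows the copy-transference
rule of VK_KHR_external_fence_fd: exporting a sync file has the same side
effect on the source fence as a reset, which is why a successful export
resets the permanent syncobj (or drops a temporary payload). A sketch of
the application side (variable names are illustrative):

    VkFenceGetFdInfoKHR get_fd_info = {
            .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR,
            .fence = fence,
            .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
    };
    int fd = -1;
    if (vkGetFenceFdKHR(device, &get_fd_info, &fd) == VK_SUCCESS) {
            /* The fd now owns a snapshot of the payload; hand it to
             * another process or driver, then close(fd) when done. */
    }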