diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 50c6f14bbcb..fc102e23ced 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -41,7 +41,6 @@
 #include 
 #include 
 #include 
-#include 
 
 #include "radv_debug.h"
 #include "radv_private.h"
@@ -51,7 +50,7 @@
 #include "vk_util.h"
 #include 
 #include 
-#include 
+#include "drm-uapi/amdgpu_drm.h"
 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
 #include "winsys/null/radv_null_winsys_public.h"
 #include "ac_llvm_util.h"
@@ -130,46 +129,9 @@ radv_get_vram_size(struct radv_physical_device *device)
 	return device->rad_info.vram_size - radv_get_visible_vram_size(device);
 }
 
-static bool
-radv_is_mem_type_vram(enum radv_mem_type type)
-{
-	return type == RADV_MEM_TYPE_VRAM ||
-	       type == RADV_MEM_TYPE_VRAM_UNCACHED;
-}
-
-static bool
-radv_is_mem_type_vram_visible(enum radv_mem_type type)
-{
-	return type == RADV_MEM_TYPE_VRAM_CPU_ACCESS ||
-	       type == RADV_MEM_TYPE_VRAM_CPU_ACCESS_UNCACHED;
-}
-
-static bool
-radv_is_mem_type_gtt_wc(enum radv_mem_type type)
-{
-	return type == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
-	       type == RADV_MEM_TYPE_GTT_WRITE_COMBINE_VRAM_UNCACHED;
-}
-
-static bool
-radv_is_mem_type_gtt_cached(enum radv_mem_type type)
-{
-	return type == RADV_MEM_TYPE_GTT_CACHED ||
-	       type == RADV_MEM_TYPE_GTT_CACHED_VRAM_UNCACHED;
-}
-
-static bool
-radv_is_mem_type_uncached(enum radv_mem_type type)
-{
-	return type == RADV_MEM_TYPE_VRAM_UNCACHED ||
-	       type == RADV_MEM_TYPE_VRAM_CPU_ACCESS_UNCACHED ||
-	       type == RADV_MEM_TYPE_GTT_WRITE_COMBINE_VRAM_UNCACHED ||
-	       type == RADV_MEM_TYPE_GTT_CACHED_VRAM_UNCACHED;
-}
-
 static void
 radv_physical_device_init_mem_types(struct radv_physical_device *device)
 {
-	STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
 	uint64_t visible_vram_size = radv_get_visible_vram_size(device);
 	uint64_t vram_size = radv_get_vram_size(device);
 	int vram_index = -1, visible_vram_index = -1, gart_index = -1;
@@ -181,6 +143,15 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device)
 			.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
 		};
 	}
+
+	if (device->rad_info.gart_size > 0) {
+		gart_index = device->memory_properties.memoryHeapCount++;
+		device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) {
+			.size = device->rad_info.gart_size,
+			.flags = 0,
+		};
+	}
+
 	if (visible_vram_size) {
 		visible_vram_index = device->memory_properties.memoryHeapCount++;
 		device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap) {
@@ -188,25 +159,32 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device)
 			.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
 		};
 	}
-	if (device->rad_info.gart_size > 0) {
-		gart_index = device->memory_properties.memoryHeapCount++;
-		device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) {
-			.size = device->rad_info.gart_size,
-			.flags = device->rad_info.has_dedicated_vram ?
-				 0 : VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
-		};
-	}
-	STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
 
 	unsigned type_count = 0;
-	if (vram_index >= 0) {
-		device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM;
-		device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
-			.propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
-			.heapIndex = vram_index,
-		};
+
+	if (device->rad_info.has_dedicated_vram) {
+		if (vram_index >= 0) {
+			device->memory_domains[type_count] = RADEON_DOMAIN_VRAM;
+			device->memory_flags[type_count] = RADEON_FLAG_NO_CPU_ACCESS;
+			device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
+				.propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+				.heapIndex = vram_index,
+			};
+		}
+	} else {
+		if (visible_vram_index >= 0) {
+			device->memory_domains[type_count] = RADEON_DOMAIN_VRAM;
+			device->memory_flags[type_count] = RADEON_FLAG_NO_CPU_ACCESS;
+			device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
+				.propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+				.heapIndex = visible_vram_index,
+			};
+		}
 	}
-	if (gart_index >= 0 && device->rad_info.has_dedicated_vram) {
-		device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
+
+	if (gart_index >= 0) {
+		device->memory_domains[type_count] = RADEON_DOMAIN_GTT;
+		device->memory_flags[type_count] = RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS;
 		device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
 			.propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
 			VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
@@ -214,7 +192,8 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device)
 		};
 	}
 	if (visible_vram_index >= 0) {
-		device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM_CPU_ACCESS;
+		device->memory_domains[type_count] = RADEON_DOMAIN_VRAM;
+		device->memory_flags[type_count] = RADEON_FLAG_CPU_ACCESS;
 		device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
 			.propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
 			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
@@ -222,26 +201,14 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device)
 			.heapIndex = visible_vram_index,
 		};
 	}
-	if (gart_index >= 0 && !device->rad_info.has_dedicated_vram) {
-		/* Put GTT after visible VRAM for GPUs without dedicated VRAM
-		 * as they have identical property flags, and according to the
-		 * spec, for types with identical flags, the one with greater
-		 * performance must be given a lower index. */
-		device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
-		device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
-			.propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
-			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
-			VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
-			.heapIndex = gart_index,
-		};
-	}
+
 	if (gart_index >= 0) {
-		device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_CACHED;
+		device->memory_domains[type_count] = RADEON_DOMAIN_GTT;
+		device->memory_flags[type_count] = RADEON_FLAG_CPU_ACCESS;
 		device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
 			.propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
 			VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
-			VK_MEMORY_PROPERTY_HOST_CACHED_BIT |
-			(device->rad_info.has_dedicated_vram ?
-			 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
+			VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
 			.heapIndex = gart_index,
 		};
 	}
@@ -254,30 +221,13 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device)
 		if ((mem_type.propertyFlags & (VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
 					       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) ||
 		    mem_type.propertyFlags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
-			enum radv_mem_type mem_type_id;
-
-			switch (device->mem_type_indices[i]) {
-			case RADV_MEM_TYPE_VRAM:
-				mem_type_id = RADV_MEM_TYPE_VRAM_UNCACHED;
-				break;
-			case RADV_MEM_TYPE_VRAM_CPU_ACCESS:
-				mem_type_id = RADV_MEM_TYPE_VRAM_CPU_ACCESS_UNCACHED;
-				break;
-			case RADV_MEM_TYPE_GTT_WRITE_COMBINE:
-				mem_type_id = RADV_MEM_TYPE_GTT_WRITE_COMBINE_VRAM_UNCACHED;
-				break;
-			case RADV_MEM_TYPE_GTT_CACHED:
-				mem_type_id = RADV_MEM_TYPE_GTT_CACHED_VRAM_UNCACHED;
-				break;
-			default:
-				unreachable("invalid memory type");
-			}
 
 			VkMemoryPropertyFlags property_flags = mem_type.propertyFlags |
 				VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD |
 				VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;
 
-			device->mem_type_indices[type_count] = mem_type_id;
+			device->memory_domains[type_count] = device->memory_domains[i];
+			device->memory_flags[type_count] = device->memory_flags[i] | RADEON_FLAG_VA_UNCACHED;
 			device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
 				.propertyFlags = property_flags,
 				.heapIndex = mem_type.heapIndex,
@@ -288,10 +238,29 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device)
 	}
 }
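/* [Editor's note: illustrative sketch, not part of the patch.]
 * The ordering of the rebuilt memory-type table matters because of the
 * spec rule quoted in the removed comment: among types with identical
 * propertyFlags, the faster one must get the lower index, and the
 * canonical application-side selection loop simply takes the first
 * matching index.  Standard Vulkan API only, nothing RADV-specific:
 */
#include <vulkan/vulkan.h>

static int32_t
find_memory_type(VkPhysicalDevice pdev, uint32_t type_bits,
                 VkMemoryPropertyFlags required)
{
	VkPhysicalDeviceMemoryProperties props;
	vkGetPhysicalDeviceMemoryProperties(pdev, &props);

	for (uint32_t i = 0; i < props.memoryTypeCount; i++) {
		/* type_bits comes from VkMemoryRequirements::memoryTypeBits. */
		if ((type_bits & (1u << i)) &&
		    (props.memoryTypes[i].propertyFlags & required) == required)
			return (int32_t)i; /* first hit is the preferred type */
	}
	return -1; /* no compatible type */
}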
"/ACO" : "", - device->rad_info.name); + "AMD RADV %s (%s)", + device->rad_info.name, radv_get_compiler_string(device)); if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) { - device->ws->destroy(device->ws); result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID"); - goto fail; + goto fail_wsi; } /* These flags affect shader compilation. */ @@ -412,11 +389,9 @@ radv_physical_device_init(struct radv_physical_device *device, device->use_ngg = device->rad_info.chip_class >= GFX10 && device->rad_info.family != CHIP_NAVI14 && !(device->instance->debug_flags & RADV_DEBUG_NO_NGG); - if (device->use_aco && device->use_ngg) { - fprintf(stderr, "WARNING: disabling NGG because ACO is used.\n"); - device->use_ngg = false; - } + /* TODO: Implement NGG GS with ACO. */ + device->use_ngg_gs = device->use_ngg && !device->use_aco; device->use_ngg_streamout = false; /* Determine the number of threads per wave for all stages. */ @@ -437,7 +412,9 @@ radv_physical_device_init(struct radv_physical_device *device, } radv_physical_device_init_mem_types(device); - radv_fill_device_extension_table(device, &device->supported_extensions); + + radv_physical_device_get_supported_extensions(device, + &device->supported_extensions); if (drm_device) device->bus_info = *drm_device->businfo.pci; @@ -451,14 +428,21 @@ radv_physical_device_init(struct radv_physical_device *device, */ result = radv_init_wsi(device); if (result != VK_SUCCESS) { - device->ws->destroy(device->ws); vk_error(instance, result); - goto fail; + goto fail_disk_cache; } + *device_out = device; + return VK_SUCCESS; -fail: +fail_disk_cache: + disk_cache_destroy(device->disk_cache); +fail_wsi: + device->ws->destroy(device->ws); +fail_alloc: + vk_free(&instance->alloc, device); +fail_fd: close(fd); if (master_fd != -1) close(master_fd); @@ -466,7 +450,7 @@ fail: } static void -radv_physical_device_finish(struct radv_physical_device *device) +radv_physical_device_destroy(struct radv_physical_device *device) { radv_finish_wsi(device); device->ws->destroy(device->ws); @@ -474,6 +458,7 @@ radv_physical_device_finish(struct radv_physical_device *device) close(device->local_fd); if (device->master_fd != -1) close(device->master_fd); + vk_free(&device->instance->alloc, device); } static void * @@ -605,21 +590,13 @@ radv_handle_per_app_options(struct radv_instance *instance, } } -static int radv_get_instance_extension_index(const char *name) -{ - for (unsigned i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; ++i) { - if (strcmp(name, radv_instance_extensions[i].extensionName) == 0) - return i; - } - return -1; -} - static const char radv_dri_options_xml[] = DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE DRI_CONF_ADAPTIVE_SYNC("true") DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0) DRI_CONF_VK_X11_STRICT_IMAGE_COUNT("false") + DRI_CONF_RADV_REPORT_LLVM9_VERSION_STRING("false") DRI_CONF_SECTION_END DRI_CONF_SECTION_DEBUG @@ -645,37 +622,30 @@ VkResult radv_CreateInstance( struct radv_instance *instance; VkResult result; - assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO); - - uint32_t client_version; - if (pCreateInfo->pApplicationInfo && - pCreateInfo->pApplicationInfo->apiVersion != 0) { - client_version = pCreateInfo->pApplicationInfo->apiVersion; - } else { - client_version = VK_API_VERSION_1_0; - } - - const char *engine_name = NULL; - uint32_t engine_version = 0; - if (pCreateInfo->pApplicationInfo) { - engine_name = pCreateInfo->pApplicationInfo->pEngineName; - engine_version = 
 
 static void *
@@ -605,21 +590,13 @@ radv_handle_per_app_options(struct radv_instance *instance,
 	}
 }
 
-static int radv_get_instance_extension_index(const char *name)
-{
-	for (unsigned i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; ++i) {
-		if (strcmp(name, radv_instance_extensions[i].extensionName) == 0)
-			return i;
-	}
-	return -1;
-}
-
 static const char radv_dri_options_xml[] =
 DRI_CONF_BEGIN
 	DRI_CONF_SECTION_PERFORMANCE
 		DRI_CONF_ADAPTIVE_SYNC("true")
 		DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
 		DRI_CONF_VK_X11_STRICT_IMAGE_COUNT("false")
+		DRI_CONF_RADV_REPORT_LLVM9_VERSION_STRING("false")
 	DRI_CONF_SECTION_END
 
 	DRI_CONF_SECTION_DEBUG
@@ -645,37 +622,30 @@ VkResult radv_CreateInstance(
 	struct radv_instance *instance;
 	VkResult result;
 
-	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
-
-	uint32_t client_version;
-	if (pCreateInfo->pApplicationInfo &&
-	    pCreateInfo->pApplicationInfo->apiVersion != 0) {
-		client_version = pCreateInfo->pApplicationInfo->apiVersion;
-	} else {
-		client_version = VK_API_VERSION_1_0;
-	}
-
-	const char *engine_name = NULL;
-	uint32_t engine_version = 0;
-	if (pCreateInfo->pApplicationInfo) {
-		engine_name = pCreateInfo->pApplicationInfo->pEngineName;
-		engine_version = pCreateInfo->pApplicationInfo->engineVersion;
-	}
-
 	instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
 			      VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
 	if (!instance)
 		return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
 
-	instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+	vk_object_base_init(NULL, &instance->base, VK_OBJECT_TYPE_INSTANCE);
 
 	if (pAllocator)
 		instance->alloc = *pAllocator;
 	else
 		instance->alloc = default_alloc;
 
-	instance->apiVersion = client_version;
-	instance->physicalDeviceCount = -1;
+	if (pCreateInfo->pApplicationInfo) {
+		const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;
+
+		instance->engineName =
+			vk_strdup(&instance->alloc, app->pEngineName,
+				  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+		instance->engineVersion = app->engineVersion;
+		instance->apiVersion = app->apiVersion;
+	}
+
+	if (instance->apiVersion == 0)
+		instance->apiVersion = VK_API_VERSION_1_0;
 
 	/* Get secure compile thread count. NOTE: We cap this at 32 */
 #define MAX_SC_PROCS 32
@@ -700,15 +670,20 @@ VkResult radv_CreateInstance(
 	radv_logi("Created an instance");
 
 	for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
-		const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
-		int index = radv_get_instance_extension_index(ext_name);
+		int idx;
+		for (idx = 0; idx < RADV_INSTANCE_EXTENSION_COUNT; idx++) {
+			if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i],
+				    radv_instance_extensions[idx].extensionName))
+				break;
+		}
 
-		if (index < 0 || !radv_supported_instance_extensions.extensions[index]) {
+		if (idx >= RADV_INSTANCE_EXTENSION_COUNT ||
+		    !radv_instance_extensions_supported.extensions[idx]) {
 			vk_free2(&default_alloc, pAllocator, instance);
 			return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
 		}
 
-		instance->enabled_extensions.extensions[index] = true;
+		instance->enabled_extensions.extensions[idx] = true;
 	}
 
 	bool unchecked = instance->debug_flags & RADV_DEBUG_ALL_ENTRYPOINTS;
@@ -755,16 +730,15 @@ VkResult radv_CreateInstance(
 		}
 	}
 
+	instance->physical_devices_enumerated = false;
+	list_inithead(&instance->physical_devices);
+
 	result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
 	if (result != VK_SUCCESS) {
 		vk_free2(&default_alloc, pAllocator, instance);
 		return vk_error(instance, result);
 	}
 
-	instance->engineName = vk_strdup(&instance->alloc, engine_name,
-					 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
-	instance->engineVersion = engine_version;
-
 	glsl_type_singleton_init_or_ref();
 
 	VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
@@ -786,8 +760,9 @@ void radv_DestroyInstance(
 	if (!instance)
 		return;
 
-	for (int i = 0; i < instance->physicalDeviceCount; ++i) {
-		radv_physical_device_finish(instance->physicalDevices + i);
+	list_for_each_entry_safe(struct radv_physical_device, pdevice,
+				 &instance->physical_devices, link) {
+		radv_physical_device_destroy(pdevice);
 	}
 
 	vk_free(&instance->alloc, instance->engineName);
@@ -801,29 +776,35 @@ void radv_DestroyInstance(
 
 	vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
 
+	vk_object_base_finish(&instance->base);
 	vk_free(&instance->alloc, instance);
 }
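/* [Editor's note: illustrative sketch, not part of the patch.]
 * The fixed physicalDevices array is replaced by an intrusive doubly
 * linked list (Mesa's list_inithead/list_addtail/list_for_each_entry_safe
 * from util/list.h).  A hand-rolled, self-contained equivalent of that
 * pattern, including the "safe" traversal that allows freeing the
 * current node while iterating:
 */
#include <stddef.h>
#include <stdlib.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *h) { h->prev = h->next = h; }

static void list_addtail(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

struct pdevice { int id; struct list_node link; };

static void destroy_all(struct list_node *head)
{
	/* "safe" iteration: fetch the next pointer before freeing */
	for (struct list_node *it = head->next, *tmp = it->next;
	     it != head; it = tmp, tmp = it->next) {
		struct pdevice *p = (struct pdevice *)
			((char *)it - offsetof(struct pdevice, link));
		free(p);
	}
	list_init(head);
}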
 
 static VkResult
-radv_enumerate_devices(struct radv_instance *instance)
+radv_enumerate_physical_devices(struct radv_instance *instance)
 {
+	if (instance->physical_devices_enumerated)
+		return VK_SUCCESS;
+
+	instance->physical_devices_enumerated = true;
+
 	/* TODO: Check for more devices ? */
 	drmDevicePtr devices[8];
-	VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
+	VkResult result = VK_SUCCESS;
 	int max_devices;
 
-	instance->physicalDeviceCount = 0;
-
 	if (getenv("RADV_FORCE_FAMILY")) {
 		/* When RADV_FORCE_FAMILY is set, the driver creates a nul
 		 * device that allows to test the compiler without having an
 		 * AMDGPU instance.
 		 */
-		result = radv_physical_device_init(instance->physicalDevices +
-						   instance->physicalDeviceCount,
-						   instance, NULL);
+		struct radv_physical_device *pdevice;
+
+		result = radv_physical_device_try_create(instance, NULL, &pdevice);
+		if (result != VK_SUCCESS)
+			return result;
 
-		++instance->physicalDeviceCount;
+		list_addtail(&pdevice->link, &instance->physical_devices);
 		return VK_SUCCESS;
 	}
 
@@ -833,25 +814,32 @@ radv_enumerate_physical_devices(struct radv_instance *instance)
 	radv_logi("Found %d drm nodes", max_devices);
 
 	if (max_devices < 1)
-		return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
+		return vk_error(instance, VK_SUCCESS);
 
 	for (unsigned i = 0; i < (unsigned)max_devices; i++) {
 		if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
 		    devices[i]->bustype == DRM_BUS_PCI &&
 		    devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
 
-			result = radv_physical_device_init(instance->physicalDevices +
-							   instance->physicalDeviceCount,
-							   instance,
-							   devices[i]);
-			if (result == VK_SUCCESS)
-				++instance->physicalDeviceCount;
-			else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
+			struct radv_physical_device *pdevice;
+			result = radv_physical_device_try_create(instance, devices[i],
+								 &pdevice);
+			/* Incompatible DRM device, skip. */
+			if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
+				result = VK_SUCCESS;
+				continue;
+			}
+
+			/* Error creating the physical device, report the error. */
+			if (result != VK_SUCCESS)
 				break;
+
+			list_addtail(&pdevice->link, &instance->physical_devices);
 		}
 	}
 	drmFreeDevices(devices, max_devices);
 
+	/* If we successfully enumerated any devices, call it success */
 	return result;
 }
 
VkResult radv_EnumeratePhysicalDevices(
@@ -861,25 +849,20 @@ VkResult radv_EnumeratePhysicalDevices(
     VkPhysicalDevice*                           pPhysicalDevices)
 {
 	RADV_FROM_HANDLE(radv_instance, instance, _instance);
-	VkResult result;
+	VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
 
-	if (instance->physicalDeviceCount < 0) {
-		result = radv_enumerate_devices(instance);
-		if (result != VK_SUCCESS &&
-		    result != VK_ERROR_INCOMPATIBLE_DRIVER)
-			return result;
-	}
+	VkResult result = radv_enumerate_physical_devices(instance);
+	if (result != VK_SUCCESS)
+		return result;
 
-	if (!pPhysicalDevices) {
-		*pPhysicalDeviceCount = instance->physicalDeviceCount;
-	} else {
-		*pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
-		for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
-			pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
+	list_for_each_entry(struct radv_physical_device, pdevice,
+			    &instance->physical_devices, link) {
+		vk_outarray_append(&out, i) {
+			*i = radv_physical_device_to_handle(pdevice);
+		}
 	}
 
-	return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
-								     : VK_SUCCESS;
+	return vk_outarray_status(&out);
 }
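/* [Editor's note: illustrative sketch, not part of the patch.]
 * vk_outarray_* implements the driver side of Vulkan's two-call
 * enumeration contract, including the VK_INCOMPLETE case the old
 * hand-written code computed manually.  The application side of the
 * same contract looks like this:
 */
#include <stdlib.h>
#include <vulkan/vulkan.h>

static VkResult
enumerate_gpus(VkInstance inst, VkPhysicalDevice **out, uint32_t *count)
{
	/* First call: NULL array, query the count only. */
	VkResult r = vkEnumeratePhysicalDevices(inst, count, NULL);
	if (r != VK_SUCCESS)
		return r;

	*out = calloc(*count, sizeof(**out));
	if (!*out)
		return VK_ERROR_OUT_OF_HOST_MEMORY;

	/* Second call: fill the array.  If the device list shrank in
	 * between, this can still return VK_INCOMPLETE, so robust code
	 * loops until it gets VK_SUCCESS. */
	return vkEnumeratePhysicalDevices(inst, count, *out);
}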
 
 VkResult radv_EnumeratePhysicalDeviceGroups(
@@ -888,27 +871,24 @@ VkResult radv_EnumeratePhysicalDeviceGroups(
     VkInstance                                  _instance,
     uint32_t*                                   pPhysicalDeviceGroupCount,
     VkPhysicalDeviceGroupProperties*            pPhysicalDeviceGroupProperties)
 {
 	RADV_FROM_HANDLE(radv_instance, instance, _instance);
-	VkResult result;
+	VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
+			 pPhysicalDeviceGroupCount);
 
-	if (instance->physicalDeviceCount < 0) {
-		result = radv_enumerate_devices(instance);
-		if (result != VK_SUCCESS &&
-		    result != VK_ERROR_INCOMPATIBLE_DRIVER)
-			return result;
-	}
+	VkResult result = radv_enumerate_physical_devices(instance);
+	if (result != VK_SUCCESS)
+		return result;
 
-	if (!pPhysicalDeviceGroupProperties) {
-		*pPhysicalDeviceGroupCount = instance->physicalDeviceCount;
-	} else {
-		*pPhysicalDeviceGroupCount = MIN2(*pPhysicalDeviceGroupCount, instance->physicalDeviceCount);
-		for (unsigned i = 0; i < *pPhysicalDeviceGroupCount; ++i) {
-			pPhysicalDeviceGroupProperties[i].physicalDeviceCount = 1;
-			pPhysicalDeviceGroupProperties[i].physicalDevices[0] = radv_physical_device_to_handle(instance->physicalDevices + i);
-			pPhysicalDeviceGroupProperties[i].subsetAllocation = false;
+	list_for_each_entry(struct radv_physical_device, pdevice,
+			    &instance->physical_devices, link) {
+		vk_outarray_append(&out, p) {
+			p->physicalDeviceCount = 1;
+			memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
+			p->physicalDevices[0] = radv_physical_device_to_handle(pdevice);
+			p->subsetAllocation = false;
 		}
 	}
 
-	return *pPhysicalDeviceGroupCount < instance->physicalDeviceCount ? VK_INCOMPLETE
-									  : VK_SUCCESS;
+	return vk_outarray_status(&out);
 }
 
 void radv_GetPhysicalDeviceFeatures(
@@ -960,7 +940,7 @@ void radv_GetPhysicalDeviceFeatures(
 		.shaderCullDistance                       = true,
 		.shaderFloat64                            = true,
 		.shaderInt64                              = true,
-		.shaderInt16                              = pdevice->rad_info.chip_class >= GFX9 && !pdevice->use_aco,
+		.shaderInt16                              = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8,
 		.sparseBinding                            = true,
 		.variableMultisampleRate                  = true,
 		.inheritedQueries                         = true,
@@ -1002,11 +982,11 @@ void radv_GetPhysicalDeviceFeatures2(
 		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
 			VkPhysicalDevice16BitStorageFeatures *features =
 			    (VkPhysicalDevice16BitStorageFeatures*)ext;
-			bool enabled = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
-			features->storageBuffer16BitAccess = enabled;
-			features->uniformAndStorageBuffer16BitAccess = enabled;
-			features->storagePushConstant16 = enabled;
-			features->storageInputOutput16 = enabled && LLVM_VERSION_MAJOR >= 9;
+			bool enable = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8;
+			features->storageBuffer16BitAccess = enable;
+			features->uniformAndStorageBuffer16BitAccess = enable;
+			features->storagePushConstant16 = enable;
+			features->storageInputOutput16 = pdevice->rad_info.has_double_rate_fp16 && !pdevice->use_aco && LLVM_VERSION_MAJOR >= 9;
 			break;
 		}
 		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
@@ -1104,17 +1084,17 @@ void radv_GetPhysicalDeviceFeatures2(
 		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES: {
 			VkPhysicalDevice8BitStorageFeatures *features =
 			    (VkPhysicalDevice8BitStorageFeatures *)ext;
-			bool enabled = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
-			features->storageBuffer8BitAccess = enabled;
-			features->uniformAndStorageBuffer8BitAccess = enabled;
-			features->storagePushConstant8 = enabled;
+			bool enable = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8;
+			features->storageBuffer8BitAccess = enable;
+			features->uniformAndStorageBuffer8BitAccess = enable;
+			features->storagePushConstant8 = enable;
 			break;
 		}
 		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES: {
 			VkPhysicalDeviceShaderFloat16Int8Features *features =
 				(VkPhysicalDeviceShaderFloat16Int8Features*)ext;
-			features->shaderFloat16 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
-			features->shaderInt8 = !pdevice->use_aco;
+			features->shaderFloat16 = pdevice->rad_info.has_double_rate_fp16 && !pdevice->use_aco;
+			features->shaderInt8 = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8;
 			break;
 		}
 		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES: {
@@ -1210,7 +1190,7 @@ void radv_GetPhysicalDeviceFeatures2(
 		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES: {
 			VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures *features =
 				(VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures *)ext;
-			features->shaderSubgroupExtendedTypes = true;
+			features->shaderSubgroupExtendedTypes = !pdevice->use_aco;
 			break;
 		}
 		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR: {
@@ -1222,10 +1202,11 @@ void radv_GetPhysicalDeviceFeatures2(
 		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: {
 			VkPhysicalDeviceVulkan11Features *features =
 				(VkPhysicalDeviceVulkan11Features *)ext;
-			features->storageBuffer16BitAccess = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
-			features->uniformAndStorageBuffer16BitAccess = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
-			features->storagePushConstant16 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
-			features->storageInputOutput16 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco && LLVM_VERSION_MAJOR >= 9;
+			bool storage16_enable = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8;
+			features->storageBuffer16BitAccess = storage16_enable;
+			features->uniformAndStorageBuffer16BitAccess = storage16_enable;
+			features->storagePushConstant16 = storage16_enable;
+			features->storageInputOutput16 = pdevice->rad_info.has_double_rate_fp16 && !pdevice->use_aco && LLVM_VERSION_MAJOR >= 9;
 			features->multiview = true;
 			features->multiviewGeometryShader = true;
 			features->multiviewTessellationShader = true;
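/* [Editor's note: illustrative sketch, not part of the patch.]
 * The cases above fill extension feature structs that arrive chained
 * through pNext.  An application (on a Vulkan 1.2 instance) queries
 * them by chaining the structs into VkPhysicalDeviceFeatures2 before
 * calling into the driver:
 */
#include <stdbool.h>
#include <vulkan/vulkan.h>

static bool
supports_float16_int8(VkPhysicalDevice pdev)
{
	VkPhysicalDeviceVulkan12Features vk12 = {
		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
	};
	VkPhysicalDeviceFeatures2 features = {
		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
		.pNext = &vk12, /* the driver walks this chain by sType */
	};

	vkGetPhysicalDeviceFeatures2(pdev, &features);
	return vk12.shaderFloat16 && vk12.shaderInt8;
}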
@@ -1239,15 +1220,16 @@ void radv_GetPhysicalDeviceFeatures2(
 		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: {
 			VkPhysicalDeviceVulkan12Features *features =
 				(VkPhysicalDeviceVulkan12Features *)ext;
+			bool int8_enable = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8;
 			features->samplerMirrorClampToEdge = true;
 			features->drawIndirectCount = true;
-			features->storageBuffer8BitAccess = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
-			features->uniformAndStorageBuffer8BitAccess = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
-			features->storagePushConstant8 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
+			features->storageBuffer8BitAccess = int8_enable;
+			features->uniformAndStorageBuffer8BitAccess = int8_enable;
+			features->storagePushConstant8 = int8_enable;
 			features->shaderBufferInt64Atomics = LLVM_VERSION_MAJOR >= 9;
 			features->shaderSharedInt64Atomics = LLVM_VERSION_MAJOR >= 9;
-			features->shaderFloat16 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco;
-			features->shaderInt8 = !pdevice->use_aco;
+			features->shaderFloat16 = pdevice->rad_info.has_double_rate_fp16 && !pdevice->use_aco;
+			features->shaderInt8 = int8_enable;
 			features->descriptorIndexing = true;
 			features->shaderInputAttachmentArrayDynamicIndexing = true;
 			features->shaderUniformTexelBufferArrayDynamicIndexing = true;
@@ -1273,7 +1255,7 @@ void radv_GetPhysicalDeviceFeatures2(
 			features->scalarBlockLayout = pdevice->rad_info.chip_class >= GFX7;
 			features->imagelessFramebuffer = true;
 			features->uniformBufferStandardLayout = true;
-			features->shaderSubgroupExtendedTypes = true;
+			features->shaderSubgroupExtendedTypes = !pdevice->use_aco;
 			features->separateDepthStencilLayouts = true;
 			features->hostQueryReset = true;
 			features->timelineSemaphore = pdevice->rad_info.has_syncobj_wait_for_submit;
@@ -1299,6 +1281,26 @@ void radv_GetPhysicalDeviceFeatures2(
 			features->stippledSmoothLines = false;
 			break;
 		}
+		case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: {
+			VkDeviceMemoryOverallocationCreateInfoAMD *features =
+				(VkDeviceMemoryOverallocationCreateInfoAMD *)ext;
+			features->overallocationBehavior = true;
+			break;
+		}
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: {
+			VkPhysicalDeviceRobustness2FeaturesEXT *features =
+				(VkPhysicalDeviceRobustness2FeaturesEXT *)ext;
+			features->robustBufferAccess2 = true;
+			features->robustImageAccess2 = true;
+			features->nullDescriptor = true;
+			break;
+		}
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: {
+			VkPhysicalDevicePrivateDataFeaturesEXT *features =
+				(VkPhysicalDevicePrivateDataFeaturesEXT *)ext;
+			features->privateData = true;
+			break;
+		}
 		default:
 			break;
 		}
@@ -1339,14 +1341,14 @@ void radv_GetPhysicalDeviceProperties(
 		.maxImageDimension3D                      = (1 << 11),
 		.maxImageDimensionCube                    = (1 << 14),
 		.maxImageArrayLayers                      = (1 << 11),
-		.maxTexelBufferElements                   = 128 * 1024 * 1024,
+		.maxTexelBufferElements                   = UINT32_MAX,
 		.maxUniformBufferRange                    = UINT32_MAX,
 		.maxStorageBufferRange                    = UINT32_MAX,
 		.maxPushConstantsSize                     = MAX_PUSH_CONSTANTS_SIZE,
 		.maxMemoryAllocationCount                 = UINT32_MAX,
 		.maxSamplerAllocationCount                = 64 * 1024,
 		.bufferImageGranularity                   = 64, /* A cache line */
-		.sparseAddressSpaceSize                   = 0xffffffffu, /* buffer max size */
+		.sparseAddressSpaceSize                   = RADV_MAX_MEMORY_ALLOCATION_SIZE, /* buffer max size */
 		.maxBoundDescriptorSets                   = MAX_SETS,
 		.maxPerStageDescriptorSamplers            = max_descriptor_set_size,
 		.maxPerStageDescriptorUniformBuffers      = max_descriptor_set_size,
@@ -1474,7 +1476,8 @@ radv_get_physical_device_properties_1_1(struct radv_physical_device *pdevice,
 	p->deviceNodeMask = 0;
 
 	p->subgroupSize = RADV_SUBGROUP_SIZE;
-	p->subgroupSupportedStages = VK_SHADER_STAGE_ALL;
+	p->subgroupSupportedStages = VK_SHADER_STAGE_ALL_GRAPHICS |
+				     VK_SHADER_STAGE_COMPUTE_BIT;
 	p->subgroupSupportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT |
 					 VK_SUBGROUP_FEATURE_VOTE_BIT |
 					 VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
@@ -1482,9 +1485,9 @@ radv_get_physical_device_properties_1_1(struct radv_physical_device *pdevice,
 					 VK_SUBGROUP_FEATURE_CLUSTERED_BIT |
 					 VK_SUBGROUP_FEATURE_QUAD_BIT;
 
-	if (pdevice->rad_info.chip_class == GFX8 ||
-	    pdevice->rad_info.chip_class == GFX9 ||
-	    (pdevice->rad_info.chip_class == GFX10 && pdevice->use_aco)) {
+	if (((pdevice->rad_info.chip_class == GFX6 ||
+	      pdevice->rad_info.chip_class == GFX7) && !pdevice->use_aco) ||
+	    pdevice->rad_info.chip_class >= GFX8) {
 		p->subgroupSupportedOperations |= VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
 						  VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT;
 	}
@@ -1507,8 +1510,8 @@ radv_get_physical_device_properties_1_2(struct radv_physical_device *pdevice,
 	p->driverID = VK_DRIVER_ID_MESA_RADV;
 	snprintf(p->driverName, VK_MAX_DRIVER_NAME_SIZE, "radv");
 	snprintf(p->driverInfo, VK_MAX_DRIVER_INFO_SIZE,
-		 "Mesa " PACKAGE_VERSION MESA_GIT_SHA1
-		 " (LLVM " MESA_LLVM_VERSION_STRING ")");
+		 "Mesa " PACKAGE_VERSION MESA_GIT_SHA1 " (%s)",
+		 radv_get_compiler_string(pdevice));
 	p->conformanceVersion = (VkConformanceVersion) {
 		.major = 1,
 		.minor = 2,
@@ -1519,8 +1522,13 @@ radv_get_physical_device_properties_1_2(struct radv_physical_device *pdevice,
 	/* On AMD hardware, denormals and rounding modes for fp16/fp64 are
 	 * controlled by the same config register.
 	 */
-	p->denormBehaviorIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR;
-	p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR;
+	if (pdevice->rad_info.has_double_rate_fp16) {
+		p->denormBehaviorIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR;
+		p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR;
+	} else {
+		p->denormBehaviorIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR;
+		p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR;
+	}
 
 	/* Do not allow both preserving and flushing denorms because different
 	 * shaders in the same pipeline can have different settings and this
@@ -1537,10 +1545,10 @@ radv_get_physical_device_properties_1_2(struct radv_physical_device *pdevice,
 	p->shaderSignedZeroInfNanPreserveFloat32 = true;
 
 	p->shaderDenormFlushToZeroFloat16 = false;
-	p->shaderDenormPreserveFloat16 = pdevice->rad_info.chip_class >= GFX8;
-	p->shaderRoundingModeRTEFloat16 = pdevice->rad_info.chip_class >= GFX8;
+	p->shaderDenormPreserveFloat16 = pdevice->rad_info.has_double_rate_fp16;
+	p->shaderRoundingModeRTEFloat16 = pdevice->rad_info.has_double_rate_fp16;
 	p->shaderRoundingModeRTZFloat16 = false;
-	p->shaderSignedZeroInfNanPreserveFloat16 = pdevice->rad_info.chip_class >= GFX8;
+	p->shaderSignedZeroInfNanPreserveFloat16 = pdevice->rad_info.has_double_rate_fp16;
 
 	p->shaderDenormFlushToZeroFloat64 = false;
 	p->shaderDenormPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8;
@@ -1924,6 +1932,13 @@ void radv_GetPhysicalDeviceProperties2(
 			props->lineSubPixelPrecisionBits = 4;
 			break;
 		}
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT: {
+			VkPhysicalDeviceRobustness2PropertiesEXT *properties =
+				(VkPhysicalDeviceRobustness2PropertiesEXT *)ext;
+			properties->robustStorageBufferAccessSizeAlignment = 4;
+			properties->robustUniformBufferAccessSizeAlignment = 4;
+			break;
+		}
 		default:
 			break;
 		}
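/* [Editor's note: illustrative sketch, not part of the patch.]
 * The heapBudget/heapUsage values filled by the budget code below are
 * consumed through VK_EXT_memory_budget (the extension must be enabled)
 * by chaining the EXT struct into the memory-properties query:
 */
#include <stdio.h>
#include <vulkan/vulkan.h>

static void
print_heap_budgets(VkPhysicalDevice pdev)
{
	VkPhysicalDeviceMemoryBudgetPropertiesEXT budget = {
		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT,
	};
	VkPhysicalDeviceMemoryProperties2 props = {
		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2,
		.pNext = &budget,
	};

	vkGetPhysicalDeviceMemoryProperties2(pdev, &props);

	for (uint32_t i = 0; i < props.memoryProperties.memoryHeapCount; i++)
		printf("heap %u: usage %llu / budget %llu\n", i,
		       (unsigned long long)budget.heapUsage[i],
		       (unsigned long long)budget.heapBudget[i]);
}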
@@ -2050,7 +2065,7 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
 	for (int i = 0; i < device->memory_properties.memoryTypeCount; i++) {
 		uint32_t heap_index = device->memory_properties.memoryTypes[i].heapIndex;
 
-		if (radv_is_mem_type_vram(device->mem_type_indices[i])) {
+		if ((device->memory_domains[i] & RADEON_DOMAIN_VRAM) && (device->memory_flags[i] & RADEON_FLAG_NO_CPU_ACCESS)) {
 			heap_usage = device->ws->query_value(device->ws,
 							     RADEON_ALLOCATED_VRAM);
 
@@ -2060,7 +2075,7 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
 
 			memoryBudget->heapBudget[heap_index] = heap_budget;
 			memoryBudget->heapUsage[heap_index] = heap_usage;
-		} else if (radv_is_mem_type_vram_visible(device->mem_type_indices[i])) {
+		} else if (device->memory_domains[i] & RADEON_DOMAIN_VRAM) {
 			heap_usage = device->ws->query_value(device->ws,
 							     RADEON_ALLOCATED_VRAM_VIS);
 
@@ -2070,7 +2085,9 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
 
 			memoryBudget->heapBudget[heap_index] = heap_budget;
 			memoryBudget->heapUsage[heap_index] = heap_usage;
-		} else if (radv_is_mem_type_gtt_wc(device->mem_type_indices[i])) {
+		} else {
+			assert(device->memory_domains[i] & RADEON_DOMAIN_GTT);
+
 			heap_usage = device->ws->query_value(device->ws,
 							     RADEON_ALLOCATED_GTT);
 
@@ -2121,7 +2138,8 @@ VkResult radv_GetMemoryHostPointerPropertiesEXT(
 	const struct radv_physical_device *physical_device = device->physical_device;
 	uint32_t memoryTypeBits = 0;
 	for (int i = 0; i < physical_device->memory_properties.memoryTypeCount; i++) {
-		if (radv_is_mem_type_gtt_cached(physical_device->mem_type_indices[i])) {
+		if (physical_device->memory_domains[i] == RADEON_DOMAIN_GTT &&
+		    !(physical_device->memory_flags[i] & RADEON_FLAG_GTT_WC)) {
 			memoryTypeBits = (1 << i);
 			break;
 		}
@@ -2226,8 +2244,8 @@ radv_bo_list_finish(struct radv_bo_list *bo_list)
 	pthread_mutex_destroy(&bo_list->mutex);
 }
 
-static VkResult radv_bo_list_add(struct radv_device *device,
-				 struct radeon_winsys_bo *bo)
+VkResult radv_bo_list_add(struct radv_device *device,
+			  struct radeon_winsys_bo *bo)
 {
 	struct radv_bo_list *bo_list = &device->bo_list;
 
@@ -2256,8 +2274,8 @@ static VkResult radv_bo_list_add(struct radv_device *device,
 	return VK_SUCCESS;
 }
 
-static void radv_bo_list_remove(struct radv_device *device,
-				struct radeon_winsys_bo *bo)
+void radv_bo_list_remove(struct radv_device *device,
+			 struct radeon_winsys_bo *bo)
 {
 	struct radv_bo_list *bo_list = &device->bo_list;
 
@@ -2268,7 +2286,9 @@ static void radv_bo_list_remove(struct radv_device *device,
 		return;
 
 	pthread_mutex_lock(&bo_list->mutex);
-	for(unsigned i = 0; i < bo_list->list.count; ++i) {
+	/* Loop the list backwards so we find the most recently added
+	 * memory first. */
+	for(unsigned i = bo_list->list.count; i-- > 0;) {
 		if (bo_list->list.bos[i] == bo) {
 			bo_list->list.bos[i] = bo_list->list.bos[bo_list->list.count - 1];
 			--bo_list->list.count;
@@ -2528,7 +2548,7 @@ static void run_secure_compile_device(struct radv_device *device, unsigned process
 	struct radv_pipeline *pipeline;
 	bool sc_read = true;
 
-	pipeline = vk_zalloc2(&device->alloc, NULL, sizeof(*pipeline), 8,
+	pipeline = vk_zalloc2(&device->vk.alloc, NULL, sizeof(*pipeline), 8,
 			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 
 	pipeline->device = device;
@@ -2654,7 +2674,7 @@ static void run_secure_compile_device(struct radv_device *device, unsigned process
 			free((void *) pStages[i]);
 	}
 
-	vk_free(&device->alloc, pipeline);
+	vk_free(&device->vk.alloc, pipeline);
 
 	sc_type = RADV_SC_TYPE_COMPILE_PIPELINE_FINISHED;
 	write(fd_secure_output, &sc_type, sizeof(sc_type));
@@ -2769,7 +2789,7 @@ static void destroy_secure_compile_device(struct radv_device *device, unsigned process)
 
 static VkResult fork_secure_compile_idle_device(struct radv_device *device)
 {
-	device->sc_state = vk_zalloc(&device->alloc,
+	device->sc_state = vk_zalloc(&device->vk.alloc,
 				     sizeof(struct radv_secure_compile_state),
 				     8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
 
@@ -2796,7 +2816,7 @@ static VkResult fork_secure_compile_idle_device(struct radv_device *device)
 		}
 	}
 
-	device->sc_state->secure_compile_processes = vk_zalloc(&device->alloc,
+	device->sc_state->secure_compile_processes = vk_zalloc(&device->vk.alloc,
 							       sizeof(struct radv_secure_compile_process) * sc_threads, 8,
 							       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
 
@@ -2896,6 +2916,24 @@ radv_create_pthread_cond(pthread_cond_t *cond)
 	return VK_SUCCESS;
 }
 
+static VkResult
+check_physical_device_features(VkPhysicalDevice physicalDevice,
+			       const VkPhysicalDeviceFeatures *features)
+{
+	RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
+	VkPhysicalDeviceFeatures supported_features;
+	radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
+	VkBool32 *supported_feature = (VkBool32 *)&supported_features;
+	VkBool32 *enabled_feature = (VkBool32 *)features;
+	unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
+	for (uint32_t i = 0; i < num_features; i++) {
+		if (enabled_feature[i] && !supported_feature[i])
+			return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
+	}
+
+	return VK_SUCCESS;
+}
+
 VkResult radv_CreateDevice(
 	VkPhysicalDevice                            physicalDevice,
 	const VkDeviceCreateInfo*                   pCreateInfo,
@@ -2907,17 +2945,41 @@ VkResult radv_CreateDevice(
 	struct radv_device *device;
 
 	bool keep_shader_info = false;
+	bool robust_buffer_access = false;
+	bool overallocation_disallowed = false;
 
 	/* Check enabled features */
 	if (pCreateInfo->pEnabledFeatures) {
-		VkPhysicalDeviceFeatures supported_features;
-		radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
-		VkBool32 *supported_feature = (VkBool32 *)&supported_features;
-		VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
-		unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
-		for (uint32_t i = 0; i < num_features; i++) {
-			if (enabled_feature[i] && !supported_feature[i])
-				return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
+		result = check_physical_device_features(physicalDevice,
+							pCreateInfo->pEnabledFeatures);
+		if (result != VK_SUCCESS)
+			return result;
+
+		if (pCreateInfo->pEnabledFeatures->robustBufferAccess)
+			robust_buffer_access = true;
+	}
+
+	vk_foreach_struct_const(ext, pCreateInfo->pNext) {
+		switch (ext->sType) {
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
+			const VkPhysicalDeviceFeatures2 *features = (const void *)ext;
+			result = check_physical_device_features(physicalDevice,
+								&features->features);
+			if (result != VK_SUCCESS)
+				return result;
+
+			if (features->features.robustBufferAccess)
+				robust_buffer_access = true;
+			break;
+		}
+		case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: {
+			const VkDeviceMemoryOverallocationCreateInfoAMD *overallocation = (const void *)ext;
+			if (overallocation->overallocationBehavior == VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD)
+				overallocation_disallowed = true;
+			break;
+		}
+		default:
+			break;
 		}
 	}
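/* [Editor's note: illustrative sketch, not part of the patch.]
 * vk_foreach_struct_const above is a loop over the pNext chain.  Every
 * Vulkan extension struct begins with sType/pNext, so the chain can be
 * walked generically through VkBaseInStructure; a minimal expansion of
 * the same idea:
 */
#include <vulkan/vulkan.h>

static const void *
find_in_chain(const void *chain, VkStructureType wanted)
{
	for (const VkBaseInStructure *s = chain; s != NULL; s = s->pNext) {
		if (s->sType == wanted)
			return s; /* caller casts to the concrete struct */
	}
	return NULL;
}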
 
@@ -2927,21 +2989,19 @@ VkResult radv_CreateDevice(
 	if (!device)
 		return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
-	device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+	vk_device_init(&device->vk, pCreateInfo,
+		       &physical_device->instance->alloc, pAllocator);
+
 	device->instance = physical_device->instance;
 	device->physical_device = physical_device;
 
 	device->ws = physical_device->ws;
-	if (pAllocator)
-		device->alloc = *pAllocator;
-	else
-		device->alloc = physical_device->instance->alloc;
 
 	for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
 		const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
 		int index = radv_get_device_extension_index(ext_name);
 		if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
-			vk_free(&device->alloc, device);
+			vk_free(&device->vk.alloc, device);
 			return vk_error(physical_device->instance, VK_ERROR_EXTENSION_NOT_PRESENT);
 		}
 
@@ -2961,12 +3021,14 @@ VkResult radv_CreateDevice(
 	device->enabled_extensions.EXT_buffer_device_address ||
 			device->enabled_extensions.KHR_buffer_device_address;
 
-	device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
-	                               pCreateInfo->pEnabledFeatures->robustBufferAccess;
+	device->robust_buffer_access = robust_buffer_access;
 
 	mtx_init(&device->shader_slab_mutex, mtx_plain);
 	list_inithead(&device->shader_slabs);
 
+	device->overallocation_disallowed = overallocation_disallowed;
+	mtx_init(&device->overallocation_mutex, mtx_plain);
+
 	radv_bo_list_init(&device->bo_list);
 
 	for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
@@ -2977,7 +3039,7 @@ VkResult radv_CreateDevice(
 		assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
 
-		device->queues[qfi] = vk_alloc(&device->alloc,
+		device->queues[qfi] = vk_alloc(&device->vk.alloc,
 					       queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
 		if (!device->queues[qfi]) {
 			result = VK_ERROR_OUT_OF_HOST_MEMORY;
@@ -3161,10 +3223,10 @@ fail:
 		for (unsigned q = 0; q < device->queue_count[i]; q++)
 			radv_queue_finish(&device->queues[i][q]);
 		if (device->queue_count[i])
-			vk_free(&device->alloc, device->queues[i]);
+			vk_free(&device->vk.alloc, device->queues[i]);
 	}
 
-	vk_free(&device->alloc, device);
+	vk_free(&device->vk.alloc, device);
 	return result;
 }
 
@@ -3187,7 +3249,7 @@ void radv_DestroyDevice(
 		for (unsigned q = 0; q < device->queue_count[i]; q++)
 			radv_queue_finish(&device->queues[i][q]);
 		if (device->queue_count[i])
-			vk_free(&device->alloc, device->queues[i]);
+			vk_free(&device->vk.alloc, device->queues[i]);
 		if (device->empty_cs[i])
 			device->ws->cs_destroy(device->empty_cs[i]);
 	}
@@ -3211,10 +3273,10 @@ void radv_DestroyDevice(
 
 	if (device->sc_state) {
 		free(device->sc_state->uid);
-		vk_free(&device->alloc, device->sc_state->secure_compile_processes);
+		vk_free(&device->vk.alloc, device->sc_state->secure_compile_processes);
 	}
 
-	vk_free(&device->alloc, device->sc_state);
-	vk_free(&device->alloc, device);
+	vk_free(&device->vk.alloc, device->sc_state);
+	vk_free(&device->vk.alloc, device);
 }
 
 VkResult radv_EnumerateInstanceLayerProperties(
@@ -3697,8 +3759,7 @@ radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
 		radv_cs_add_buffer(device->ws, cs, device->gfx_init);
 	} else {
-		struct radv_physical_device *physical_device = device->physical_device;
-		si_emit_graphics(physical_device, cs);
+		si_emit_graphics(device, cs);
 	}
 }
 
@@ -4895,7 +4956,7 @@ VkResult radv_EnumerateInstanceExtensionProperties(
 	VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
 
 	for (int i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; i++) {
-		if (radv_supported_instance_extensions.extensions[i]) {
+		if (radv_instance_extensions_supported.extensions[i]) {
 			vk_outarray_append(&out, prop) {
 				*prop = radv_instance_extensions[i];
 			}
@@ -4947,6 +5008,11 @@ PFN_vkVoidFunction radv_GetInstanceProcAddr(
 	LOOKUP_RADV_ENTRYPOINT(EnumerateInstanceVersion);
 	LOOKUP_RADV_ENTRYPOINT(CreateInstance);
 
+	/* GetInstanceProcAddr() can also be called with a NULL instance.
+	 * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
+	 */
+	LOOKUP_RADV_ENTRYPOINT(GetInstanceProcAddr);
+
 #undef LOOKUP_RADV_ENTRYPOINT
 
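/* [Editor's note: illustrative sketch, not part of the patch.]
 * The new entry above matters because, per the linked Khronos issue,
 * vkGetInstanceProcAddr may itself be queried with a NULL instance;
 * loaders use that to bootstrap before any instance exists:
 */
#include <vulkan/vulkan.h>

static PFN_vkCreateInstance
load_create_instance(PFN_vkGetInstanceProcAddr gipa)
{
	/* Legal with instance == NULL for global-scope commands (and,
	 * since the spec clarification, for vkGetInstanceProcAddr too). */
	PFN_vkGetInstanceProcAddr self = (PFN_vkGetInstanceProcAddr)
		gipa(NULL, "vkGetInstanceProcAddr");
	if (self)
		gipa = self;

	return (PFN_vkCreateInstance)gipa(NULL, "vkCreateInstance");
}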
 	if (instance == NULL)
@@ -5051,12 +5117,19 @@ static void radv_free_memory(struct radv_device *device,
 #endif
 
 	if (mem->bo) {
+		if (device->overallocation_disallowed) {
+			mtx_lock(&device->overallocation_mutex);
+			device->allocated_memory_size[mem->heap_index] -= mem->alloc_size;
+			mtx_unlock(&device->overallocation_mutex);
+		}
+
 		radv_bo_list_remove(device, mem->bo);
 		device->ws->buffer_destroy(mem->bo);
 		mem->bo = NULL;
 	}
 
-	vk_free2(&device->alloc, pAllocator, mem);
+	vk_object_base_finish(&mem->base);
+	vk_free2(&device->vk.alloc, pAllocator, mem);
 }
 
 static VkResult radv_alloc_memory(struct radv_device *device,
@@ -5068,7 +5141,6 @@ static VkResult radv_alloc_memory(struct radv_device *device,
 	VkResult result;
 	enum radeon_bo_domain domain;
 	uint32_t flags = 0;
-	enum radv_mem_type mem_type_index = device->physical_device->mem_type_indices[pAllocateInfo->memoryTypeIndex];
 
 	assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
 
@@ -5094,11 +5166,14 @@ static VkResult radv_alloc_memory(struct radv_device *device,
 		return VK_SUCCESS;
 	}
 
-	mem = vk_zalloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
+	mem = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*mem), 8,
 			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 	if (mem == NULL)
 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+	vk_object_base_init(&device->vk, &mem->base,
+			    VK_OBJECT_TYPE_DEVICE_MEMORY);
+
 	if (wsi_info && wsi_info->implicit_sync)
 		flags |= RADEON_FLAG_IMPLICIT_SYNC;
 
@@ -5150,7 +5225,6 @@ static VkResult radv_alloc_memory(struct radv_device *device,
 		}
 	} else if (host_ptr_info) {
 		assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
-		assert(radv_is_mem_type_gtt_cached(mem_type_index));
 		mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
 		                                      pAllocateInfo->allocationSize,
 		                                      priority);
@@ -5162,19 +5236,11 @@ static VkResult radv_alloc_memory(struct radv_device *device,
 		}
 	} else {
 		uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
-		if (radv_is_mem_type_gtt_wc(mem_type_index) ||
-		    radv_is_mem_type_gtt_cached(mem_type_index))
-			domain = RADEON_DOMAIN_GTT;
-		else
-			domain = RADEON_DOMAIN_VRAM;
-
-		if (radv_is_mem_type_vram(mem_type_index))
-			flags |= RADEON_FLAG_NO_CPU_ACCESS;
-		else
-			flags |= RADEON_FLAG_CPU_ACCESS;
+		uint32_t heap_index;
 
-		if (radv_is_mem_type_gtt_wc(mem_type_index))
-			flags |= RADEON_FLAG_GTT_WC;
+		heap_index = device->physical_device->memory_properties.memoryTypes[pAllocateInfo->memoryTypeIndex].heapIndex;
+		domain = device->physical_device->memory_domains[pAllocateInfo->memoryTypeIndex];
+		flags |= device->physical_device->memory_flags[pAllocateInfo->memoryTypeIndex];
 
 		if (!dedicate_info && !import_info && (!export_info || !export_info->handleTypes)) {
 			flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
@@ -5183,24 +5249,42 @@ static VkResult radv_alloc_memory(struct radv_device *device,
 			}
 		}
 
-		if (radv_is_mem_type_uncached(mem_type_index)) {
-			assert(device->physical_device->rad_info.has_l2_uncached);
-			flags |= RADEON_FLAG_VA_UNCACHED;
+		if (device->overallocation_disallowed) {
+			uint64_t total_size =
+				device->physical_device->memory_properties.memoryHeaps[heap_index].size;
+
+			mtx_lock(&device->overallocation_mutex);
+			if (device->allocated_memory_size[heap_index] + alloc_size > total_size) {
+				mtx_unlock(&device->overallocation_mutex);
+				result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+				goto fail;
+			}
+			device->allocated_memory_size[heap_index] += alloc_size;
+			mtx_unlock(&device->overallocation_mutex);
 		}
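/* [Editor's note: illustrative sketch, not part of the patch.]
 * The block above is a check-and-reserve under a mutex: the heap counter
 * is bumped before the allocation is attempted and rolled back if the
 * buffer creation fails (see the error path just below).  Reduced to its
 * essentials with a hypothetical budget struct; the mutex is assumed to
 * have been initialized with mtx_init(&b->mutex, mtx_plain):
 */
#include <stdbool.h>
#include <stdint.h>
#include <threads.h>

struct heap_budget {
	mtx_t mutex;
	uint64_t used;
	uint64_t total;
};

static bool
budget_reserve(struct heap_budget *b, uint64_t size)
{
	bool ok;
	mtx_lock(&b->mutex);
	ok = b->used + size <= b->total;
	if (ok)
		b->used += size; /* reserve before allocating */
	mtx_unlock(&b->mutex);
	return ok;
}

static void
budget_release(struct heap_budget *b, uint64_t size)
{
	mtx_lock(&b->mutex);
	b->used -= size; /* rollback on failure, or on free */
	mtx_unlock(&b->mutex);
}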
 
 		mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
 		                                    domain, flags, priority);
 
 		if (!mem->bo) {
+			if (device->overallocation_disallowed) {
+				mtx_lock(&device->overallocation_mutex);
+				device->allocated_memory_size[heap_index] -= alloc_size;
+				mtx_unlock(&device->overallocation_mutex);
+			}
 			result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
 			goto fail;
 		}
-		mem->type_index = mem_type_index;
+
+		mem->heap_index = heap_index;
+		mem->alloc_size = alloc_size;
 	}
 
-	result = radv_bo_list_add(device, mem->bo);
-	if (result != VK_SUCCESS)
-		goto fail;
+	if (!wsi_info) {
+		result = radv_bo_list_add(device, mem->bo);
+		if (result != VK_SUCCESS)
+			goto fail;
+	}
 
 	*pMem = radv_device_memory_to_handle(mem);
 
@@ -5542,19 +5626,21 @@ VkResult radv_CreateFence(
 	VkExternalFenceHandleTypeFlags handleTypes =
 		export ? export->handleTypes : 0;
 
-	struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
+	struct radv_fence *fence = vk_alloc2(&device->vk.alloc, pAllocator,
 					     sizeof(*fence), 8,
 					     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 
 	if (!fence)
 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+	vk_object_base_init(&device->vk, &fence->base, VK_OBJECT_TYPE_FENCE);
+
 	fence->fence_wsi = NULL;
 	fence->temp_syncobj = 0;
 	if (device->always_use_syncobj || handleTypes) {
 		int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
 		if (ret) {
-			vk_free2(&device->alloc, pAllocator, fence);
+			vk_free2(&device->vk.alloc, pAllocator, fence);
 			return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 		}
 		if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
@@ -5564,7 +5650,7 @@ VkResult radv_CreateFence(
 	} else {
 		fence->fence = device->ws->create_fence();
 		if (!fence->fence) {
-			vk_free2(&device->alloc, pAllocator, fence);
+			vk_free2(&device->vk.alloc, pAllocator, fence);
 			return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 		}
 		fence->syncobj = 0;
@@ -5596,7 +5682,9 @@ void radv_DestroyFence(
 		device->ws->destroy_fence(fence->fence);
 	if (fence->fence_wsi)
 		fence->fence_wsi->destroy(fence->fence_wsi);
-	vk_free2(&device->alloc, pAllocator, fence);
+
+	vk_object_base_finish(&fence->base);
+	vk_free2(&device->vk.alloc, pAllocator, fence);
 }
 
 
@@ -6008,12 +6096,15 @@ VkResult radv_CreateSemaphore(
 	uint64_t initial_value = 0;
 	VkSemaphoreTypeKHR type = radv_get_semaphore_type(pCreateInfo->pNext, &initial_value);
 
-	struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
+	struct radv_semaphore *sem = vk_alloc2(&device->vk.alloc, pAllocator,
 					       sizeof(*sem), 8,
 					       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 	if (!sem)
 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+	vk_object_base_init(&device->vk, &sem->base,
+			    VK_OBJECT_TYPE_SEMAPHORE);
+
 	sem->temporary.kind = RADV_SEMAPHORE_NONE;
 	sem->permanent.kind = RADV_SEMAPHORE_NONE;
 
@@ -6024,14 +6115,14 @@ VkResult radv_CreateSemaphore(
 		assert (device->physical_device->rad_info.has_syncobj);
 		int ret = device->ws->create_syncobj(device->ws, &sem->permanent.syncobj);
 		if (ret) {
-			vk_free2(&device->alloc, pAllocator, sem);
+			vk_free2(&device->vk.alloc, pAllocator, sem);
 			return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 		}
 		sem->permanent.kind = RADV_SEMAPHORE_SYNCOBJ;
 	} else {
 		sem->permanent.ws_sem = device->ws->create_sem(device->ws);
 		if (!sem->permanent.ws_sem) {
-			vk_free2(&device->alloc, pAllocator, sem);
+			vk_free2(&device->vk.alloc, pAllocator, sem);
 			return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 		}
 		sem->permanent.kind = RADV_SEMAPHORE_WINSYS;
 	}
 
@@ -6053,7 +6144,8 @@ void radv_DestroySemaphore(
 
 	radv_destroy_semaphore_part(device, &sem->temporary);
 	radv_destroy_semaphore_part(device, &sem->permanent);
-	vk_free2(&device->alloc, pAllocator, sem);
+	vk_object_base_finish(&sem->base);
+	vk_free2(&device->vk.alloc, pAllocator, sem);
 }
 
 VkResult
@@ -6167,19 +6259,21 @@ VkResult radv_CreateEvent(
 	VkEvent*                                    pEvent)
 {
 	RADV_FROM_HANDLE(radv_device, device, _device);
-	struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
+	struct radv_event *event = vk_alloc2(&device->vk.alloc, pAllocator,
 					     sizeof(*event), 8,
 					     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 
 	if (!event)
 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+	vk_object_base_init(&device->vk, &event->base, VK_OBJECT_TYPE_EVENT);
+
 	event->bo = device->ws->buffer_create(device->ws, 8, 8,
 					      RADEON_DOMAIN_GTT,
 					      RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING,
 					      RADV_BO_PRIORITY_FENCE);
 	if (!event->bo) {
-		vk_free2(&device->alloc, pAllocator, event);
+		vk_free2(&device->vk.alloc, pAllocator, event);
 		return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
 	}
 
@@ -6201,7 +6295,8 @@ void radv_DestroyEvent(
 	if (!event)
 		return;
 
 	device->ws->buffer_destroy(event->bo);
-	vk_free2(&device->alloc, pAllocator, event);
+	vk_object_base_finish(&event->base);
+	vk_free2(&device->vk.alloc, pAllocator, event);
 }
 
 VkResult radv_GetEventStatus(
@@ -6244,13 +6339,18 @@ VkResult radv_CreateBuffer(
 	RADV_FROM_HANDLE(radv_device, device, _device);
 	struct radv_buffer *buffer;
 
+	if (pCreateInfo->size > RADV_MAX_MEMORY_ALLOCATION_SIZE)
+		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
 	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
 
-	buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
+	buffer = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*buffer), 8,
 			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 	if (buffer == NULL)
 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+	vk_object_base_init(&device->vk, &buffer->base, VK_OBJECT_TYPE_BUFFER);
+
 	buffer->size = pCreateInfo->size;
 	buffer->usage = pCreateInfo->usage;
 	buffer->bo = NULL;
@@ -6266,7 +6366,7 @@ VkResult radv_CreateBuffer(
 						       4096, 0, RADEON_FLAG_VIRTUAL,
 						       RADV_BO_PRIORITY_VIRTUAL);
 		if (!buffer->bo) {
-			vk_free2(&device->alloc, pAllocator, buffer);
+			vk_free2(&device->vk.alloc, pAllocator, buffer);
 			return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
 		}
 	}
@@ -6290,7 +6390,8 @@ void radv_DestroyBuffer(
 	if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
 		device->ws->buffer_destroy(buffer->bo);
 
-	vk_free2(&device->alloc, pAllocator, buffer);
+	vk_object_base_finish(&buffer->base);
+	vk_free2(&device->vk.alloc, pAllocator, buffer);
 }
 
 VkDeviceAddress radv_GetBufferDeviceAddress(
@@ -6413,18 +6514,20 @@ radv_initialise_color_surface(struct radv_device *device,
 	cb->cb_color_base = va >> 8;
 
 	if (device->physical_device->rad_info.chip_class >= GFX9) {
-		struct gfx9_surf_meta_flags meta;
-		if (iview->image->dcc_offset)
-			meta = surf->u.gfx9.dcc;
-		else
-			meta = surf->u.gfx9.cmask;
-
 		if (device->physical_device->rad_info.chip_class >= GFX10) {
 			cb->cb_color_attrib3 |= S_028EE0_COLOR_SW_MODE(surf->u.gfx9.surf.swizzle_mode) |
 				S_028EE0_FMASK_SW_MODE(surf->u.gfx9.fmask.swizzle_mode) |
-				S_028EE0_CMASK_PIPE_ALIGNED(surf->u.gfx9.cmask.pipe_aligned) |
+				S_028EE0_CMASK_PIPE_ALIGNED(1) |
 				S_028EE0_DCC_PIPE_ALIGNED(surf->u.gfx9.dcc.pipe_aligned);
 		} else {
+			struct gfx9_surf_meta_flags meta = {
+				.rb_aligned = 1,
+				.pipe_aligned = 1,
+			};
+
+			if (iview->image->dcc_offset)
+				meta = surf->u.gfx9.dcc;
+
 			cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(surf->u.gfx9.surf.swizzle_mode) |
 				S_028C74_FMASK_SW_MODE(surf->u.gfx9.fmask.swizzle_mode) |
 				S_028C74_RB_ALIGNED(meta.rb_aligned) |
@@ -6752,10 +6855,10 @@ radv_initialise_ds_surface(struct radv_device *device,
 				iview->image->htile_offset;
 			ds->db_htile_data_base = va >> 8;
 			ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
-				S_028ABC_PIPE_ALIGNED(surf->u.gfx9.htile.pipe_aligned);
+				S_028ABC_PIPE_ALIGNED(1);
 
 			if (device->physical_device->rad_info.chip_class == GFX9) {
-				ds->db_htile_surface |= S_028ABC_RB_ALIGNED(surf->u.gfx9.htile.rb_aligned);
+				ds->db_htile_surface |= S_028ABC_RB_ALIGNED(1);
 			}
 		}
 	} else {
@@ -6852,11 +6955,14 @@ VkResult radv_CreateFramebuffer(
 	size_t size = sizeof(*framebuffer);
 	if (!imageless_create_info)
 		size += sizeof(struct radv_image_view*) * pCreateInfo->attachmentCount;
-	framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
+	framebuffer = vk_alloc2(&device->vk.alloc, pAllocator, size, 8,
 				VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 	if (framebuffer == NULL)
 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+	vk_object_base_init(&device->vk, &framebuffer->base,
+			    VK_OBJECT_TYPE_FRAMEBUFFER);
+
 	framebuffer->attachment_count = pCreateInfo->attachmentCount;
 	framebuffer->width = pCreateInfo->width;
 	framebuffer->height = pCreateInfo->height;
@@ -6894,7 +7000,8 @@ void radv_DestroyFramebuffer(
 	if (!fb)
 		return;
 
-	vk_free2(&device->alloc, pAllocator, fb);
+	vk_object_base_finish(&fb->base);
+	vk_free2(&device->vk.alloc, pAllocator, fb);
 }
 
 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
@@ -7035,6 +7142,11 @@ radv_get_max_anisotropy(struct radv_device *device,
 	return 0;
 }
 
+static inline int S_FIXED(float value, unsigned frac_bits)
+{
+	return value * (1 << frac_bits);
+}
+
 static void
 radv_init_sampler(struct radv_device *device,
 		  struct radv_sampler *sampler,
@@ -7046,6 +7158,7 @@ radv_init_sampler(struct radv_device *device,
 			   device->physical_device->rad_info.chip_class == GFX9;
 	unsigned filter_mode = V_008F30_SQ_IMG_FILTER_MODE_BLEND;
 	unsigned depth_compare_func = V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
+	bool trunc_coord = pCreateInfo->minFilter == VK_FILTER_NEAREST && pCreateInfo->magFilter == VK_FILTER_NEAREST;
 
 	const struct VkSamplerReductionModeCreateInfo *sampler_reduction =
 		vk_find_struct_const(pCreateInfo->pNext,
@@ -7066,7 +7179,8 @@ radv_init_sampler(struct radv_device *device,
 			     S_008F30_ANISO_BIAS(max_aniso_ratio) |
 			     S_008F30_DISABLE_CUBE_WRAP(0) |
 			     S_008F30_COMPAT_MODE(compat_mode) |
-			     S_008F30_FILTER_MODE(filter_mode));
+			     S_008F30_FILTER_MODE(filter_mode) |
+			     S_008F30_TRUNC_COORD(trunc_coord));
 	sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
 			     S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
 			     S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
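/* [Editor's note: illustrative sketch, not part of the patch.]
 * The S_FIXED helper added above converts a float LOD to the fixed-point
 * format the sampler registers expect: value * 2^frac_bits, truncated
 * toward zero.  With 8 fractional bits, 1.5 encodes as 384 (0x180):
 */
#include <assert.h>

static inline int S_FIXED_example(float value, unsigned frac_bits)
{
	return value * (1 << frac_bits);
}

static void check_s_fixed(void)
{
	assert(S_FIXED_example(1.5f, 8) == 384);
	assert(S_FIXED_example(0.0f, 8) == 0);
	assert(S_FIXED_example(15.0f, 8) == 3840);
}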
@@ -7103,11 +7217,14 @@ VkResult radv_CreateSampler(
 
 	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
 
-	sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
+	sampler = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*sampler), 8,
 			    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 	if (!sampler)
 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+	vk_object_base_init(&device->vk, &sampler->base,
+			    VK_OBJECT_TYPE_SAMPLER);
+
 	radv_init_sampler(device, sampler, pCreateInfo);
 
 	sampler->ycbcr_sampler = ycbcr_conversion ? radv_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion): NULL;
@@ -7126,7 +7243,8 @@ void radv_DestroySampler(
 	if (!sampler)
 		return;
 
-	vk_free2(&device->alloc, pAllocator, sampler);
+	vk_object_base_finish(&sampler->base);
+	vk_free2(&device->vk.alloc, pAllocator, sampler);
 }
 
 /* vk_icd.h does not declare this function, so we declare it here to
@@ -7194,28 +7312,74 @@ VkResult radv_GetMemoryFdKHR(VkDevice _device,
 	return VK_SUCCESS;
 }
 
+static uint32_t radv_compute_valid_memory_types_attempt(struct radv_physical_device *dev,
+                                                        enum radeon_bo_domain domains,
+                                                        enum radeon_bo_flag flags,
+                                                        enum radeon_bo_flag ignore_flags)
+{
+	/* Don't count GTT/CPU as relevant:
+	 *
+	 * - We're not fully consistent between the two.
+	 * - Sometimes VRAM gets VRAM|GTT.
+	 */
+	const enum radeon_bo_domain relevant_domains = RADEON_DOMAIN_VRAM |
+	                                               RADEON_DOMAIN_GDS |
+	                                               RADEON_DOMAIN_OA;
+	uint32_t bits = 0;
+	for (unsigned i = 0; i < dev->memory_properties.memoryTypeCount; ++i) {
+		if ((domains & relevant_domains) != (dev->memory_domains[i] & relevant_domains))
+			continue;
+
+		if ((flags & ~ignore_flags) != (dev->memory_flags[i] & ~ignore_flags))
+			continue;
+
+		bits |= 1u << i;
+	}
+
+	return bits;
+}
+
+static uint32_t radv_compute_valid_memory_types(struct radv_physical_device *dev,
+                                                enum radeon_bo_domain domains,
+                                                enum radeon_bo_flag flags)
+{
+	enum radeon_bo_flag ignore_flags = ~(RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_GTT_WC);
+	uint32_t bits = radv_compute_valid_memory_types_attempt(dev, domains, flags, ignore_flags);
+
+	if (!bits) {
+		ignore_flags |= RADEON_FLAG_NO_CPU_ACCESS;
+		bits = radv_compute_valid_memory_types_attempt(dev, domains, flags, ignore_flags);
+	}
+
+	return bits;
+}
+
 VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
 				       VkExternalMemoryHandleTypeFlagBits handleType,
 				       int fd,
 				       VkMemoryFdPropertiesKHR *pMemoryFdProperties)
 {
-	RADV_FROM_HANDLE(radv_device, device, _device);
-
-	switch (handleType) {
-	case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
-		pMemoryFdProperties->memoryTypeBits = (1 << RADV_MEM_TYPE_COUNT) - 1;
-		return VK_SUCCESS;
-
-	default:
-		/* The valid usage section for this function says:
-		 *
-		 * "handleType must not be one of the handle types defined as
-		 * opaque."
-		 *
-		 * So opaque handle types fall into the default "unsupported" case.
-		 */
-		return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
-	}
+	RADV_FROM_HANDLE(radv_device, device, _device);
+
+	switch (handleType) {
+	case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: {
+		enum radeon_bo_domain domains;
+		enum radeon_bo_flag flags;
+		if (!device->ws->buffer_get_flags_from_fd(device->ws, fd, &domains, &flags))
+			return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
+
+		pMemoryFdProperties->memoryTypeBits = radv_compute_valid_memory_types(device->physical_device, domains, flags);
+		return VK_SUCCESS;
+	}
+	default:
+		/* The valid usage section for this function says:
+		 *
+		 * "handleType must not be one of the handle types defined as
+		 * opaque."
+		 *
+		 * So opaque handle types fall into the default "unsupported" case.
+		 */
+ */ + return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE); + } } static VkResult radv_import_opaque_fd(struct radv_device *device, @@ -7641,3 +7805,48 @@ void radv_GetPhysicalDeviceMultisamplePropertiesEXT( pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 }; } } + +VkResult radv_CreatePrivateDataSlotEXT( + VkDevice _device, + const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPrivateDataSlotEXT* pPrivateDataSlot) +{ + RADV_FROM_HANDLE(radv_device, device, _device); + return vk_private_data_slot_create(&device->vk, pCreateInfo, pAllocator, + pPrivateDataSlot); +} + +void radv_DestroyPrivateDataSlotEXT( + VkDevice _device, + VkPrivateDataSlotEXT privateDataSlot, + const VkAllocationCallbacks* pAllocator) +{ + RADV_FROM_HANDLE(radv_device, device, _device); + vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator); +} + +VkResult radv_SetPrivateDataEXT( + VkDevice _device, + VkObjectType objectType, + uint64_t objectHandle, + VkPrivateDataSlotEXT privateDataSlot, + uint64_t data) +{ + RADV_FROM_HANDLE(radv_device, device, _device); + return vk_object_base_set_private_data(&device->vk, objectType, + objectHandle, privateDataSlot, + data); +} + +void radv_GetPrivateDataEXT( + VkDevice _device, + VkObjectType objectType, + uint64_t objectHandle, + VkPrivateDataSlotEXT privateDataSlot, + uint64_t* pData) +{ + RADV_FROM_HANDLE(radv_device, device, _device); + vk_object_base_get_private_data(&device->vk, objectType, objectHandle, + privateDataSlot, pData); +}