X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_device.c;h=d4b78ef06df985f8f419288aa25e55fc4b5c1c30;hb=56e37374ddbc3b66bcfa4d0dadcb1fd53074c822;hp=ca0909ff73deaabeba2d9551206fa9e083ab4e2c;hpb=556c9401495930c23a10ca2a26db098d12663e27;p=mesa.git diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c index ca0909ff73d..d4b78ef06df 100644 --- a/src/amd/vulkan/radv_device.c +++ b/src/amd/vulkan/radv_device.c @@ -41,7 +41,6 @@ #include #include #include -#include #include "radv_debug.h" #include "radv_private.h" @@ -51,8 +50,9 @@ #include "vk_util.h" #include #include -#include +#include "drm-uapi/amdgpu_drm.h" #include "winsys/amdgpu/radv_amdgpu_winsys_public.h" +#include "winsys/null/radv_null_winsys_public.h" #include "ac_llvm_util.h" #include "vk_format.h" #include "sid.h" @@ -129,46 +129,9 @@ radv_get_vram_size(struct radv_physical_device *device) return device->rad_info.vram_size - radv_get_visible_vram_size(device); } -static bool -radv_is_mem_type_vram(enum radv_mem_type type) -{ - return type == RADV_MEM_TYPE_VRAM || - type == RADV_MEM_TYPE_VRAM_UNCACHED; -} - -static bool -radv_is_mem_type_vram_visible(enum radv_mem_type type) -{ - return type == RADV_MEM_TYPE_VRAM_CPU_ACCESS || - type == RADV_MEM_TYPE_VRAM_CPU_ACCESS_UNCACHED; -} -static bool -radv_is_mem_type_gtt_wc(enum radv_mem_type type) -{ - return type == RADV_MEM_TYPE_GTT_WRITE_COMBINE || - type == RADV_MEM_TYPE_GTT_WRITE_COMBINE_VRAM_UNCACHED; -} - -static bool -radv_is_mem_type_gtt_cached(enum radv_mem_type type) -{ - return type == RADV_MEM_TYPE_GTT_CACHED || - type == RADV_MEM_TYPE_GTT_CACHED_VRAM_UNCACHED; -} - -static bool -radv_is_mem_type_uncached(enum radv_mem_type type) -{ - return type == RADV_MEM_TYPE_VRAM_UNCACHED || - type == RADV_MEM_TYPE_VRAM_CPU_ACCESS_UNCACHED || - type == RADV_MEM_TYPE_GTT_WRITE_COMBINE_VRAM_UNCACHED || - type == RADV_MEM_TYPE_GTT_CACHED_VRAM_UNCACHED; -} - static void radv_physical_device_init_mem_types(struct radv_physical_device *device) { - STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS); uint64_t visible_vram_size = radv_get_visible_vram_size(device); uint64_t vram_size = radv_get_vram_size(device); int vram_index = -1, visible_vram_index = -1, gart_index = -1; @@ -180,6 +143,15 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device) .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT, }; } + + if (device->rad_info.gart_size > 0) { + gart_index = device->memory_properties.memoryHeapCount++; + device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) { + .size = device->rad_info.gart_size, + .flags = 0, + }; + } + if (visible_vram_size) { visible_vram_index = device->memory_properties.memoryHeapCount++; device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap) { @@ -187,25 +159,32 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device) .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT, }; } - if (device->rad_info.gart_size > 0) { - gart_index = device->memory_properties.memoryHeapCount++; - device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) { - .size = device->rad_info.gart_size, - .flags = device->rad_info.has_dedicated_vram ? 
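		/* Reviewer's note: with this hunk the GTT heap is added ahead of
		 * the visible-VRAM heap and its flags become 0 unconditionally;
		 * the ternary being deleted here is what used to mark GTT as
		 * DEVICE_LOCAL on APUs. Device-locality on APUs appears to be
		 * expressed through the memory *types* instead, which the hunks
		 * below rebuild from parallel memory_domains[]/memory_flags[]
		 * arrays rather than the deleted radv_mem_type enum. */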
0 : VK_MEMORY_HEAP_DEVICE_LOCAL_BIT, - }; - } - STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES); unsigned type_count = 0; - if (vram_index >= 0) { - device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM; - device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { - .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, - .heapIndex = vram_index, - }; + + if (device->rad_info.has_dedicated_vram) { + if (vram_index >= 0) { + device->memory_domains[type_count] = RADEON_DOMAIN_VRAM; + device->memory_flags[type_count] = RADEON_FLAG_NO_CPU_ACCESS; + device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { + .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, + .heapIndex = vram_index, + }; + } + } else { + if (visible_vram_index >= 0) { + device->memory_domains[type_count] = RADEON_DOMAIN_VRAM; + device->memory_flags[type_count] = RADEON_FLAG_NO_CPU_ACCESS; + device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { + .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, + .heapIndex = visible_vram_index, + }; + } } - if (gart_index >= 0 && device->rad_info.has_dedicated_vram) { - device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE; + + if (gart_index >= 0) { + device->memory_domains[type_count] = RADEON_DOMAIN_GTT; + device->memory_flags[type_count] = RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS; device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, @@ -213,7 +192,8 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device) }; } if (visible_vram_index >= 0) { - device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM_CPU_ACCESS; + device->memory_domains[type_count] = RADEON_DOMAIN_VRAM; + device->memory_flags[type_count] = RADEON_FLAG_CPU_ACCESS; device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | @@ -221,26 +201,14 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device) .heapIndex = visible_vram_index, }; } - if (gart_index >= 0 && !device->rad_info.has_dedicated_vram) { - /* Put GTT after visible VRAM for GPUs without dedicated VRAM - * as they have identical property flags, and according to the - * spec, for types with identical flags, the one with greater - * performance must be given a lower index. */ - device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE; - device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { - .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | - VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | - VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, - .heapIndex = gart_index, - }; - } + if (gart_index >= 0) { - device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_CACHED; + device->memory_domains[type_count] = RADEON_DOMAIN_GTT; + device->memory_flags[type_count] = RADEON_FLAG_CPU_ACCESS; device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | - VK_MEMORY_PROPERTY_HOST_CACHED_BIT | - (device->rad_info.has_dedicated_vram ? 
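		/* Reviewer's note: the loop that follows this hunk derives the
		 * DEVICE_COHERENT/DEVICE_UNCACHED_AMD variants generically
		 * instead of through the deleted radv_mem_type switch; each
		 * eligible type i is cloned along the lines of:
		 *
		 *     memory_domains[type_count] = memory_domains[i];
		 *     memory_flags[type_count]   = memory_flags[i] |
		 *                                  RADEON_FLAG_VA_UNCACHED;
		 */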
0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT), + VK_MEMORY_PROPERTY_HOST_CACHED_BIT, .heapIndex = gart_index, }; } @@ -253,30 +221,13 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device) if ((mem_type.propertyFlags & (VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) || mem_type.propertyFlags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) { - enum radv_mem_type mem_type_id; - - switch (device->mem_type_indices[i]) { - case RADV_MEM_TYPE_VRAM: - mem_type_id = RADV_MEM_TYPE_VRAM_UNCACHED; - break; - case RADV_MEM_TYPE_VRAM_CPU_ACCESS: - mem_type_id = RADV_MEM_TYPE_VRAM_CPU_ACCESS_UNCACHED; - break; - case RADV_MEM_TYPE_GTT_WRITE_COMBINE: - mem_type_id = RADV_MEM_TYPE_GTT_WRITE_COMBINE_VRAM_UNCACHED; - break; - case RADV_MEM_TYPE_GTT_CACHED: - mem_type_id = RADV_MEM_TYPE_GTT_CACHED_VRAM_UNCACHED; - break; - default: - unreachable("invalid memory type"); - } VkMemoryPropertyFlags property_flags = mem_type.propertyFlags | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD; - device->mem_type_indices[type_count] = mem_type_id; + device->memory_domains[type_count] = device->memory_domains[i]; + device->memory_flags[type_count] = device->memory_flags[i] | RADEON_FLAG_VA_UNCACHED; device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) { .propertyFlags = property_flags, .heapIndex = mem_type.heapIndex, @@ -287,97 +238,78 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device) } } -static void -radv_handle_env_var_force_family(struct radv_physical_device *device) +static VkResult +radv_physical_device_try_create(struct radv_instance *instance, + drmDevicePtr drm_device, + struct radv_physical_device **device_out) { - const char *family = getenv("RADV_FORCE_FAMILY"); - unsigned i; + VkResult result; + int fd = -1; + int master_fd = -1; - if (!family) - return; + if (drm_device) { + const char *path = drm_device->nodes[DRM_NODE_RENDER]; + drmVersionPtr version; - for (i = CHIP_TAHITI; i < CHIP_LAST; i++) { - if (!strcmp(family, ac_get_llvm_processor_name(i))) { - /* Override family and chip_class. */ - device->rad_info.family = i; - device->rad_info.name = "OVERRIDDEN"; - - if (i >= CHIP_NAVI10) - device->rad_info.chip_class = GFX10; - else if (i >= CHIP_VEGA10) - device->rad_info.chip_class = GFX9; - else if (i >= CHIP_TONGA) - device->rad_info.chip_class = GFX8; - else if (i >= CHIP_BONAIRE) - device->rad_info.chip_class = GFX7; - else - device->rad_info.chip_class = GFX6; + fd = open(path, O_RDWR | O_CLOEXEC); + if (fd < 0) { + if (instance->debug_flags & RADV_DEBUG_STARTUP) + radv_logi("Could not open device '%s'", path); - /* Don't submit any IBs. 
*/ - device->instance->debug_flags |= RADV_DEBUG_NOOP; - return; + return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER); } - } - - fprintf(stderr, "radv: Unknown family: %s\n", family); - exit(1); -} -static VkResult -radv_physical_device_init(struct radv_physical_device *device, - struct radv_instance *instance, - drmDevicePtr drm_device) -{ - const char *path = drm_device->nodes[DRM_NODE_RENDER]; - VkResult result; - drmVersionPtr version; - int fd; - int master_fd = -1; + version = drmGetVersion(fd); + if (!version) { + close(fd); - fd = open(path, O_RDWR | O_CLOEXEC); - if (fd < 0) { - if (instance->debug_flags & RADV_DEBUG_STARTUP) - radv_logi("Could not open device '%s'", path); + if (instance->debug_flags & RADV_DEBUG_STARTUP) + radv_logi("Could not get the kernel driver version for device '%s'", path); - return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER); - } + return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER, + "failed to get version %s: %m", path); + } - version = drmGetVersion(fd); - if (!version) { - close(fd); + if (strcmp(version->name, "amdgpu")) { + drmFreeVersion(version); + close(fd); - if (instance->debug_flags & RADV_DEBUG_STARTUP) - radv_logi("Could not get the kernel driver version for device '%s'", path); + if (instance->debug_flags & RADV_DEBUG_STARTUP) + radv_logi("Device '%s' is not using the amdgpu kernel driver.", path); - return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER, - "failed to get version %s: %m", path); - } - - if (strcmp(version->name, "amdgpu")) { + return VK_ERROR_INCOMPATIBLE_DRIVER; + } drmFreeVersion(version); - close(fd); if (instance->debug_flags & RADV_DEBUG_STARTUP) - radv_logi("Device '%s' is not using the amdgpu kernel driver.", path); - - return VK_ERROR_INCOMPATIBLE_DRIVER; + radv_logi("Found compatible device '%s'.", path); } - drmFreeVersion(version); - if (instance->debug_flags & RADV_DEBUG_STARTUP) - radv_logi("Found compatible device '%s'.", path); + struct radv_physical_device *device = + vk_zalloc2(&instance->alloc, NULL, sizeof(*device), 8, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (!device) { + result = vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY); + goto fail_fd; + } device->_loader_data.loaderMagic = ICD_LOADER_MAGIC; device->instance = instance; - device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags, - instance->perftest_flags); + if (drm_device) { + device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags, + instance->perftest_flags); + } else { + device->ws = radv_null_winsys_create(); + } + if (!device->ws) { - result = vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER); - goto fail; + result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED, + "failed to initialize winsys"); + goto fail_alloc; } - if (instance->enabled_extensions.KHR_display) { + if (drm_device && instance->enabled_extensions.KHR_display) { master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC); if (master_fd >= 0) { uint32_t accel_working = 0; @@ -398,8 +330,6 @@ radv_physical_device_init(struct radv_physical_device *device, device->local_fd = fd; device->ws->query_info(device->ws, &device->rad_info); - radv_handle_env_var_force_family(device); - device->use_aco = instance->perftest_flags & RADV_PERFTEST_ACO; snprintf(device->name, sizeof(device->name), @@ -407,16 +337,13 @@ radv_physical_device_init(struct radv_physical_device *device, device->rad_info.name); if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) { - device->ws->destroy(device->ws); result = 
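	/* Reviewer's note: radv_physical_device_try_create() now unwinds
	 * through ordered fail_* labels instead of each failure site cleaning
	 * up by hand (note the dropped device->ws->destroy() call here); the
	 * ladder at the end of the function releases the disk cache, the
	 * winsys, the device allocation and finally the fds, in that order. */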
vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID"); - goto fail; + goto fail_wsi; } /* These flags affect shader compilation. */ - uint64_t shader_env_flags = - (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) | - (device->use_aco ? 0x2 : 0); + uint64_t shader_env_flags = (device->use_aco ? 0x2 : 0); /* The gpu id is already embedded in the uuid so we just pass "radv" * when creating the cache. @@ -443,11 +370,9 @@ radv_physical_device_init(struct radv_physical_device *device, device->use_ngg = device->rad_info.chip_class >= GFX10 && device->rad_info.family != CHIP_NAVI14 && !(device->instance->debug_flags & RADV_DEBUG_NO_NGG); - if (device->use_aco && device->use_ngg) { - fprintf(stderr, "WARNING: disabling NGG because ACO is used.\n"); - device->use_ngg = false; - } + /* TODO: Implement NGG GS with ACO. */ + device->use_ngg_gs = device->use_ngg && !device->use_aco; device->use_ngg_streamout = false; /* Determine the number of threads per wave for all stages. */ @@ -470,7 +395,8 @@ radv_physical_device_init(struct radv_physical_device *device, radv_physical_device_init_mem_types(device); radv_fill_device_extension_table(device, &device->supported_extensions); - device->bus_info = *drm_device->businfo.pci; + if (drm_device) + device->bus_info = *drm_device->businfo.pci; if ((device->instance->debug_flags & RADV_DEBUG_INFO)) ac_print_gpu_info(&device->rad_info); @@ -481,14 +407,21 @@ radv_physical_device_init(struct radv_physical_device *device, */ result = radv_init_wsi(device); if (result != VK_SUCCESS) { - device->ws->destroy(device->ws); vk_error(instance, result); - goto fail; + goto fail_disk_cache; } + *device_out = device; + return VK_SUCCESS; -fail: +fail_disk_cache: + disk_cache_destroy(device->disk_cache); +fail_wsi: + device->ws->destroy(device->ws); +fail_alloc: + vk_free(&instance->alloc, device); +fail_fd: close(fd); if (master_fd != -1) close(master_fd); @@ -496,7 +429,7 @@ fail: } static void -radv_physical_device_finish(struct radv_physical_device *device) +radv_physical_device_destroy(struct radv_physical_device *device) { radv_finish_wsi(device); device->ws->destroy(device->ws); @@ -504,6 +437,7 @@ radv_physical_device_finish(struct radv_physical_device *device) close(device->local_fd); if (device->master_fd != -1) close(device->master_fd); + vk_free(&device->instance->alloc, device); } static void * @@ -547,7 +481,6 @@ static const struct debug_control radv_debug_options[] = { {"vmfaults", RADV_DEBUG_VM_FAULTS}, {"zerovram", RADV_DEBUG_ZERO_VRAM}, {"syncshaders", RADV_DEBUG_SYNC_SHADERS}, - {"nosisched", RADV_DEBUG_NO_SISCHED}, {"preoptir", RADV_DEBUG_PREOPTIR}, {"nodynamicbounds", RADV_DEBUG_NO_DYNAMIC_BOUNDS}, {"nooutoforder", RADV_DEBUG_NO_OUT_OF_ORDER}, @@ -563,7 +496,6 @@ static const struct debug_control radv_debug_options[] = { {"allentrypoints", RADV_DEBUG_ALL_ENTRYPOINTS}, {"metashaders", RADV_DEBUG_DUMP_META_SHADERS}, {"nomemorycache", RADV_DEBUG_NO_MEMORY_CACHE}, - {"noop", RADV_DEBUG_NOOP}, {NULL, 0} }; @@ -575,8 +507,6 @@ radv_get_debug_option_name(int id) } static const struct debug_control radv_perftest_options[] = { - {"nobatchchain", RADV_PERFTEST_NO_BATCHCHAIN}, - {"sisched", RADV_PERFTEST_SISCHED}, {"localbos", RADV_PERFTEST_LOCAL_BOS}, {"dccmsaa", RADV_PERFTEST_DCC_MSAA}, {"bolist", RADV_PERFTEST_BO_LIST}, @@ -606,15 +536,7 @@ radv_handle_per_app_options(struct radv_instance *instance, if (!name) return; - if (!strcmp(name, "Talos - Linux - 32bit") || - !strcmp(name, "Talos - Linux - 64bit")) { - if 
(!(instance->debug_flags & RADV_DEBUG_NO_SISCHED)) { - /* Force enable LLVM sisched for Talos because it looks - * safe and it gives few more FPS. - */ - instance->perftest_flags |= RADV_PERFTEST_SISCHED; - } - } else if (!strcmp(name, "DOOM_VFR")) { + if (!strcmp(name, "DOOM_VFR")) { /* Work around a Doom VFR game bug */ instance->debug_flags |= RADV_DEBUG_NO_DYNAMIC_BOUNDS; } else if (!strcmp(name, "MonsterHunterWorld.exe")) { @@ -641,18 +563,12 @@ radv_handle_per_app_options(struct radv_instance *instance, * uninitialized data in an indirect draw. */ instance->debug_flags |= RADV_DEBUG_ZERO_VRAM; + } else if (!strcmp(name, "No Man's Sky")) { + /* Work around a NMS game bug */ + instance->debug_flags |= RADV_DEBUG_DISCARD_TO_DEMOTE; } } -static int radv_get_instance_extension_index(const char *name) -{ - for (unsigned i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; ++i) { - if (strcmp(name, radv_instance_extensions[i].extensionName) == 0) - return i; - } - return -1; -} - static const char radv_dri_options_xml[] = DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE @@ -684,23 +600,6 @@ VkResult radv_CreateInstance( struct radv_instance *instance; VkResult result; - assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO); - - uint32_t client_version; - if (pCreateInfo->pApplicationInfo && - pCreateInfo->pApplicationInfo->apiVersion != 0) { - client_version = pCreateInfo->pApplicationInfo->apiVersion; - } else { - client_version = VK_API_VERSION_1_0; - } - - const char *engine_name = NULL; - uint32_t engine_version = 0; - if (pCreateInfo->pApplicationInfo) { - engine_name = pCreateInfo->pApplicationInfo->pEngineName; - engine_version = pCreateInfo->pApplicationInfo->engineVersion; - } - instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); if (!instance) @@ -713,8 +612,18 @@ VkResult radv_CreateInstance( else instance->alloc = default_alloc; - instance->apiVersion = client_version; - instance->physicalDeviceCount = -1; + if (pCreateInfo->pApplicationInfo) { + const VkApplicationInfo *app = pCreateInfo->pApplicationInfo; + + instance->engineName = + vk_strdup(&instance->alloc, app->pEngineName, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + instance->engineVersion = app->engineVersion; + instance->apiVersion = app->apiVersion; + } + + if (instance->apiVersion == 0) + instance->apiVersion = VK_API_VERSION_1_0; /* Get secure compile thread count. 
NOTE: We cap this at 32 */ #define MAX_SC_PROCS 32 @@ -739,27 +648,75 @@ VkResult radv_CreateInstance( radv_logi("Created an instance"); for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { - const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i]; - int index = radv_get_instance_extension_index(ext_name); + int idx; + for (idx = 0; idx < RADV_INSTANCE_EXTENSION_COUNT; idx++) { + if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], + radv_instance_extensions[idx].extensionName)) + break; + } - if (index < 0 || !radv_supported_instance_extensions.extensions[index]) { + if (idx >= RADV_INSTANCE_EXTENSION_COUNT || + !radv_supported_instance_extensions.extensions[idx]) { vk_free2(&default_alloc, pAllocator, instance); return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT); } - instance->enabled_extensions.extensions[index] = true; + instance->enabled_extensions.extensions[idx] = true; + } + + bool unchecked = instance->debug_flags & RADV_DEBUG_ALL_ENTRYPOINTS; + + for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) { + /* Vulkan requires that entrypoints for extensions which have + * not been enabled must not be advertised. + */ + if (!unchecked && + !radv_instance_entrypoint_is_enabled(i, instance->apiVersion, + &instance->enabled_extensions)) { + instance->dispatch.entrypoints[i] = NULL; + } else { + instance->dispatch.entrypoints[i] = + radv_instance_dispatch_table.entrypoints[i]; + } } + for (unsigned i = 0; i < ARRAY_SIZE(instance->physical_device_dispatch.entrypoints); i++) { + /* Vulkan requires that entrypoints for extensions which have + * not been enabled must not be advertised. + */ + if (!unchecked && + !radv_physical_device_entrypoint_is_enabled(i, instance->apiVersion, + &instance->enabled_extensions)) { + instance->physical_device_dispatch.entrypoints[i] = NULL; + } else { + instance->physical_device_dispatch.entrypoints[i] = + radv_physical_device_dispatch_table.entrypoints[i]; + } + } + + for (unsigned i = 0; i < ARRAY_SIZE(instance->device_dispatch.entrypoints); i++) { + /* Vulkan requires that entrypoints for extensions which have + * not been enabled must not be advertised. 
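+	 * (Reviewer's note: the practical effect is that the trimmed table
+	 * feeds lookups directly, as radv_GetDeviceProcAddr() does later in
+	 * this patch:
+	 *
+	 *     int idx = radv_get_device_entrypoint_index(pName);
+	 *     return idx < 0 ? NULL : device->dispatch.entrypoints[idx];
+	 *
+	 * with RADV_DEBUG_ALL_ENTRYPOINTS bypassing the trim for debugging.)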
+ */ + if (!unchecked && + !radv_device_entrypoint_is_enabled(i, instance->apiVersion, + &instance->enabled_extensions, NULL)) { + instance->device_dispatch.entrypoints[i] = NULL; + } else { + instance->device_dispatch.entrypoints[i] = + radv_device_dispatch_table.entrypoints[i]; + } + } + + instance->physical_devices_enumerated = false; + list_inithead(&instance->physical_devices); + result = vk_debug_report_instance_init(&instance->debug_report_callbacks); if (result != VK_SUCCESS) { vk_free2(&default_alloc, pAllocator, instance); return vk_error(instance, result); } - instance->engineName = vk_strdup(&instance->alloc, engine_name, - VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); - instance->engineVersion = engine_version; - glsl_type_singleton_init_or_ref(); VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false)); @@ -781,8 +738,9 @@ void radv_DestroyInstance( if (!instance) return; - for (int i = 0; i < instance->physicalDeviceCount; ++i) { - radv_physical_device_finish(instance->physicalDevices + i); + list_for_each_entry_safe(struct radv_physical_device, pdevice, + &instance->physical_devices, link) { + radv_physical_device_destroy(pdevice); } vk_free(&instance->alloc, instance->engineName); @@ -800,14 +758,32 @@ void radv_DestroyInstance( } static VkResult -radv_enumerate_devices(struct radv_instance *instance) +radv_enumerate_physical_devices(struct radv_instance *instance) { + if (instance->physical_devices_enumerated) + return VK_SUCCESS; + + instance->physical_devices_enumerated = true; + /* TODO: Check for more devices ? */ drmDevicePtr devices[8]; - VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER; + VkResult result = VK_SUCCESS; int max_devices; - instance->physicalDeviceCount = 0; + if (getenv("RADV_FORCE_FAMILY")) { + /* When RADV_FORCE_FAMILY is set, the driver creates a nul + * device that allows to test the compiler without having an + * AMDGPU instance. + */ + struct radv_physical_device *pdevice; + + result = radv_physical_device_try_create(instance, NULL, &pdevice); + if (result != VK_SUCCESS) + return result; + + list_addtail(&pdevice->link, &instance->physical_devices); + return VK_SUCCESS; + } max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices)); @@ -815,25 +791,32 @@ radv_enumerate_devices(struct radv_instance *instance) radv_logi("Found %d drm nodes", max_devices); if (max_devices < 1) - return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER); + return vk_error(instance, VK_SUCCESS); for (unsigned i = 0; i < (unsigned)max_devices; i++) { if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER && devices[i]->bustype == DRM_BUS_PCI && devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) { - result = radv_physical_device_init(instance->physicalDevices + - instance->physicalDeviceCount, - instance, - devices[i]); - if (result == VK_SUCCESS) - ++instance->physicalDeviceCount; - else if (result != VK_ERROR_INCOMPATIBLE_DRIVER) + struct radv_physical_device *pdevice; + result = radv_physical_device_try_create(instance, devices[i], + &pdevice); + /* Incompatible DRM device, skip. */ + if (result == VK_ERROR_INCOMPATIBLE_DRIVER) { + result = VK_SUCCESS; + continue; + } + + /* Error creating the physical device, report the error. 
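	 * (Reviewer's note: the distinction matters --
	 * VK_ERROR_INCOMPATIBLE_DRIVER from a non-amdgpu node is swallowed
	 * and enumeration continues, any other failure is returned to the
	 * application, and successfully created devices are queued on
	 * instance->physical_devices.)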
*/ + if (result != VK_SUCCESS) break; + + list_addtail(&pdevice->link, &instance->physical_devices); } } drmFreeDevices(devices, max_devices); + /* If we successfully enumerated any devices, call it success */ return result; } @@ -843,25 +826,20 @@ VkResult radv_EnumeratePhysicalDevices( VkPhysicalDevice* pPhysicalDevices) { RADV_FROM_HANDLE(radv_instance, instance, _instance); - VkResult result; + VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount); - if (instance->physicalDeviceCount < 0) { - result = radv_enumerate_devices(instance); - if (result != VK_SUCCESS && - result != VK_ERROR_INCOMPATIBLE_DRIVER) - return result; - } + VkResult result = radv_enumerate_physical_devices(instance); + if (result != VK_SUCCESS) + return result; - if (!pPhysicalDevices) { - *pPhysicalDeviceCount = instance->physicalDeviceCount; - } else { - *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount); - for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i) - pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i); + list_for_each_entry(struct radv_physical_device, pdevice, + &instance->physical_devices, link) { + vk_outarray_append(&out, i) { + *i = radv_physical_device_to_handle(pdevice); + } } - return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE - : VK_SUCCESS; + return vk_outarray_status(&out); } VkResult radv_EnumeratePhysicalDeviceGroups( @@ -870,27 +848,24 @@ VkResult radv_EnumeratePhysicalDeviceGroups( VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties) { RADV_FROM_HANDLE(radv_instance, instance, _instance); - VkResult result; + VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties, + pPhysicalDeviceGroupCount); - if (instance->physicalDeviceCount < 0) { - result = radv_enumerate_devices(instance); - if (result != VK_SUCCESS && - result != VK_ERROR_INCOMPATIBLE_DRIVER) - return result; - } + VkResult result = radv_enumerate_physical_devices(instance); + if (result != VK_SUCCESS) + return result; - if (!pPhysicalDeviceGroupProperties) { - *pPhysicalDeviceGroupCount = instance->physicalDeviceCount; - } else { - *pPhysicalDeviceGroupCount = MIN2(*pPhysicalDeviceGroupCount, instance->physicalDeviceCount); - for (unsigned i = 0; i < *pPhysicalDeviceGroupCount; ++i) { - pPhysicalDeviceGroupProperties[i].physicalDeviceCount = 1; - pPhysicalDeviceGroupProperties[i].physicalDevices[0] = radv_physical_device_to_handle(instance->physicalDevices + i); - pPhysicalDeviceGroupProperties[i].subsetAllocation = false; + list_for_each_entry(struct radv_physical_device, pdevice, + &instance->physical_devices, link) { + vk_outarray_append(&out, p) { + p->physicalDeviceCount = 1; + memset(p->physicalDevices, 0, sizeof(p->physicalDevices)); + p->physicalDevices[0] = radv_physical_device_to_handle(pdevice); + p->subsetAllocation = false; } } - return *pPhysicalDeviceGroupCount < instance->physicalDeviceCount ? 
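	/* Reviewer's note: the vk_outarray helpers replacing this open-coded
	 * clamp implement the same count/fill protocol, roughly:
	 *
	 *     VK_OUTARRAY_MAKE(out, pArray, pCount);  // bind array + count
	 *     vk_outarray_append(&out, elem) { ... }  // write while space
	 *                                             // remains, else count
	 *     return vk_outarray_status(&out);        // VK_INCOMPLETE if the
	 *                                             // array was too short
	 *
	 * so the VK_INCOMPLETE semantics deleted here are preserved. */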
VK_INCOMPLETE - : VK_SUCCESS; + + return vk_outarray_status(&out); } void radv_GetPhysicalDeviceFeatures( @@ -931,7 +906,7 @@ void radv_GetPhysicalDeviceFeatures( .shaderTessellationAndGeometryPointSize = true, .shaderImageGatherExtended = true, .shaderStorageImageExtendedFormats = true, - .shaderStorageImageMultisample = pdevice->rad_info.chip_class >= GFX8, + .shaderStorageImageMultisample = true, .shaderUniformBufferArrayDynamicIndexing = true, .shaderSampledImageArrayDynamicIndexing = true, .shaderStorageBufferArrayDynamicIndexing = true, @@ -942,7 +917,7 @@ void radv_GetPhysicalDeviceFeatures( .shaderCullDistance = true, .shaderFloat64 = true, .shaderInt64 = true, - .shaderInt16 = pdevice->rad_info.chip_class >= GFX9 && !pdevice->use_aco, + .shaderInt16 = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8, .sparseBinding = true, .variableMultisampleRate = true, .inheritedQueries = true, @@ -984,11 +959,11 @@ void radv_GetPhysicalDeviceFeatures2( case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: { VkPhysicalDevice16BitStorageFeatures *features = (VkPhysicalDevice16BitStorageFeatures*)ext; - bool enabled = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; - features->storageBuffer16BitAccess = enabled; - features->uniformAndStorageBuffer16BitAccess = enabled; - features->storagePushConstant16 = enabled; - features->storageInputOutput16 = enabled && LLVM_VERSION_MAJOR >= 9; + bool enable = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8; + features->storageBuffer16BitAccess = enable; + features->uniformAndStorageBuffer16BitAccess = enable; + features->storagePushConstant16 = enable; + features->storageInputOutput16 = pdevice->rad_info.has_double_rate_fp16 && !pdevice->use_aco && LLVM_VERSION_MAJOR >= 9; break; } case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: { @@ -1086,17 +1061,17 @@ void radv_GetPhysicalDeviceFeatures2( case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES: { VkPhysicalDevice8BitStorageFeatures *features = (VkPhysicalDevice8BitStorageFeatures *)ext; - bool enabled = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; - features->storageBuffer8BitAccess = enabled; - features->uniformAndStorageBuffer8BitAccess = enabled; - features->storagePushConstant8 = enabled; + bool enable = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8; + features->storageBuffer8BitAccess = enable; + features->uniformAndStorageBuffer8BitAccess = enable; + features->storagePushConstant8 = enable; break; } case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES: { VkPhysicalDeviceShaderFloat16Int8Features *features = (VkPhysicalDeviceShaderFloat16Int8Features*)ext; - features->shaderFloat16 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; - features->shaderInt8 = !pdevice->use_aco; + features->shaderFloat16 = pdevice->rad_info.has_double_rate_fp16 && !pdevice->use_aco; + features->shaderInt8 = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8; break; } case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES: { @@ -1192,7 +1167,7 @@ void radv_GetPhysicalDeviceFeatures2( case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES: { VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures *features = (VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures *)ext; - features->shaderSubgroupExtendedTypes = true; + features->shaderSubgroupExtendedTypes = !pdevice->use_aco; break; } case 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR: { @@ -1204,10 +1179,11 @@ void radv_GetPhysicalDeviceFeatures2( case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: { VkPhysicalDeviceVulkan11Features *features = (VkPhysicalDeviceVulkan11Features *)ext; - features->storageBuffer16BitAccess = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; - features->uniformAndStorageBuffer16BitAccess = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; - features->storagePushConstant16 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; - features->storageInputOutput16 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco && LLVM_VERSION_MAJOR >= 9; + bool storage16_enable = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8; + features->storageBuffer16BitAccess = storage16_enable; + features->uniformAndStorageBuffer16BitAccess = storage16_enable; + features->storagePushConstant16 = storage16_enable; + features->storageInputOutput16 = pdevice->rad_info.has_double_rate_fp16 && !pdevice->use_aco && LLVM_VERSION_MAJOR >= 9; features->multiview = true; features->multiviewGeometryShader = true; features->multiviewTessellationShader = true; @@ -1221,15 +1197,16 @@ void radv_GetPhysicalDeviceFeatures2( case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: { VkPhysicalDeviceVulkan12Features *features = (VkPhysicalDeviceVulkan12Features *)ext; + bool int8_enable = !pdevice->use_aco || pdevice->rad_info.chip_class >= GFX8; features->samplerMirrorClampToEdge = true; features->drawIndirectCount = true; - features->storageBuffer8BitAccess = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; - features->uniformAndStorageBuffer8BitAccess = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; - features->storagePushConstant8 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; + features->storageBuffer8BitAccess = int8_enable; + features->uniformAndStorageBuffer8BitAccess = int8_enable; + features->storagePushConstant8 = int8_enable; features->shaderBufferInt64Atomics = LLVM_VERSION_MAJOR >= 9; features->shaderSharedInt64Atomics = LLVM_VERSION_MAJOR >= 9; - features->shaderFloat16 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_aco; - features->shaderInt8 = !pdevice->use_aco; + features->shaderFloat16 = pdevice->rad_info.has_double_rate_fp16 && !pdevice->use_aco; + features->shaderInt8 = int8_enable; features->descriptorIndexing = true; features->shaderInputAttachmentArrayDynamicIndexing = true; features->shaderUniformTexelBufferArrayDynamicIndexing = true; @@ -1251,11 +1228,11 @@ void radv_GetPhysicalDeviceFeatures2( features->descriptorBindingPartiallyBound = true; features->descriptorBindingVariableDescriptorCount = true; features->runtimeDescriptorArray = true; - features->samplerFilterMinmax = pdevice->rad_info.chip_class >= GFX7; + features->samplerFilterMinmax = true; features->scalarBlockLayout = pdevice->rad_info.chip_class >= GFX7; features->imagelessFramebuffer = true; features->uniformBufferStandardLayout = true; - features->shaderSubgroupExtendedTypes = true; + features->shaderSubgroupExtendedTypes = !pdevice->use_aco; features->separateDepthStencilLayouts = true; features->hostQueryReset = true; features->timelineSemaphore = pdevice->rad_info.has_syncobj_wait_for_submit; @@ -1281,6 +1258,20 @@ void radv_GetPhysicalDeviceFeatures2( features->stippledSmoothLines = false; break; } + case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: { + VkDeviceMemoryOverallocationCreateInfoAMD 
*features = + (VkDeviceMemoryOverallocationCreateInfoAMD *)ext; + features->overallocationBehavior = true; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: { + VkPhysicalDeviceRobustness2FeaturesEXT *features = + (VkPhysicalDeviceRobustness2FeaturesEXT *)ext; + features->robustBufferAccess2 = true; + features->robustImageAccess2 = true; + features->nullDescriptor = true; + break; + } default: break; } @@ -1321,14 +1312,14 @@ void radv_GetPhysicalDeviceProperties( .maxImageDimension3D = (1 << 11), .maxImageDimensionCube = (1 << 14), .maxImageArrayLayers = (1 << 11), - .maxTexelBufferElements = 128 * 1024 * 1024, + .maxTexelBufferElements = UINT32_MAX, .maxUniformBufferRange = UINT32_MAX, .maxStorageBufferRange = UINT32_MAX, .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE, .maxMemoryAllocationCount = UINT32_MAX, .maxSamplerAllocationCount = 64 * 1024, .bufferImageGranularity = 64, /* A cache line */ - .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */ + .sparseAddressSpaceSize = RADV_MAX_MEMORY_ALLOCATION_SIZE, /* buffer max size */ .maxBoundDescriptorSets = MAX_SETS, .maxPerStageDescriptorSamplers = max_descriptor_set_size, .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size, @@ -1409,7 +1400,7 @@ void radv_GetPhysicalDeviceProperties( .sampledImageIntegerSampleCounts = sample_counts, .sampledImageDepthSampleCounts = sample_counts, .sampledImageStencilSampleCounts = sample_counts, - .storageImageSampleCounts = pdevice->rad_info.chip_class >= GFX8 ? sample_counts : VK_SAMPLE_COUNT_1_BIT, + .storageImageSampleCounts = sample_counts, .maxSampleMaskWords = 1, .timestampComputeAndGraphics = true, .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq, @@ -1456,7 +1447,8 @@ radv_get_physical_device_properties_1_1(struct radv_physical_device *pdevice, p->deviceNodeMask = 0; p->subgroupSize = RADV_SUBGROUP_SIZE; - p->subgroupSupportedStages = VK_SHADER_STAGE_ALL; + p->subgroupSupportedStages = VK_SHADER_STAGE_ALL_GRAPHICS | + VK_SHADER_STAGE_COMPUTE_BIT; p->subgroupSupportedOperations = VK_SUBGROUP_FEATURE_BASIC_BIT | VK_SUBGROUP_FEATURE_VOTE_BIT | VK_SUBGROUP_FEATURE_ARITHMETIC_BIT | @@ -1464,8 +1456,9 @@ radv_get_physical_device_properties_1_1(struct radv_physical_device *pdevice, VK_SUBGROUP_FEATURE_CLUSTERED_BIT | VK_SUBGROUP_FEATURE_QUAD_BIT; - if (pdevice->rad_info.chip_class == GFX8 || - pdevice->rad_info.chip_class == GFX9) { + if (((pdevice->rad_info.chip_class == GFX6 || + pdevice->rad_info.chip_class == GFX7) && !pdevice->use_aco) || + pdevice->rad_info.chip_class >= GFX8) { p->subgroupSupportedOperations |= VK_SUBGROUP_FEATURE_SHUFFLE_BIT | VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT; } @@ -1500,8 +1493,13 @@ radv_get_physical_device_properties_1_2(struct radv_physical_device *pdevice, /* On AMD hardware, denormals and rounding modes for fp16/fp64 are * controlled by the same config register. 
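	 * (Reviewer's note: this is presumably why 32-bit-only independence
	 * is now advertised exactly when the hardware has real fp16 controls,
	 * i.e. has_double_rate_fp16; without them the fp16 settings are
	 * don't-cares and claiming full independence appears safe.)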
*/ - p->denormBehaviorIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR; - p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR; + if (pdevice->rad_info.has_double_rate_fp16) { + p->denormBehaviorIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR; + p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR; + } else { + p->denormBehaviorIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR; + p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR; + } /* Do not allow both preserving and flushing denorms because different * shaders in the same pipeline can have different settings and this @@ -1518,10 +1516,10 @@ radv_get_physical_device_properties_1_2(struct radv_physical_device *pdevice, p->shaderSignedZeroInfNanPreserveFloat32 = true; p->shaderDenormFlushToZeroFloat16 = false; - p->shaderDenormPreserveFloat16 = pdevice->rad_info.chip_class >= GFX8; - p->shaderRoundingModeRTEFloat16 = pdevice->rad_info.chip_class >= GFX8; + p->shaderDenormPreserveFloat16 = pdevice->rad_info.has_double_rate_fp16; + p->shaderRoundingModeRTEFloat16 = pdevice->rad_info.has_double_rate_fp16; p->shaderRoundingModeRTZFloat16 = false; - p->shaderSignedZeroInfNanPreserveFloat16 = pdevice->rad_info.chip_class >= GFX8; + p->shaderSignedZeroInfNanPreserveFloat16 = pdevice->rad_info.has_double_rate_fp16; p->shaderDenormFlushToZeroFloat64 = false; p->shaderDenormPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8; @@ -1685,32 +1683,31 @@ void radv_GetPhysicalDeviceProperties2( pdevice->rad_info.max_sh_per_se; properties->computeUnitsPerShaderArray = pdevice->rad_info.num_good_cu_per_sh; - properties->simdPerComputeUnit = 4; + properties->simdPerComputeUnit = + pdevice->rad_info.num_simd_per_compute_unit; properties->wavefrontsPerSimd = - pdevice->rad_info.family == CHIP_TONGA || - pdevice->rad_info.family == CHIP_ICELAND || - pdevice->rad_info.family == CHIP_POLARIS10 || - pdevice->rad_info.family == CHIP_POLARIS11 || - pdevice->rad_info.family == CHIP_POLARIS12 || - pdevice->rad_info.family == CHIP_VEGAM ? 8 : 10; + pdevice->rad_info.max_wave64_per_simd; properties->wavefrontSize = 64; /* SGPR. */ properties->sgprsPerSimd = pdevice->rad_info.num_physical_sgprs_per_simd; properties->minSgprAllocation = - pdevice->rad_info.chip_class >= GFX8 ? 16 : 8; + pdevice->rad_info.min_sgpr_alloc; properties->maxSgprAllocation = - pdevice->rad_info.family == CHIP_TONGA || - pdevice->rad_info.family == CHIP_ICELAND ? 96 : 104; + pdevice->rad_info.max_sgpr_alloc; properties->sgprAllocationGranularity = - pdevice->rad_info.chip_class >= GFX8 ? 16 : 8; + pdevice->rad_info.sgpr_alloc_granularity; /* VGPR. 
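	 * (Reviewer's note: as with the SGPR block above, the hardcoded
	 * per-family constants are gone; the min/max/granularity values now
	 * come from the shared rad_info, in wave64 units, so new chips should
	 * only need their GPU-info table filled in.)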
*/ - properties->vgprsPerSimd = RADV_NUM_PHYSICAL_VGPRS; - properties->minVgprAllocation = 4; - properties->maxVgprAllocation = 256; - properties->vgprAllocationGranularity = 4; + properties->vgprsPerSimd = + pdevice->rad_info.num_physical_wave64_vgprs_per_simd; + properties->minVgprAllocation = + pdevice->rad_info.min_wave64_vgpr_alloc; + properties->maxVgprAllocation = + pdevice->rad_info.max_vgpr_alloc; + properties->vgprAllocationGranularity = + pdevice->rad_info.wave64_vgpr_alloc_granularity; break; } case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD: { @@ -1906,6 +1903,13 @@ void radv_GetPhysicalDeviceProperties2( props->lineSubPixelPrecisionBits = 4; break; } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT: { + VkPhysicalDeviceRobustness2PropertiesEXT *properties = + (VkPhysicalDeviceRobustness2PropertiesEXT *)ext; + properties->robustStorageBufferAccessSizeAlignment = 4; + properties->robustUniformBufferAccessSizeAlignment = 4; + break; + } default: break; } @@ -2032,7 +2036,7 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice, for (int i = 0; i < device->memory_properties.memoryTypeCount; i++) { uint32_t heap_index = device->memory_properties.memoryTypes[i].heapIndex; - if (radv_is_mem_type_vram(device->mem_type_indices[i])) { + if ((device->memory_domains[i] & RADEON_DOMAIN_VRAM) && (device->memory_flags[i] & RADEON_FLAG_NO_CPU_ACCESS)) { heap_usage = device->ws->query_value(device->ws, RADEON_ALLOCATED_VRAM); @@ -2042,7 +2046,7 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice, memoryBudget->heapBudget[heap_index] = heap_budget; memoryBudget->heapUsage[heap_index] = heap_usage; - } else if (radv_is_mem_type_vram_visible(device->mem_type_indices[i])) { + } else if (device->memory_domains[i] & RADEON_DOMAIN_VRAM) { heap_usage = device->ws->query_value(device->ws, RADEON_ALLOCATED_VRAM_VIS); @@ -2052,7 +2056,9 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice, memoryBudget->heapBudget[heap_index] = heap_budget; memoryBudget->heapUsage[heap_index] = heap_usage; - } else if (radv_is_mem_type_gtt_wc(device->mem_type_indices[i])) { + } else { + assert(device->memory_domains[i] & RADEON_DOMAIN_GTT); + heap_usage = device->ws->query_value(device->ws, RADEON_ALLOCATED_GTT); @@ -2103,7 +2109,8 @@ VkResult radv_GetMemoryHostPointerPropertiesEXT( const struct radv_physical_device *physical_device = device->physical_device; uint32_t memoryTypeBits = 0; for (int i = 0; i < physical_device->memory_properties.memoryTypeCount; i++) { - if (radv_is_mem_type_gtt_cached(physical_device->mem_type_indices[i])) { + if (physical_device->memory_domains[i] == RADEON_DOMAIN_GTT && + !(physical_device->memory_flags[i] & RADEON_FLAG_GTT_WC)) { memoryTypeBits = (1 << i); break; } @@ -2208,8 +2215,8 @@ radv_bo_list_finish(struct radv_bo_list *bo_list) pthread_mutex_destroy(&bo_list->mutex); } -static VkResult radv_bo_list_add(struct radv_device *device, - struct radeon_winsys_bo *bo) +VkResult radv_bo_list_add(struct radv_device *device, + struct radeon_winsys_bo *bo) { struct radv_bo_list *bo_list = &device->bo_list; @@ -2238,8 +2245,8 @@ static VkResult radv_bo_list_add(struct radv_device *device, return VK_SUCCESS; } -static void radv_bo_list_remove(struct radv_device *device, - struct radeon_winsys_bo *bo) +void radv_bo_list_remove(struct radv_device *device, + struct radeon_winsys_bo *bo) { struct radv_bo_list *bo_list = &device->bo_list; @@ -2250,7 +2257,9 @@ static void radv_bo_list_remove(struct radv_device 
*device, return; pthread_mutex_lock(&bo_list->mutex); - for(unsigned i = 0; i < bo_list->list.count; ++i) { + /* Loop the list backwards so we find the most recently added + * memory first. */ + for(unsigned i = bo_list->list.count; i-- > 0;) { if (bo_list->list.bos[i] == bo) { bo_list->list.bos[i] = bo_list->list.bos[bo_list->list.count - 1]; --bo_list->list.count; @@ -2825,6 +2834,39 @@ static VkResult fork_secure_compile_idle_device(struct radv_device *device) return VK_SUCCESS; } +static void +radv_device_init_dispatch(struct radv_device *device) +{ + const struct radv_instance *instance = device->physical_device->instance; + const struct radv_device_dispatch_table *dispatch_table_layer = NULL; + bool unchecked = instance->debug_flags & RADV_DEBUG_ALL_ENTRYPOINTS; + int radv_thread_trace = radv_get_int_debug_option("RADV_THREAD_TRACE", -1); + + if (radv_thread_trace >= 0) { + /* Use device entrypoints from the SQTT layer if enabled. */ + dispatch_table_layer = &sqtt_device_dispatch_table; + } + + for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) { + /* Vulkan requires that entrypoints for extensions which have not been + * enabled must not be advertised. + */ + if (!unchecked && + !radv_device_entrypoint_is_enabled(i, instance->apiVersion, + &instance->enabled_extensions, + &device->enabled_extensions)) { + device->dispatch.entrypoints[i] = NULL; + } else if (dispatch_table_layer && + dispatch_table_layer->entrypoints[i]) { + device->dispatch.entrypoints[i] = + dispatch_table_layer->entrypoints[i]; + } else { + device->dispatch.entrypoints[i] = + radv_device_dispatch_table.entrypoints[i]; + } + } +} + static VkResult radv_create_pthread_cond(pthread_cond_t *cond) { @@ -2845,6 +2887,24 @@ radv_create_pthread_cond(pthread_cond_t *cond) return VK_SUCCESS; } +static VkResult +check_physical_device_features(VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceFeatures *features) +{ + RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice); + VkPhysicalDeviceFeatures supported_features; + radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features); + VkBool32 *supported_feature = (VkBool32 *)&supported_features; + VkBool32 *enabled_feature = (VkBool32 *)features; + unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); + for (uint32_t i = 0; i < num_features; i++) { + if (enabled_feature[i] && !supported_feature[i]) + return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT); + } + + return VK_SUCCESS; +} + VkResult radv_CreateDevice( VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, @@ -2856,17 +2916,41 @@ VkResult radv_CreateDevice( struct radv_device *device; bool keep_shader_info = false; + bool robust_buffer_access = false; + bool overallocation_disallowed = false; /* Check enabled features */ if (pCreateInfo->pEnabledFeatures) { - VkPhysicalDeviceFeatures supported_features; - radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features); - VkBool32 *supported_feature = (VkBool32 *)&supported_features; - VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures; - unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); - for (uint32_t i = 0; i < num_features; i++) { - if (enabled_feature[i] && !supported_feature[i]) - return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT); + result = check_physical_device_features(physicalDevice, + pCreateInfo->pEnabledFeatures); + if (result != VK_SUCCESS) + return result; + + if 
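	/* Reviewer's note: robustBufferAccess can now arrive two ways -- via
	 * pEnabledFeatures here, or via a VkPhysicalDeviceFeatures2 chained
	 * into pCreateInfo->pNext (handled by the vk_foreach_struct_const
	 * loop below). A sketch of the second path, application-side:
	 *
	 *     VkPhysicalDeviceFeatures2 features2 = {
	 *         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
	 *         .features.robustBufferAccess = VK_TRUE,
	 *     };
	 *     VkDeviceCreateInfo info = {
	 *         .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
	 *         .pNext = &features2,
	 *     };  // pEnabledFeatures must then be NULL
	 */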
(pCreateInfo->pEnabledFeatures->robustBufferAccess) + robust_buffer_access = true; + } + + vk_foreach_struct_const(ext, pCreateInfo->pNext) { + switch (ext->sType) { + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: { + const VkPhysicalDeviceFeatures2 *features = (const void *)ext; + result = check_physical_device_features(physicalDevice, + &features->features); + if (result != VK_SUCCESS) + return result; + + if (features->features.robustBufferAccess) + robust_buffer_access = true; + break; + } + case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: { + const VkDeviceMemoryOverallocationCreateInfoAMD *overallocation = (const void *)ext; + if (overallocation->overallocationBehavior == VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD) + overallocation_disallowed = true; + break; + } + default: + break; } } @@ -2897,6 +2981,8 @@ VkResult radv_CreateDevice( device->enabled_extensions.extensions[index] = true; } + radv_device_init_dispatch(device); + keep_shader_info = device->enabled_extensions.AMD_shader_info; /* With update after bind we can't attach bo's to the command buffer @@ -2908,12 +2994,14 @@ VkResult radv_CreateDevice( device->enabled_extensions.EXT_buffer_device_address || device->enabled_extensions.KHR_buffer_device_address; - device->robust_buffer_access = pCreateInfo->pEnabledFeatures && - pCreateInfo->pEnabledFeatures->robustBufferAccess; + device->robust_buffer_access = robust_buffer_access; mtx_init(&device->shader_slab_mutex, mtx_plain); list_inithead(&device->shader_slabs); + device->overallocation_disallowed = overallocation_disallowed; + mtx_init(&device->overallocation_mutex, mtx_plain); + radv_bo_list_init(&device->bo_list); for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) { @@ -2999,6 +3087,28 @@ VkResult radv_CreateDevice( radv_dump_enabled_options(device, stderr); } + int radv_thread_trace = radv_get_int_debug_option("RADV_THREAD_TRACE", -1); + if (radv_thread_trace >= 0) { + fprintf(stderr, "*************************************************\n"); + fprintf(stderr, "* WARNING: Thread trace support is experimental *\n"); + fprintf(stderr, "*************************************************\n"); + + if (device->physical_device->rad_info.chip_class < GFX8) { + fprintf(stderr, "GPU hardware not supported: refer to " + "the RGP documentation for the list of " + "supported GPUs!\n"); + abort(); + } + + /* Default buffer size set to 1MB per SE. 
*/ + device->thread_trace_buffer_size = + radv_get_int_debug_option("RADV_THREAD_TRACE_BUFFER_SIZE", 1024 * 1024); + device->thread_trace_start_frame = radv_thread_trace; + + if (!radv_thread_trace_init(device)) + goto fail; + } + /* Temporarily disable secure compile while we create meta shaders, etc */ uint8_t sc_threads = device->instance->num_sc_threads; if (sc_threads) @@ -3074,6 +3184,8 @@ fail_meta: fail: radv_bo_list_finish(&device->bo_list); + radv_thread_trace_finish(device); + if (device->trace_bo) device->ws->buffer_destroy(device->trace_bo); @@ -3123,6 +3235,9 @@ void radv_DestroyDevice( pthread_cond_destroy(&device->timeline_cond); radv_bo_list_finish(&device->bo_list); + + radv_thread_trace_finish(device); + if (radv_device_use_secure_compile(device->instance)) { for (unsigned i = 0; i < device->instance->num_sc_threads; i++ ) { destroy_secure_compile_device(device, i); @@ -3617,8 +3732,7 @@ radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue) radv_cs_add_buffer(device->ws, cs, device->gfx_init); } else { - struct radv_physical_device *physical_device = device->physical_device; - si_emit_graphics(physical_device, cs); + si_emit_graphics(device, cs); } } @@ -3912,6 +4026,9 @@ radv_get_preamble_cs(struct radv_queue *queue, if (gds_oa_bo) radv_cs_add_buffer(queue->device->ws, cs, gds_oa_bo); + if (queue->device->trace_bo) + radv_cs_add_buffer(queue->device->ws, cs, queue->device->trace_bo); + if (i == 0) { si_cs_emit_cache_flush(cs, queue->device->physical_device->rad_info.chip_class, @@ -4679,6 +4796,25 @@ static VkResult radv_queue_submit(struct radv_queue *queue, return radv_process_submissions(&processing_list); } +bool +radv_queue_internal_submit(struct radv_queue *queue, struct radeon_cmdbuf *cs) +{ + struct radeon_winsys_ctx *ctx = queue->hw_ctx; + struct radv_winsys_sem_info sem_info; + VkResult result; + int ret; + + result = radv_alloc_sem_info(queue->device, &sem_info, 0, NULL, 0, 0, + 0, NULL, VK_NULL_HANDLE); + if (result != VK_SUCCESS) + return false; + + ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, &cs, 1, NULL, + NULL, &sem_info, NULL, false, NULL); + radv_free_sem_info(&sem_info); + return !ret; +} + /* Signals fence as soon as all the work currently put on queue is done. */ static VkResult radv_signal_fence(struct radv_queue *queue, VkFence fence) @@ -4828,16 +4964,46 @@ PFN_vkVoidFunction radv_GetInstanceProcAddr( const char* pName) { RADV_FROM_HANDLE(radv_instance, instance, _instance); - bool unchecked = instance ? instance->debug_flags & RADV_DEBUG_ALL_ENTRYPOINTS : false; - if (unchecked) { - return radv_lookup_entrypoint_unchecked(pName); - } else { - return radv_lookup_entrypoint_checked(pName, - instance ? instance->apiVersion : 0, - instance ? &instance->enabled_extensions : NULL, - NULL); - } + /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table of exactly + * when we have to return valid function pointers, NULL, or it's left + * undefined. See the table for exact details. + */ + if (pName == NULL) + return NULL; + +#define LOOKUP_RADV_ENTRYPOINT(entrypoint) \ + if (strcmp(pName, "vk" #entrypoint) == 0) \ + return (PFN_vkVoidFunction)radv_##entrypoint + + LOOKUP_RADV_ENTRYPOINT(EnumerateInstanceExtensionProperties); + LOOKUP_RADV_ENTRYPOINT(EnumerateInstanceLayerProperties); + LOOKUP_RADV_ENTRYPOINT(EnumerateInstanceVersion); + LOOKUP_RADV_ENTRYPOINT(CreateInstance); + + /* GetInstanceProcAddr() can also be called with a NULL instance. 
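+	 * (Reviewer's note: hence only the four global entrypoints plus
+	 * GetInstanceProcAddr itself are resolved before the NULL check;
+	 * everything else falls through to the instance, physical-device and
+	 * device dispatch tables, in that order. The Khronos issue below
+	 * tracks the NULL-instance allowance.)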
+ * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057 + */ + LOOKUP_RADV_ENTRYPOINT(GetInstanceProcAddr); + +#undef LOOKUP_RADV_ENTRYPOINT + + if (instance == NULL) + return NULL; + + int idx = radv_get_instance_entrypoint_index(pName); + if (idx >= 0) + return instance->dispatch.entrypoints[idx]; + + idx = radv_get_physical_device_entrypoint_index(pName); + if (idx >= 0) + return instance->physical_device_dispatch.entrypoints[idx]; + + idx = radv_get_device_entrypoint_index(pName); + if (idx >= 0) + return instance->device_dispatch.entrypoints[idx]; + + return NULL; } /* The loader wants us to expose a second GetInstanceProcAddr function @@ -4868,9 +5034,14 @@ VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr( { RADV_FROM_HANDLE(radv_instance, instance, _instance); - return radv_lookup_physical_device_entrypoint_checked(pName, - instance ? instance->apiVersion : 0, - instance ? &instance->enabled_extensions : NULL); + if (!pName || !instance) + return NULL; + + int idx = radv_get_physical_device_entrypoint_index(pName); + if (idx < 0) + return NULL; + + return instance->physical_device_dispatch.entrypoints[idx]; } PFN_vkVoidFunction radv_GetDeviceProcAddr( @@ -4878,16 +5049,15 @@ PFN_vkVoidFunction radv_GetDeviceProcAddr( const char* pName) { RADV_FROM_HANDLE(radv_device, device, _device); - bool unchecked = device ? device->instance->debug_flags & RADV_DEBUG_ALL_ENTRYPOINTS : false; - if (unchecked) { - return radv_lookup_entrypoint_unchecked(pName); - } else { - return radv_lookup_entrypoint_checked(pName, - device->instance->apiVersion, - &device->instance->enabled_extensions, - &device->enabled_extensions); - } + if (!device || !pName) + return NULL; + + int idx = radv_get_device_entrypoint_index(pName); + if (idx < 0) + return NULL; + + return device->dispatch.entrypoints[idx]; } bool radv_get_memory_fd(struct radv_device *device, @@ -4920,6 +5090,12 @@ static void radv_free_memory(struct radv_device *device, #endif if (mem->bo) { + if (device->overallocation_disallowed) { + mtx_lock(&device->overallocation_mutex); + device->allocated_memory_size[mem->heap_index] -= mem->alloc_size; + mtx_unlock(&device->overallocation_mutex); + } + radv_bo_list_remove(device, mem->bo); device->ws->buffer_destroy(mem->bo); mem->bo = NULL; @@ -4937,7 +5113,6 @@ static VkResult radv_alloc_memory(struct radv_device *device, VkResult result; enum radeon_bo_domain domain; uint32_t flags = 0; - enum radv_mem_type mem_type_index = device->physical_device->mem_type_indices[pAllocateInfo->memoryTypeIndex]; assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO); @@ -5019,7 +5194,6 @@ static VkResult radv_alloc_memory(struct radv_device *device, } } else if (host_ptr_info) { assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT); - assert(radv_is_mem_type_gtt_cached(mem_type_index)); mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer, pAllocateInfo->allocationSize, priority); @@ -5031,19 +5205,11 @@ static VkResult radv_alloc_memory(struct radv_device *device, } } else { uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096); - if (radv_is_mem_type_gtt_wc(mem_type_index) || - radv_is_mem_type_gtt_cached(mem_type_index)) - domain = RADEON_DOMAIN_GTT; - else - domain = RADEON_DOMAIN_VRAM; + uint32_t heap_index; - if (radv_is_mem_type_vram(mem_type_index)) - flags |= RADEON_FLAG_NO_CPU_ACCESS; - else - flags |= RADEON_FLAG_CPU_ACCESS; - - if (radv_is_mem_type_gtt_wc(mem_type_index)) - flags 
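		/* Reviewer's note: this whole per-enum translation collapses
		 * below into direct lookups keyed by the VkMemoryType index:
		 *
		 *     heap_index = ...memoryTypes[memoryTypeIndex].heapIndex;
		 *     domain     = physical_device->memory_domains[memoryTypeIndex];
		 *     flags     |= physical_device->memory_flags[memoryTypeIndex];
		 *
		 * i.e. the tables built in radv_physical_device_init_mem_types()
		 * become the single source of truth. */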
|= RADEON_FLAG_GTT_WC; + heap_index = device->physical_device->memory_properties.memoryTypes[pAllocateInfo->memoryTypeIndex].heapIndex; + domain = device->physical_device->memory_domains[pAllocateInfo->memoryTypeIndex]; + flags |= device->physical_device->memory_flags[pAllocateInfo->memoryTypeIndex]; if (!dedicate_info && !import_info && (!export_info || !export_info->handleTypes)) { flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING; @@ -5052,24 +5218,42 @@ static VkResult radv_alloc_memory(struct radv_device *device, } } - if (radv_is_mem_type_uncached(mem_type_index)) { - assert(device->physical_device->rad_info.has_l2_uncached); - flags |= RADEON_FLAG_VA_UNCACHED; + if (device->overallocation_disallowed) { + uint64_t total_size = + device->physical_device->memory_properties.memoryHeaps[heap_index].size; + + mtx_lock(&device->overallocation_mutex); + if (device->allocated_memory_size[heap_index] + alloc_size > total_size) { + mtx_unlock(&device->overallocation_mutex); + result = VK_ERROR_OUT_OF_DEVICE_MEMORY; + goto fail; + } + device->allocated_memory_size[heap_index] += alloc_size; + mtx_unlock(&device->overallocation_mutex); } mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment, domain, flags, priority); if (!mem->bo) { + if (device->overallocation_disallowed) { + mtx_lock(&device->overallocation_mutex); + device->allocated_memory_size[heap_index] -= alloc_size; + mtx_unlock(&device->overallocation_mutex); + } result = VK_ERROR_OUT_OF_DEVICE_MEMORY; goto fail; } - mem->type_index = mem_type_index; + + mem->heap_index = heap_index; + mem->alloc_size = alloc_size; } - result = radv_bo_list_add(device, mem->bo); - if (result != VK_SUCCESS) - goto fail; + if (!wsi_info) { + result = radv_bo_list_add(device, mem->bo); + if (result != VK_SUCCESS) + goto fail; + } *pMem = radv_device_memory_to_handle(mem); @@ -6113,6 +6297,9 @@ VkResult radv_CreateBuffer( RADV_FROM_HANDLE(radv_device, device, _device); struct radv_buffer *buffer; + if (pCreateInfo->size > RADV_MAX_MEMORY_ALLOCATION_SIZE) + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO); buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8, @@ -6282,18 +6469,20 @@ radv_initialise_color_surface(struct radv_device *device, cb->cb_color_base = va >> 8; if (device->physical_device->rad_info.chip_class >= GFX9) { - struct gfx9_surf_meta_flags meta; - if (iview->image->dcc_offset) - meta = surf->u.gfx9.dcc; - else - meta = surf->u.gfx9.cmask; - if (device->physical_device->rad_info.chip_class >= GFX10) { cb->cb_color_attrib3 |= S_028EE0_COLOR_SW_MODE(surf->u.gfx9.surf.swizzle_mode) | S_028EE0_FMASK_SW_MODE(surf->u.gfx9.fmask.swizzle_mode) | - S_028EE0_CMASK_PIPE_ALIGNED(surf->u.gfx9.cmask.pipe_aligned) | + S_028EE0_CMASK_PIPE_ALIGNED(1) | S_028EE0_DCC_PIPE_ALIGNED(surf->u.gfx9.dcc.pipe_aligned); } else { + struct gfx9_surf_meta_flags meta = { + .rb_aligned = 1, + .pipe_aligned = 1, + }; + + if (iview->image->dcc_offset) + meta = surf->u.gfx9.dcc; + cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(surf->u.gfx9.surf.swizzle_mode) | S_028C74_FMASK_SW_MODE(surf->u.gfx9.fmask.swizzle_mode) | S_028C74_RB_ALIGNED(meta.rb_aligned) | @@ -6621,10 +6810,10 @@ radv_initialise_ds_surface(struct radv_device *device, iview->image->htile_offset; ds->db_htile_data_base = va >> 8; ds->db_htile_surface = S_028ABC_FULL_CACHE(1) | - S_028ABC_PIPE_ALIGNED(surf->u.gfx9.htile.pipe_aligned); + S_028ABC_PIPE_ALIGNED(1); if 
@@ -6113,6 +6297,9 @@ VkResult radv_CreateBuffer(
 	RADV_FROM_HANDLE(radv_device, device, _device);
 	struct radv_buffer *buffer;
 
+	if (pCreateInfo->size > RADV_MAX_MEMORY_ALLOCATION_SIZE)
+		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
 	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
 
 	buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
@@ -6282,18 +6469,20 @@ radv_initialise_color_surface(struct radv_device *device,
 	cb->cb_color_base = va >> 8;
 
 	if (device->physical_device->rad_info.chip_class >= GFX9) {
-		struct gfx9_surf_meta_flags meta;
-		if (iview->image->dcc_offset)
-			meta = surf->u.gfx9.dcc;
-		else
-			meta = surf->u.gfx9.cmask;
-
 		if (device->physical_device->rad_info.chip_class >= GFX10) {
 			cb->cb_color_attrib3 |= S_028EE0_COLOR_SW_MODE(surf->u.gfx9.surf.swizzle_mode) |
 				S_028EE0_FMASK_SW_MODE(surf->u.gfx9.fmask.swizzle_mode) |
-				S_028EE0_CMASK_PIPE_ALIGNED(surf->u.gfx9.cmask.pipe_aligned) |
+				S_028EE0_CMASK_PIPE_ALIGNED(1) |
 				S_028EE0_DCC_PIPE_ALIGNED(surf->u.gfx9.dcc.pipe_aligned);
 		} else {
+			struct gfx9_surf_meta_flags meta = {
+				.rb_aligned = 1,
+				.pipe_aligned = 1,
+			};
+
+			if (iview->image->dcc_offset)
+				meta = surf->u.gfx9.dcc;
+
 			cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(surf->u.gfx9.surf.swizzle_mode) |
 				S_028C74_FMASK_SW_MODE(surf->u.gfx9.fmask.swizzle_mode) |
 				S_028C74_RB_ALIGNED(meta.rb_aligned) |
@@ -6621,10 +6810,10 @@ radv_initialise_ds_surface(struct radv_device *device,
 				iview->image->htile_offset;
 		ds->db_htile_data_base = va >> 8;
 		ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
-				       S_028ABC_PIPE_ALIGNED(surf->u.gfx9.htile.pipe_aligned);
+				       S_028ABC_PIPE_ALIGNED(1);
 
 		if (device->physical_device->rad_info.chip_class == GFX9) {
-			ds->db_htile_surface |= S_028ABC_RB_ALIGNED(surf->u.gfx9.htile.rb_aligned);
+			ds->db_htile_surface |= S_028ABC_RB_ALIGNED(1);
 		}
 		}
 	} else {
@@ -6904,6 +7093,11 @@ radv_get_max_anisotropy(struct radv_device *device,
 	return 0;
 }
 
+static inline int S_FIXED(float value, unsigned frac_bits)
+{
+	return value * (1 << frac_bits);
+}
+
 static void
 radv_init_sampler(struct radv_device *device,
 		  struct radv_sampler *sampler,
@@ -6915,6 +7109,7 @@ radv_init_sampler(struct radv_device *device,
 			device->physical_device->rad_info.chip_class == GFX9;
 	unsigned filter_mode = V_008F30_SQ_IMG_FILTER_MODE_BLEND;
 	unsigned depth_compare_func = V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
+	bool trunc_coord = pCreateInfo->minFilter == VK_FILTER_NEAREST && pCreateInfo->magFilter == VK_FILTER_NEAREST;
 
 	const struct VkSamplerReductionModeCreateInfo *sampler_reduction =
 		vk_find_struct_const(pCreateInfo->pNext,
@@ -6935,7 +7130,8 @@ radv_init_sampler(struct radv_device *device,
 			     S_008F30_ANISO_BIAS(max_aniso_ratio) |
 			     S_008F30_DISABLE_CUBE_WRAP(0) |
 			     S_008F30_COMPAT_MODE(compat_mode) |
-			     S_008F30_FILTER_MODE(filter_mode));
+			     S_008F30_FILTER_MODE(filter_mode) |
+			     S_008F30_TRUNC_COORD(trunc_coord));
 	sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
 			     S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
 			     S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
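The new S_FIXED helper converts a float into the fixed-point encoding the sampler words expect by scaling with 2^frac_bits and truncating; combined with the CLAMP(..., 0, 15) in the callers, the 8-fractional-bit LOD values stay within the hardware's 4.8-bit fields. The trunc_coord bit, by contrast, is set only when both min and mag filters are NEAREST, i.e. for pure point sampling. A standalone check of the fixed-point arithmetic, illustrative only:

#include <assert.h>

static inline int S_FIXED(float value, unsigned frac_bits)
{
	return value * (1 << frac_bits);
}

int main(void)
{
	assert(S_FIXED(2.5f, 8) == 640);    /* 2.5 * 256, i.e. 0x280 */
	assert(S_FIXED(15.0f, 8) == 0xF00); /* the clamped maximum LOD */
	assert(S_FIXED(0.0f, 8) == 0);
	return 0;
}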
@@ -7063,28 +7259,74 @@ VkResult radv_GetMemoryFdKHR(VkDevice _device,
 	return VK_SUCCESS;
 }
 
+static uint32_t radv_compute_valid_memory_types_attempt(struct radv_physical_device *dev,
+                                                        enum radeon_bo_domain domains,
+                                                        enum radeon_bo_flag flags,
+                                                        enum radeon_bo_flag ignore_flags)
+{
+	/* Don't count GTT/CPU as relevant:
+	 *
+	 * - We're not fully consistent between the two.
+	 * - Sometimes VRAM gets VRAM|GTT.
+	 */
+	const enum radeon_bo_domain relevant_domains = RADEON_DOMAIN_VRAM |
+	                                               RADEON_DOMAIN_GDS |
+	                                               RADEON_DOMAIN_OA;
+	uint32_t bits = 0;
+	for (unsigned i = 0; i < dev->memory_properties.memoryTypeCount; ++i) {
+		if ((domains & relevant_domains) != (dev->memory_domains[i] & relevant_domains))
+			continue;
+
+		if ((flags & ~ignore_flags) != (dev->memory_flags[i] & ~ignore_flags))
+			continue;
+
+		bits |= 1u << i;
+	}
+
+	return bits;
+}
+
+static uint32_t radv_compute_valid_memory_types(struct radv_physical_device *dev,
+                                                enum radeon_bo_domain domains,
+                                                enum radeon_bo_flag flags)
+{
+	enum radeon_bo_flag ignore_flags = ~(RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_GTT_WC);
+	uint32_t bits = radv_compute_valid_memory_types_attempt(dev, domains, flags, ignore_flags);
+
+	if (!bits) {
+		ignore_flags |= RADEON_FLAG_NO_CPU_ACCESS;
+		bits = radv_compute_valid_memory_types_attempt(dev, domains, flags, ignore_flags);
+	}
+
+	return bits;
+}
+
 VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
 				       VkExternalMemoryHandleTypeFlagBits handleType,
 				       int fd,
 				       VkMemoryFdPropertiesKHR *pMemoryFdProperties)
 {
-	RADV_FROM_HANDLE(radv_device, device, _device);
-
-	switch (handleType) {
-	case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
-		pMemoryFdProperties->memoryTypeBits = (1 << RADV_MEM_TYPE_COUNT) - 1;
-		return VK_SUCCESS;
-
-	default:
-		/* The valid usage section for this function says:
-		 *
-		 *    "handleType must not be one of the handle types defined as
-		 *    opaque."
-		 *
-		 *    So opaque handle types fall into the default "unsupported" case.
-		 */
-		return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
-	}
+	RADV_FROM_HANDLE(radv_device, device, _device);
+
+	switch (handleType) {
+	case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: {
+		enum radeon_bo_domain domains;
+		enum radeon_bo_flag flags;
+		if (!device->ws->buffer_get_flags_from_fd(device->ws, fd, &domains, &flags))
+			return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
+
+		pMemoryFdProperties->memoryTypeBits = radv_compute_valid_memory_types(device->physical_device, domains, flags);
+		return VK_SUCCESS;
+	}
+	default:
+		/* The valid usage section for this function says:
+		 *
+		 *    "handleType must not be one of the handle types defined as
+		 *    opaque."
+		 *
+		 *    So opaque handle types fall into the default "unsupported" case.
+		 */
+		return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
+	}
 }
 
 static VkResult radv_import_opaque_fd(struct radv_device *device,
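With this change, radv_GetMemoryFdPropertiesKHR no longer reports every memory type for an imported dma-buf: it queries the BO's actual domain and flags from the winsys and returns only the types that match, first comparing on NO_CPU_ACCESS and GTT_WC and then retrying with the CPU-access distinction ignored so a valid buffer does not end up with an empty mask. A sketch of how an application consumes the result, illustrative only; it assumes the VK_EXT_external_memory_dma_buf entrypoint has been resolved and error handling is elided:

#include <stdint.h>
#include <vulkan/vulkan.h>

/* Returns the lowest-index memory type that can import the dma-buf,
 * or UINT32_MAX if the import is not supported. */
static uint32_t pick_import_type(VkDevice device, int fd)
{
	VkMemoryFdPropertiesKHR fd_props = {
		.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR,
	};
	if (vkGetMemoryFdPropertiesKHR(device,
	                               VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
	                               fd, &fd_props) != VK_SUCCESS)
		return UINT32_MAX;

	for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; i++)
		if (fd_props.memoryTypeBits & (1u << i))
			return i; /* first memory type the driver reports as importable */

	return UINT32_MAX;
}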