#include <xf86drm.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>
-#include "amdgpu_id.h"
#include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
#include "ac_llvm_util.h"
#include "vk_format.h"
ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
}
-static const VkExtensionProperties instance_extensions[] = {
- {
- .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
- .specVersion = 25,
- },
-#ifdef VK_USE_PLATFORM_XCB_KHR
- {
- .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
- .specVersion = 6,
- },
-#endif
-#ifdef VK_USE_PLATFORM_XLIB_KHR
- {
- .extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
- .specVersion = 6,
- },
-#endif
-#ifdef VK_USE_PLATFORM_WAYLAND_KHR
- {
- .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
- .specVersion = 6,
- },
-#endif
- {
- .extensionName = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME,
- .specVersion = 1,
- },
-};
-
-static const VkExtensionProperties common_device_extensions[] = {
- {
- .extensionName = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_MAINTENANCE1_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
- .specVersion = 68,
- },
- {
- .extensionName = VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
- .specVersion = 1,
- },
-};
-
-static const VkExtensionProperties rasterization_order_extension[] ={
- {
- .extensionName = VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME,
- .specVersion = 1,
- },
-};
-
-static const VkExtensionProperties ext_sema_device_extensions[] = {
- {
- .extensionName = VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHX_MULTIVIEW_EXTENSION_NAME,
- .specVersion = 1,
- },
-};
-
-static VkResult
-radv_extensions_register(struct radv_instance *instance,
- struct radv_extensions *extensions,
- const VkExtensionProperties *new_ext,
- uint32_t num_ext)
-{
- size_t new_size;
- VkExtensionProperties *new_ptr;
-
- assert(new_ext && num_ext > 0);
-
- if (!new_ext)
- return VK_ERROR_INITIALIZATION_FAILED;
-
- new_size = (extensions->num_ext + num_ext) * sizeof(VkExtensionProperties);
- new_ptr = vk_realloc(&instance->alloc, extensions->ext_array,
- new_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
-
- /* Old array continues to be valid, update nothing */
- if (!new_ptr)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
-
- memcpy(&new_ptr[extensions->num_ext], new_ext,
- num_ext * sizeof(VkExtensionProperties));
- extensions->ext_array = new_ptr;
- extensions->num_ext += num_ext;
-
- return VK_SUCCESS;
-}
-
-static void
-radv_extensions_finish(struct radv_instance *instance,
- struct radv_extensions *extensions)
-{
- assert(extensions);
-
- if (!extensions)
- radv_loge("Attemted to free invalid extension struct\n");
-
- if (extensions->ext_array)
- vk_free(&instance->alloc, extensions->ext_array);
-}
-
-static bool
-is_extension_enabled(const VkExtensionProperties *extensions,
- size_t num_ext,
- const char *name)
-{
- assert(extensions && name);
-
- for (uint32_t i = 0; i < num_ext; i++) {
- if (strcmp(name, extensions[i].extensionName) == 0)
- return true;
- }
-
- return false;
-}
-
static const char *
get_chip_name(enum radeon_family family)
{
}
}
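+/* Build the Vulkan memory heaps and types (invisible VRAM, CPU-visible
+ * VRAM, GTT) from the queried GPU info.
+ */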
+static void
+radv_physical_device_init_mem_types(struct radv_physical_device *device)
+{
+ STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
+ uint64_t visible_vram_size = MIN2(device->rad_info.vram_size,
+ device->rad_info.vram_vis_size);
+
+ int vram_index = -1, visible_vram_index = -1, gart_index = -1;
+ device->memory_properties.memoryHeapCount = 0;
+ if (device->rad_info.vram_size - visible_vram_size > 0) {
+ vram_index = device->memory_properties.memoryHeapCount++;
+ device->memory_properties.memoryHeaps[vram_index] = (VkMemoryHeap) {
+ .size = device->rad_info.vram_size - visible_vram_size,
+ .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+ };
+ }
+ if (visible_vram_size) {
+ visible_vram_index = device->memory_properties.memoryHeapCount++;
+ device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap) {
+ .size = visible_vram_size,
+ .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+ };
+ }
+ if (device->rad_info.gart_size > 0) {
+ gart_index = device->memory_properties.memoryHeapCount++;
+ device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) {
+ .size = device->rad_info.gart_size,
+ .flags = 0,
+ };
+ }
+
+ STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
+ unsigned type_count = 0;
+ if (vram_index >= 0) {
+ device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM;
+ device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
+ .heapIndex = vram_index,
+ };
+ }
+ if (gart_index >= 0) {
+ device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
+ device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
+ .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+ .heapIndex = gart_index,
+ };
+ }
+ if (visible_vram_index >= 0) {
+ device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM_CPU_ACCESS;
+ device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+ .heapIndex = visible_vram_index,
+ };
+ }
+ if (gart_index >= 0) {
+ device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_CACHED;
+ device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
+ .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+ .heapIndex = gart_index,
+ };
+ }
+ device->memory_properties.memoryTypeCount = type_count;
+}
+
static VkResult
radv_physical_device_init(struct radv_physical_device *device,
struct radv_instance *instance,
fd = open(path, O_RDWR | O_CLOEXEC);
if (fd < 0)
- return VK_ERROR_INCOMPATIBLE_DRIVER;
+ return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
version = drmGetVersion(fd);
if (!version) {
device->local_fd = fd;
device->ws->query_info(device->ws, &device->rad_info);
- result = radv_init_wsi(device);
- if (result != VK_SUCCESS) {
- device->ws->destroy(device->ws);
- goto fail;
- }
+
+ device->name = get_chip_name(device->rad_info.family);
if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
- radv_finish_wsi(device);
device->ws->destroy(device->ws);
result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
"cannot generate UUID");
goto fail;
}
- result = radv_extensions_register(instance,
- &device->extensions,
- common_device_extensions,
- ARRAY_SIZE(common_device_extensions));
- if (result != VK_SUCCESS)
- goto fail;
-
- if (device->rad_info.chip_class >= VI && device->rad_info.max_se >= 2) {
- result = radv_extensions_register(instance,
- &device->extensions,
- rasterization_order_extension,
- ARRAY_SIZE(rasterization_order_extension));
- if (result != VK_SUCCESS)
- goto fail;
- }
+ /* These flags affect shader compilation. */
+ uint64_t shader_env_flags =
+ (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
+ (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0);
- if (device->rad_info.has_syncobj) {
- result = radv_extensions_register(instance,
- &device->extensions,
- ext_sema_device_extensions,
- ARRAY_SIZE(ext_sema_device_extensions));
- if (result != VK_SUCCESS)
- goto fail;
- }
+	/* The gpu id is already embedded in the uuid so we just pass the chip
+	 * name when creating the cache.
+	 */
+ char buf[VK_UUID_SIZE * 2 + 1];
+ disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
+ device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
- device->name = get_chip_name(device->rad_info.family);
 radv_get_driver_uuid(&device->driver_uuid);
radv_get_device_uuid(&device->rad_info, &device->device_uuid);
device->rbplus_allowed = device->rad_info.family == CHIP_STONEY;
}
+	/* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
+ * on SI.
+ */
+ device->has_clear_state = device->rad_info.chip_class >= CIK;
+
+ radv_physical_device_init_mem_types(device);
+
+ result = radv_init_wsi(device);
+ if (result != VK_SUCCESS) {
+ device->ws->destroy(device->ws);
+ goto fail;
+ }
+
return VK_SUCCESS;
fail:
static void
radv_physical_device_finish(struct radv_physical_device *device)
{
- radv_extensions_finish(device->instance, &device->extensions);
radv_finish_wsi(device);
device->ws->destroy(device->ws);
+ disk_cache_destroy(device->disk_cache);
close(device->local_fd);
}
{"vmfaults", RADV_DEBUG_VM_FAULTS},
{"zerovram", RADV_DEBUG_ZERO_VRAM},
{"syncshaders", RADV_DEBUG_SYNC_SHADERS},
+ {"nosisched", RADV_DEBUG_NO_SISCHED},
{NULL, 0}
};
return radv_perftest_options[id].string;
}
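+/* Apply per-application workarounds keyed on VkApplicationInfo::pApplicationName. */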
+static void
+radv_handle_per_app_options(struct radv_instance *instance,
+ const VkApplicationInfo *info)
+{
+ const char *name = info ? info->pApplicationName : NULL;
+
+ if (!name)
+ return;
+
+ if (!strcmp(name, "Talos - Linux - 32bit") ||
+ !strcmp(name, "Talos - Linux - 64bit")) {
+ /* Force enable LLVM sisched for Talos because it looks safe
+		 * and it gives a few more FPS.
+ */
+ instance->perftest_flags |= RADV_PERFTEST_SISCHED;
+ }
+}
+
VkResult radv_CreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
}
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
- if (!is_extension_enabled(instance_extensions,
- ARRAY_SIZE(instance_extensions),
- pCreateInfo->ppEnabledExtensionNames[i]))
+ const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
+ if (!radv_instance_extension_supported(ext_name))
return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
}
- instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!instance)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- memset(instance, 0, sizeof(*instance));
-
instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
if (pAllocator)
instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
radv_perftest_options);
+ radv_handle_per_app_options(instance, pCreateInfo->pApplicationInfo);
+
+ if (instance->debug_flags & RADV_DEBUG_NO_SISCHED) {
+		/* Disable sisched when the user requests it; this is mostly
+		 * useful when the driver force-enables sisched for the given
+		 * application.
+ */
+ instance->perftest_flags &= ~RADV_PERFTEST_SISCHED;
+ }
+
*pInstance = radv_instance_to_handle(instance);
return VK_SUCCESS;
max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
if (max_devices < 1)
- return VK_ERROR_INCOMPATIBLE_DRIVER;
+ return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
for (unsigned i = 0; i < (unsigned)max_devices; i++) {
if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceFeatures* pFeatures)
{
- RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
- bool is_gfx9 = pdevice->rad_info.chip_class >= GFX9;
memset(pFeatures, 0, sizeof(*pFeatures));
*pFeatures = (VkPhysicalDeviceFeatures) {
.fullDrawIndexUint32 = true,
.imageCubeArray = true,
.independentBlend = true,
- .geometryShader = !is_gfx9,
- .tessellationShader = !is_gfx9,
+ .geometryShader = true,
+ .tessellationShader = true,
.sampleRateShading = true,
.dualSrcBlend = true,
.logicOp = true,
};
*pProperties = (VkPhysicalDeviceProperties) {
- .apiVersion = VK_MAKE_VERSION(1, 0, 42),
+ .apiVersion = radv_physical_device_api_version(pdevice),
.driverVersion = vk_get_driver_version(),
.vendorID = ATI_VENDOR_ID,
.deviceID = pdevice->rad_info.pci_id,
{
RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
- STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
-
- pMemoryProperties->memoryTypeCount = RADV_MEM_TYPE_COUNT;
- pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM] = (VkMemoryType) {
- .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
- .heapIndex = RADV_MEM_HEAP_VRAM,
- };
- pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_WRITE_COMBINE] = (VkMemoryType) {
- .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
- .heapIndex = RADV_MEM_HEAP_GTT,
- };
- pMemoryProperties->memoryTypes[RADV_MEM_TYPE_VRAM_CPU_ACCESS] = (VkMemoryType) {
- .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
- .heapIndex = RADV_MEM_HEAP_VRAM_CPU_ACCESS,
- };
- pMemoryProperties->memoryTypes[RADV_MEM_TYPE_GTT_CACHED] = (VkMemoryType) {
- .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
- VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = RADV_MEM_HEAP_GTT,
- };
-
- STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
- uint64_t visible_vram_size = MIN2(physical_device->rad_info.vram_size,
- physical_device->rad_info.vram_vis_size);
-
- pMemoryProperties->memoryHeapCount = RADV_MEM_HEAP_COUNT;
- pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM] = (VkMemoryHeap) {
- .size = physical_device->rad_info.vram_size -
- visible_vram_size,
- .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
- };
- pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_VRAM_CPU_ACCESS] = (VkMemoryHeap) {
- .size = visible_vram_size,
- .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
- };
- pMemoryProperties->memoryHeaps[RADV_MEM_HEAP_GTT] = (VkMemoryHeap) {
- .size = physical_device->rad_info.gart_size,
- .flags = 0,
- };
+ *pMemoryProperties = physical_device->memory_properties;
}
void radv_GetPhysicalDeviceMemoryProperties2KHR(
&pMemoryProperties->memoryProperties);
}
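+/* Map VK_EXT_global_priority queue priorities onto the winsys context priorities. */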
+static enum radeon_ctx_priority
+radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoEXT *pObj)
+{
+ /* Default to MEDIUM when a specific global priority isn't requested */
+ if (!pObj)
+ return RADEON_CTX_PRIORITY_MEDIUM;
+
+ switch(pObj->globalPriority) {
+ case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
+ return RADEON_CTX_PRIORITY_REALTIME;
+ case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
+ return RADEON_CTX_PRIORITY_HIGH;
+ case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
+ return RADEON_CTX_PRIORITY_MEDIUM;
+ case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
+ return RADEON_CTX_PRIORITY_LOW;
+ default:
+ unreachable("Illegal global priority value");
+ return RADEON_CTX_PRIORITY_INVALID;
+ }
+}
+
static int
radv_queue_init(struct radv_device *device, struct radv_queue *queue,
- int queue_family_index, int idx)
+ uint32_t queue_family_index, int idx,
+ const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority)
{
queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
queue->device = device;
queue->queue_family_index = queue_family_index;
queue->queue_idx = idx;
+ queue->priority = radv_get_queue_global_priority(global_priority);
- queue->hw_ctx = device->ws->ctx_create(device->ws);
+ queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
if (!queue->hw_ctx)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
return VK_SUCCESS;
}
VkResult result;
struct radv_device *device;
+ bool keep_shader_info = false;
+
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
- if (!is_extension_enabled(physical_device->extensions.ext_array,
- physical_device->extensions.num_ext,
- pCreateInfo->ppEnabledExtensionNames[i]))
+ const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
+ if (!radv_physical_device_extension_supported(physical_device, ext_name))
return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
+
+ if (strcmp(ext_name, VK_AMD_SHADER_INFO_EXTENSION_NAME) == 0)
+ keep_shader_info = true;
}
/* Check enabled features */
}
}
- device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
- sizeof(*device), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
+ sizeof(*device), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- memset(device, 0, sizeof(*device));
-
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
device->instance = physical_device->instance;
device->physical_device = physical_device;
- device->debug_flags = device->instance->debug_flags;
-
device->ws = physical_device->ws;
if (pAllocator)
device->alloc = *pAllocator;
for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
uint32_t qfi = queue_create->queueFamilyIndex;
+ const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
+ vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
+
+ assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
device->queues[qfi] = vk_alloc(&device->alloc,
queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
device->queue_count[qfi] = queue_create->queueCount;
for (unsigned q = 0; q < queue_create->queueCount; q++) {
- result = radv_queue_init(device, &device->queues[qfi][q], qfi, q);
+ result = radv_queue_init(device, &device->queues[qfi][q], qfi, q, global_priority);
if (result != VK_SUCCESS)
goto fail;
}
device->physical_device->rad_info.max_se >= 2;
if (getenv("RADV_TRACE_FILE")) {
+ keep_shader_info = true;
+
if (!radv_init_trace(device))
goto fail;
}
+ device->keep_shader_info = keep_shader_info;
+
result = radv_device_init_meta(device);
if (result != VK_SUCCESS)
goto fail;
vk_free(&device->alloc, device);
}
-VkResult radv_EnumerateInstanceExtensionProperties(
- const char* pLayerName,
- uint32_t* pPropertyCount,
- VkExtensionProperties* pProperties)
-{
- if (pProperties == NULL) {
- *pPropertyCount = ARRAY_SIZE(instance_extensions);
- return VK_SUCCESS;
- }
-
- *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(instance_extensions));
- typed_memcpy(pProperties, instance_extensions, *pPropertyCount);
-
- if (*pPropertyCount < ARRAY_SIZE(instance_extensions))
- return VK_INCOMPLETE;
-
- return VK_SUCCESS;
-}
-
-VkResult radv_EnumerateDeviceExtensionProperties(
- VkPhysicalDevice physicalDevice,
- const char* pLayerName,
- uint32_t* pPropertyCount,
- VkExtensionProperties* pProperties)
-{
- RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
-
- if (pProperties == NULL) {
- *pPropertyCount = pdevice->extensions.num_ext;
- return VK_SUCCESS;
- }
-
- *pPropertyCount = MIN2(*pPropertyCount, pdevice->extensions.num_ext);
- typed_memcpy(pProperties, pdevice->extensions.ext_array, *pPropertyCount);
-
- if (*pPropertyCount < pdevice->extensions.num_ext)
- return VK_INCOMPLETE;
-
- return VK_SUCCESS;
-}
-
VkResult radv_EnumerateInstanceLayerProperties(
uint32_t* pPropertyCount,
VkLayerProperties* pProperties)
unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
unsigned max_offchip_buffers;
unsigned hs_offchip_param = 0;
+ uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
if (!queue->has_tess_rings) {
if (needs_tess_rings)
add_tess_rings = true;
scratch_size,
4096,
RADEON_DOMAIN_VRAM,
- RADEON_FLAG_NO_CPU_ACCESS);
+ ring_bo_flags);
if (!scratch_bo)
goto fail;
} else
compute_scratch_size,
4096,
RADEON_DOMAIN_VRAM,
- RADEON_FLAG_NO_CPU_ACCESS);
+ ring_bo_flags);
if (!compute_scratch_bo)
goto fail;
esgs_ring_size,
4096,
RADEON_DOMAIN_VRAM,
- RADEON_FLAG_NO_CPU_ACCESS);
+ ring_bo_flags);
if (!esgs_ring_bo)
goto fail;
} else {
gsvs_ring_size,
4096,
RADEON_DOMAIN_VRAM,
- RADEON_FLAG_NO_CPU_ACCESS);
+ ring_bo_flags);
if (!gsvs_ring_bo)
goto fail;
} else {
tess_factor_ring_size,
256,
RADEON_DOMAIN_VRAM,
- RADEON_FLAG_NO_CPU_ACCESS);
+ ring_bo_flags);
if (!tess_factor_ring_bo)
goto fail;
tess_offchip_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
tess_offchip_ring_size,
256,
RADEON_DOMAIN_VRAM,
- RADEON_FLAG_NO_CPU_ACCESS);
+ ring_bo_flags);
if (!tess_offchip_ring_bo)
goto fail;
} else {
size,
4096,
RADEON_DOMAIN_VRAM,
- RADEON_FLAG_CPU_ACCESS);
+ RADEON_FLAG_CPU_ACCESS|RADEON_FLAG_NO_INTERPROCESS_SHARING);
if (!descriptor_bo)
goto fail;
} else
dest_cs[i] = cs;
if (scratch_bo)
- queue->device->ws->cs_add_buffer(cs, scratch_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, scratch_bo, 8);
if (esgs_ring_bo)
- queue->device->ws->cs_add_buffer(cs, esgs_ring_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo, 8);
if (gsvs_ring_bo)
- queue->device->ws->cs_add_buffer(cs, gsvs_ring_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo, 8);
if (tess_factor_ring_bo)
- queue->device->ws->cs_add_buffer(cs, tess_factor_ring_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, tess_factor_ring_bo, 8);
if (tess_offchip_ring_bo)
- queue->device->ws->cs_add_buffer(cs, tess_offchip_ring_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, tess_offchip_ring_bo, 8);
if (descriptor_bo)
- queue->device->ws->cs_add_buffer(cs, descriptor_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo, 8);
if (descriptor_bo != queue->descriptor_bo) {
uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
}
if (descriptor_bo) {
- uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
- R_00B130_SPI_SHADER_USER_DATA_VS_0,
- R_00B230_SPI_SHADER_USER_DATA_GS_0,
- R_00B330_SPI_SHADER_USER_DATA_ES_0,
- R_00B430_SPI_SHADER_USER_DATA_HS_0,
- R_00B530_SPI_SHADER_USER_DATA_LS_0};
-
uint64_t va = radv_buffer_get_va(descriptor_bo);
-
- for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
- radeon_set_sh_reg_seq(cs, regs[i], 2);
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
+ if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
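+		/* On GFX9 the ES and LS hardware stages are merged into GS and
+		 * HS, so there are no separate ES/LS user-data registers to
+		 * program.
+		 */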
+ uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
+ R_00B130_SPI_SHADER_USER_DATA_VS_0,
+ R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
+ R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
+
+ for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
+ radeon_set_sh_reg_seq(cs, regs[i], 2);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ }
+ } else {
+ uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
+ R_00B130_SPI_SHADER_USER_DATA_VS_0,
+ R_00B230_SPI_SHADER_USER_DATA_GS_0,
+ R_00B330_SPI_SHADER_USER_DATA_ES_0,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0,
+ R_00B530_SPI_SHADER_USER_DATA_LS_0};
+
+ for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
+ radeon_set_sh_reg_seq(cs, regs[i], 2);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ }
}
}
uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
S_008F04_SWIZZLE_ENABLE(1);
- queue->device->ws->cs_add_buffer(cs, compute_scratch_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo, 8);
radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
radeon_emit(cs, scratch_va);
queue->device->ws->buffer_destroy(tess_factor_ring_bo);
if (tess_offchip_ring_bo && tess_offchip_ring_bo != queue->tess_offchip_ring_bo)
queue->device->ws->buffer_destroy(tess_offchip_ring_bo);
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
if (counts->syncobj_count) {
counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
if (!counts->syncobj)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (counts->sem_count) {
counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
if (!counts->sem) {
free(counts->syncobj);
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
if (sem->temp_syncobj) {
counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
- if (reset_temp) {
- /* after we wait on a temp import - drop it */
- sem->temp_syncobj = 0;
- }
}
else if (sem->syncobj)
counts->syncobj[syncobj_idx++] = sem->syncobj;
free(sem_info->signal.sem);
}
+
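+/* Destroy syncobjs that were imported with VK_SEMAPHORE_IMPORT_TEMPORARY_BIT
+ * and clear the handles, restoring the semaphores' permanent payloads.
+ */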
+static void radv_free_temp_syncobjs(struct radv_device *device,
+ int num_sems,
+ const VkSemaphore *sems)
+{
+ for (uint32_t i = 0; i < num_sems; i++) {
+ RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
+
+ if (sem->temp_syncobj) {
+ device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
+ sem->temp_syncobj = 0;
+ }
+ }
+}
+
VkResult radv_alloc_sem_info(struct radv_winsys_sem_info *sem_info,
int num_wait_sems,
const VkSemaphore *wait_sems,
}
}
+ radv_free_temp_syncobjs(queue->device,
+ pSubmits[i].waitSemaphoreCount,
+ pSubmits[i].pWaitSemaphores);
radv_free_sem_info(&sem_info);
free(cs_array);
}
pFD);
}
-VkResult radv_AllocateMemory(
- VkDevice _device,
- const VkMemoryAllocateInfo* pAllocateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDeviceMemory* pMem)
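+/* Common allocation path; radv_AllocateMemory below is a thin wrapper
+ * around this for the public entry point.
+ */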
+static VkResult radv_alloc_memory(struct radv_device *device,
+ const VkMemoryAllocateInfo* pAllocateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDeviceMemory* pMem)
{
- RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_device_memory *mem;
VkResult result;
enum radeon_bo_domain domain;
uint32_t flags = 0;
+ enum radv_mem_type mem_type_index = device->physical_device->mem_type_indices[pAllocateInfo->memoryTypeIndex];
assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
const VkMemoryDedicatedAllocateInfoKHR *dedicate_info =
vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
+ const struct wsi_memory_allocate_info *wsi_info =
+ vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
+
mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (mem == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ if (wsi_info && wsi_info->implicit_sync)
+ flags |= RADEON_FLAG_IMPLICIT_SYNC;
+
if (dedicate_info) {
mem->image = radv_image_from_handle(dedicate_info->image);
mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
if (import_info) {
assert(import_info->handleType ==
- VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
+ import_info->handleType ==
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
NULL, NULL);
if (!mem->bo) {
}
uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
- if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
- pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_CACHED)
+ if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
+ mem_type_index == RADV_MEM_TYPE_GTT_CACHED)
domain = RADEON_DOMAIN_GTT;
else
domain = RADEON_DOMAIN_VRAM;
- if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_VRAM)
+ if (mem_type_index == RADV_MEM_TYPE_VRAM)
flags |= RADEON_FLAG_NO_CPU_ACCESS;
else
flags |= RADEON_FLAG_CPU_ACCESS;
- if (pAllocateInfo->memoryTypeIndex == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
+ if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
flags |= RADEON_FLAG_GTT_WC;
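+	/* Memory that is neither dedicated nor imported will never be shared
+	 * with another process, so the winsys can skip interprocess-sharing
+	 * bookkeeping for it.
+	 */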
+ if (!dedicate_info && !import_info)
+ flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
+
mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
domain, flags);
result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
goto fail;
}
- mem->type_index = pAllocateInfo->memoryTypeIndex;
+ mem->type_index = mem_type_index;
out_success:
*pMem = radv_device_memory_to_handle(mem);
return result;
}
+VkResult radv_AllocateMemory(
+ VkDevice _device,
+ const VkMemoryAllocateInfo* pAllocateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDeviceMemory* pMem)
+{
+ RADV_FROM_HANDLE(radv_device, device, _device);
+ return radv_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
+}
+
void radv_FreeMemory(
VkDevice _device,
VkDeviceMemory _mem,
return VK_SUCCESS;
}
- return VK_ERROR_MEMORY_MAP_FAILED;
+ return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
}
void radv_UnmapMemory(
}
void radv_GetBufferMemoryRequirements(
- VkDevice device,
+ VkDevice _device,
VkBuffer _buffer,
VkMemoryRequirements* pMemoryRequirements)
{
+ RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
+ pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
pMemoryRequirements->alignment = 4096;
{
radv_GetBufferMemoryRequirements(device, pInfo->buffer,
&pMemoryRequirements->memoryRequirements);
-
+ RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
vk_foreach_struct(ext, pMemoryRequirements->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
VkMemoryDedicatedRequirementsKHR *req =
(VkMemoryDedicatedRequirementsKHR *) ext;
- req->requiresDedicatedAllocation = false;
+ req->requiresDedicatedAllocation = buffer->shareable;
req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
break;
}
}
void radv_GetImageMemoryRequirements(
- VkDevice device,
+ VkDevice _device,
VkImage _image,
VkMemoryRequirements* pMemoryRequirements)
{
+ RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_image, image, _image);
- pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
+ pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
pMemoryRequirements->size = image->size;
pMemoryRequirements->alignment = image->alignment;
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!fence)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- memset(fence, 0, sizeof(*fence));
fence->submitted = false;
fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
fence->fence = device->ws->create_fence();
if (!fence->fence) {
vk_free2(&device->alloc, pAllocator, fence);
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
*pFence = radv_fence_to_handle(fence);
sizeof(*sem), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!sem)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
sem->temp_syncobj = 0;
/* create a syncobject if we are going to export this semaphore */
int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
if (ret) {
vk_free2(&device->alloc, pAllocator, sem);
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
sem->sem = NULL;
} else {
sem->sem = device->ws->create_sem(device->ws);
if (!sem->sem) {
vk_free2(&device->alloc, pAllocator, sem);
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
sem->syncobj = 0;
}
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!event)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
event->bo = device->ws->buffer_create(device->ws, 8, 8,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS);
+ RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
if (!event->bo) {
vk_free2(&device->alloc, pAllocator, event);
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
event->map = (uint64_t*)device->ws->buffer_map(event->bo);
buffer->offset = 0;
buffer->flags = pCreateInfo->flags;
+ buffer->shareable = vk_find_struct_const(pCreateInfo->pNext,
+ EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR) != NULL;
+
if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
buffer->bo = device->ws->buffer_create(device->ws,
align64(buffer->size, 4096),
cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
- cb->micro_tile_mode = iview->image->surface.micro_tile_mode;
if (iview->image->fmask.size) {
if (device->physical_device->rad_info.chip_class >= CIK)
}
if (iview->image->cmask.size &&
- !(device->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
+ !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
if (radv_vi_dcc_enabled(iview->image, iview->base_mip))
cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->extent.width - 1) |
S_028C68_MIP0_HEIGHT(iview->extent.height - 1) |
S_028C68_MAX_MIP(iview->image->info.levels - 1);
-
- cb->gfx9_epitch = S_0287A0_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
-
}
}
ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
S_02801C_Y_MAX(iview->image->info.height - 1);
- /* Only use HTILE for the first level. */
- if (iview->image->surface.htile_size && !level) {
+ if (radv_htile_enabled(iview->image, level)) {
ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
if (iview->image->tc_compatible_htile) {
S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
- if (iview->image->surface.htile_size && !level) {
+ if (radv_htile_enabled(iview->image, level)) {
ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
if (!iview->image->surface.has_stencil &&
S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
S_008F38_MIP_POINT_PRECLAMP(0) |
- S_008F38_DISABLE_LSB_CEIL(1) |
+ S_008F38_DISABLE_LSB_CEIL(device->physical_device->rad_info.chip_class <= VI) |
S_008F38_FILTER_PREC_FIX(1) |
S_008F38_ANISO_OVERRIDE(is_vi));
sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
- /* We support only one handle type. */
+ /* At the moment, we support only the below handle types. */
assert(pGetFdInfo->handleType ==
- VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
+ pGetFdInfo->handleType ==
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
bool ret = radv_get_memory_fd(device, memory, pFD);
if (ret == false)
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
return VK_SUCCESS;
}
int fd,
VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
- /* The valid usage section for this function says:
- *
- * "handleType must not be one of the handle types defined as opaque."
- *
- * Since we only handle opaque handles for now, there are no FD properties.
- */
- return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
+ switch (handleType) {
+	case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
+ pMemoryFdProperties->memoryTypeBits = (1 << RADV_MEM_TYPE_COUNT) - 1;
+ return VK_SUCCESS;
+
+ default:
+ /* The valid usage section for this function says:
+ *
+ * "handleType must not be one of the handle types defined as
+ * opaque."
+ *
+ * So opaque handle types fall into the default "unsupported" case.
+ */
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ }
}
VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
uint32_t syncobj_handle = 0;
+ uint32_t *syncobj_dst = NULL;
assert(pImportSemaphoreFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
int ret = device->ws->import_syncobj(device->ws, pImportSemaphoreFdInfo->fd, &syncobj_handle);
if (ret != 0)
- return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
- sem->temp_syncobj = syncobj_handle;
+ syncobj_dst = &sem->temp_syncobj;
} else {
- sem->syncobj = syncobj_handle;
+ syncobj_dst = &sem->syncobj;
}
+
+ if (*syncobj_dst)
+ device->ws->destroy_syncobj(device->ws, *syncobj_dst);
+
+ *syncobj_dst = syncobj_handle;
close(pImportSemaphoreFdInfo->fd);
return VK_SUCCESS;
}