break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR: {
+ VkPhysicalDeviceBufferDeviceAddressFeaturesKHR *features = (void *)ext;
+ features->bufferDeviceAddress = pdevice->has_a64_buffer_access;
+ features->bufferDeviceAddressCaptureReplay =
+ pdevice->has_a64_buffer_access;
+ features->bufferDeviceAddressMultiDevice = false;
+ break;
+ }
+
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: {
VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *features =
(VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *)ext;
anv_vma_alloc(struct anv_device *device, struct anv_bo *bo,
uint64_t client_address)
{
+ const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+ const struct gen_device_info *devinfo = &pdevice->info;
+ /* Gen12 CCS surface addresses need to be 64K aligned. We have no way of
+ * telling what this allocation is for so pick the largest alignment.
+ */
+ const uint32_t vma_alignment =
+ devinfo->gen >= 12 ? (64 * 1024) : (4 * 1024);
+
if (!(bo->flags & EXEC_OBJECT_PINNED)) {
assert(!(bo->has_client_visible_address));
return true;
bo->offset = gen_canonical_address(client_address);
}
} else {
- uint64_t addr = util_vma_heap_alloc(&device->vma_cva, bo->size, 4096);
+ uint64_t addr =
+ util_vma_heap_alloc(&device->vma_cva, bo->size, vma_alignment);
if (addr) {
bo->offset = gen_canonical_address(addr);
assert(addr == gen_48b_address(bo->offset));
assert(client_address == 0);
if (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) {
- uint64_t addr = util_vma_heap_alloc(&device->vma_hi, bo->size, 4096);
+ uint64_t addr =
+ util_vma_heap_alloc(&device->vma_hi, bo->size, vma_alignment);
if (addr) {
bo->offset = gen_canonical_address(addr);
assert(addr == gen_48b_address(bo->offset));
}
if (bo->offset == 0) {
- uint64_t addr = util_vma_heap_alloc(&device->vma_lo, bo->size, 4096);
+ uint64_t addr =
+ util_vma_heap_alloc(&device->vma_lo, bo->size, vma_alignment);
if (addr) {
bo->offset = gen_canonical_address(addr);
assert(addr == gen_48b_address(bo->offset));
enum anv_bo_alloc_flags alloc_flags = 0;
- const struct wsi_memory_allocate_info *wsi_info =
- vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
- if (wsi_info && wsi_info->implicit_sync) {
- /* We need to set the WRITE flag on window system buffers so that GEM
- * will know we're writing to them and synchronize uses on other rings
- * (eg if the display server uses the blitter ring).
- */
- alloc_flags |= ANV_BO_ALLOC_IMPLICIT_SYNC |
- ANV_BO_ALLOC_IMPLICIT_WRITE;
+ const VkExportMemoryAllocateInfo *export_info = NULL;
+ const VkImportAndroidHardwareBufferInfoANDROID *ahw_import_info = NULL;
+ const VkImportMemoryFdInfoKHR *fd_info = NULL;
+ const VkImportMemoryHostPointerInfoEXT *host_ptr_info = NULL;
+ const VkMemoryDedicatedAllocateInfo *dedicated_info = NULL;
+ VkMemoryAllocateFlags vk_flags = 0;
+ uint64_t client_address = 0;
+
+ vk_foreach_struct_const(ext, pAllocateInfo->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+ export_info = (void *)ext;
+ break;
+
+ case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
+ ahw_import_info = (void *)ext;
+ break;
+
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
+ fd_info = (void *)ext;
+ break;
+
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
+ host_ptr_info = (void *)ext;
+ break;
+
+ case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO: {
+ const VkMemoryAllocateFlagsInfo *flags_info = (void *)ext;
+ vk_flags = flags_info->flags;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+ dedicated_info = (void *)ext;
+ break;
+
+ case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR: {
+ const VkMemoryOpaqueCaptureAddressAllocateInfoKHR *addr_info =
+ (const VkMemoryOpaqueCaptureAddressAllocateInfoKHR *)ext;
+ client_address = addr_info->opaqueCaptureAddress;
+ break;
+ }
+
+ default:
+ anv_debug_ignored_stype(ext->sType);
+ break;
+ }
}
- const VkExportMemoryAllocateInfo *export_info =
- vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
+ if (vk_flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR)
+ alloc_flags |= ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS;
/* Check if we need to support Android HW buffer export. If so,
* create AHardwareBuffer and import memory from it.
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)
android_export = true;
- /* Android memory import. */
- const struct VkImportAndroidHardwareBufferInfoANDROID *ahw_import_info =
- vk_find_struct_const(pAllocateInfo->pNext,
- IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID);
-
if (ahw_import_info) {
result = anv_import_ahw_memory(_device, mem, ahw_import_info);
if (result != VK_SUCCESS)
goto success;
}
- const VkImportMemoryFdInfoKHR *fd_info =
- vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
-
/* The Vulkan spec permits handleType to be 0, in which case the struct is
* ignored.
*/
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
result = anv_device_import_bo(device, fd_info->fd, alloc_flags,
- 0 /* client_address */, &mem->bo);
+ client_address, &mem->bo);
if (result != VK_SUCCESS)
goto fail;
goto success;
}
- const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
- vk_find_struct_const(pAllocateInfo->pNext,
- IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
if (host_ptr_info && host_ptr_info->handleType) {
if (host_ptr_info->handleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT) {
host_ptr_info->pHostPointer,
pAllocateInfo->allocationSize,
alloc_flags,
- 0 /* client_address */,
+ client_address,
&mem->bo);
-
if (result != VK_SUCCESS)
goto fail;
alloc_flags |= ANV_BO_ALLOC_EXTERNAL;
result = anv_device_alloc_bo(device, pAllocateInfo->allocationSize,
- alloc_flags, 0 /* explicit_address */,
- &mem->bo);
+ alloc_flags, client_address, &mem->bo);
if (result != VK_SUCCESS)
goto fail;
- const VkMemoryDedicatedAllocateInfo *dedicated_info =
- vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO);
if (dedicated_info && dedicated_info->image != VK_NULL_HANDLE) {
ANV_FROM_HANDLE(anv_image, image, dedicated_info->image);
VkBuffer* pBuffer)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_physical_device *pdevice = &device->instance->physicalDevice;
struct anv_buffer *buffer;
+ /* Don't allow creating buffers bigger than our address space. The real
+ * issue here is that we may align up the buffer size and we don't want
+ * doing so to cause roll-over. However, no one has any business
+ * allocating a buffer larger than our GTT size.
+ */
+ if (pCreateInfo->size > pdevice->gtt_size)
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
vk_free2(&device->alloc, pAllocator, buffer);
}
-VkDeviceAddress anv_GetBufferDeviceAddressEXT(
+/* vkGetBufferDeviceAddressKHR: return the GPU virtual address of a buffer
+ * so shaders can access it via VK_KHR_buffer_device_address. The address is
+ * only stable if the underlying BO is soft-pinned (EXEC_OBJECT_PINNED),
+ * which the assert below enforces.
+ */
+VkDeviceAddress anv_GetBufferDeviceAddressKHR(
VkDevice device,
- const VkBufferDeviceAddressInfoEXT* pInfo)
+ const VkBufferDeviceAddressInfoKHR* pInfo)
{
ANV_FROM_HANDLE(anv_buffer, buffer, pInfo->buffer);
+ /* Querying the address of a buffer with no memory bound is invalid usage;
+ * a null anv_address here would mean vkBindBufferMemory was never called.
+ */
+ assert(!anv_address_is_null(buffer->address));
assert(buffer->address.bo->flags & EXEC_OBJECT_PINNED);
return anv_address_physical(buffer->address);
}
+/* vkGetBufferOpaqueCaptureAddressKHR: opaque capture address for buffer
+ * capture/replay. Returning 0 is permitted by the spec; presumably anv
+ * relies on the device-memory opaque capture address (see
+ * anv_GetDeviceMemoryOpaqueCaptureAddressKHR) for replay instead of
+ * per-buffer addresses — NOTE(review): confirm against the extension spec.
+ */
+uint64_t anv_GetBufferOpaqueCaptureAddressKHR(
+ VkDevice device,
+ const VkBufferDeviceAddressInfoKHR* pInfo)
+{
+ return 0;
+}
+
+/* vkGetDeviceMemoryOpaqueCaptureAddressKHR: return the address an app can
+ * pass back via VkMemoryOpaqueCaptureAddressAllocateInfoKHR on a future
+ * allocation to replay a capture at the same GPU address.
+ */
+uint64_t anv_GetDeviceMemoryOpaqueCaptureAddressKHR(
+ VkDevice device,
+ const VkDeviceMemoryOpaqueCaptureAddressInfoKHR* pInfo)
+{
+ ANV_FROM_HANDLE(anv_device_memory, memory, pInfo->memory);
+
+ /* Only soft-pinned BOs allocated with a client-visible address (i.e. with
+ * VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR) have a stable, replayable
+ * address.
+ */
+ assert(memory->bo->flags & EXEC_OBJECT_PINNED);
+ assert(memory->bo->has_client_visible_address);
+
+ /* bo->offset is stored in canonical (sign-extended) form; strip the
+ * sign extension back to a plain 48-bit address for the app.
+ */
+ return gen_48b_address(memory->bo->offset);
+}
+
void
anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
enum isl_format format,