radv: track memory heaps usage if overallocation is explicitly disallowed
authorSamuel Pitoiset <samuel.pitoiset@gmail.com>
Tue, 28 Apr 2020 11:10:56 +0000 (13:10 +0200)
committerMarge Bot <eric+marge@anholt.net>
Tue, 28 Apr 2020 21:03:26 +0000 (21:03 +0000)
By default, RADV supports overallocation in the sense that it doesn't
reject an allocation if the target heap is full.

With VK_AMD_memory_overallocation_behavior, apps can disable overallocation
and the driver should account for all allocations explicitly made by
the application, and reject allocations when the heap is full.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4785>

src/amd/vulkan/radv_device.c
src/amd/vulkan/radv_private.h

index dd1178025b58415a6ed86aba5688169045d9fb69..cacd47f269f6aacc67dc903ed2dbd017cdc7fcf9 100644 (file)
@@ -2887,6 +2887,7 @@ VkResult radv_CreateDevice(
 
        bool keep_shader_info = false;
        bool robust_buffer_access = false;
+       bool overallocation_disallowed = false;
 
        /* Check enabled features */
        if (pCreateInfo->pEnabledFeatures) {
@@ -2912,6 +2913,12 @@ VkResult radv_CreateDevice(
                                robust_buffer_access = true;
                        break;
                }
+               case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: {
+                       const VkDeviceMemoryOverallocationCreateInfoAMD *overallocation = (const void *)ext;
+                       if (overallocation->overallocationBehavior == VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD)
+                               overallocation_disallowed = true;
+                       break;
+               }
                default:
                        break;
                }
@@ -2962,6 +2969,9 @@ VkResult radv_CreateDevice(
        mtx_init(&device->shader_slab_mutex, mtx_plain);
        list_inithead(&device->shader_slabs);
 
+       device->overallocation_disallowed = overallocation_disallowed;
+       mtx_init(&device->overallocation_mutex, mtx_plain);
+
        radv_bo_list_init(&device->bo_list);
 
        for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
@@ -5050,6 +5060,12 @@ static void radv_free_memory(struct radv_device *device,
 #endif
 
        if (mem->bo) {
+               if (device->overallocation_disallowed) {
+                       mtx_lock(&device->overallocation_mutex);
+                       device->allocated_memory_size[mem->heap_index] -= mem->alloc_size;
+                       mtx_unlock(&device->overallocation_mutex);
+               }
+
                radv_bo_list_remove(device, mem->bo);
                device->ws->buffer_destroy(mem->bo);
                mem->bo = NULL;
@@ -5159,6 +5175,9 @@ static VkResult radv_alloc_memory(struct radv_device *device,
                }
        } else {
                uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
+               uint32_t heap_index;
+
+               heap_index = device->physical_device->memory_properties.memoryTypes[pAllocateInfo->memoryTypeIndex].heapIndex;
                domain = device->physical_device->memory_domains[pAllocateInfo->memoryTypeIndex];
                flags = device->physical_device->memory_flags[pAllocateInfo->memoryTypeIndex];
 
@@ -5169,13 +5188,35 @@ static VkResult radv_alloc_memory(struct radv_device *device,
                        }
                }
 
+               if (device->overallocation_disallowed) {
+                       uint64_t total_size =
+                               device->physical_device->memory_properties.memoryHeaps[heap_index].size;
+
+                       mtx_lock(&device->overallocation_mutex);
+                       if (device->allocated_memory_size[heap_index] + alloc_size > total_size) {
+                               mtx_unlock(&device->overallocation_mutex);
+                               result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+                               goto fail;
+                       }
+                       device->allocated_memory_size[heap_index] += alloc_size;
+                       mtx_unlock(&device->overallocation_mutex);
+               }
+
                mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
                                                    domain, flags, priority);
 
                if (!mem->bo) {
+                       if (device->overallocation_disallowed) {
+                               mtx_lock(&device->overallocation_mutex);
+                               device->allocated_memory_size[heap_index] -= alloc_size;
+                               mtx_unlock(&device->overallocation_mutex);
+                       }
                        result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
                        goto fail;
                }
+
+               mem->heap_index = heap_index;
+               mem->alloc_size = alloc_size;
        }
 
        if (!wsi_info) {
index f677de6943727b13255ac97e4c676fd0081be318..ec4b45235c637afed041a8cb4f30bc4d98a82a67 100644 (file)
@@ -860,6 +860,11 @@ struct radv_device {
        void *thread_trace_ptr;
        uint32_t thread_trace_buffer_size;
        int thread_trace_start_frame;
+
+       /* Overallocation. */
+       bool overallocation_disallowed;
+       uint64_t allocated_memory_size[VK_MAX_MEMORY_HEAPS];
+       mtx_t overallocation_mutex;
 };
 
 struct radv_device_memory {
@@ -867,6 +872,8 @@ struct radv_device_memory {
        /* for dedicated allocations */
        struct radv_image                            *image;
        struct radv_buffer                           *buffer;
+       uint32_t                                     heap_index;
+       uint64_t                                     alloc_size;
        void *                                       map;
        void *                                       user_ptr;