Which can happen if we have too many mmaps active in the process.
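
For context: on Linux, mmap() fails with ENOMEM once the process
reaches the vm.max_map_count limit (65530 by default), and the winsys
buffer_map() then returns NULL. A minimal sketch of that failure mode
(illustration only, not part of the patch; assumes default limits):

    /* Alternating protections prevent VMA merging, so each mapping
     * consumes one map entry until mmap() fails with ENOMEM. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            for (unsigned n = 0; ; n++) {
                    int prot = (n & 1) ? PROT_READ
                                       : PROT_READ | PROT_WRITE;
                    void *p = mmap(NULL, 4096, prot,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                    if (p == MAP_FAILED) {
                            printf("mmap #%u: %s\n", n, strerror(errno));
                            return 1;
                    }
            }
    }
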
CC: <mesa-stable@lists.freedesktop.org>
Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5578>
        vk_free2(&device->vk.alloc, NULL, set);
 }
 
+static void radv_destroy_descriptor_pool(struct radv_device *device,
+                                         const VkAllocationCallbacks *pAllocator,
+                                         struct radv_descriptor_pool *pool)
+{
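+       /* Sets suballocated from host_memory_base are freed with the pool
+        * itself; only individually allocated sets need explicit destroy. */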
+       if (!pool->host_memory_base) {
+               for(int i = 0; i < pool->entry_count; ++i) {
+                       radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
+               }
+       }
+
+       if (pool->bo)
+               device->ws->buffer_destroy(pool->bo);
+
+       vk_object_base_finish(&pool->base);
+       vk_free2(&device->vk.alloc, pAllocator, pool);
+}
+
 VkResult radv_CreateDescriptorPool(
        VkDevice                                    _device,
        const VkDescriptorPoolCreateInfo*           pCreateInfo,
                                                     RADEON_FLAG_READ_ONLY |
                                                     RADEON_FLAG_32BIT,
                                                     RADV_BO_PRIORITY_DESCRIPTOR);
+               if (!pool->bo) {
+                       radv_destroy_descriptor_pool(device, pAllocator, pool);
+                       return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+               }
                pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
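+               /* buffer_map() can fail, e.g. with too many active mmaps. */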
+               if (!pool->mapped_ptr) {
+                       radv_destroy_descriptor_pool(device, pAllocator, pool);
+                       return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+               }
        }
        pool->size = bo_size;
        pool->max_entry_count = pCreateInfo->maxSets;
        if (!pool)
                return;
 
-       if (!pool->host_memory_base) {
-               for(int i = 0; i < pool->entry_count; ++i) {
-                       radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
-               }
-       }
-
-       if (pool->bo)
-               device->ws->buffer_destroy(pool->bo);
-
-       vk_object_base_finish(&pool->base);
-       vk_free2(&device->vk.alloc, pAllocator, pool);
+       radv_destroy_descriptor_pool(device, pAllocator, pool);
 }
 
 VkResult radv_ResetDescriptorPool(
 
 
        device->border_color_data.colors_gpu_ptr =
                device->ws->buffer_map(device->border_color_data.bo);
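+       /* Mapping can fail like any other mmap; report OOM to the caller. */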
+       if (!device->border_color_data.colors_gpu_ptr)
+               return vk_error(device->physical_device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
        pthread_mutex_init(&device->border_color_data.mutex, NULL);
 
        return VK_SUCCESS;
 
        if (descriptor_bo != queue->descriptor_bo) {
                uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
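+               /* Jump to the shared cleanup path if the map fails. */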
+               if (!map)
+                       goto fail;
 
                if (scratch_bo) {
                        uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
        return VK_SUCCESS;
 }
 
-
+static void radv_destroy_event(struct radv_device *device,
+                               const VkAllocationCallbacks* pAllocator,
+                               struct radv_event *event)
+{
+       device->ws->buffer_destroy(event->bo);
+       vk_object_base_finish(&event->base);
+       vk_free2(&device->vk.alloc, pAllocator, event);
+}
 
 VkResult radv_CreateEvent(
        VkDevice                                    _device,
        }
 
        event->map = (uint64_t*)device->ws->buffer_map(event->bo);
+       if (!event->map) {
+               radv_destroy_event(device, pAllocator, event);
+               return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+       }
 
        *pEvent = radv_event_to_handle(event);
 
 
        if (!event)
                return;
-       device->ws->buffer_destroy(event->bo);
-       vk_object_base_finish(&event->base);
-       vk_free2(&device->vk.alloc, pAllocator, event);
+
+       radv_destroy_event(device, pAllocator, event);
 }
 
 VkResult radv_GetEventStatus(
 
                                             (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
                                                     0 : RADEON_FLAG_READ_ONLY),
                                             RADV_BO_PRIORITY_SHADER);
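+       /* BO creation can fail under memory pressure. */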
+       if (!slab->bo) {
+               free(slab);
+               return NULL;
+       }
+
        slab->ptr = (char*)device->ws->buffer_map(slab->bo);
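+       /* Unwind the BO on map failure so the slab is not leaked. */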
+       if (!slab->ptr) {
+               device->ws->buffer_destroy(slab->bo);
+               free(slab);
+               return NULL;
+       }
+
        list_inithead(&slab->shaders);
 
        mtx_lock(&device->shader_slab_mutex);
        }
 
        void *dest_ptr = radv_alloc_shader_memory(device, variant);
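+       /* On failure, close the rtld binary before freeing the variant. */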
+       if (!dest_ptr) {
+               if (binary->type == RADV_BINARY_TYPE_RTLD)
+                       ac_rtld_close(&rtld_binary);
+               free(variant);
+               return NULL;
+       }
 
        if (binary->type == RADV_BINARY_TYPE_RTLD) {
                struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;