From: Bas Nieuwenhuizen
Date: Sun, 30 Aug 2020 23:57:36 +0000 (+0200)
Subject: radv: Avoid deadlock on bo_list.
X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=commitdiff_plain;h=61b714a42ee676fe03b383c0caf050169e404c7d;hp=6b75262941b55960e2f73d93f85020fa6c9c2d2f

radv: Avoid deadlock on bo_list.

With the kernel timeline syncobj changes, the kernel submits do not
necessarily happen in global vkQueueSubmit order. That should be fine,
since we added the appropriate waits for it (see
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT in the winsys).

However, all kernel submissions take a lock on the bo_list mutex, and
since we do the wait in the winsys, we wait while holding the bo_list
mutex. This means that as soon as a wait and a signal submission are
out of order, we deadlock on the bo_list mutex and the wait.

The solution is to use a shared reader lock during kernel submission,
as submission only needs read access to the list.

Fixes: 6bc5ce7a91d "radv: Add timeline syncobj for timeline semaphores."
Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/3446
Reviewed-by: Samuel Pitoiset
Part-of:
---

diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 90167a9d603..ff62890217b 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -2372,7 +2372,7 @@ radv_queue_finish(struct radv_queue *queue)
 static void
 radv_bo_list_init(struct radv_bo_list *bo_list)
 {
-	pthread_mutex_init(&bo_list->mutex, NULL);
+	pthread_rwlock_init(&bo_list->rwlock, NULL);
 	bo_list->list.count = bo_list->capacity = 0;
 	bo_list->list.bos = NULL;
 }
@@ -2381,7 +2381,7 @@ static void
 radv_bo_list_finish(struct radv_bo_list *bo_list)
 {
 	free(bo_list->list.bos);
-	pthread_mutex_destroy(&bo_list->mutex);
+	pthread_rwlock_destroy(&bo_list->rwlock);
 }
 
 VkResult radv_bo_list_add(struct radv_device *device,
@@ -2395,13 +2395,13 @@ VkResult radv_bo_list_add(struct radv_device *device,
 	if (unlikely(!device->use_global_bo_list))
 		return VK_SUCCESS;
 
-	pthread_mutex_lock(&bo_list->mutex);
+	pthread_rwlock_wrlock(&bo_list->rwlock);
 	if (bo_list->list.count == bo_list->capacity) {
 		unsigned capacity = MAX2(4, bo_list->capacity * 2);
 		void *data = realloc(bo_list->list.bos,
 				     capacity * sizeof(struct radeon_winsys_bo*));
 		if (!data) {
-			pthread_mutex_unlock(&bo_list->mutex);
+			pthread_rwlock_unlock(&bo_list->rwlock);
 			return VK_ERROR_OUT_OF_HOST_MEMORY;
 		}
 
@@ -2410,7 +2410,7 @@ VkResult radv_bo_list_add(struct radv_device *device,
 	}
 
 	bo_list->list.bos[bo_list->list.count++] = bo;
-	pthread_mutex_unlock(&bo_list->mutex);
+	pthread_rwlock_unlock(&bo_list->rwlock);
 	return VK_SUCCESS;
 }
 
@@ -2425,7 +2425,7 @@ void radv_bo_list_remove(struct radv_device *device,
 	if (unlikely(!device->use_global_bo_list))
 		return;
 
-	pthread_mutex_lock(&bo_list->mutex);
+	pthread_rwlock_wrlock(&bo_list->rwlock);
 	/* Loop the list backwards so we find the most recently added
 	 * memory first. */
 	for(unsigned i = bo_list->list.count; i-- > 0;) {
@@ -2435,7 +2435,7 @@ void radv_bo_list_remove(struct radv_device *device,
 			break;
 		}
 	}
-	pthread_mutex_unlock(&bo_list->mutex);
+	pthread_rwlock_unlock(&bo_list->rwlock);
 }
 
 static void
@@ -4549,7 +4549,7 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission,
 			sem_info.cs_emit_signal = j + advance == submission->cmd_buffer_count;
 
 			if (unlikely(queue->device->use_global_bo_list)) {
-				pthread_mutex_lock(&queue->device->bo_list.mutex);
+				pthread_rwlock_rdlock(&queue->device->bo_list.rwlock);
 				bo_list = &queue->device->bo_list.list;
 			}
 
@@ -4559,7 +4559,7 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission,
 							      can_patch, base_fence);
 
 			if (unlikely(queue->device->use_global_bo_list))
-				pthread_mutex_unlock(&queue->device->bo_list.mutex);
+				pthread_rwlock_unlock(&queue->device->bo_list.rwlock);
 
 			if (result != VK_SUCCESS)
 				goto fail;
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index bb58fa700fe..90012cdf45a 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -741,7 +741,7 @@ struct radv_queue {
 struct radv_bo_list {
 	struct radv_winsys_bo_list list;
 	unsigned capacity;
-	pthread_mutex_t mutex;
+	pthread_rwlock_t rwlock;
 };
 
 VkResult radv_bo_list_add(struct radv_device *device,
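
For reference, the locking pattern the patch switches to can be shown with a
minimal standalone sketch (this is not RADV code; the demo_* names and the
simplified list are made up for illustration): functions that mutate the list
take the write lock, while queue submission only takes a shared read lock, so
two submissions can hold the lock at the same time and one can wait for the
other's syncobj without deadlocking on the list lock.

/* Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>

struct demo_bo_list {
	pthread_rwlock_t rwlock;
	unsigned count;          /* stands in for the real BO array */
};

static void demo_bo_list_add(struct demo_bo_list *l)
{
	/* Exclusive lock: we modify the list, so no readers may run. */
	pthread_rwlock_wrlock(&l->rwlock);
	l->count++;
	pthread_rwlock_unlock(&l->rwlock);
}

static void demo_queue_submit(struct demo_bo_list *l)
{
	/* Shared lock: submission only reads the list, so another
	 * submission may hold the read lock concurrently, e.g. while
	 * waiting on a syncobj that the other submission will signal. */
	pthread_rwlock_rdlock(&l->rwlock);
	printf("submitting with %u BOs\n", l->count);
	pthread_rwlock_unlock(&l->rwlock);
}

int main(void)
{
	struct demo_bo_list list = { .count = 0 };

	pthread_rwlock_init(&list.rwlock, NULL);
	demo_bo_list_add(&list);
	demo_queue_submit(&list);
	pthread_rwlock_destroy(&list.rwlock);
	return 0;
}

The key point is that pthread_rwlock_rdlock admits multiple readers at once,
so a submission that blocks waiting for a not-yet-submitted syncobj no longer
prevents the out-of-order signaling submission from acquiring the lock.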