From: Thomas Hellstrom Date: Thu, 15 Sep 2016 11:18:13 +0000 (+0200) Subject: winsys/svga: Resolve command submission buffer contention v3 X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=0864f9c77a16aefa2512d47c5cc4089c3b6fb30d;p=mesa.git winsys/svga: Resolve command submission buffer contention v3 If two contexts wanted to access the same buffer at the same time, it would end up on two validation lists simultaneously, which might cause a PIPE_ERROR_RETRY when trying to validate it from one context while the other context already had it validated but not yet fenced. In that situation we could spin until the error goes away, or apply various more or less expensive locking schemes to save cpu. Here we use a scheme that briefly locks after fencing but avoids locking on validation in the non-contended case. v2: Make sure we broadcast not only on releasing buffers after fencing, but also after releasing buffers in the pb_validate_validate error path. v3: Don't broadcast on PIPE_ERROR_RETRY because that would increase the chance of starvation. Signed-off-by: Thomas Hellstrom --- diff --git a/src/gallium/winsys/svga/drm/vmw_context.c b/src/gallium/winsys/svga/drm/vmw_context.c index 002994e9dc9..00c401a0174 100644 --- a/src/gallium/winsys/svga/drm/vmw_context.c +++ b/src/gallium/winsys/svga/drm/vmw_context.c @@ -179,11 +179,36 @@ vmw_swc_flush(struct svga_winsys_context *swc, struct pipe_fence_handle **pfence) { struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc); + struct vmw_winsys_screen *vws = vswc->vws; struct pipe_fence_handle *fence = NULL; unsigned i; enum pipe_error ret; + /* + * If we hit a retry, lock the mutex and retry immediately. + * If we then still hit a retry, sleep until another thread + * wakes us up after it has released its buffers from the + * validate list. + * + * If we hit another error condition, we still need to broadcast since + * pb_validate_validate releases validated buffers in its error path. 
+ */ + ret = pb_validate_validate(vswc->validate); + if (ret != PIPE_OK) { + pipe_mutex_lock(vws->cs_mutex); + while (ret == PIPE_ERROR_RETRY) { + ret = pb_validate_validate(vswc->validate); + if (ret == PIPE_ERROR_RETRY) { + pipe_condvar_wait(vws->cs_cond, vws->cs_mutex); + } + } + if (ret != PIPE_OK) { + pipe_condvar_broadcast(vws->cs_cond); + } + pipe_mutex_unlock(vws->cs_mutex); + } + assert(ret == PIPE_OK); if(ret == PIPE_OK) { @@ -210,7 +235,7 @@ vmw_swc_flush(struct svga_winsys_context *swc, } if (vswc->command.used || pfence != NULL) - vmw_ioctl_command(vswc->vws, + vmw_ioctl_command(vws, vswc->base.cid, 0, vswc->command.buffer, @@ -218,6 +243,9 @@ vmw_swc_flush(struct svga_winsys_context *swc, &fence); pb_validate_fence(vswc->validate, fence); + pipe_mutex_lock(vws->cs_mutex); + pipe_condvar_broadcast(vws->cs_cond); + pipe_mutex_unlock(vws->cs_mutex); } vswc->command.used = 0; diff --git a/src/gallium/winsys/svga/drm/vmw_screen.c b/src/gallium/winsys/svga/drm/vmw_screen.c index d0bfcd728bf..6041598cac1 100644 --- a/src/gallium/winsys/svga/drm/vmw_screen.c +++ b/src/gallium/winsys/svga/drm/vmw_screen.c @@ -109,6 +109,9 @@ vmw_winsys_create( int fd ) if (util_hash_table_set(dev_hash, &vws->device, vws) != PIPE_OK) goto out_no_hash_insert; + pipe_condvar_init(vws->cs_cond); + pipe_mutex_init(vws->cs_mutex); + return vws; out_no_hash_insert: out_no_svga: @@ -133,6 +136,8 @@ vmw_winsys_destroy(struct vmw_winsys_screen *vws) vws->fence_ops->destroy(vws->fence_ops); vmw_ioctl_cleanup(vws); close(vws->ioctl.drm_fd); + pipe_mutex_destroy(vws->cs_mutex); + pipe_condvar_destroy(vws->cs_cond); FREE(vws); } } diff --git a/src/gallium/winsys/svga/drm/vmw_screen.h b/src/gallium/winsys/svga/drm/vmw_screen.h index 79d0949e96a..c1cc7c32535 100644 --- a/src/gallium/winsys/svga/drm/vmw_screen.h +++ b/src/gallium/winsys/svga/drm/vmw_screen.h @@ -40,7 +40,7 @@ #include "svga_winsys.h" #include "pipebuffer/pb_buffer_fenced.h" - +#include "os/os_thread.h" #define VMW_GMR_POOL_SIZE (16*1024*1024)
#define VMW_QUERY_POOL_SIZE (8192) @@ -99,6 +99,9 @@ struct vmw_winsys_screen */ dev_t device; int open_count; + + pipe_condvar cs_cond; + pipe_mutex cs_mutex; };