freedreno: proper locking for iterating dependent batches
author: Rob Clark <robdclark@gmail.com>
Mon, 20 Nov 2017 14:52:04 +0000 (09:52 -0500)
committer: Rob Clark <robdclark@gmail.com>
Sun, 3 Dec 2017 19:17:40 +0000 (14:17 -0500)
In transfer_map(), when we need to flush batches that read from a
resource, we should be holding screen->lock to guard against race
conditions.  Somehow deferred flush seems to make this existing
race more obvious.

Signed-off-by: Rob Clark <robdclark@gmail.com>
src/gallium/drivers/freedreno/freedreno_batch.h
src/gallium/drivers/freedreno/freedreno_resource.c

index 41356e3519fbda5d63aa44f2517c490d94103786..8b05f0657aa36ccf6126102f4ccc79bab03d7671 100644 (file)
@@ -66,7 +66,7 @@ struct fd_hw_sample;
 struct fd_batch {
        struct pipe_reference reference;
        unsigned seqno;
-       unsigned idx;
+       unsigned idx;       /* index into cache->batches[] */
 
        int in_fence_fd;
        bool needs_out_fence_fd;
index 880666d3af5dd117e6b068e099f0b8bf666e021e..e305b5f9eb87e51543916dd11d5c3a7e8c1b9db8 100644 (file)
@@ -531,14 +531,26 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 
                if (needs_flush) {
                        if (usage & PIPE_TRANSFER_WRITE) {
-                               struct fd_batch *batch, *last_batch = NULL;
-                               foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
-                                       fd_batch_reference(&last_batch, batch);
+                               struct fd_batch *batch, *batches[32] = {0};
+                               uint32_t batch_mask;
+
+                               /* This is a bit awkward, probably a fd_batch_flush_locked()
+                                * would make things simpler.. but we need to hold the lock
+                                * to iterate the batches which reference this resource.  So
+                                * we must first grab references under a lock, then flush.
+                                */
+                               mtx_lock(&ctx->screen->lock);
+                               batch_mask = rsc->batch_mask;
+                               foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
+                                       fd_batch_reference(&batches[batch->idx], batch);
+                               mtx_unlock(&ctx->screen->lock);
+
+                               foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
                                        fd_batch_flush(batch, false);
-                               }
-                               if (last_batch) {
-                                       fd_batch_sync(last_batch);
-                                       fd_batch_reference(&last_batch, NULL);
+
+                               foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
+                                       fd_batch_sync(batch);
+                                       fd_batch_reference(&batches[batch->idx], NULL);
                                }
                                assert(rsc->batch_mask == 0);
                        } else {