}
static void
-batch_reset_resources(struct fd_batch *batch)
+batch_reset_resources_locked(struct fd_batch *batch)
{
struct set_entry *entry;
+ pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
set_foreach(batch->resources, entry) {
struct fd_resource *rsc = (struct fd_resource *)entry->key;
_mesa_set_remove(batch->resources, entry);
debug_assert(rsc->batch_mask & (1 << batch->idx));
rsc->batch_mask &= ~(1 << batch->idx);
if (rsc->write_batch == batch)
- fd_batch_reference(&rsc->write_batch, NULL);
+ fd_batch_reference_locked(&rsc->write_batch, NULL);
}
}
+static void
+batch_reset_resources(struct fd_batch *batch)
+{
+ pipe_mutex_lock(batch->ctx->screen->lock);
+ batch_reset_resources_locked(batch);
+ pipe_mutex_unlock(batch->ctx->screen->lock);
+}
+
static void
batch_reset(struct fd_batch *batch)
{
void
__fd_batch_destroy(struct fd_batch *batch)
{
- fd_bc_invalidate_batch(batch, true);
-
DBG("%p", batch);
util_copy_framebuffer_state(&batch->framebuffer, NULL);
+ pipe_mutex_lock(batch->ctx->screen->lock);
+ fd_bc_invalidate_batch(batch, true);
+ pipe_mutex_unlock(batch->ctx->screen->lock);
+
batch_fini(batch);
batch_reset_resources(batch);
if (batch == batch->ctx->batch) {
batch_reset(batch);
} else {
+ pipe_mutex_lock(batch->ctx->screen->lock);
fd_bc_invalidate_batch(batch, false);
+ pipe_mutex_unlock(batch->ctx->screen->lock);
}
}
*/
if (batch_depends_on(dep, batch)) {
DBG("%p: flush forced on %p!", batch, dep);
+ pipe_mutex_unlock(batch->ctx->screen->lock);
fd_batch_flush(dep, false);
+ pipe_mutex_lock(batch->ctx->screen->lock);
} else {
struct fd_batch *other = NULL;
- fd_batch_reference(&other, dep);
+ fd_batch_reference_locked(&other, dep);
batch->dependents_mask |= (1 << dep->idx);
DBG("%p: added dependency on %p", batch, dep);
}
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
+ pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
if (rsc->stencil)
fd_batch_resource_used(batch, rsc->stencil, write);
fd_batch_reference_locked(&b, NULL);
}
}
- fd_batch_reference(&rsc->write_batch, batch);
+ fd_batch_reference_locked(&rsc->write_batch, batch);
} else {
if (rsc->write_batch) {
batch_add_dep(batch, rsc->write_batch);
void __fd_batch_describe(char* buf, const struct fd_batch *batch);
void __fd_batch_destroy(struct fd_batch *batch);
+/*
+ * NOTE: the rule is that you need to hold the screen->lock when
+ * destroying a batch.. so either use fd_batch_reference() (which grabs
+ * the lock for you) if you don't already hold the lock, or
+ * fd_batch_reference_locked() if you do.
+ *
+ * WARNING: the _locked() version can briefly drop the lock. Without
+ * recursive mutexes there is not much else we can do, since
+ * __fd_batch_destroy() needs to unref resources.
+ */
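+
+/* For illustration, a hypothetical caller (ctx/rsc/batch here are assumed
+ * locals, not part of this patch) would use the two variants roughly like
+ * this:
+ *
+ *    // already holding screen->lock, eg. while updating write_batch;
+ *    // note the lock may be dropped and re-acquired across the call:
+ *    fd_batch_reference_locked(&rsc->write_batch, batch);
+ *
+ *    // not holding the lock, eg. dropping a local reference:
+ *    fd_batch_reference(&batch, NULL);
+ */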
+
static inline void
fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
{
*ptr = batch;
}
+/* fwd-decl prototypes to untangle header dependency :-/ */
+static inline void fd_context_assert_locked(struct fd_context *ctx);
+static inline void fd_context_lock(struct fd_context *ctx);
+static inline void fd_context_unlock(struct fd_context *ctx);
+
+static inline void
+fd_batch_reference_locked(struct fd_batch **ptr, struct fd_batch *batch)
+{
+ struct fd_batch *old_batch = *ptr;
+
+ if (old_batch)
+ fd_context_assert_locked(old_batch->ctx);
+ else if (batch)
+ fd_context_assert_locked(batch->ctx);
+
+ if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
+ (debug_reference_descriptor)__fd_batch_describe)) {
+ struct fd_context *ctx = old_batch->ctx;
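+ /* __fd_batch_destroy() takes screen->lock itself (to invalidate the
+ * batch and drop its resource references), so with a non-recursive
+ * mutex we must drop the lock across the destroy and re-acquire it:
+ */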
+ fd_context_unlock(ctx);
+ __fd_batch_destroy(old_batch);
+ fd_context_lock(ctx);
+ }
+ *ptr = batch;
+}
+
+#include "freedreno_context.h"
+
static inline void
fd_reset_wfi(struct fd_batch *batch)
{
struct hash_entry *entry;
struct fd_batch *last_batch = NULL;
+ pipe_mutex_lock(ctx->screen->lock);
+
hash_table_foreach(cache->ht, entry) {
struct fd_batch *batch = NULL;
- fd_batch_reference(&batch, (struct fd_batch *)entry->data);
+ fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
if (batch->ctx == ctx) {
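+ /* flushing may itself need screen->lock (eg. to drop batch and
+ * resource references), so release it around the flush; the ref
+ * taken above keeps batch alive meanwhile:
+ */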
+ pipe_mutex_unlock(ctx->screen->lock);
fd_batch_reference(&last_batch, batch);
fd_batch_flush(batch, false);
+ pipe_mutex_lock(ctx->screen->lock);
}
- fd_batch_reference(&batch, NULL);
+ fd_batch_reference_locked(&batch, NULL);
}
+ pipe_mutex_unlock(ctx->screen->lock);
+
if (last_batch) {
fd_batch_sync(last_batch);
fd_batch_reference(&last_batch, NULL);
struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch;
+ pipe_mutex_lock(ctx->screen->lock);
+
foreach_batch(batch, cache, cache->batch_mask) {
- if (batch->ctx == ctx) {
- fd_batch_reset(batch);
- fd_batch_reference(&batch, NULL);
- }
+ if (batch->ctx == ctx)
+ fd_batch_reference_locked(&batch, NULL);
}
+
+ pipe_mutex_unlock(ctx->screen->lock);
}
void
fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
{
+ if (!batch)
+ return;
+
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct key *key = (struct key *)batch->key;
+ pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
if (destroy) {
cache->batches[batch->idx] = NULL;
cache->batch_mask &= ~(1 << batch->idx);
fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
{
struct fd_screen *screen = fd_screen(rsc->base.b.screen);
- struct fd_batch *batch;
+ struct fd_batch *batch;
+
+ pipe_mutex_lock(screen->lock);
if (destroy) {
foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
}
rsc->batch_mask = 0;
- fd_batch_reference(&rsc->write_batch, NULL);
+ fd_batch_reference_locked(&rsc->write_batch, NULL);
}
foreach_batch(batch, &screen->batch_cache, rsc->bc_batch_mask)
fd_bc_invalidate_batch(batch, false);
rsc->bc_batch_mask = 0;
+
+ pipe_mutex_unlock(screen->lock);
}
struct fd_batch *
struct fd_batch *batch;
uint32_t idx;
+ pipe_mutex_lock(ctx->screen->lock);
+
while ((idx = ffs(~cache->batch_mask)) == 0) {
#if 0
for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
!cache->batches[i]->needs_flush)
continue;
if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
- fd_batch_reference(&flush_batch, cache->batches[i]);
+ fd_batch_reference_locked(&flush_batch, cache->batches[i]);
}
+
+ /* we can drop the lock temporarily here: since we hold a ref,
+ * flush_batch won't disappear under us.
+ */
+ pipe_mutex_unlock(ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
fd_batch_flush(flush_batch, true);
+ pipe_mutex_lock(ctx->screen->lock);
/* While the resources get cleaned up automatically, the flush_batch
* doesn't get removed from the dependencies of other batches, so
if (other->dependents_mask & (1 << flush_batch->idx)) {
other->dependents_mask &= ~(1 << flush_batch->idx);
struct fd_batch *ref = flush_batch;
- fd_batch_reference(&ref, NULL);
+ fd_batch_reference_locked(&ref, NULL);
}
}
- fd_batch_reference(&flush_batch, NULL);
+ fd_batch_reference_locked(&flush_batch, NULL);
}
idx--; /* bit zero returns 1 for ffs() */
batch = fd_batch_create(ctx);
if (!batch)
- return NULL;
+ goto out;
batch->seqno = cache->cnt++;
batch->idx = idx;
debug_assert(cache->batches[idx] == NULL);
cache->batches[idx] = batch;
+out:
+ pipe_mutex_unlock(ctx->screen->lock);
+
return batch;
}
if (!batch)
return NULL;
+ pipe_mutex_lock(ctx->screen->lock);
+
_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
batch->key = key;
batch->hash = hash;
rsc->bc_batch_mask = (1 << batch->idx);
}
+ pipe_mutex_unlock(ctx->screen->lock);
+
return batch;
}
#include "pipe/p_state.h"
-#include "freedreno_batch.h"
+struct fd_resource;
+struct fd_batch;
+struct fd_context;
struct hash_table;
return (struct fd_context *)pctx;
}
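+
+/* convenience wrappers for ctx->screen->lock; fwd-declared in
+ * freedreno_batch.h to untangle the header dependency:
+ */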
+static inline void
+fd_context_assert_locked(struct fd_context *ctx)
+{
+ pipe_mutex_assert_locked(ctx->screen->lock);
+}
+
+static inline void
+fd_context_lock(struct fd_context *ctx)
+{
+ pipe_mutex_lock(ctx->screen->lock);
+}
+
+static inline void
+fd_context_unlock(struct fd_context *ctx)
+{
+ pipe_mutex_unlock(ctx->screen->lock);
+}
+
static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
* Figure out the buffers/features we need:
*/
+ pipe_mutex_lock(ctx->screen->lock);
+
if (fd_depth_enabled(ctx)) {
buffers |= FD_BUFFER_DEPTH;
resource_written(batch, pfb->zsbuf->texture);
resource_written(batch, batch->query_buf);
+ pipe_mutex_unlock(ctx->screen->lock);
+
batch->num_draws++;
prims = u_reduced_prims_for_vertices(info->mode, info->count);
batch->resolve |= buffers;
batch->needs_flush = true;
+ pipe_mutex_lock(ctx->screen->lock);
+
if (buffers & PIPE_CLEAR_COLOR)
for (i = 0; i < pfb->nr_cbufs; i++)
if (buffers & (PIPE_CLEAR_COLOR0 << i))
resource_written(batch, batch->query_buf);
+ pipe_mutex_unlock(ctx->screen->lock);
+
DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
pfb->width, pfb->height, depth, stencil,
util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
*/
fd_bc_invalidate_resource(rsc, false);
+ pipe_mutex_lock(ctx->screen->lock);
+
/* Swap the backing bo's, so shadow becomes the old buffer,
* blit from shadow to new buffer. From here on out, we
* cannot fail.
}
swap(rsc->batch_mask, shadow->batch_mask);
+ pipe_mutex_unlock(ctx->screen->lock);
+
struct pipe_blit_info blit = {0};
blit.dst.resource = prsc;
blit.dst.format = prsc->format;
* to wait.
*/
} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- if ((usage & PIPE_TRANSFER_WRITE) && rsc->write_batch &&
- rsc->write_batch->back_blit) {
+ struct fd_batch *write_batch = NULL;
+
+ /* hold a reference, so it doesn't disappear under us: */
+ fd_batch_reference(&write_batch, rsc->write_batch);
+
+ if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
+ write_batch->back_blit) {
/* if only thing pending is a back-blit, we can discard it: */
- fd_batch_reset(rsc->write_batch);
+ fd_batch_reset(write_batch);
}
/* If the GPU is writing to the resource, or if it is reading from the
}
assert(rsc->batch_mask == 0);
} else {
- fd_batch_flush(rsc->write_batch, true);
+ fd_batch_flush(write_batch, true);
}
assert(!rsc->write_batch);
}
+ fd_batch_reference(&write_batch, NULL);
+
/* The GPU keeps track of how the various bo's are being used, and
* will wait if necessary for the proper operation to have
* completed.
fd_bc_fini(&screen->batch_cache);
+ pipe_mutex_destroy(screen->lock);
+
free(screen);
}
fd_bc_init(&screen->batch_cache);
+ pipe_mutex_init(screen->lock);
+
pscreen->destroy = fd_screen_destroy;
pscreen->get_param = fd_screen_get_param;
pscreen->get_paramf = fd_screen_get_paramf;
#include "pipe/p_screen.h"
#include "util/u_memory.h"
+#include "os/os_thread.h"
#include "freedreno_batch_cache.h"
struct fd_screen {
struct pipe_screen base;
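+
+ /* protects the batch-cache and the batch/resource tracking state
+ * (eg. rsc->write_batch, rsc->batch_mask):
+ */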
+ pipe_mutex lock;
+
/* it would be tempting to use pipe_reference here, but that
* really doesn't work well if it isn't the first member of
* the struct, so not quite so awesome to be adding refcnting