fd6_emit_restore(batch, batch->draw);
fd6_emit_lrz_flush(batch->draw);
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
fd_batch_resource_used(batch, fd_resource(info->src.resource), false);
fd_batch_resource_used(batch, fd_resource(info->dst.resource), true);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
fd_batch_set_stage(batch, FD_STAGE_BLIT);
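This first hunk sets the pattern for the whole series: each open-coded mtx_lock(&screen->lock) / mtx_unlock(&screen->lock) pair around batch and resource tracking becomes a call to a named wrapper (the wrappers themselves appear at the end of this patch, in the screen header). A minimal standalone sketch of the shape of the change, using C11 mtx_t, which is the API behind mesa's mtx_lock/mtx_unlock; the struct and function names here are stand-ins, not the real fd_screen interface:

    #include <threads.h>

    struct screen { mtx_t lock; };

    /* named wrappers: the locking discipline becomes grep-able and
     * gains one seam for future instrumentation or a new lock type */
    static inline void screen_lock(struct screen *s)   { mtx_lock(&s->lock); }
    static inline void screen_unlock(struct screen *s) { mtx_unlock(&s->lock); }

    static void mark_blit_resources(struct screen *s)
    {
       screen_lock(s);     /* was: mtx_lock(&s->lock); */
       /* ... mark src as read, dst as written ... */
       screen_unlock(s);   /* was: mtx_unlock(&s->lock); */
    }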
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
- pipe_mutex_assert_locked(batch->ctx->screen->lock);
+ fd_screen_assert_locked(batch->ctx->screen);
set_foreach(batch->resources, entry) {
struct fd_resource *rsc = (struct fd_resource *)entry->key;
static void
batch_reset_resources(struct fd_batch *batch)
{
- mtx_lock(&batch->ctx->screen->lock);
+ fd_screen_lock(batch->ctx->screen);
batch_reset_resources_locked(batch);
- mtx_unlock(&batch->ctx->screen->lock);
+ fd_screen_unlock(batch->ctx->screen);
}
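Note the naming convention this pair relies on: the *_locked variant only asserts that the caller already holds the screen lock, while the unsuffixed function acquires and releases it around the call. A compilable sketch of the convention with stand-in names:

    #include <threads.h>

    struct thing { mtx_t lock; };

    /* caller must already hold t->lock; this asserts in a debug build
     * and mutates, but never takes the lock itself */
    static void thing_reset_locked(struct thing *t)
    {
       /* ... mutate state guarded by t->lock ... */
    }

    /* public entry point: acquire, delegate, release */
    static void thing_reset(struct thing *t)
    {
       mtx_lock(&t->lock);
       thing_reset_locked(t);
       mtx_unlock(&t->lock);
    }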
static void
debug_assert(batch->reference.count > 0);
- mtx_lock(&batch->ctx->screen->lock);
+ fd_screen_lock(batch->ctx->screen);
fd_bc_invalidate_batch(batch, false);
- mtx_unlock(&batch->ctx->screen->lock);
+ fd_screen_unlock(batch->ctx->screen);
}
/* NOTE: could drop the last ref to batch
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
- pipe_mutex_assert_locked(batch->ctx->screen->lock);
+ fd_screen_assert_locked(batch->ctx->screen);
if (batch->dependents_mask & (1 << dep->idx))
return;
struct fd_batch *b = NULL;
fd_batch_reference_locked(&b, rsc->write_batch);
- mtx_unlock(&b->ctx->screen->lock);
+ fd_screen_unlock(b->ctx->screen);
fd_batch_flush(b);
- mtx_lock(&b->ctx->screen->lock);
+ fd_screen_lock(b->ctx->screen);
fd_bc_invalidate_batch(b, false);
fd_batch_reference_locked(&b, NULL);
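The unlock/flush/relock dance in this hunk is deliberate: fd_batch_flush() does blocking work and, in paths shown later in this patch, ends up taking the screen lock itself, so holding the lock across the call would risk deadlock. The fd_batch_reference_locked() beforehand is what makes dropping the lock safe, since the local ref keeps the batch alive over the unlocked window. A sketch of the pattern with stand-in helper names:

    struct batch *b = NULL;
    batch_ref(&b, rsc->write_batch);   /* pin while the lock is held */
    screen_unlock(screen);
    batch_flush(b);                    /* blocking work, lock dropped */
    screen_lock(screen);
    /* screen state may have changed meanwhile; revalidate before use */
    batch_unref(&b);                   /* may drop the last ref */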
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
- pipe_mutex_assert_locked(batch->ctx->screen->lock);
+ fd_screen_assert_locked(batch->ctx->screen);
if (rsc->stencil)
fd_batch_resource_used(batch, rsc->stencil, write);
struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx)
fd_bc_invalidate_batch(batch, true);
}
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
}
/**
struct fd_screen *screen = fd_screen(rsc->base.screen);
struct fd_batch *batch;
- mtx_lock(&screen->lock);
+ fd_screen_lock(screen);
if (destroy) {
foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
rsc->bc_batch_mask = 0;
- mtx_unlock(&screen->lock);
+ fd_screen_unlock(screen);
}
struct fd_batch *
struct fd_batch *batch;
uint32_t idx;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
while ((idx = ffs(~cache->batch_mask)) == 0) {
#if 0
/* we can drop the lock temporarily here, since we hold a ref,
 * so flush_batch won't disappear under us.
*/
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
DBG("%p: too many batches! flush forced!", flush_batch);
fd_batch_flush(flush_batch);
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
/* While the resources get cleaned up automatically, the flush_batch
* doesn't get removed from the dependencies of other batches, so
cache->batches[idx] = batch;
out:
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
return batch;
}
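Because the forced flush sits inside the while ((idx = ffs(~cache->batch_mask)) == 0) loop, re-acquiring the lock naturally re-tests the condition, which is the standard drop/retake/revalidate shape for a guarded wait. A condensed sketch (stand-in names; the held ref mentioned in the comment above is what keeps flush_batch valid while unlocked):

    screen_lock(screen);
    while (!slot_available(cache)) {
       batch_ref(&pinned, victim(cache));  /* pin under the lock */
       screen_unlock(screen);
       batch_flush(pinned);                /* heavy work, unlocked */
       screen_lock(screen);
       batch_unref(&pinned);
       /* loop re-checks the mask: another thread may have raced us */
    }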
if (!batch)
return NULL;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
batch->key = key;
rsc->bc_batch_mask = (1 << batch->idx);
}
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
return batch;
}
DBG("");
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
list_del(&ctx->node);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
fd_log_process(ctx, true);
assert(list_is_empty(&ctx->log_chunks));
list_inithead(&ctx->acc_active_queries);
list_inithead(&ctx->log_chunks);
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
list_add(&ctx->node, &ctx->screen->context_list);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
ctx->log_out = stdout;
static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
- pipe_mutex_assert_locked(ctx->screen->lock);
+ fd_screen_assert_locked(ctx->screen);
}
static inline void
fd_context_lock(struct fd_context *ctx)
{
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
}
static inline void
fd_context_unlock(struct fd_context *ctx)
{
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
}
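The context-level helpers keep their existing names but now forward to the screen wrappers, which matches the data model: a context has no lock of its own, it borrows its screen's. A two-line sketch of the forwarding shape (stand-in names):

    static inline void ctx_lock(struct ctx *c)   { screen_lock(c->screen); }
    static inline void ctx_unlock(struct ctx *c) { screen_unlock(c->screen); }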
/* mark all state dirty: */
* Figure out the buffers/features we need:
*/
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
if (ctx->dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
if (fd_depth_enabled(ctx)) {
list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
resource_written(batch, aq->prsc);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
batch->num_draws++;
batch->resolve |= buffers;
batch->needs_flush = true;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
if (buffers & PIPE_CLEAR_COLOR)
for (i = 0; i < pfb->nr_cbufs; i++)
list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
resource_written(batch, aq->prsc);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
pfb->width, pfb->height, depth, stencil,
fd_batch_reference(&ctx->batch, batch);
fd_context_all_dirty(ctx);
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
/* Mark SSBOs */
foreach_bit (i, so->enabled_mask & so->writable_mask)
if (info->indirect)
resource_read(batch, info->indirect);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
batch->needs_flush = true;
ctx->launch_grid(ctx, info);
{
struct fd_gmem_cache *cache = &gmem->screen->gmem_cache;
- pipe_mutex_assert_locked(gmem->screen->lock);
+ fd_screen_assert_locked(gmem->screen);
_mesa_hash_table_remove_key(cache->ht, gmem->key);
list_del(&gmem->node);
struct gmem_key *key = gmem_key_init(batch, assume_zs);
uint32_t hash = gmem_key_hash(key);
- mtx_lock(&screen->lock);
+ fd_screen_lock(screen);
struct hash_entry *entry =
_mesa_hash_table_search_pre_hashed(cache->ht, hash, key);
list_delinit(&gmem->node);
list_add(&gmem->node, &cache->lru);
- mtx_unlock(&screen->lock);
+ fd_screen_unlock(screen);
return gmem;
}
render_tiles(batch, gmem);
batch->gmem_state = NULL;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
fd_gmem_reference(&gmem, NULL);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
ctx->stats.batch_gmem++;
}
struct fd_gmem_stateobj *gmem = lookup_gmem_state(batch, !!pfb->zsbuf);
unsigned nbins = gmem->maxpw * gmem->maxph;
- mtx_lock(&screen->lock);
+ fd_screen_lock(screen);
fd_gmem_reference(&gmem, NULL);
- mtx_unlock(&screen->lock);
+ fd_screen_unlock(screen);
return nbins;
}
aq->batch = batch;
p->resume(aq, aq->batch);
- mtx_lock(&batch->ctx->screen->lock);
+ fd_screen_lock(batch->ctx->screen);
fd_batch_resource_used(batch, fd_resource(aq->prsc), true);
- mtx_unlock(&batch->ctx->screen->lock);
+ fd_screen_unlock(batch->ctx->screen);
}
static void
{
struct fd_screen *screen = fd_screen(rsc->base.screen);
- mtx_lock(&screen->lock);
+ fd_screen_lock(screen);
fd_resource_lock(rsc);
if (rsc->dirty)
rebind_resource_in_ctx(ctx, rsc);
fd_resource_unlock(rsc);
- mtx_unlock(&screen->lock);
+ fd_screen_unlock(screen);
}
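This hunk also shows the nesting order used here: the screen lock is taken before the per-resource lock and released after it. Keeping one global acquisition order is what rules out lock-order inversion between two threads that need both locks. A sketch of the ordering (stand-in names; the order, not the helpers, is the point):

    screen_lock(screen);     /* outer lock: always first */
    resource_lock(rsc);      /* inner lock: only while the outer is held */
    /* ... rebind state that both locks guard ... */
    resource_unlock(rsc);
    screen_unlock(screen);   /* release in reverse order */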
static void
fd_bc_invalidate_resource(rsc, false);
rebind_resource(rsc);
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
/* Swap the backing bo's, so shadow becomes the old buffer;
 * blit from shadow to new buffer. From here on out, we
}
swap(rsc->batch_mask, shadow->batch_mask);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
struct pipe_blit_info blit = {};
blit.dst.resource = prsc;
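The swap happens entirely inside the locked region so no other context can observe a half-shadowed resource: the backing bo and the batch tracking masks have to move together. A hedged sketch of the idea, reusing the swap() helper visible in the hunk (the bo field is assumed from context):

    screen_lock(screen);
    swap(rsc->bo, shadow->bo);                  /* shadow now owns the old bo */
    swap(rsc->batch_mask, shadow->batch_mask);  /* batch tracking follows */
    screen_unlock(screen);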
{
struct fd_batch *write_batch = NULL;
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
fd_batch_reference_locked(&write_batch, rsc->write_batch);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
if (usage & PIPE_TRANSFER_WRITE) {
struct fd_batch *batch, *batches[32] = {};
* to iterate the batches which reference this resource. So
* we must first grab references under a lock, then flush.
*/
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
batch_mask = rsc->batch_mask;
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
fd_batch_reference_locked(&batches[batch->idx], batch);
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
fd_batch_flush(batch);
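The comment here states the central rule of the whole scheme: the batch cache may only be iterated with the screen lock held, but flushing blocks and retakes the lock, so the code snapshots references under the lock and flushes after releasing it. A sketch of the snapshot-then-operate pattern (stand-in helpers, with a fixed-size array mirroring batches[32] above):

    struct batch *pinned[32] = {};
    screen_lock(screen);
    foreach_batch(batch, cache, mask)
       batch_ref(&pinned[batch->idx], batch);  /* pin while iteration is safe */
    screen_unlock(screen);
    foreach_batch(batch, cache, mask)
       batch_flush(pinned[batch->idx]);        /* heavy work, unlocked */
    /* finally drop the pins so the batches can be reclaimed */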
return (struct fd_screen *)pscreen;
}
+static inline void
+fd_screen_lock(struct fd_screen *screen)
+{
+ mtx_lock(&screen->lock);
+}
+
+static inline void
+fd_screen_unlock(struct fd_screen *screen)
+{
+ mtx_unlock(&screen->lock);
+}
+
+static inline void
+fd_screen_assert_locked(struct fd_screen *screen)
+{
+ pipe_mutex_assert_locked(screen->lock);
+}
+
bool fd_screen_bo_get_handle(struct pipe_screen *pscreen,
struct fd_bo *bo,
struct renderonly_scanout *scanout,
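With the three wrappers in place, every call site in the hunks above reads as a named operation on the screen, and the locking gains a single seam for later changes (contention statistics, or swapping the mutex implementation) without touching dozens of call sites again. A minimal caller, assuming the surrounding freedreno tree; the function itself is hypothetical, but the calls are the ones this patch introduces:

    static void
    example_mark_written(struct fd_context *ctx, struct fd_resource *rsc)
    {
       fd_screen_lock(ctx->screen);                   /* was mtx_lock(&screen->lock) */
       fd_batch_resource_used(ctx->batch, rsc, true); /* write = true */
       fd_screen_unlock(ctx->screen);
    }

    /* and inside helpers that require the caller to hold the lock: */
    fd_screen_assert_locked(ctx->screen);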