const struct pipe_resource *templat)
{
struct etna_screen *screen = etna_screen(pscreen);
- if (!translate_samples_to_xyscale(templat->nr_samples, NULL, NULL, NULL))
+ if (!translate_samples_to_xyscale(templat->nr_samples, NULL, NULL))
return false;
/* templat->bind is not set here, so we must use the minimum sizes */
}
int msaa_xscale = 1, msaa_yscale = 1;
- if (!translate_samples_to_xyscale(nr_samples, &msaa_xscale, &msaa_yscale, NULL)) {
+ if (!translate_samples_to_xyscale(nr_samples, &msaa_xscale, &msaa_yscale)) {
/* Number of samples not supported */
return NULL;
}
rsc->halign = halign;
pipe_reference_init(&rsc->base.reference, 1);
+ util_range_init(&rsc->valid_buffer_range);
size = setup_miptree(rsc, paddingX, paddingY, msaa_xscale, msaa_yscale);
memset(map, 0, size);
}
+ mtx_init(&rsc->lock, mtx_recursive);
rsc->pending_ctx = _mesa_set_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
if (!rsc->pending_ctx)
* and a texture-compatible base buffer in other cases
*
*/
-
if (templat->bind & (PIPE_BIND_SCANOUT | PIPE_BIND_DEPTH_STENCIL)) {
if (screen->specs.pixel_pipes > 1 && !screen->specs.single_buffer)
layout |= ETNA_LAYOUT_BIT_MULTI;
if (screen->specs.can_supertile)
layout |= ETNA_LAYOUT_BIT_SUPER;
} else if (VIV_FEATURE(screen, chipMinorFeatures2, SUPERTILED_TEXTURE) &&
- /* RS can't tile 1 byte per pixel formats, will have to CPU tile,
- * which doesn't support super-tiling
- */
- util_format_get_blocksize(templat->format) > 1) {
+ etna_resource_hw_tileable(screen->specs.use_blt, templat)) {
layout |= ETNA_LAYOUT_BIT_SUPER;
}
/**
 * Destroy a resource: release GPU memory, scanout state, tracking
 * structures and the resource itself.
 *
 * Locking: with per-resource tracking, only the resource's own lock is
 * needed while tearing down the pending-context set.  By the time a
 * resource is destroyed its refcount is zero, so no context may still
 * have it pending — the assert checks that the set is really empty.
 */
static void
etna_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
{
   struct etna_resource *rsc = etna_resource(prsc);

   mtx_lock(&rsc->lock);
   /* No context may still hold this resource as pending at destroy time. */
   assert(!_mesa_set_next_entry(rsc->pending_ctx, NULL));
   _mesa_set_destroy(rsc->pending_ctx, NULL);
   mtx_unlock(&rsc->lock);

   if (rsc->bo)
      etna_bo_del(rsc->bo);

   if (rsc->scanout)
      renderonly_scanout_destroy(rsc->scanout, etna_screen(pscreen)->ro);

   util_range_destroy(&rsc->valid_buffer_range);

   /* Drop references to the separate texture/render views, if any. */
   pipe_resource_reference(&rsc->texture, NULL);
   pipe_resource_reference(&rsc->render, NULL);

   for (unsigned i = 0; i < ETNA_NUM_LOD; i++)
      FREE(rsc->levels[i].patch_offsets);

   mtx_destroy(&rsc->lock);

   FREE(rsc);
}
struct etna_resource *rsc;
struct etna_resource_level *level;
struct pipe_resource *prsc;
- struct pipe_resource *ptiled = NULL;
DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
"nr_samples=%u, usage=%u, bind=%x, flags=%x",
*prsc = *tmpl;
pipe_reference_init(&prsc->reference, 1);
+ util_range_init(&rsc->valid_buffer_range);
prsc->screen = pscreen;
rsc->bo = etna_screen_bo_from_handle(pscreen, handle, &level->stride);
goto fail;
}
+ mtx_init(&rsc->lock, mtx_recursive);
rsc->pending_ctx = _mesa_set_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
if (!rsc->pending_ctx)
fail:
etna_resource_destroy(pscreen, prsc);
- if (ptiled)
- etna_resource_destroy(pscreen, ptiled);
return NULL;
}
etna_resource_used(struct etna_context *ctx, struct pipe_resource *prsc,
enum etna_resource_status status)
{
- struct etna_screen *screen = ctx->screen;
+ struct pipe_resource *referenced = NULL;
struct etna_resource *rsc;
if (!prsc)
return;
+ mtx_lock(&ctx->lock);
+
rsc = etna_resource(prsc);
+again:
+ mtx_lock(&rsc->lock);
+
+ set_foreach(rsc->pending_ctx, entry) {
+ struct etna_context *extctx = (struct etna_context *)entry->key;
+ struct pipe_context *pctx = &extctx->base;
+ bool need_flush = false;
+
+ if (mtx_trylock(&extctx->lock) != thrd_success) {
+ /*
+ * The other context could be locked in etna_flush() and
+ * stuck waiting for the resource lock, so release the
+ * resource lock here, let etna_flush() finish, and try
+ * again.
+ */
+ mtx_unlock(&rsc->lock);
+ thrd_yield();
+ goto again;
+ }
- mtx_lock(&screen->lock);
+ set_foreach(extctx->used_resources_read, entry2) {
+ struct etna_resource *rsc2 = (struct etna_resource *)entry2->key;
+ if (ctx == extctx || rsc2 != rsc)
+ continue;
- /*
- * if we are pending read or write by any other context or
- * if reading a resource pending a write, then
- * flush all the contexts to maintain coherency
- */
- if (((status & ETNA_PENDING_WRITE) && rsc->status) ||
- ((status & ETNA_PENDING_READ) && (rsc->status & ETNA_PENDING_WRITE))) {
- set_foreach(rsc->pending_ctx, entry) {
- struct etna_context *extctx = (struct etna_context *)entry->key;
- struct pipe_context *pctx = &extctx->base;
+ if (status & ETNA_PENDING_WRITE) {
+ need_flush = true;
+ break;
+ }
+ }
- if (extctx == ctx)
+ if (need_flush) {
+ pctx->flush(pctx, NULL, 0);
+ mtx_unlock(&extctx->lock);
+ continue;
+ }
+
+ set_foreach(extctx->used_resources_write, entry2) {
+ struct etna_resource *rsc2 = (struct etna_resource *)entry2->key;
+ if (ctx == extctx || rsc2 != rsc)
continue;
- pctx->flush(pctx, NULL, 0);
- /* It's safe to clear the status here. If we need to flush it means
- * either another context had the resource in exclusive (write) use,
- * or we transition the resource to exclusive use in our context.
- * In both cases the new status accurately reflects the resource use
- * after the flush.
- */
- rsc->status = 0;
+ need_flush = true;
+ break;
}
+
+ if (need_flush)
+ pctx->flush(pctx, NULL, 0);
+
+ mtx_unlock(&extctx->lock);
}
- rsc->status |= status;
+ rsc->status = status;
- _mesa_set_add(screen->used_resources, rsc);
- _mesa_set_add(rsc->pending_ctx, ctx);
+ if (!_mesa_set_search(rsc->pending_ctx, ctx)) {
+ pipe_resource_reference(&referenced, prsc);
+ _mesa_set_add((status & ETNA_PENDING_READ) ?
+ ctx->used_resources_read : ctx->used_resources_write, rsc);
+ _mesa_set_add(rsc->pending_ctx, ctx);
+ }
- mtx_unlock(&screen->lock);
+ mtx_unlock(&rsc->lock);
+ mtx_unlock(&ctx->lock);
}
bool