fd_bo_del(rsc->bo);
rsc->bo = fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x",
- prsc->width0, prsc->height0, prsc->depth0, rsc->cpp, prsc->bind);
+ prsc->width0, prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);
rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
util_range_set_empty(&rsc->valid_buffer_range);
fd_bc_invalidate_resource(rsc, true);
/* TODO valid_buffer_range?? */
swap(rsc->bo, shadow->bo);
swap(rsc->write_batch, shadow->write_batch);
- swap(rsc->offset, shadow->offset);
- swap(rsc->ubwc_offset, shadow->ubwc_offset);
- swap(rsc->ubwc_pitch, shadow->ubwc_pitch);
- swap(rsc->ubwc_size, shadow->ubwc_size);
+ for (int level = 0; level <= prsc->last_level; level++) {
+ swap(rsc->layout.slices[level], shadow->layout.slices[level]);
+ swap(rsc->layout.ubwc_slices[level], shadow->layout.ubwc_slices[level]);
+ }
+ swap(rsc->layout.ubwc_layer_size, shadow->layout.ubwc_layer_size);
rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
/* at this point, the newly created shadow buffer is not referenced
mtx_unlock(&ctx->screen->lock);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
- fd_batch_flush(batch, false);
+ fd_batch_flush(batch);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
- fd_batch_sync(batch);
fd_batch_reference(&batches[batch->idx], NULL);
}
assert(rsc->batch_mask == 0);
} else if (write_batch) {
- fd_batch_flush(write_batch, true);
+ fd_batch_flush(write_batch);
}
fd_batch_reference(&write_batch, NULL);
ptrans->level = level;
ptrans->usage = usage;
ptrans->box = *box;
- ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->cpp;
+ ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->layout.cpp;
ptrans->layer_stride = fd_resource_layer_stride(rsc, level);
/* we always need a staging texture for tiled buffers:
* splitting a batch.. for ex, mid-frame texture uploads to a tiled
* texture.
*/
- if (rsc->tile_mode) {
+ if (rsc->layout.tile_mode) {
struct fd_resource *staging_rsc;
staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
// TODO for PIPE_TRANSFER_READ, need to do untiling blit..
trans->staging_prsc = &staging_rsc->base;
trans->base.stride = util_format_get_nblocksx(format,
- staging_slice->pitch) * staging_rsc->cpp;
+ staging_slice->pitch) * staging_rsc->layout.cpp;
trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
trans->staging_box = *box;
trans->staging_box.x = 0;
if (usage & PIPE_TRANSFER_READ) {
fd_blit_to_staging(ctx, trans);
- struct fd_batch *batch = NULL;
-
- fd_context_lock(ctx);
- fd_batch_reference_locked(&batch, staging_rsc->write_batch);
- fd_context_unlock(ctx);
-
- /* we can't fd_bo_cpu_prep() until the blit to staging
- * is submitted to kernel.. in that case write_batch
- * wouldn't be NULL yet:
- */
- if (batch) {
- fd_batch_sync(batch);
- fd_batch_reference(&batch, NULL);
- }
-
fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
DRM_FREEDRENO_PREP_READ);
}
fd_resource_slice(staging_rsc, 0);
trans->staging_prsc = &staging_rsc->base;
trans->base.stride = util_format_get_nblocksx(format,
- staging_slice->pitch) * staging_rsc->cpp;
+ staging_slice->pitch) * staging_rsc->layout.cpp;
trans->base.layer_stride =
fd_resource_layer_stride(staging_rsc, 0);
trans->staging_box = *box;
buf = fd_bo_map(rsc->bo);
offset =
box->y / util_format_get_blockheight(format) * ptrans->stride +
- box->x / util_format_get_blockwidth(format) * rsc->cpp +
+ box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
fd_resource_offset(rsc, level, box->z);
if (usage & PIPE_TRANSFER_WRITE)
static uint64_t
fd_resource_modifier(struct fd_resource *rsc)
{
- if (!rsc->tile_mode)
+ if (!rsc->layout.tile_mode)
return DRM_FORMAT_MOD_LINEAR;
- if (rsc->ubwc_size)
+ if (rsc->layout.ubwc_layer_size)
return DRM_FORMAT_MOD_QCOM_COMPRESSED;
/* TODO invent a modifier for tiled but not UBWC buffers: */
handle->modifier = fd_resource_modifier(rsc);
return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
- fd_resource_slice(rsc, 0)->pitch * rsc->cpp, handle);
+ fd_resource_slice(rsc, 0)->pitch * rsc->layout.cpp, handle);
}
static uint32_t
/* in layer_first layout, the level (slice) contains just one
* layer (since in fact the layer contains the slices)
*/
- uint32_t layers_in_level = rsc->layer_first ? 1 : prsc->array_size;
+ uint32_t layers_in_level = rsc->layout.layer_first ? 1 : prsc->array_size;
for (level = 0; level <= prsc->last_level; level++) {
struct fdl_slice *slice = fd_resource_slice(rsc, level);
if (prsc->target == PIPE_TEXTURE_3D && (
level == 1 ||
(level > 1 && fd_resource_slice(rsc, level - 1)->size0 > 0xf000)))
- slice->size0 = align(blocks * rsc->cpp, alignment);
- else if (level == 0 || rsc->layer_first || alignment == 1)
- slice->size0 = align(blocks * rsc->cpp, alignment);
+ slice->size0 = align(blocks * rsc->layout.cpp, alignment);
+ else if (level == 0 || rsc->layout.layer_first || alignment == 1)
+ slice->size0 = align(blocks * rsc->layout.cpp, alignment);
else
slice->size0 = fd_resource_slice(rsc, level - 1)->size0;
if (is_a4xx(screen)) {
switch (rsc->base.target) {
case PIPE_TEXTURE_3D:
- rsc->layer_first = false;
+ rsc->layout.layer_first = false;
break;
default:
- rsc->layer_first = true;
+ rsc->layout.layer_first = true;
alignment = 1;
break;
}
realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
}
-// TODO common helper?
-static bool
-has_depth(enum pipe_format format)
+/* Initialize the generation-independent fields of rsc->layout (pixel
+ * dimensions and bytes-per-block) from the pipe_resource template.
+ * NOTE(review): intended to run before the per-generation setup_slices()
+ * callback consumes layout.cpp -- confirm against callers.
+ */
+static void
+fd_resource_layout_init(struct pipe_resource *prsc)
{
- switch (format) {
- case PIPE_FORMAT_Z16_UNORM:
- case PIPE_FORMAT_Z32_UNORM:
- case PIPE_FORMAT_Z32_FLOAT:
- case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
- case PIPE_FORMAT_Z24_UNORM_S8_UINT:
- case PIPE_FORMAT_S8_UINT_Z24_UNORM:
- case PIPE_FORMAT_Z24X8_UNORM:
- case PIPE_FORMAT_X8Z24_UNORM:
- return true;
- default:
- return false;
- }
+ struct fd_resource *rsc = fd_resource(prsc);
+ struct fdl_layout *layout = &rsc->layout;
+
+ /* Mirror the template's base-level dimensions into the layout: */
+ layout->width0 = prsc->width0;
+ layout->height0 = prsc->height0;
+ layout->depth0 = prsc->depth0;
+
+ /* cpp is bytes per format block, scaled by the MSAA sample count so
+ * multisampled surfaces account for all samples in their pitch/size:
+ */
+ layout->cpp = util_format_get_blocksize(prsc->format);
+ layout->cpp *= fd_resource_nr_samples(prsc);
}
/**
return NULL;
*prsc = *tmpl;
+ fd_resource_layout_init(prsc);
#define LINEAR \
(PIPE_BIND_SCANOUT | \
if (tmpl->bind & LINEAR)
linear = true;
+ if (fd_mesa_debug & FD_DBG_NOTILE)
+ linear = true;
+
/* Normally, for non-shared buffers, allow buffer compression if
* not shared, otherwise only allow if QCOM_COMPRESSED modifier
* is requested:
if (screen->tile_mode &&
(tmpl->target != PIPE_BUFFER) &&
!linear) {
- rsc->tile_mode = screen->tile_mode(prsc);
+ rsc->layout.tile_mode = screen->tile_mode(prsc);
}
util_range_init(&rsc->valid_buffer_range);
rsc->internal_format = format;
- rsc->cpp = util_format_get_blocksize(format);
- rsc->cpp *= fd_resource_nr_samples(prsc);
-
- assert(rsc->cpp);
-
- // XXX probably need some extra work if we hit rsc shadowing path w/ lrz..
- if ((is_a5xx(screen) || is_a6xx(screen)) &&
- (fd_mesa_debug & FD_DBG_LRZ) && has_depth(format)) {
- const uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
- DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */
- unsigned lrz_pitch = align(DIV_ROUND_UP(tmpl->width0, 8), 64);
- unsigned lrz_height = DIV_ROUND_UP(tmpl->height0, 8);
-
- /* LRZ buffer is super-sampled: */
- switch (prsc->nr_samples) {
- case 4:
- lrz_pitch *= 2;
- case 2:
- lrz_height *= 2;
- }
- unsigned size = lrz_pitch * lrz_height * 2;
-
- size += 0x1000; /* for GRAS_LRZ_FAST_CLEAR_BUFFER */
-
- rsc->lrz_height = lrz_height;
- rsc->lrz_width = lrz_pitch;
- rsc->lrz_pitch = lrz_pitch;
- rsc->lrz = fd_bo_new(screen->dev, size, flags, "lrz");
+ if (prsc->target == PIPE_BUFFER) {
+ assert(prsc->format == PIPE_FORMAT_R8_UNORM);
+ size = prsc->width0;
+ fdl_layout_buffer(&rsc->layout, size);
+ } else {
+ size = screen->setup_slices(rsc);
}
- size = screen->setup_slices(rsc);
-
- if (allow_ubwc && screen->fill_ubwc_buffer_sizes && rsc->tile_mode)
+ if (allow_ubwc && screen->fill_ubwc_buffer_sizes && rsc->layout.tile_mode)
size += screen->fill_ubwc_buffer_sizes(rsc);
/* special case for hw-query buffer, which we need to allocate before we
return prsc;
}
- if (rsc->layer_first) {
- rsc->layer_size = align(size, 4096);
- size = rsc->layer_size * prsc->array_size;
+ /* Set the layer size if the (non-a6xx) backend hasn't done so. */
+ if (rsc->layout.layer_first && !rsc->layout.layer_size) {
+ rsc->layout.layer_size = align(size, 4096);
+ size = rsc->layout.layer_size * prsc->array_size;
}
+ if (fd_mesa_debug & FD_DBG_LAYOUT)
+ fdl_dump_layout(&rsc->layout);
+
realloc_bo(rsc, size);
if (!rsc->bo)
goto fail;
return NULL;
*prsc = *tmpl;
+ fd_resource_layout_init(prsc);
pipe_reference_init(&prsc->reference, 1);
goto fail;
rsc->internal_format = tmpl->format;
- rsc->cpp = util_format_get_blocksize(tmpl->format);
- rsc->cpp *= fd_resource_nr_samples(prsc);
- slice->pitch = handle->stride / rsc->cpp;
+ slice->pitch = handle->stride / rsc->layout.cpp;
slice->offset = handle->offset;
slice->size0 = handle->stride * prsc->height0;
goto fail;
}
- assert(rsc->cpp);
+ assert(rsc->layout.cpp);
if (screen->ro) {
rsc->scanout =