diff --git a/src/gallium/drivers/freedreno/freedreno_resource.c b/src/gallium/drivers/freedreno/freedreno_resource.c
index fcc8fc66189..a4933acdf3b 100644
--- a/src/gallium/drivers/freedreno/freedreno_resource.c
+++ b/src/gallium/drivers/freedreno/freedreno_resource.c
@@ -24,9 +24,9 @@
  * Rob Clark
  */
 
-#include "util/u_format.h"
-#include "util/u_format_rgtc.h"
-#include "util/u_format_zs.h"
+#include "util/format/u_format.h"
+#include "util/format/u_format_rgtc.h"
+#include "util/format/u_format_zs.h"
 #include "util/u_inlines.h"
 #include "util/u_transfer.h"
 #include "util/u_string.h"
@@ -61,59 +61,106 @@
 /**
  * Go through the entire state and see if the resource is bound
  * anywhere. If it is, mark the relevant state as dirty. This is
- * called on realloc_bo to ensure the neccessary state is re-
+ * called on realloc_bo to ensure the necessary state is re-
  * emitted so the GPU looks at the new backing bo.
  */
 static void
-rebind_resource(struct fd_context *ctx, struct pipe_resource *prsc)
+rebind_resource_in_ctx(struct fd_context *ctx, struct fd_resource *rsc)
 {
+	struct pipe_resource *prsc = &rsc->base;
+
 	/* VBOs */
-	for (unsigned i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
-		if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
-			ctx->dirty |= FD_DIRTY_VTXBUF;
+	if (rsc->dirty & FD_DIRTY_VTXBUF) {
+		struct fd_vertexbuf_stateobj *vb = &ctx->vtx.vertexbuf;
+		for (unsigned i = 0; i < vb->count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
+			if (vb->vb[i].buffer.resource == prsc)
+				ctx->dirty |= FD_DIRTY_VTXBUF;
+		}
 	}
 
+	const enum fd_dirty_3d_state per_stage_dirty =
+		FD_DIRTY_CONST | FD_DIRTY_TEX | FD_DIRTY_IMAGE | FD_DIRTY_SSBO;
+
+	if (!(rsc->dirty & per_stage_dirty))
+		return;
+
 	/* per-shader-stage resources: */
 	for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
 		/* Constbufs.. note that constbuf[0] is normal uniforms emitted in
 		 * cmdstream rather than by pointer..
 		 */
-		const unsigned num_ubos = util_last_bit(ctx->constbuf[stage].enabled_mask);
-		for (unsigned i = 1; i < num_ubos; i++) {
-			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_CONST)
-				break;
-			if (ctx->constbuf[stage].cb[i].buffer == prsc)
-				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
+		if ((rsc->dirty & FD_DIRTY_CONST) &&
+				!(ctx->dirty_shader[stage] & FD_DIRTY_CONST)) {
+			struct fd_constbuf_stateobj *cb = &ctx->constbuf[stage];
+			const unsigned num_ubos = util_last_bit(cb->enabled_mask);
+			for (unsigned i = 1; i < num_ubos; i++) {
+				if (cb->cb[i].buffer == prsc) {
+					ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
+					ctx->dirty |= FD_DIRTY_CONST;
+					break;
+				}
+			}
 		}
 
 		/* Textures */
-		for (unsigned i = 0; i < ctx->tex[stage].num_textures; i++) {
-			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_TEX)
-				break;
-			if (ctx->tex[stage].textures[i] && (ctx->tex[stage].textures[i]->texture == prsc))
-				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
+		if ((rsc->dirty & FD_DIRTY_TEX) &&
+				!(ctx->dirty_shader[stage] & FD_DIRTY_TEX)) {
+			struct fd_texture_stateobj *tex = &ctx->tex[stage];
+			for (unsigned i = 0; i < tex->num_textures; i++) {
+				if (tex->textures[i] && (tex->textures[i]->texture == prsc)) {
+					ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
+					ctx->dirty |= FD_DIRTY_TEX;
+					break;
+				}
+			}
 		}
 
 		/* Images */
-		const unsigned num_images = util_last_bit(ctx->shaderimg[stage].enabled_mask);
-		for (unsigned i = 0; i < num_images; i++) {
-			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_IMAGE)
-				break;
-			if (ctx->shaderimg[stage].si[i].resource == prsc)
-				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_IMAGE;
+		if ((rsc->dirty & FD_DIRTY_IMAGE) &&
+				!(ctx->dirty_shader[stage] & FD_DIRTY_IMAGE)) {
+			struct fd_shaderimg_stateobj *si = &ctx->shaderimg[stage];
+			const unsigned num_images = util_last_bit(si->enabled_mask);
+			for (unsigned i = 0; i < num_images; i++) {
+				if (si->si[i].resource == prsc) {
+					ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_IMAGE;
+					ctx->dirty |= FD_DIRTY_IMAGE;
+					break;
+				}
+			}
 		}
 
 		/* SSBOs */
-		const unsigned num_ssbos = util_last_bit(ctx->shaderbuf[stage].enabled_mask);
-		for (unsigned i = 0; i < num_ssbos; i++) {
-			if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_SSBO)
-				break;
-			if (ctx->shaderbuf[stage].sb[i].buffer == prsc)
-				ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
+		if ((rsc->dirty & FD_DIRTY_SSBO) &&
+				!(ctx->dirty_shader[stage] & FD_DIRTY_SSBO)) {
+			struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[stage];
+			const unsigned num_ssbos = util_last_bit(sb->enabled_mask);
+			for (unsigned i = 0; i < num_ssbos; i++) {
+				if (sb->sb[i].buffer == prsc) {
+					ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
+					ctx->dirty |= FD_DIRTY_SSBO;
+					break;
+				}
+			}
 		}
 	}
 }
 
+static void
+rebind_resource(struct fd_resource *rsc)
+{
+	struct fd_screen *screen = fd_screen(rsc->base.screen);
+
+	mtx_lock(&screen->lock);
+	fd_resource_lock(rsc);
+
+	if (rsc->dirty)
+		list_for_each_entry (struct fd_context, ctx, &screen->context_list, node)
+			rebind_resource_in_ctx(ctx, rsc);
+
+	fd_resource_unlock(rsc);
+	mtx_unlock(&screen->lock);
+}
+
 static void
 realloc_bo(struct fd_resource *rsc, uint32_t size)
 {
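The rebind_resource() split above is worth pausing on: rsc->dirty (maintained at bind time elsewhere in the driver) summarizes which kinds of binding points currently reference the resource, so the screen-wide walk can skip whole state blocks — and entire contexts — that never bound it. A minimal sketch of that filter pattern, using hypothetical toy types rather than the real fd_context/fd_resource structs:

#include <stdint.h>

enum toy_dirty {
	TOY_DIRTY_VTXBUF = 1 << 0,
	TOY_DIRTY_TEX    = 1 << 1,
};

struct toy_resource {
	uint32_t dirty;              /* binding types that reference us */
};

struct toy_context {
	uint32_t dirty;              /* state to re-emit before next draw */
	struct toy_resource *tex[8];
};

static void
toy_rebind_in_ctx(struct toy_context *ctx, struct toy_resource *rsc)
{
	if (!(rsc->dirty & TOY_DIRTY_TEX))
		return;              /* never bound as a texture: skip the scan */

	for (unsigned i = 0; i < 8; i++) {
		if (ctx->tex[i] == rsc) {
			ctx->dirty |= TOY_DIRTY_TEX;
			break;
		}
	}
}

Note the real function also raises ctx->dirty alongside ctx->dirty_shader[stage], since both masks gate state emission.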
@@ -132,7 +179,20 @@ realloc_bo(struct fd_resource *rsc, uint32_t size)
 	fd_bo_del(rsc->bo);
 
 	rsc->bo = fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x",
-			prsc->width0, prsc->height0, prsc->depth0, rsc->cpp, prsc->bind);
+			prsc->width0, prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);
+
+	/* Zero out the UBWC area on allocation. This fixes intermittent failures
+	 * with UBWC, which I suspect are due to the HW having a hard time
+	 * interpreting arbitrary values populating the flags buffer when the BO
+	 * was recycled through the bo cache (instead of fresh allocations from
+	 * the kernel, which are zeroed). sleep(1) in this spot didn't work
+	 * around the issue, but any memset value seems to.
+	 */
+	if (rsc->layout.ubwc) {
+		void *buf = fd_bo_map(rsc->bo);
+		memset(buf, 0, rsc->layout.slices[0].offset);
+	}
+
 	rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
 	util_range_set_empty(&rsc->valid_buffer_range);
 	fd_bc_invalidate_resource(rsc, true);
@@ -223,10 +283,7 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
 	/* TODO valid_buffer_range?? */
 	swap(rsc->bo,          shadow->bo);
 	swap(rsc->write_batch, shadow->write_batch);
-	swap(rsc->offset,      shadow->offset);
-	swap(rsc->ubwc_offset, shadow->ubwc_offset);
-	swap(rsc->ubwc_pitch,  shadow->ubwc_pitch);
-	swap(rsc->ubwc_size,   shadow->ubwc_size);
+	swap(rsc->layout,      shadow->layout);
 	rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
 
 	/* at this point, the newly created shadow buffer is not referenced
@@ -268,7 +325,10 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
 			set_box(box.height, u_minify(prsc->height0, l));
 			set_box(box.depth,  u_minify(prsc->depth0, l));
 
-			do_blit(ctx, &blit, fallback);
+			for (int i = 0; i < prsc->array_size; i++) {
+				set_box(box.z, i);
+				do_blit(ctx, &blit, fallback);
+			}
 		}
 
 	/* deal w/ current level specially, since we might need to split
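One behavioral fix hides in the hunk above: when shadowing, the content copy now blits every array layer of each miplevel instead of a single box, so array textures no longer lose layers 1..N-1 across a shadow. A toy sketch of the iteration shape (stand-in types and a callback standing in for the real set_box()/do_blit() helpers):

struct toy_box { int z, width, height; };

static void
copy_all_layers(int last_level, int array_size,
		void (*blit_cb)(const struct toy_box *box))
{
	for (int l = 0; l <= last_level; l++) {
		struct toy_box box = { 0, 0, 0 };
		/* ... width/height would be minified per level here ... */
		for (int i = 0; i < array_size; i++) {
			box.z = i;           /* one blit per array layer */
			blit_cb(&box);
		}
	}
}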
@@ -327,17 +387,7 @@ fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc)
 	/* shadow should not fail in any cases where we need to uncompress: */
 	debug_assert(success);
 
-	/*
-	 * TODO what if rsc is used in other contexts, we don't currently
-	 * have a good way to rebind_resource() in other contexts.  And an
-	 * app that is reading one resource in multiple contexts, isn't
-	 * going to expect that the resource is modified.
-	 *
-	 * Hopefully the edge cases where we need to uncompress are rare
-	 * enough that they mostly only show up in deqp.
-	 */
-
-	rebind_resource(ctx, &rsc->base);
+	rebind_resource(rsc);
 }
 
 static struct fd_resource *
@@ -419,7 +469,7 @@ static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
 	struct fd_resource *rsc = fd_resource(ptrans->resource);
 
 	if (ptrans->resource->target == PIPE_BUFFER)
-		util_range_add(&rsc->valid_buffer_range,
+		util_range_add(&rsc->base, &rsc->valid_buffer_range,
 				ptrans->box.x + box->x,
 				ptrans->box.x + box->x + box->width);
 }
@@ -449,15 +499,14 @@ flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
 		mtx_unlock(&ctx->screen->lock);
 
 		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
-			fd_batch_flush(batch, false);
+			fd_batch_flush(batch);
 
 		foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
-			fd_batch_sync(batch);
 			fd_batch_reference(&batches[batch->idx], NULL);
 		}
 		assert(rsc->batch_mask == 0);
 	} else if (write_batch) {
-		fd_batch_flush(write_batch, true);
+		fd_batch_flush(write_batch);
 	}
 
 	fd_batch_reference(&write_batch, NULL);
@@ -489,7 +538,7 @@ fd_resource_transfer_unmap(struct pipe_context *pctx,
 		fd_bo_cpu_fini(rsc->bo);
 	}
 
-	util_range_add(&rsc->valid_buffer_range,
+	util_range_add(&rsc->base, &rsc->valid_buffer_range,
 			ptrans->box.x,
 			ptrans->box.x + ptrans->box.width);
 
@@ -506,7 +555,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 {
 	struct fd_context *ctx = fd_context(pctx);
 	struct fd_resource *rsc = fd_resource(prsc);
-	struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
+	struct fdl_slice *slice = fd_resource_slice(rsc, level);
 	struct fd_transfer *trans;
 	struct pipe_transfer *ptrans;
 	enum pipe_format format = prsc->format;
@@ -530,8 +579,8 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 	ptrans->level = level;
 	ptrans->usage = usage;
 	ptrans->box = *box;
-	ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->cpp;
-	ptrans->layer_stride = rsc->layer_first ? rsc->layer_size : slice->size0;
+	ptrans->stride = slice->pitch;
+	ptrans->layer_stride = fd_resource_layer_stride(rsc, level);
 
 	/* we always need a staging texture for tiled buffers:
 	 *
	 * TODO we might sometimes want to *also* shadow the resource to avoid
	 * splitting a batch.. for ex, mid-frame texture uploads to a tiled
	 * texture.
	 */
-	if (rsc->tile_mode) {
+	if (rsc->layout.tile_mode) {
 		struct fd_resource *staging_rsc;
 
 		staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
 		if (staging_rsc) {
+			struct fdl_slice *staging_slice =
+				fd_resource_slice(staging_rsc, 0);
 			// TODO for PIPE_TRANSFER_READ, need to do untiling blit..
 			trans->staging_prsc = &staging_rsc->base;
-			trans->base.stride = util_format_get_nblocksx(format,
-				staging_rsc->slices[0].pitch) * staging_rsc->cpp;
-			trans->base.layer_stride = staging_rsc->layer_first ?
-				staging_rsc->layer_size : staging_rsc->slices[0].size0;
+			trans->base.stride = staging_slice->pitch;
+			trans->base.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
 			trans->staging_box = *box;
 			trans->staging_box.x = 0;
 			trans->staging_box.y = 0;
@@ -558,21 +607,6 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 			if (usage & PIPE_TRANSFER_READ) {
 				fd_blit_to_staging(ctx, trans);
 
-				struct fd_batch *batch = NULL;
-
-				fd_context_lock(ctx);
-				fd_batch_reference_locked(&batch, staging_rsc->write_batch);
-				fd_context_unlock(ctx);
-
-				/* we can't fd_bo_cpu_prep() until the blit to staging
-				 * is submitted to kernel.. in that case write_batch
-				 * wouldn't be NULL yet:
-				 */
-				if (batch) {
-					fd_batch_sync(batch);
-					fd_batch_reference(&batch, NULL);
-				}
-
 				fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
 						DRM_FREEDRENO_PREP_READ);
 			}
@@ -597,9 +631,13 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 	if (usage & PIPE_TRANSFER_WRITE)
 		op |= DRM_FREEDRENO_PREP_WRITE;
 
+	bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
+
 	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
-		realloc_bo(rsc, fd_bo_size(rsc->bo));
-		rebind_resource(ctx, prsc);
+		if (needs_flush || fd_resource_busy(rsc, op)) {
+			realloc_bo(rsc, fd_bo_size(rsc->bo));
+			rebind_resource(rsc);
+		}
 	} else if ((usage & PIPE_TRANSFER_WRITE) &&
 			prsc->target == PIPE_BUFFER &&
 			!util_ranges_intersect(&rsc->valid_buffer_range,
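The DISCARD_WHOLE_RESOURCE path above now only renames the buffer when that actually buys something: if no batch references the BO and it is not busy, writing in place is cheaper than a fresh allocation plus a screen-wide rebind. A sketch of the policy with hypothetical helpers (bo_busy() standing in for the pending()/fd_resource_busy() checks):

#include <stdbool.h>

struct toy_bo;

extern bool bo_busy(struct toy_bo *bo);      /* non-stalling busy query */
extern struct toy_bo *bo_new(void);

static struct toy_bo *
map_discard(struct toy_bo *bo, bool gpu_pending)
{
	if (!gpu_pending && !bo_busy(bo))
		return bo;       /* idle: safe to scribble over in place */
	return bo_new();         /* busy: rename to a fresh, idle buffer */
}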
@@ -624,9 +662,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 		/* If the GPU is writing to the resource, or if it is reading from the
 		 * resource and we're trying to write to it, flush the renders.
 		 */
-		bool needs_flush = pending(rsc, !!(usage & PIPE_TRANSFER_WRITE));
-		bool busy = needs_flush || (0 != fd_bo_cpu_prep(rsc->bo,
-				ctx->pipe, op | DRM_FREEDRENO_PREP_NOSYNC));
+		bool busy = needs_flush || fd_resource_busy(rsc, op);
 
 		/* if we need to flush/stall, see if we can make a shadow buffer
 		 * to avoid this:
@@ -643,7 +679,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 		if (needs_flush && fd_try_shadow_resource(ctx, rsc, level, box,
 						DRM_FORMAT_MOD_LINEAR)) {
 			needs_flush = busy = false;
-			rebind_resource(ctx, prsc);
+			rebind_resource(rsc);
 			ctx->stats.shadow_uploads++;
 		} else {
 			struct fd_resource *staging_rsc;
@@ -660,11 +696,12 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 			 */
 			staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
 			if (staging_rsc) {
+				struct fdl_slice *staging_slice =
+					fd_resource_slice(staging_rsc, 0);
 				trans->staging_prsc = &staging_rsc->base;
-				trans->base.stride = util_format_get_nblocksx(format,
-					staging_rsc->slices[0].pitch) * staging_rsc->cpp;
-				trans->base.layer_stride = staging_rsc->layer_first ?
-					staging_rsc->layer_size : staging_rsc->slices[0].size0;
+				trans->base.stride = staging_slice->pitch;
+				trans->base.layer_stride =
+					fd_resource_layer_stride(staging_rsc, 0);
 				trans->staging_box = *box;
 				trans->staging_box.x = 0;
 				trans->staging_box.y = 0;
@@ -704,7 +741,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 	buf = fd_bo_map(rsc->bo);
 	offset =
 		box->y / util_format_get_blockheight(format) * ptrans->stride +
-		box->x / util_format_get_blockwidth(format) * rsc->cpp +
+		box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
 		fd_resource_offset(rsc, level, box->z);
 
 	if (usage & PIPE_TRANSFER_WRITE)
@@ -731,16 +768,17 @@ fd_resource_destroy(struct pipe_screen *pscreen,
 		renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);
 
 	util_range_destroy(&rsc->valid_buffer_range);
+	simple_mtx_destroy(&rsc->lock);
 	FREE(rsc);
 }
 
 static uint64_t
 fd_resource_modifier(struct fd_resource *rsc)
 {
-	if (!rsc->tile_mode)
+	if (!rsc->layout.tile_mode)
 		return DRM_FORMAT_MOD_LINEAR;
 
-	if (rsc->ubwc_size)
+	if (rsc->layout.ubwc_layer_size)
 		return DRM_FORMAT_MOD_QCOM_COMPRESSED;
 
 	/* TODO invent a modifier for tiled but not UBWC buffers: */
@@ -759,7 +797,7 @@ fd_resource_get_handle(struct pipe_screen *pscreen,
 	handle->modifier = fd_resource_modifier(rsc);
 
 	return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
-			rsc->slices[0].pitch * rsc->cpp, handle);
+			fd_resource_slice(rsc, 0)->pitch, handle);
 }
 
 static uint32_t
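Both staging paths in the hunks above replace the open-coded "layer_first ? layer_size : slice->size0" ternary with fd_resource_layer_stride(). A sketch of what such a helper computes, with a toy layout struct rather than the real fdl_layout:

#include <stdbool.h>
#include <stdint.h>

struct toy_layout {
	bool layer_first;            /* layers outermost (e.g. a4xx+ textures) */
	uint32_t layer_size;         /* per-layer footprint in layer-first mode */
	uint32_t slice_size0[16];    /* per-level layer size otherwise */
};

static uint32_t
toy_layer_stride(const struct toy_layout *l, unsigned level)
{
	if (l->layer_first)
		return l->layer_size;          /* all levels share one stride */
	return l->slice_size0[level];          /* layers packed per level */
}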
@@ -776,17 +814,17 @@ setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format forma
 	/* in layer_first layout, the level (slice) contains just one
 	 * layer (since in fact the layer contains the slices)
 	 */
-	uint32_t layers_in_level = rsc->layer_first ? 1 : prsc->array_size;
+	uint32_t layers_in_level = rsc->layout.layer_first ? 1 : prsc->array_size;
 
 	for (level = 0; level <= prsc->last_level; level++) {
-		struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
+		struct fdl_slice *slice = fd_resource_slice(rsc, level);
 		uint32_t blocks;
 
 		if (layout == UTIL_FORMAT_LAYOUT_ASTC)
-			slice->pitch = width =
-				util_align_npot(width, pitchalign * util_format_get_blockwidth(format));
+			width = util_align_npot(width, pitchalign * util_format_get_blockwidth(format));
 		else
-			slice->pitch = width = align(width, pitchalign);
+			width = align(width, pitchalign);
+		slice->pitch = util_format_get_nblocksx(format, width) * rsc->layout.cpp;
 		slice->offset = size;
 		blocks = util_format_get_nblocks(format, width, height);
 		/* 1d array and 2d array textures must all have the same layer size
 		 * for each miplevel on a3xx. 3d textures can have different layer
 		 * sizes for high levels, but the hw auto-sizer is buggy (or at least
 		 * different than what this code does), so as soon as the layer size
 		 * range gets into range, we stop reducing it.
 		 */
 		if (prsc->target == PIPE_TEXTURE_3D && (
 				level == 1 ||
-				(level > 1 && rsc->slices[level - 1].size0 > 0xf000)))
-			slice->size0 = align(blocks * rsc->cpp, alignment);
-		else if (level == 0 || rsc->layer_first || alignment == 1)
-			slice->size0 = align(blocks * rsc->cpp, alignment);
+				(level > 1 && fd_resource_slice(rsc, level - 1)->size0 > 0xf000)))
+			slice->size0 = align(blocks * rsc->layout.cpp, alignment);
+		else if (level == 0 || rsc->layout.layer_first || alignment == 1)
+			slice->size0 = align(blocks * rsc->layout.cpp, alignment);
 		else
-			slice->size0 = rsc->slices[level - 1].size0;
+			slice->size0 = fd_resource_slice(rsc, level - 1)->size0;
 
 		size += slice->size0 * depth * layers_in_level;
 
@@ -848,10 +886,10 @@ fd_setup_slices(struct fd_resource *rsc)
 	if (is_a4xx(screen)) {
 		switch (rsc->base.target) {
 		case PIPE_TEXTURE_3D:
-			rsc->layer_first = false;
+			rsc->layout.layer_first = false;
 			break;
 		default:
-			rsc->layer_first = true;
+			rsc->layout.layer_first = true;
 			alignment = 1;
 			break;
 		}
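Note the representation change in setup_slices() above: slice->pitch is now stored in bytes rather than pixels, which is why transfer_map() and get_handle() no longer multiply by cpp at the point of use. A worked example with assumed numbers (not taken from any particular GPU): a 100-pixel-wide RGBA8 miplevel with a 64-pixel pitch alignment:

#include <stdio.h>

/* round v up to a multiple of a; a must be a power of two */
#define ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned width = 100, pitchalign = 64, cpp = 4;    /* assumed RGBA8 */
	unsigned width_aligned = ALIGN(width, pitchalign); /* 128 pixels */
	unsigned pitch = width_aligned * cpp;              /* 512 bytes */

	printf("pitch = %u bytes\n", pitch);
	return 0;
}

For block-compressed formats the driver uses util_format_get_nblocksx() instead of the raw width, so blocks-per-row times cpp (bytes per block) still yields a byte pitch.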
@@ -874,23 +912,19 @@ fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
 	realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
 }
 
-// TODO common helper?
-static bool
-has_depth(enum pipe_format format)
+static void
+fd_resource_layout_init(struct pipe_resource *prsc)
 {
-	switch (format) {
-	case PIPE_FORMAT_Z16_UNORM:
-	case PIPE_FORMAT_Z32_UNORM:
-	case PIPE_FORMAT_Z32_FLOAT:
-	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
-	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
-	case PIPE_FORMAT_S8_UINT_Z24_UNORM:
-	case PIPE_FORMAT_Z24X8_UNORM:
-	case PIPE_FORMAT_X8Z24_UNORM:
-		return true;
-	default:
-		return false;
-	}
+	struct fd_resource *rsc = fd_resource(prsc);
+	struct fdl_layout *layout = &rsc->layout;
+
+	layout->width0 = prsc->width0;
+	layout->height0 = prsc->height0;
+	layout->depth0 = prsc->depth0;
+
+	layout->cpp = util_format_get_blocksize(prsc->format);
+	layout->cpp *= fd_resource_nr_samples(prsc);
+	layout->cpp_shift = ffs(layout->cpp) - 1;
 }
 
 /**
@@ -953,6 +987,7 @@ fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
 		return NULL;
 
 	*prsc = *tmpl;
+	fd_resource_layout_init(prsc);
 
 #define LINEAR \
 	(PIPE_BIND_SCANOUT | \
@@ -963,6 +998,9 @@ fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
 	if (tmpl->bind & LINEAR)
 		linear = true;
 
+	if (fd_mesa_debug & FD_DBG_NOTILE)
+		linear = true;
+
 	/* Normally, for non-shared buffers, allow buffer compression if
 	 * not shared, otherwise only allow if QCOM_COMPRESSED modifier
 	 * is requested:
@@ -984,48 +1022,25 @@ fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
 
 	if (screen->tile_mode &&
 			(tmpl->target != PIPE_BUFFER) &&
 			!linear) {
-		rsc->tile_mode = screen->tile_mode(prsc);
+		rsc->layout.tile_mode = screen->tile_mode(prsc);
 	}
 
 	util_range_init(&rsc->valid_buffer_range);
 
-	rsc->internal_format = format;
-	rsc->cpp = util_format_get_blocksize(format);
-	rsc->cpp *= fd_resource_nr_samples(prsc);
-
-	assert(rsc->cpp);
-
-	// XXX probably need some extra work if we hit rsc shadowing path w/ lrz..
-	if ((is_a5xx(screen) || is_a6xx(screen)) &&
-			(fd_mesa_debug & FD_DBG_LRZ) && has_depth(format)) {
-		const uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
-				DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */
-		unsigned lrz_pitch  = align(DIV_ROUND_UP(tmpl->width0, 8), 64);
-		unsigned lrz_height = DIV_ROUND_UP(tmpl->height0, 8);
-
-		/* LRZ buffer is super-sampled: */
-		switch (prsc->nr_samples) {
-		case 4:
-			lrz_pitch *= 2;
-		case 2:
-			lrz_height *= 2;
-		}
+	simple_mtx_init(&rsc->lock, mtx_plain);
 
-		unsigned size = lrz_pitch * lrz_height * 2;
+	rsc->internal_format = format;
 
-		size += 0x1000; /* for GRAS_LRZ_FAST_CLEAR_BUFFER */
+	rsc->layout.ubwc = rsc->layout.tile_mode && is_a6xx(screen) && allow_ubwc;
 
-		rsc->lrz_height = lrz_height;
-		rsc->lrz_width = lrz_pitch;
-		rsc->lrz_pitch = lrz_pitch;
-		rsc->lrz = fd_bo_new(screen->dev, size, flags, "lrz");
+	if (prsc->target == PIPE_BUFFER) {
+		assert(prsc->format == PIPE_FORMAT_R8_UNORM);
+		size = prsc->width0;
+		fdl_layout_buffer(&rsc->layout, size);
+	} else {
+		size = screen->setup_slices(rsc);
 	}
 
-	size = screen->setup_slices(rsc);
-
-	if (allow_ubwc && screen->fill_ubwc_buffer_sizes && rsc->tile_mode)
-		size += screen->fill_ubwc_buffer_sizes(rsc);
-
 	/* special case for hw-query buffer, which we need to allocate before we
 	 * know the size:
 	 */
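fd_resource_layout_init() above also caches cpp_shift = ffs(cpp) - 1, letting later address math shift instead of multiply. That identity only holds because cpp is a power of two for the formats the driver lays out; a minimal demo (ffs() is POSIX, from <strings.h>):

#include <strings.h>
#include <assert.h>
#include <stdint.h>

static uint32_t
byte_offset(uint32_t pixel_index, uint32_t cpp)
{
	uint32_t cpp_shift = ffs(cpp) - 1;   /* 1->0, 2->1, 4->2, 8->3 */

	assert(cpp == (1u << cpp_shift));    /* power-of-two assumption */
	return pixel_index << cpp_shift;     /* same as pixel_index * cpp */
}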
@@ -1035,11 +1050,15 @@ fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
 		return prsc;
 	}
 
-	if (rsc->layer_first) {
-		rsc->layer_size = align(size, 4096);
-		size = rsc->layer_size * prsc->array_size;
+	/* Set the layer size if the (non-a6xx) backend hasn't done so. */
+	if (rsc->layout.layer_first && !rsc->layout.layer_size) {
+		rsc->layout.layer_size = align(size, 4096);
+		size = rsc->layout.layer_size * prsc->array_size;
 	}
 
+	if (fd_mesa_debug & FD_DBG_LAYOUT)
+		fdl_dump_layout(&rsc->layout);
+
 	realloc_bo(rsc, size);
 	if (!rsc->bo)
 		goto fail;
@@ -1058,26 +1077,6 @@ fd_resource_create(struct pipe_screen *pscreen,
 	return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
 }
 
-static bool
-is_supported_modifier(struct pipe_screen *pscreen, enum pipe_format pfmt,
-		uint64_t mod)
-{
-	int count;
-
-	/* Get the count of supported modifiers: */
-	pscreen->query_dmabuf_modifiers(pscreen, pfmt, 0, NULL, NULL, &count);
-
-	/* Get the supported modifiers: */
-	uint64_t modifiers[count];
-	pscreen->query_dmabuf_modifiers(pscreen, pfmt, count, modifiers, NULL, &count);
-
-	for (int i = 0; i < count; i++)
-		if (modifiers[i] == mod)
-			return true;
-
-	return false;
-}
-
 /**
  * Create a texture from a winsys_handle. The handle is often created in
  * another process by first creating a pipe texture and then calling
@@ -1090,9 +1089,9 @@ fd_resource_from_handle(struct pipe_screen *pscreen,
 {
 	struct fd_screen *screen = fd_screen(pscreen);
 	struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
-	struct fd_resource_slice *slice = &rsc->slices[0];
+	struct fdl_slice *slice = fd_resource_slice(rsc, 0);
 	struct pipe_resource *prsc = &rsc->base;
-	uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw;
+	uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw * rsc->layout.cpp;
 
 	DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
 			"nr_samples=%u, usage=%u, bind=%x, flags=%x",
@@ -1105,6 +1104,7 @@ fd_resource_from_handle(struct pipe_screen *pscreen,
 		return NULL;
 
 	*prsc = *tmpl;
+	fd_resource_layout_init(prsc);
 
 	pipe_reference_init(&prsc->reference, 1);
 
@@ -1112,35 +1112,25 @@ fd_resource_from_handle(struct pipe_screen *pscreen,
 
 	util_range_init(&rsc->valid_buffer_range);
 
+	simple_mtx_init(&rsc->lock, mtx_plain);
+
 	rsc->bo = fd_screen_bo_from_handle(pscreen, handle);
 	if (!rsc->bo)
 		goto fail;
 
 	rsc->internal_format = tmpl->format;
-	rsc->cpp = util_format_get_blocksize(tmpl->format);
-	rsc->cpp *= fd_resource_nr_samples(prsc);
-	slice->pitch = handle->stride / rsc->cpp;
+	slice->pitch = handle->stride;
 	slice->offset = handle->offset;
 	slice->size0 = handle->stride * prsc->height0;
 
-	if ((slice->pitch < align(prsc->width0, pitchalign)) ||
+	if ((slice->pitch < align(prsc->width0 * rsc->layout.cpp, pitchalign)) ||
 			(slice->pitch & (pitchalign - 1)))
 		goto fail;
 
-	if (handle->modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) {
-		if (!is_supported_modifier(pscreen, tmpl->format,
-				DRM_FORMAT_MOD_QCOM_COMPRESSED)) {
-			DBG("bad modifier: %"PRIx64, handle->modifier);
-			goto fail;
-		}
-		debug_assert(screen->fill_ubwc_buffer_sizes);
-		screen->fill_ubwc_buffer_sizes(rsc);
-	} else if (handle->modifier &&
-			(handle->modifier != DRM_FORMAT_MOD_INVALID)) {
-		goto fail;
-	}
+	assert(rsc->layout.cpp);
 
-	assert(rsc->cpp);
+	if (screen->layout_resource_for_modifier(rsc, handle->modifier) < 0)
+		goto fail;
 
 	if (screen->ro) {
 		rsc->scanout =
@@ -1243,6 +1233,21 @@ static const struct u_transfer_vtbl transfer_vtbl = {
 		.get_stencil = fd_resource_get_stencil,
 };
 
+static const uint64_t supported_modifiers[] = {
+	DRM_FORMAT_MOD_LINEAR,
+};
+
+static int
+fd_layout_resource_for_modifier(struct fd_resource *rsc, uint64_t modifier)
+{
+	switch (modifier) {
+	case DRM_FORMAT_MOD_LINEAR:
+		return 0;
+	default:
+		return -1;
+	}
+}
+
 void
 fd_resource_screen_init(struct pipe_screen *pscreen)
 {
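The table and fallback hook above replace the removed is_supported_modifier() round-trip through query_dmabuf_modifiers(): import now simply asks the per-generation hook to lay the resource out for the handle's modifier, and a negative return rejects it. A toy version of the lookup such a screen-side check performs (generic C, not the mesa API):

#include <stdbool.h>
#include <stdint.h>

static bool
modifier_supported(const uint64_t *mods, unsigned count, uint64_t want)
{
	for (unsigned i = 0; i < count; i++)
		if (mods[i] == want)
			return true;
	return false;
}

A backend like a6xx would presumably install its own layout_resource_for_modifier to accept DRM_FORMAT_MOD_QCOM_COMPRESSED and size the UBWC metadata, while the default table advertises linear only.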
@@ -1263,6 +1268,12 @@ fd_resource_screen_init(struct pipe_screen *pscreen)
 	if (!screen->setup_slices)
 		screen->setup_slices = fd_setup_slices;
+	if (!screen->layout_resource_for_modifier)
+		screen->layout_resource_for_modifier = fd_layout_resource_for_modifier;
+	if (!screen->supported_modifiers) {
+		screen->supported_modifiers = supported_modifiers;
+		screen->num_supported_modifiers = ARRAY_SIZE(supported_modifiers);
+	}
 }
 
 static void