* Rob Clark <robclark@freedesktop.org>
*/
-#include "util/u_format.h"
-#include "util/u_format_rgtc.h"
-#include "util/u_format_zs.h"
+#include "util/format/u_format.h"
+#include "util/format/u_format_rgtc.h"
+#include "util/format/u_format_zs.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"
#include "util/u_string.h"
#include "util/u_surface.h"
#include "util/set.h"
+#include "util/u_drm.h"
#include "freedreno_resource.h"
#include "freedreno_batch_cache.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"
+#include "drm-uapi/drm_fourcc.h"
#include <errno.h>
/* XXX this should go away, needed for 'struct winsys_handle' */
#include "state_tracker/drm_driver.h"
+/* A private modifier for now, so we have a way to request tiled but not
+ * compressed. It would perhaps be good to get real modifiers for the
+ * tiled formats, but that would first require figuring out the layout(s)
+ * of the tiled modes, and whether they are the same across generations.
+ */
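+/* fourcc_mod_code() (drm_fourcc.h) packs the vendor code into the top 8 bits
+ * of the 64-bit modifier and the vendor-specific value into the low 56 bits,
+ * so this simply claims an arbitrary value in QCOM's private modifier space.
+ */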
+#define FD_FORMAT_MOD_QCOM_TILED fourcc_mod_code(QCOM, 0xffffffff)
+
/**
* Go through the entire state and see if the resource is bound
* anywhere. If it is, mark the relevant state as dirty. This is
ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
}
+ /* Images */
+ const unsigned num_images = util_last_bit(ctx->shaderimg[stage].enabled_mask);
+ for (unsigned i = 0; i < num_images; i++) {
+ if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_IMAGE)
+ break;
+ if (ctx->shaderimg[stage].si[i].resource == prsc)
+ ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_IMAGE;
+ }
+
/* SSBOs */
const unsigned num_ssbos = util_last_bit(ctx->shaderbuf[stage].enabled_mask);
for (unsigned i = 0; i < num_ssbos; i++) {
static void
do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
{
+ struct pipe_context *pctx = &ctx->base;
+
/* TODO size threshold too?? */
- if (!fallback) {
- /* do blit on gpu: */
- fd_blitter_pipe_begin(ctx, false, true, FD_STAGE_BLIT);
- ctx->blit(ctx, blit);
- fd_blitter_pipe_end(ctx);
- } else {
+ if (fallback || !fd_blit(pctx, blit)) {
/* do blit on cpu: */
- util_resource_copy_region(&ctx->base,
+ util_resource_copy_region(pctx,
blit->dst.resource, blit->dst.level, blit->dst.box.x,
blit->dst.box.y, blit->dst.box.z,
blit->src.resource, blit->src.level, &blit->src.box);
}
}
+/**
+ * @rsc: the resource to shadow
+ * @level: the level to discard (ignored if box is NULL)
+ * @box: the box to discard (or NULL if none)
+ * @modifier: the modifier for the new buffer state
+ */
static bool
fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
- unsigned level, const struct pipe_box *box)
+ unsigned level, const struct pipe_box *box, uint64_t modifier)
{
struct pipe_context *pctx = &ctx->base;
struct pipe_resource *prsc = &rsc->base;
if (prsc->target == PIPE_BUFFER)
fallback = true;
- bool whole_level = util_texrange_covers_whole_level(prsc, level,
+ bool discard_whole_level = box && util_texrange_covers_whole_level(prsc, level,
box->x, box->y, box->z, box->width, box->height, box->depth);
/* TODO need to be more clever about current level */
- if ((prsc->target >= PIPE_TEXTURE_2D) && !whole_level)
+ if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
return false;
struct pipe_resource *pshadow =
- pctx->screen->resource_create(pctx->screen, prsc);
+ pctx->screen->resource_create_with_modifiers(pctx->screen,
+ prsc, &modifier, 1);
if (!pshadow)
return false;
/* TODO valid_buffer_range?? */
swap(rsc->bo, shadow->bo);
swap(rsc->write_batch, shadow->write_batch);
+ swap(rsc->offset, shadow->offset);
+ swap(rsc->ubwc_offset, shadow->ubwc_offset);
+ swap(rsc->ubwc_pitch, shadow->ubwc_pitch);
+ swap(rsc->ubwc_size, shadow->ubwc_size);
rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
/* at this point, the newly created shadow buffer is not referenced
/* blit the other levels in their entirety: */
for (unsigned l = 0; l <= prsc->last_level; l++) {
- if (l == level)
+ if (box && l == level)
continue;
/* just blit whole level: */
/* deal w/ current level specially, since we might need to split
* it up into a couple blits:
*/
- if (!whole_level) {
+ if (box && !discard_whole_level) {
set_box(level, level);
switch (prsc->target) {
return true;
}
+/**
+ * Uncompress a UBWC-compressed buffer "in place". This works basically
+ * like resource shadowing: create a new resource, do an uncompress blit,
+ * and swap the state between the shadow and the original resource so it
+ * appears to the state tracker as if nothing changed.
+ */
+void
+fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc)
+{
+ bool success =
+ fd_try_shadow_resource(ctx, rsc, 0, NULL, FD_FORMAT_MOD_QCOM_TILED);
+
+	/* shadow should not fail in any case where we need to uncompress: */
+ debug_assert(success);
+
+	/*
+	 * TODO what if rsc is used in other contexts?  We don't currently
+	 * have a good way to rebind_resource() in other contexts.  And an
+	 * app that is reading one resource in multiple contexts isn't
+	 * going to expect the resource to be modified.
+	 *
+	 * Hopefully the edge cases where we need to uncompress are rare
+	 * enough that they mostly only show up in deqp.
+	 */
+
+ rebind_resource(ctx, &rsc->base);
+}
+
static struct fd_resource *
fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
unsigned level, const struct pipe_box *box)
tmpl.width0 = box->width;
tmpl.height0 = box->height;
- tmpl.depth0 = box->depth;
- tmpl.array_size = 1;
+	/* for array textures, box->depth is the array_size; for 3d
+	 * textures it is the depth.  For example, staging all six faces
+	 * of a cube map gives box->depth == 6, which maps to a 2D array
+	 * with six layers:
+	 */
+ if (tmpl.array_size > 1) {
+ if (tmpl.target == PIPE_TEXTURE_CUBE)
+ tmpl.target = PIPE_TEXTURE_2D_ARRAY;
+ tmpl.array_size = box->depth;
+ tmpl.depth0 = 1;
+ } else {
+ tmpl.array_size = 1;
+ tmpl.depth0 = box->depth;
+ }
tmpl.last_level = 0;
tmpl.bind |= PIPE_BIND_LINEAR;
struct fd_resource *rsc = fd_resource(ptrans->resource);
if (ptrans->resource->target == PIPE_BUFFER)
- util_range_add(&rsc->valid_buffer_range,
+ util_range_add(&rsc->base, &rsc->valid_buffer_range,
ptrans->box.x + box->x,
ptrans->box.x + box->x + box->width);
}
{
struct fd_batch *write_batch = NULL;
- fd_batch_reference(&write_batch, rsc->write_batch);
+ mtx_lock(&ctx->screen->lock);
+ fd_batch_reference_locked(&write_batch, rsc->write_batch);
+ mtx_unlock(&ctx->screen->lock);
if (usage & PIPE_TRANSFER_WRITE) {
struct fd_batch *batch, *batches[32] = {};
mtx_lock(&ctx->screen->lock);
batch_mask = rsc->batch_mask;
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
- fd_batch_reference(&batches[batch->idx], batch);
+ fd_batch_reference_locked(&batches[batch->idx], batch);
mtx_unlock(&ctx->screen->lock);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
- fd_batch_flush(batch, false, false);
+ fd_batch_flush(batch, false);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
fd_batch_sync(batch);
}
assert(rsc->batch_mask == 0);
} else if (write_batch) {
- fd_batch_flush(write_batch, true, false);
+ fd_batch_flush(write_batch, true);
}
fd_batch_reference(&write_batch, NULL);
fd_bo_cpu_fini(rsc->bo);
}
- util_range_add(&rsc->valid_buffer_range,
+ util_range_add(&rsc->base, &rsc->valid_buffer_range,
ptrans->box.x,
ptrans->box.x + ptrans->box.width);
staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
if (staging_rsc) {
+ struct fd_resource_slice *staging_slice =
+ fd_resource_slice(staging_rsc, 0);
// TODO for PIPE_TRANSFER_READ, need to do untiling blit..
trans->staging_prsc = &staging_rsc->base;
trans->base.stride = util_format_get_nblocksx(format,
- staging_rsc->slices[0].pitch) * staging_rsc->cpp;
+ staging_slice->pitch) * staging_rsc->cpp;
trans->base.layer_stride = staging_rsc->layer_first ?
- staging_rsc->layer_size : staging_rsc->slices[0].size0;
+ staging_rsc->layer_size : staging_slice->size0;
trans->staging_box = *box;
trans->staging_box.x = 0;
trans->staging_box.y = 0;
if (usage & PIPE_TRANSFER_READ) {
fd_blit_to_staging(ctx, trans);
- fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
+
+ struct fd_batch *batch = NULL;
+
+ fd_context_lock(ctx);
+ fd_batch_reference_locked(&batch, staging_rsc->write_batch);
+ fd_context_unlock(ctx);
+
+			/* we can't fd_bo_cpu_prep() until the blit to staging
+			 * has been submitted to the kernel.. if it hasn't been
+			 * yet, write_batch will still be non-NULL, so sync on
+			 * it first:
+			 */
+ if (batch) {
+ fd_batch_sync(batch);
+ fd_batch_reference(&batch, NULL);
+ }
+
+ fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
+ DRM_FREEDRENO_PREP_READ);
}
buf = fd_bo_map(staging_rsc->bo);
struct fd_batch *write_batch = NULL;
/* hold a reference, so it doesn't disappear under us: */
- fd_batch_reference(&write_batch, rsc->write_batch);
+ fd_context_lock(ctx);
+ fd_batch_reference_locked(&write_batch, rsc->write_batch);
+ fd_context_unlock(ctx);
if ((usage & PIPE_TRANSFER_WRITE) && write_batch &&
write_batch->back_blit) {
/* try shadowing only if it avoids a flush, otherwise staging would
* be better:
*/
- if (needs_flush && fd_try_shadow_resource(ctx, rsc, level, box)) {
+ if (needs_flush && fd_try_shadow_resource(ctx, rsc, level,
+ box, DRM_FORMAT_MOD_LINEAR)) {
needs_flush = busy = false;
rebind_resource(ctx, prsc);
ctx->stats.shadow_uploads++;
*/
staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
if (staging_rsc) {
+ struct fd_resource_slice *staging_slice =
+ fd_resource_slice(staging_rsc, 0);
trans->staging_prsc = &staging_rsc->base;
trans->base.stride = util_format_get_nblocksx(format,
- staging_rsc->slices[0].pitch) * staging_rsc->cpp;
+ staging_slice->pitch) * staging_rsc->cpp;
trans->base.layer_stride = staging_rsc->layer_first ?
- staging_rsc->layer_size : staging_rsc->slices[0].size0;
+ staging_rsc->layer_size : staging_slice->size0;
trans->staging_box = *box;
trans->staging_box.x = 0;
trans->staging_box.y = 0;
fd_bc_invalidate_resource(rsc, true);
if (rsc->bo)
fd_bo_del(rsc->bo);
+ if (rsc->scanout)
+ renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);
+
util_range_destroy(&rsc->valid_buffer_range);
FREE(rsc);
}
-static boolean
+static uint64_t
+fd_resource_modifier(struct fd_resource *rsc)
+{
+ if (!rsc->tile_mode)
+ return DRM_FORMAT_MOD_LINEAR;
+
+ if (rsc->ubwc_size)
+ return DRM_FORMAT_MOD_QCOM_COMPRESSED;
+
+ /* TODO invent a modifier for tiled but not UBWC buffers: */
+ return DRM_FORMAT_MOD_INVALID;
+}
+
+static bool
fd_resource_get_handle(struct pipe_screen *pscreen,
struct pipe_context *pctx,
struct pipe_resource *prsc,
{
struct fd_resource *rsc = fd_resource(prsc);
- return fd_screen_bo_get_handle(pscreen, rsc->bo,
- rsc->slices[0].pitch * rsc->cpp, handle);
+ handle->modifier = fd_resource_modifier(rsc);
+
+ return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
+ fd_resource_slice(rsc, 0)->pitch * rsc->cpp, handle);
}
static uint32_t
*/
if (prsc->target == PIPE_TEXTURE_3D && (
level == 1 ||
- (level > 1 && rsc->slices[level - 1].size0 > 0xf000)))
+ (level > 1 && fd_resource_slice(rsc, level - 1)->size0 > 0xf000)))
slice->size0 = align(blocks * rsc->cpp, alignment);
else if (level == 0 || rsc->layer_first || alignment == 1)
slice->size0 = align(blocks * rsc->cpp, alignment);
else
- slice->size0 = rsc->slices[level - 1].size0;
+ slice->size0 = fd_resource_slice(rsc, level - 1)->size0;
size += slice->size0 * depth * layers_in_level;
* Create a new texture object, using the given template info.
*/
static struct pipe_resource *
-fd_resource_create(struct pipe_screen *pscreen,
- const struct pipe_resource *tmpl)
+fd_resource_create_with_modifiers(struct pipe_screen *pscreen,
+ const struct pipe_resource *tmpl,
+ const uint64_t *modifiers, int count)
{
struct fd_screen *screen = fd_screen(pscreen);
- struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
- struct pipe_resource *prsc = &rsc->base;
+ struct fd_resource *rsc;
+ struct pipe_resource *prsc;
enum pipe_format format = tmpl->format;
uint32_t size;
+	/* When using kmsro, scanout buffers are allocated on the display device.
+	 * create_with_modifiers() doesn't give us usage flags, so we have to
+	 * assume that all calls with explicit modifiers could be scanned out.
+	 */
+ if (screen->ro &&
+ ((tmpl->bind & PIPE_BIND_SCANOUT) ||
+ !(count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID))) {
+ struct pipe_resource scanout_templat = *tmpl;
+ struct renderonly_scanout *scanout;
+ struct winsys_handle handle;
+
+ /* apply freedreno alignment requirement */
+ scanout_templat.width0 = align(tmpl->width0, screen->gmem_alignw);
+
+ scanout = renderonly_scanout_for_resource(&scanout_templat,
+ screen->ro, &handle);
+ if (!scanout)
+ return NULL;
+
+ renderonly_scanout_destroy(scanout, screen->ro);
+
+ assert(handle.type == WINSYS_HANDLE_TYPE_FD);
+ rsc = fd_resource(pscreen->resource_from_handle(pscreen, tmpl,
+ &handle,
+ PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
+ close(handle.handle);
+ if (!rsc)
+ return NULL;
+
+ return &rsc->base;
+ }
+
+ rsc = CALLOC_STRUCT(fd_resource);
+ prsc = &rsc->base;
+
DBG("%p: target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
"nr_samples=%u, usage=%u, bind=%x, flags=%x", prsc,
tmpl->target, util_format_name(format),
PIPE_BIND_LINEAR | \
PIPE_BIND_DISPLAY_TARGET)
- if (screen->tile_mode &&
- (tmpl->target != PIPE_BUFFER) &&
- (tmpl->bind & PIPE_BIND_SAMPLER_VIEW) &&
- !(tmpl->bind & LINEAR)) {
- rsc->tile_mode = screen->tile_mode(tmpl);
- }
+ bool linear = drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
+ if (tmpl->bind & LINEAR)
+ linear = true;
+
+	/* Allow buffer compression for non-shared buffers; for shared
+	 * buffers, only allow it if the QCOM_COMPRESSED modifier is
+	 * explicitly requested:
+	 *
+	 * TODO we should probably also limit tiled in a similar way,
+	 * except we don't have a format modifier for tiled.  (We probably
+	 * should.)
+	 */
+ bool allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count);
+ if (tmpl->bind & PIPE_BIND_SHARED)
+ allow_ubwc = drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count);
+
+ allow_ubwc &= !(fd_mesa_debug & FD_DBG_NOUBWC);
pipe_reference_init(&prsc->reference, 1);
prsc->screen = pscreen;
+ if (screen->tile_mode &&
+ (tmpl->target != PIPE_BUFFER) &&
+ !linear) {
+ rsc->tile_mode = screen->tile_mode(prsc);
+ }
+
util_range_init(&rsc->valid_buffer_range);
rsc->internal_format = format;
rsc->cpp = util_format_get_blocksize(format);
- prsc->nr_samples = MAX2(1, prsc->nr_samples);
- rsc->cpp *= prsc->nr_samples;
+ rsc->cpp *= fd_resource_nr_samples(prsc);
assert(rsc->cpp);
size = screen->setup_slices(rsc);
+ if (allow_ubwc && screen->fill_ubwc_buffer_sizes && rsc->tile_mode)
+ size += screen->fill_ubwc_buffer_sizes(rsc);
+
/* special case for hw-query buffer, which we need to allocate before we
* know the size:
*/
return NULL;
}
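+/* resource_create() without modifiers is implemented on top of the
+ * modifiers path, passing a single DRM_FORMAT_MOD_INVALID (ie. no explicit
+ * modifier requested), which lets the driver pick tiling/UBWC itself:
+ */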
+static struct pipe_resource *
+fd_resource_create(struct pipe_screen *pscreen,
+ const struct pipe_resource *tmpl)
+{
+ const uint64_t mod = DRM_FORMAT_MOD_INVALID;
+ return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
+}
+
+static bool
+is_supported_modifier(struct pipe_screen *pscreen, enum pipe_format pfmt,
+ uint64_t mod)
+{
+ int count;
+
+ /* Get the count of supported modifiers: */
+ pscreen->query_dmabuf_modifiers(pscreen, pfmt, 0, NULL, NULL, &count);
+
+ /* Get the supported modifiers: */
+ uint64_t modifiers[count];
+ pscreen->query_dmabuf_modifiers(pscreen, pfmt, count, modifiers, NULL, &count);
+
+ for (int i = 0; i < count; i++)
+ if (modifiers[i] == mod)
+ return true;
+
+ return false;
+}
+
/**
* Create a texture from a winsys_handle. The handle is often created in
* another process by first creating a pipe texture and then calling
const struct pipe_resource *tmpl,
struct winsys_handle *handle, unsigned usage)
{
+ struct fd_screen *screen = fd_screen(pscreen);
struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
- struct fd_resource_slice *slice = &rsc->slices[0];
+ struct fd_resource_slice *slice = fd_resource_slice(rsc, 0);
struct pipe_resource *prsc = &rsc->base;
uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw;
if (!rsc->bo)
goto fail;
- prsc->nr_samples = MAX2(1, prsc->nr_samples);
rsc->internal_format = tmpl->format;
- rsc->cpp = prsc->nr_samples * util_format_get_blocksize(tmpl->format);
+ rsc->cpp = util_format_get_blocksize(tmpl->format);
+ rsc->cpp *= fd_resource_nr_samples(prsc);
slice->pitch = handle->stride / rsc->cpp;
slice->offset = handle->offset;
slice->size0 = handle->stride * prsc->height0;
(slice->pitch & (pitchalign - 1)))
goto fail;
+ if (handle->modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) {
+ if (!is_supported_modifier(pscreen, tmpl->format,
+ DRM_FORMAT_MOD_QCOM_COMPRESSED)) {
+ DBG("bad modifier: %"PRIx64, handle->modifier);
+ goto fail;
+ }
+ debug_assert(screen->fill_ubwc_buffer_sizes);
+ screen->fill_ubwc_buffer_sizes(rsc);
+ } else if (handle->modifier &&
+ (handle->modifier != DRM_FORMAT_MOD_INVALID)) {
+ goto fail;
+ }
+
assert(rsc->cpp);
+ if (screen->ro) {
+ rsc->scanout =
+ renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
+ /* failure is expected in some cases.. */
+ }
+
+ rsc->valid = true;
+
return prsc;
fail:
return true;
}
-/**
- * Optimal hardware path for blitting pixels.
- * Scaling, format conversion, up- and downsampling (resolve) are allowed.
- */
-static void
-fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
-{
- struct fd_context *ctx = fd_context(pctx);
- struct pipe_blit_info info = *blit_info;
- bool discard = false;
-
- if (info.render_condition_enable && !fd_render_condition_check(pctx))
- return;
-
- if (!info.scissor_enable && !info.alpha_blend) {
- discard = util_texrange_covers_whole_level(info.dst.resource,
- info.dst.level, info.dst.box.x, info.dst.box.y,
- info.dst.box.z, info.dst.box.width,
- info.dst.box.height, info.dst.box.depth);
- }
-
- if (util_try_blit_via_copy_region(pctx, &info)) {
- return; /* done */
- }
-
- if (info.mask & PIPE_MASK_S) {
- DBG("cannot blit stencil, skipping");
- info.mask &= ~PIPE_MASK_S;
- }
-
- if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
- DBG("blit unsupported %s -> %s",
- util_format_short_name(info.src.resource->format),
- util_format_short_name(info.dst.resource->format));
- return;
- }
-
- fd_blitter_pipe_begin(ctx, info.render_condition_enable, discard, FD_STAGE_BLIT);
- ctx->blit(ctx, &info);
- fd_blitter_pipe_end(ctx);
-}
-
-void
-fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard,
- enum fd_render_stage stage)
-{
- fd_fence_ref(ctx->base.screen, &ctx->last_fence, NULL);
-
- util_blitter_save_fragment_constant_buffer_slot(ctx->blitter,
- ctx->constbuf[PIPE_SHADER_FRAGMENT].cb);
- util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vtx.vertexbuf.vb);
- util_blitter_save_vertex_elements(ctx->blitter, ctx->vtx.vtx);
- util_blitter_save_vertex_shader(ctx->blitter, ctx->prog.vp);
- util_blitter_save_so_targets(ctx->blitter, ctx->streamout.num_targets,
- ctx->streamout.targets);
- util_blitter_save_rasterizer(ctx->blitter, ctx->rasterizer);
- util_blitter_save_viewport(ctx->blitter, &ctx->viewport);
- util_blitter_save_scissor(ctx->blitter, &ctx->scissor);
- util_blitter_save_fragment_shader(ctx->blitter, ctx->prog.fp);
- util_blitter_save_blend(ctx->blitter, ctx->blend);
- util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->zsa);
- util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
- util_blitter_save_sample_mask(ctx->blitter, ctx->sample_mask);
- util_blitter_save_framebuffer(ctx->blitter, &ctx->framebuffer);
- util_blitter_save_fragment_sampler_states(ctx->blitter,
- ctx->tex[PIPE_SHADER_FRAGMENT].num_samplers,
- (void **)ctx->tex[PIPE_SHADER_FRAGMENT].samplers);
- util_blitter_save_fragment_sampler_views(ctx->blitter,
- ctx->tex[PIPE_SHADER_FRAGMENT].num_textures,
- ctx->tex[PIPE_SHADER_FRAGMENT].textures);
- if (!render_cond)
- util_blitter_save_render_condition(ctx->blitter,
- ctx->cond_query, ctx->cond_cond, ctx->cond_mode);
-
- if (ctx->batch)
- fd_batch_set_stage(ctx->batch, stage);
-
- ctx->in_blit = discard;
-}
-
-void
-fd_blitter_pipe_end(struct fd_context *ctx)
-{
- if (ctx->batch)
- fd_batch_set_stage(ctx->batch, FD_STAGE_NULL);
- ctx->in_blit = false;
-}
-
static void
fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
+ struct fd_context *ctx = fd_context(pctx);
struct fd_resource *rsc = fd_resource(prsc);
/*
* TODO I guess we could track that the resource is invalidated and
* use that as a hint to realloc rather than stall in _transfer_map(),
* even in the non-DISCARD_WHOLE_RESOURCE case?
+ *
+	 * Note: we set dirty bits to trigger the invalidate logic in fd_draw_vbo()
*/
if (rsc->write_batch) {
struct fd_batch *batch = rsc->write_batch;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
- if (pfb->zsbuf && pfb->zsbuf->texture == prsc)
+ if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
+ ctx->dirty |= FD_DIRTY_ZSA;
+ }
for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
+ ctx->dirty |= FD_DIRTY_FRAMEBUFFER;
}
}
}
bool fake_rgtc = screen->gpu_id < 400;
pscreen->resource_create = u_transfer_helper_resource_create;
+ /* NOTE: u_transfer_helper does not yet support the _with_modifiers()
+ * variant:
+ */
+ pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
pscreen->resource_from_handle = fd_resource_from_handle;
pscreen->resource_get_handle = fd_resource_get_handle;
pscreen->resource_destroy = u_transfer_helper_resource_destroy;
pos_out[1] = ptr[sample_index][1] / 16.0f;
}
+static void
+fd_blit_pipe(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
+{
+ /* wrap fd_blit to return void */
+ fd_blit(pctx, blit_info);
+}
+
void
fd_resource_context_init(struct pipe_context *pctx)
{
pctx->create_surface = fd_create_surface;
pctx->surface_destroy = fd_surface_destroy;
pctx->resource_copy_region = fd_resource_copy_region;
- pctx->blit = fd_blit;
+ pctx->blit = fd_blit_pipe;
pctx->flush_resource = fd_flush_resource;
pctx->invalidate_resource = fd_invalidate_resource;
pctx->get_sample_position = fd_get_sample_position;