*/
#include "util/u_format.h"
+#include "util/u_format_rgtc.h"
+#include "util/u_format_zs.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"
#include "util/u_string.h"
+#include "util/u_surface.h"
#include "freedreno_resource.h"
#include "freedreno_screen.h"
#include "freedreno_surface.h"
#include "freedreno_context.h"
+#include "freedreno_query_hw.h"
#include "freedreno_util.h"
+#include <errno.h>
+
+/* XXX this should go away, needed for 'struct winsys_handle' */
+#include "state_tracker/drm_driver.h"
+
+/* Does the resource -- or its separate stencil plane, when one exists --
+ * have any of the given pending-access flags set?
+ */
+static bool
+pending(struct fd_resource *rsc, enum fd_resource_status status)
+{
+	if (rsc->status & status)
+		return true;
+	return rsc->stencil && (rsc->stencil->status & status);
+}
+
+/* Mark as dirty every piece of bound context state that references prsc,
+ * so it is re-emitted with the new backing bo after realloc_bo().
+ */
+static void
+fd_invalidate_resource(struct fd_context *ctx, struct pipe_resource *prsc)
+{
+ int i;
+
+ /* Go through the entire state and see if the resource is bound
+ * anywhere. If it is, mark the relevant state as dirty. This is called on
+ * realloc_bo.
+ */
+
+ /* Constbufs -- each loop bails out early once its dirty bit is set.
+ * NOTE(review): index starts at 1; presumably cb[0] holds user
+ * constants rather than a pipe_resource -- confirm against fd_context.
+ */
+ for (i = 1; i < PIPE_MAX_CONSTANT_BUFFERS && !(ctx->dirty & FD_DIRTY_CONSTBUF); i++) {
+ if (ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer == prsc)
+ ctx->dirty |= FD_DIRTY_CONSTBUF;
+ if (ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer == prsc)
+ ctx->dirty |= FD_DIRTY_CONSTBUF;
+ }
+
+ /* VBOs */
+ for (i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
+ if (ctx->vtx.vertexbuf.vb[i].buffer == prsc)
+ ctx->dirty |= FD_DIRTY_VTXBUF;
+ }
+
+ /* Index buffer */
+ if (ctx->indexbuf.buffer == prsc)
+ ctx->dirty |= FD_DIRTY_INDEXBUF;
+
+ /* Textures (vertex and fragment sampler views) */
+ for (i = 0; i < ctx->verttex.num_textures && !(ctx->dirty & FD_DIRTY_VERTTEX); i++) {
+ if (ctx->verttex.textures[i] && (ctx->verttex.textures[i]->texture == prsc))
+ ctx->dirty |= FD_DIRTY_VERTTEX;
+ }
+ for (i = 0; i < ctx->fragtex.num_textures && !(ctx->dirty & FD_DIRTY_FRAGTEX); i++) {
+ if (ctx->fragtex.textures[i] && (ctx->fragtex.textures[i]->texture == prsc))
+ ctx->dirty |= FD_DIRTY_FRAGTEX;
+ }
+}
+
+/* Drop the current backing bo (if any) and allocate a fresh one of the
+ * given size. All usage-tracking state (timestamp, pending status,
+ * busy-list membership, valid buffer range) is reset, since the new bo
+ * has no GPU history.
+ */
+static void
+realloc_bo(struct fd_resource *rsc, uint32_t size)
+{
+ struct fd_screen *screen = fd_screen(rsc->base.b.screen);
+ uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
+ DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */
+
+ /* if we start using things other than write-combine,
+ * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
+ */
+
+ if (rsc->bo)
+ fd_bo_del(rsc->bo);
+
+ rsc->bo = fd_bo_new(screen->dev, size, flags);
+ rsc->timestamp = 0;
+ rsc->status = 0;
+ rsc->pending_ctx = NULL;
+ list_delinit(&rsc->list);
+ util_range_set_empty(&rsc->valid_buffer_range);
+}
+
+/* Byte offset of array layer 'layer' within the given slice (miplevel). */
+static unsigned
+fd_resource_layer_offset(struct fd_resource *rsc,
+		struct fd_resource_slice *slice,
+		unsigned layer)
+{
+	/* layer_first layout: layers are the outer dimension, each
+	 * rsc->layer_size bytes apart; otherwise layers are packed inside
+	 * the slice with a stride of the level's size0.
+	 */
+	return layer * (rsc->layer_first ? rsc->layer_size : slice->size0);
+}
+
+/* Write the packed Z32F_S8 staging buffer back out to the two real
+ * backing bos: depth (Z32F) into rsc->bo and stencil (S8) into
+ * rsc->stencil->bo. Only the given sub-box (relative to the transfer
+ * box) is written.
+ */
+static void
+fd_resource_flush_z32s8(struct fd_transfer *trans, const struct pipe_box *box)
+{
+ struct fd_resource *rsc = fd_resource(trans->base.resource);
+ struct fd_resource_slice *slice = fd_resource_slice(rsc, trans->base.level);
+ struct fd_resource_slice *sslice = fd_resource_slice(rsc->stencil, trans->base.level);
+ enum pipe_format format = trans->base.resource->format;
+
+ /* slice->pitch is in pixels here, hence the *4 (bytes per Z32F texel).
+ * NOTE(review): the staging pointer is not offset by box->x/y -- the
+ * unpack reads staging from its origin; confirm callers always flush
+ * from (0,0) or that this is the intended convention.
+ */
+ float *depth = fd_bo_map(rsc->bo) + slice->offset +
+ fd_resource_layer_offset(rsc, slice, trans->base.box.z) +
+ (trans->base.box.y + box->y) * slice->pitch * 4 + (trans->base.box.x + box->x) * 4;
+ uint8_t *stencil = fd_bo_map(rsc->stencil->bo) + sslice->offset +
+ fd_resource_layer_offset(rsc->stencil, sslice, trans->base.box.z) +
+ (trans->base.box.y + box->y) * sslice->pitch + trans->base.box.x + box->x;
+
+ /* X32_S8X24 is the stencil-only view: skip the depth plane */
+ if (format != PIPE_FORMAT_X32_S8X24_UINT)
+ util_format_z32_float_s8x24_uint_unpack_z_float(
+ depth, slice->pitch * 4,
+ trans->staging, trans->base.stride,
+ box->width, box->height);
+
+ util_format_z32_float_s8x24_uint_unpack_s_8uint(
+ stencil, sslice->pitch,
+ trans->staging, trans->base.stride,
+ box->width, box->height);
+}
+
+/* Decompress the RGTC/LATC staging buffer back into the resource's
+ * backing bo, which holds the emulated uncompressed internal format.
+ */
+static void
+fd_resource_flush_rgtc(struct fd_transfer *trans, const struct pipe_box *box)
+{
+ struct fd_resource *rsc = fd_resource(trans->base.resource);
+ struct fd_resource_slice *slice = fd_resource_slice(rsc, trans->base.level);
+ enum pipe_format format = trans->base.resource->format;
+
+ uint8_t *data = fd_bo_map(rsc->bo) + slice->offset +
+ fd_resource_layer_offset(rsc, slice, trans->base.box.z) +
+ ((trans->base.box.y + box->y) * slice->pitch +
+ trans->base.box.x + box->x) * rsc->cpp;
+
+ /* source is addressed in compressed blocks, not pixels */
+ uint8_t *source = trans->staging +
+ util_format_get_nblocksy(format, box->y) * trans->base.stride +
+ util_format_get_stride(format, box->x);
+
+ /* NOTE(review): the snorm variants also go through the *_unorm_
+ * unpack helpers -- presumably fine because the internal format is
+ * unorm RGBA8, but verify against fd_resource_create's remapping.
+ */
+ switch (format) {
+ case PIPE_FORMAT_RGTC1_UNORM:
+ case PIPE_FORMAT_RGTC1_SNORM:
+ case PIPE_FORMAT_LATC1_UNORM:
+ case PIPE_FORMAT_LATC1_SNORM:
+ util_format_rgtc1_unorm_unpack_rgba_8unorm(
+ data, slice->pitch * rsc->cpp,
+ source, trans->base.stride,
+ box->width, box->height);
+ break;
+ case PIPE_FORMAT_RGTC2_UNORM:
+ case PIPE_FORMAT_RGTC2_SNORM:
+ case PIPE_FORMAT_LATC2_UNORM:
+ case PIPE_FORMAT_LATC2_SNORM:
+ util_format_rgtc2_unorm_unpack_rgba_8unorm(
+ data, slice->pitch * rsc->cpp,
+ source, trans->base.stride,
+ box->width, box->height);
+ break;
+ default:
+ assert(!"Unexpected format\n");
+ break;
+ }
+}
+
+/* Write staging-buffer contents back to the real bo(s) for the given
+ * sub-box. Only formats that are emulated via a staging copy (Z32F_S8
+ * and RGTC/LATC) reach here.
+ */
+static void
+fd_resource_flush(struct fd_transfer *trans, const struct pipe_box *box)
+{
+ enum pipe_format format = trans->base.resource->format;
+
+ switch (format) {
+ case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
+ case PIPE_FORMAT_X32_S8X24_UINT:
+ fd_resource_flush_z32s8(trans, box);
+ break;
+ case PIPE_FORMAT_RGTC1_UNORM:
+ case PIPE_FORMAT_RGTC1_SNORM:
+ case PIPE_FORMAT_RGTC2_UNORM:
+ case PIPE_FORMAT_RGTC2_SNORM:
+ case PIPE_FORMAT_LATC1_UNORM:
+ case PIPE_FORMAT_LATC1_SNORM:
+ case PIPE_FORMAT_LATC2_UNORM:
+ case PIPE_FORMAT_LATC2_SNORM:
+ fd_resource_flush_rgtc(trans, box);
+ break;
+ default:
+ assert(!"Unexpected staging transfer type");
+ break;
+ }
+}
+
+/* pipe_context::transfer_flush_region -- for buffers, extend the range
+ * known to contain valid data; for staging transfers, write the flushed
+ * box back to the backing bo.
+ */
+static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
+ struct pipe_transfer *ptrans,
+ const struct pipe_box *box)
+{
+ struct fd_resource *rsc = fd_resource(ptrans->resource);
+ struct fd_transfer *trans = fd_transfer(ptrans);
+
+ if (ptrans->resource->target == PIPE_BUFFER)
+ util_range_add(&rsc->valid_buffer_range,
+ ptrans->box.x + box->x,
+ ptrans->box.x + box->x + box->width);
+
+ if (trans->staging)
+ fd_resource_flush(trans, box);
+}
+
+/* pipe_context::transfer_unmap -- flush any pending staging data,
+ * release the CPU-prep on the bo(s), and free the transfer object.
+ */
+static void
+fd_resource_transfer_unmap(struct pipe_context *pctx,
+		struct pipe_transfer *ptrans)
+{
+	struct fd_context *ctx = fd_context(pctx);
+	struct fd_resource *rsc = fd_resource(ptrans->resource);
+	struct fd_transfer *trans = fd_transfer(ptrans);
+
+	/* write back staging data (z32s8/rgtc emulation) unless the caller
+	 * promised to flush explicitly:
+	 */
+	if (trans->staging && !(ptrans->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+		struct pipe_box box;
+		u_box_2d(0, 0, ptrans->box.width, ptrans->box.height, &box);
+		fd_resource_flush(trans, &box);
+	}
+
+	if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+		fd_bo_cpu_fini(rsc->bo);
+		if (rsc->stencil)
+			fd_bo_cpu_fini(rsc->stencil->bo);
+	}
+
+	util_range_add(&rsc->valid_buffer_range,
+			ptrans->box.x,
+			ptrans->box.x + ptrans->box.width);
+
+	/* Free the staging buffer *before* releasing the transfer object:
+	 * trans points into ptrans, so reading trans->staging after
+	 * util_slab_free() would be a use-after-free. free(NULL) is a
+	 * no-op, so no guard is needed.
+	 */
+	free(trans->staging);
+
+	pipe_resource_reference(&ptrans->resource, NULL);
+	util_slab_free(&ctx->transfer_pool, ptrans);
+}
+
static void *
fd_resource_transfer_map(struct pipe_context *pctx,
struct pipe_resource *prsc,
{
struct fd_context *ctx = fd_context(pctx);
struct fd_resource *rsc = fd_resource(prsc);
- struct pipe_transfer *ptrans = util_slab_alloc(&ctx->transfer_pool);
+ struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
+ struct fd_transfer *trans;
+ struct pipe_transfer *ptrans;
enum pipe_format format = prsc->format;
+ uint32_t op = 0;
+ uint32_t offset;
char *buf;
+ int ret = 0;
+ DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
+ box->width, box->height, box->x, box->y);
+
+ ptrans = util_slab_alloc(&ctx->transfer_pool);
if (!ptrans)
return NULL;
- ptrans->resource = prsc;
+ /* util_slab_alloc() doesn't zero: */
+ trans = fd_transfer(ptrans);
+ memset(trans, 0, sizeof(*trans));
+
+ pipe_resource_reference(&ptrans->resource, prsc);
ptrans->level = level;
ptrans->usage = usage;
ptrans->box = *box;
- ptrans->stride = rsc->pitch * rsc->cpp;
- ptrans->layer_stride = ptrans->stride;
+ ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->cpp;
+ ptrans->layer_stride = slice->size0;
+
+ if (usage & PIPE_TRANSFER_READ)
+ op |= DRM_FREEDRENO_PREP_READ;
+
+ if (usage & PIPE_TRANSFER_WRITE)
+ op |= DRM_FREEDRENO_PREP_WRITE;
+
+ if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ realloc_bo(rsc, fd_bo_size(rsc->bo));
+ if (rsc->stencil)
+ realloc_bo(rsc->stencil, fd_bo_size(rsc->stencil->bo));
+ fd_invalidate_resource(ctx, prsc);
+ } else if ((usage & PIPE_TRANSFER_WRITE) &&
+ prsc->target == PIPE_BUFFER &&
+ !util_ranges_intersect(&rsc->valid_buffer_range,
+ box->x, box->x + box->width)) {
+ /* We are trying to write to a previously uninitialized range. No need
+ * to wait.
+ */
+ } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ /* If the GPU is writing to the resource, or if it is reading from the
+ * resource and we're trying to write to it, flush the renders.
+ */
+ if (((ptrans->usage & PIPE_TRANSFER_WRITE) &&
+ pending(rsc, FD_PENDING_READ | FD_PENDING_WRITE)) ||
+ pending(rsc, FD_PENDING_WRITE))
+ fd_context_render(pctx);
+
+ /* The GPU keeps track of how the various bo's are being used, and
+ * will wait if necessary for the proper operation to have
+ * completed.
+ */
+ ret = fd_bo_cpu_prep(rsc->bo, ctx->screen->pipe, op);
+ if (ret)
+ goto fail;
+ }
buf = fd_bo_map(rsc->bo);
+ if (!buf)
+ goto fail;
- *pptrans = ptrans;
-
- return buf +
+ offset = slice->offset +
box->y / util_format_get_blockheight(format) * ptrans->stride +
- box->x / util_format_get_blockwidth(format) * rsc->cpp;
-}
+ box->x / util_format_get_blockwidth(format) * rsc->cpp +
+ fd_resource_layer_offset(rsc, slice, box->z);
+
+ if (prsc->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT ||
+ prsc->format == PIPE_FORMAT_X32_S8X24_UINT) {
+ assert(trans->base.box.depth == 1);
+
+ trans->base.stride = trans->base.box.width * rsc->cpp * 2;
+ trans->staging = malloc(trans->base.stride * trans->base.box.height);
+ if (!trans->staging)
+ goto fail;
+
+ /* if we're not discarding the whole range (or resource), we must copy
+ * the real data in.
+ */
+ if (!(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+ PIPE_TRANSFER_DISCARD_RANGE))) {
+ struct fd_resource_slice *sslice =
+ fd_resource_slice(rsc->stencil, level);
+ void *sbuf = fd_bo_map(rsc->stencil->bo);
+ if (!sbuf)
+ goto fail;
+
+ float *depth = (float *)(buf + slice->offset +
+ fd_resource_layer_offset(rsc, slice, box->z) +
+ box->y * slice->pitch * 4 + box->x * 4);
+ uint8_t *stencil = sbuf + sslice->offset +
+ fd_resource_layer_offset(rsc->stencil, sslice, box->z) +
+ box->y * sslice->pitch + box->x;
+
+ if (format != PIPE_FORMAT_X32_S8X24_UINT)
+ util_format_z32_float_s8x24_uint_pack_z_float(
+ trans->staging, trans->base.stride,
+ depth, slice->pitch * 4,
+ box->width, box->height);
+
+ util_format_z32_float_s8x24_uint_pack_s_8uint(
+ trans->staging, trans->base.stride,
+ stencil, sslice->pitch,
+ box->width, box->height);
+ }
+
+ buf = trans->staging;
+ offset = 0;
+ } else if (rsc->internal_format != format &&
+ util_format_description(format)->layout == UTIL_FORMAT_LAYOUT_RGTC) {
+ assert(trans->base.box.depth == 1);
+
+ trans->base.stride = util_format_get_stride(
+ format, trans->base.box.width);
+ trans->staging = malloc(
+ util_format_get_2d_size(format, trans->base.stride,
+ trans->base.box.height));
+ if (!trans->staging)
+ goto fail;
+
+ /* if we're not discarding the whole range (or resource), we must copy
+ * the real data in.
+ */
+ if (!(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+ PIPE_TRANSFER_DISCARD_RANGE))) {
+ uint8_t *rgba8 = (uint8_t *)buf + slice->offset +
+ fd_resource_layer_offset(rsc, slice, box->z) +
+ box->y * slice->pitch * rsc->cpp + box->x * rsc->cpp;
+
+ switch (format) {
+ case PIPE_FORMAT_RGTC1_UNORM:
+ case PIPE_FORMAT_RGTC1_SNORM:
+ case PIPE_FORMAT_LATC1_UNORM:
+ case PIPE_FORMAT_LATC1_SNORM:
+ util_format_rgtc1_unorm_pack_rgba_8unorm(
+ trans->staging, trans->base.stride,
+ rgba8, slice->pitch * rsc->cpp,
+ box->width, box->height);
+ break;
+ case PIPE_FORMAT_RGTC2_UNORM:
+ case PIPE_FORMAT_RGTC2_SNORM:
+ case PIPE_FORMAT_LATC2_UNORM:
+ case PIPE_FORMAT_LATC2_SNORM:
+ util_format_rgtc2_unorm_pack_rgba_8unorm(
+ trans->staging, trans->base.stride,
+ rgba8, slice->pitch * rsc->cpp,
+ box->width, box->height);
+ break;
+ default:
+ assert(!"Unexpected format");
+ break;
+ }
+ }
+
+ buf = trans->staging;
+ offset = 0;
+ }
-static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
- struct pipe_transfer *ptrans,
- const struct pipe_box *box)
-{
- struct fd_context *ctx = fd_context(pctx);
- struct fd_resource *rsc = fd_resource(ptrans->resource);
+ *pptrans = ptrans;
- if (rsc->dirty)
- fd_context_render(pctx);
+ return buf + offset;
- if (rsc->timestamp) {
- fd_pipe_wait(ctx->screen->pipe, rsc->timestamp);
- rsc->timestamp = 0;
- }
-}
-
-static void
-fd_resource_transfer_unmap(struct pipe_context *pctx,
- struct pipe_transfer *ptrans)
-{
- struct fd_context *ctx = fd_context(pctx);
- util_slab_free(&ctx->transfer_pool, ptrans);
+fail:
+ fd_resource_transfer_unmap(pctx, ptrans);
+ return NULL;
}
static void
struct pipe_resource *prsc)
{
struct fd_resource *rsc = fd_resource(prsc);
- fd_bo_del(rsc->bo);
+ if (rsc->bo)
+ fd_bo_del(rsc->bo);
+ list_delinit(&rsc->list);
+ util_range_destroy(&rsc->valid_buffer_range);
FREE(rsc);
}
{
struct fd_resource *rsc = fd_resource(prsc);
- return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->pitch, handle);
+ return fd_screen_bo_get_handle(pscreen, rsc->bo,
+ rsc->slices[0].pitch * rsc->cpp, handle);
}
-const struct u_resource_vtbl fd_resource_vtbl = {
+static const struct u_resource_vtbl fd_resource_vtbl = {
.resource_get_handle = fd_resource_get_handle,
.resource_destroy = fd_resource_destroy,
.transfer_map = fd_resource_transfer_map,
.transfer_inline_write = u_default_transfer_inline_write,
};
+/* Compute pitch/offset/size for each miplevel slice of the resource and
+ * return the total bo size in bytes. 'alignment' is the per-layer size
+ * alignment (page size for a3xx array/3d textures, else 1).
+ */
+static uint32_t
+setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format format)
+{
+ struct pipe_resource *prsc = &rsc->base.b;
+ enum util_format_layout layout = util_format_description(format)->layout;
+ uint32_t level, size = 0;
+ uint32_t width = prsc->width0;
+ uint32_t height = prsc->height0;
+ uint32_t depth = prsc->depth0;
+ /* in layer_first layout, the level (slice) contains just one
+ * layer (since in fact the layer contains the slices)
+ */
+ uint32_t layers_in_level = rsc->layer_first ? 1 : prsc->array_size;
+
+ for (level = 0; level <= prsc->last_level; level++) {
+ struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
+ uint32_t blocks;
+
+ /* pitch: align width to 32 pixels; for ASTC, to 32 blocks (the
+ * block width may be non-power-of-two, hence util_align_npot)
+ */
+ if (layout == UTIL_FORMAT_LAYOUT_ASTC)
+ slice->pitch = width =
+ util_align_npot(width, 32 * util_format_get_blockwidth(format));
+ else
+ slice->pitch = width = align(width, 32);
+ slice->offset = size;
+ blocks = util_format_get_nblocks(format, width, height);
+ /* 1d array and 2d array textures must all have the same layer size
+ * for each miplevel on a3xx. 3d textures can have different layer
+ * sizes for high levels, but the hw auto-sizer is buggy (or at least
+ * different than what this code does), so as soon as the layer size
+ * range gets into range, we stop reducing it.
+ * NOTE(review): 0xf000 is the empirical cutoff referred to above --
+ * confirm against a3xx hw docs.
+ */
+ if (prsc->target == PIPE_TEXTURE_3D && (
+ level == 1 ||
+ (level > 1 && rsc->slices[level - 1].size0 > 0xf000)))
+ slice->size0 = align(blocks * rsc->cpp, alignment);
+ else if (level == 0 || rsc->layer_first || alignment == 1)
+ slice->size0 = align(blocks * rsc->cpp, alignment);
+ else
+ slice->size0 = rsc->slices[level - 1].size0;
+
+ size += slice->size0 * depth * layers_in_level;
+
+ width = u_minify(width, 1);
+ height = u_minify(height, 1);
+ depth = u_minify(depth, 1);
+ }
+
+ return size;
+}
+
+/* Per-layer size alignment for a resource of the given target. */
+static uint32_t
+slice_alignment(struct pipe_screen *pscreen, const struct pipe_resource *tmpl)
+{
+	/* on a3xx, 2d array and 3d textures seem to want their
+	 * layers aligned to page boundaries:
+	 */
+	if (tmpl->target == PIPE_TEXTURE_3D ||
+			tmpl->target == PIPE_TEXTURE_1D_ARRAY ||
+			tmpl->target == PIPE_TEXTURE_2D_ARRAY)
+		return 4096;
+
+	return 1;
+}
+
/**
* Create a new texture object, using the given template info.
*/
fd_resource_create(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl)
{
- struct fd_screen *screen = fd_screen(pscreen);
struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
struct pipe_resource *prsc = &rsc->base.b;
- uint32_t flags, size;
+ enum pipe_format format = tmpl->format;
+ uint32_t size, alignment;
- DBG("target=%d, format=%s, %ux%u@%u, array_size=%u, last_level=%u, "
+ DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
"nr_samples=%u, usage=%u, bind=%x, flags=%x",
- tmpl->target, util_format_name(tmpl->format),
+ tmpl->target, util_format_name(format),
tmpl->width0, tmpl->height0, tmpl->depth0,
tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
tmpl->usage, tmpl->bind, tmpl->flags);
*prsc = *tmpl;
pipe_reference_init(&prsc->reference, 1);
+ list_inithead(&rsc->list);
prsc->screen = pscreen;
+ util_range_init(&rsc->valid_buffer_range);
+
rsc->base.vtbl = &fd_resource_vtbl;
- rsc->pitch = ALIGN(tmpl->width0, 32);
- rsc->cpp = util_format_get_blocksize(tmpl->format);
- size = rsc->pitch * tmpl->height0 * rsc->cpp;
- flags = DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */
+ if (format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT)
+ format = PIPE_FORMAT_Z32_FLOAT;
+ else if (fd_screen(pscreen)->gpu_id < 400 &&
+ util_format_description(format)->layout == UTIL_FORMAT_LAYOUT_RGTC)
+ format = PIPE_FORMAT_R8G8B8A8_UNORM;
+ rsc->internal_format = format;
+ rsc->cpp = util_format_get_blocksize(format);
+
+ assert(rsc->cpp);
+
+ alignment = slice_alignment(pscreen, tmpl);
+ if (is_a4xx(fd_screen(pscreen))) {
+ switch (tmpl->target) {
+ case PIPE_TEXTURE_3D:
+ rsc->layer_first = false;
+ break;
+ default:
+ rsc->layer_first = true;
+ alignment = 1;
+ break;
+ }
+ }
- rsc->bo = fd_bo_new(screen->dev, size, flags);
+ size = setup_slices(rsc, alignment, format);
+
+ if (rsc->layer_first) {
+ rsc->layer_size = align(size, 4096);
+ size = rsc->layer_size * prsc->array_size;
+ }
+
+ realloc_bo(rsc, size);
+ if (!rsc->bo)
+ goto fail;
+
+ /* There is no native Z32F_S8 sampling or rendering format, so this must
+ * be emulated via two separate textures. The depth texture still keeps
+ * its Z32F_S8 format though, and we also keep a reference to a separate
+ * S8 texture.
+ */
+ if (tmpl->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
+ struct pipe_resource stencil = *tmpl;
+ stencil.format = PIPE_FORMAT_S8_UINT;
+ rsc->stencil = fd_resource(fd_resource_create(pscreen, &stencil));
+ if (!rsc->stencil)
+ goto fail;
+ }
return prsc;
+fail:
+ fd_resource_destroy(pscreen, prsc);
+ return NULL;
}
/**
static struct pipe_resource *
fd_resource_from_handle(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl,
- struct winsys_handle *handle)
+ struct winsys_handle *handle, unsigned usage)
{
struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
+ struct fd_resource_slice *slice = &rsc->slices[0];
struct pipe_resource *prsc = &rsc->base.b;
- DBG("target=%d, format=%s, %ux%u@%u, array_size=%u, last_level=%u, "
+ DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
"nr_samples=%u, usage=%u, bind=%x, flags=%x",
tmpl->target, util_format_name(tmpl->format),
tmpl->width0, tmpl->height0, tmpl->depth0,
*prsc = *tmpl;
pipe_reference_init(&prsc->reference, 1);
+ list_inithead(&rsc->list);
prsc->screen = pscreen;
- rsc->bo = fd_screen_bo_from_handle(pscreen, handle, &rsc->pitch);
+ util_range_init(&rsc->valid_buffer_range);
+
+ rsc->bo = fd_screen_bo_from_handle(pscreen, handle, &slice->pitch);
+ if (!rsc->bo)
+ goto fail;
rsc->base.vtbl = &fd_resource_vtbl;
- rsc->pitch = ALIGN(tmpl->width0, 32);
+ rsc->cpp = util_format_get_blocksize(tmpl->format);
+ slice->pitch /= rsc->cpp;
+ slice->offset = handle->offset;
+
+ assert(rsc->cpp);
return prsc;
+
+fail:
+ fd_resource_destroy(pscreen, prsc);
+ return NULL;
+}
+
+static void fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond);
+static void fd_blitter_pipe_end(struct fd_context *ctx);
+
+/**
+ * _copy_region using pipe (3d engine)
+ *
+ * Returns false (without touching any state) when the blitter cannot
+ * handle the copy, so the caller can fall back to a sw path.
+ */
+static bool
+fd_blitter_pipe_copy_region(struct fd_context *ctx,
+ struct pipe_resource *dst,
+ unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src,
+ unsigned src_level,
+ const struct pipe_box *src_box)
+{
+ /* not until we allow rendertargets to be buffers */
+ if (dst->target == PIPE_BUFFER || src->target == PIPE_BUFFER)
+ return false;
+
+ if (!util_blitter_is_copy_supported(ctx->blitter, dst, src))
+ return false;
+
+ /* not conditional-rendering aware: pass render_cond=false */
+ fd_blitter_pipe_begin(ctx, false);
+ util_blitter_copy_texture(ctx->blitter,
+ dst, dst_level, dstx, dsty, dstz,
+ src, src_level, src_box);
+ fd_blitter_pipe_end(ctx);
+
+ return true;
+}
/**
unsigned src_level,
const struct pipe_box *src_box)
{
- DBG("TODO: ");
- // TODO
+ struct fd_context *ctx = fd_context(pctx);
+
+ /* TODO if we have 2d core, or other DMA engine that could be used
+ * for simple copies and reasonably easily synchronized with the 3d
+ * core, this is where we'd plug it in..
+ */
+
+ /* try blit on 3d pipe: */
+ if (fd_blitter_pipe_copy_region(ctx,
+ dst, dst_level, dstx, dsty, dstz,
+ src, src_level, src_box))
+ return;
+
+ /* else fallback to pure sw: */
+ util_resource_copy_region(pctx,
+ dst, dst_level, dstx, dsty, dstz,
+ src, src_level, src_box);
}
-/* Optimal hardware path for blitting pixels.
+/* Evaluate the active conditional-rendering query, if any. Returns
+ * true when rendering should proceed (no query bound, query result
+ * satisfies the condition, or a no-wait result is unavailable).
+ */
+bool
+fd_render_condition_check(struct pipe_context *pctx)
+{
+ struct fd_context *ctx = fd_context(pctx);
+
+ if (!ctx->cond_query)
+ return true;
+
+ union pipe_query_result res = { 0 };
+ /* only block on the result for the waiting condition modes: */
+ bool wait =
+ ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
+ ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
+
+ if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
+ return (bool)res.u64 != ctx->cond_cond;
+
+ /* result not ready (no-wait mode): render unconditionally */
+ return true;
+}
+
+/**
+ * Optimal hardware path for blitting pixels.
 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
 */
static void
-fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *info)
+fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
-	DBG("TODO: ");
-	// TODO
+	struct fd_context *ctx = fd_context(pctx);
+	/* local copy, since we may strip the stencil bit from the mask: */
+	struct pipe_blit_info info = *blit_info;
+
+	/* msaa color resolve not implemented; silently drop the blit */
+	if (info.src.resource->nr_samples > 1 &&
+			info.dst.resource->nr_samples <= 1 &&
+			!util_format_is_depth_or_stencil(info.src.resource->format) &&
+			!util_format_is_pure_integer(info.src.resource->format)) {
+		DBG("color resolve unimplemented");
+		return;
+	}
+
+	if (info.render_condition_enable && !fd_render_condition_check(pctx))
+		return;
+
+	/* cheapest path first: a straight copy_region when blit params allow */
+	if (util_try_blit_via_copy_region(pctx, &info)) {
+		return; /* done */
+	}
+
+	if (info.mask & PIPE_MASK_S) {
+		DBG("cannot blit stencil, skipping");
+		info.mask &= ~PIPE_MASK_S;
+	}
+
+	if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
+		DBG("blit unsupported %s -> %s",
+				util_format_short_name(info.src.resource->format),
+				util_format_short_name(info.dst.resource->format));
+		return;
+	}
+
+	fd_blitter_pipe_begin(ctx, info.render_condition_enable);
+	util_blitter_blit(ctx->blitter, &info);
+	fd_blitter_pipe_end(ctx);
+}
+
+/* Save all context state the util_blitter will clobber, then switch the
+ * hw-query stage to BLIT. When render_cond is false the current render
+ * condition is saved/disabled too, so internal blits are unconditional.
+ */
+static void
+fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond)
+{
+ util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vtx.vertexbuf.vb);
+ util_blitter_save_vertex_elements(ctx->blitter, ctx->vtx.vtx);
+ util_blitter_save_vertex_shader(ctx->blitter, ctx->prog.vp);
+ util_blitter_save_so_targets(ctx->blitter, ctx->streamout.num_targets,
+ ctx->streamout.targets);
+ util_blitter_save_rasterizer(ctx->blitter, ctx->rasterizer);
+ util_blitter_save_viewport(ctx->blitter, &ctx->viewport);
+ util_blitter_save_scissor(ctx->blitter, &ctx->scissor);
+ util_blitter_save_fragment_shader(ctx->blitter, ctx->prog.fp);
+ util_blitter_save_blend(ctx->blitter, ctx->blend);
+ util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->zsa);
+ util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
+ util_blitter_save_sample_mask(ctx->blitter, ctx->sample_mask);
+ util_blitter_save_framebuffer(ctx->blitter, &ctx->framebuffer);
+ util_blitter_save_fragment_sampler_states(ctx->blitter,
+ ctx->fragtex.num_samplers,
+ (void **)ctx->fragtex.samplers);
+ util_blitter_save_fragment_sampler_views(ctx->blitter,
+ ctx->fragtex.num_textures, ctx->fragtex.textures);
+ if (!render_cond)
+ util_blitter_save_render_condition(ctx->blitter,
+ ctx->cond_query, ctx->cond_cond, ctx->cond_mode);
+
+ fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_BLIT);
+}
+
+/* Counterpart to fd_blitter_pipe_begin(): drop back out of the BLIT
+ * hw-query stage (the blitter restores the saved pipe state itself).
+ */
+static void
+fd_blitter_pipe_end(struct fd_context *ctx)
+{
+ fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_NULL);
+}
+
+/* pipe_context::flush_resource -- if the GPU has pending reads or
+ * writes on the resource, flush the queued rendering.
+ */
+static void
+fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
+{
+	if (pending(fd_resource(prsc), FD_PENDING_WRITE | FD_PENDING_READ))
+		fd_context_render(pctx);
+}
void
pctx->surface_destroy = fd_surface_destroy;
pctx->resource_copy_region = fd_resource_copy_region;
pctx->blit = fd_blit;
+ pctx->flush_resource = fd_flush_resource;
}