/*
* Copyright (C) 2008 VMware, Inc.
- * Copyright (C) 2014 Broadcom
+ * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
+ * Copyright (C) 2014-2017 Broadcom
* Copyright (C) 2018-2019 Alyssa Rosenzweig
* Copyright (C) 2019 Collabora, Ltd.
*
#include <fcntl.h>
#include "drm-uapi/drm_fourcc.h"
-#include "state_tracker/winsys_handle.h"
-#include "util/u_format.h"
+#include "frontend/winsys_handle.h"
+#include "util/format/u_format.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"
#include "util/u_transfer_helper.h"
#include "util/u_gen_mipmap.h"
+#include "util/u_drm.h"
+#include "pan_bo.h"
#include "pan_context.h"
#include "pan_screen.h"
#include "pan_resource.h"
#include "pan_util.h"
#include "pan_tiling.h"
+#include "decode.h"
+#include "panfrost-quirks.h"
static struct pipe_resource *
panfrost_resource_from_handle(struct pipe_screen *pscreen,
const struct pipe_resource *templat,
struct winsys_handle *whandle,
unsigned usage)
{
- struct panfrost_screen *screen = pan_screen(pscreen);
+ struct panfrost_device *dev = pan_device(pscreen);
struct panfrost_resource *rsc;
struct pipe_resource *prsc;
pipe_reference_init(&prsc->reference, 1);
prsc->screen = pscreen;
- rsc->bo = panfrost_drm_import_bo(screen, whandle->handle);
- rsc->slices[0].stride = whandle->stride;
- rsc->slices[0].initialized = true;
+ rsc->bo = panfrost_bo_import(dev, whandle->handle);
+ rsc->internal_format = templat->format;
+ rsc->modifier = (whandle->modifier == DRM_FORMAT_MOD_INVALID) ?
+ DRM_FORMAT_MOD_LINEAR : whandle->modifier;
+ rsc->slices[0].stride = whandle->stride;
+ rsc->slices[0].offset = whandle->offset;
+ rsc->slices[0].initialized = true;
+ panfrost_resource_set_damage_region(NULL, &rsc->base, 0, NULL);
+
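+ /* Imported render targets still want a CRC region for transaction
+ * elimination, but the imported BO's layout is fixed by the
+ * exporter, so the checksum data gets its own BO */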
+ if (dev->quirks & IS_BIFROST &&
+ templat->bind & PIPE_BIND_RENDER_TARGET) {
+ unsigned size = panfrost_compute_checksum_size(
+ &rsc->slices[0], templat->width0, templat->height0);
+ rsc->slices[0].checksum_bo = panfrost_bo_create(dev, size, 0);
+ rsc->checksummed = true;
+ }
+
+ if (drm_is_afbc(whandle->modifier)) {
+ rsc->slices[0].header_size =
+ panfrost_afbc_header_size(templat->width0, templat->height0);
+ }
- if (screen->ro) {
- rsc->scanout =
- renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
- /* failure is expected in some cases.. */
- }
+ if (dev->ro) {
+ rsc->scanout =
+ renderonly_create_gpu_import_for_resource(prsc, dev->ro, NULL);
+ /* failure is expected in some cases.. */
+ }
return prsc;
}
-static boolean
+static bool
panfrost_resource_get_handle(struct pipe_screen *pscreen,
struct pipe_context *ctx,
struct pipe_resource *pt,
struct winsys_handle *handle,
unsigned usage)
{
- struct panfrost_screen *screen = pan_screen(pscreen);
+ struct panfrost_device *dev = pan_device(pscreen);
struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;
struct renderonly_scanout *scanout = rsrc->scanout;
- handle->modifier = DRM_FORMAT_MOD_INVALID;
+ handle->modifier = rsrc->modifier;
- if (handle->type == WINSYS_HANDLE_TYPE_SHARED) {
- return FALSE;
- } else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
- if (renderonly_get_handle(scanout, handle))
- return TRUE;
+ if (handle->type == WINSYS_HANDLE_TYPE_SHARED) {
+ return false;
+ } else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
+ if (renderonly_get_handle(scanout, handle))
+ return true;
- handle->handle = rsrc->bo->gem_handle;
- handle->stride = rsrc->slices[0].stride;
- return TRUE;
- } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
+ handle->handle = rsrc->bo->gem_handle;
+ handle->stride = rsrc->slices[0].stride;
+ handle->offset = rsrc->slices[0].offset;
+ return true;
+ } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
if (scanout) {
struct drm_prime_handle args = {
.handle = scanout->handle,
.flags = DRM_CLOEXEC,
};
- int ret = drmIoctl(screen->ro->kms_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
+ int ret = drmIoctl(dev->ro->kms_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
if (ret == -1)
- return FALSE;
+ return false;
handle->stride = scanout->stride;
handle->handle = args.fd;
- return TRUE;
+ return true;
} else {
- int fd = panfrost_drm_export_bo(screen, rsrc->bo);
+ int fd = panfrost_bo_export(rsrc->bo);
if (fd < 0)
- return FALSE;
+ return false;
handle->handle = fd;
handle->stride = rsrc->slices[0].stride;
- return TRUE;
- }
- }
+ handle->offset = rsrc->slices[0].offset;
+ return true;
+ }
+ }
- return FALSE;
+ return false;
}
static void
panfrost_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
- //DBG("TODO %s\n", __func__);
+ /* TODO */
}
static struct pipe_surface *
assert(surf_tmpl->u.tex.level <= pt->last_level);
ps->width = u_minify(pt->width0, surf_tmpl->u.tex.level);
ps->height = u_minify(pt->height0, surf_tmpl->u.tex.level);
+ ps->nr_samples = surf_tmpl->nr_samples;
ps->u.tex.level = surf_tmpl->u.tex.level;
ps->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
ps->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
static struct pipe_resource *
panfrost_create_scanout_res(struct pipe_screen *screen,
- const struct pipe_resource *template)
+ const struct pipe_resource *template,
+ uint64_t modifier)
{
- struct panfrost_screen *pscreen = pan_screen(screen);
- struct pipe_resource scanout_templat = *template;
+ struct panfrost_device *dev = pan_device(screen);
struct renderonly_scanout *scanout;
struct winsys_handle handle;
struct pipe_resource *res;
+ struct pipe_resource scanout_templat = *template;
+
+ /* Tiled formats need to be tile aligned */
+ if (modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED) {
+ scanout_templat.width0 = ALIGN_POT(template->width0, 16);
+ scanout_templat.height0 = ALIGN_POT(template->height0, 16);
+ }
+
+ /* AFBC formats need a header. Thankfully we don't care about the
+ * stride so we can just use wonky dimensions as long as the right
+ * number of bytes are allocated at the end of the day... this implies
+ * that stride/pitch is invalid for AFBC buffers */
+
+ if (drm_is_afbc(modifier)) {
+ /* Space for the header. We need to keep roughly the original
+ * dimensions so renderonly can allocate this as a dumb buffer.
+ * To do so, after the usual 16x16 alignment, we add on extra
+ * rows for the header. The order of operations matters here:
+ * the extra rows of padding can in fact be needed, and missing
+ * them can lead to faults. */
+
+ unsigned header_size = panfrost_afbc_header_size(
+ template->width0, template->height0);
+
+ unsigned pitch = ALIGN_POT(template->width0, 16) *
+ util_format_get_blocksize(template->format);
+
+ unsigned header_rows =
+ DIV_ROUND_UP(header_size, pitch);
+
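+ /* Worked example (assuming 16 bytes of AFBC header per 16x16
+ * superblock): a 1920x1080 RGBA8 buffer has pitch 1920 * 4 = 7680
+ * bytes and 120 * 68 superblocks, so header_size is about 130560
+ * bytes and header_rows = DIV_ROUND_UP(130560, 7680) = 17 extra
+ * rows of padding */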
+ scanout_templat.width0 = ALIGN_POT(template->width0, 16);
+ scanout_templat.height0 = ALIGN_POT(template->height0, 16) + header_rows;
+ }
scanout = renderonly_scanout_for_resource(&scanout_templat,
- pscreen->ro, &handle);
+ dev->ro, &handle);
if (!scanout)
return NULL;
assert(handle.type == WINSYS_HANDLE_TYPE_FD);
- /* TODO: handle modifiers? */
+ handle.modifier = modifier;
res = screen->resource_from_handle(screen, template, &handle,
PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE);
close(handle.handle);
struct panfrost_resource *pres = pan_resource(res);
pres->scanout = scanout;
- pscreen->display_target = pres;
return res;
}
-/* Computes sizes for checksumming, which is 8 bytes per 16x16 tile */
-
-#define CHECKSUM_TILE_WIDTH 16
-#define CHECKSUM_TILE_HEIGHT 16
-#define CHECKSUM_BYTES_PER_TILE 8
-
-static unsigned
-panfrost_compute_checksum_sizes(
- struct panfrost_slice *slice,
- unsigned width,
- unsigned height)
-{
- unsigned aligned_width = ALIGN(width, CHECKSUM_TILE_WIDTH);
- unsigned aligned_height = ALIGN(height, CHECKSUM_TILE_HEIGHT);
-
- unsigned tile_count_x = aligned_width / CHECKSUM_TILE_WIDTH;
- unsigned tile_count_y = aligned_height / CHECKSUM_TILE_HEIGHT;
-
- slice->checksum_stride = tile_count_x * CHECKSUM_BYTES_PER_TILE;
-
- return slice->checksum_stride * tile_count_y;
-}
-
-/* Setup the mip tree given a particular layout, possibly with checksumming */
+/* Setup the mip tree given a particular modifier, possibly with checksumming */
static void
panfrost_setup_slices(struct panfrost_resource *pres, size_t *bo_size)
unsigned width = res->width0;
unsigned height = res->height0;
unsigned depth = res->depth0;
- unsigned bytes_per_pixel = util_format_get_blocksize(res->format);
+ unsigned bytes_per_pixel = util_format_get_blocksize(pres->internal_format);
+
+ /* MSAA is implemented as a 3D texture with z corresponding to the
+ * sample #, horrifyingly enough */
+
+ bool msaa = res->nr_samples > 1;
+
+ if (msaa) {
+ assert(depth == 1);
+ depth = res->nr_samples;
+ }
assert(depth > 0);
* makes code a lot simpler */
bool renderable = res->bind &
- (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL);
- bool afbc = pres->layout == PAN_AFBC;
- bool tiled = pres->layout == PAN_TILED;
- bool should_align = renderable || tiled;
+ (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL) &&
+ res->target != PIPE_BUFFER;
+ bool afbc = drm_is_afbc(pres->modifier);
+ bool tiled = pres->modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED;
+ bool linear = pres->modifier == DRM_FORMAT_MOD_LINEAR;
+ bool should_align = renderable || tiled || afbc;
/* We don't know how to specify a 2D stride for 3D textures */
unsigned effective_depth = depth;
if (should_align) {
- effective_width = ALIGN(effective_width, 16);
- effective_height = ALIGN(effective_height, 16);
+ effective_width = ALIGN_POT(effective_width, 16);
+ effective_height = ALIGN_POT(effective_height, 16);
/* We don't need to align depth */
}
+ /* Align levels to cache-line as a performance improvement for
+ * linear/tiled and as a requirement for AFBC */
+
+ offset = ALIGN_POT(offset, 64);
+
slice->offset = offset;
/* Compute the would-be stride */
unsigned stride = bytes_per_pixel * effective_width;
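+ /* For block-compressed formats, the blocksize is per 4x4 block
+ * rather than per pixel, so the per-pixel product above overcounts
+ * the row stride by the 4-pixel block width */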
+ if (util_format_is_compressed(pres->internal_format))
+ stride /= 4;
+
/* ..but cache-line align it for performance */
- if (can_align_stride && pres->layout == PAN_LINEAR)
- stride = ALIGN(stride, 64);
+ if (can_align_stride && linear)
+ stride = ALIGN_POT(stride, 64);
slice->stride = stride;
unsigned slice_one_size = slice->stride * effective_height;
unsigned slice_full_size = slice_one_size * effective_depth;
+ slice->size0 = slice_one_size;
+
/* Report 2D size for 3D texturing */
if (l == 0)
if (pres->checksummed) {
slice->checksum_offset = offset;
- unsigned size = panfrost_compute_checksum_sizes(
- slice, width, height);
+ unsigned size = panfrost_compute_checksum_size(
+ slice, width, height);
offset += size;
}
width = u_minify(width, 1);
height = u_minify(height, 1);
- depth = u_minify(depth, 1);
+
+ /* Don't mipmap the sample count */
+ if (!msaa)
+ depth = u_minify(depth, 1);
}
assert(res->array_size);
if (res->target != PIPE_TEXTURE_3D) {
/* Arrays and cubemaps have the entire miptree duplicated */
- pres->cubemap_stride = ALIGN(offset, 64);
- *bo_size = ALIGN(pres->cubemap_stride * res->array_size, 4096);
+ pres->cubemap_stride = ALIGN_POT(offset, 64);
+ *bo_size = ALIGN_POT(pres->cubemap_stride * res->array_size, 4096);
} else {
/* 3D strides across the 2D layers */
assert(res->array_size == 1);
pres->cubemap_stride = size_2d;
- *bo_size = ALIGN(offset, 4096);
+ *bo_size = ALIGN_POT(offset, 4096);
}
}
-static void
-panfrost_resource_create_bo(struct panfrost_screen *screen, struct panfrost_resource *pres)
+/* Based on the usage, determine if it makes sense to use u-interleaved tiling.
+ * We only have routines to tile 2D textures of sane bpps. On the hardware
+ * level, not all usages are valid for tiling. Finally, if the app is hinting
+ * that the contents frequently change, tiling will be a loss.
+ *
+ * Due to incomplete information on some platforms, we may need to force tiling
+ * in some cases.
+ *
+ * On platforms where it is supported, AFBC is even better. */
+
+static bool
+panfrost_can_linear(struct panfrost_device *dev, const struct panfrost_resource *pres)
{
- struct pipe_resource *res = &pres->base;
-
- /* Based on the usage, figure out what storing will be used. There are
- * various tradeoffs:
- *
- * Linear: the basic format, bad for memory bandwidth, bad for cache
- * use. Zero-copy, though. Renderable.
- *
- * Tiled: Not compressed, but cache-optimized. Expensive to write into
- * (due to software tiling), but cheap to sample from. Ideal for most
- * textures.
- *
- * AFBC: Compressed and renderable (so always desirable for non-scanout
- * rendertargets). Cheap to sample from. The format is black box, so we
- * can't read/write from software.
- */
+ /* XXX: We should be able to do linear Z/S with the right bits.. */
+ return !((pres->base.bind & PIPE_BIND_DEPTH_STENCIL) &&
+ (dev->quirks & (MIDGARD_SFBD | IS_BIFROST)));
+}
+
+static bool
+panfrost_should_afbc(struct panfrost_device *dev, const struct panfrost_resource *pres)
+{
+ /* AFBC resources may be rendered to, textured from, or shared across
+ * processes, but may not be used as e.g. buffers */
+ const unsigned valid_binding =
+ PIPE_BIND_DEPTH_STENCIL |
+ PIPE_BIND_RENDER_TARGET |
+ PIPE_BIND_BLENDABLE |
+ PIPE_BIND_SAMPLER_VIEW |
+ PIPE_BIND_DISPLAY_TARGET |
+ PIPE_BIND_SCANOUT |
+ PIPE_BIND_SHARED;
+
+ if (pres->base.bind & ~valid_binding)
+ return false;
+
+ /* AFBC introduced with Mali T760 */
+ if (dev->quirks & MIDGARD_NO_AFBC)
+ return false;
+
+ /* AFBC<-->staging is expensive */
+ if (pres->base.usage == PIPE_USAGE_STREAM)
+ return false;
+
+ /* Only a small selection of formats are AFBC'able */
+ if (!panfrost_format_supports_afbc(pres->internal_format))
+ return false;
+
+ /* AFBC does not support layered (GLES3 style) multisampling. Use
+ * EXT_multisampled_render_to_texture instead */
+ if (pres->base.nr_samples > 1)
+ return false;
+
+ /* TODO: Is AFBC of 3D textures possible? */
+ if ((pres->base.target != PIPE_TEXTURE_2D) && (pres->base.target != PIPE_TEXTURE_RECT))
+ return false;
+
+ /* For one tile, AFBC is a loss compared to u-interleaved */
+ if (pres->base.width0 <= 16 && pres->base.height0 <= 16)
+ return false;
+
+ /* Otherwise, we'd prefer AFBC as it is dramatically more efficient
+ * than linear or usually even u-interleaved */
+ return true;
+}
+
+static bool
+panfrost_should_tile(struct panfrost_device *dev, const struct panfrost_resource *pres)
+{
+ const unsigned valid_binding =
+ PIPE_BIND_DEPTH_STENCIL |
+ PIPE_BIND_RENDER_TARGET |
+ PIPE_BIND_BLENDABLE |
+ PIPE_BIND_SAMPLER_VIEW |
+ PIPE_BIND_DISPLAY_TARGET |
+ PIPE_BIND_SCANOUT |
+ PIPE_BIND_SHARED;
+
+ unsigned bpp = util_format_get_blocksizebits(pres->internal_format);
- /* Tiling textures is almost always faster, unless we only use it once */
+ bool is_sane_bpp =
+ bpp == 8 || bpp == 16 || bpp == 24 || bpp == 32 ||
+ bpp == 64 || bpp == 128;
- bool is_texture = (res->bind & PIPE_BIND_SAMPLER_VIEW);
- bool is_2d = res->depth0 == 1 && res->array_size == 1;
- bool is_streaming = (res->usage != PIPE_USAGE_STREAM);
+ bool is_2d = (pres->base.target == PIPE_TEXTURE_2D)
+ || (pres->base.target == PIPE_TEXTURE_RECT);
+
+ bool can_tile = is_2d && is_sane_bpp && ((pres->base.bind & ~valid_binding) == 0);
+
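+ /* If linear is impossible (Z/S on SFBD/Bifrost, see
+ * panfrost_can_linear), tiling is the only remaining option, so it
+ * had better be supported */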
+ if (!panfrost_can_linear(dev, pres)) {
+ assert(can_tile);
+ return true;
+ }
- bool should_tile = is_streaming && is_texture && is_2d;
+ return can_tile && (pres->base.usage != PIPE_USAGE_STREAM);
+}
- /* Depth/stencil can't be tiled, only linear or AFBC */
- should_tile &= !(res->bind & PIPE_BIND_DEPTH_STENCIL);
+static uint64_t
+panfrost_best_modifier(struct panfrost_device *dev,
+ const struct panfrost_resource *pres)
+{
+ if (panfrost_should_afbc(dev, pres)) {
+ uint64_t afbc =
+ AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
+ AFBC_FORMAT_MOD_SPARSE;
+
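+ /* YTR is AFBC's lossless RGB->YUV-style transform; it generally
+ * improves the compression ratio, so enable it whenever the format
+ * allows */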
+ if (panfrost_afbc_can_ytr(pres->base.format))
+ afbc |= AFBC_FORMAT_MOD_YTR;
+
+ return DRM_FORMAT_MOD_ARM_AFBC(afbc);
+ } else if (panfrost_should_tile(dev, pres))
+ return DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED;
+ else
+ return DRM_FORMAT_MOD_LINEAR;
+}
- /* FBOs we would like to checksum, if at all possible */
- bool can_checksum = !(res->bind & (PIPE_BIND_SCANOUT | PIPE_BIND_SHARED));
- bool should_checksum = res->bind & PIPE_BIND_RENDER_TARGET;
+static void
+panfrost_resource_create_bo(struct panfrost_device *dev, struct panfrost_resource *pres,
+ uint64_t modifier)
+{
+ struct pipe_resource *res = &pres->base;
- pres->checksummed = can_checksum && should_checksum;
+ pres->modifier = (modifier != DRM_FORMAT_MOD_INVALID) ? modifier :
+ panfrost_best_modifier(dev, pres);
+ pres->checksummed = (res->bind & PIPE_BIND_RENDER_TARGET);
- /* Set the layout appropriately */
- pres->layout = should_tile ? PAN_TILED : PAN_LINEAR;
+ /* We can only switch tiled->linear if the resource isn't already
+ * linear, and if we control the modifier, and if the resource can be
+ * linear. */
+ pres->modifier_constant = !((pres->modifier != DRM_FORMAT_MOD_LINEAR)
+ && (modifier == DRM_FORMAT_MOD_INVALID)
+ && panfrost_can_linear(dev, pres));
size_t bo_size;
panfrost_setup_slices(pres, &bo_size);
- struct panfrost_memory mem;
- struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
+ /* We create a BO immediately but don't bother mapping, since we don't
+ * care to map e.g. FBOs which the CPU probably won't touch */
+ pres->bo = panfrost_bo_create(dev, bo_size, PAN_BO_DELAY_MMAP);
+}
+
+void
+panfrost_resource_set_damage_region(struct pipe_screen *screen,
+ struct pipe_resource *res,
+ unsigned int nrects,
+ const struct pipe_box *rects)
+{
+ struct panfrost_resource *pres = pan_resource(res);
+ struct pipe_scissor_state *damage_extent = &pres->damage.extent;
+ unsigned int i;
+
+ if (pres->damage.inverted_rects)
+ ralloc_free(pres->damage.inverted_rects);
- pipe_reference_init(&bo->reference, 1);
- panfrost_drm_allocate_slab(screen, &mem, bo_size / 4096, true, 0, 0, 0);
+ memset(&pres->damage, 0, sizeof(pres->damage));
+
+ pres->damage.inverted_rects =
+ pan_subtract_damage(pres,
+ res->width0, res->height0,
+ nrects, rects, &pres->damage.inverted_len);
+
+ /* Track the damage extent: the quad including all damage regions. Will
+ * be used to restrict the rendering area */
+
+ damage_extent->minx = 0xffff;
+ damage_extent->miny = 0xffff;
+
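+ /* The damage rects and the scissor-style extent use opposite
+ * vertical origins, hence the Y flip below */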
+ for (i = 0; i < nrects; i++) {
+ int x = rects[i].x, w = rects[i].width, h = rects[i].height;
+ int y = res->height0 - (rects[i].y + h);
+
+ damage_extent->minx = MIN2(damage_extent->minx, x);
+ damage_extent->miny = MIN2(damage_extent->miny, y);
+ damage_extent->maxx = MAX2(damage_extent->maxx,
+ MIN2(x + w, res->width0));
+ damage_extent->maxy = MAX2(damage_extent->maxy,
+ MIN2(y + h, res->height0));
+ }
+
+ if (nrects == 0) {
+ damage_extent->minx = 0;
+ damage_extent->miny = 0;
+ damage_extent->maxx = res->width0;
+ damage_extent->maxy = res->height0;
+ }
- bo->cpu = mem.cpu;
- bo->gpu = mem.gpu;
- bo->gem_handle = mem.gem_handle;
- bo->size = bo_size;
- pres->bo = bo;
}
static struct pipe_resource *
-panfrost_resource_create(struct pipe_screen *screen,
- const struct pipe_resource *template)
+panfrost_resource_create_with_modifier(struct pipe_screen *screen,
+ const struct pipe_resource *template,
+ uint64_t modifier)
{
+ struct panfrost_device *dev = pan_device(screen);
+
/* Make sure we're familiar */
switch (template->target) {
- case PIPE_BUFFER:
- case PIPE_TEXTURE_1D:
- case PIPE_TEXTURE_2D:
- case PIPE_TEXTURE_3D:
- case PIPE_TEXTURE_CUBE:
- case PIPE_TEXTURE_RECT:
- case PIPE_TEXTURE_2D_ARRAY:
- break;
- default:
- DBG("Unknown texture target %d\n", template->target);
- assert(0);
+ case PIPE_BUFFER:
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_3D:
+ case PIPE_TEXTURE_CUBE:
+ case PIPE_TEXTURE_RECT:
+ case PIPE_TEXTURE_1D_ARRAY:
+ case PIPE_TEXTURE_2D_ARRAY:
+ break;
+ default:
+ unreachable("Unknown texture target\n");
}
- if (template->bind &
- (PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT | PIPE_BIND_SHARED))
- return panfrost_create_scanout_res(screen, template);
+ if (dev->ro && (template->bind &
+ (PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT | PIPE_BIND_SHARED)))
+ return panfrost_create_scanout_res(screen, template, modifier);
struct panfrost_resource *so = rzalloc(screen, struct panfrost_resource);
- struct panfrost_screen *pscreen = (struct panfrost_screen *) screen;
-
so->base = *template;
so->base.screen = screen;
+ so->internal_format = template->format;
pipe_reference_init(&so->base.reference, 1);
util_range_init(&so->valid_buffer_range);
- panfrost_resource_create_bo(pscreen, so);
+ panfrost_resource_create_bo(dev, so, modifier);
+ panfrost_resource_set_damage_region(NULL, &so->base, 0, NULL);
+
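+ /* Index buffers get a min/max cache so their index ranges don't
+ * have to be rescanned on every draw */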
+ if (template->bind & PIPE_BIND_INDEX_BUFFER)
+ so->index_cache = rzalloc(so, struct panfrost_minmax_cache);
+
return (struct pipe_resource *)so;
}
-static void
-panfrost_destroy_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
-{
- struct panfrost_memory mem = {
- .cpu = bo->cpu,
- .gpu = bo->gpu,
- .size = bo->size,
- .gem_handle = bo->gem_handle,
- };
-
- panfrost_drm_free_slab(screen, &mem);
- ralloc_free(bo);
-}
+/* Default is to create a resource with a "don't care" modifier */
-void
-panfrost_bo_reference(struct panfrost_bo *bo)
+static struct pipe_resource *
+panfrost_resource_create(struct pipe_screen *screen,
+ const struct pipe_resource *template)
{
- pipe_reference(NULL, &bo->reference);
+ return panfrost_resource_create_with_modifier(screen, template,
+ DRM_FORMAT_MOD_INVALID);
}
-void
-panfrost_bo_unreference(struct pipe_screen *screen, struct panfrost_bo *bo)
-{
- /* When the reference count goes to zero, we need to cleanup */
+/* If no modifier is specified, we'll choose. Otherwise, the order of
+ * preference is compressed, tiled, linear. */
- if (pipe_reference(&bo->reference, NULL)) {
- panfrost_destroy_bo(pan_screen(screen), bo);
+static struct pipe_resource *
+panfrost_resource_create_with_modifiers(struct pipe_screen *screen,
+ const struct pipe_resource *template,
+ const uint64_t *modifiers, int count)
+{
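+ /* pan_best_modifiers is ordered from most to least preferred, so
+ * pick the first entry the caller also supports */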
+ for (unsigned i = 0; i < PAN_MODIFIER_COUNT; ++i) {
+ if (drm_find_modifier(pan_best_modifiers[i], modifiers, count)) {
+ return panfrost_resource_create_with_modifier(screen, template,
+ pan_best_modifiers[i]);
+ }
}
+
+ /* If we didn't find one, app specified invalid */
+ assert(count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID);
+ return panfrost_resource_create(screen, template);
}
static void
panfrost_resource_destroy(struct pipe_screen *screen,
struct pipe_resource *pt)
{
- struct panfrost_screen *pscreen = pan_screen(screen);
+ struct panfrost_device *dev = pan_device(screen);
struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;
- if (rsrc->scanout)
- renderonly_scanout_destroy(rsrc->scanout, pscreen->ro);
+ if (rsrc->scanout)
+ renderonly_scanout_destroy(rsrc->scanout, dev->ro);
- if (rsrc->bo)
- panfrost_bo_unreference(screen, rsrc->bo);
+ if (rsrc->bo)
+ panfrost_bo_unreference(rsrc->bo);
+
+ if (rsrc->slices[0].checksum_bo)
+ panfrost_bo_unreference(rsrc->slices[0].checksum_bo);
util_range_destroy(&rsrc->valid_buffer_range);
- ralloc_free(rsrc);
+ ralloc_free(rsrc);
+}
+
+/* Most of the time we can do CPU-side transfers, but sometimes we need to use
+ * the 3D pipe for this. Let's wrap u_blitter to blit to/from staging textures.
+ * Code adapted from freedreno */
+
+static struct panfrost_resource *
+pan_alloc_staging(struct panfrost_context *ctx, struct panfrost_resource *rsc,
+ unsigned level, const struct pipe_box *box)
+{
+ struct pipe_context *pctx = &ctx->base;
+ struct pipe_resource tmpl = rsc->base;
+
+ tmpl.width0 = box->width;
+ tmpl.height0 = box->height;
+ /* For array textures, box->depth is the array size; for 3D
+ * textures, it is the depth. */
+ if (tmpl.array_size > 1) {
+ if (tmpl.target == PIPE_TEXTURE_CUBE)
+ tmpl.target = PIPE_TEXTURE_2D_ARRAY;
+ tmpl.array_size = box->depth;
+ tmpl.depth0 = 1;
+ } else {
+ tmpl.array_size = 1;
+ tmpl.depth0 = box->depth;
+ }
+ tmpl.last_level = 0;
+ tmpl.bind |= PIPE_BIND_LINEAR;
+
+ struct pipe_resource *pstaging =
+ pctx->screen->resource_create(pctx->screen, &tmpl);
+ if (!pstaging)
+ return NULL;
+
+ return pan_resource(pstaging);
+}
+
+static void
+pan_blit_from_staging(struct pipe_context *pctx, struct panfrost_gtransfer *trans)
+{
+ struct pipe_resource *dst = trans->base.resource;
+ struct pipe_blit_info blit = {};
+
+ blit.dst.resource = dst;
+ blit.dst.format = dst->format;
+ blit.dst.level = trans->base.level;
+ blit.dst.box = trans->base.box;
+ blit.src.resource = trans->staging.rsrc;
+ blit.src.format = trans->staging.rsrc->format;
+ blit.src.level = 0;
+ blit.src.box = trans->staging.box;
+ blit.mask = util_format_get_mask(trans->staging.rsrc->format);
+ blit.filter = PIPE_TEX_FILTER_NEAREST;
+
+ panfrost_blit(pctx, &blit);
+}
+
+static void
+pan_blit_to_staging(struct pipe_context *pctx, struct panfrost_gtransfer *trans)
+{
+ struct pipe_resource *src = trans->base.resource;
+ struct pipe_blit_info blit = {};
+
+ blit.src.resource = src;
+ blit.src.format = src->format;
+ blit.src.level = trans->base.level;
+ blit.src.box = trans->base.box;
+ blit.dst.resource = trans->staging.rsrc;
+ blit.dst.format = trans->staging.rsrc->format;
+ blit.dst.level = 0;
+ blit.dst.box = trans->staging.box;
+ blit.mask = util_format_get_mask(trans->staging.rsrc->format);
+ blit.filter = PIPE_TEX_FILTER_NEAREST;
+
+ panfrost_blit(pctx, &blit);
}
static void *
const struct pipe_box *box,
struct pipe_transfer **out_transfer)
{
- int bytes_per_pixel = util_format_get_blocksize(resource->format);
+ struct panfrost_context *ctx = pan_context(pctx);
+ struct panfrost_device *dev = pan_device(pctx->screen);
struct panfrost_resource *rsrc = pan_resource(resource);
+ int bytes_per_pixel = util_format_get_blocksize(rsrc->internal_format);
struct panfrost_bo *bo = rsrc->bo;
+ /* Can't map tiled/compressed directly */
+ if ((usage & PIPE_TRANSFER_MAP_DIRECTLY) && rsrc->modifier != DRM_FORMAT_MOD_LINEAR)
+ return NULL;
+
struct panfrost_gtransfer *transfer = rzalloc(pctx, struct panfrost_gtransfer);
transfer->base.level = level;
transfer->base.usage = usage;
transfer->base.box = *box;
pipe_resource_reference(&transfer->base.resource, resource);
-
*out_transfer = &transfer->base;
- /* Check if we're bound for rendering and this is a read pixels. If so,
- * we need to flush */
+ /* We don't have s/w routines for AFBC, so use a staging texture */
+ if (drm_is_afbc(rsrc->modifier)) {
+ struct panfrost_resource *staging = pan_alloc_staging(ctx, rsrc, level, box);
+ transfer->base.stride = staging->slices[0].stride;
+ transfer->base.layer_stride = transfer->base.stride * box->height;
- struct panfrost_context *ctx = pan_context(pctx);
- struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
+ transfer->staging.rsrc = &staging->base;
- bool is_bound = false;
+ transfer->staging.box = *box;
+ transfer->staging.box.x = 0;
+ transfer->staging.box.y = 0;
+ transfer->staging.box.z = 0;
- for (unsigned c = 0; c < fb->nr_cbufs; ++c) {
- is_bound |= fb->cbufs[c]->texture == resource;
- }
+ assert(transfer->staging.rsrc != NULL);
+
+ /* TODO: Eliminate this flush. It's only there to determine if
+ * we're initialized or not, when the initialization could come
+ * from a pending batch XXX */
+ panfrost_flush_batches_accessing_bo(ctx, rsrc->bo, true);
+
+ if ((usage & PIPE_TRANSFER_READ) && rsrc->slices[level].initialized) {
+ pan_blit_to_staging(pctx, transfer);
+ panfrost_flush_batches_accessing_bo(ctx, staging->bo, true);
+ panfrost_bo_wait(staging->bo, INT64_MAX, false);
+ }
- if (is_bound && (usage & PIPE_TRANSFER_READ)) {
- assert(level == 0);
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
+ panfrost_bo_mmap(staging->bo);
+ return staging->bo->cpu;
}
- /* TODO: Respect usage flags */
+ /* If we haven't already mmaped, now's the time */
+ panfrost_bo_mmap(bo);
+
+ if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
+ pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
+
+ bool create_new_bo = usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ bool copy_resource = false;
+
+ if (!create_new_bo &&
+ !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ (usage & PIPE_TRANSFER_WRITE) &&
+ !(resource->target == PIPE_BUFFER
+ && !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) &&
+ panfrost_pending_batches_access_bo(ctx, bo)) {
+
+ /* When a resource to be modified is already being used by a
+ * pending batch, it is often faster to copy the whole BO than
+ * to flush and split the frame in two. This also mostly
+ * mitigates broken depth reload.
+ */
+
+ panfrost_flush_batches_accessing_bo(ctx, bo, false);
+ panfrost_bo_wait(bo, INT64_MAX, false);
+
+ create_new_bo = true;
+ copy_resource = true;
+ }
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
- /* TODO: reallocate */
- //printf("debug: Missed reallocate\n");
+ if (create_new_bo) {
+ /* If the BO is used by one of the pending batches or if it's
+ * not ready yet (still accessed by one of the already flushed
+ * batches), we try to allocate a new one to avoid waiting.
+ */
+ if (panfrost_pending_batches_access_bo(ctx, bo) ||
+ !panfrost_bo_wait(bo, 0, true)) {
+ /* We want the BO to be MMAPed. */
+ uint32_t flags = bo->flags & ~PAN_BO_DELAY_MMAP;
+ struct panfrost_bo *newbo = NULL;
+
+ /* When the BO has been imported/exported, we can't
+ * replace it by another one, otherwise the
+ * importer/exporter wouldn't see the change we're
+ * doing to it.
+ */
+ if (!(bo->flags & PAN_BO_SHARED))
+ newbo = panfrost_bo_create(dev, bo->size,
+ flags);
+
+ if (newbo) {
+ if (copy_resource)
+ memcpy(newbo->cpu, rsrc->bo->cpu, bo->size);
+
+ panfrost_bo_unreference(bo);
+ rsrc->bo = newbo;
+ bo = newbo;
+ } else {
+ /* Allocation failed or was impossible, let's
+ * fall back on a flush+wait.
+ */
+ panfrost_flush_batches_accessing_bo(ctx, bo, true);
+ panfrost_bo_wait(bo, INT64_MAX, true);
+ }
+ }
} else if ((usage & PIPE_TRANSFER_WRITE)
- && resource->target == PIPE_BUFFER
- && !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) {
+ && resource->target == PIPE_BUFFER
+ && !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) {
/* No flush for writes to uninitialized */
} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (usage & PIPE_TRANSFER_WRITE) {
- /* STUB: flush reading */
- //printf("debug: missed reading flush %d\n", resource->target);
+ panfrost_flush_batches_accessing_bo(ctx, bo, true);
+ panfrost_bo_wait(bo, INT64_MAX, true);
} else if (usage & PIPE_TRANSFER_READ) {
- /* STUB: flush writing */
- //printf("debug: missed writing flush %d (%d-%d)\n", resource->target, box->x, box->x + box->width);
- } else {
- /* Why are you even mapping?! */
+ panfrost_flush_batches_accessing_bo(ctx, bo, false);
+ panfrost_bo_wait(bo, INT64_MAX, false);
}
}
- if (rsrc->layout != PAN_LINEAR) {
- /* Non-linear resources need to be indirectly mapped */
-
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
- return NULL;
-
+ if (rsrc->modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED) {
transfer->base.stride = box->width * bytes_per_pixel;
transfer->base.layer_stride = transfer->base.stride * box->height;
- transfer->map = rzalloc_size(transfer, transfer->base.layer_stride * box->depth);
+ transfer->map = ralloc_size(transfer, transfer->base.layer_stride * box->depth);
assert(box->depth == 1);
if ((usage & PIPE_TRANSFER_READ) && rsrc->slices[level].initialized) {
- if (rsrc->layout == PAN_AFBC) {
- DBG("Unimplemented: reads from AFBC");
- } else if (rsrc->layout == PAN_TILED) {
- panfrost_load_tiled_image(
- transfer->map,
- bo->cpu + rsrc->slices[level].offset,
- box,
- transfer->base.stride,
- rsrc->slices[level].stride,
- util_format_get_blocksize(resource->format));
- }
+ panfrost_load_tiled_image(
+ transfer->map,
+ bo->cpu + rsrc->slices[level].offset,
+ box->x, box->y, box->width, box->height,
+ transfer->base.stride,
+ rsrc->slices[level].stride,
+ rsrc->internal_format);
}
return transfer->map;
} else {
+ assert (rsrc->modifier == DRM_FORMAT_MOD_LINEAR);
+
+ /* Direct, persistent writes create holes in time for
+ * caching... I don't know if this is actually possible but we
+ * should still get it right */
+
+ unsigned dpw = PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_WRITE | PIPE_TRANSFER_PERSISTENT;
+
+ if ((usage & dpw) == dpw && rsrc->index_cache)
+ return NULL;
+
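+ /* Layer strides differ by target: 3D textures stride by the
+ * level's 2D slice size, while arrays/cubemaps stride by the full
+ * per-face miptree (see panfrost_get_layer_stride) */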
transfer->base.stride = rsrc->slices[level].stride;
- transfer->base.layer_stride = rsrc->cubemap_stride;
+ transfer->base.layer_stride = panfrost_get_layer_stride(
+ rsrc->slices, rsrc->base.target == PIPE_TEXTURE_3D,
+ rsrc->cubemap_stride, level);
/* By mapping direct-write, we're implicitly already
* initialized (maybe), so be conservative */
- if ((usage & PIPE_TRANSFER_WRITE) && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
+ if (usage & PIPE_TRANSFER_WRITE) {
rsrc->slices[level].initialized = true;
+ panfrost_minmax_cache_invalidate(rsrc->index_cache, &transfer->base);
+ }
return bo->cpu
- + rsrc->slices[level].offset
- + transfer->base.box.z * rsrc->cubemap_stride
- + transfer->base.box.y * rsrc->slices[level].stride
- + transfer->base.box.x * bytes_per_pixel;
+ + rsrc->slices[level].offset
+ + transfer->base.box.z * transfer->base.layer_stride
+ + transfer->base.box.y * rsrc->slices[level].stride
+ + transfer->base.box.x * bytes_per_pixel;
}
}
struct panfrost_gtransfer *trans = pan_transfer(transfer);
struct panfrost_resource *prsrc = (struct panfrost_resource *) transfer->resource;
+ /* AFBC will use a staging resource. `initialized` will be set when the
+ * fragment job is created; this is deferred to prevent useless surface
+ * reloads that can cascade into DATA_INVALID_FAULTs due to reading
+ * malformed AFBC data if uninitialized */
+
+ if (trans->staging.rsrc) {
+ if (transfer->usage & PIPE_TRANSFER_WRITE) {
+ pan_blit_from_staging(pctx, trans);
+ panfrost_flush_batches_accessing_bo(pan_context(pctx), pan_resource(trans->staging.rsrc)->bo, true);
+ }
+
+ pipe_resource_reference(&trans->staging.rsrc, NULL);
+ }
+
+ /* Tiling will occur in software from a staging cpu buffer */
if (trans->map) {
struct panfrost_bo *bo = prsrc->bo;
if (transfer->usage & PIPE_TRANSFER_WRITE) {
- unsigned level = transfer->level;
- prsrc->slices[level].initialized = true;
+ prsrc->slices[transfer->level].initialized = true;
- if (prsrc->layout == PAN_AFBC) {
- DBG("Unimplemented: writes to AFBC\n");
- } else if (prsrc->layout == PAN_TILED) {
+ if (prsrc->modifier == DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED) {
assert(transfer->box.depth == 1);
- panfrost_store_tiled_image(
- bo->cpu + prsrc->slices[level].offset,
+ /* Do we overwrite the entire resource? If so,
+ * we don't need an intermediate blit so it's a
+ * good time to switch the modifier. */
+
+ bool discards_content = prsrc->base.last_level == 0
+ && transfer->box.width == prsrc->base.width0
+ && transfer->box.height == prsrc->base.height0
+ && transfer->box.x == 0
+ && transfer->box.y == 0
+ && !prsrc->modifier_constant;
+
+ /* It also serves as a good heuristic for
+ * streaming textures (e.g. in video players),
+ * but we could do better */
+
+ if (discards_content)
+ ++prsrc->modifier_updates;
+
+ if (prsrc->modifier_updates >= LAYOUT_CONVERT_THRESHOLD)
+ {
+ prsrc->modifier = DRM_FORMAT_MOD_LINEAR;
+
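+ /* Once linear, the staged pixels can simply be copied row by row
+ * instead of being retiled */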
+ util_copy_rect(
+ bo->cpu + prsrc->slices[0].offset,
+ prsrc->base.format,
+ prsrc->slices[0].stride,
+ 0, 0,
+ transfer->box.width,
+ transfer->box.height,
+ trans->map,
+ transfer->stride,
+ 0, 0);
+ } else {
+ panfrost_store_tiled_image(
+ bo->cpu + prsrc->slices[transfer->level].offset,
trans->map,
- &transfer->box,
- prsrc->slices[level].stride,
+ transfer->box.x, transfer->box.y,
+ transfer->box.width, transfer->box.height,
+ prsrc->slices[transfer->level].stride,
transfer->stride,
- util_format_get_blocksize(prsrc->base.format));
+ prsrc->internal_format);
+ }
}
}
}
- util_range_add(&prsrc->valid_buffer_range,
- transfer->box.x,
- transfer->box.x + transfer->box.width);
+ util_range_add(&prsrc->base, &prsrc->valid_buffer_range,
+ transfer->box.x,
+ transfer->box.x + transfer->box.width);
+
+ panfrost_minmax_cache_invalidate(prsrc->index_cache, transfer);
/* Dereference the resource */
pipe_resource_reference(&transfer->resource, NULL);
static void
panfrost_transfer_flush_region(struct pipe_context *pctx,
- struct pipe_transfer *transfer,
- const struct pipe_box *box)
-{
- struct panfrost_resource *rsc = pan_resource(transfer->resource);
-
- if (transfer->resource->target == PIPE_BUFFER) {
- util_range_add(&rsc->valid_buffer_range,
- transfer->box.x + box->x,
- transfer->box.x + box->x + box->width);
- }
-}
-
-static struct pb_slab *
-panfrost_slab_alloc(void *priv, unsigned heap, unsigned entry_size, unsigned group_index)
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
{
- struct panfrost_screen *screen = (struct panfrost_screen *) priv;
- struct panfrost_memory *mem = rzalloc(screen, struct panfrost_memory);
-
- size_t slab_size = (1 << (MAX_SLAB_ENTRY_SIZE + 1));
-
- mem->slab.num_entries = slab_size / entry_size;
- mem->slab.num_free = mem->slab.num_entries;
+ struct panfrost_resource *rsc = pan_resource(transfer->resource);
- LIST_INITHEAD(&mem->slab.free);
- for (unsigned i = 0; i < mem->slab.num_entries; ++i) {
- /* Create a slab entry */
- struct panfrost_memory_entry *entry = rzalloc(mem, struct panfrost_memory_entry);
- entry->offset = entry_size * i;
-
- entry->base.slab = &mem->slab;
- entry->base.group_index = group_index;
-
- LIST_ADDTAIL(&entry->base.head, &mem->slab.free);
+ if (transfer->resource->target == PIPE_BUFFER) {
+ util_range_add(&rsc->base, &rsc->valid_buffer_range,
+ transfer->box.x + box->x,
+ transfer->box.x + box->x + box->width);
+ } else {
+ unsigned level = transfer->level;
+ rsc->slices[level].initialized = true;
}
-
- /* Actually allocate the memory from kernel-space. Mapped, same_va, no
- * special flags */
-
- panfrost_drm_allocate_slab(screen, mem, slab_size / 4096, true, 0, 0, 0);
-
- return &mem->slab;
-}
-
-static bool
-panfrost_slab_can_reclaim(void *priv, struct pb_slab_entry *entry)
-{
- struct panfrost_memory_entry *p_entry = (struct panfrost_memory_entry *) entry;
- return p_entry->freed;
-}
-
-static void
-panfrost_slab_free(void *priv, struct pb_slab *slab)
-{
- struct panfrost_memory *mem = (struct panfrost_memory *) slab;
- struct panfrost_screen *screen = (struct panfrost_screen *) priv;
-
- panfrost_drm_free_slab(screen, mem);
- ralloc_free(mem);
}
static void
panfrost_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
- //DBG("TODO %s\n", __func__);
+ /* TODO */
}
static enum pipe_format
-panfrost_resource_get_internal_format(struct pipe_resource *prsrc)
+panfrost_resource_get_internal_format(struct pipe_resource *rsrc)
{
- return prsrc->format;
+ struct panfrost_resource *prsrc = (struct panfrost_resource *) rsrc;
+ return prsrc->internal_format;
}
-static boolean
+static bool
panfrost_generate_mipmap(
- struct pipe_context *pctx,
- struct pipe_resource *prsrc,
- enum pipe_format format,
- unsigned base_level,
- unsigned last_level,
- unsigned first_layer,
- unsigned last_layer)
+ struct pipe_context *pctx,
+ struct pipe_resource *prsrc,
+ enum pipe_format format,
+ unsigned base_level,
+ unsigned last_level,
+ unsigned first_layer,
+ unsigned last_layer)
{
- struct panfrost_context *ctx = pan_context(pctx);
struct panfrost_resource *rsrc = pan_resource(prsrc);
/* Generating a mipmap invalidates the written levels, so make that
for (unsigned l = base_level + 1; l <= last_level; ++l)
rsrc->slices[l].initialized = false;
- /* Beyond that, we just delegate the hard stuff. We're careful to
- * include flushes on both ends to make sure the data is really valid.
- * We could be doing a lot better perf-wise, especially once we have
- * reorder-type optimizations in place. But for now prioritize
- * correctness. */
-
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
- bool has_draws = job->last_job.gpu;
-
- if (has_draws)
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
-
- /* We've flushed the original buffer if needed, now trigger a blit */
+ /* Beyond that, we just delegate the hard stuff. */
bool blit_res = util_gen_mipmap(
- pctx, prsrc, format,
- base_level, last_level,
- first_layer, last_layer,
- PIPE_TEX_FILTER_LINEAR);
-
- /* If the blit was successful, flush once more. If it wasn't, well, let
- * the state tracker deal with it. */
-
- if (blit_res)
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
+ pctx, prsrc, format,
+ base_level, last_level,
+ first_layer, last_layer,
+ PIPE_TEX_FILTER_LINEAR);
return blit_res;
}
mali_ptr
panfrost_get_texture_address(
- struct panfrost_resource *rsrc,
- unsigned level, unsigned face)
+ struct panfrost_resource *rsrc,
+ unsigned level, unsigned face, unsigned sample)
{
- unsigned level_offset = rsrc->slices[level].offset;
- unsigned face_offset = face * rsrc->cubemap_stride;
-
- return rsrc->bo->gpu + level_offset + face_offset;
+ bool is_3d = rsrc->base.target == PIPE_TEXTURE_3D;
+ return rsrc->bo->gpu + panfrost_texture_offset(rsrc->slices, is_3d, rsrc->cubemap_stride, level, face, sample);
}
static void
};
void
-panfrost_resource_screen_init(struct panfrost_screen *pscreen)
+panfrost_resource_screen_init(struct pipe_screen *pscreen)
{
- //pscreen->base.resource_create_with_modifiers =
- // panfrost_resource_create_with_modifiers;
- pscreen->base.resource_create = u_transfer_helper_resource_create;
- pscreen->base.resource_destroy = u_transfer_helper_resource_destroy;
- pscreen->base.resource_from_handle = panfrost_resource_from_handle;
- pscreen->base.resource_get_handle = panfrost_resource_get_handle;
- pscreen->base.transfer_helper = u_transfer_helper_create(&transfer_vtbl,
- true, false,
- true, true);
-
- pb_slabs_init(&pscreen->slabs,
- MIN_SLAB_ENTRY_SIZE,
- MAX_SLAB_ENTRY_SIZE,
-
- 3, /* Number of heaps */
-
- pscreen,
-
- panfrost_slab_can_reclaim,
- panfrost_slab_alloc,
- panfrost_slab_free);
-}
-
-void
-panfrost_resource_screen_deinit(struct panfrost_screen *pscreen)
-{
- pb_slabs_deinit(&pscreen->slabs);
+ struct panfrost_device *dev = pan_device(pscreen);
+
+ bool fake_rgtc = !panfrost_supports_compressed_format(dev, MALI_BC4_UNORM);
+
+ pscreen->resource_create_with_modifiers =
+ panfrost_resource_create_with_modifiers;
+ pscreen->resource_create = u_transfer_helper_resource_create;
+ pscreen->resource_destroy = u_transfer_helper_resource_destroy;
+ pscreen->resource_from_handle = panfrost_resource_from_handle;
+ pscreen->resource_get_handle = panfrost_resource_get_handle;
+ pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
+ true, false,
+ fake_rgtc, true);
}
void
panfrost_resource_context_init(struct pipe_context *pctx)
{
pctx->transfer_map = u_transfer_helper_transfer_map;
- pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
- pctx->buffer_subdata = u_default_buffer_subdata;
pctx->create_surface = panfrost_create_surface;
pctx->surface_destroy = panfrost_surface_destroy;
pctx->resource_copy_region = util_resource_copy_region;