#include <fcntl.h>
#include "drm-uapi/drm_fourcc.h"
-#include "state_tracker/winsys_handle.h"
-#include "util/u_format.h"
+#include "frontend/winsys_handle.h"
+#include "util/format/u_format.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"
#include "util/u_transfer_helper.h"
#include "util/u_gen_mipmap.h"
+#include "pan_bo.h"
#include "pan_context.h"
#include "pan_screen.h"
#include "pan_resource.h"
#include "pan_util.h"
#include "pan_tiling.h"
+#include "pandecode/decode.h"
+#include "panfrost-quirks.h"
+
+void
+panfrost_resource_reset_damage(struct panfrost_resource *pres)
+{
+ /* We set the damage extent to the full resource size but keep the
+ * damage box empty so that the FB content is reloaded by default.
+ */
+ memset(&pres->damage, 0, sizeof(pres->damage));
+ pres->damage.extent.maxx = pres->base.width0;
+ pres->damage.extent.maxy = pres->base.height0;
+}
static struct pipe_resource *
panfrost_resource_from_handle(struct pipe_screen *pscreen,
const struct pipe_resource *templat,
struct winsys_handle *whandle,
unsigned usage)
{
- struct panfrost_screen *screen = pan_screen(pscreen);
+ struct panfrost_device *dev = pan_device(pscreen);
struct panfrost_resource *rsc;
struct pipe_resource *prsc;
rsc = rzalloc(pscreen, struct panfrost_resource);
if (!rsc)
return NULL;
prsc = &rsc->base;
*prsc = *templat;
pipe_reference_init(&prsc->reference, 1);
prsc->screen = pscreen;
- rsc->bo = panfrost_drm_import_bo(screen, whandle);
- rsc->slices[0].stride = whandle->stride;
- rsc->slices[0].initialized = true;
+ rsc->bo = panfrost_bo_import(dev, whandle->handle);
+ rsc->internal_format = templat->format;
+ rsc->layout = MALI_TEXTURE_LINEAR;
+ rsc->slices[0].stride = whandle->stride;
+ rsc->slices[0].offset = whandle->offset;
+ rsc->slices[0].initialized = true;
+ panfrost_resource_reset_damage(rsc);
+
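+ /* Checksumming (transaction elimination) needs 8 bytes per 16x16
+ * tile; e.g. a 1920x1080 render target covers 120x68 aligned tiles,
+ * giving a 960-byte checksum stride and 65280 bytes total. The
+ * imported BO has no room for that, so give it a BO of its own. */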
+ if ((dev->quirks & IS_BIFROST) &&
+ (templat->bind & PIPE_BIND_RENDER_TARGET)) {
+ unsigned size = panfrost_compute_checksum_size(
+ &rsc->slices[0], templat->width0, templat->height0);
+ rsc->slices[0].checksum_bo = panfrost_bo_create(dev, size, 0);
+ rsc->checksummed = true;
+ }
- if (screen->ro) {
- rsc->scanout =
- renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
- /* failure is expected in some cases.. */
- }
+ if (dev->ro) {
+ rsc->scanout =
+ renderonly_create_gpu_import_for_resource(prsc, dev->ro, NULL);
+ /* failure is expected in some cases.. */
+ }
return prsc;
}
-static boolean
+static bool
panfrost_resource_get_handle(struct pipe_screen *pscreen,
struct pipe_context *ctx,
struct pipe_resource *pt,
struct winsys_handle *handle,
unsigned usage)
{
- struct panfrost_screen *screen = pan_screen(pscreen);
+ struct panfrost_device *dev = pan_device(pscreen);
struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;
struct renderonly_scanout *scanout = rsrc->scanout;
handle->modifier = DRM_FORMAT_MOD_INVALID;
- if (handle->type == WINSYS_HANDLE_TYPE_SHARED) {
- return FALSE;
- } else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
- if (renderonly_get_handle(scanout, handle))
- return TRUE;
-
- handle->handle = rsrc->bo->gem_handle;
- handle->stride = rsrc->slices[0].stride;
- return TRUE;
- } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
+ if (handle->type == WINSYS_HANDLE_TYPE_SHARED) {
+ return false;
+ } else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
+ if (renderonly_get_handle(scanout, handle))
+ return true;
+
+ handle->handle = rsrc->bo->gem_handle;
+ handle->stride = rsrc->slices[0].stride;
+ handle->offset = rsrc->slices[0].offset;
+ return true;
+ } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
if (scanout) {
struct drm_prime_handle args = {
.handle = scanout->handle,
.flags = DRM_CLOEXEC,
};
- int ret = drmIoctl(screen->ro->kms_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
+ int ret = drmIoctl(dev->ro->kms_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
if (ret == -1)
- return FALSE;
+ return false;
handle->stride = scanout->stride;
handle->handle = args.fd;
- return TRUE;
- } else
- return panfrost_drm_export_bo(screen, rsrc->bo->gem_handle,
- rsrc->slices[0].stride,
- handle);
- }
+ return true;
+ } else {
+ int fd = panfrost_bo_export(rsrc->bo);
+
+ if (fd < 0)
+ return false;
- return FALSE;
+ handle->handle = fd;
+ handle->stride = rsrc->slices[0].stride;
+ handle->offset = rsrc->slices[0].offset;
+ return true;
+ }
+ }
+
+ return false;
}
static void
panfrost_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
- //DBG("TODO %s\n", __func__);
+ /* TODO */
}
static struct pipe_resource *
panfrost_create_scanout_res(struct pipe_screen *screen,
const struct pipe_resource *template)
{
- struct panfrost_screen *pscreen = pan_screen(screen);
+ struct panfrost_device *dev = pan_device(screen);
struct pipe_resource scanout_templat = *template;
struct renderonly_scanout *scanout;
struct winsys_handle handle;
struct pipe_resource *res;
scanout = renderonly_scanout_for_resource(&scanout_templat,
- pscreen->ro, &handle);
+ dev->ro, &handle);
if (!scanout)
return NULL;
/* TODO: handle modifiers? */
res = screen->resource_from_handle(screen, template, &handle,
PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE);
close(handle.handle);
if (!res)
return NULL;
struct panfrost_resource *pres = pan_resource(res);
pres->scanout = scanout;
- pscreen->display_target = pres;
return res;
}
-/* Computes sizes for checksumming, which is 8 bytes per 16x16 tile */
-
-#define CHECKSUM_TILE_WIDTH 16
-#define CHECKSUM_TILE_HEIGHT 16
-#define CHECKSUM_BYTES_PER_TILE 8
-
-static unsigned
-panfrost_compute_checksum_sizes(
- struct panfrost_slice *slice,
- unsigned width,
- unsigned height)
-{
- unsigned aligned_width = ALIGN(width, CHECKSUM_TILE_WIDTH);
- unsigned aligned_height = ALIGN(height, CHECKSUM_TILE_HEIGHT);
-
- unsigned tile_count_x = aligned_width / CHECKSUM_TILE_WIDTH;
- unsigned tile_count_y = aligned_height / CHECKSUM_TILE_HEIGHT;
-
- slice->checksum_stride = tile_count_x * CHECKSUM_BYTES_PER_TILE;
-
- return slice->checksum_stride * tile_count_y;
-}
-
/* Setup the mip tree given a particular layout, possibly with checksumming */
static void
panfrost_setup_slices(struct panfrost_resource *pres, size_t *bo_size)
{
struct pipe_resource *res = &pres->base;
unsigned width = res->width0;
unsigned height = res->height0;
unsigned depth = res->depth0;
- unsigned bytes_per_pixel = util_format_get_blocksize(res->format);
+ unsigned bytes_per_pixel = util_format_get_blocksize(pres->internal_format);
+
+ /* MSAA is implemented as a 3D texture with z corresponding to the
+ * sample #, horrifyingly enough */
+
+ bool msaa = res->nr_samples > 1;
+
+ if (msaa) {
+ assert(depth == 1);
+ depth = res->nr_samples;
+ }
assert(depth > 0);
/* Tiled surfaces operate blockwise, and anything we render to wants tile alignment; it makes code a lot simpler */
bool renderable = res->bind &
- (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL);
- bool afbc = pres->layout == PAN_AFBC;
- bool tiled = pres->layout == PAN_TILED;
+ (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL) &&
+ res->target != PIPE_BUFFER;
+ bool afbc = pres->layout == MALI_TEXTURE_AFBC;
+ bool tiled = pres->layout == MALI_TEXTURE_TILED;
bool should_align = renderable || tiled;
/* We don't know how to specify a 2D stride for 3D textures */
unsigned effective_depth = depth;
if (should_align) {
- effective_width = ALIGN(effective_width, 16);
- effective_height = ALIGN(effective_height, 16);
+ effective_width = ALIGN_POT(effective_width, 16);
+ effective_height = ALIGN_POT(effective_height, 16);
/* We don't need to align depth */
}
+ /* Align levels to cache-line as a performance improvement for
+ * linear/tiled and as a requirement for AFBC */
+
+ offset = ALIGN_POT(offset, 64);
+
slice->offset = offset;
/* Compute the would-be stride */
unsigned stride = bytes_per_pixel * effective_width;
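+ /* Compressed formats are assumed to use 4x4 blocks here: the
+ * blocksize above is per block, so width/4 counts block columns */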
+ if (util_format_is_compressed(pres->internal_format))
+ stride /= 4;
+
/* ..but cache-line align it for performance */
- if (can_align_stride && pres->layout == PAN_LINEAR)
- stride = ALIGN(stride, 64);
+ if (can_align_stride && pres->layout == MALI_TEXTURE_LINEAR)
+ stride = ALIGN_POT(stride, 64);
slice->stride = stride;
unsigned slice_one_size = slice->stride * effective_height;
unsigned slice_full_size = slice_one_size * effective_depth;
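+ /* Remember the per-layer size, used to stride across z slices and
+ * samples */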
+ slice->size0 = slice_one_size;
+
/* Report 2D size for 3D texturing */
if (l == 0)
size_2d = slice_one_size;
if (pres->checksummed) {
slice->checksum_offset = offset;
- unsigned size = panfrost_compute_checksum_sizes(
- slice, width, height);
+ unsigned size = panfrost_compute_checksum_size(
+ slice, width, height);
offset += size;
}
width = u_minify(width, 1);
height = u_minify(height, 1);
- depth = u_minify(depth, 1);
+
+ /* Don't mipmap the sample count */
+ if (!msaa)
+ depth = u_minify(depth, 1);
}
assert(res->array_size);
if (res->target != PIPE_TEXTURE_3D) {
/* Arrays and cubemaps have the entire miptree duplicated */
- pres->cubemap_stride = ALIGN(offset, 64);
- *bo_size = ALIGN(pres->cubemap_stride * res->array_size, 4096);
+ pres->cubemap_stride = ALIGN_POT(offset, 64);
+ *bo_size = ALIGN_POT(pres->cubemap_stride * res->array_size, 4096);
} else {
/* 3D strides across the 2D layers */
assert(res->array_size == 1);
pres->cubemap_stride = size_2d;
- *bo_size = ALIGN(offset, 4096);
+ *bo_size = ALIGN_POT(offset, 4096);
}
}
static void
-panfrost_resource_create_bo(struct panfrost_screen *screen, struct panfrost_resource *pres)
+panfrost_resource_create_bo(struct panfrost_device *dev, struct panfrost_resource *pres)
{
- struct pipe_resource *res = &pres->base;
+ struct pipe_resource *res = &pres->base;
/* Based on the usage, figure out what storage will be used. There are
* various tradeoffs:
*
* Tiled: Not compressed, but cache-optimized. Expensive to write into
* (due to software tiling), but cheap to sample from. Ideal for most
- * textures.
+ * textures.
*
* AFBC: Compressed and renderable (so always desirable for non-scanout
* rendertargets). Cheap to sample from. The format is black box, so we
* can't read/write from software.
- */
-
- /* Tiling textures is almost always faster, unless we only use it once */
-
- bool is_texture = (res->bind & PIPE_BIND_SAMPLER_VIEW);
- bool is_2d = res->depth0 == 1 && res->array_size == 1;
- bool is_streaming = (res->usage != PIPE_USAGE_STREAM);
-
- bool should_tile = is_streaming && is_texture && is_2d;
-
- /* Depth/stencil can't be tiled, only linear or AFBC */
- should_tile &= !(res->bind & PIPE_BIND_DEPTH_STENCIL);
+ *
+ * Tiling textures is almost always faster, unless we only use it once.
+ * Only a few types of resources can be tiled, ensure the bind is only
+ * (a combination of) one of the following */
+
+ const unsigned valid_binding =
+ PIPE_BIND_DEPTH_STENCIL |
+ PIPE_BIND_RENDER_TARGET |
+ PIPE_BIND_BLENDABLE |
+ PIPE_BIND_SAMPLER_VIEW |
+ PIPE_BIND_DISPLAY_TARGET;
+
+ unsigned bpp = util_format_get_blocksizebits(pres->internal_format);
+ bool is_2d = (res->target == PIPE_TEXTURE_2D) || (res->target == PIPE_TEXTURE_RECT);
+ bool is_sane_bpp = bpp == 8 || bpp == 16 || bpp == 24 || bpp == 32 || bpp == 64 || bpp == 128;
+ bool should_tile = (res->usage != PIPE_USAGE_STREAM);
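+ /* SFBD hardware and Bifrost presumably lack linear depth/stencil
+ * rendering, so tiling is mandatory for those targets */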
+ bool must_tile = (res->bind & PIPE_BIND_DEPTH_STENCIL) &&
+ (dev->quirks & (MIDGARD_SFBD | IS_BIFROST));
+ bool can_tile = is_2d && is_sane_bpp && ((res->bind & ~valid_binding) == 0);
/* FBOs we would like to checksum, if at all possible */
- bool can_checksum = !(res->bind & (PIPE_BIND_SCANOUT | PIPE_BIND_SHARED));
+ bool can_checksum = !(res->bind & ~valid_binding);
bool should_checksum = res->bind & PIPE_BIND_RENDER_TARGET;
pres->checksummed = can_checksum && should_checksum;
/* Set the layout appropriately */
- pres->layout = should_tile ? PAN_TILED : PAN_LINEAR;
+ assert(!(must_tile && !can_tile)); /* must_tile => can_tile */
+ pres->layout = ((can_tile && should_tile) || must_tile) ? MALI_TEXTURE_TILED : MALI_TEXTURE_LINEAR;
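+ /* Record whether the layout may still change: when tiling is forced
+ * or impossible, the transfer-time heuristics must not convert it */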
+ pres->layout_constant = must_tile || !can_tile;
size_t bo_size;
panfrost_setup_slices(pres, &bo_size);
- struct panfrost_memory mem;
- struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
+ /* We create a BO immediately but don't bother mapping, since we don't
+ * care to map e.g. FBOs which the CPU probably won't touch */
+ pres->bo = panfrost_bo_create(dev, bo_size, PAN_BO_DELAY_MMAP);
+}
+
+void
+panfrost_resource_set_damage_region(struct pipe_screen *screen,
+ struct pipe_resource *res,
+ unsigned int nrects,
+ const struct pipe_box *rects)
+{
+ struct panfrost_resource *pres = pan_resource(res);
+ struct pipe_box *damage_rect = &pres->damage.biggest_rect;
+ struct pipe_scissor_state *damage_extent = &pres->damage.extent;
+ unsigned int i;
- pipe_reference_init(&bo->reference, 1);
- panfrost_drm_allocate_slab(screen, &mem, bo_size / 4096, true, 0, 0, 0);
+ if (!nrects) {
+ panfrost_resource_reset_damage(pres);
+ return;
+ }
+
+ /* We keep track of 2 different things here:
+ * 1 the damage extent: the quad including all damage regions. It will
+ * be used to restrict the rendering area.
+ * 2 the biggest damage rectangle: when there is more than one damage
+ * rect, we keep the biggest one and will generate 4 wallpaper quads
+ * out of it (see panfrost_draw_wallpaper() for more details). We
+ * might want to do something smarter at some point.
+ *
+ * _________________________________
+ * | |
+ * | _________________________ |
+ * | | rect1| _________| |
+ * | |______|_____ | rect 3: | |
+ * | | | rect2 | | biggest | |
+ * | | |_______| | rect | |
+ * | |_______________|_________| |
+ * | damage extent |
+ * |_______________________________|
+ * resource
+ */
+ memset(&pres->damage, 0, sizeof(pres->damage));
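+ /* Start from an empty extent: maxx/maxy are zeroed by the memset,
+ * and the 16-bit scissor minima start at their largest value */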
+ damage_extent->minx = 0xffff;
+ damage_extent->miny = 0xffff;
+ for (i = 0; i < nrects; i++) {
+ int x = rects[i].x, w = rects[i].width, h = rects[i].height;
+ int y = res->height0 - (rects[i].y + h);
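+ /* Damage rects and the resource use opposite vertical origins,
+ * hence the y flip above */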
+
+ /* Clamp x,y,w,h to prevent negative values. */
+ if (x < 0) {
+ w += x;
+ x = 0;
+ }
+ if (y < 0) {
+ h += y;
+ y = 0;
+ }
+ w = MAX2(w, 0);
+ h = MAX2(h, 0);
+
+ if (damage_rect->width * damage_rect->height < w * h)
+ u_box_2d(x, y, w, h, damage_rect);
+
+ damage_extent->minx = MIN2(damage_extent->minx, x);
+ damage_extent->miny = MIN2(damage_extent->miny, y);
+ damage_extent->maxx = MAX2(damage_extent->maxx,
+ MIN2(x + w, res->width0));
+ damage_extent->maxy = MAX2(damage_extent->maxy,
+ MIN2(y + h, res->height0));
+ }
+
- bo->cpu = mem.cpu;
- bo->gpu = mem.gpu;
- bo->gem_handle = mem.gem_handle;
- bo->size = bo_size;
- pres->bo = bo;
}
static struct pipe_resource *
panfrost_resource_create(struct pipe_screen *screen,
const struct pipe_resource *template)
{
+ struct panfrost_device *dev = pan_device(screen);
+
/* Make sure we're familiar */
switch (template->target) {
- case PIPE_BUFFER:
- case PIPE_TEXTURE_1D:
- case PIPE_TEXTURE_2D:
- case PIPE_TEXTURE_3D:
- case PIPE_TEXTURE_CUBE:
- case PIPE_TEXTURE_RECT:
- case PIPE_TEXTURE_2D_ARRAY:
- break;
- default:
- DBG("Unknown texture target %d\n", template->target);
- assert(0);
+ case PIPE_BUFFER:
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_3D:
+ case PIPE_TEXTURE_CUBE:
+ case PIPE_TEXTURE_RECT:
+ case PIPE_TEXTURE_2D_ARRAY:
+ break;
+ default:
+ unreachable("Unknown texture target\n");
}
- if (template->bind &
- (PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT | PIPE_BIND_SHARED))
+ if (dev->ro && (template->bind &
+ (PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT | PIPE_BIND_SHARED)))
return panfrost_create_scanout_res(screen, template);
struct panfrost_resource *so = rzalloc(screen, struct panfrost_resource);
- struct panfrost_screen *pscreen = (struct panfrost_screen *) screen;
-
so->base = *template;
so->base.screen = screen;
+ so->internal_format = template->format;
pipe_reference_init(&so->base.reference, 1);
util_range_init(&so->valid_buffer_range);
- panfrost_resource_create_bo(pscreen, so);
- return (struct pipe_resource *)so;
-}
-
-static void
-panfrost_destroy_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
-{
- struct panfrost_memory mem = {
- .cpu = bo->cpu,
- .gpu = bo->gpu,
- .size = bo->size,
- .gem_handle = bo->gem_handle,
- };
-
- panfrost_drm_free_slab(screen, &mem);
- ralloc_free(bo);
-}
-
-void
-panfrost_bo_reference(struct panfrost_bo *bo)
-{
- pipe_reference(NULL, &bo->reference);
-}
+ panfrost_resource_create_bo(dev, so);
+ panfrost_resource_reset_damage(so);
-void
-panfrost_bo_unreference(struct pipe_screen *screen, struct panfrost_bo *bo)
-{
- /* When the reference count goes to zero, we need to cleanup */
+ if (template->bind & PIPE_BIND_INDEX_BUFFER)
+ so->index_cache = rzalloc(so, struct panfrost_minmax_cache);
- if (pipe_reference(&bo->reference, NULL)) {
- panfrost_destroy_bo(pan_screen(screen), bo);
- }
+ return (struct pipe_resource *)so;
}
static void
panfrost_resource_destroy(struct pipe_screen *screen,
struct pipe_resource *pt)
{
- struct panfrost_screen *pscreen = pan_screen(screen);
+ struct panfrost_device *dev = pan_device(screen);
struct panfrost_resource *rsrc = (struct panfrost_resource *) pt;
- if (rsrc->scanout)
- renderonly_scanout_destroy(rsrc->scanout, pscreen->ro);
+ if (rsrc->scanout)
+ renderonly_scanout_destroy(rsrc->scanout, dev->ro);
+
+ if (rsrc->bo)
+ panfrost_bo_unreference(rsrc->bo);
- if (rsrc->bo)
- panfrost_bo_unreference(screen, rsrc->bo);
+ if (rsrc->slices[0].checksum_bo)
+ panfrost_bo_unreference(rsrc->slices[0].checksum_bo);
util_range_destroy(&rsrc->valid_buffer_range);
- ralloc_free(rsrc);
+ ralloc_free(rsrc);
}
+
static void *
panfrost_transfer_map(struct pipe_context *pctx,
struct pipe_resource *resource,
unsigned level,
unsigned usage, /* a combination of PIPE_TRANSFER_x */
const struct pipe_box *box,
struct pipe_transfer **out_transfer)
{
- int bytes_per_pixel = util_format_get_blocksize(resource->format);
+ struct panfrost_context *ctx = pan_context(pctx);
+ struct panfrost_device *dev = pan_device(pctx->screen);
struct panfrost_resource *rsrc = pan_resource(resource);
+ int bytes_per_pixel = util_format_get_blocksize(rsrc->internal_format);
struct panfrost_bo *bo = rsrc->bo;
struct panfrost_gtransfer *transfer = rzalloc(pctx, struct panfrost_gtransfer);
*out_transfer = &transfer->base;
- /* Check if we're bound for rendering and this is a read pixels. If so,
- * we need to flush */
+ /* If we haven't already mmaped, now's the time */
+ panfrost_bo_mmap(bo);
- struct panfrost_context *ctx = pan_context(pctx);
- struct pipe_framebuffer_state *fb = &ctx->pipe_framebuffer;
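+ /* When tracing, register the mapping so the decoder can resolve
+ * this BO's GPU addresses to CPU memory */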
+ if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
+ pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
- bool is_bound = false;
+ bool create_new_bo = usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+ bool copy_resource = false;
- for (unsigned c = 0; c < fb->nr_cbufs; ++c) {
- is_bound |= fb->cbufs[c]->texture == resource;
- }
+ if (!create_new_bo &&
+ !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ (usage & PIPE_TRANSFER_WRITE) &&
+ !(resource->target == PIPE_BUFFER
+ && !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) &&
+ panfrost_pending_batches_access_bo(ctx, bo)) {
- if (is_bound && (usage & PIPE_TRANSFER_READ)) {
- assert(level == 0);
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
- }
+ /* When a resource to be modified is already being used by a
+ * pending batch, it is often faster to copy the whole BO than
+ * to flush and split the frame in two. This also mostly
+ * mitigates broken depth reload.
+ */
+
+ panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_ACCESS_WRITE);
+ panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_WRITE);
- /* TODO: Respect usage flags */
+ create_new_bo = true;
+ copy_resource = true;
+ }
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
- /* TODO: reallocate */
- //printf("debug: Missed reallocate\n");
+ if (create_new_bo) {
+ /* If the BO is used by one of the pending batches or if it's
+ * not ready yet (still accessed by one of the already flushed
+ * batches), we try to allocate a new one to avoid waiting.
+ */
+ if (panfrost_pending_batches_access_bo(ctx, bo) ||
+ !panfrost_bo_wait(bo, 0, PAN_BO_ACCESS_RW)) {
+ /* We want the BO to be MMAPed. */
+ uint32_t flags = bo->flags & ~PAN_BO_DELAY_MMAP;
+ struct panfrost_bo *newbo = NULL;
+
+ /* When the BO has been imported/exported, we can't
+ * replace it by another one, otherwise the
+ * importer/exporter wouldn't see the change we're
+ * doing to it.
+ */
+ if (!(bo->flags & PAN_BO_SHARED))
+ newbo = panfrost_bo_create(dev, bo->size,
+ flags);
+
+ if (newbo) {
+ if (copy_resource)
+ memcpy(newbo->cpu, rsrc->bo->cpu, bo->size);
+
+ panfrost_bo_unreference(bo);
+ rsrc->bo = newbo;
+ bo = newbo;
+ } else {
+ /* Allocation failed or was impossible, let's
+ * fall back on a flush+wait.
+ */
+ panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_ACCESS_RW);
+ panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_RW);
+ }
+ }
} else if ((usage & PIPE_TRANSFER_WRITE)
- && resource->target == PIPE_BUFFER
- && !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) {
+ && resource->target == PIPE_BUFFER
+ && !util_ranges_intersect(&rsrc->valid_buffer_range, box->x, box->x + box->width)) {
/* No flush for writes to uninitialized */
} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
if (usage & PIPE_TRANSFER_WRITE) {
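+ /* A writer must wait for pending readers and writers; the
+ * read-only path below only waits for pending writers */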
- /* STUB: flush reading */
- //printf("debug: missed reading flush %d\n", resource->target);
+ panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_ACCESS_RW);
+ panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_RW);
} else if (usage & PIPE_TRANSFER_READ) {
- /* STUB: flush writing */
- //printf("debug: missed writing flush %d (%d-%d)\n", resource->target, box->x, box->x + box->width);
- } else {
- /* Why are you even mapping?! */
+ panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_ACCESS_WRITE);
+ panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_WRITE);
}
}
- if (rsrc->layout != PAN_LINEAR) {
+ if (rsrc->layout != MALI_TEXTURE_LINEAR) {
/* Non-linear resources need to be indirectly mapped */
if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
return NULL;
transfer->base.stride = box->width * bytes_per_pixel;
transfer->base.layer_stride = transfer->base.stride * box->height;
- transfer->map = rzalloc_size(transfer, transfer->base.layer_stride * box->depth);
+ transfer->map = ralloc_size(transfer, transfer->base.layer_stride * box->depth);
assert(box->depth == 1);
if ((usage & PIPE_TRANSFER_READ) && rsrc->slices[level].initialized) {
- if (rsrc->layout == PAN_AFBC) {
- DBG("Unimplemented: reads from AFBC");
- } else if (rsrc->layout == PAN_TILED) {
+ if (rsrc->layout == MALI_TEXTURE_AFBC) {
+ unreachable("Unimplemented: reads from AFBC");
+ } else if (rsrc->layout == MALI_TEXTURE_TILED) {
panfrost_load_tiled_image(
- transfer->map,
- bo->cpu + rsrc->slices[level].offset,
- box,
- transfer->base.stride,
- rsrc->slices[level].stride,
- util_format_get_blocksize(resource->format));
+ transfer->map,
+ bo->cpu + rsrc->slices[level].offset,
+ box->x, box->y, box->width, box->height,
+ transfer->base.stride,
+ rsrc->slices[level].stride,
+ rsrc->internal_format);
}
}
return transfer->map;
} else {
+ /* Direct, persistent writes create holes in time for
+ * caching... I don't know if this is actually possible but we
+ * should still get it right */
+
+ unsigned dpw = PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_WRITE | PIPE_TRANSFER_PERSISTENT;
+
+ if ((usage & dpw) == dpw && rsrc->index_cache)
+ return NULL;
+
transfer->base.stride = rsrc->slices[level].stride;
- transfer->base.layer_stride = rsrc->cubemap_stride;
+ transfer->base.layer_stride = panfrost_get_layer_stride(
+ rsrc->slices, rsrc->base.target == PIPE_TEXTURE_3D,
+ rsrc->cubemap_stride, level);
/* By mapping direct-write, we're implicitly already
* initialized (maybe), so be conservative */
- if ((usage & PIPE_TRANSFER_WRITE) && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
+ if ((usage & PIPE_TRANSFER_WRITE) && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
rsrc->slices[level].initialized = true;
+ panfrost_minmax_cache_invalidate(rsrc->index_cache, &transfer->base);
+ }
return bo->cpu
- + rsrc->slices[level].offset
- + transfer->base.box.z * rsrc->cubemap_stride
- + transfer->base.box.y * rsrc->slices[level].stride
- + transfer->base.box.x * bytes_per_pixel;
+ + rsrc->slices[level].offset
+ + transfer->base.box.z * transfer->base.layer_stride
+ + transfer->base.box.y * rsrc->slices[level].stride
+ + transfer->base.box.x * bytes_per_pixel;
}
}
static void
panfrost_transfer_unmap(struct pipe_context *pctx,
struct pipe_transfer *transfer)
{
struct panfrost_gtransfer *trans = pan_transfer(transfer);
struct panfrost_resource *prsrc = (struct panfrost_resource *) transfer->resource;
+ /* Mark whatever we wrote as written */
+ if (transfer->usage & PIPE_TRANSFER_WRITE)
+ prsrc->slices[transfer->level].initialized = true;
+
if (trans->map) {
struct panfrost_bo *bo = prsrc->bo;
if (transfer->usage & PIPE_TRANSFER_WRITE) {
- unsigned level = transfer->level;
- prsrc->slices[level].initialized = true;
-
- if (prsrc->layout == PAN_AFBC) {
- DBG("Unimplemented: writes to AFBC\n");
- } else if (prsrc->layout == PAN_TILED) {
+ if (prsrc->layout == MALI_TEXTURE_AFBC) {
+ unreachable("Unimplemented: writes to AFBC\n");
+ } else if (prsrc->layout == MALI_TEXTURE_TILED) {
assert(transfer->box.depth == 1);
- panfrost_store_tiled_image(
- bo->cpu + prsrc->slices[level].offset,
+ /* Do we overwrite the entire resource? If so,
+ * we don't need an intermediate blit so it's a
+ * good time to switch the layout. */
+
+ bool discards_content = prsrc->base.last_level == 0
+ && transfer->box.width == prsrc->base.width0
+ && transfer->box.height == prsrc->base.height0
+ && transfer->box.x == 0
+ && transfer->box.y == 0
+ && !prsrc->layout_constant;
+
+ /* It also serves as a good heuristic for
+ * streaming textures (e.g. in video players),
+ * but we could do better */
+
+ if (discards_content)
+ ++prsrc->layout_updates;
+
+ if (prsrc->layout_updates >= LAYOUT_CONVERT_THRESHOLD) {
+ prsrc->layout = MALI_TEXTURE_LINEAR;
+
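+ /* The staging buffer holds linear data, so with the layout now
+ * linear it can simply be copied row by row */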
+ util_copy_rect(
+ bo->cpu + prsrc->slices[0].offset,
+ prsrc->base.format,
+ prsrc->slices[0].stride,
+ 0, 0,
+ transfer->box.width,
+ transfer->box.height,
+ trans->map,
+ transfer->stride,
+ 0, 0);
+ } else {
+ panfrost_store_tiled_image(
+ bo->cpu + prsrc->slices[transfer->level].offset,
trans->map,
- &transfer->box,
- prsrc->slices[level].stride,
+ transfer->box.x, transfer->box.y,
+ transfer->box.width, transfer->box.height,
+ prsrc->slices[transfer->level].stride,
transfer->stride,
- util_format_get_blocksize(prsrc->base.format));
+ prsrc->internal_format);
+ }
}
}
}
- util_range_add(&prsrc->valid_buffer_range,
- transfer->box.x,
- transfer->box.x + transfer->box.width);
+ util_range_add(&prsrc->base, &prsrc->valid_buffer_range,
+ transfer->box.x,
+ transfer->box.x + transfer->box.width);
+
+ panfrost_minmax_cache_invalidate(prsrc->index_cache, transfer);
/* Dereference the resource */
pipe_resource_reference(&transfer->resource, NULL);
ralloc_free(transfer);
}
static void
panfrost_transfer_flush_region(struct pipe_context *pctx,
- struct pipe_transfer *transfer,
- const struct pipe_box *box)
-{
- struct panfrost_resource *rsc = pan_resource(transfer->resource);
-
- if (transfer->resource->target == PIPE_BUFFER) {
- util_range_add(&rsc->valid_buffer_range,
- transfer->box.x + box->x,
- transfer->box.x + box->x + box->width);
- }
-}
-
-static struct pb_slab *
-panfrost_slab_alloc(void *priv, unsigned heap, unsigned entry_size, unsigned group_index)
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
{
- struct panfrost_screen *screen = (struct panfrost_screen *) priv;
- struct panfrost_memory *mem = rzalloc(screen, struct panfrost_memory);
-
- size_t slab_size = (1 << (MAX_SLAB_ENTRY_SIZE + 1));
-
- mem->slab.num_entries = slab_size / entry_size;
- mem->slab.num_free = mem->slab.num_entries;
-
- LIST_INITHEAD(&mem->slab.free);
- for (unsigned i = 0; i < mem->slab.num_entries; ++i) {
- /* Create a slab entry */
- struct panfrost_memory_entry *entry = rzalloc(mem, struct panfrost_memory_entry);
- entry->offset = entry_size * i;
+ struct panfrost_resource *rsc = pan_resource(transfer->resource);
- entry->base.slab = &mem->slab;
- entry->base.group_index = group_index;
-
- LIST_ADDTAIL(&entry->base.head, &mem->slab.free);
+ if (transfer->resource->target == PIPE_BUFFER) {
+ util_range_add(&rsc->base, &rsc->valid_buffer_range,
+ transfer->box.x + box->x,
+ transfer->box.x + box->x + box->width);
+ } else {
+ unsigned level = transfer->level;
+ rsc->slices[level].initialized = true;
}
-
- /* Actually allocate the memory from kernel-space. Mapped, same_va, no
- * special flags */
-
- panfrost_drm_allocate_slab(screen, mem, slab_size / 4096, true, 0, 0, 0);
-
- return &mem->slab;
-}
-
-static bool
-panfrost_slab_can_reclaim(void *priv, struct pb_slab_entry *entry)
-{
- struct panfrost_memory_entry *p_entry = (struct panfrost_memory_entry *) entry;
- return p_entry->freed;
-}
-
-static void
-panfrost_slab_free(void *priv, struct pb_slab *slab)
-{
- struct panfrost_memory *mem = (struct panfrost_memory *) slab;
- struct panfrost_screen *screen = (struct panfrost_screen *) priv;
-
- panfrost_drm_free_slab(screen, mem);
- ralloc_free(mem);
}
static void
panfrost_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
- //DBG("TODO %s\n", __func__);
+ /* TODO */
}
static enum pipe_format
-panfrost_resource_get_internal_format(struct pipe_resource *prsrc)
+panfrost_resource_get_internal_format(struct pipe_resource *rsrc)
{
- return prsrc->format;
+ struct panfrost_resource *prsrc = (struct panfrost_resource *) rsrc;
+ return prsrc->internal_format;
}
-static boolean
+static bool
panfrost_generate_mipmap(
- struct pipe_context *pctx,
- struct pipe_resource *prsrc,
- enum pipe_format format,
- unsigned base_level,
- unsigned last_level,
- unsigned first_layer,
- unsigned last_layer)
+ struct pipe_context *pctx,
+ struct pipe_resource *prsrc,
+ enum pipe_format format,
+ unsigned base_level,
+ unsigned last_level,
+ unsigned first_layer,
+ unsigned last_layer)
{
- struct panfrost_context *ctx = pan_context(pctx);
struct panfrost_resource *rsrc = pan_resource(prsrc);
/* Generating a mipmap invalidates the written levels, so make that
* explicit here */
for (unsigned l = base_level + 1; l <= last_level; ++l)
rsrc->slices[l].initialized = false;
- /* Beyond that, we just delegate the hard stuff. We're careful to
- * include flushes on both ends to make sure the data is really valid.
- * We could be doing a lot better perf-wise, especially once we have
- * reorder-type optimizations in place. But for now prioritize
- * correctness. */
-
- struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
- bool has_draws = job->last_job.gpu;
-
- if (has_draws)
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
-
- /* We've flushed the original buffer if needed, now trigger a blit */
+ /* Beyond that, we just delegate the hard stuff. */
bool blit_res = util_gen_mipmap(
- pctx, prsrc, format,
- base_level, last_level,
- first_layer, last_layer,
- PIPE_TEX_FILTER_LINEAR);
-
- /* If the blit was successful, flush once more. If it wasn't, well, let
- * the state tracker deal with it. */
-
- if (blit_res)
- panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
+ pctx, prsrc, format,
+ base_level, last_level,
+ first_layer, last_layer,
+ PIPE_TEX_FILTER_LINEAR);
return blit_res;
}
mali_ptr
panfrost_get_texture_address(
+ struct panfrost_resource *rsrc,
+ unsigned level, unsigned face, unsigned sample)
+{
+ bool is_3d = rsrc->base.target == PIPE_TEXTURE_3D;
+ return rsrc->bo->gpu + panfrost_texture_offset(rsrc->slices, is_3d, rsrc->cubemap_stride, level, face, sample);
+}
+
+/* Given a resource that has already been allocated, hint that it should use a
+ * given layout. These are suggestions, not commands; it is perfectly legal to
+ * stub out this function, but there will be performance implications. */
+
+void
+panfrost_resource_hint_layout(
+ struct panfrost_device *dev,
struct panfrost_resource *rsrc,
- unsigned level, unsigned face)
+ enum mali_texture_layout layout,
+ signed weight)
{
- unsigned level_offset = rsrc->slices[level].offset;
- unsigned face_offset = face * rsrc->cubemap_stride;
+ /* Nothing to do, although a sophisticated implementation might store
+ * the hint */
+
+ if (rsrc->layout == layout)
+ return;
+
+ /* We don't use the weight yet, but we should check that it's positive
+ * (semantically meaning that we should choose the given `layout`) */
+
+ if (weight <= 0)
+ return;
+
+ /* Check if the preferred layout is legal for this buffer */
+
+ if (layout == MALI_TEXTURE_AFBC) {
+ bool can_afbc = panfrost_format_supports_afbc(rsrc->internal_format);
+ bool is_scanout = rsrc->base.bind &
+ (PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT | PIPE_BIND_SHARED);
+
+ if (!can_afbc || is_scanout)
+ return;
+ }
- return rsrc->bo->gpu + level_offset + face_offset;
+ /* Simple heuristic so far: if the resource is uninitialized, switch to
+ * the hinted layout. If it is initialized, keep the original layout.
+ * This misses some cases where it would be beneficial to switch and
+ * blit. */
+
+ bool is_initialized = false;
+
+ for (unsigned i = 0; i < MAX_MIP_LEVELS; ++i)
+ is_initialized |= rsrc->slices[i].initialized;
+
+ if (is_initialized)
+ return;
+
+ /* We're uninitialized, so do a layout switch. Reinitialize slices. */
+
+ size_t new_size;
+ rsrc->layout = layout;
+ panfrost_setup_slices(rsrc, &new_size);
+
+ /* If we grew in size, reallocate the BO */
+ if (new_size > rsrc->bo->size) {
+ panfrost_bo_unreference(rsrc->bo);
+ rsrc->bo = panfrost_bo_create(dev, new_size, PAN_BO_DELAY_MMAP);
+ }
+
+ /* TODO: If there are textures bound, regenerate their descriptors */
}
static void
};
void
-panfrost_resource_screen_init(struct panfrost_screen *pscreen)
+panfrost_resource_screen_init(struct pipe_screen *pscreen)
{
//pscreen->resource_create_with_modifiers =
// panfrost_resource_create_with_modifiers;
- pscreen->base.resource_create = u_transfer_helper_resource_create;
- pscreen->base.resource_destroy = u_transfer_helper_resource_destroy;
- pscreen->base.resource_from_handle = panfrost_resource_from_handle;
- pscreen->base.resource_get_handle = panfrost_resource_get_handle;
- pscreen->base.transfer_helper = u_transfer_helper_create(&transfer_vtbl,
- true, false,
- true, true);
-
- pb_slabs_init(&pscreen->slabs,
- MIN_SLAB_ENTRY_SIZE,
- MAX_SLAB_ENTRY_SIZE,
-
- 3, /* Number of heaps */
-
- pscreen,
-
- panfrost_slab_can_reclaim,
- panfrost_slab_alloc,
- panfrost_slab_free);
-}
-
-void
-panfrost_resource_screen_deinit(struct panfrost_screen *pscreen)
-{
- pb_slabs_deinit(&pscreen->slabs);
+ pscreen->resource_create = u_transfer_helper_resource_create;
+ pscreen->resource_destroy = u_transfer_helper_resource_destroy;
+ pscreen->resource_from_handle = panfrost_resource_from_handle;
+ pscreen->resource_get_handle = panfrost_resource_get_handle;
+ pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
+ true, false,
+ true, true);
}
void
panfrost_resource_context_init(struct pipe_context *pctx)
{
pctx->transfer_map = u_transfer_helper_transfer_map;
- pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
- pctx->buffer_subdata = u_default_buffer_subdata;
pctx->create_surface = panfrost_create_surface;
pctx->surface_destroy = panfrost_surface_destroy;
pctx->resource_copy_region = util_resource_copy_region;