#include "util/u_surface.h"
#include "util/u_transfer.h"
#include "util/u_transfer_helper.h"
+#include "util/u_gen_mipmap.h"
#include "pan_context.h"
#include "pan_screen.h"
pipe_reference_init(&prsc->reference, 1);
prsc->screen = pscreen;
- rsc->bo = screen->driver->import_bo(screen, whandle);
+ rsc->bo = panfrost_drm_import_bo(screen, whandle);
rsc->bo->slices[0].stride = whandle->stride;
+ rsc->bo->slices[0].initialized = true;
if (screen->ro) {
rsc->scanout =
return TRUE;
} else
- return screen->driver->export_bo(screen, rsrc->bo->gem_handle, rsrc->bo->slices[0].stride, handle);
+ return panfrost_drm_export_bo(screen, rsrc->bo->gem_handle,
+ rsrc->bo->slices[0].stride,
+ handle);
}
return FALSE;
if (bo->layout == PAN_TILED || bo->layout == PAN_LINEAR) {
struct panfrost_memory mem;
- screen->driver->allocate_slab(screen, &mem, bo->size / 4096, true, 0, 0, 0);
+ panfrost_drm_allocate_slab(screen, &mem, bo->size / 4096, true, 0, 0, 0);
bo->cpu = mem.cpu;
bo->gpu = mem.gpu;
.gem_handle = bo->gem_handle,
};
- screen->driver->free_slab(screen, &mem);
+ panfrost_drm_free_slab(screen, &mem);
}
if (bo->layout == PAN_AFBC) {
.gem_handle = bo->checksum_slab.gem_handle,
};
- screen->driver->free_slab(screen, &mem);
+ panfrost_drm_free_slab(screen, &mem);
}
if (bo->imported) {
- screen->driver->free_imported_bo(screen, bo);
+ panfrost_drm_free_imported_bo(screen, bo);
}
ralloc_free(bo);
transfer->base.stride = box->width * bytes_per_pixel;
transfer->base.layer_stride = transfer->base.stride * box->height;
-
- /* TODO: Reads */
transfer->map = rzalloc_size(transfer, transfer->base.layer_stride * box->depth);
+ assert(box->depth == 1);
+
+ if ((usage & PIPE_TRANSFER_READ) && bo->slices[level].initialized) {
+ if (bo->layout == PAN_AFBC) {
+ DBG("Unimplemented: reads from AFBC");
+ } else if (bo->layout == PAN_TILED) {
+ panfrost_load_tiled_image(
+ transfer->map,
+ bo->cpu + bo->slices[level].offset,
+ box,
+ transfer->base.stride,
+ bo->slices[level].stride,
+ util_format_get_blocksize(resource->format));
+ }
+ }
return transfer->map;
} else {
transfer->base.stride = bo->slices[level].stride;
transfer->base.layer_stride = bo->cubemap_stride;
+ /* By mapping direct-write, we're implicitly already
+ * initialized (maybe), so be conservative */
+
+ if ((usage & PIPE_TRANSFER_WRITE) && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
+ bo->slices[level].initialized = true;
+
return bo->cpu
+ bo->slices[level].offset
+ transfer->base.box.z * bo->cubemap_stride
struct panfrost_bo *bo = prsrc->bo;
if (transfer->usage & PIPE_TRANSFER_WRITE) {
+ unsigned level = transfer->level;
+ bo->slices[level].initialized = true;
if (bo->layout == PAN_AFBC) {
DBG("Unimplemented: writes to AFBC\n");
} else if (bo->layout == PAN_TILED) {
- unsigned level = transfer->level;
assert(transfer->box.depth == 1);
panfrost_store_tiled_image(
/* Actually allocate the memory from kernel-space. Mapped, same_va, no
* special flags */
- screen->driver->allocate_slab(screen, mem, slab_size / 4096, true, 0, 0, 0);
+ panfrost_drm_allocate_slab(screen, mem, slab_size / 4096, true, 0, 0, 0);
return &mem->slab;
}
struct panfrost_memory *mem = (struct panfrost_memory *) slab;
struct panfrost_screen *screen = (struct panfrost_screen *) priv;
- screen->driver->free_slab(screen, mem);
+ panfrost_drm_free_slab(screen, mem);
ralloc_free(mem);
}
return prsrc->format;
}
+/* pipe_context::generate_mipmap hook. Regenerates mip levels
+ * (base_level, last_level] of prsrc from base_level using the shared
+ * util_gen_mipmap blit path, bracketed by flushes for correctness.
+ *
+ * Returns TRUE iff the u_gen_mipmap blit succeeded; on FALSE the state
+ * tracker is expected to fall back to its own mipmap generation. */
+static boolean
+panfrost_generate_mipmap(
+ struct pipe_context *pctx,
+ struct pipe_resource *prsrc,
+ enum pipe_format format,
+ unsigned base_level,
+ unsigned last_level,
+ unsigned first_layer,
+ unsigned last_layer)
+{
+ struct panfrost_context *ctx = pan_context(pctx);
+ struct panfrost_resource *rsrc = pan_resource(prsrc);
+
+ /* Generating a mipmap invalidates the written levels, so make that
+ * explicit so we don't try to wallpaper them back and end up with
+ * u_blitter recursion */
+
+ assert(rsrc->bo);
+ for (unsigned l = base_level + 1; l <= last_level; ++l)
+ rsrc->bo->slices[l].initialized = false;
+
+ /* Beyond that, we just delegate the hard stuff. We're careful to
+ * include flushes on both ends to make sure the data is really valid.
+ * We could be doing a lot better perf-wise, especially once we have
+ * reorder-type optimizations in place. But for now prioritize
+ * correctness. */
+
+ /* Only flush up front if there is actually pending GPU work that
+ * could write the base level (last_job.gpu is nonzero iff the
+ * current job has emitted something) */
+ struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
+ bool has_draws = job->last_job.gpu;
+
+ if (has_draws)
+ panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
+
+ /* We've flushed the original buffer if needed, now trigger a blit */
+
+ bool blit_res = util_gen_mipmap(
+ pctx, prsrc, format,
+ base_level, last_level,
+ first_layer, last_layer,
+ PIPE_TEX_FILTER_LINEAR);
+
+ /* If the blit was successful, flush once more. If it wasn't, well, let
+ * the state tracker deal with it. */
+
+ if (blit_res)
+ panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
+
+ return blit_res;
+}
+
static void
panfrost_resource_set_stencil(struct pipe_resource *prsrc,
struct pipe_resource *stencil)
pctx->surface_destroy = panfrost_surface_destroy;
pctx->resource_copy_region = util_resource_copy_region;
pctx->blit = panfrost_blit;
+ pctx->generate_mipmap = panfrost_generate_mipmap;
pctx->flush_resource = panfrost_flush_resource;
pctx->invalidate_resource = panfrost_invalidate_resource;
pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;