#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "util/u_format_zs.h"
+#include "util/u_drm.h"
-#include "drm_fourcc.h"
+#include "drm-uapi/drm_fourcc.h"
#include "v3d_screen.h"
#include "v3d_context.h"
#include "v3d_resource.h"
#include "broadcom/cle/v3d_packet_v33_pack.h"
/* Dump a resource's per-miplevel layout when V3D_DEBUG_SURFACE is set.
 * NOTE(review): this is a vc5->v3d rename patch fragment; interior lines
 * (the actual fprintf of the slice layout, and the function close) are
 * missing here, so only comments were added.
 */
static void
-vc5_debug_resource_layout(struct vc5_resource *rsc, const char *caller)
+v3d_debug_resource_layout(struct v3d_resource *rsc, const char *caller)
{
        /* Early out unless surface-layout debugging was requested. */
        if (!(V3D_DEBUG & V3D_DEBUG_SURFACE))
                return;
};
        /* Walk every miplevel; width is recovered from stride / cpp. */
        for (int i = 0; i <= prsc->last_level; i++) {
-                struct vc5_resource_slice *slice = &rsc->slices[i];
+                struct v3d_resource_slice *slice = &rsc->slices[i];
                int level_width = slice->stride / rsc->cpp;
                int level_height = slice->padded_height;
        }
/* (Re)allocate the backing BO for a resource.
 *
 * On success the old BO reference is dropped and replaced; on failure the
 * resource keeps its previous BO and false is returned.
 * NOTE(review): the function's closing brace falls outside this fragment.
 */
static bool
-vc5_resource_bo_alloc(struct vc5_resource *rsc)
+v3d_resource_bo_alloc(struct v3d_resource *rsc)
{
        struct pipe_resource *prsc = &rsc->base;
        struct pipe_screen *pscreen = prsc->screen;
-        struct vc5_bo *bo;
+        struct v3d_bo *bo;
-        bo = vc5_bo_alloc(vc5_screen(pscreen), rsc->size, "resource");
+        bo = v3d_bo_alloc(v3d_screen(pscreen), rsc->size, "resource");
        if (bo) {
                /* Swap in the fresh BO, releasing our ref on the old one. */
-                vc5_bo_unreference(&rsc->bo);
+                v3d_bo_unreference(&rsc->bo);
                rsc->bo = bo;
-                vc5_debug_resource_layout(rsc, "alloc");
+                v3d_debug_resource_layout(rsc, "alloc");
                return true;
        } else {
                return false;
        }
/* Complete a transfer_map(): for written shadow maps of tiled resources,
 * tile the shadow data back into the BO, then release the transfer.
 * NOTE(review): interior lines of the diff are missing here (the
 * v3d_store_tiled_image() argument tail and loop close), so only comments
 * were added.
 */
static void
-vc5_resource_transfer_unmap(struct pipe_context *pctx,
+v3d_resource_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *ptrans)
{
-        struct vc5_context *vc5 = vc5_context(pctx);
-        struct vc5_transfer *trans = vc5_transfer(ptrans);
+        struct v3d_context *v3d = v3d_context(pctx);
+        struct v3d_transfer *trans = v3d_transfer(ptrans);
        /* trans->map is only set for shadow (tiled) mappings; direct linear
         * maps have nothing to write back.
         */
        if (trans->map) {
-                struct vc5_resource *rsc = vc5_resource(ptrans->resource);
-                struct vc5_resource_slice *slice = &rsc->slices[ptrans->level];
+                struct v3d_resource *rsc = v3d_resource(ptrans->resource);
+                struct v3d_resource_slice *slice = &rsc->slices[ptrans->level];
                if (ptrans->usage & PIPE_TRANSFER_WRITE) {
                        /* Store each depth slice / layer back to its tiled
                         * location in the BO.
                         */
                        for (int z = 0; z < ptrans->box.depth; z++) {
                                void *dst = rsc->bo->map +
-                                        vc5_layer_offset(&rsc->base,
+                                        v3d_layer_offset(&rsc->base,
                                                         ptrans->level,
                                                         ptrans->box.z + z);
-                                vc5_store_tiled_image(dst,
+                                v3d_store_tiled_image(dst,
                                                      slice->stride,
                                                      (trans->map +
                                                       ptrans->stride *
        }
        pipe_resource_reference(&ptrans->resource, NULL);
-        slab_free(&vc5->transfer_pool, ptrans);
+        slab_free(&v3d->transfer_pool, ptrans);
}
/* Factored-out usage handling shared by transfer_map() and
 * texture_subdata(): reallocates/flushes as needed for the given usage
 * flags, and records pending CPU writes.
 */
-static void *
-vc5_resource_transfer_map(struct pipe_context *pctx,
-                          struct pipe_resource *prsc,
-                          unsigned level, unsigned usage,
-                          const struct pipe_box *box,
-                          struct pipe_transfer **pptrans)
+static void
+v3d_map_usage_prep(struct pipe_context *pctx,
+                   struct pipe_resource *prsc,
+                   unsigned usage)
{
-        struct vc5_context *vc5 = vc5_context(pctx);
-        struct vc5_resource *rsc = vc5_resource(prsc);
-        struct vc5_transfer *trans;
-        struct pipe_transfer *ptrans;
-        enum pipe_format format = prsc->format;
-        char *buf;
-
-        /* MSAA maps should have been handled by u_transfer_helper. */
-        assert(prsc->nr_samples <= 1);
-
-        /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
-         * being mapped.
-         */
-        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
-            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
-            !(prsc->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT) &&
-            prsc->last_level == 0 &&
-            prsc->width0 == box->width &&
-            prsc->height0 == box->height &&
-            prsc->depth0 == box->depth &&
-            prsc->array_size == 1 &&
-            rsc->bo->private) {
-                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
-        }
+        struct v3d_context *v3d = v3d_context(pctx);
+        struct v3d_resource *rsc = v3d_resource(prsc);
        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
                /* Discarding everything: try to orphan the old BO instead of
                 * stalling on it.
                 */
-                if (vc5_resource_bo_alloc(rsc)) {
+                if (v3d_resource_bo_alloc(rsc)) {
                        /* If it might be bound as one of our vertex buffers
                         * or UBOs, make sure we re-emit vertex buffer state
                         * or uniforms.
                         */
                        if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
-                                vc5->dirty |= VC5_DIRTY_VTXBUF;
+                                v3d->dirty |= VC5_DIRTY_VTXBUF;
                        if (prsc->bind & PIPE_BIND_CONSTANT_BUFFER)
-                                vc5->dirty |= VC5_DIRTY_CONSTBUF;
+                                v3d->dirty |= VC5_DIRTY_CONSTBUF;
                } else {
                        /* If we failed to reallocate, flush users so that we
                         * don't violate any syncing requirements.
                         */
-                        vc5_flush_jobs_reading_resource(vc5, prsc);
+                        v3d_flush_jobs_reading_resource(v3d, prsc);
                }
        } else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                /* If we're writing and the buffer is being used by the CL, we
                 * to flush if the CL has written our buffer.
                 */
                if (usage & PIPE_TRANSFER_WRITE)
-                        vc5_flush_jobs_reading_resource(vc5, prsc);
+                        v3d_flush_jobs_reading_resource(v3d, prsc);
                else
-                        vc5_flush_jobs_writing_resource(vc5, prsc);
+                        v3d_flush_jobs_writing_resource(v3d, prsc, true);
        }
        if (usage & PIPE_TRANSFER_WRITE) {
                /* Track CPU writes so shadow textures know to re-blit. */
                rsc->writes++;
                rsc->initialized_buffers = ~0;
        }
+}
+
/* Map a resource for CPU access.  Tiled resources get a malloc'd shadow
 * that is untiled on read and re-tiled at unmap; linear resources return a
 * direct BO pointer.
 * NOTE(review): interior lines of this diff fragment are missing (ptrans
 * field setup, the tiled-path allocation, the load-tiled argument tail and
 * the linear-path return), so only comments were added.
 */
+static void *
+v3d_resource_transfer_map(struct pipe_context *pctx,
+                          struct pipe_resource *prsc,
+                          unsigned level, unsigned usage,
+                          const struct pipe_box *box,
+                          struct pipe_transfer **pptrans)
+{
+        struct v3d_context *v3d = v3d_context(pctx);
+        struct v3d_resource *rsc = v3d_resource(prsc);
+        struct v3d_transfer *trans;
+        struct pipe_transfer *ptrans;
+        enum pipe_format format = prsc->format;
+        char *buf;
+
+        /* MSAA maps should have been handled by u_transfer_helper. */
+        assert(prsc->nr_samples <= 1);
+
+        /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
+         * being mapped.
+         */
+        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
+            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+            !(prsc->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) &&
+            prsc->last_level == 0 &&
+            prsc->width0 == box->width &&
+            prsc->height0 == box->height &&
+            prsc->depth0 == box->depth &&
+            prsc->array_size == 1 &&
+            rsc->bo->private) {
+                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+        }
-        trans = slab_alloc(&vc5->transfer_pool);
+        v3d_map_usage_prep(pctx, prsc, usage);
+
+        trans = slab_alloc(&v3d->transfer_pool);
        if (!trans)
                return NULL;
         */
        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
-                buf = vc5_bo_map_unsynchronized(rsc->bo);
+                buf = v3d_bo_map_unsynchronized(rsc->bo);
        else
-                buf = vc5_bo_map(rsc->bo);
+                buf = v3d_bo_map(rsc->bo);
        if (!buf) {
                fprintf(stderr, "Failed to map bo\n");
                goto fail;
        /* Compressed formats: convert box to block units. */
        ptrans->box.height = DIV_ROUND_UP(ptrans->box.height,
                                          util_format_get_blockheight(format));
-        struct vc5_resource_slice *slice = &rsc->slices[level];
+        struct v3d_resource_slice *slice = &rsc->slices[level];
        if (rsc->tiled) {
                /* No direct mappings of tiled, since we need to manually
                 * tile/untile.
                if (usage & PIPE_TRANSFER_READ) {
                        /* Untile each layer/slice into the shadow buffer. */
                        for (int z = 0; z < ptrans->box.depth; z++) {
                                void *src = rsc->bo->map +
-                                        vc5_layer_offset(&rsc->base,
+                                        v3d_layer_offset(&rsc->base,
                                                         ptrans->level,
                                                         ptrans->box.z + z);
-                                vc5_load_tiled_image((trans->map +
+                                v3d_load_tiled_image((trans->map +
                                                      ptrans->stride *
                                                      ptrans->box.height * z),
                                                     ptrans->stride,
                return trans->map;
        } else {
                ptrans->stride = slice->stride;
-                ptrans->layer_stride = ptrans->stride;
+                ptrans->layer_stride = rsc->cube_map_stride;
                return buf + slice->offset +
                        ptrans->box.y * ptrans->stride +
fail:
-        vc5_resource_transfer_unmap(pctx, ptrans);
+        v3d_resource_transfer_unmap(pctx, ptrans);
        return NULL;
}
static void
-vc5_resource_destroy(struct pipe_screen *pscreen,
+v3d_texture_subdata(struct pipe_context *pctx,
+ struct pipe_resource *prsc,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ const void *data,
+ unsigned stride,
+ unsigned layer_stride)
+{
+ struct v3d_resource *rsc = v3d_resource(prsc);
+ struct v3d_resource_slice *slice = &rsc->slices[level];
+
+ /* For a direct mapping, we can just take the u_transfer path. */
+ if (!rsc->tiled) {
+ return u_default_texture_subdata(pctx, prsc, level, usage, box,
+ data, stride, layer_stride);
+ }
+
+ /* Otherwise, map and store the texture data directly into the tiled
+ * texture. Note that gallium's texture_subdata may be called with
+ * obvious usage flags missing!
+ */
+ v3d_map_usage_prep(pctx, prsc, usage | (PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_DISCARD_RANGE));
+
+ void *buf;
+ if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
+ buf = v3d_bo_map_unsynchronized(rsc->bo);
+ else
+ buf = v3d_bo_map(rsc->bo);
+
+ for (int i = 0; i < box->depth; i++) {
+ v3d_store_tiled_image(buf +
+ v3d_layer_offset(&rsc->base,
+ level,
+ box->z + i),
+ slice->stride,
+ (void *)data + layer_stride * i,
+ stride,
+ slice->tiling, rsc->cpp, slice->padded_height,
+ box);
+ }
+}
+
+static void
+v3d_resource_destroy(struct pipe_screen *pscreen,
struct pipe_resource *prsc)
{
- struct vc5_resource *rsc = vc5_resource(prsc);
+ struct v3d_screen *screen = v3d_screen(pscreen);
+ struct v3d_resource *rsc = v3d_resource(prsc);
+
+ if (rsc->scanout)
+ renderonly_scanout_destroy(rsc->scanout, screen->ro);
- vc5_bo_unreference(&rsc->bo);
+ v3d_bo_unreference(&rsc->bo);
free(rsc);
}
-static boolean
-vc5_resource_get_handle(struct pipe_screen *pscreen,
+static bool
+v3d_resource_get_handle(struct pipe_screen *pscreen,
struct pipe_context *pctx,
struct pipe_resource *prsc,
struct winsys_handle *whandle,
unsigned usage)
{
- struct vc5_resource *rsc = vc5_resource(prsc);
- struct vc5_bo *bo = rsc->bo;
+ struct v3d_screen *screen = v3d_screen(pscreen);
+ struct v3d_resource *rsc = v3d_resource(prsc);
+ struct v3d_bo *bo = rsc->bo;
whandle->stride = rsc->slices[0].stride;
+ whandle->offset = 0;
/* If we're passing some reference to our BO out to some other part of
* the system, then we can't do any optimizations about only us being
*/
bo->private = false;
+ if (rsc->tiled) {
+ /* A shared tiled buffer should always be allocated as UIF,
+ * not UBLINEAR or LT.
+ */
+ assert(rsc->slices[0].tiling == VC5_TILING_UIF_XOR ||
+ rsc->slices[0].tiling == VC5_TILING_UIF_NO_XOR);
+ whandle->modifier = DRM_FORMAT_MOD_BROADCOM_UIF;
+ } else {
+ whandle->modifier = DRM_FORMAT_MOD_LINEAR;
+ }
+
switch (whandle->type) {
- case DRM_API_HANDLE_TYPE_SHARED:
- return vc5_bo_flink(bo, &whandle->handle);
- case DRM_API_HANDLE_TYPE_KMS:
+ case WINSYS_HANDLE_TYPE_SHARED:
+ return v3d_bo_flink(bo, &whandle->handle);
+ case WINSYS_HANDLE_TYPE_KMS:
+ if (screen->ro) {
+ assert(rsc->scanout);
+ bool ok = renderonly_get_handle(rsc->scanout, whandle);
+ whandle->stride = rsc->slices[0].stride;
+ return ok;
+ }
whandle->handle = bo->handle;
- return TRUE;
- case DRM_API_HANDLE_TYPE_FD:
- whandle->handle = vc5_bo_get_dmabuf(bo);
+ return true;
+ case WINSYS_HANDLE_TYPE_FD:
+ whandle->handle = v3d_bo_get_dmabuf(bo);
return whandle->handle != -1;
}
- return FALSE;
+ return false;
}
/* One UIF page expressed in UIF-block rows. */
#define PAGE_UB_ROWS (VC5_UIFCFG_PAGE_SIZE / VC5_UIFBLOCK_ROW_SIZE)
 * between columns of UIF blocks.
 */
/* Compute the number of UIF-block rows of padding needed after a level of
 * the given height.
 * NOTE(review): the computation and return are missing from this diff
 * fragment; only comments were added.
 */
static uint32_t
-vc5_get_ub_pad(struct vc5_resource *rsc, uint32_t height)
+v3d_get_ub_pad(struct v3d_resource *rsc, uint32_t height)
{
-        uint32_t utile_h = vc5_utile_height(rsc->cpp);
+        uint32_t utile_h = v3d_utile_height(rsc->cpp);
        /* A UIF block is two utiles tall. */
        uint32_t uif_block_h = utile_h * 2;
        uint32_t height_ub = height / uif_block_h;
}
/* Lay out each miplevel's offset, stride, tiling mode and padded height.
 *
 * winsys_stride, when non-zero, overrides the computed stride (used for
 * imported linear buffers); uif_top forces the top level to be UIF.
 * NOTE(review): interior lines of this diff fragment are missing (height/
 * depth declarations, tiling-mode selection, offset advance), so only
 * comments were added.
 */
static void
-vc5_setup_slices(struct vc5_resource *rsc)
+v3d_setup_slices(struct v3d_resource *rsc, uint32_t winsys_stride,
+                 bool uif_top)
{
        struct pipe_resource *prsc = &rsc->base;
        uint32_t width = prsc->width0;
        /* Mip levels below the top are laid out at power-of-two sizes. */
        uint32_t pot_height = 2 * util_next_power_of_two(u_minify(height, 1));
        uint32_t pot_depth = 2 * util_next_power_of_two(u_minify(depth, 1));
        uint32_t offset = 0;
-        uint32_t utile_w = vc5_utile_width(rsc->cpp);
-        uint32_t utile_h = vc5_utile_height(rsc->cpp);
+        uint32_t utile_w = v3d_utile_width(rsc->cpp);
+        uint32_t utile_h = v3d_utile_height(rsc->cpp);
        uint32_t uif_block_w = utile_w * 2;
        uint32_t uif_block_h = utile_h * 2;
        uint32_t block_width = util_format_get_blockwidth(prsc->format);
        uint32_t block_height = util_format_get_blockheight(prsc->format);
        bool msaa = prsc->nr_samples > 1;
+
        /* MSAA textures/renderbuffers are always laid out as single-level
         * UIF.
         */
-        bool uif_top = msaa;
+        uif_top |= msaa;
+
+        /* Check some easy mistakes to make in a resource_create() call that
+         * will break our setup.
+         */
+        assert(prsc->array_size != 0);
+        assert(prsc->depth0 != 0);
        /* Lay out from the smallest level up so offsets accumulate. */
        for (int i = prsc->last_level; i >= 0; i--) {
-                struct vc5_resource_slice *slice = &rsc->slices[i];
+                struct v3d_resource_slice *slice = &rsc->slices[i];
                uint32_t level_width, level_height, level_depth;
                if (i < 2) {
                        level_height = align(level_height,
                                             uif_block_h);
-                        slice->ub_pad = vc5_get_ub_pad(rsc,
+                        slice->ub_pad = v3d_get_ub_pad(rsc,
                                                       level_height);
                        level_height += slice->ub_pad * uif_block_h;
                }
                slice->offset = offset;
                /* An imported buffer's stride comes from the winsys. */
-                slice->stride = level_width * rsc->cpp;
+                if (winsys_stride)
+                        slice->stride = winsys_stride;
+                else
+                        slice->stride = level_width * rsc->cpp;
                slice->padded_height = level_height;
                slice->size = level_height * slice->stride;
        }
uint32_t
-vc5_layer_offset(struct pipe_resource *prsc, uint32_t level, uint32_t layer)
+v3d_layer_offset(struct pipe_resource *prsc, uint32_t level, uint32_t layer)
{
- struct vc5_resource *rsc = vc5_resource(prsc);
- struct vc5_resource_slice *slice = &rsc->slices[level];
+ struct v3d_resource *rsc = v3d_resource(prsc);
+ struct v3d_resource_slice *slice = &rsc->slices[level];
if (prsc->target == PIPE_TEXTURE_3D)
return slice->offset + layer * slice->size;
return slice->offset + layer * rsc->cube_map_stride;
}
/* Allocate and partially initialize a v3d_resource from a template,
 * computing cpp from the render-target format.  Layout (slices/BO) is done
 * by the callers.
 * NOTE(review): interior lines of this diff fragment are missing (the
 * pipe_resource copy/init and the texture-format branch), so only comments
 * were added.
 */
-static struct vc5_resource *
-vc5_resource_setup(struct pipe_screen *pscreen,
+static struct v3d_resource *
+v3d_resource_setup(struct pipe_screen *pscreen,
                   const struct pipe_resource *tmpl)
{
-        struct vc5_screen *screen = vc5_screen(pscreen);
-        struct vc5_resource *rsc = CALLOC_STRUCT(vc5_resource);
+        struct v3d_screen *screen = v3d_screen(pscreen);
+        struct v3d_resource *rsc = CALLOC_STRUCT(v3d_resource);
        if (!rsc)
                return NULL;
        struct pipe_resource *prsc = &rsc->base;
        /* Pre-4.x hardware stores MSAA samples interleaved in the pixel. */
        if (screen->devinfo.ver < 40 && prsc->nr_samples > 1)
                rsc->cpp *= prsc->nr_samples;
        } else {
-                assert(vc5_rt_format_supported(&screen->devinfo, prsc->format));
+                assert(v3d_rt_format_supported(&screen->devinfo, prsc->format));
                uint32_t output_image_format =
-                        vc5_get_rt_format(&screen->devinfo, prsc->format);
+                        v3d_get_rt_format(&screen->devinfo, prsc->format);
                uint32_t internal_type;
                uint32_t internal_bpp;
-                vc5_get_internal_type_bpp_for_output_format(&screen->devinfo,
+                v3d_get_internal_type_bpp_for_output_format(&screen->devinfo,
                                                            output_image_format,
                                                            &internal_type,
                                                            &internal_bpp);
        return rsc;
}
/* The local find_modifier() helper is removed in favor of the shared
 * drm_find_modifier() from util/u_drm.h.
 */
-static bool
-find_modifier(uint64_t needle, const uint64_t *haystack, int count)
-{
-        int i;
-
-        for (i = 0; i < count; i++) {
-                if (haystack[i] == needle)
-                        return true;
-        }
-
-        return false;
-}
-
/* Create a resource, choosing tiled vs. linear layout from the acceptable
 * modifier list, and allocating through the renderonly (display) device
 * for scanout/shared buffers.
 * NOTE(review): interior lines of this diff fragment are missing (size
 * computation, parts of the layout decisions), so only comments were
 * added.
 */
static struct pipe_resource *
-vc5_resource_create_with_modifiers(struct pipe_screen *pscreen,
+v3d_resource_create_with_modifiers(struct pipe_screen *pscreen,
                                   const struct pipe_resource *tmpl,
                                   const uint64_t *modifiers,
                                   int count)
{
-        bool linear_ok = find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
-        struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
+        struct v3d_screen *screen = v3d_screen(pscreen);
+
+        bool linear_ok = drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
+        struct v3d_resource *rsc = v3d_resource_setup(pscreen, tmpl);
        struct pipe_resource *prsc = &rsc->base;
        /* Use a tiled layout if we can, for better 3D performance. */
        bool should_tile = true;
        /* Scanout BOs for simulator need to be linear for interaction with
         * i965.
         */
-        if (using_vc5_simulator &&
+        if (using_v3d_simulator &&
            tmpl->bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
                should_tile = false;
+        /* If using the old-school SCANOUT flag, we don't know what the screen
+         * might support other than linear. Just force linear.
+         */
+        if (tmpl->bind & PIPE_BIND_SCANOUT)
+                should_tile = false;
+
        /* No user-specified modifier; determine our own. */
        if (count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID) {
                linear_ok = true;
                rsc->tiled = should_tile;
        } else if (should_tile &&
-                   find_modifier(DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
+                   drm_find_modifier(DRM_FORMAT_MOD_BROADCOM_UIF,
                                     modifiers, count)) {
                rsc->tiled = true;
        } else if (linear_ok) {
                rsc->tiled = false;
        } else {
                fprintf(stderr, "Unsupported modifier requested\n");
-                return NULL;
+                goto fail;
        }
        rsc->internal_format = prsc->format;
-        vc5_setup_slices(rsc);
-        if (!vc5_resource_bo_alloc(rsc))
-                goto fail;
+        v3d_setup_slices(rsc, 0, tmpl->bind & PIPE_BIND_SHARED);
+
+        /* If we're in a renderonly setup, use the other device to perform our
+         * allocation and just import it to v3d.  The other device may be
+         * using CMA, and V3D can import from CMA but doesn't do CMA
+         * allocations on its own.
+         *
+         * We always allocate this way for SHARED, because get_handle will
+         * need a resource on the display fd.
+         */
+        if (screen->ro && (tmpl->bind & (PIPE_BIND_SCANOUT |
+                                         PIPE_BIND_SHARED))) {
+                struct winsys_handle handle;
                /* The display device only needs a dumb buffer of the right
                 * total size; fake it as a 1024-wide RGBA8 image.
                 */
+                struct pipe_resource scanout_tmpl = {
+                        .target = prsc->target,
+                        .format = PIPE_FORMAT_RGBA8888_UNORM,
+                        .width0 = 1024, /* one page */
+                        .height0 = align(rsc->size, 4096) / 4096,
+                        .depth0 = 1,
+                        .array_size = 1,
+                };
+
+                rsc->scanout =
+                        renderonly_scanout_for_resource(&scanout_tmpl,
+                                                        screen->ro,
+                                                        &handle);
+
+                if (!rsc->scanout) {
+                        fprintf(stderr, "Failed to create scanout resource\n");
                        /* NOTE(review): this path returns NULL without
                         * releasing rsc, unlike the other error paths which
                         * `goto fail`.  Looks like a leak — confirm against
                         * the full file and consider `goto fail` here.
                         */
+                        return NULL;
+                }
+                assert(handle.type == WINSYS_HANDLE_TYPE_FD);
+                rsc->bo = v3d_bo_open_dmabuf(screen, handle.handle);
+                close(handle.handle);
+
+                if (!rsc->bo)
+                        goto fail;
+
+                v3d_debug_resource_layout(rsc, "renderonly");
+
+                return prsc;
+        } else {
+                if (!v3d_resource_bo_alloc(rsc))
+                        goto fail;
+        }
        return prsc;
fail:
-        vc5_resource_destroy(pscreen, prsc);
+        v3d_resource_destroy(pscreen, prsc);
        return NULL;
}
struct pipe_resource *
-vc5_resource_create(struct pipe_screen *pscreen,
+v3d_resource_create(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl)
{
const uint64_t mod = DRM_FORMAT_MOD_INVALID;
- return vc5_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
+ return v3d_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
}
/* Import an externally-allocated buffer (flink name or dmabuf) as a
 * resource, choosing tiling from the incoming format modifier.
 * NOTE(review): interior lines of this diff fragment are missing (the
 * default-case fprintf tails, BO-null check, and the stride-mismatch
 * handling body), so only comments were added.
 */
static struct pipe_resource *
-vc5_resource_from_handle(struct pipe_screen *pscreen,
+v3d_resource_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *tmpl,
                         struct winsys_handle *whandle,
                         unsigned usage)
{
-        struct vc5_screen *screen = vc5_screen(pscreen);
-        struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
+        struct v3d_screen *screen = v3d_screen(pscreen);
+        struct v3d_resource *rsc = v3d_resource_setup(pscreen, tmpl);
        struct pipe_resource *prsc = &rsc->base;
-        struct vc5_resource_slice *slice = &rsc->slices[0];
+        struct v3d_resource_slice *slice = &rsc->slices[0];
        if (!rsc)
                return NULL;
        switch (whandle->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
-        case DRM_FORMAT_MOD_INVALID:
                rsc->tiled = false;
                break;
-        /* XXX: UIF */
+        case DRM_FORMAT_MOD_BROADCOM_UIF:
+                rsc->tiled = true;
+                break;
+        case DRM_FORMAT_MOD_INVALID:
                /* With no explicit modifier: a renderonly (display) buffer
                 * is assumed linear, a pure-v3d one tiled.
                 */
+        case DRM_FORMAT_MOD_INVALID:
+                rsc->tiled = screen->ro == NULL;
+                break;
        default:
                /* NOTE(review): "%llx" with a uint64_t modifier may need a
                 * (long long) cast on some ABIs — confirm in the full file.
                 */
                fprintf(stderr,
                        "Attempt to import unsupported modifier 0x%llx\n",
        }
        switch (whandle->type) {
-        case DRM_API_HANDLE_TYPE_SHARED:
-                rsc->bo = vc5_bo_open_name(screen,
-                                           whandle->handle, whandle->stride);
+        case WINSYS_HANDLE_TYPE_SHARED:
+                rsc->bo = v3d_bo_open_name(screen, whandle->handle);
                break;
-        case DRM_API_HANDLE_TYPE_FD:
-                rsc->bo = vc5_bo_open_dmabuf(screen,
-                                             whandle->handle, whandle->stride);
+        case WINSYS_HANDLE_TYPE_FD:
+                rsc->bo = v3d_bo_open_dmabuf(screen, whandle->handle);
                break;
        default:
                fprintf(stderr,
        rsc->internal_format = prsc->format;
        /* Imported buffers keep the winsys-provided stride. */
-        vc5_setup_slices(rsc);
-        vc5_debug_resource_layout(rsc, "import");
+        v3d_setup_slices(rsc, whandle->stride, true);
+        v3d_debug_resource_layout(rsc, "import");
+
+        if (screen->ro) {
+                /* Make sure that renderonly has a handle to our buffer in the
+                 * display's fd, so that a later renderonly_get_handle()
+                 * returns correct handles or GEM names.
+                 */
+                rsc->scanout =
+                        renderonly_create_gpu_import_for_resource(prsc,
+                                                                  screen->ro,
+                                                                  NULL);
+                if (!rsc->scanout) {
+                        fprintf(stderr, "Failed to create scanout resource.\n");
+                        goto fail;
+                }
+        }
        if (whandle->stride != slice->stride) {
                static bool warned = false;
        return prsc;
fail:
-        vc5_resource_destroy(pscreen, prsc);
+        v3d_resource_destroy(pscreen, prsc);
        return NULL;
}
+void
+v3d_update_shadow_texture(struct pipe_context *pctx,
+ struct pipe_sampler_view *pview)
+{
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_sampler_view *view = v3d_sampler_view(pview);
+ struct v3d_resource *shadow = v3d_resource(view->texture);
+ struct v3d_resource *orig = v3d_resource(pview->texture);
+
+ assert(view->texture != pview->texture);
+
+ if (shadow->writes == orig->writes && orig->bo->private)
+ return;
+
+ perf_debug("Updating %dx%d@%d shadow for linear texture\n",
+ orig->base.width0, orig->base.height0,
+ pview->u.tex.first_level);
+
+ for (int i = 0; i <= shadow->base.last_level; i++) {
+ unsigned width = u_minify(shadow->base.width0, i);
+ unsigned height = u_minify(shadow->base.height0, i);
+ struct pipe_blit_info info = {
+ .dst = {
+ .resource = &shadow->base,
+ .level = i,
+ .box = {
+ .x = 0,
+ .y = 0,
+ .z = 0,
+ .width = width,
+ .height = height,
+ .depth = 1,
+ },
+ .format = shadow->base.format,
+ },
+ .src = {
+ .resource = &orig->base,
+ .level = pview->u.tex.first_level + i,
+ .box = {
+ .x = 0,
+ .y = 0,
+ .z = 0,
+ .width = width,
+ .height = height,
+ .depth = 1,
+ },
+ .format = orig->base.format,
+ },
+ .mask = util_format_get_mask(orig->base.format),
+ };
+ pctx->blit(pctx, &info);
+ }
+
+ shadow->writes = orig->writes;
+}
+
/* Create a render-target/depth surface view of a texture level/layer,
 * precomputing the hardware tiling, internal format, and UIF padded
 * height used at job submit time.
 * NOTE(review): interior lines of this diff fragment are missing (psurf
 * format/size init, the depth/stencil switch body, and the return), so
 * only comments were added.
 */
static struct pipe_surface *
-vc5_create_surface(struct pipe_context *pctx,
+v3d_create_surface(struct pipe_context *pctx,
                   struct pipe_resource *ptex,
                   const struct pipe_surface *surf_tmpl)
{
-        struct vc5_context *vc5 = vc5_context(pctx);
-        struct vc5_screen *screen = vc5->screen;
-        struct vc5_surface *surface = CALLOC_STRUCT(vc5_surface);
-        struct vc5_resource *rsc = vc5_resource(ptex);
+        struct v3d_context *v3d = v3d_context(pctx);
+        struct v3d_screen *screen = v3d->screen;
+        struct v3d_surface *surface = CALLOC_STRUCT(v3d_surface);
+        struct v3d_resource *rsc = v3d_resource(ptex);
        if (!surface)
                return NULL;
        struct pipe_surface *psurf = &surface->base;
        unsigned level = surf_tmpl->u.tex.level;
-        struct vc5_resource_slice *slice = &rsc->slices[level];
+        struct v3d_resource_slice *slice = &rsc->slices[level];
        pipe_reference_init(&psurf->reference, 1);
        pipe_resource_reference(&psurf->texture, ptex);
        psurf->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
        psurf->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
-        surface->offset = vc5_layer_offset(ptex, level,
+        surface->offset = v3d_layer_offset(ptex, level,
                                           psurf->u.tex.first_layer);
        surface->tiling = slice->tiling;
-        surface->format = vc5_get_rt_format(&screen->devinfo, psurf->format);
+        surface->format = v3d_get_rt_format(&screen->devinfo, psurf->format);
+
+        const struct util_format_description *desc =
+                util_format_description(psurf->format);
+
        /* Formats stored BGRA (swizzle[0] == Z) need an R/B swap at output,
         * except 565 which the hardware handles directly.
         */
+        surface->swap_rb = (desc->swizzle[0] == PIPE_SWIZZLE_Z &&
+                            psurf->format != PIPE_FORMAT_B5G6R5_UNORM);
        if (util_format_is_depth_or_stencil(psurf->format)) {
                switch (psurf->format) {
                }
        } else {
                uint32_t bpp, type;
-                vc5_get_internal_type_bpp_for_output_format(&screen->devinfo,
+                v3d_get_internal_type_bpp_for_output_format(&screen->devinfo,
                                                            surface->format,
                                                            &type, &bpp);
                surface->internal_type = type;
            surface->tiling == VC5_TILING_UIF_XOR) {
                /* Padded height in UIF blocks (2 utiles tall each). */
                surface->padded_height_of_output_image_in_uif_blocks =
                        (slice->padded_height /
-                         (2 * vc5_utile_height(rsc->cpp)));
+                         (2 * v3d_utile_height(rsc->cpp)));
        }
        /* Z and S live in separate resources; mirror the surface. */
        if (rsc->separate_stencil) {
                surface->separate_stencil =
-                        vc5_create_surface(pctx, &rsc->separate_stencil->base,
+                        v3d_create_surface(pctx, &rsc->separate_stencil->base,
                                           surf_tmpl);
        }
}
/* Destroy a surface, dropping any separate-stencil sibling reference.
 * NOTE(review): as shown, this never releases psurf->texture or frees the
 * surface itself — likely lines lost from this diff fragment; confirm
 * against the full file.
 */
static void
-vc5_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
+v3d_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
{
-        struct vc5_surface *surf = vc5_surface(psurf);
+        struct v3d_surface *surf = v3d_surface(psurf);
        if (surf->separate_stencil)
                pipe_surface_reference(&surf->separate_stencil, NULL);
}
/* pipe_context::flush_resource hook — intentionally a no-op. */
static void
-vc5_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
+v3d_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
        /* All calls to flush_resource are followed by a flush of the context,
         * so there's nothing to do.
         */
}
static enum pipe_format
-vc5_resource_get_internal_format(struct pipe_resource *prsc)
+v3d_resource_get_internal_format(struct pipe_resource *prsc)
{
- return vc5_resource(prsc)->internal_format;
+ return v3d_resource(prsc)->internal_format;
}
static void
-vc5_resource_set_stencil(struct pipe_resource *prsc,
+v3d_resource_set_stencil(struct pipe_resource *prsc,
struct pipe_resource *stencil)
{
- vc5_resource(prsc)->separate_stencil = vc5_resource(stencil);
+ v3d_resource(prsc)->separate_stencil = v3d_resource(stencil);
}
static struct pipe_resource *
-vc5_resource_get_stencil(struct pipe_resource *prsc)
+v3d_resource_get_stencil(struct pipe_resource *prsc)
{
- struct vc5_resource *rsc = vc5_resource(prsc);
+ struct v3d_resource *rsc = v3d_resource(prsc);
return &rsc->separate_stencil->base;
}
static const struct u_transfer_vtbl transfer_vtbl = {
- .resource_create = vc5_resource_create,
- .resource_destroy = vc5_resource_destroy,
- .transfer_map = vc5_resource_transfer_map,
- .transfer_unmap = vc5_resource_transfer_unmap,
+ .resource_create = v3d_resource_create,
+ .resource_destroy = v3d_resource_destroy,
+ .transfer_map = v3d_resource_transfer_map,
+ .transfer_unmap = v3d_resource_transfer_unmap,
.transfer_flush_region = u_default_transfer_flush_region,
- .get_internal_format = vc5_resource_get_internal_format,
- .set_stencil = vc5_resource_set_stencil,
- .get_stencil = vc5_resource_get_stencil,
+ .get_internal_format = v3d_resource_get_internal_format,
+ .set_stencil = v3d_resource_set_stencil,
+ .get_stencil = v3d_resource_get_stencil,
};
void
-vc5_resource_screen_init(struct pipe_screen *pscreen)
+v3d_resource_screen_init(struct pipe_screen *pscreen)
{
pscreen->resource_create_with_modifiers =
- vc5_resource_create_with_modifiers;
+ v3d_resource_create_with_modifiers;
pscreen->resource_create = u_transfer_helper_resource_create;
- pscreen->resource_from_handle = vc5_resource_from_handle;
- pscreen->resource_get_handle = vc5_resource_get_handle;
+ pscreen->resource_from_handle = v3d_resource_from_handle;
+ pscreen->resource_get_handle = v3d_resource_get_handle;
pscreen->resource_destroy = u_transfer_helper_resource_destroy;
pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
- true, true, true);
+ true, false,
+ true, true);
}
void
-vc5_resource_context_init(struct pipe_context *pctx)
+v3d_resource_context_init(struct pipe_context *pctx)
{
pctx->transfer_map = u_transfer_helper_transfer_map;
pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
pctx->buffer_subdata = u_default_buffer_subdata;
- pctx->texture_subdata = u_default_texture_subdata;
- pctx->create_surface = vc5_create_surface;
- pctx->surface_destroy = vc5_surface_destroy;
+ pctx->texture_subdata = v3d_texture_subdata;
+ pctx->create_surface = v3d_create_surface;
+ pctx->surface_destroy = v3d_surface_destroy;
pctx->resource_copy_region = util_resource_copy_region;
- pctx->blit = vc5_blit;
- pctx->flush_resource = vc5_flush_resource;
+ pctx->blit = v3d_blit;
+ pctx->generate_mipmap = v3d_generate_mipmap;
+ pctx->flush_resource = v3d_flush_resource;
}