#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
+#include "util/u_threaded_context.h"
#include "util/u_transfer.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "iris_context.h"
#include "iris_resource.h"
#include "iris_screen.h"
-#include "intel/common/gen_debug.h"
+#include "intel/dev/gen_debug.h"
#include "isl/isl.h"
#include "drm-uapi/drm_fourcc.h"
#include "drm-uapi/i915_drm.h"
unreachable("invalid texture type");
}
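+/**
+ * Implements pipe_screen::query_dmabuf_modifiers().  Reports which DRM
+ * format modifiers we support for the given format: at most 'max' entries
+ * are written to 'modifiers' (and 'external_only'), while '*count' always
+ * receives the total number supported.  Frontends typically call this with
+ * max == 0 first to size their arrays, then again to fill them.
+ */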
+static void
+iris_query_dmabuf_modifiers(struct pipe_screen *pscreen,
+ enum pipe_format pfmt,
+ int max,
+ uint64_t *modifiers,
+ unsigned int *external_only,
+ int *count)
+{
+ struct iris_screen *screen = (void *) pscreen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
+
+ uint64_t all_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ I915_FORMAT_MOD_X_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ // XXX: (broken) I915_FORMAT_MOD_Y_TILED_CCS,
+ };
+
+ int supported_mods = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(all_modifiers); i++) {
+ if (!modifier_is_supported(devinfo, all_modifiers[i]))
+ continue;
+
+ if (supported_mods < max) {
+ if (modifiers)
+ modifiers[supported_mods] = all_modifiers[i];
+
+ if (external_only)
+ external_only[supported_mods] = util_format_is_yuv(pfmt);
+ }
+
+ supported_mods++;
+ }
+
+ *count = supported_mods;
+}
+
static isl_surf_usage_flags_t
pipe_bind_to_isl_usage(unsigned bindings)
{
/* For packed depth-stencil, we treat depth as the primary resource
* and store S8 as the "second plane" resource.
*/
- return p_res->next;
+ if (p_res->next && p_res->next->format == PIPE_FORMAT_S8_UINT)
+ return p_res->next;
+
+ return NULL;
}
static void
iris_resource_disable_aux(struct iris_resource *res)
{
iris_bo_unreference(res->aux.bo);
+ iris_bo_unreference(res->aux.clear_color_bo);
free(res->aux.state);
- // XXX: clear color BO
-
res->aux.usage = ISL_AUX_USAGE_NONE;
res->aux.possible_usages = 1 << ISL_AUX_USAGE_NONE;
+ res->aux.sampler_usages = 1 << ISL_AUX_USAGE_NONE;
res->aux.surf.size_B = 0;
res->aux.bo = NULL;
+ res->aux.clear_color_bo = NULL;
res->aux.state = NULL;
}
{
struct iris_resource *res = (struct iris_resource *)resource;
+ if (resource->target == PIPE_BUFFER)
+ util_range_destroy(&res->valid_buffer_range);
+
iris_resource_disable_aux(res);
iris_bo_unreference(res->bo);
pipe_reference_init(&res->base.reference, 1);
res->aux.possible_usages = 1 << ISL_AUX_USAGE_NONE;
+ res->aux.sampler_usages = 1 << ISL_AUX_USAGE_NONE;
+
+ if (templ->target == PIPE_BUFFER)
+ util_range_init(&res->valid_buffer_range);
return res;
}
UNUSED bool ok = false;
uint8_t memset_value = 0;
uint32_t alloc_flags = 0;
+ const struct gen_device_info *devinfo = &screen->devinfo;
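+ /* Gen10+ samplers can read the clear color straight from memory, so we
+ * reserve isl's clear_color_state_size; gen9 only needs room for the raw
+ * clear value; older gens store no clear color in the aux buffer.
+ */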
+ const unsigned clear_color_state_size = devinfo->gen >= 10 ?
+ screen->isl_dev.ss.clear_color_state_size :
+ (devinfo->gen >= 9 ? screen->isl_dev.ss.clear_value_size : 0);
assert(!res->aux.bo);
switch (res->aux.usage) {
case ISL_AUX_USAGE_NONE:
res->aux.surf.size_B = 0;
+ ok = true;
break;
case ISL_AUX_USAGE_HIZ:
initial_state = ISL_AUX_STATE_AUX_INVALID;
break;
}
+ /* We should have a valid aux_surf. */
+ if (!ok)
+ return false;
+
/* No work is needed for a zero-sized auxiliary buffer. */
if (res->aux.surf.size_B == 0)
return true;
- /* Assert that ISL gave us a valid aux surf */
- assert(ok);
-
/* Create the aux_state for the auxiliary buffer. */
res->aux.state = create_aux_state_map(res, initial_state);
if (!res->aux.state)
return false;
+ uint64_t size = res->aux.surf.size_B;
+
+ /* Allocate space in the buffer for storing the clear color. On modern
+ * platforms (gen >= 10), the sampler can read it directly from this
+ * buffer.
+ *
+ * On gen <= 9, we still store the clear color in the buffer, but copy
+ * it back into the surface state during state emission.
+ */
+ res->aux.clear_color_offset = size;
+ size += clear_color_state_size;
+
/* Allocate the auxiliary buffer. ISL has a stricter set of alignment
* rules than the drm allocator, so we can pass the ISL dimensions in
* terms of bytes instead of trying to recalculate based on different
* format block sizes.
*/
- res->aux.bo = iris_bo_alloc_tiled(screen->bufmgr, "aux buffer",
- res->aux.surf.size_B,
+ res->aux.bo = iris_bo_alloc_tiled(screen->bufmgr, "aux buffer", size,
IRIS_MEMZONE_OTHER, I915_TILING_Y,
res->aux.surf.row_pitch_B, alloc_flags);
- if (!res->aux.bo)
+ if (!res->aux.bo) {
return false;
+ }
- /* Optionally, initialize the auxiliary data to the desired value. */
- if (memset_value != 0) {
+ if (!(alloc_flags & BO_ALLOC_ZEROED)) {
void *map = iris_bo_map(NULL, res->aux.bo, MAP_WRITE | MAP_RAW);
- if (!map)
+
+ if (!map) {
+ iris_resource_disable_aux(res);
return false;
+ }
+
+ if (memset_value != 0)
+ memset(map, memset_value, res->aux.surf.size_B);
+
+ /* Zero the indirect clear color, matching the initial res->aux.clear_color. */
+ memset((char *)map + res->aux.clear_color_offset, 0,
+ clear_color_state_size);
- memset(map, memset_value, res->aux.surf.size_B);
iris_bo_unmap(res->aux.bo);
}
+ if (clear_color_state_size > 0) {
+ res->aux.clear_color_bo = res->aux.bo;
+ iris_bo_reference(res->aux.clear_color_bo);
+ }
+
if (res->aux.usage == ISL_AUX_USAGE_HIZ) {
for (unsigned level = 0; level < res->surf.levels; ++level) {
uint32_t width = u_minify(res->surf.phys_level0_sa.width, level);
if (surf->samples <= 1)
return false;
- /* See isl_surf_get_mcs_surf for details. */
- if (surf->samples == 16 && surf->logical_level0_px.width > 8192)
- return false;
-
/* Depth and stencil buffers use the IMS (interleaved) layout. */
if (isl_surf_usage_is_depth_or_stencil(surf->usage))
return false;
supports_ccs(const struct gen_device_info *devinfo,
const struct isl_surf *surf)
{
- /* Gen9+ only supports CCS for Y-tiled buffers. */
- if (surf->tiling != ISL_TILING_Y0)
- return false;
-
/* CCS only supports singlesampled resources. */
if (surf->samples > 1)
return false;
- /* The PRM doesn't say this explicitly, but fast-clears don't appear to
- * work for 3D textures until Gen9 where the layout of 3D textures changes
- * to match 2D array textures.
- */
- if (devinfo->gen < 9 && surf->dim != ISL_SURF_DIM_2D)
- return false;
-
/* Note: still need to check the format! */
return true;
if (res->mod_info) {
res->aux.possible_usages |= 1 << res->mod_info->aux_usage;
- } else if (res->surf.samples > 1) {
- if (supports_mcs(&res->surf))
- res->aux.possible_usages |= 1 << ISL_AUX_USAGE_MCS;
- } else {
- if (has_depth) {
+ } else if (supports_mcs(&res->surf)) {
+ res->aux.possible_usages |= 1 << ISL_AUX_USAGE_MCS;
+ } else if (has_depth) {
+ if (likely(!(INTEL_DEBUG & DEBUG_NO_HIZ)))
res->aux.possible_usages |= 1 << ISL_AUX_USAGE_HIZ;
- } else if (supports_ccs(devinfo, &res->surf)) {
- if (isl_format_supports_ccs_e(devinfo, res->surf.format))
- res->aux.possible_usages |= 1 << ISL_AUX_USAGE_CCS_E;
+ } else if (likely(!(INTEL_DEBUG & DEBUG_NO_RBC)) &&
+ supports_ccs(devinfo, &res->surf)) {
+ if (isl_format_supports_ccs_e(devinfo, res->surf.format))
+ res->aux.possible_usages |= 1 << ISL_AUX_USAGE_CCS_E;
- if (isl_format_supports_ccs_d(devinfo, res->surf.format))
- res->aux.possible_usages |= 1 << ISL_AUX_USAGE_CCS_D;
- }
+ if (isl_format_supports_ccs_d(devinfo, res->surf.format))
+ res->aux.possible_usages |= 1 << ISL_AUX_USAGE_CCS_D;
}
- // XXX: we don't actually do aux yet
- res->aux.possible_usages = 1 << ISL_AUX_USAGE_NONE;
-
res->aux.usage = util_last_bit(res->aux.possible_usages) - 1;
+ res->aux.sampler_usages = res->aux.possible_usages;
+
+ /* We don't always support sampling with HiZ, and when we do, the
+ * surface must be single-sampled.
+ */
+ if (!devinfo->has_sample_with_hiz || res->surf.samples > 1) {
+ res->aux.sampler_usages &= ~(1 << ISL_AUX_USAGE_HIZ);
+ }
+
const char *name = "miptree";
enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;
+ unsigned int flags = 0;
+ if (templ->usage == PIPE_USAGE_STAGING)
+ flags |= BO_ALLOC_COHERENT;
+
/* These are for u_upload_mgr buffers only */
assert(!(templ->flags & (IRIS_RESOURCE_FLAG_SHADER_MEMZONE |
IRIS_RESOURCE_FLAG_SURFACE_MEMZONE |
res->bo = iris_bo_alloc_tiled(screen->bufmgr, name, res->surf.size_B,
memzone,
isl_tiling_to_i915_tiling(res->surf.tiling),
- res->surf.row_pitch_B, 0);
+ res->surf.row_pitch_B, flags);
if (!res->bo)
goto fail;
if (!iris_resource_alloc_aux(screen, res))
- goto fail;
+ iris_resource_disable_aux(res);
return &res->base;
return NULL;
}
+ util_range_add(&res->valid_buffer_range, 0, templ->width0);
+
return &res->base;
}
if (!res)
return NULL;
- if (whandle->offset != 0) {
- dbg_printf("Attempt to import unsupported winsys offset %u\n",
- whandle->offset);
- goto fail;
- }
-
switch (whandle->type) {
case WINSYS_HANDLE_TYPE_FD:
res->bo = iris_bo_import_dmabuf(bufmgr, whandle->handle);
if (!res->bo)
return NULL;
+ res->offset = whandle->offset;
+
uint64_t modifier = whandle->modifier;
if (modifier == DRM_FORMAT_MOD_INVALID) {
modifier = tiling_to_modifier(res->bo->tiling_mode);
return NULL;
}
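+/**
+ * Resolve a resource's aux data so the main surface contains everything
+ * an external consumer honoring the resource's modifier (or lack of one)
+ * is entitled to see.
+ */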
+static void
+iris_flush_resource(struct pipe_context *ctx, struct pipe_resource *resource)
+{
+ struct iris_context *ice = (struct iris_context *)ctx;
+ struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
+ struct iris_resource *res = (void *) resource;
+ const struct isl_drm_modifier_info *mod = res->mod_info;
+
+ iris_resource_prepare_access(ice, render_batch, res,
+ 0, INTEL_REMAINING_LEVELS,
+ 0, INTEL_REMAINING_LAYERS,
+ mod ? mod->aux_usage : ISL_AUX_USAGE_NONE,
+ mod ? mod->supports_clear_color : false);
+}
+
static boolean
iris_resource_get_handle(struct pipe_screen *pscreen,
struct pipe_context *ctx,
struct winsys_handle *whandle,
unsigned usage)
{
- struct iris_context *ice = (struct iris_context *)ctx;
struct iris_resource *res = (struct iris_resource *)resource;
+ /* Disable aux usage if PIPE_HANDLE_USAGE_EXPLICIT_FLUSH is not set and
+ * this is the first time we are dealing with this resource (reference
+ * count of 1).
+ */
+ if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) && res->aux.usage != 0) {
+ if (p_atomic_read(&resource->reference.count) == 1)
+ iris_resource_disable_aux(res);
+ }
+
/* If this is a buffer, stride should be 0 - no need to special case */
whandle->stride = res->surf.row_pitch_B;
whandle->modifier =
res->mod_info ? res->mod_info->modifier
: tiling_to_modifier(res->bo->tiling_mode);
- if (!res->mod_info || res->mod_info->aux_usage != res->aux.usage) {
- struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
- iris_resource_prepare_access(ice, render_batch, res,
- 0, INTEL_REMAINING_LEVELS,
- 0, INTEL_REMAINING_LAYERS,
- ISL_AUX_USAGE_NONE, false);
- iris_resource_disable_aux(res);
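+ /* Any aux usage beyond what the modifier (or lack of one) allows must
+ * already be resolved by this point; check that in debug builds.
+ */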
+#ifndef NDEBUG
+ enum isl_aux_usage allowed_usage =
+ res->mod_info ? res->mod_info->aux_usage : ISL_AUX_USAGE_NONE;
+
+ if (res->aux.usage != allowed_usage) {
+ enum isl_aux_state aux_state = iris_resource_get_aux_state(res, 0, 0);
+ assert(aux_state == ISL_AUX_STATE_RESOLVED ||
+ aux_state == ISL_AUX_STATE_PASS_THROUGH);
}
+#endif
switch (whandle->type) {
case WINSYS_HANDLE_TYPE_SHARED:
return false;
}
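+/**
+ * Returns true if the BO is busy from the kernel's perspective, or is
+ * referenced by a not-yet-submitted batch (which will make it busy the
+ * moment that batch is flushed).
+ */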
+static bool
+resource_is_busy(struct iris_context *ice,
+ struct iris_resource *res)
+{
+ bool busy = iris_bo_busy(res->bo);
+
+ for (int i = 0; i < IRIS_BATCH_COUNT; i++)
+ busy |= iris_batch_references(&ice->batches[i], res->bo);
+
+ return busy;
+}
+
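+/**
+ * Implements pipe_context::invalidate_resource: discard a buffer's
+ * contents, ideally by swapping in fresh backing storage so new writes
+ * need not synchronize against the GPU.
+ */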
+static void
+iris_invalidate_resource(struct pipe_context *ctx,
+ struct pipe_resource *resource)
+{
+ struct iris_screen *screen = (void *) ctx->screen;
+ struct iris_context *ice = (void *) ctx;
+ struct iris_resource *res = (void *) resource;
+
+ if (resource->target != PIPE_BUFFER)
+ return;
+
+ if (!resource_is_busy(ice, res)) {
+ /* The resource is idle, so just mark that it contains no data and
+ * keep using the same underlying buffer object.
+ */
+ util_range_set_empty(&res->valid_buffer_range);
+ return;
+ }
+
+ /* Otherwise, try and replace the backing storage with a new BO. */
+
+ /* We can't reallocate memory we didn't allocate in the first place. */
+ if (res->bo->userptr)
+ return;
+
+ // XXX: We should support this.
+ if (res->bind_history & PIPE_BIND_STREAM_OUTPUT)
+ return;
+
+ struct iris_bo *old_bo = res->bo;
+ struct iris_bo *new_bo =
+ iris_bo_alloc(screen->bufmgr, res->bo->name, resource->width0,
+ iris_memzone_for_address(old_bo->gtt_offset));
+ if (!new_bo)
+ return;
+
+ /* Swap out the backing storage */
+ res->bo = new_bo;
+
+ /* Rebind the buffer, replacing any state referring to the old BO's
+ * address, and marking state dirty so it's reemitted.
+ */
+ ice->vtbl.rebind_buffer(ice, res, old_bo->gtt_offset);
+
+ util_range_set_empty(&res->valid_buffer_range);
+
+ iris_bo_unreference(old_bo);
+}
+
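+/**
+ * For a write mapping that went through a staging blit, copy the flushed
+ * region from the staging resource back to the real one on the GPU.
+ */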
+static void
+iris_flush_staging_region(struct pipe_transfer *xfer,
+ const struct pipe_box *flush_box)
+{
+ if (!(xfer->usage & PIPE_TRANSFER_WRITE))
+ return;
+
+ struct iris_transfer *map = (void *) xfer;
+
+ struct pipe_box src_box = *flush_box;
+
+ /* Account for extra alignment padding in staging buffer */
+ if (xfer->resource->target == PIPE_BUFFER)
+ src_box.x += xfer->box.x % IRIS_MAP_BUFFER_ALIGNMENT;
+
+ struct pipe_box dst_box = (struct pipe_box) {
+ .x = xfer->box.x + flush_box->x,
+ .y = xfer->box.y + flush_box->y,
+ .z = xfer->box.z + flush_box->z,
+ .width = flush_box->width,
+ .height = flush_box->height,
+ .depth = flush_box->depth,
+ };
+
+ iris_copy_region(map->blorp, map->batch, xfer->resource, xfer->level,
+ dst_box.x, dst_box.y, dst_box.z, map->staging, 0,
+ &src_box);
+}
+
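+/* Tear down the staging resource backing a GPU-copy mapping; any dirty
+ * data has already been written back by iris_flush_staging_region().
+ */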
+static void
+iris_unmap_copy_region(struct iris_transfer *map)
+{
+ iris_resource_destroy(map->staging->screen, map->staging);
+
+ map->ptr = NULL;
+}
+
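+/**
+ * Map a resource by blitting the region of interest to a linear staging
+ * resource on the GPU and mapping that instead.  This avoids stalling on
+ * a busy BO and works for tiled or compressed surfaces the CPU cannot
+ * access directly.
+ */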
+static void
+iris_map_copy_region(struct iris_transfer *map)
+{
+ struct pipe_screen *pscreen = &map->batch->screen->base;
+ struct pipe_transfer *xfer = &map->base;
+ struct pipe_box *box = &xfer->box;
+ struct iris_resource *res = (void *) xfer->resource;
+
+ unsigned extra = xfer->resource->target == PIPE_BUFFER ?
+ box->x % IRIS_MAP_BUFFER_ALIGNMENT : 0;
+
+ struct pipe_resource templ = (struct pipe_resource) {
+ .usage = PIPE_USAGE_STAGING,
+ .width0 = box->width + extra,
+ .height0 = box->height,
+ .depth0 = 1,
+ .nr_samples = xfer->resource->nr_samples,
+ .nr_storage_samples = xfer->resource->nr_storage_samples,
+ .array_size = box->depth,
+ };
+
+ if (xfer->resource->target == PIPE_BUFFER)
+ templ.target = PIPE_BUFFER;
+ else if (templ.array_size > 1)
+ templ.target = PIPE_TEXTURE_2D_ARRAY;
+ else
+ templ.target = PIPE_TEXTURE_2D;
+
+ /* Depth, stencil, and ASTC can't be linear surfaces, so we can't use
+ * xfer->resource->format directly. Pick a format with the same
+ * bits-per-block (bpb) so resource creation will succeed; blorp_copy
+ * will override it anyway.
+ */
+ switch (util_format_get_blocksizebits(res->internal_format)) {
+ case 8: templ.format = PIPE_FORMAT_R8_UINT; break;
+ case 16: templ.format = PIPE_FORMAT_R8G8_UINT; break;
+ case 24: templ.format = PIPE_FORMAT_R8G8B8_UINT; break;
+ case 32: templ.format = PIPE_FORMAT_R8G8B8A8_UINT; break;
+ case 48: templ.format = PIPE_FORMAT_R16G16B16_UINT; break;
+ case 64: templ.format = PIPE_FORMAT_R16G16B16A16_UINT; break;
+ case 96: templ.format = PIPE_FORMAT_R32G32B32_UINT; break;
+ case 128: templ.format = PIPE_FORMAT_R32G32B32A32_UINT; break;
+ default: unreachable("Invalid bpb");
+ }
+
+ map->staging = iris_resource_create(pscreen, &templ);
+ assert(map->staging);
+
+ if (templ.target != PIPE_BUFFER) {
+ struct isl_surf *surf = &((struct iris_resource *) map->staging)->surf;
+ xfer->stride = isl_surf_get_row_pitch_B(surf);
+ xfer->layer_stride = isl_surf_get_array_pitch(surf);
+ }
+
+ if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
+ iris_copy_region(map->blorp, map->batch, map->staging, 0, extra, 0, 0,
+ xfer->resource, xfer->level, box);
+ /* Ensure writes to the staging BO land before we map it below. */
+ iris_emit_pipe_control_flush(map->batch,
+ "transfer read: flush before mapping",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_CS_STALL);
+ }
+
+ struct iris_bo *staging_bo = iris_resource_bo(map->staging);
+
+ if (iris_batch_references(map->batch, staging_bo))
+ iris_batch_flush(map->batch);
+
+ map->ptr =
+ iris_bo_map(map->dbg, staging_bo, xfer->usage & MAP_FLAGS) + extra;
+
+ map->unmap = iris_unmap_copy_region;
+}
+
static void
-get_image_offset_el(struct isl_surf *surf, unsigned level, unsigned z,
+get_image_offset_el(const struct isl_surf *surf, unsigned level, unsigned z,
unsigned *out_x0_el, unsigned *out_y0_el)
{
if (surf->dim == ISL_SURF_DIM_3D) {
iris_unmap_s8(struct iris_transfer *map)
{
struct pipe_transfer *xfer = &map->base;
+ const struct pipe_box *box = &xfer->box;
struct iris_resource *res = (struct iris_resource *) xfer->resource;
struct isl_surf *surf = &res->surf;
const bool has_swizzling = false;
if (xfer->usage & PIPE_TRANSFER_WRITE) {
uint8_t *untiled_s8_map = map->ptr;
uint8_t *tiled_s8_map =
- iris_bo_map(map->dbg, res->bo, xfer->usage | MAP_RAW);
+ iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
- struct pipe_box box = xfer->box;
-
- for (int s = 0; s < box.depth; s++) {
+ for (int s = 0; s < box->depth; s++) {
unsigned x0_el, y0_el;
- get_image_offset_el(surf, xfer->level, box.z, &x0_el, &y0_el);
+ get_image_offset_el(surf, xfer->level, box->z + s, &x0_el, &y0_el);
- for (uint32_t y = 0; y < box.height; y++) {
- for (uint32_t x = 0; x < box.width; x++) {
+ for (uint32_t y = 0; y < box->height; y++) {
+ for (uint32_t x = 0; x < box->width; x++) {
ptrdiff_t offset = s8_offset(surf->row_pitch_B,
- x0_el + box.x + x,
- y0_el + box.y + y,
+ x0_el + box->x + x,
+ y0_el + box->y + y,
has_swizzling);
tiled_s8_map[offset] =
untiled_s8_map[s * xfer->layer_stride + y * xfer->stride + x];
}
}
-
- box.z++;
}
}
iris_map_s8(struct iris_transfer *map)
{
struct pipe_transfer *xfer = &map->base;
+ const struct pipe_box *box = &xfer->box;
struct iris_resource *res = (struct iris_resource *) xfer->resource;
struct isl_surf *surf = &res->surf;
xfer->stride = surf->row_pitch_B;
- xfer->layer_stride = xfer->stride * xfer->box.height;
+ xfer->layer_stride = xfer->stride * box->height;
/* The tiling and detiling functions require that the linear buffer has
* a 16-byte alignment (that is, its `x0` is 16-byte aligned). Here we
* over-allocate the linear buffer to get the proper alignment.
*/
- map->buffer = map->ptr = malloc(xfer->layer_stride * xfer->box.depth);
+ map->buffer = map->ptr = malloc(xfer->layer_stride * box->depth);
assert(map->buffer);
const bool has_swizzling = false;
if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
uint8_t *untiled_s8_map = map->ptr;
uint8_t *tiled_s8_map =
- iris_bo_map(map->dbg, res->bo, xfer->usage | MAP_RAW);
+ iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
- struct pipe_box box = xfer->box;
-
- for (int s = 0; s < box.depth; s++) {
+ for (int s = 0; s < box->depth; s++) {
unsigned x0_el, y0_el;
- get_image_offset_el(surf, xfer->level, box.z, &x0_el, &y0_el);
+ get_image_offset_el(surf, xfer->level, box->z + s, &x0_el, &y0_el);
- for (uint32_t y = 0; y < box.height; y++) {
- for (uint32_t x = 0; x < box.width; x++) {
+ for (uint32_t y = 0; y < box->height; y++) {
+ for (uint32_t x = 0; x < box->width; x++) {
ptrdiff_t offset = s8_offset(surf->row_pitch_B,
- x0_el + box.x + x,
- y0_el + box.y + y,
+ x0_el + box->x + x,
+ y0_el + box->y + y,
has_swizzling);
untiled_s8_map[s * xfer->layer_stride + y * xfer->stride + x] =
tiled_s8_map[offset];
}
}
-
- box.z++;
}
}
* xs are in units of bytes and ys are in units of strides.
*/
static inline void
-tile_extents(struct isl_surf *surf,
+tile_extents(const struct isl_surf *surf,
const struct pipe_box *box,
- unsigned level,
+ unsigned level, int z,
unsigned *x1_B, unsigned *x2_B,
unsigned *y1_el, unsigned *y2_el)
{
assert(box->y % fmtl->bh == 0);
unsigned x0_el, y0_el;
- get_image_offset_el(surf, level, box->z, &x0_el, &y0_el);
+ get_image_offset_el(surf, level, box->z + z, &x0_el, &y0_el);
*x1_B = (box->x / fmtl->bw + x0_el) * cpp;
*y1_el = box->y / fmtl->bh + y0_el;
iris_unmap_tiled_memcpy(struct iris_transfer *map)
{
struct pipe_transfer *xfer = &map->base;
- struct pipe_box box = xfer->box;
+ const struct pipe_box *box = &xfer->box;
struct iris_resource *res = (struct iris_resource *) xfer->resource;
struct isl_surf *surf = &res->surf;
const bool has_swizzling = false;
if (xfer->usage & PIPE_TRANSFER_WRITE) {
- char *dst = iris_bo_map(map->dbg, res->bo, xfer->usage | MAP_RAW);
+ char *dst =
+ iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
- for (int s = 0; s < box.depth; s++) {
+ for (int s = 0; s < box->depth; s++) {
unsigned x1, x2, y1, y2;
- tile_extents(surf, &box, xfer->level, &x1, &x2, &y1, &y2);
+ tile_extents(surf, box, xfer->level, s, &x1, &x2, &y1, &y2);
void *ptr = map->ptr + s * xfer->layer_stride;
isl_memcpy_linear_to_tiled(x1, x2, y1, y2, dst, ptr,
surf->row_pitch_B, xfer->stride,
has_swizzling, surf->tiling, ISL_MEMCPY);
- box.z++;
}
}
os_free_aligned(map->buffer);
iris_map_tiled_memcpy(struct iris_transfer *map)
{
struct pipe_transfer *xfer = &map->base;
+ const struct pipe_box *box = &xfer->box;
struct iris_resource *res = (struct iris_resource *) xfer->resource;
struct isl_surf *surf = &res->surf;
xfer->stride = ALIGN(surf->row_pitch_B, 16);
- xfer->layer_stride = xfer->stride * xfer->box.height;
+ xfer->layer_stride = xfer->stride * box->height;
unsigned x1, x2, y1, y2;
- tile_extents(surf, &xfer->box, xfer->level, &x1, &x2, &y1, &y2);
+ tile_extents(surf, box, xfer->level, 0, &x1, &x2, &y1, &y2);
/* The tiling and detiling functions require that the linear buffer has
* a 16-byte alignment (that is, its `x0` is 16-byte aligned). Here we
* over-allocate the linear buffer to get the proper alignment.
*/
map->buffer =
- os_malloc_aligned(xfer->layer_stride * xfer->box.depth, 16);
+ os_malloc_aligned(xfer->layer_stride * box->depth, 16);
assert(map->buffer);
map->ptr = (char *)map->buffer + (x1 & 0xf);
// XXX: PIPE_TRANSFER_READ?
if (!(xfer->usage & PIPE_TRANSFER_DISCARD_RANGE)) {
- char *src = iris_bo_map(map->dbg, res->bo, xfer->usage | MAP_RAW);
+ char *src =
+ iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
- struct pipe_box box = xfer->box;
-
- for (int s = 0; s < box.depth; s++) {
+ for (int s = 0; s < box->depth; s++) {
unsigned x1, x2, y1, y2;
- tile_extents(surf, &box, xfer->level, &x1, &x2, &y1, &y2);
+ tile_extents(surf, box, xfer->level, s, &x1, &x2, &y1, &y2);
- /* Use 's' rather than 'box.z' to rebase the first slice to 0. */
+ /* Use 's' rather than 'box->z' to rebase the first slice to 0. */
void *ptr = map->ptr + s * xfer->layer_stride;
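+ /* Read back using streaming loads (movntdqa where available); tiled
+ * BOs are typically mapped write-combined, where ordinary cached
+ * reads are extremely slow.
+ */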
isl_memcpy_tiled_to_linear(x1, x2, y1, y2, ptr, src, xfer->stride,
surf->row_pitch_B, has_swizzling,
- surf->tiling, ISL_MEMCPY);
- box.z++;
+ surf->tiling, ISL_MEMCPY_STREAMING_LOAD);
}
}
struct pipe_box *box = &xfer->box;
struct iris_resource *res = (struct iris_resource *) xfer->resource;
- void *ptr = iris_bo_map(map->dbg, res->bo, xfer->usage);
+ void *ptr = iris_bo_map(map->dbg, res->bo, xfer->usage & MAP_FLAGS);
if (res->base.target == PIPE_BUFFER) {
xfer->stride = 0;
}
}
+static bool
+can_promote_to_async(const struct iris_resource *res,
+ const struct pipe_box *box,
+ enum pipe_transfer_usage usage)
+{
+ /* If we're writing to a section of the buffer that hasn't even been
+ * initialized with useful data, then we can safely promote this write
+ * to be unsynchronized. This helps the common pattern of appending data.
+ */
+ return res->base.target == PIPE_BUFFER && (usage & PIPE_TRANSFER_WRITE) &&
+ !(usage & TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED) &&
+ !util_ranges_intersect(&res->valid_buffer_range, box->x,
+ box->x + box->width);
+}
+
static void *
iris_transfer_map(struct pipe_context *ctx,
struct pipe_resource *resource,
struct iris_resource *res = (struct iris_resource *)resource;
struct isl_surf *surf = &res->surf;
- /* If we can discard the whole resource, we can also discard the
- * subrange being accessed.
- */
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
- usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ /* Replace the backing storage with a fresh buffer for non-async maps */
+ if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
+ TC_TRANSFER_MAP_NO_INVALIDATE)))
+ iris_invalidate_resource(ctx, resource);
- if (surf->tiling != ISL_TILING_LINEAR &&
- (usage & PIPE_TRANSFER_MAP_DIRECTLY))
- return NULL;
+ /* If we can discard the whole resource, we can discard the range. */
+ usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ }
+
+ bool map_would_stall = false;
if (resource->target != PIPE_BUFFER) {
iris_resource_access_raw(ice, &ice->batches[IRIS_BATCH_RENDER], res,
usage & PIPE_TRANSFER_WRITE);
}
+ if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ can_promote_to_async(res, box, usage)) {
+ usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ }
+
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
- if (iris_batch_references(&ice->batches[i], res->bo))
- iris_batch_flush(&ice->batches[i]);
- }
+ map_would_stall = resource_is_busy(ice, res);
+
+ if (map_would_stall && (usage & PIPE_TRANSFER_DONTBLOCK) &&
+ (usage & PIPE_TRANSFER_MAP_DIRECTLY))
+ return NULL;
}
- if ((usage & PIPE_TRANSFER_DONTBLOCK) && iris_bo_busy(res->bo))
+ if (surf->tiling != ISL_TILING_LINEAR &&
+ (usage & PIPE_TRANSFER_MAP_DIRECTLY))
return NULL;
struct iris_transfer *map = slab_alloc(&ice->transfer_pool);
xfer->box = *box;
*ptransfer = xfer;
- xfer->usage &= (PIPE_TRANSFER_READ |
- PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_PERSISTENT |
- PIPE_TRANSFER_COHERENT |
- PIPE_TRANSFER_DISCARD_RANGE);
-
- if (surf->tiling == ISL_TILING_W) {
- // XXX: just teach iris_map_tiled_memcpy about W tiling...
- iris_map_s8(map);
- } else if (surf->tiling != ISL_TILING_LINEAR) {
- iris_map_tiled_memcpy(map);
+ if (usage & PIPE_TRANSFER_WRITE)
+ util_range_add(&res->valid_buffer_range, box->x, box->x + box->width);
+
+ /* Avoid using GPU copies for persistent/coherent buffers, as the idea
+ * there is to access them simultaneously on the CPU & GPU. This also
+ * avoids trying to use GPU copies for our u_upload_mgr buffers which
+ * contain state we're constructing for a GPU draw call, which would
+ * kill us with infinite stack recursion.
+ */
+ bool no_gpu = usage & (PIPE_TRANSFER_PERSISTENT |
+ PIPE_TRANSFER_COHERENT |
+ PIPE_TRANSFER_MAP_DIRECTLY);
+
+ /* GPU copies are not useful for buffer reads. Instead of stalling to
+ * read from the original buffer, we'd simply copy it to a temporary...
+ * then stall (a bit longer) to read from that buffer.
+ *
+ * Images are less clear-cut. Color resolves are destructive, removing
+ * the underlying compression, so we'd rather blit the data to a linear
+ * temporary and map that, to avoid the resolve. (It might be better to
+ * use a tiled temporary and the tiled_memcpy paths...)
+ */
+ if (!(usage & PIPE_TRANSFER_DISCARD_RANGE) &&
+ res->aux.usage != ISL_AUX_USAGE_CCS_E &&
+ res->aux.usage != ISL_AUX_USAGE_CCS_D) {
+ no_gpu = true;
+ }
+
+ if ((map_would_stall || res->aux.usage == ISL_AUX_USAGE_CCS_E) && !no_gpu) {
+ /* If we need a synchronous mapping and the resource is busy,
+ * we copy to/from a linear temporary buffer using the GPU.
+ */
+ map->batch = &ice->batches[IRIS_BATCH_RENDER];
+ map->blorp = &ice->blorp;
+ iris_map_copy_region(map);
} else {
- iris_map_direct(map);
+ /* Otherwise we're free to map on the CPU. Flush if needed. */
+ if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+ if (iris_batch_references(&ice->batches[i], res->bo))
+ iris_batch_flush(&ice->batches[i]);
+ }
+ }
+
+ if (surf->tiling == ISL_TILING_W) {
+ /* TODO: Teach iris_map_tiled_memcpy about W-tiling... */
+ iris_map_s8(map);
+ } else if (surf->tiling != ISL_TILING_LINEAR) {
+ iris_map_tiled_memcpy(map);
+ } else {
+ iris_map_direct(map);
+ }
}
return map->ptr;
{
struct iris_context *ice = (struct iris_context *)ctx;
struct iris_resource *res = (struct iris_resource *) xfer->resource;
+ struct iris_transfer *map = (void *) xfer;
+ if (map->staging)
+ iris_flush_staging_region(xfer, box);
- // XXX: don't emit flushes in both engines...? we may also need to flush
- // even if there isn't a draw yet - may still be stale data in caches...
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
- if (ice->batches[i].contains_draw) {
+ if (ice->batches[i].contains_draw ||
+ ice->batches[i].cache.render->entries) {
iris_batch_maybe_flush(&ice->batches[i], 24);
- iris_flush_and_dirty_for_history(ice, &ice->batches[i], res);
+ iris_flush_and_dirty_for_history(ice, &ice->batches[i], res,
+ "cache history: transfer flush");
}
}
+
+ /* Make sure we flag constants dirty even if there's no need to emit
+ * any PIPE_CONTROLs to a batch.
+ */
+ iris_dirty_for_history(ice, res);
}
static void
{
struct iris_context *ice = (struct iris_context *)ctx;
struct iris_transfer *map = (void *) xfer;
- struct iris_resource *res = (struct iris_resource *) xfer->resource;
+
+ if (!(xfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+ struct pipe_box flush_box = {
+ .x = 0, .y = 0, .z = 0,
+ .width = xfer->box.width,
+ .height = xfer->box.height,
+ .depth = xfer->box.depth,
+ };
+ iris_transfer_flush_region(ctx, xfer, &flush_box);
+ }
if (map->unmap)
map->unmap(map);
- // XXX: don't emit flushes in both engines...?
- for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
- if (ice->batches[i].contains_draw) {
- iris_batch_maybe_flush(&ice->batches[i], 24);
- iris_flush_and_dirty_for_history(ice, &ice->batches[i], res);
- }
- }
-
pipe_resource_reference(&xfer->resource, NULL);
slab_free(&ice->transfer_pool, map);
}
-static void
-iris_flush_resource(struct pipe_context *ctx, struct pipe_resource *resource)
-{
-}
-
+/**
+ * Mark state dirty that needs to be re-emitted when a resource is written.
+ */
void
-iris_flush_and_dirty_for_history(struct iris_context *ice,
- struct iris_batch *batch,
- struct iris_resource *res)
+iris_dirty_for_history(struct iris_context *ice,
+ struct iris_resource *res)
{
- if (res->base.target != PIPE_BUFFER)
- return;
-
- unsigned flush = PIPE_CONTROL_CS_STALL;
-
- /* We've likely used the rendering engine (i.e. BLORP) to write to this
- * surface. Flush the render cache so the data actually lands.
- */
- if (batch->name != IRIS_BATCH_COMPUTE)
- flush |= PIPE_CONTROL_RENDER_TARGET_FLUSH;
-
uint64_t dirty = 0ull;
if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
- flush |= PIPE_CONTROL_CONST_CACHE_INVALIDATE |
- PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
dirty |= IRIS_DIRTY_CONSTANTS_VS |
IRIS_DIRTY_CONSTANTS_TCS |
IRIS_DIRTY_CONSTANTS_TES |
IRIS_ALL_DIRTY_BINDINGS;
}
+ ice->state.dirty |= dirty;
+}
+
+/**
+ * Produce a set of PIPE_CONTROL bits which ensure data written to a
+ * resource becomes visible, and any stale read cache data is invalidated.
+ */
+uint32_t
+iris_flush_bits_for_history(struct iris_resource *res)
+{
+ uint32_t flush = PIPE_CONTROL_CS_STALL;
+
+ if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
+ flush |= PIPE_CONTROL_CONST_CACHE_INVALIDATE |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ }
+
if (res->bind_history & PIPE_BIND_SAMPLER_VIEW)
flush |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
if (res->bind_history & (PIPE_BIND_SHADER_BUFFER | PIPE_BIND_SHADER_IMAGE))
flush |= PIPE_CONTROL_DATA_CACHE_FLUSH;
- iris_emit_pipe_control_flush(batch, flush);
+ return flush;
+}
- ice->state.dirty |= dirty;
+void
+iris_flush_and_dirty_for_history(struct iris_context *ice,
+ struct iris_batch *batch,
+ struct iris_resource *res,
+ const char *reason)
+{
+ if (res->base.target != PIPE_BUFFER)
+ return;
+
+ uint32_t flush = iris_flush_bits_for_history(res);
+
+ /* We've likely used the rendering engine (i.e. BLORP) to write to this
+ * surface. Flush the render cache so the data actually lands.
+ */
+ if (batch->name != IRIS_BATCH_COMPUTE)
+ flush |= PIPE_CONTROL_RENDER_TARGET_FLUSH;
+
+ iris_emit_pipe_control_flush(batch, reason, flush);
+}
+
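+/**
+ * Record a new fast clear color for a resource.  Returns true if the
+ * color changed, so callers know to update the indirect clear color
+ * buffer as well.
+ */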
+bool
+iris_resource_set_clear_color(struct iris_context *ice,
+ struct iris_resource *res,
+ union isl_color_value color)
+{
+ if (memcmp(&res->aux.clear_color, &color, sizeof(color)) != 0) {
+ res->aux.clear_color = color;
+ return true;
+ }
+
+ return false;
+}
+
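+/**
+ * Return the cached clear color, and optionally the BO and offset at
+ * which the indirect clear color is stored.
+ */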
+union isl_color_value
+iris_resource_get_clear_color(const struct iris_resource *res,
+ struct iris_bo **clear_color_bo,
+ uint64_t *clear_color_offset)
+{
+ assert(res->aux.bo);
+
+ if (clear_color_bo)
+ *clear_color_bo = res->aux.clear_color_bo;
+ if (clear_color_offset)
+ *clear_color_offset = res->aux.clear_color_offset;
+ return res->aux.clear_color;
}
static enum pipe_format
void
iris_init_screen_resource_functions(struct pipe_screen *pscreen)
{
+ pscreen->query_dmabuf_modifiers = iris_query_dmabuf_modifiers;
pscreen->resource_create_with_modifiers =
iris_resource_create_with_modifiers;
pscreen->resource_create = u_transfer_helper_resource_create;
iris_init_resource_functions(struct pipe_context *ctx)
{
ctx->flush_resource = iris_flush_resource;
+ ctx->invalidate_resource = iris_invalidate_resource;
ctx->transfer_map = u_transfer_helper_transfer_map;
ctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
ctx->transfer_unmap = u_transfer_helper_transfer_unmap;