#include <errno.h>
#include <inttypes.h>
+static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex);
+static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
+ const struct pipe_resource *templ);
+
+
+bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
+ struct r600_texture *rdst,
+ unsigned dst_level, unsigned dstx,
+ unsigned dsty, unsigned dstz,
+ struct r600_texture *rsrc,
+ unsigned src_level,
+ const struct pipe_box *src_box)
+{
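+ /* The context must have an SDMA queue. */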
+ if (!rctx->dma.cs)
+ return false;
+
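+ /* SDMA copies raw blocks; src and dst must have the same bits per block. */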
+ if (util_format_get_blocksizebits(rdst->resource.b.b.format) !=
+ util_format_get_blocksizebits(rsrc->resource.b.b.format))
+ return false;
+
+ /* MSAA: SDMA has no multisample support; such blits must take the 3D path. */
+ if (rsrc->resource.b.b.nr_samples > 1 ||
+ rdst->resource.b.b.nr_samples > 1)
+ return false;
+
+ /* Depth-stencil surfaces:
+ * When dst is linear, the DB->CB copy preserves HTILE.
+ * When dst is tiled, the 3D path must be used to update HTILE.
+ */
+ if (rsrc->is_depth || rdst->is_depth)
+ return false;
+
+ /* DCC as:
+ * src: Use the 3D path. DCC decompression is expensive.
+ * dst: Use the 3D path to compress the pixels with DCC.
+ */
+ if ((rsrc->dcc_offset && rsrc->surface.level[src_level].dcc_enabled) ||
+ (rdst->dcc_offset && rdst->surface.level[dst_level].dcc_enabled))
+ return false;
+
+ /* CMASK as:
+ * src: Both the texture and SDMA paths need decompression first; use SDMA.
+ * dst: If overwriting the whole texture, discard CMASK and use
+ * SDMA. Otherwise, use the 3D path.
+ */
+ if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
+ /* The CMASK clear is only enabled for the first level. */
+ assert(dst_level == 0);
+ if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
+ dstx, dsty, dstz, src_box->width,
+ src_box->height, src_box->depth))
+ return false;
+
+ r600_texture_discard_cmask(rctx->screen, rdst);
+ }
+
+ /* All requirements are met. Prepare textures for SDMA. */
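+ /* If the source CMASK is dirty, flush_resource decompresses it first. */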
+ if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
+ rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);
+
+ assert(!(rsrc->dirty_level_mask & (1 << src_level)));
+ assert(!(rdst->dirty_level_mask & (1 << dst_level)));
+
+ return true;
+}
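+
+/* Usage sketch (not part of this patch): a per-chip DMA copy entry point
+ * would call this helper first and fall back to the 3D path on failure:
+ *
+ *   if (!r600_prepare_for_dma_blit(rctx, rdst, dst_level, dstx, dsty, dstz,
+ *                                  rsrc, src_level, src_box)) {
+ *           ctx->resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
+ *                                     src, src_level, src_box);
+ *           return;
+ *   }
+ */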
+
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
struct pipe_resource *dst,
surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
surface->array_size = ptex->array_size;
break;
- case PIPE_TEXTURE_2D_ARRAY:
case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
+ assert(ptex->array_size % 6 == 0);
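+ /* fall through */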
+ case PIPE_TEXTURE_2D_ARRAY:
surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
surface->array_size = ptex->array_size;
break;
default:
return -EINVAL;
}
- if (ptex->bind & PIPE_BIND_SCANOUT) {
- surface->flags |= RADEON_SURF_SCANOUT;
- }
if (!is_flushed_depth && is_depth) {
surface->flags |= RADEON_SURF_ZBUFFER;
if (rscreen->chip_class >= SI) {
surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
}
+
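+ /* Allow explicit DCC opt-outs; R9G9B9E5 presumably isn't DCC-compatible. */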
+ if (rscreen->chip_class >= VI &&
+ (ptex->flags & R600_RESOURCE_FLAG_DISABLE_DCC ||
+ ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT))
+ surface->flags |= RADEON_SURF_DISABLE_DCC;
+
+ if (ptex->bind & PIPE_BIND_SCANOUT) {
+ /* This should catch bugs in gallium users setting incorrect flags. */
+ assert(surface->nsamples == 1 &&
+ surface->array_size == 1 &&
+ surface->npix_z == 1 &&
+ surface->last_level == 0 &&
+ !(surface->flags & RADEON_SURF_Z_OR_SBUFFER));
+
+ surface->flags |= RADEON_SURF_SCANOUT;
+ }
return 0;
}
static int r600_setup_surface(struct pipe_screen *screen,
struct r600_texture *rtex,
- unsigned pitch_in_bytes_override)
+ unsigned pitch_in_bytes_override,
+ unsigned offset)
{
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+ unsigned i;
int r;
r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
- if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
- rtex->surface.stencil_offset =
- rtex->surface.stencil_level[0].offset = rtex->surface.level[0].slice_size;
- }
+ }
+
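+ /* Shift every level by the offset at which the BO was imported. */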
+ if (offset) {
+ for (i = 0; i < ARRAY_SIZE(rtex->surface.level); ++i)
+ rtex->surface.level[i].offset += offset;
}
return 0;
}
-static boolean r600_texture_get_handle(struct pipe_screen* screen,
- struct pipe_resource *ptex,
- struct winsys_handle *whandle)
+static void r600_texture_init_metadata(struct r600_texture *rtex,
+ struct radeon_bo_metadata *metadata)
{
- struct r600_texture *rtex = (struct r600_texture*)ptex;
- struct r600_resource *resource = &rtex->resource;
struct radeon_surf *surface = &rtex->surface;
+
+ memset(metadata, 0, sizeof(*metadata));
+ metadata->microtile = surface->level[0].mode >= RADEON_SURF_MODE_1D ?
+ RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
+ metadata->macrotile = surface->level[0].mode >= RADEON_SURF_MODE_2D ?
+ RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
+ metadata->pipe_config = surface->pipe_config;
+ metadata->bankw = surface->bankw;
+ metadata->bankh = surface->bankh;
+ metadata->tile_split = surface->tile_split;
+ metadata->mtilea = surface->mtilea;
+ metadata->num_banks = surface->num_banks;
+ metadata->stride = surface->level[0].pitch_bytes;
+ metadata->scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
+}
+
+static void r600_dirty_all_framebuffer_states(struct r600_common_screen *rscreen)
+{
+ p_atomic_inc(&rscreen->dirty_fb_counter);
+}
+
+static void r600_eliminate_fast_color_clear(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex)
+{
+ struct pipe_context *ctx = rscreen->aux_context;
+
+ pipe_mutex_lock(rscreen->aux_context_lock);
+ ctx->flush_resource(ctx, &rtex->resource.b.b);
+ ctx->flush(ctx, NULL, 0);
+ pipe_mutex_unlock(rscreen->aux_context_lock);
+}
+
+static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex)
+{
+ if (!rtex->cmask.size)
+ return;
+
+ assert(rtex->resource.b.b.nr_samples <= 1);
+
+ /* Disable CMASK. */
+ memset(&rtex->cmask, 0, sizeof(rtex->cmask));
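+ /* The CB still reads the CMASK base address register, so keep it valid. */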
+ rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
+
+ if (rscreen->chip_class >= SI)
+ rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1);
+ else
+ rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);
+
+ if (rtex->cmask_buffer != &rtex->resource)
+ r600_resource_reference(&rtex->cmask_buffer, NULL);
+
+ /* Notify all contexts about the change. */
+ r600_dirty_all_framebuffer_states(rscreen);
+ p_atomic_inc(&rscreen->compressed_colortex_counter);
+}
+
+static bool r600_can_disable_dcc(struct r600_texture *rtex)
+{
+ /* We can't disable DCC if it can be written by another process. */
+ return rtex->dcc_offset &&
+ (!rtex->resource.is_shared ||
+ !(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
+}
+
+static bool r600_texture_discard_dcc(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex)
+{
+ if (!r600_can_disable_dcc(rtex))
+ return false;
+
+ /* Disable DCC. */
+ rtex->dcc_offset = 0;
+
+ /* Notify all contexts about the change. */
+ r600_dirty_all_framebuffer_states(rscreen);
+ return true;
+}
+
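+/* Unlike r600_texture_discard_dcc, this decompresses DCC first, so the
+ * contents stay valid for clients that read the texture without DCC.
+ */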
+bool r600_texture_disable_dcc(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex)
+{
+ struct r600_common_context *rctx =
+ (struct r600_common_context *)rscreen->aux_context;
+
+ if (!r600_can_disable_dcc(rtex))
+ return false;
+
+ /* Decompress DCC. */
+ pipe_mutex_lock(rscreen->aux_context_lock);
+ rctx->decompress_dcc(&rctx->b, rtex);
+ rctx->b.flush(&rctx->b, NULL, 0);
+ pipe_mutex_unlock(rscreen->aux_context_lock);
+
+ return r600_texture_discard_dcc(rscreen, rtex);
+}
+
+static void r600_degrade_tile_mode_to_linear(struct r600_common_context *rctx,
+ struct r600_texture *rtex,
+ bool invalidate_storage)
+{
+ struct pipe_screen *screen = rctx->b.screen;
+ struct r600_texture *new_tex;
+ struct pipe_resource templ = rtex->resource.b.b;
+ unsigned i;
+
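+ /* Ask r600_choose_tiling (via resource_create) for a linear layout. */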
+ templ.bind |= PIPE_BIND_LINEAR;
+
+ /* r600g doesn't react to dirty_tex_descriptor_counter */
+ if (rctx->chip_class < SI)
+ return;
+
+ if (rtex->resource.is_shared ||
+ rtex->surface.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED)
+ return;
+
+ /* This fails with MSAA, depth, and compressed textures. */
+ if (r600_choose_tiling(rctx->screen, &templ) !=
+ RADEON_SURF_MODE_LINEAR_ALIGNED)
+ return;
+
+ new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
+ if (!new_tex)
+ return;
+
+ /* Copy the pixels to the new texture. */
+ if (!invalidate_storage) {
+ for (i = 0; i <= templ.last_level; i++) {
+ struct pipe_box box;
+
+ u_box_3d(0, 0, 0,
+ u_minify(templ.width0, i), u_minify(templ.height0, i),
+ util_max_layer(&templ, i) + 1, &box);
+
+ rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
+ &rtex->resource.b.b, i, &box);
+ }
+ }
+
+ r600_texture_discard_cmask(rctx->screen, rtex);
+ r600_texture_discard_dcc(rctx->screen, rtex);
+
+ /* Replace the structure fields of rtex. */
+ rtex->resource.b.b.bind = templ.bind;
+ pb_reference(&rtex->resource.buf, new_tex->resource.buf);
+ rtex->resource.gpu_address = new_tex->resource.gpu_address;
+ rtex->resource.domains = new_tex->resource.domains;
+ rtex->size = new_tex->size;
+ rtex->surface = new_tex->surface;
+ rtex->non_disp_tiling = new_tex->non_disp_tiling;
+ rtex->cb_color_info = new_tex->cb_color_info;
+ rtex->cmask = new_tex->cmask; /* needed even without CMASK */
+
+ assert(!rtex->htile_buffer);
+ assert(!rtex->cmask.size);
+ assert(!rtex->fmask.size);
+ assert(!rtex->dcc_offset);
+ assert(!rtex->is_depth);
+
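+ /* rtex now owns the new buffer; drop the temporary texture. */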
+ pipe_resource_reference((struct pipe_resource**)&new_tex, NULL);
+
+ r600_dirty_all_framebuffer_states(rctx->screen);
+ p_atomic_inc(&rctx->screen->dirty_tex_descriptor_counter);
+}
+
+static boolean r600_texture_get_handle(struct pipe_screen* screen,
+ struct pipe_resource *resource,
+ struct winsys_handle *whandle,
+ unsigned usage)
+{
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+ struct r600_resource *res = (struct r600_resource*)resource;
+ struct r600_texture *rtex = (struct r600_texture*)resource;
+ struct radeon_bo_metadata metadata;
+ bool update_metadata = false;
+
+ /* This is not supported now, but it might be required for OpenCL
+ * interop in the future.
+ */
+ if (resource->target != PIPE_BUFFER &&
+ (resource->nr_samples > 1 || rtex->is_depth))
+ return false;
+
+ if (resource->target != PIPE_BUFFER) {
+ /* Since shader image stores don't support DCC on VI,
+ * disable it for external clients that want write
+ * access.
+ */
+ if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
+ if (r600_texture_disable_dcc(rscreen, rtex))
+ update_metadata = true;
+ }
+
+ if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
+ rtex->cmask.size) {
+ /* Eliminate fast clear (both CMASK and DCC) */
+ r600_eliminate_fast_color_clear(rscreen, rtex);
+
+ /* Disable CMASK if flush_resource isn't going
+ * to be called.
+ */
+ r600_texture_discard_cmask(rscreen, rtex);
+ }
+
+ /* Set metadata. */
+ if (!res->is_shared || update_metadata) {
+ r600_texture_init_metadata(rtex, &metadata);
+ if (rscreen->query_opaque_metadata)
+ rscreen->query_opaque_metadata(rscreen, rtex,
+ &metadata);
+
+ rscreen->ws->buffer_set_metadata(res->buf, &metadata);
+ }
+ }
+
+ if (res->is_shared) {
+ /* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
+ * doesn't set it.
+ */
+ res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
+ if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
+ res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
+ } else {
+ res->is_shared = true;
+ res->external_usage = usage;
+ }
- rscreen->ws->buffer_set_tiling(resource->buf,
- NULL,
- surface->level[0].mode >= RADEON_SURF_MODE_1D ?
- RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
- surface->level[0].mode >= RADEON_SURF_MODE_2D ?
- RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
- surface->pipe_config,
- surface->bankw, surface->bankh,
- surface->tile_split,
- surface->stencil_tile_split,
- surface->mtilea, surface->num_banks,
- surface->level[0].pitch_bytes,
- (surface->flags & RADEON_SURF_SCANOUT) != 0);
-
- return rscreen->ws->buffer_get_handle(resource->buf,
- surface->level[0].pitch_bytes, whandle);
+ return rscreen->ws->buffer_get_handle(res->buf,
+ rtex->surface.level[0].pitch_bytes,
+ rtex->surface.level[0].offset,
+ rtex->surface.level[0].slice_size,
+ whandle);
}
static void r600_texture_destroy(struct pipe_screen *screen,
if (rtex->flushed_depth_texture)
pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
- pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
+ r600_resource_reference(&rtex->htile_buffer, NULL);
if (rtex->cmask_buffer != &rtex->resource) {
- pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
+ r600_resource_reference(&rtex->cmask_buffer, NULL);
}
- pipe_resource_reference((struct pipe_resource**)&rtex->dcc_buffer, NULL);
pb_reference(&resource->buf, NULL);
FREE(rtex);
}
r600_texture_get_fmask_info(rscreen, rtex,
rtex->resource.b.b.nr_samples, &rtex->fmask);
- rtex->fmask.offset = align(rtex->size, rtex->fmask.alignment);
+ rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
rtex->size = rtex->fmask.offset + rtex->fmask.size;
}
unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
unsigned element_bits = 4;
unsigned cmask_cache_bits = 1024;
- unsigned num_pipes = rscreen->tiling_info.num_channels;
- unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
+ unsigned num_pipes = rscreen->info.num_tile_pipes;
+ unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
struct r600_texture *rtex,
struct r600_cmask_info *out)
{
- unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
- unsigned num_pipes = rscreen->tiling_info.num_channels;
+ unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
+ unsigned num_pipes = rscreen->info.num_tile_pipes;
unsigned cl_width, cl_height;
switch (num_pipes) {
r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
}
- rtex->cmask.offset = align(rtex->size, rtex->cmask.alignment);
+ rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
rtex->size = rtex->cmask.offset + rtex->cmask.size;
if (rscreen->chip_class >= SI)
rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
else
rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
-}
-
-static void vi_texture_alloc_dcc_separate(struct r600_common_screen *rscreen,
- struct r600_texture *rtex)
-{
- if (rscreen->debug_flags & DBG_NO_DCC)
- return;
-
- /* TODO: DCC is broken on Stoney */
- if (rscreen->family == CHIP_STONEY)
- return;
- rtex->dcc_buffer = (struct r600_resource *)
- r600_aligned_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
- PIPE_USAGE_DEFAULT, rtex->surface.dcc_size, rtex->surface.dcc_alignment);
- if (rtex->dcc_buffer == NULL) {
- return;
- }
-
- r600_screen_clear_buffer(rscreen, &rtex->dcc_buffer->b.b, 0, rtex->surface.dcc_size,
- 0xFFFFFFFF, true);
-
- rtex->cb_color_info |= VI_S_028C70_DCC_ENABLE(1);
+ p_atomic_inc(&rscreen->compressed_colortex_counter);
}
static unsigned r600_texture_get_htile_size(struct r600_common_screen *rscreen,
{
unsigned cl_width, cl_height, width, height;
unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
- unsigned num_pipes = rscreen->tiling_info.num_channels;
+ unsigned num_pipes = rscreen->info.num_tile_pipes;
if (rscreen->chip_class <= EVERGREEN &&
rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 38)
return 0;
+ /* Overalign HTILE on P2 configs to work around GPU hangs in
+ * piglit/depthstencil-render-miplevels 585.
+ *
+ * This has been confirmed to help Kabini & Stoney, where the hangs
+ * are always reproducible. I think I have seen the test hang
+ * on Carrizo too, though it was very rare there.
+ */
+ if (rscreen->chip_class >= CIK && num_pipes < 4)
+ num_pipes = 4;
+
switch (num_pipes) {
case 1:
cl_width = 32;
slice_elements = (width * height) / (8 * 8);
slice_bytes = slice_elements * 4;
- pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
+ pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
base_align = num_pipes * pipe_interleave_bytes;
rtex->htile.pitch = width;
R600_ERR("Failed to create buffer object for htile buffer.\n");
} else {
r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0,
- htile_size, 0, true);
+ htile_size, 0, R600_COHERENCY_NONE);
}
}
(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);
if (rtex->fmask.size)
- fprintf(f, " FMask: offset=%u, size=%u, alignment=%u, pitch_in_pixels=%u, "
+ fprintf(f, " FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);
if (rtex->cmask.size)
- fprintf(f, " CMask: offset=%u, size=%u, alignment=%u, pitch=%u, "
+ fprintf(f, " CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch=%u, "
"height=%u, xalign=%u, yalign=%u, slice_tile_max=%u\n",
rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
rtex->cmask.pitch, rtex->cmask.height, rtex->cmask.xalign,
rtex->htile_buffer->buf->alignment, rtex->htile.pitch,
rtex->htile.height, rtex->htile.xalign, rtex->htile.yalign);
- if (rtex->dcc_buffer) {
- fprintf(f, " DCC: size=%u, alignment=%u\n",
- rtex->dcc_buffer->b.b.width0,
- rtex->dcc_buffer->buf->alignment);
+ if (rtex->dcc_offset) {
+ fprintf(f, " DCC: offset=%"PRIu64", size=%"PRIu64", alignment=%"PRIu64"\n",
+ rtex->dcc_offset, rtex->surface.dcc_size,
+ rtex->surface.dcc_alignment);
for (i = 0; i <= rtex->surface.last_level; i++)
- fprintf(f, " DCCLevel[%i]: offset=%"PRIu64"\n",
- i, rtex->surface.level[i].dcc_offset);
+ fprintf(f, " DCCLevel[%i]: enabled=%u, offset=%"PRIu64", "
+ "fast_clear_size=%"PRIu64"\n",
+ i, rtex->surface.level[i].dcc_enabled,
+ rtex->surface.level[i].dcc_offset,
+ rtex->surface.level[i].dcc_fast_clear_size);
}
for (i = 0; i <= rtex->surface.last_level; i++)
r600_texture_create_object(struct pipe_screen *screen,
const struct pipe_resource *base,
unsigned pitch_in_bytes_override,
+ unsigned offset,
struct pb_buffer *buf,
struct radeon_surf *surface)
{
rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));
rtex->surface = *surface;
- if (r600_setup_surface(screen, rtex, pitch_in_bytes_override)) {
+ if (r600_setup_surface(screen, rtex, pitch_in_bytes_override, offset)) {
FREE(rtex);
return NULL;
}
* This must be done after r600_setup_surface.
* Applies to R600-Cayman. */
rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;
+ /* Applies to GCN. */
+ rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;
if (rtex->is_depth) {
if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
return NULL;
}
}
- if (rtex->surface.dcc_size)
- vi_texture_alloc_dcc_separate(rscreen, rtex);
+
+ /* Shared textures must always set up DCC here.
+ * If it's not present, it will be disabled by
+ * apply_opaque_metadata later.
+ */
+ if (rtex->surface.dcc_size &&
+ (buf || !(rscreen->debug_flags & DBG_NO_DCC))) {
+ /* Reserve space for the DCC buffer. */
+ rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
+ rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
+ }
}
/* Now create the backing buffer. */
if (!buf) {
if (!r600_init_resource(rscreen, resource, rtex->size,
- rtex->surface.bo_alignment, TRUE)) {
+ rtex->surface.bo_alignment)) {
FREE(rtex);
return NULL;
}
} else {
resource->buf = buf;
- resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
- resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->cs_buf);
- resource->domains = rscreen->ws->buffer_get_initial_domain(resource->cs_buf);
+ resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
+ resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
}
if (rtex->cmask.size) {
/* Initialize the cmask to 0xCC (= compressed state). */
r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
rtex->cmask.offset, rtex->cmask.size,
- 0xCCCCCCCC, true);
+ 0xCCCCCCCC, R600_COHERENCY_NONE);
+ }
+
+ /* Initialize DCC only if the texture is not being imported. */
+ if (!buf && rtex->dcc_offset) {
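+ /* 0xFFFFFFFF is the uncompressed DCC state. */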
+ r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
+ rtex->dcc_offset,
+ rtex->surface.dcc_size,
+ 0xFFFFFFFF, R600_COHERENCY_NONE);
}
/* Initialize the CMASK base register value. */
force_tiling = true;
/* Handle common candidates for the linear mode.
- * Compressed textures must always be tiled. */
- if (!force_tiling && !util_format_is_compressed(templ->format)) {
- /* Not everything can be linear, so we cannot enforce it
- * for all textures. */
- if ((rscreen->debug_flags & DBG_NO_TILING) &&
- (!util_format_is_depth_or_stencil(templ->format) ||
- !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)))
+ * Compressed textures and DB surfaces must always be tiled.
+ */
+ if (!force_tiling && !util_format_is_compressed(templ->format) &&
+ (!util_format_is_depth_or_stencil(templ->format) ||
+ templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)) {
+ if (rscreen->debug_flags & DBG_NO_TILING)
return RADEON_SURF_MODE_LINEAR_ALIGNED;
/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
if (r) {
return NULL;
}
- return (struct pipe_resource *)r600_texture_create_object(screen, templ,
+ return (struct pipe_resource *)r600_texture_create_object(screen, templ, 0,
0, NULL, &surface);
}
static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
const struct pipe_resource *templ,
- struct winsys_handle *whandle)
+ struct winsys_handle *whandle,
+ unsigned usage)
{
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
struct pb_buffer *buf = NULL;
- unsigned stride = 0;
+ unsigned stride = 0, offset = 0;
unsigned array_mode;
- enum radeon_bo_layout micro, macro;
struct radeon_surf surface;
- bool scanout;
int r;
+ struct radeon_bo_metadata metadata = {};
+ struct r600_texture *rtex;
/* Support only 2D textures without mipmaps */
if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
templ->depth0 != 1 || templ->last_level != 0)
return NULL;
- buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride);
+ buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, &offset);
if (!buf)
return NULL;
- rscreen->ws->buffer_get_tiling(buf, &micro, &macro,
- &surface.bankw, &surface.bankh,
- &surface.tile_split,
- &surface.stencil_tile_split,
- &surface.mtilea, &scanout);
+ rscreen->ws->buffer_get_metadata(buf, &metadata);
+
+ surface.pipe_config = metadata.pipe_config;
+ surface.bankw = metadata.bankw;
+ surface.bankh = metadata.bankh;
+ surface.tile_split = metadata.tile_split;
+ surface.mtilea = metadata.mtilea;
+ surface.num_banks = metadata.num_banks;
- if (macro == RADEON_LAYOUT_TILED)
+ if (metadata.macrotile == RADEON_LAYOUT_TILED)
array_mode = RADEON_SURF_MODE_2D;
- else if (micro == RADEON_LAYOUT_TILED)
+ else if (metadata.microtile == RADEON_LAYOUT_TILED)
array_mode = RADEON_SURF_MODE_1D;
else
array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
return NULL;
}
- if (scanout)
+ if (metadata.scanout)
surface.flags |= RADEON_SURF_SCANOUT;
- return (struct pipe_resource *)r600_texture_create_object(screen, templ,
- stride, buf, &surface);
+ rtex = r600_texture_create_object(screen, templ, stride,
+ offset, buf, &surface);
+ if (!rtex)
+ return NULL;
+
+ rtex->resource.is_shared = true;
+ rtex->resource.external_usage = usage;
+
+ if (rscreen->apply_opaque_metadata)
+ rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);
+
+ return &rtex->resource.b.b;
}
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
return false;
}
- (*flushed_depth_texture)->is_flushing_texture = TRUE;
+ (*flushed_depth_texture)->is_flushing_texture = true;
(*flushed_depth_texture)->non_disp_tiling = false;
return true;
}
res->flags = flags;
/* We must set the correct texture target and dimensions for a 3D box. */
- if (box->depth > 1 && util_max_layer(orig, level) > 0)
- res->target = orig->target;
- else
- res->target = PIPE_TEXTURE_2D;
-
- switch (res->target) {
- case PIPE_TEXTURE_1D_ARRAY:
- case PIPE_TEXTURE_2D_ARRAY:
- case PIPE_TEXTURE_CUBE_ARRAY:
+ if (box->depth > 1 && util_max_layer(orig, level) > 0) {
+ res->target = PIPE_TEXTURE_2D_ARRAY;
res->array_size = box->depth;
- break;
- case PIPE_TEXTURE_3D:
- res->depth0 = box->depth;
- break;
- default:;
+ } else {
+ res->target = PIPE_TEXTURE_2D;
}
}
+static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex,
+ unsigned transfer_usage,
+ const struct pipe_box *box)
+{
+ /* r600g doesn't react to dirty_tex_descriptor_counter */
+ return rscreen->chip_class >= SI &&
+ !rtex->resource.is_shared &&
+ !(transfer_usage & PIPE_TRANSFER_READ) &&
+ rtex->resource.b.b.last_level == 0 &&
+ util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
+ box->x, box->y, box->z,
+ box->width, box->height,
+ box->depth);
+}
+
+static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
+ struct r600_texture *rtex)
+{
+ struct r600_common_screen *rscreen = rctx->screen;
+
+ /* There is no point in discarding depth and tiled buffers. */
+ assert(!rtex->is_depth);
+ assert(rtex->surface.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED);
+
+ /* Reallocate the buffer in the same pipe_resource. */
+ r600_init_resource(rscreen, &rtex->resource, rtex->size,
+ rtex->surface.bo_alignment);
+
+ /* Initialize the CMASK base address (needed even without CMASK). */
+ rtex->cmask.base_address_reg =
+ (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
+
+ r600_dirty_all_framebuffer_states(rscreen);
+ p_atomic_inc(&rscreen->dirty_tex_descriptor_counter);
+
+ rctx->num_alloc_tex_transfer_bytes += rtex->size;
+}
+
static void *r600_texture_transfer_map(struct pipe_context *ctx,
struct pipe_resource *texture,
unsigned level,
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_texture *rtex = (struct r600_texture*)texture;
struct r600_transfer *trans;
- boolean use_staging_texture = FALSE;
struct r600_resource *buf;
unsigned offset = 0;
char *map;
+ bool use_staging_texture = false;
- /* We cannot map a tiled texture directly because the data is
- * in a different order, therefore we do detiling using a blit.
- *
- * Also, use a temporary in GTT memory for read transfers, as
- * the CPU is much happier reading out of cached system memory
- * than uncached VRAM.
- */
- if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
- use_staging_texture = TRUE;
- } else if ((usage & PIPE_TRANSFER_READ) && !(usage & PIPE_TRANSFER_MAP_DIRECTLY) &&
- (rtex->resource.domains == RADEON_DOMAIN_VRAM)) {
- /* Untiled buffers in VRAM, which is slow for CPU reads */
- use_staging_texture = TRUE;
- } else if (!(usage & PIPE_TRANSFER_READ) &&
- (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
- !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
- /* Use a staging texture for uploads if the underlying BO is busy. */
- use_staging_texture = TRUE;
- }
+ assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
- if (texture->flags & R600_RESOURCE_FLAG_TRANSFER) {
- use_staging_texture = FALSE;
- }
+ /* Depth textures use staging unconditionally. */
+ if (!rtex->is_depth) {
+ /* Degrade the tile mode if we get too many transfers on APUs.
+ * On dGPUs, the staging texture is always faster.
+ * Only count uploads that are at least 4x4 pixels large.
+ */
+ if (!rctx->screen->info.has_dedicated_vram &&
+ level == 0 &&
+ box->width >= 4 && box->height >= 4 &&
+ p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
+ bool can_invalidate =
+ r600_can_invalidate_texture(rctx->screen, rtex,
+ usage, box);
+
+ r600_degrade_tile_mode_to_linear(rctx, rtex,
+ can_invalidate);
+ }
- if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
- return NULL;
+ /* Tiled textures need to be converted into a linear texture for CPU
+ * access. The staging texture is always linear and is placed in GART.
+ *
+ * Reading from VRAM is slow; always use the staging texture in
+ * this case.
+ *
+ * Use the staging texture for uploads if the underlying BO
+ * is busy.
+ */
+ if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D)
+ use_staging_texture = true;
+ else if (usage & PIPE_TRANSFER_READ)
+ use_staging_texture = (rtex->resource.domains &
+ RADEON_DOMAIN_VRAM) != 0;
+ /* Write & linear only: */
+ else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
+ RADEON_USAGE_READWRITE) ||
+ !rctx->ws->buffer_wait(rtex->resource.buf, 0,
+ RADEON_USAGE_READWRITE)) {
+ /* It's busy. */
+ if (r600_can_invalidate_texture(rctx->screen, rtex,
+ usage, box))
+ r600_texture_invalidate_storage(rctx, rtex);
+ else
+ use_staging_texture = true;
+ }
}
trans = CALLOC_STRUCT(r600_transfer);
trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
trans->staging = (struct r600_resource*)staging_depth;
+ buf = trans->staging;
} else if (use_staging_texture) {
struct pipe_resource resource;
struct r600_texture *staging;
trans->staging = &staging->resource;
trans->transfer.stride = staging->surface.level[0].pitch_bytes;
trans->transfer.layer_stride = staging->surface.level[0].slice_size;
- if (usage & PIPE_TRANSFER_READ) {
+
+ if (usage & PIPE_TRANSFER_READ)
r600_copy_to_staging_texture(ctx, trans);
- }
+ else
+ usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+
+ buf = trans->staging;
} else {
/* the resource is mapped directly */
trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
offset = r600_texture_get_offset(rtex, level, box);
- }
-
- if (trans->staging) {
- buf = trans->staging;
- if (!rtex->is_depth && !(usage & PIPE_TRANSFER_READ))
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
- } else {
buf = &rtex->resource;
}
if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
- pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
+ r600_resource_reference(&trans->staging, NULL);
FREE(trans);
return NULL;
}
static void r600_texture_transfer_unmap(struct pipe_context *ctx,
struct pipe_transfer* transfer)
{
+ struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct pipe_resource *texture = transfer->resource;
struct r600_texture *rtex = (struct r600_texture*)texture;
}
}
- if (rtransfer->staging)
- pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
+ if (rtransfer->staging) {
+ rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
+ r600_resource_reference(&rtransfer->staging, NULL);
+ }
+
+ /* Heuristic for {upload, draw, upload, draw, ..}:
+ *
+ * Flush the gfx IB if we've allocated too much texture storage.
+ *
+ * The idea is that we don't want to build IBs that use too much
+ * memory and put pressure on the kernel memory manager and we also
+ * want to make temporary and invalidated buffers go idle ASAP to
+ * decrease the total memory usage or make them reusable. The memory
+ * usage will be slightly higher than given here because of the buffer
+ * cache in the winsys.
+ *
+ * The result is that the kernel memory manager is never a bottleneck.
+ */
+ if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
+ rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ rctx->num_alloc_tex_transfer_bytes = 0;
+ }
FREE(transfer);
}
const struct pipe_surface *templ,
unsigned width, unsigned height)
{
+ struct r600_texture *rtex = (struct r600_texture*)texture;
struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
if (!surface)
surface->base.width = width;
surface->base.height = height;
surface->base.u = templ->u;
+ surface->level_info = &rtex->surface.level[templ->u.tex.level];
return &surface->base;
}
const struct pipe_surface *templ)
{
unsigned level = templ->u.tex.level;
+ unsigned width = u_minify(tex->width0, level);
+ unsigned height = u_minify(tex->height0, level);
+
+ if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
+ const struct util_format_description *tex_desc
+ = util_format_description(tex->format);
+ const struct util_format_description *templ_desc
+ = util_format_description(templ->format);
+
+ assert(tex_desc->block.bits == templ_desc->block.bits);
+
+ /* Adjust size of surface if and only if the block width or
+ * height is changed. */
+ if (tex_desc->block.width != templ_desc->block.width ||
+ tex_desc->block.height != templ_desc->block.height) {
+ unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
+ unsigned nblks_y = util_format_get_nblocksy(tex->format, height);
+
+ width = nblks_x * templ_desc->block.width;
+ height = nblks_y * templ_desc->block.height;
+ }
+ }
- return r600_create_surface_custom(pipe, tex, templ,
- u_minify(tex->width0, level),
- u_minify(tex->height0, level));
+ return r600_create_surface_custom(pipe, tex, templ, width, height);
}
static void r600_surface_destroy(struct pipe_context *pipe,
struct pipe_surface *surface)
{
struct r600_surface *surf = (struct r600_surface*)surface;
- pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
- pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
+ r600_resource_reference(&surf->cb_buffer_fmask, NULL);
+ r600_resource_reference(&surf->cb_buffer_cmask, NULL);
pipe_resource_reference(&surface->texture, NULL);
FREE(surface);
}
-unsigned r600_translate_colorswap(enum pipe_format format)
+unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
const struct util_format_description *desc = util_format_description(format);
-#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == UTIL_FORMAT_SWIZZLE_##swz)
+#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)
if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
return V_0280A0_SWAP_STD;
else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
(HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
(HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
- return V_0280A0_SWAP_STD_REV; /* YX__ */
+ /* YX__ */
+ return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
return V_0280A0_SWAP_ALT; /* X__Y */
else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
break;
case 3:
if (HAS_SWIZZLE(0,X))
- return V_0280A0_SWAP_STD; /* XYZ */
+ return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
else if (HAS_SWIZZLE(0,Z))
return V_0280A0_SWAP_STD_REV; /* ZYX */
break;
case 4:
/* check the middle channels, the 1st and 4th channel can be NONE */
- if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z))
+ if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
return V_0280A0_SWAP_STD; /* XYZW */
- else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y))
+ } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
return V_0280A0_SWAP_STD_REV; /* WZYX */
- else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X))
+ } else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
return V_0280A0_SWAP_ALT; /* ZYXW */
- else if (HAS_SWIZZLE(1,X) && HAS_SWIZZLE(2,Y))
- return V_0280A0_SWAP_ALT_REV; /* WXYZ */
+ } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
+ /* YZWX */
+ if (desc->is_array)
+ return V_0280A0_SWAP_ALT_REV;
+ else
+ return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
+ }
break;
}
return ~0U;
surface_format == PIPE_FORMAT_B5G6R5_SRGB) {
extra_channel = -1;
} else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
- if(r600_translate_colorswap(surface_format) <= 1)
+ if (r600_translate_colorswap(surface_format, false) <= 1)
extra_channel = desc->nr_channels - 1;
else
extra_channel = 0;
return;
for (i = 0; i < 4; ++i) {
- int index = desc->swizzle[i] - UTIL_FORMAT_SWIZZLE_X;
+ int index = desc->swizzle[i] - PIPE_SWIZZLE_X;
- if (desc->swizzle[i] < UTIL_FORMAT_SWIZZLE_X ||
- desc->swizzle[i] > UTIL_FORMAT_SWIZZLE_W)
+ if (desc->swizzle[i] < PIPE_SWIZZLE_X ||
+ desc->swizzle[i] > PIPE_SWIZZLE_W)
continue;
if (util_format_is_pure_sint(surface_format)) {
for (int i = 0; i < 4; ++i)
if (values[i] != main_value &&
- desc->swizzle[i] - UTIL_FORMAT_SWIZZLE_X != extra_channel &&
- desc->swizzle[i] >= UTIL_FORMAT_SWIZZLE_X &&
- desc->swizzle[i] <= UTIL_FORMAT_SWIZZLE_W)
+ desc->swizzle[i] - PIPE_SWIZZLE_X != extra_channel &&
+ desc->swizzle[i] >= PIPE_SWIZZLE_X &&
+ desc->swizzle[i] <= PIPE_SWIZZLE_W)
return;
*clear_words_needed = false;
*reset_value |= 0x40404040U;
}
+void vi_dcc_clear_level(struct r600_common_context *rctx,
+ struct r600_texture *rtex,
+ unsigned level, unsigned clear_value)
+{
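+ /* On VI, DCC is carved out of the texture's own buffer at dcc_offset. */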
+ struct pipe_resource *dcc_buffer = &rtex->resource.b.b;
+ uint64_t dcc_offset = rtex->dcc_offset +
+ rtex->surface.level[level].dcc_offset;
+
+ assert(rtex->dcc_offset && rtex->surface.level[level].dcc_enabled);
+
+ rctx->clear_buffer(&rctx->b, dcc_buffer, dcc_offset,
+ rtex->surface.level[level].dcc_fast_clear_size,
+ clear_value, R600_COHERENCY_CB_META);
+}
+
+/* Set the same micro tile mode as the destination of the last MSAA resolve.
+ * This allows hitting the MSAA resolve fast path, which requires that both
+ * src and dst micro tile modes match.
+ */
+static void si_set_optimal_micro_tile_mode(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex)
+{
+ if (rtex->resource.is_shared ||
+ rtex->surface.nsamples <= 1 ||
+ rtex->surface.micro_tile_mode == rtex->last_msaa_resolve_target_micro_mode)
+ return;
+
+ assert(rtex->surface.level[0].mode == RADEON_SURF_MODE_2D);
+ assert(rtex->surface.last_level == 0);
+
+ /* These magic numbers were copied from addrlib. It doesn't use any
+ * definitions for them either. They are all 2D_TILED_THIN1 modes with
+ * different bpp and micro tile mode.
+ */
+ if (rscreen->chip_class >= CIK) {
+ switch (rtex->last_msaa_resolve_target_micro_mode) {
+ case 0: /* displayable */
+ rtex->surface.tiling_index[0] = 10;
+ break;
+ case 1: /* thin */
+ rtex->surface.tiling_index[0] = 14;
+ break;
+ case 3: /* rotated */
+ rtex->surface.tiling_index[0] = 28;
+ break;
+ default: /* depth, thick */
+ assert(!"unexpected micro mode");
+ return;
+ }
+ } else { /* SI */
+ switch (rtex->last_msaa_resolve_target_micro_mode) {
+ case 0: /* displayable */
+ switch (rtex->surface.bpe) {
+ case 8:
+ rtex->surface.tiling_index[0] = 10;
+ break;
+ case 16:
+ rtex->surface.tiling_index[0] = 11;
+ break;
+ default: /* 32, 64 */
+ rtex->surface.tiling_index[0] = 12;
+ break;
+ }
+ break;
+ case 1: /* thin */
+ switch (rtex->surface.bpe) {
+ case 8:
+ rtex->surface.tiling_index[0] = 14;
+ break;
+ case 16:
+ rtex->surface.tiling_index[0] = 15;
+ break;
+ case 32:
+ rtex->surface.tiling_index[0] = 16;
+ break;
+ default: /* 64, 128 */
+ rtex->surface.tiling_index[0] = 17;
+ break;
+ }
+ break;
+ default: /* depth, thick */
+ assert(!"unexpected micro mode");
+ return;
+ }
+ }
+
+ rtex->surface.micro_tile_mode = rtex->last_msaa_resolve_target_micro_mode;
+
+ p_atomic_inc(&rscreen->dirty_fb_counter);
+ p_atomic_inc(&rscreen->dirty_tex_descriptor_counter);
+}
+
void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
struct pipe_framebuffer_state *fb,
struct r600_atom *fb_state,
{
int i;
+ /* Fast color clears are broken on big-endian, so disable this path there for now */
+#ifdef PIPE_ARCH_BIG_ENDIAN
+ return;
+#endif
+
if (rctx->render_cond)
return;
continue;
}
+ /* shared textures can't use fast clear without an explicit flush,
+ * because there is no way to communicate the clear color among
+ * all clients
+ */
+ if (tex->resource.is_shared &&
+ !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
+ continue;
+
/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
rctx->chip_class >= CIK &&
continue;
}
- if (tex->dcc_buffer) {
+ if (tex->dcc_offset && tex->surface.level[0].dcc_enabled) {
uint32_t reset_value;
bool clear_words_needed;
if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
continue;
- vi_get_fast_clear_parameters(fb->cbufs[i]->format, color, &reset_value, &clear_words_needed);
+ /* We can change the micro tile mode before a full clear. */
+ if (rctx->screen->chip_class >= SI)
+ si_set_optimal_micro_tile_mode(rctx->screen, tex);
- rctx->clear_buffer(&rctx->b, &tex->dcc_buffer->b.b,
- 0, tex->surface.dcc_size, reset_value, true);
+ vi_get_fast_clear_parameters(fb->cbufs[i]->format, color, &reset_value, &clear_words_needed);
+ vi_dcc_clear_level(rctx, tex, 0, reset_value);
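+ /* If the cleared value can't be sampled directly, flag the level for decompression. */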
if (clear_words_needed)
tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
} else {
+ /* Stoney/RB+ doesn't work with CMASK fast clear. */
+ if (rctx->family == CHIP_STONEY)
+ continue;
+
/* ensure CMASK is enabled */
r600_texture_alloc_cmask_separate(rctx->screen, tex);
if (tex->cmask.size == 0) {
continue;
}
+ /* We can change the micro tile mode before a full clear. */
+ if (rctx->screen->chip_class >= SI)
+ si_set_optimal_micro_tile_mode(rctx->screen, tex);
+
/* Do the fast clear. */
rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
- tex->cmask.offset, tex->cmask.size, 0, true);
+ tex->cmask.offset, tex->cmask.size, 0,
+ R600_COHERENCY_CB_META);
tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
}