radeonsi: start using u_log_context for debugging
[mesa.git] / src / gallium / drivers / radeon / r600_texture.c
index 364ed4078cd6728c2c3ef01d3e95e26328914639..22850e0c87d43e3b6667052cbd926af54960cc4d 100644 (file)
@@ -28,6 +28,7 @@
 #include "r600_cs.h"
 #include "r600_query.h"
 #include "util/u_format.h"
+#include "util/u_log.h"
 #include "util/u_memory.h"
 #include "util/u_pack_color.h"
 #include "util/u_surface.h"
@@ -72,8 +73,8 @@ bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
         *   src: Use the 3D path. DCC decompression is expensive.
         *   dst: Use the 3D path to compress the pixels with DCC.
         */
-       if ((rsrc->dcc_offset && src_level < rsrc->surface.num_dcc_levels) ||
-           (rdst->dcc_offset && dst_level < rdst->surface.num_dcc_levels))
+       if (vi_dcc_enabled(rsrc, src_level) ||
+           vi_dcc_enabled(rdst, dst_level))
                return false;
 
        /* CMASK as:
@@ -177,13 +178,42 @@ static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600
                       src, 0, &sbox);
 }
 
-static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
-                                       const struct pipe_box *box)
+static unsigned r600_texture_get_offset(struct r600_common_screen *rscreen,
+                                       struct r600_texture *rtex, unsigned level,
+                                       const struct pipe_box *box,
+                                       unsigned *stride,
+                                       unsigned *layer_stride)
 {
-       return rtex->surface.level[level].offset +
-              box->z * rtex->surface.level[level].slice_size +
-              box->y / rtex->surface.blk_h * rtex->surface.level[level].pitch_bytes +
-              box->x / rtex->surface.blk_w * rtex->surface.bpe;
+       if (rscreen->chip_class >= GFX9) {
+               *stride = rtex->surface.u.gfx9.surf_pitch * rtex->surface.bpe;
+               *layer_stride = rtex->surface.u.gfx9.surf_slice_size;
+
+               if (!box)
+                       return 0;
+
+               /* Each texture is an array of slices. Each slice is an array
+                * of mipmap levels. */
+               return box->z * rtex->surface.u.gfx9.surf_slice_size +
+                      rtex->surface.u.gfx9.offset[level] +
+                      (box->y / rtex->surface.blk_h *
+                       rtex->surface.u.gfx9.surf_pitch +
+                       box->x / rtex->surface.blk_w) * rtex->surface.bpe;
+       } else {
+               *stride = rtex->surface.u.legacy.level[level].nblk_x *
+                         rtex->surface.bpe;
+               *layer_stride = rtex->surface.u.legacy.level[level].slice_size;
+
+               if (!box)
+                       return rtex->surface.u.legacy.level[level].offset;
+
+               /* Each texture is an array of mipmap levels. Each level is
+                * an array of slices. */
+               return rtex->surface.u.legacy.level[level].offset +
+                      box->z * rtex->surface.u.legacy.level[level].slice_size +
+                      (box->y / rtex->surface.blk_h *
+                       rtex->surface.u.legacy.level[level].nblk_x +
+                       box->x / rtex->surface.blk_w) * rtex->surface.bpe;
+       }
 }
 
 static int r600_init_surface(struct r600_common_screen *rscreen,
@@ -211,22 +241,23 @@ static int r600_init_surface(struct r600_common_screen *rscreen,
                bpe = 4; /* stencil is allocated separately on evergreen */
        } else {
                bpe = util_format_get_blocksize(ptex->format);
-               /* align byte per element on dword */
-               if (bpe == 3) {
-                       bpe = 4;
-               }
+               assert(util_is_power_of_two(bpe));
        }
 
        if (!is_flushed_depth && is_depth) {
                flags |= RADEON_SURF_ZBUFFER;
 
                if (tc_compatible_htile &&
-                   array_mode == RADEON_SURF_MODE_2D) {
+                   (rscreen->chip_class >= GFX9 ||
+                    array_mode == RADEON_SURF_MODE_2D)) {
                        /* TC-compatible HTILE only supports Z32_FLOAT.
-                        * Promote Z16 to Z32. DB->CB copies will convert
+                        * GFX9 also supports Z16_UNORM.
+                        * On VI, promote Z16 to Z32. DB->CB copies will convert
                         * the format for transfers.
                         */
-                       bpe = 4;
+                       if (rscreen->chip_class == VI)
+                               bpe = 4;
+
                        flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
                }
 
@@ -250,8 +281,12 @@ static int r600_init_surface(struct r600_common_screen *rscreen,
                flags |= RADEON_SURF_SCANOUT;
        }
 
+       if (ptex->bind & PIPE_BIND_SHARED)
+               flags |= RADEON_SURF_SHAREABLE;
        if (is_imported)
-               flags |= RADEON_SURF_IMPORTED;
+               flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
+       if (!(ptex->flags & R600_RESOURCE_FLAG_FORCE_TILING))
+               flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;
 
        r = rscreen->ws->surface_init(rscreen->ws, ptex, flags, bpe,
                                      array_mode, surface);
@@ -259,45 +294,86 @@ static int r600_init_surface(struct r600_common_screen *rscreen,
                return r;
        }
 
-       if (pitch_in_bytes_override && pitch_in_bytes_override != surface->level[0].pitch_bytes) {
-               /* old ddx on evergreen over estimate alignment for 1d, only 1 level
-                * for those
-                */
-               surface->level[0].nblk_x = pitch_in_bytes_override / bpe;
-               surface->level[0].pitch_bytes = pitch_in_bytes_override;
-               surface->level[0].slice_size = pitch_in_bytes_override * surface->level[0].nblk_y;
-       }
+       if (rscreen->chip_class >= GFX9) {
+               assert(!pitch_in_bytes_override ||
+                      pitch_in_bytes_override == surface->u.gfx9.surf_pitch * bpe);
+               surface->u.gfx9.surf_offset = offset;
+       } else {
+               if (pitch_in_bytes_override &&
+                   pitch_in_bytes_override != surface->u.legacy.level[0].nblk_x * bpe) {
+                       /* old ddx on evergreen over estimate alignment for 1d, only 1 level
+                        * for those
+                        */
+                       surface->u.legacy.level[0].nblk_x = pitch_in_bytes_override / bpe;
+                       surface->u.legacy.level[0].slice_size = pitch_in_bytes_override *
+                                                               surface->u.legacy.level[0].nblk_y;
+               }
 
-       if (offset) {
-               for (i = 0; i < ARRAY_SIZE(surface->level); ++i)
-                       surface->level[i].offset += offset;
+               if (offset) {
+                       for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
+                               surface->u.legacy.level[i].offset += offset;
+               }
        }
        return 0;
 }
 
-static void r600_texture_init_metadata(struct r600_texture *rtex,
+static void r600_texture_init_metadata(struct r600_common_screen *rscreen,
+                                      struct r600_texture *rtex,
                                       struct radeon_bo_metadata *metadata)
 {
        struct radeon_surf *surface = &rtex->surface;
 
        memset(metadata, 0, sizeof(*metadata));
-       metadata->microtile = surface->level[0].mode >= RADEON_SURF_MODE_1D ?
-                                  RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
-       metadata->macrotile = surface->level[0].mode >= RADEON_SURF_MODE_2D ?
-                                  RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
-       metadata->pipe_config = surface->pipe_config;
-       metadata->bankw = surface->bankw;
-       metadata->bankh = surface->bankh;
-       metadata->tile_split = surface->tile_split;
-       metadata->mtilea = surface->mtilea;
-       metadata->num_banks = surface->num_banks;
-       metadata->stride = surface->level[0].pitch_bytes;
-       metadata->scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
+
+       if (rscreen->chip_class >= GFX9) {
+               metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
+       } else {
+               metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
+                                          RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
+               metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
+                                          RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
+               metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
+               metadata->u.legacy.bankw = surface->u.legacy.bankw;
+               metadata->u.legacy.bankh = surface->u.legacy.bankh;
+               metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
+               metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
+               metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
+               metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
+               metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
+       }
 }
 
-static void r600_dirty_all_framebuffer_states(struct r600_common_screen *rscreen)
+static void r600_surface_import_metadata(struct r600_common_screen *rscreen,
+                                        struct radeon_surf *surf,
+                                        struct radeon_bo_metadata *metadata,
+                                        enum radeon_surf_mode *array_mode,
+                                        bool *is_scanout)
 {
-       p_atomic_inc(&rscreen->dirty_fb_counter);
+       if (rscreen->chip_class >= GFX9) {
+               if (metadata->u.gfx9.swizzle_mode > 0)
+                       *array_mode = RADEON_SURF_MODE_2D;
+               else
+                       *array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
+
+               *is_scanout = metadata->u.gfx9.swizzle_mode == 0 ||
+                       metadata->u.gfx9.swizzle_mode % 4 == 2;
+       } else {
+               surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
+               surf->u.legacy.bankw = metadata->u.legacy.bankw;
+               surf->u.legacy.bankh = metadata->u.legacy.bankh;
+               surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
+               surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
+               surf->u.legacy.num_banks = metadata->u.legacy.num_banks;
+
+               if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
+                       *array_mode = RADEON_SURF_MODE_2D;
+               else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
+                       *array_mode = RADEON_SURF_MODE_1D;
+               else
+                       *array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
+
+               *is_scanout = metadata->u.legacy.scanout;
+       }
 }
 
 static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
@@ -307,13 +383,13 @@ static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
        struct pipe_context *ctx = &rctx->b;
 
        if (ctx == rscreen->aux_context)
-               pipe_mutex_lock(rscreen->aux_context_lock);
+               mtx_lock(&rscreen->aux_context_lock);
 
        ctx->flush_resource(ctx, &rtex->resource.b.b);
        ctx->flush(ctx, NULL, 0);
 
        if (ctx == rscreen->aux_context)
-               pipe_mutex_unlock(rscreen->aux_context_lock);
+               mtx_unlock(&rscreen->aux_context_lock);
 }
 
 static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
@@ -338,7 +414,7 @@ static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
            r600_resource_reference(&rtex->cmask_buffer, NULL);
 
        /* Notify all contexts about the change. */
-       r600_dirty_all_framebuffer_states(rscreen);
+       p_atomic_inc(&rscreen->dirty_tex_counter);
        p_atomic_inc(&rscreen->compressed_colortex_counter);
 }
 
@@ -346,7 +422,7 @@ static bool r600_can_disable_dcc(struct r600_texture *rtex)
 {
        /* We can't disable DCC if it can be written by another process. */
        return rtex->dcc_offset &&
-              (!rtex->resource.is_shared ||
+              (!rtex->resource.b.is_shared ||
                !(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
 }
 
@@ -362,7 +438,7 @@ static bool r600_texture_discard_dcc(struct r600_common_screen *rscreen,
        rtex->dcc_offset = 0;
 
        /* Notify all contexts about the change. */
-       r600_dirty_all_framebuffer_states(rscreen);
+       p_atomic_inc(&rscreen->dirty_tex_counter);
        return true;
 }
 
@@ -396,41 +472,46 @@ bool r600_texture_disable_dcc(struct r600_common_context *rctx,
                return false;
 
        if (&rctx->b == rscreen->aux_context)
-               pipe_mutex_lock(rscreen->aux_context_lock);
+               mtx_lock(&rscreen->aux_context_lock);
 
        /* Decompress DCC. */
        rctx->decompress_dcc(&rctx->b, rtex);
        rctx->b.flush(&rctx->b, NULL, 0);
 
        if (&rctx->b == rscreen->aux_context)
-               pipe_mutex_unlock(rscreen->aux_context_lock);
+               mtx_unlock(&rscreen->aux_context_lock);
 
        return r600_texture_discard_dcc(rscreen, rtex);
 }
 
-static void r600_degrade_tile_mode_to_linear(struct r600_common_context *rctx,
-                                            struct r600_texture *rtex,
-                                            bool invalidate_storage)
+static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
+                                           struct r600_texture *rtex,
+                                           unsigned new_bind_flag,
+                                           bool invalidate_storage)
 {
        struct pipe_screen *screen = rctx->b.screen;
        struct r600_texture *new_tex;
        struct pipe_resource templ = rtex->resource.b.b;
        unsigned i;
 
-       templ.bind |= PIPE_BIND_LINEAR;
+       templ.bind |= new_bind_flag;
 
        /* r600g doesn't react to dirty_tex_descriptor_counter */
        if (rctx->chip_class < SI)
                return;
 
-       if (rtex->resource.is_shared ||
-           rtex->surface.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED)
+       if (rtex->resource.b.is_shared)
                return;
 
-       /* This fails with MSAA, depth, and compressed textures. */
-       if (r600_choose_tiling(rctx->screen, &templ) !=
-           RADEON_SURF_MODE_LINEAR_ALIGNED)
-               return;
+       if (new_bind_flag == PIPE_BIND_LINEAR) {
+               if (rtex->surface.is_linear)
+                       return;
+
+               /* This fails with MSAA, depth, and compressed textures. */
+               if (r600_choose_tiling(rctx->screen, &templ) !=
+                   RADEON_SURF_MODE_LINEAR_ALIGNED)
+                       return;
+       }
 
        new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
        if (!new_tex)
@@ -450,8 +531,10 @@ static void r600_degrade_tile_mode_to_linear(struct r600_common_context *rctx,
                }
        }
 
-       r600_texture_discard_cmask(rctx->screen, rtex);
-       r600_texture_discard_dcc(rctx->screen, rtex);
+       if (new_bind_flag == PIPE_BIND_LINEAR) {
+               r600_texture_discard_cmask(rctx->screen, rtex);
+               r600_texture_discard_dcc(rctx->screen, rtex);
+       }
 
        /* Replace the structure fields of rtex. */
        rtex->resource.b.b.bind = templ.bind;
@@ -464,21 +547,34 @@ static void r600_degrade_tile_mode_to_linear(struct r600_common_context *rctx,
        rtex->resource.domains = new_tex->resource.domains;
        rtex->resource.flags = new_tex->resource.flags;
        rtex->size = new_tex->size;
+       rtex->db_render_format = new_tex->db_render_format;
+       rtex->db_compatible = new_tex->db_compatible;
+       rtex->can_sample_z = new_tex->can_sample_z;
+       rtex->can_sample_s = new_tex->can_sample_s;
        rtex->surface = new_tex->surface;
-       rtex->non_disp_tiling = new_tex->non_disp_tiling;
+       rtex->fmask = new_tex->fmask;
+       rtex->cmask = new_tex->cmask;
        rtex->cb_color_info = new_tex->cb_color_info;
-       rtex->cmask = new_tex->cmask; /* needed even without CMASK */
-
-       assert(!rtex->htile_buffer);
-       assert(!rtex->cmask.size);
-       assert(!rtex->fmask.size);
-       assert(!rtex->dcc_offset);
-       assert(!rtex->is_depth);
+       rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
+       rtex->htile_offset = new_tex->htile_offset;
+       rtex->tc_compatible_htile = new_tex->tc_compatible_htile;
+       rtex->depth_cleared = new_tex->depth_cleared;
+       rtex->stencil_cleared = new_tex->stencil_cleared;
+       rtex->non_disp_tiling = new_tex->non_disp_tiling;
+       rtex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
+       rtex->framebuffers_bound = new_tex->framebuffers_bound;
+
+       if (new_bind_flag == PIPE_BIND_LINEAR) {
+               assert(!rtex->htile_offset);
+               assert(!rtex->cmask.size);
+               assert(!rtex->fmask.size);
+               assert(!rtex->dcc_offset);
+               assert(!rtex->is_depth);
+       }
 
        r600_texture_reference(&new_tex, NULL);
 
-       r600_dirty_all_framebuffer_states(rctx->screen);
-       p_atomic_inc(&rctx->screen->dirty_tex_descriptor_counter);
+       p_atomic_inc(&rctx->screen->dirty_tex_counter);
 }
 
 static boolean r600_texture_get_handle(struct pipe_screen* screen,
@@ -488,21 +584,35 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
                                        unsigned usage)
 {
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
-       struct r600_common_context *rctx = (struct r600_common_context*)
-                                          (ctx ? ctx : rscreen->aux_context);
+       struct r600_common_context *rctx;
        struct r600_resource *res = (struct r600_resource*)resource;
        struct r600_texture *rtex = (struct r600_texture*)resource;
        struct radeon_bo_metadata metadata;
        bool update_metadata = false;
+       unsigned stride, offset, slice_size;
 
-       /* This is not supported now, but it might be required for OpenCL
-        * interop in the future.
-        */
-       if (resource->target != PIPE_BUFFER &&
-           (resource->nr_samples > 1 || rtex->is_depth))
-               return false;
+       ctx = threaded_context_unwrap_sync(ctx);
+       rctx = (struct r600_common_context*)(ctx ? ctx : rscreen->aux_context);
 
        if (resource->target != PIPE_BUFFER) {
+               /* This is not supported now, but it might be required for OpenCL
+                * interop in the future.
+                */
+               if (resource->nr_samples > 1 || rtex->is_depth)
+                       return false;
+
+               /* Move a suballocated texture into a non-suballocated allocation. */
+               if (rscreen->ws->buffer_is_suballocated(res->buf) ||
+                   rtex->surface.tile_swizzle) {
+                       assert(!res->b.is_shared);
+                       r600_reallocate_texture_inplace(rctx, rtex,
+                                                       PIPE_BIND_SHARED, false);
+                       rctx->b.flush(&rctx->b, NULL, 0);
+                       assert(res->b.b.bind & PIPE_BIND_SHARED);
+                       assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
+                       assert(rtex->surface.tile_swizzle == 0);
+               }
+
                /* Since shader image stores don't support DCC on VI,
                 * disable it for external clients that want write
                 * access.
@@ -525,17 +635,60 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
                }
 
                /* Set metadata. */
-               if (!res->is_shared || update_metadata) {
-                       r600_texture_init_metadata(rtex, &metadata);
+               if (!res->b.is_shared || update_metadata) {
+                       r600_texture_init_metadata(rscreen, rtex, &metadata);
                        if (rscreen->query_opaque_metadata)
                                rscreen->query_opaque_metadata(rscreen, rtex,
                                                               &metadata);
 
                        rscreen->ws->buffer_set_metadata(res->buf, &metadata);
                }
+
+               if (rscreen->chip_class >= GFX9) {
+                       offset = rtex->surface.u.gfx9.surf_offset;
+                       stride = rtex->surface.u.gfx9.surf_pitch *
+                                rtex->surface.bpe;
+                       slice_size = rtex->surface.u.gfx9.surf_slice_size;
+               } else {
+                       offset = rtex->surface.u.legacy.level[0].offset;
+                       stride = rtex->surface.u.legacy.level[0].nblk_x *
+                                rtex->surface.bpe;
+                       slice_size = rtex->surface.u.legacy.level[0].slice_size;
+               }
+       } else {
+               /* Move a suballocated buffer into a non-suballocated allocation. */
+               if (rscreen->ws->buffer_is_suballocated(res->buf)) {
+                       assert(!res->b.is_shared);
+
+                       /* Allocate a new buffer with PIPE_BIND_SHARED. */
+                       struct pipe_resource templ = res->b.b;
+                       templ.bind |= PIPE_BIND_SHARED;
+
+                       struct pipe_resource *newb =
+                               screen->resource_create(screen, &templ);
+                       if (!newb)
+                               return false;
+
+                       /* Copy the old buffer contents to the new one. */
+                       struct pipe_box box;
+                       u_box_1d(0, newb->width0, &box);
+                       rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
+                                                    &res->b.b, 0, &box);
+                       /* Move the new buffer storage to the old pipe_resource. */
+                       r600_replace_buffer_storage(&rctx->b, &res->b.b, newb);
+                       pipe_resource_reference(&newb, NULL);
+
+                       assert(res->b.b.bind & PIPE_BIND_SHARED);
+                       assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
+               }
+
+               /* Buffers */
+               offset = 0;
+               stride = 0;
+               slice_size = 0;
        }
 
-       if (res->is_shared) {
+       if (res->b.is_shared) {
                /* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
                 * doesn't set it.
                 */
@@ -543,15 +696,12 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
                if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
                        res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
        } else {
-               res->is_shared = true;
+               res->b.is_shared = true;
                res->external_usage = usage;
        }
 
-       return rscreen->ws->buffer_get_handle(res->buf,
-                                             rtex->surface.level[0].pitch_bytes,
-                                             rtex->surface.level[0].offset,
-                                             rtex->surface.level[0].slice_size,
-                                             whandle);
+       return rscreen->ws->buffer_get_handle(res->buf, stride, offset,
+                                             slice_size, whandle);
 }
 
 static void r600_texture_destroy(struct pipe_screen *screen,
@@ -562,7 +712,6 @@ static void r600_texture_destroy(struct pipe_screen *screen,
 
        r600_texture_reference(&rtex->flushed_depth_texture, NULL);
 
-       r600_resource_reference(&rtex->htile_buffer, NULL);
        if (rtex->cmask_buffer != &rtex->resource) {
            r600_resource_reference(&rtex->cmask_buffer, NULL);
        }
@@ -587,18 +736,24 @@ void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
 
        memset(out, 0, sizeof(*out));
 
+       if (rscreen->chip_class >= GFX9) {
+               out->alignment = rtex->surface.u.gfx9.fmask_alignment;
+               out->size = rtex->surface.u.gfx9.fmask_size;
+               return;
+       }
+
        templ.nr_samples = 1;
        flags = rtex->surface.flags | RADEON_SURF_FMASK;
 
        if (rscreen->chip_class <= CAYMAN) {
                /* Use the same parameters and tile mode. */
-               fmask.bankw = rtex->surface.bankw;
-               fmask.bankh = rtex->surface.bankh;
-               fmask.mtilea = rtex->surface.mtilea;
-               fmask.tile_split = rtex->surface.tile_split;
+               fmask.u.legacy.bankw = rtex->surface.u.legacy.bankw;
+               fmask.u.legacy.bankh = rtex->surface.u.legacy.bankh;
+               fmask.u.legacy.mtilea = rtex->surface.u.legacy.mtilea;
+               fmask.u.legacy.tile_split = rtex->surface.u.legacy.tile_split;
 
                if (nr_samples <= 4)
-                       fmask.bankh = 4;
+                       fmask.u.legacy.bankh = 4;
        }
 
        switch (nr_samples) {
@@ -627,15 +782,16 @@ void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
                return;
        }
 
-       assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);
+       assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
 
-       out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
+       out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
        if (out->slice_tile_max)
                out->slice_tile_max -= 1;
 
-       out->tile_mode_index = fmask.tiling_index[0];
-       out->pitch_in_pixels = fmask.level[0].nblk_x;
-       out->bank_height = fmask.bankh;
+       out->tile_mode_index = fmask.u.legacy.tiling_index[0];
+       out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
+       out->bank_height = fmask.u.legacy.bankh;
+       out->tile_swizzle = fmask.tile_swizzle;
        out->alignment = MAX2(256, fmask.surf_alignment);
        out->size = fmask.surf_size;
 }
@@ -692,6 +848,12 @@ static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
        unsigned num_pipes = rscreen->info.num_tile_pipes;
        unsigned cl_width, cl_height;
 
+       if (rscreen->chip_class >= GFX9) {
+               out->alignment = rtex->surface.u.gfx9.cmask_alignment;
+               out->size = rtex->surface.u.gfx9.cmask_size;
+               return;
+       }
+
        switch (num_pipes) {
        case 2:
                cl_width = 32;
@@ -765,7 +927,9 @@ static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen
        }
 
        rtex->cmask_buffer = (struct r600_resource *)
-               r600_aligned_buffer_create(&rscreen->b, 0, PIPE_USAGE_DEFAULT,
+               r600_aligned_buffer_create(&rscreen->b,
+                                          R600_RESOURCE_FLAG_UNMAPPABLE,
+                                          PIPE_USAGE_DEFAULT,
                                           rtex->cmask.size,
                                           rtex->cmask.alignment);
        if (rtex->cmask_buffer == NULL) {
@@ -791,6 +955,8 @@ static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
        unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
        unsigned num_pipes = rscreen->info.num_tile_pipes;
 
+       assert(rscreen->chip_class <= VI);
+
        rtex->surface.htile_size = 0;
 
        if (rscreen->chip_class <= EVERGREEN &&
@@ -805,7 +971,7 @@ static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
 
        /* HTILE is broken with 1D tiling on old kernels and CIK. */
        if (rscreen->chip_class >= CIK &&
-           rtex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
+           rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
            rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 38)
                return;
 
@@ -863,38 +1029,23 @@ static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
 static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex)
 {
-       uint32_t clear_value;
-
-       if (rtex->tc_compatible_htile) {
-               clear_value = 0x0000030F;
-       } else {
+       if (rscreen->chip_class <= VI && !rtex->tc_compatible_htile)
                r600_texture_get_htile_size(rscreen, rtex);
-               clear_value = 0;
-       }
 
        if (!rtex->surface.htile_size)
                return;
 
-       rtex->htile_buffer = (struct r600_resource*)
-               r600_aligned_buffer_create(&rscreen->b, 0, PIPE_USAGE_DEFAULT,
-                                          rtex->surface.htile_size,
-                                          rtex->surface.htile_alignment);
-       if (rtex->htile_buffer == NULL) {
-               /* this is not a fatal error as we can still keep rendering
-                * without htile buffer */
-               R600_ERR("Failed to create buffer object for htile buffer.\n");
-       } else {
-               r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b,
-                                        0, rtex->surface.htile_size,
-                                        clear_value, R600_COHERENCY_NONE);
-       }
+       rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
+       rtex->size = rtex->htile_offset + rtex->surface.htile_size;
 }
 
-void r600_print_texture_info(struct r600_texture *rtex, FILE *f)
+void r600_print_texture_info(struct r600_common_screen *rscreen,
+                            struct r600_texture *rtex, struct u_log_context *log)
 {
        int i;
 
-       fprintf(f, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
+       /* Common parameters. */
+       u_log_printf(log, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
                "blk_h=%u, array_size=%u, last_level=%u, "
                "bpe=%u, nsamples=%u, flags=0x%x, %s\n",
                rtex->resource.b.b.width0, rtex->resource.b.b.height0,
@@ -904,77 +1055,134 @@ void r600_print_texture_info(struct r600_texture *rtex, FILE *f)
                rtex->surface.bpe, rtex->resource.b.b.nr_samples,
                rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));
 
-       fprintf(f, "  Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
+       if (rscreen->chip_class >= GFX9) {
+               u_log_printf(log, "  Surf: size=%"PRIu64", slice_size=%"PRIu64", "
+                       "alignment=%u, swmode=%u, epitch=%u, pitch=%u\n",
+                       rtex->surface.surf_size,
+                       rtex->surface.u.gfx9.surf_slice_size,
+                       rtex->surface.surf_alignment,
+                       rtex->surface.u.gfx9.surf.swizzle_mode,
+                       rtex->surface.u.gfx9.surf.epitch,
+                       rtex->surface.u.gfx9.surf_pitch);
+
+               if (rtex->fmask.size) {
+                       u_log_printf(log, "  FMASK: offset=%"PRIu64", size=%"PRIu64", "
+                               "alignment=%u, swmode=%u, epitch=%u\n",
+                               rtex->fmask.offset,
+                               rtex->surface.u.gfx9.fmask_size,
+                               rtex->surface.u.gfx9.fmask_alignment,
+                               rtex->surface.u.gfx9.fmask.swizzle_mode,
+                               rtex->surface.u.gfx9.fmask.epitch);
+               }
+
+               if (rtex->cmask.size) {
+                       u_log_printf(log, "  CMask: offset=%"PRIu64", size=%"PRIu64", "
+                               "alignment=%u, rb_aligned=%u, pipe_aligned=%u\n",
+                               rtex->cmask.offset,
+                               rtex->surface.u.gfx9.cmask_size,
+                               rtex->surface.u.gfx9.cmask_alignment,
+                               rtex->surface.u.gfx9.cmask.rb_aligned,
+                               rtex->surface.u.gfx9.cmask.pipe_aligned);
+               }
+
+               if (rtex->htile_offset) {
+                       u_log_printf(log, "  HTile: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
+                               "rb_aligned=%u, pipe_aligned=%u\n",
+                               rtex->htile_offset,
+                               rtex->surface.htile_size,
+                               rtex->surface.htile_alignment,
+                               rtex->surface.u.gfx9.htile.rb_aligned,
+                               rtex->surface.u.gfx9.htile.pipe_aligned);
+               }
+
+               if (rtex->dcc_offset) {
+                       u_log_printf(log, "  DCC: offset=%"PRIu64", size=%"PRIu64", "
+                               "alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
+                               rtex->dcc_offset, rtex->surface.dcc_size,
+                               rtex->surface.dcc_alignment,
+                               rtex->surface.u.gfx9.dcc_pitch_max,
+                               rtex->surface.num_dcc_levels);
+               }
+
+               if (rtex->surface.u.gfx9.stencil_offset) {
+                       u_log_printf(log, "  Stencil: offset=%"PRIu64", swmode=%u, epitch=%u\n",
+                               rtex->surface.u.gfx9.stencil_offset,
+                               rtex->surface.u.gfx9.stencil.swizzle_mode,
+                               rtex->surface.u.gfx9.stencil.epitch);
+               }
+               return;
+       }
+
+       u_log_printf(log, "  Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
                "bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
-               rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.bankw,
-               rtex->surface.bankh, rtex->surface.num_banks, rtex->surface.mtilea,
-               rtex->surface.tile_split, rtex->surface.pipe_config,
+               rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw,
+               rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
+               rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
                (rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);
 
        if (rtex->fmask.size)
-               fprintf(f, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
+               u_log_printf(log, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
                        "bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
                        rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
                        rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
                        rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);
 
        if (rtex->cmask.size)
-               fprintf(f, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
+               u_log_printf(log, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
                        "slice_tile_max=%u\n",
                        rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
                        rtex->cmask.slice_tile_max);
 
-       if (rtex->htile_buffer)
-               fprintf(f, "  HTile: size=%u, alignment=%u, TC_compatible = %u\n",
-                       rtex->htile_buffer->b.b.width0,
-                       rtex->htile_buffer->buf->alignment,
+       if (rtex->htile_offset)
+               u_log_printf(log, "  HTile: offset=%"PRIu64", size=%"PRIu64", "
+                       "alignment=%u, TC_compatible = %u\n",
+                       rtex->htile_offset, rtex->surface.htile_size,
+                       rtex->surface.htile_alignment,
                        rtex->tc_compatible_htile);
 
        if (rtex->dcc_offset) {
-               fprintf(f, "  DCC: offset=%"PRIu64", size=%"PRIu64", alignment=%u\n",
+               u_log_printf(log, "  DCC: offset=%"PRIu64", size=%"PRIu64", alignment=%u\n",
                        rtex->dcc_offset, rtex->surface.dcc_size,
                        rtex->surface.dcc_alignment);
                for (i = 0; i <= rtex->resource.b.b.last_level; i++)
-                       fprintf(f, "  DCCLevel[%i]: enabled=%u, offset=%"PRIu64", "
+                       u_log_printf(log, "  DCCLevel[%i]: enabled=%u, offset=%"PRIu64", "
                                "fast_clear_size=%"PRIu64"\n",
                                i, i < rtex->surface.num_dcc_levels,
-                               rtex->surface.level[i].dcc_offset,
-                               rtex->surface.level[i].dcc_fast_clear_size);
+                               rtex->surface.u.legacy.level[i].dcc_offset,
+                               rtex->surface.u.legacy.level[i].dcc_fast_clear_size);
        }
 
        for (i = 0; i <= rtex->resource.b.b.last_level; i++)
-               fprintf(f, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
+               u_log_printf(log, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
                        "npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
-                       "pitch_bytes=%u, mode=%u, tiling_index = %u\n",
-                       i, rtex->surface.level[i].offset,
-                       rtex->surface.level[i].slice_size,
+                       "mode=%u, tiling_index = %u\n",
+                       i, rtex->surface.u.legacy.level[i].offset,
+                       rtex->surface.u.legacy.level[i].slice_size,
                        u_minify(rtex->resource.b.b.width0, i),
                        u_minify(rtex->resource.b.b.height0, i),
                        u_minify(rtex->resource.b.b.depth0, i),
-                       rtex->surface.level[i].nblk_x,
-                       rtex->surface.level[i].nblk_y,
-                       rtex->surface.level[i].pitch_bytes,
-                       rtex->surface.level[i].mode,
-                       rtex->surface.tiling_index[i]);
+                       rtex->surface.u.legacy.level[i].nblk_x,
+                       rtex->surface.u.legacy.level[i].nblk_y,
+                       rtex->surface.u.legacy.level[i].mode,
+                       rtex->surface.u.legacy.tiling_index[i]);
 
        if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
-               fprintf(f, "  StencilLayout: tilesplit=%u\n",
-                       rtex->surface.stencil_tile_split);
+               u_log_printf(log, "  StencilLayout: tilesplit=%u\n",
+                       rtex->surface.u.legacy.stencil_tile_split);
                for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
-                       fprintf(f, "  StencilLevel[%i]: offset=%"PRIu64", "
+                       u_log_printf(log, "  StencilLevel[%i]: offset=%"PRIu64", "
                                "slice_size=%"PRIu64", npix_x=%u, "
                                "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
-                               "pitch_bytes=%u, mode=%u, tiling_index = %u\n",
-                               i, rtex->surface.stencil_level[i].offset,
-                               rtex->surface.stencil_level[i].slice_size,
+                               "mode=%u, tiling_index = %u\n",
+                               i, rtex->surface.u.legacy.stencil_level[i].offset,
+                               rtex->surface.u.legacy.stencil_level[i].slice_size,
                                u_minify(rtex->resource.b.b.width0, i),
                                u_minify(rtex->resource.b.b.height0, i),
                                u_minify(rtex->resource.b.b.depth0, i),
-                               rtex->surface.stencil_level[i].nblk_x,
-                               rtex->surface.stencil_level[i].nblk_y,
-                               rtex->surface.stencil_level[i].pitch_bytes,
-                               rtex->surface.stencil_level[i].mode,
-                               rtex->surface.stencil_tiling_index[i]);
+                               rtex->surface.u.legacy.stencil_level[i].nblk_x,
+                               rtex->surface.u.legacy.stencil_level[i].nblk_y,
+                               rtex->surface.u.legacy.stencil_level[i].mode,
+                               rtex->surface.u.legacy.stencil_tiling_index[i]);
                }
        }
 }
@@ -1007,20 +1215,27 @@ r600_texture_create_object(struct pipe_screen *screen,
        rtex->surface = *surface;
        rtex->size = rtex->surface.surf_size;
 
-       rtex->tc_compatible_htile = rtex->surface.htile_size != 0;
-       assert(!!(rtex->surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE) ==
-              rtex->tc_compatible_htile);
+       rtex->tc_compatible_htile = rtex->surface.htile_size != 0 &&
+                                   (rtex->surface.flags &
+                                    RADEON_SURF_TC_COMPATIBLE_HTILE);
 
-       /* TC-compatible HTILE only supports Z32_FLOAT. */
-       if (rtex->tc_compatible_htile)
-               rtex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
-       else
+       /* TC-compatible HTILE:
+        * - VI only supports Z32_FLOAT.
+        * - GFX9 only supports Z32_FLOAT and Z16_UNORM. */
+       if (rtex->tc_compatible_htile) {
+               if (rscreen->chip_class >= GFX9 &&
+                   base->format == PIPE_FORMAT_Z16_UNORM)
+                       rtex->db_render_format = base->format;
+               else
+                       rtex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
+       } else {
                rtex->db_render_format = base->format;
+       }
 
        /* Tiled depth textures utilize the non-displayable tile order.
         * This must be done after r600_setup_surface.
         * Applies to R600-Cayman. */
-       rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;
+       rtex->non_disp_tiling = rtex->is_depth && rtex->surface.u.legacy.level[0].mode >= RADEON_SURF_MODE_1D;
        /* Applies to GCN. */
        rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;
 
@@ -1034,8 +1249,13 @@ r600_texture_create_object(struct pipe_screen *screen,
                if (base->flags & (R600_RESOURCE_FLAG_TRANSFER |
                                   R600_RESOURCE_FLAG_FLUSHED_DEPTH) ||
                    rscreen->chip_class >= EVERGREEN) {
-                       rtex->can_sample_z = !rtex->surface.depth_adjusted;
-                       rtex->can_sample_s = !rtex->surface.stencil_adjusted;
+                       if (rscreen->chip_class >= GFX9) {
+                               rtex->can_sample_z = true;
+                               rtex->can_sample_s = true;
+                       } else {
+                               rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
+                               rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
+                       }
                } else {
                        if (rtex->resource.b.b.nr_samples <= 1 &&
                            (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM ||
@@ -1081,7 +1301,9 @@ r600_texture_create_object(struct pipe_screen *screen,
                r600_init_resource_fields(rscreen, resource, rtex->size,
                                          rtex->surface.surf_alignment);
 
-               resource->flags |= RADEON_FLAG_HANDLE;
+               /* Displayable surfaces are not suballocated. */
+               if (resource->b.b.bind & PIPE_BIND_SCANOUT)
+                       resource->flags |= RADEON_FLAG_NO_SUBALLOC;
 
                if (!r600_alloc_resource(rscreen, resource)) {
                        FREE(rtex);
@@ -1103,7 +1325,18 @@ r600_texture_create_object(struct pipe_screen *screen,
                /* Initialize the cmask to 0xCC (= compressed state). */
                r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
                                         rtex->cmask.offset, rtex->cmask.size,
-                                        0xCCCCCCCC, R600_COHERENCY_NONE);
+                                        0xCCCCCCCC);
+       }
+       if (rtex->htile_offset) {
+               uint32_t clear_value = 0;
+
+               if (rscreen->chip_class >= GFX9 || rtex->tc_compatible_htile)
+                       clear_value = 0x0000030F;
+
+               r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
+                                        rtex->htile_offset,
+                                        rtex->surface.htile_size,
+                                        clear_value);
        }
 
        /* Initialize DCC only if the texture is not being imported. */
@@ -1111,7 +1344,7 @@ r600_texture_create_object(struct pipe_screen *screen,
                r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
                                         rtex->dcc_offset,
                                         rtex->surface.dcc_size,
-                                        0xFFFFFFFF, R600_COHERENCY_NONE);
+                                        0xFFFFFFFF);
        }
 
        /* Initialize the CMASK base register value. */
@@ -1128,8 +1361,12 @@ r600_texture_create_object(struct pipe_screen *screen,
 
        if (rscreen->debug_flags & DBG_TEX) {
                puts("Texture:");
-               r600_print_texture_info(rtex, stdout);
+               struct u_log_context log;
+               u_log_context_init(&log);
+               r600_print_texture_info(rscreen, rtex, &log);
+               u_log_new_page_print(&log, stdout);
                fflush(stdout);
+               u_log_context_destroy(&log);
        }
 
        return rtex;
@@ -1141,6 +1378,8 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
 {
        const struct util_format_description *desc = util_format_description(templ->format);
        bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;
+       bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
+                               !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
 
        /* MSAA resources must be 2D tiled. */
        if (templ->nr_samples > 1)
@@ -1150,6 +1389,14 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
        if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
                return RADEON_SURF_MODE_LINEAR_ALIGNED;
 
+       /* Avoid Z/S decompress blits by forcing TC-compatible HTILE on VI,
+        * which requires 2D tiling.
+        */
+       if (rscreen->chip_class == VI &&
+           is_depth_stencil &&
+           (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY))
+               return RADEON_SURF_MODE_2D;
+
        /* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
        if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
            (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
@@ -1160,9 +1407,9 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
        /* Handle common candidates for the linear mode.
         * Compressed textures and DB surfaces must always be tiled.
         */
-       if (!force_tiling && !util_format_is_compressed(templ->format) &&
-           (!util_format_is_depth_or_stencil(templ->format) ||
-            templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)) {
+       if (!force_tiling &&
+           !is_depth_stencil &&
+           !util_format_is_compressed(templ->format)) {
                if (rscreen->debug_flags & DBG_NO_TILING)
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;
 
@@ -1182,7 +1429,9 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
                /* Textures with a very small height are recommended to be linear. */
                if (templ->target == PIPE_TEXTURE_1D ||
                    templ->target == PIPE_TEXTURE_1D_ARRAY ||
-                   templ->height0 <= 4)
+                   /* Only very thin and long 2D textures should benefit from
+                    * linear_aligned. */
+                   (templ->width0 > 8 && templ->height0 <= 2))
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;
 
                /* Textures likely to be mapped often. */
@@ -1236,11 +1485,12 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct pb_buffer *buf = NULL;
        unsigned stride = 0, offset = 0;
-       unsigned array_mode;
-       struct radeon_surf surface;
+       enum radeon_surf_mode array_mode;
+       struct radeon_surf surface = {};
        int r;
        struct radeon_bo_metadata metadata = {};
        struct r600_texture *rtex;
+       bool is_scanout;
 
        /* Support only 2D textures without mipmaps */
        if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
@@ -1252,23 +1502,11 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen
                return NULL;
 
        rscreen->ws->buffer_get_metadata(buf, &metadata);
-
-       surface.pipe_config = metadata.pipe_config;
-       surface.bankw = metadata.bankw;
-       surface.bankh = metadata.bankh;
-       surface.tile_split = metadata.tile_split;
-       surface.mtilea = metadata.mtilea;
-       surface.num_banks = metadata.num_banks;
-
-       if (metadata.macrotile == RADEON_LAYOUT_TILED)
-               array_mode = RADEON_SURF_MODE_2D;
-       else if (metadata.microtile == RADEON_LAYOUT_TILED)
-               array_mode = RADEON_SURF_MODE_1D;
-       else
-               array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
+       r600_surface_import_metadata(rscreen, &surface, &metadata,
+                                    &array_mode, &is_scanout);
 
        r = r600_init_surface(rscreen, &surface, templ, array_mode, stride,
-                             offset, true, metadata.scanout, false, false);
+                             offset, true, is_scanout, false, false);
        if (r) {
                return NULL;
        }
@@ -1277,12 +1515,18 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen
        if (!rtex)
                return NULL;
 
-       rtex->resource.is_shared = true;
+       rtex->resource.b.is_shared = true;
        rtex->resource.external_usage = usage;
 
        if (rscreen->apply_opaque_metadata)
                rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);
 
+       /* Validate that addrlib arrived at the same surface parameters. */
+       if (rscreen->chip_class >= GFX9) {
+               assert(metadata.u.gfx9.swizzle_mode == surface.u.gfx9.surf.swizzle_mode);
+       }
+
+       assert(rtex->surface.tile_swizzle == 0);
        return &rtex->resource.b.b;
 }
 
@@ -1390,7 +1634,7 @@ static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
 {
        /* r600g doesn't react to dirty_tex_descriptor_counter */
        return rscreen->chip_class >= SI &&
-               !rtex->resource.is_shared &&
+               !rtex->resource.b.is_shared &&
                !(transfer_usage & PIPE_TRANSFER_READ) &&
                rtex->resource.b.b.last_level == 0 &&
                util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
@@ -1406,7 +1650,7 @@ static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
 
        /* There is no point in discarding depth and tiled buffers. */
        assert(!rtex->is_depth);
-       assert(rtex->surface.level[0].mode == RADEON_SURF_MODE_LINEAR_ALIGNED);
+       assert(rtex->surface.is_linear);
 
        /* Reallocate the buffer in the same pipe_resource. */
        r600_alloc_resource(rscreen, &rtex->resource);
@@ -1415,8 +1659,7 @@ static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
        rtex->cmask.base_address_reg =
                (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
 
-       r600_dirty_all_framebuffer_states(rscreen);
-       p_atomic_inc(&rscreen->dirty_tex_descriptor_counter);
+       p_atomic_inc(&rscreen->dirty_tex_counter);
 
        rctx->num_alloc_tex_transfer_bytes += rtex->size;
 }
@@ -1437,6 +1680,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
        bool use_staging_texture = false;
 
        assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
+       assert(box->width && box->height && box->depth);
 
        /* Depth textures use staging unconditionally. */
        if (!rtex->is_depth) {
@@ -1452,24 +1696,26 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
                                r600_can_invalidate_texture(rctx->screen, rtex,
                                                            usage, box);
 
-                       r600_degrade_tile_mode_to_linear(rctx, rtex,
-                                                        can_invalidate);
+                       r600_reallocate_texture_inplace(rctx, rtex,
+                                                       PIPE_BIND_LINEAR,
+                                                       can_invalidate);
                }
 
                /* Tiled textures need to be converted into a linear texture for CPU
                 * access. The staging texture is always linear and is placed in GART.
                 *
-                * Reading from VRAM is slow, always use the staging texture in
-                * this case.
+                * Reading from VRAM or GTT WC is slow, always use the staging
+                * texture in this case.
                 *
                 * Use the staging texture for uploads if the underlying BO
                 * is busy.
                 */
-               if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D)
+               if (!rtex->surface.is_linear)
                        use_staging_texture = true;
                else if (usage & PIPE_TRANSFER_READ)
-                       use_staging_texture = (rtex->resource.domains &
-                                              RADEON_DOMAIN_VRAM) != 0;
+                       use_staging_texture =
+                               rtex->resource.domains & RADEON_DOMAIN_VRAM ||
+                               rtex->resource.flags & RADEON_FLAG_GTT_WC;
                /* Write & linear only: */
                else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
                                                         RADEON_USAGE_READWRITE) ||
@@ -1487,10 +1733,10 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
        trans = CALLOC_STRUCT(r600_transfer);
        if (!trans)
                return NULL;
-       trans->transfer.resource = texture;
-       trans->transfer.level = level;
-       trans->transfer.usage = usage;
-       trans->transfer.box = *box;
+       pipe_resource_reference(&trans->b.b.resource, texture);
+       trans->b.b.level = level;
+       trans->b.b.usage = usage;
+       trans->b.b.box = *box;
 
        if (rtex->is_depth) {
                struct r600_texture *staging_depth;
@@ -1529,8 +1775,12 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
                                                            0, 0, 0, box->depth, 0, 0);
                                pipe_resource_reference(&temp, NULL);
                        }
-               }
-               else {
+
+                       /* Just get the strides. */
+                       r600_texture_get_offset(rctx->screen, staging_depth, level, NULL,
+                                               &trans->b.b.stride,
+                                               &trans->b.b.layer_stride);
+               } else {
                        /* XXX: only readback the rectangle which is being mapped? */
                        /* XXX: when discard is true, no need to read back from depth texture */
                        if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
@@ -1544,11 +1794,12 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
                                                    box->z, box->z + box->depth - 1,
                                                    0, 0);
 
-                       offset = r600_texture_get_offset(staging_depth, level, box);
+                       offset = r600_texture_get_offset(rctx->screen, staging_depth,
+                                                        level, box,
+                                                        &trans->b.b.stride,
+                                                        &trans->b.b.layer_stride);
                }
 
-               trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
-               trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
                trans->staging = (struct r600_resource*)staging_depth;
                buf = trans->staging;
        } else if (use_staging_texture) {
@@ -1568,8 +1819,11 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
                        return NULL;
                }
                trans->staging = &staging->resource;
-               trans->transfer.stride = staging->surface.level[0].pitch_bytes;
-               trans->transfer.layer_stride = staging->surface.level[0].slice_size;
+
+               /* Just get the strides. */
+               r600_texture_get_offset(rctx->screen, staging, 0, NULL,
+                                       &trans->b.b.stride,
+                                       &trans->b.b.layer_stride);
 
                if (usage & PIPE_TRANSFER_READ)
                        r600_copy_to_staging_texture(ctx, trans);
@@ -1579,9 +1833,9 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
                buf = trans->staging;
        } else {
                /* the resource is mapped directly */
-               trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
-               trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
-               offset = r600_texture_get_offset(rtex, level, box);
+               offset = r600_texture_get_offset(rctx->screen, rtex, level, box,
+                                                &trans->b.b.stride,
+                                                &trans->b.b.layer_stride);
                buf = &rtex->resource;
        }
 
@@ -1591,7 +1845,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
                return NULL;
        }
 
-       *ptransfer = &trans->transfer;
+       *ptransfer = &trans->b.b;
        return map + offset;
 }
 
@@ -1637,6 +1891,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
                rctx->num_alloc_tex_transfer_bytes = 0;
        }
 
+       pipe_resource_reference(&transfer->resource, NULL);
        FREE(transfer);
 }
 
@@ -1733,15 +1988,26 @@ bool vi_dcc_formats_compatible(enum pipe_format format1,
               type1 == type2;
 }
 
-void vi_dcc_disable_if_incompatible_format(struct r600_common_context *rctx,
+bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
+                                    unsigned level,
+                                    enum pipe_format view_format)
+{
+       struct r600_texture *rtex = (struct r600_texture *)tex;
+
+       return vi_dcc_enabled(rtex, level) &&
+              !vi_dcc_formats_compatible(tex->format, view_format);
+}
+
+/* This can't be merged with the above function, because
+ * vi_dcc_formats_compatible should be called only when DCC is enabled. */
+void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx,
                                           struct pipe_resource *tex,
                                           unsigned level,
                                           enum pipe_format view_format)
 {
        struct r600_texture *rtex = (struct r600_texture *)tex;
 
-       if (rtex->dcc_offset &&
-           level < rtex->surface.num_dcc_levels &&
+       if (vi_dcc_enabled(rtex, level) &&
            !vi_dcc_formats_compatible(tex->format, view_format))
                if (!r600_texture_disable_dcc(rctx, (struct r600_texture*)tex))
                        rctx->decompress_dcc(&rctx->b, rtex);
@@ -1750,10 +2016,9 @@ void vi_dcc_disable_if_incompatible_format(struct r600_common_context *rctx,
 struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
                                                struct pipe_resource *texture,
                                                const struct pipe_surface *templ,
+                                               unsigned width0, unsigned height0,
                                                unsigned width, unsigned height)
 {
-       struct r600_common_context *rctx = (struct r600_common_context*)pipe;
-       struct r600_texture *rtex = (struct r600_texture*)texture;
        struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
 
        if (!surface)
@@ -1769,13 +2034,14 @@ struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
        surface->base.width = width;
        surface->base.height = height;
        surface->base.u = templ->u;
-       surface->level_info = &rtex->surface.level[templ->u.tex.level];
 
-       if (texture->target != PIPE_BUFFER)
-               vi_dcc_disable_if_incompatible_format(rctx, texture,
-                                                     templ->u.tex.level,
-                                                     templ->format);
+       surface->width0 = width0;
+       surface->height0 = height0;
 
+       surface->dcc_incompatible =
+               texture->target != PIPE_BUFFER &&
+               vi_dcc_formats_are_incompatible(texture, templ->u.tex.level,
+                                               templ->format);
        return &surface->base;
 }
 
@@ -1786,6 +2052,8 @@ static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
        unsigned level = templ->u.tex.level;
        unsigned width = u_minify(tex->width0, level);
        unsigned height = u_minify(tex->height0, level);
+       unsigned width0 = tex->width0;
+       unsigned height0 = tex->height0;
 
        if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
                const struct util_format_description *tex_desc
@@ -1804,10 +2072,15 @@ static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
 
                        width = nblks_x * templ_desc->block.width;
                        height = nblks_y * templ_desc->block.height;
+
+                       width0 = util_format_get_nblocksx(tex->format, width0);
+                       height0 = util_format_get_nblocksy(tex->format, height0);
                }
        }
 
-       return r600_create_surface_custom(pipe, tex, templ, width, height);
+       return r600_create_surface_custom(pipe, tex, templ,
+                                         width0, height0,
+                                         width, height);
 }
 
 static void r600_surface_destroy(struct pipe_context *pipe,
@@ -2074,7 +2347,7 @@ static void vi_separate_dcc_try_enable(struct r600_common_context *rctx,
        /* The intent is to use this with shared displayable back buffers,
         * but it's not strictly limited only to them.
         */
-       if (!tex->resource.is_shared ||
+       if (!tex->resource.b.is_shared ||
            !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
            tex->resource.b.b.target != PIPE_TEXTURE_2D ||
            tex->resource.b.b.last_level > 0 ||
@@ -2106,7 +2379,8 @@ static void vi_separate_dcc_try_enable(struct r600_common_context *rctx,
                tex->last_dcc_separate_buffer = NULL;
        } else {
                tex->dcc_separate_buffer = (struct r600_resource*)
-                       r600_aligned_buffer_create(rctx->b.screen, 0,
+                       r600_aligned_buffer_create(rctx->b.screen,
+                                                  R600_RESOURCE_FLAG_UNMAPPABLE,
                                                   PIPE_USAGE_DEFAULT,
                                                   tex->surface.dcc_size,
                                                   tex->surface.dcc_alignment);
@@ -2222,6 +2496,14 @@ static bool vi_get_fast_clear_parameters(enum pipe_format surface_format,
        bool main_value = false;
        bool extra_value = false;
        int extra_channel;
+
+       /* This is needed to get the correct DCC clear value for luminance formats.
+        * 1) Get the linear format (because the next step can't handle L8_SRGB).
+        * 2) Convert luminance to red. (the real hw format for luminance)
+        */
+       surface_format = util_format_linear(surface_format);
+       surface_format = util_format_luminance_to_red(surface_format);
+
        const struct util_format_description *desc = util_format_description(surface_format);
 
        if (desc->block.bits == 128 &&
@@ -2240,7 +2522,8 @@ static bool vi_get_fast_clear_parameters(enum pipe_format surface_format,
 
        if (surface_format == PIPE_FORMAT_R11G11B10_FLOAT ||
            surface_format == PIPE_FORMAT_B5G6R5_UNORM ||
-           surface_format == PIPE_FORMAT_B5G6R5_SRGB) {
+           surface_format == PIPE_FORMAT_B5G6R5_SRGB ||
+           util_format_is_alpha(surface_format)) {
                extra_channel = -1;
        } else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
                if(r600_translate_colorswap(surface_format, false) <= 1)
@@ -2306,9 +2589,9 @@ void vi_dcc_clear_level(struct r600_common_context *rctx,
                        unsigned level, unsigned clear_value)
 {
        struct pipe_resource *dcc_buffer;
-       uint64_t dcc_offset;
+       uint64_t dcc_offset, clear_size;
 
-       assert(rtex->dcc_offset && level < rtex->surface.num_dcc_levels);
+       assert(vi_dcc_enabled(rtex, level));
 
        if (rtex->dcc_separate_buffer) {
                dcc_buffer = &rtex->dcc_separate_buffer->b.b;
@@ -2318,10 +2601,18 @@ void vi_dcc_clear_level(struct r600_common_context *rctx,
                dcc_offset = rtex->dcc_offset;
        }
 
-       dcc_offset += rtex->surface.level[level].dcc_offset;
+       if (rctx->chip_class >= GFX9) {
+               /* Mipmap level clears aren't implemented. */
+               assert(rtex->resource.b.b.last_level == 0);
+               /* MSAA needs a different clear size. */
+               assert(rtex->resource.b.b.nr_samples <= 1);
+               clear_size = rtex->surface.dcc_size;
+       } else {
+               dcc_offset += rtex->surface.u.legacy.level[level].dcc_offset;
+               clear_size = rtex->surface.u.legacy.level[level].dcc_fast_clear_size;
+       }
 
-       rctx->clear_buffer(&rctx->b, dcc_buffer, dcc_offset,
-                          rtex->surface.level[level].dcc_fast_clear_size,
+       rctx->clear_buffer(&rctx->b, dcc_buffer, dcc_offset, clear_size,
                           clear_value, R600_COHERENCY_CB_META);
 }
 
@@ -2332,28 +2623,60 @@ void vi_dcc_clear_level(struct r600_common_context *rctx,
 static void si_set_optimal_micro_tile_mode(struct r600_common_screen *rscreen,
                                           struct r600_texture *rtex)
 {
-       if (rtex->resource.is_shared ||
+       if (rtex->resource.b.is_shared ||
            rtex->resource.b.b.nr_samples <= 1 ||
            rtex->surface.micro_tile_mode == rtex->last_msaa_resolve_target_micro_mode)
                return;
 
-       assert(rtex->surface.level[0].mode == RADEON_SURF_MODE_2D);
+       assert(rscreen->chip_class >= GFX9 ||
+              rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
        assert(rtex->resource.b.b.last_level == 0);
 
-       /* These magic numbers were copied from addrlib. It doesn't use any
-        * definitions for them either. They are all 2D_TILED_THIN1 modes with
-        * different bpp and micro tile mode.
-        */
-       if (rscreen->chip_class >= CIK) {
+       if (rscreen->chip_class >= GFX9) {
+               /* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
+               assert(rtex->surface.u.gfx9.surf.swizzle_mode >= 4);
+
+               /* If you do swizzle_mode % 4, you'll get:
+                *   0 = Depth
+                *   1 = Standard,
+                *   2 = Displayable
+                *   3 = Rotated
+                *
+                * Depth-sample order isn't allowed:
+                */
+               assert(rtex->surface.u.gfx9.surf.swizzle_mode % 4 != 0);
+
+               switch (rtex->last_msaa_resolve_target_micro_mode) {
+               case RADEON_MICRO_MODE_DISPLAY:
+                       rtex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
+                       rtex->surface.u.gfx9.surf.swizzle_mode += 2; /* D */
+                       break;
+               case RADEON_MICRO_MODE_THIN:
+                       rtex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
+                       rtex->surface.u.gfx9.surf.swizzle_mode += 1; /* S */
+                       break;
+               case RADEON_MICRO_MODE_ROTATED:
+                       rtex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
+                       rtex->surface.u.gfx9.surf.swizzle_mode += 3; /* R */
+                       break;
+               default: /* depth */
+                       assert(!"unexpected micro mode");
+                       return;
+               }
+       } else if (rscreen->chip_class >= CIK) {
+               /* These magic numbers were copied from addrlib. It doesn't use
+                * any definitions for them either. They are all 2D_TILED_THIN1
+                * modes with different bpp and micro tile mode.
+                */
                switch (rtex->last_msaa_resolve_target_micro_mode) {
-               case 0: /* displayable */
-                       rtex->surface.tiling_index[0] = 10;
+               case RADEON_MICRO_MODE_DISPLAY:
+                       rtex->surface.u.legacy.tiling_index[0] = 10;
                        break;
-               case 1: /* thin */
-                       rtex->surface.tiling_index[0] = 14;
+               case RADEON_MICRO_MODE_THIN:
+                       rtex->surface.u.legacy.tiling_index[0] = 14;
                        break;
-               case 3: /* rotated */
-                       rtex->surface.tiling_index[0] = 28;
+               case RADEON_MICRO_MODE_ROTATED:
+                       rtex->surface.u.legacy.tiling_index[0] = 28;
                        break;
                default: /* depth, thick */
                        assert(!"unexpected micro mode");
@@ -2361,32 +2684,32 @@ static void si_set_optimal_micro_tile_mode(struct r600_common_screen *rscreen,
                }
        } else { /* SI */
                switch (rtex->last_msaa_resolve_target_micro_mode) {
-               case 0: /* displayable */
+               case RADEON_MICRO_MODE_DISPLAY:
                        switch (rtex->surface.bpe) {
                        case 1:
-                            rtex->surface.tiling_index[0] = 10;
+                            rtex->surface.u.legacy.tiling_index[0] = 10;
                             break;
                        case 2:
-                            rtex->surface.tiling_index[0] = 11;
+                            rtex->surface.u.legacy.tiling_index[0] = 11;
                             break;
                        default: /* 4, 8 */
-                            rtex->surface.tiling_index[0] = 12;
+                            rtex->surface.u.legacy.tiling_index[0] = 12;
                             break;
                        }
                        break;
-               case 1: /* thin */
+               case RADEON_MICRO_MODE_THIN:
                        switch (rtex->surface.bpe) {
                        case 1:
-                                rtex->surface.tiling_index[0] = 14;
+                                rtex->surface.u.legacy.tiling_index[0] = 14;
                                 break;
                        case 2:
-                                rtex->surface.tiling_index[0] = 15;
+                                rtex->surface.u.legacy.tiling_index[0] = 15;
                                 break;
                        case 4:
-                                rtex->surface.tiling_index[0] = 16;
+                                rtex->surface.u.legacy.tiling_index[0] = 16;
                                 break;
                        default: /* 8, 16 */
-                                rtex->surface.tiling_index[0] = 17;
+                                rtex->surface.u.legacy.tiling_index[0] = 17;
                                 break;
                        }
                        break;
@@ -2398,14 +2721,13 @@ static void si_set_optimal_micro_tile_mode(struct r600_common_screen *rscreen,
 
        rtex->surface.micro_tile_mode = rtex->last_msaa_resolve_target_micro_mode;
 
-       p_atomic_inc(&rscreen->dirty_fb_counter);
-       p_atomic_inc(&rscreen->dirty_tex_descriptor_counter);
+       p_atomic_inc(&rscreen->dirty_tex_counter);
 }
 
 void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
                                   struct pipe_framebuffer_state *fb,
                                   struct r600_atom *fb_state,
-                                  unsigned *buffers, unsigned *dirty_cbufs,
+                                  unsigned *buffers, ubyte *dirty_cbufs,
                                   const union pipe_color_union *color)
 {
        int i;
@@ -2443,7 +2765,7 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
                }
 
                /* only supported on tiled surfaces */
-               if (tex->surface.level[0].mode < RADEON_SURF_MODE_1D) {
+               if (tex->surface.is_linear) {
                        continue;
                }
 
@@ -2451,13 +2773,13 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
                 * because there is no way to communicate the clear color among
                 * all clients
                 */
-               if (tex->resource.is_shared &&
+               if (tex->resource.b.is_shared &&
                    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
                        continue;
 
                /* fast color clear with 1D tiling doesn't work on old kernels and CIK */
-               if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
-                   rctx->chip_class >= CIK &&
+               if (rctx->chip_class == CIK &&
+                   tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
                    rctx->screen->info.drm_major == 2 &&
                    rctx->screen->info.drm_minor < 38) {
                        continue;
@@ -2470,9 +2792,10 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
                    !(rctx->screen->debug_flags & DBG_NO_DCC_FB)) {
                        vi_separate_dcc_try_enable(rctx, tex);
 
-                       /* Stoney can't do a CMASK-based clear, so all clears are
-                        * considered to be hypothetically slow clears, which
-                        * is weighed when determining to enable separate DCC.
+                       /* RB+ isn't supported with a CMASK clear only on Stoney,
+                        * so all clears are considered to be hypothetically slow
+                        * clears, which is weighed when determining whether to
+                        * enable separate DCC.
                         */
                        if (tex->dcc_gather_statistics &&
                            rctx->family == CHIP_STONEY)
@@ -2480,7 +2803,7 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
                }
 
                /* Try to clear DCC first, otherwise try CMASK. */
-               if (tex->dcc_offset && tex->surface.num_dcc_levels) {
+               if (vi_dcc_enabled(tex, 0)) {
                        uint32_t reset_value;
                        bool clear_words_needed;
 
@@ -2494,8 +2817,15 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
 
                        vi_dcc_clear_level(rctx, tex, 0, reset_value);
 
-                       if (clear_words_needed)
-                               tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
+                       unsigned level_bit = 1 << fb->cbufs[i]->u.tex.level;
+                       if (clear_words_needed) {
+                               bool need_compressed_update = !tex->dirty_level_mask;
+
+                               tex->dirty_level_mask |= level_bit;
+
+                               if (need_compressed_update)
+                                       p_atomic_inc(&rctx->screen->compressed_colortex_counter);
+                       }
                        tex->separate_dcc_dirty = true;
                } else {
                        /* 128-bit formats are unsupported */
@@ -2503,7 +2833,7 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
                                continue;
                        }
 
-                       /* Stoney/RB+ doesn't work with CMASK fast clear. */
+                       /* RB+ doesn't work with CMASK fast clear on Stoney. */
                        if (rctx->family == CHIP_STONEY)
                                continue;
 
@@ -2518,7 +2848,12 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
                                           tex->cmask.offset, tex->cmask.size, 0,
                                           R600_COHERENCY_CB_META);
 
+                       bool need_compressed_update = !tex->dirty_level_mask;
+
                        tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
+
+                       if (need_compressed_update)
+                               p_atomic_inc(&rctx->screen->compressed_colortex_counter);
                }
 
                /* We can change the micro tile mode before a full clear. */
@@ -2534,10 +2869,125 @@ void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
        }
 }
 
+static struct pipe_memory_object *
+r600_memobj_from_handle(struct pipe_screen *screen,
+                       struct winsys_handle *whandle,
+                       bool dedicated)
+{
+       struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+       struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
+       struct pb_buffer *buf = NULL;
+       uint32_t stride, offset;
+
+       if (!memobj)
+               return NULL;
+
+       buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle,
+                                             &stride, &offset);
+       if (!buf) {
+               free(memobj);
+               return NULL;
+       }
+
+       memobj->b.dedicated = dedicated;
+       memobj->buf = buf;
+       memobj->stride = stride;
+       memobj->offset = offset;
+
+       return (struct pipe_memory_object *)memobj;
+
+}
+
+static void
+r600_memobj_destroy(struct pipe_screen *screen,
+                   struct pipe_memory_object *_memobj)
+{
+       struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
+
+       pb_reference(&memobj->buf, NULL);
+       free(memobj);
+}
+
+static struct pipe_resource *
+r600_texture_from_memobj(struct pipe_screen *screen,
+                        const struct pipe_resource *templ,
+                        struct pipe_memory_object *_memobj,
+                        uint64_t offset)
+{
+       int r;
+       struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+       struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
+       struct r600_texture *rtex;
+       struct radeon_surf surface = {};
+       struct radeon_bo_metadata metadata = {};
+       enum radeon_surf_mode array_mode;
+       bool is_scanout;
+       struct pb_buffer *buf = NULL;
+
+       if (memobj->b.dedicated) {
+               rscreen->ws->buffer_get_metadata(memobj->buf, &metadata);
+               r600_surface_import_metadata(rscreen, &surface, &metadata,
+                                    &array_mode, &is_scanout);
+       } else {
+               /**
+                * The bo metadata is unset for un-dedicated images. So we fall
+                * back to linear. See answer to question 5 of the
+                * VK_KHX_external_memory spec for some details.
+                *
+                * It is possible that this case isn't going to work if the
+                * surface pitch isn't correctly aligned by default.
+                *
+                * In order to support it correctly we require multi-image
+                * metadata to be synchronized between radv and radeonsi. The
+                * semantics of associating multiple image metadata to a memory
+                * object on the vulkan export side are not concretely defined
+                * either.
+                *
+                * All the use cases we are aware of at the moment for memory
+                * objects use dedicated allocations. So lets keep the initial
+                * implementation simple.
+                *
+                * A possible alternative is to attempt to reconstruct the
+                * tiling information when the TexParameter TEXTURE_TILING_EXT
+                * is set.
+                */
+               array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
+               is_scanout = false;
+
+       }
+
+       r = r600_init_surface(rscreen, &surface, templ,
+                             array_mode, memobj->stride,
+                             offset, true, is_scanout,
+                             false, false);
+       if (r)
+               return NULL;
+
+       rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface);
+       if (!rtex)
+               return NULL;
+
+       /* r600_texture_create_object doesn't increment refcount of
+        * memobj->buf, so increment it here.
+        */
+       pb_reference(&buf, memobj->buf);
+
+       rtex->resource.b.is_shared = true;
+       rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;
+
+       if (rscreen->apply_opaque_metadata)
+               rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);
+
+       return &rtex->resource.b.b;
+}
+
 void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
 {
        rscreen->b.resource_from_handle = r600_texture_from_handle;
        rscreen->b.resource_get_handle = r600_texture_get_handle;
+       rscreen->b.resource_from_memobj = r600_texture_from_memobj;
+       rscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
+       rscreen->b.memobj_destroy = r600_memobj_destroy;
 }
 
 void r600_init_context_texture_functions(struct r600_common_context *rctx)