radeonsi: rename variable to clarify its meaning
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index 92d2c78a56a587ce640771b20f817177067afcac..da3c7debd5780519675aa70cc2279eb89c7be5e2 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -74,11 +74,6 @@ static unsigned si_map_swizzle(unsigned swizzle)
        }
 }
 
-static uint32_t S_FIXED(float value, uint32_t frac_bits)
-{
-       return value * (1 << frac_bits);
-}
-
 /* 12.4 fixed-point */
 static unsigned si_pack_float_12p4(float x)
 {
@@ -117,6 +112,17 @@ static void si_emit_cb_render_state(struct si_context *sctx, struct r600_atom *a
 
        radeon_set_context_reg(cs, R_028238_CB_TARGET_MASK, cb_target_mask);
 
+       /* GFX9: Flush DFSM when CB_TARGET_MASK changes.
+        * I think we don't have to do anything between IBs.
+        */
+       if (sctx->screen->dfsm_allowed &&
+           sctx->last_cb_target_mask != cb_target_mask) {
+               sctx->last_cb_target_mask = cb_target_mask;
+
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
+       }
+
        /* RB+ register settings. */
        if (sctx->screen->b.rbplus_allowed) {
                unsigned spi_shader_col_format =
@@ -435,6 +441,8 @@ static void *si_create_blend_state_mode(struct pipe_context *ctx,
                blend->need_src_alpha_4bit |= 0xf;
 
        blend->cb_target_mask = 0;
+       blend->cb_target_enabled_4bit = 0;
+
        for (int i = 0; i < 8; i++) {
                /* state->rt entries > 0 only written if independent blending */
                const int j = state->independent_blend_enable ? i : 0;
@@ -476,6 +484,8 @@ static void *si_create_blend_state_mode(struct pipe_context *ctx,
 
                /* cb_render_state will disable unused ones */
                blend->cb_target_mask |= (unsigned)state->rt[j].colormask << (4 * i);
+               if (state->rt[j].colormask)
+                       blend->cb_target_enabled_4bit |= 0xf << (4 * i);
 
                if (!state->rt[j].colormask || !state->rt[j].blend_enable) {
                        si_pm4_set_reg(pm4, R_028780_CB_BLEND0_CONTROL + i * 4, blend_cntl);
@@ -592,9 +602,34 @@ static void *si_create_blend_state(struct pipe_context *ctx,
 static void si_bind_blend_state(struct pipe_context *ctx, void *state)
 {
        struct si_context *sctx = (struct si_context *)ctx;
-       si_pm4_bind_state(sctx, blend, (struct si_state_blend *)state);
-       si_mark_atom_dirty(sctx, &sctx->cb_render_state);
-       sctx->do_update_shaders = true;
+       struct si_state_blend *old_blend = sctx->queued.named.blend;
+       struct si_state_blend *blend = (struct si_state_blend *)state;
+
+       if (!state)
+               return;
+
+       if (!old_blend ||
+            old_blend->cb_target_mask != blend->cb_target_mask ||
+            old_blend->dual_src_blend != blend->dual_src_blend)
+               si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+
+       si_pm4_bind_state(sctx, blend, state);
+
+       if (!old_blend ||
+           old_blend->cb_target_mask != blend->cb_target_mask ||
+           old_blend->alpha_to_coverage != blend->alpha_to_coverage ||
+           old_blend->alpha_to_one != blend->alpha_to_one ||
+           old_blend->dual_src_blend != blend->dual_src_blend ||
+           old_blend->blend_enable_4bit != blend->blend_enable_4bit ||
+           old_blend->need_src_alpha_4bit != blend->need_src_alpha_4bit)
+               sctx->do_update_shaders = true;
+
+       if (sctx->screen->dpbb_allowed &&
+           (!old_blend ||
+            old_blend->alpha_to_coverage != blend->alpha_to_coverage ||
+            old_blend->blend_enable_4bit != blend->blend_enable_4bit ||
+            old_blend->cb_target_enabled_4bit != blend->cb_target_enabled_4bit))
+               si_mark_atom_dirty(sctx, &sctx->dpbb_state);
 }
 
 static void si_delete_blend_state(struct pipe_context *ctx, void *state)
@@ -607,11 +642,10 @@ static void si_set_blend_color(struct pipe_context *ctx,
                               const struct pipe_blend_color *state)
 {
        struct si_context *sctx = (struct si_context *)ctx;
-
-       if (memcmp(&sctx->blend_color.state, state, sizeof(*state)) == 0)
-               return;
+       static const struct pipe_blend_color zeros;
 
        sctx->blend_color.state = *state;
+       sctx->blend_color.any_nonzeros = memcmp(state, &zeros, sizeof(*state)) != 0;
        si_mark_atom_dirty(sctx, &sctx->blend_color.atom);
 }
 
@@ -632,11 +666,13 @@ static void si_set_clip_state(struct pipe_context *ctx,
 {
        struct si_context *sctx = (struct si_context *)ctx;
        struct pipe_constant_buffer cb;
+       static const struct pipe_clip_state zeros;
 
        if (memcmp(&sctx->clip_state.state, state, sizeof(*state)) == 0)
                return;
 
        sctx->clip_state.state = *state;
+       sctx->clip_state.any_nonzeros = memcmp(state, &zeros, sizeof(*state)) != 0;
        si_mark_atom_dirty(sctx, &sctx->clip_state.atom);
 
        cb.buffer = NULL;
@@ -655,24 +691,21 @@ static void si_emit_clip_state(struct si_context *sctx, struct r600_atom *atom)
        radeon_emit_array(cs, (uint32_t*)sctx->clip_state.state.ucp, 6*4);
 }
 
-#define SIX_BITS 0x3F
-
 static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
 {
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        struct si_shader *vs = si_get_vs_state(sctx);
-       struct tgsi_shader_info *info = si_get_vs_info(sctx);
+       struct si_shader_selector *vs_sel = vs->selector;
+       struct tgsi_shader_info *info = &vs_sel->info;
        struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
        unsigned window_space =
           info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
-       unsigned clipdist_mask =
-               info->writes_clipvertex ? SIX_BITS : info->clipdist_writemask;
+       unsigned clipdist_mask = vs_sel->clipdist_mask;
        unsigned ucp_mask = clipdist_mask ? 0 : rs->clip_plane_enable & SIX_BITS;
-       unsigned culldist_mask = info->culldist_writemask << info->num_written_clipdistance;
+       unsigned culldist_mask = vs_sel->culldist_mask;
        unsigned total_mask;
-       bool misc_vec_ena;
 
-       if (vs->key.opt.hw_vs.clip_disable) {
+       if (vs->key.opt.clip_disable) {
                assert(!info->culldist_writemask);
                clipdist_mask = 0;
                culldist_mask = 0;
@@ -688,27 +721,15 @@ static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
        clipdist_mask &= rs->clip_plane_enable;
        culldist_mask |= clipdist_mask;
 
-       misc_vec_ena = info->writes_psize || info->writes_edgeflag ||
-                      info->writes_layer || info->writes_viewport_index;
-
        radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
-               S_02881C_USE_VTX_POINT_SIZE(info->writes_psize) |
-               S_02881C_USE_VTX_EDGE_FLAG(info->writes_edgeflag) |
-               S_02881C_USE_VTX_RENDER_TARGET_INDX(info->writes_layer) |
-               S_02881C_USE_VTX_VIEWPORT_INDX(info->writes_viewport_index) |
+               vs_sel->pa_cl_vs_out_cntl |
                S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0F) != 0) |
                S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xF0) != 0) |
-               S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
-               S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
                clipdist_mask | (culldist_mask << 8));
        radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
                rs->pa_cl_clip_cntl |
                ucp_mask |
                S_028810_CLIP_DISABLE(window_space));
-
-       /* reuse needs to be set off if we write oViewport */
-       radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
-                              S_028AB4_REUSE_OFF(info->writes_viewport_index));
 }
 
 /*
@@ -785,6 +806,7 @@ static void *si_create_rs_state(struct pipe_context *ctx,
        rs->uses_poly_offset = state->offset_point || state->offset_line ||
                               state->offset_tri;
        rs->clamp_fragment_color = state->clamp_fragment_color;
+       rs->clamp_vertex_color = state->clamp_vertex_color;
        rs->flatshade = state->flatshade;
        rs->sprite_coord_enable = state->sprite_coord_enable;
        rs->rasterizer_discard = state->rasterizer_discard;
@@ -851,8 +873,15 @@ static void *si_create_rs_state(struct pipe_context *ctx,
                                   state->fill_back != PIPE_POLYGON_MODE_FILL) |
                S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(state->fill_front)) |
                S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(state->fill_back)));
-       si_pm4_set_reg(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0 +
-                      SI_SGPR_VS_STATE_BITS * 4, state->clamp_vertex_color);
+
+       if (!rs->uses_poly_offset)
+               return rs;
+
+       rs->pm4_poly_offset = CALLOC(3, sizeof(struct si_pm4_state));
+       if (!rs->pm4_poly_offset) {
+               FREE(rs);
+               return NULL;
+       }
 
        /* Precalculate polygon offset states for 16-bit, 24-bit, and 32-bit zbuffers. */
        for (i = 0; i < 3; i++) {
@@ -910,29 +939,52 @@ static void si_bind_rs_state(struct pipe_context *ctx, void *state)
                si_mark_atom_dirty(sctx, &sctx->db_render_state);
 
                /* Update the small primitive filter workaround if necessary. */
-               if (sctx->b.family >= CHIP_POLARIS10 &&
+               if (sctx->screen->has_msaa_sample_loc_bug &&
                    sctx->framebuffer.nr_samples > 1)
                        si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
        }
 
+       sctx->current_vs_state &= C_VS_STATE_CLAMP_VERTEX_COLOR;
+       sctx->current_vs_state |= S_VS_STATE_CLAMP_VERTEX_COLOR(rs->clamp_vertex_color);
+
        r600_viewport_set_rast_deps(&sctx->b, rs->scissor_enable, rs->clip_halfz);
 
        si_pm4_bind_state(sctx, rasterizer, rs);
        si_update_poly_offset_state(sctx);
 
-       si_mark_atom_dirty(sctx, &sctx->clip_regs);
+       if (!old_rs ||
+           old_rs->clip_plane_enable != rs->clip_plane_enable ||
+           old_rs->pa_cl_clip_cntl != rs->pa_cl_clip_cntl)
+               si_mark_atom_dirty(sctx, &sctx->clip_regs);
+
        sctx->ia_multi_vgt_param_key.u.line_stipple_enabled =
                rs->line_stipple_enable;
-       sctx->do_update_shaders = true;
+
+       if (!old_rs ||
+           old_rs->clip_plane_enable != rs->clip_plane_enable ||
+           old_rs->rasterizer_discard != rs->rasterizer_discard ||
+           old_rs->sprite_coord_enable != rs->sprite_coord_enable ||
+           old_rs->flatshade != rs->flatshade ||
+           old_rs->two_side != rs->two_side ||
+           old_rs->multisample_enable != rs->multisample_enable ||
+           old_rs->poly_stipple_enable != rs->poly_stipple_enable ||
+           old_rs->poly_smooth != rs->poly_smooth ||
+           old_rs->line_smooth != rs->line_smooth ||
+           old_rs->clamp_fragment_color != rs->clamp_fragment_color ||
+           old_rs->force_persample_interp != rs->force_persample_interp)
+               sctx->do_update_shaders = true;
 }
 
 static void si_delete_rs_state(struct pipe_context *ctx, void *state)
 {
        struct si_context *sctx = (struct si_context *)ctx;
+       struct si_state_rasterizer *rs = (struct si_state_rasterizer *)state;
 
        if (sctx->queued.named.rasterizer == state)
                si_pm4_bind_state(sctx, poly_offset, NULL);
-       si_pm4_delete_state(sctx, rasterizer, (struct si_state_rasterizer *)state);
+
+       FREE(rs->pm4_poly_offset);
+       si_pm4_delete_state(sctx, rasterizer, rs);
 }
 
 /*
@@ -999,6 +1051,14 @@ static uint32_t si_translate_stencil_op(int s_op)
        return 0;
 }
 
+static bool si_dsa_writes_stencil(const struct pipe_stencil_state *s)
+{
+       return s->enabled && s->writemask &&
+              (s->fail_op  != PIPE_STENCIL_OP_KEEP ||
+               s->zfail_op != PIPE_STENCIL_OP_KEEP ||
+               s->zpass_op != PIPE_STENCIL_OP_KEEP);
+}
+
 static void *si_create_dsa_state(struct pipe_context *ctx,
                                 const struct pipe_depth_stencil_alpha_state *state)
 {
@@ -1049,18 +1109,29 @@ static void *si_create_dsa_state(struct pipe_context *ctx,
        }
 
        si_pm4_set_reg(pm4, R_028800_DB_DEPTH_CONTROL, db_depth_control);
-       si_pm4_set_reg(pm4, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);
+       if (state->stencil[0].enabled)
+               si_pm4_set_reg(pm4, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);
        if (state->depth.bounds_test) {
                si_pm4_set_reg(pm4, R_028020_DB_DEPTH_BOUNDS_MIN, fui(state->depth.bounds_min));
                si_pm4_set_reg(pm4, R_028024_DB_DEPTH_BOUNDS_MAX, fui(state->depth.bounds_max));
        }
 
+       dsa->depth_enabled = state->depth.enabled;
+       dsa->depth_write_enabled = state->depth.enabled &&
+                                  state->depth.writemask;
+       dsa->stencil_enabled = state->stencil[0].enabled;
+       dsa->stencil_write_enabled = state->stencil[0].enabled &&
+                                    (si_dsa_writes_stencil(&state->stencil[0]) ||
+                                     si_dsa_writes_stencil(&state->stencil[1]));
+       dsa->db_can_write = dsa->depth_write_enabled ||
+                           dsa->stencil_write_enabled;
        return dsa;
 }
 
 static void si_bind_dsa_state(struct pipe_context *ctx, void *state)
 {
         struct si_context *sctx = (struct si_context *)ctx;
+       struct si_state_dsa *old_dsa = sctx->queued.named.dsa;
         struct si_state_dsa *dsa = state;
 
         if (!state)
@@ -1073,7 +1144,16 @@ static void si_bind_dsa_state(struct pipe_context *ctx, void *state)
                sctx->stencil_ref.dsa_part = dsa->stencil_ref;
                si_mark_atom_dirty(sctx, &sctx->stencil_ref.atom);
        }
-       sctx->do_update_shaders = true;
+
+       if (!old_dsa || old_dsa->alpha_func != dsa->alpha_func)
+               sctx->do_update_shaders = true;
+
+       if (sctx->screen->dpbb_allowed &&
+           (!old_dsa ||
+            (old_dsa->depth_enabled != dsa->depth_enabled ||
+             old_dsa->stencil_enabled != dsa->stencil_enabled ||
+             old_dsa->db_can_write != dsa->db_can_write)))
+               si_mark_atom_dirty(sctx, &sctx->dpbb_state);
 }
 
 static void si_delete_dsa_state(struct pipe_context *ctx, void *state)
@@ -1665,9 +1745,11 @@ static unsigned si_tex_compare(unsigned compare)
        }
 }
 
-static unsigned si_tex_dim(unsigned res_target, unsigned view_target,
-                          unsigned nr_samples)
+static unsigned si_tex_dim(struct si_screen *sscreen, struct r600_texture *rtex,
+                          unsigned view_target, unsigned nr_samples)
 {
+       unsigned res_target = rtex->resource.b.b.target;
+
        if (view_target == PIPE_TEXTURE_CUBE ||
            view_target == PIPE_TEXTURE_CUBE_ARRAY)
                res_target = view_target;
@@ -1676,6 +1758,17 @@ static unsigned si_tex_dim(unsigned res_target, unsigned view_target,
                 res_target == PIPE_TEXTURE_CUBE_ARRAY)
                res_target = PIPE_TEXTURE_2D_ARRAY;
 
+       /* GFX9 allocates 1D textures as 2D. */
+       if ((res_target == PIPE_TEXTURE_1D ||
+            res_target == PIPE_TEXTURE_1D_ARRAY) &&
+           sscreen->b.chip_class >= GFX9 &&
+           rtex->surface.u.gfx9.resource_type == RADEON_RESOURCE_2D) {
+               if (res_target == PIPE_TEXTURE_1D)
+                       res_target = PIPE_TEXTURE_2D;
+               else
+                       res_target = PIPE_TEXTURE_2D_ARRAY;
+       }
+
        switch (res_target) {
        default:
        case PIPE_TEXTURE_1D:
@@ -2082,36 +2175,36 @@ static void si_initialize_color_surface(struct si_context *sctx,
        unsigned color_info, color_attrib, color_view;
        unsigned format, swap, ntype, endian;
        const struct util_format_description *desc;
-       int i;
+       int firstchan;
        unsigned blend_clamp = 0, blend_bypass = 0;
 
        color_view = S_028C6C_SLICE_START(surf->base.u.tex.first_layer) |
                     S_028C6C_SLICE_MAX(surf->base.u.tex.last_layer);
 
        desc = util_format_description(surf->base.format);
-       for (i = 0; i < 4; i++) {
-               if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
+       for (firstchan = 0; firstchan < 4; firstchan++) {
+               if (desc->channel[firstchan].type != UTIL_FORMAT_TYPE_VOID) {
                        break;
                }
        }
-       if (i == 4 || desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
+       if (firstchan == 4 || desc->channel[firstchan].type == UTIL_FORMAT_TYPE_FLOAT) {
                ntype = V_028C70_NUMBER_FLOAT;
        } else {
                ntype = V_028C70_NUMBER_UNORM;
                if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
                        ntype = V_028C70_NUMBER_SRGB;
-               else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
-                       if (desc->channel[i].pure_integer) {
+               else if (desc->channel[firstchan].type == UTIL_FORMAT_TYPE_SIGNED) {
+                       if (desc->channel[firstchan].pure_integer) {
                                ntype = V_028C70_NUMBER_SINT;
                        } else {
-                               assert(desc->channel[i].normalized);
+                               assert(desc->channel[firstchan].normalized);
                                ntype = V_028C70_NUMBER_SNORM;
                        }
-               } else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
-                       if (desc->channel[i].pure_integer) {
+               } else if (desc->channel[firstchan].type == UTIL_FORMAT_TYPE_UNSIGNED) {
+                       if (desc->channel[firstchan].pure_integer) {
                                ntype = V_028C70_NUMBER_UINT;
                        } else {
-                               assert(desc->channel[i].normalized);
+                               assert(desc->channel[firstchan].normalized);
                                ntype = V_028C70_NUMBER_UNORM;
                        }
                }
@@ -2210,26 +2303,12 @@ static void si_initialize_color_surface(struct si_context *sctx,
 
        if (sctx->b.chip_class >= GFX9) {
                unsigned mip0_depth = util_max_layer(&rtex->resource.b.b, 0);
-               unsigned type;
-
-               switch (rtex->resource.b.b.target) {
-               case PIPE_TEXTURE_1D:
-               case PIPE_TEXTURE_1D_ARRAY:
-                       type = V_028C74_1D;
-                       break;
-               default:
-                       type = V_028C74_2D;
-                       break;
-               case PIPE_TEXTURE_3D:
-                       type = V_028C74_3D;
-                       break;
-               }
 
                surf->cb_color_view |= S_028C6C_MIP_LEVEL(surf->base.u.tex.level);
                surf->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
-                                        S_028C74_RESOURCE_TYPE(type);
-               surf->cb_color_attrib2 = S_028C68_MIP0_WIDTH(rtex->resource.b.b.width0 - 1) |
-                                        S_028C68_MIP0_HEIGHT(rtex->resource.b.b.height0 - 1) |
+                                        S_028C74_RESOURCE_TYPE(rtex->surface.u.gfx9.resource_type);
+               surf->cb_color_attrib2 = S_028C68_MIP0_WIDTH(surf->width0 - 1) |
+                                        S_028C68_MIP0_HEIGHT(surf->height0 - 1) |
                                         S_028C68_MAX_MIP(rtex->resource.b.b.last_level);
        }
 
@@ -2248,7 +2327,7 @@ static void si_init_depth_surface(struct si_context *sctx,
        uint32_t z_info, s_info;
 
        format = si_translate_dbformat(rtex->db_render_format);
-       stencil_format = rtex->surface.flags & RADEON_SURF_SBUFFER ?
+       stencil_format = rtex->surface.has_stencil ?
                                 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
 
        assert(format != V_028040_Z_INVALID);
@@ -2261,6 +2340,7 @@ static void si_init_depth_surface(struct si_context *sctx,
        surf->db_htile_surface = 0;
 
        if (sctx->b.chip_class >= GFX9) {
+               assert(rtex->surface.u.gfx9.surf_offset == 0);
                surf->db_depth_base = rtex->resource.gpu_address >> 8;
                surf->db_stencil_base = (rtex->resource.gpu_address +
                                         rtex->surface.u.gfx9.stencil_offset) >> 8;
@@ -2276,8 +2356,7 @@ static void si_init_depth_surface(struct si_context *sctx,
                surf->db_depth_size = S_02801C_X_MAX(rtex->resource.b.b.width0 - 1) |
                                      S_02801C_Y_MAX(rtex->resource.b.b.height0 - 1);
 
-               /* Only use HTILE for the first level. */
-               if (rtex->htile_buffer && !level) {
+               if (r600_htile_enabled(rtex, level)) {
                        z_info |= S_028038_TILE_SURFACE_ENABLE(1) |
                                  S_028038_ALLOW_EXPCLEAR(1);
 
@@ -2293,7 +2372,7 @@ static void si_init_depth_surface(struct si_context *sctx,
                                s_info |= S_02803C_ITERATE_FLUSH(1);
                        }
 
-                       if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
+                       if (rtex->surface.has_stencil) {
                                /* Stencil buffer workaround ported from the SI-CI-VI code.
                                 * See that for explanation.
                                 */
@@ -2303,7 +2382,8 @@ static void si_init_depth_surface(struct si_context *sctx,
                                s_info |= S_02803C_TILE_STENCIL_DISABLE(1);
                        }
 
-                       surf->db_htile_data_base = rtex->htile_buffer->gpu_address >> 8;
+                       surf->db_htile_data_base = (rtex->resource.gpu_address +
+                                                   rtex->htile_offset) >> 8;
                        surf->db_htile_surface = S_028ABC_FULL_CACHE(1) |
                                                 S_028ABC_PIPE_ALIGNED(rtex->surface.u.gfx9.htile.pipe_aligned) |
                                                 S_028ABC_RB_ALIGNED(rtex->surface.u.gfx9.htile.rb_aligned);
@@ -2354,12 +2434,11 @@ static void si_init_depth_surface(struct si_context *sctx,
                surf->db_depth_slice = S_02805C_SLICE_TILE_MAX((levelinfo->nblk_x *
                                                                levelinfo->nblk_y) / 64 - 1);
 
-               /* Only use HTILE for the first level. */
-               if (rtex->htile_buffer && !level) {
+               if (r600_htile_enabled(rtex, level)) {
                        z_info |= S_028040_TILE_SURFACE_ENABLE(1) |
                                  S_028040_ALLOW_EXPCLEAR(1);
 
-                       if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
+                       if (rtex->surface.has_stencil) {
                                /* Workaround: For a not yet understood reason, the
                                 * combination of MSAA, fast stencil clear and stencil
                                 * decompress messes with subsequent stencil buffer
@@ -2381,7 +2460,8 @@ static void si_init_depth_surface(struct si_context *sctx,
                                s_info |= S_028044_TILE_STENCIL_DISABLE(1);
                        }
 
-                       surf->db_htile_data_base = rtex->htile_buffer->gpu_address >> 8;
+                       surf->db_htile_data_base = (rtex->resource.gpu_address +
+                                                   rtex->htile_offset) >> 8;
                        surf->db_htile_surface = S_028ABC_FULL_CACHE(1);
 
                        if (rtex->tc_compatible_htile) {
@@ -2403,6 +2483,38 @@ static void si_init_depth_surface(struct si_context *sctx,
        surf->depth_initialized = true;
 }
 
+void si_update_fb_dirtiness_after_rendering(struct si_context *sctx)
+{
+       if (sctx->decompression_enabled)
+               return;
+
+       if (sctx->framebuffer.state.zsbuf) {
+               struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
+               struct r600_texture *rtex = (struct r600_texture *)surf->texture;
+
+               rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+
+               if (rtex->surface.has_stencil)
+                       rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
+       }
+       if (sctx->framebuffer.compressed_cb_mask) {
+               struct pipe_surface *surf;
+               struct r600_texture *rtex;
+               unsigned mask = sctx->framebuffer.compressed_cb_mask;
+
+               do {
+                       unsigned i = u_bit_scan(&mask);
+                       surf = sctx->framebuffer.state.cbufs[i];
+                       rtex = (struct r600_texture*)surf->texture;
+
+                       if (rtex->fmask.size)
+                               rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+                       if (rtex->dcc_gather_statistics)
+                               rtex->separate_dcc_dirty = true;
+               } while (mask);
+       }
+}
+
 static void si_dec_framebuffer_counters(const struct pipe_framebuffer_state *state)
 {
        for (int i = 0; i < state->nr_cbufs; ++i) {
@@ -2427,8 +2539,11 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
        struct r600_texture *rtex;
        bool old_any_dst_linear = sctx->framebuffer.any_dst_linear;
        unsigned old_nr_samples = sctx->framebuffer.nr_samples;
+       bool unbound = false;
        int i;
 
+       si_update_fb_dirtiness_after_rendering(sctx);
+
        for (i = 0; i < sctx->framebuffer.state.nr_cbufs; i++) {
                if (!sctx->framebuffer.state.cbufs[i])
                        continue;
@@ -2438,18 +2553,76 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
                        vi_separate_dcc_stop_query(ctx, rtex);
        }
 
+       /* Disable DCC if the formats are incompatible. */
+       for (i = 0; i < state->nr_cbufs; i++) {
+               if (!state->cbufs[i])
+                       continue;
+
+               surf = (struct r600_surface*)state->cbufs[i];
+               rtex = (struct r600_texture*)surf->base.texture;
+
+               if (!surf->dcc_incompatible)
+                       continue;
+
+               /* Since the DCC decompression calls back into set_framebuffer-
+                * _state, we need to unbind the framebuffer, so that
+                * vi_separate_dcc_stop_query isn't called twice with the same
+                * color buffer.
+                */
+               if (!unbound) {
+                       util_copy_framebuffer_state(&sctx->framebuffer.state, NULL);
+                       unbound = true;
+               }
+
+               if (vi_dcc_enabled(rtex, surf->base.u.tex.level))
+                       if (!r600_texture_disable_dcc(&sctx->b, rtex))
+                               sctx->b.decompress_dcc(ctx, rtex);
+
+               surf->dcc_incompatible = false;
+       }
+
        /* Only flush TC when changing the framebuffer state, because
         * the only client not using TC that can change textures is
         * the framebuffer.
         *
-        * Flush all CB and DB caches here because all buffers can be used
-        * for write by both TC (with shader image stores) and CB/DB.
+        * Wait for compute shaders because of possible transitions:
+        * - FB write -> shader read
+        * - shader write -> FB read
+        *
+        * DB caches are flushed on demand (using si_decompress_textures).
+        *
+        * When MSAA is enabled, CB and TC caches are flushed on demand
+        * (after FMASK decompression). Shader write -> FB read transitions
+        * cannot happen for MSAA textures, because MSAA shader images are
+        * not supported.
+        *
+        * Only flush and wait for CB if there is actually a bound color buffer.
         */
-       sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1 |
-                        SI_CONTEXT_INV_GLOBAL_L2 |
-                        SI_CONTEXT_FLUSH_AND_INV_CB |
-                        SI_CONTEXT_FLUSH_AND_INV_DB |
-                        SI_CONTEXT_CS_PARTIAL_FLUSH;
+       if (sctx->framebuffer.nr_samples <= 1 &&
+           sctx->framebuffer.state.nr_cbufs)
+               si_make_CB_shader_coherent(sctx, sctx->framebuffer.nr_samples,
+                                          sctx->framebuffer.CB_has_shader_readable_metadata);
+
+       sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
+
+       /* u_blitter doesn't invoke depth decompression when it does multiple
+        * blits in a row, but the only case when it matters for DB is when
+        * doing generate_mipmap. So here we flush DB manually between
+        * individual generate_mipmap blits.
+        * Note that lower mipmap levels aren't compressed.
+        */
+       if (sctx->generate_mipmap_for_depth) {
+               si_make_DB_shader_coherent(sctx, 1, false,
+                                          sctx->framebuffer.DB_has_shader_readable_metadata);
+       } else if (sctx->b.chip_class == GFX9) {
+               /* It appears that DB metadata "leaks" in a sequence of:
+                *  - depth clear
+                *  - DCC decompress for shader image writes (with DB disabled)
+                *  - render with DEPTH_BEFORE_SHADER=1
+                * Flushing DB metadata works around the problem.
+                */
+               sctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_DB_META;
+       }
 
        /* Take the maximum of the old and new count. If the new count is lower,
         * dirtying is needed to disable the unbound colorbuffers.
@@ -2473,6 +2646,8 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
        sctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);
        sctx->framebuffer.log_samples = util_logbase2(sctx->framebuffer.nr_samples);
        sctx->framebuffer.any_dst_linear = false;
+       sctx->framebuffer.CB_has_shader_readable_metadata = false;
+       sctx->framebuffer.DB_has_shader_readable_metadata = false;
 
        for (i = 0; i < state->nr_cbufs; i++) {
                if (!state->cbufs[i])
@@ -2507,6 +2682,9 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
                if (rtex->surface.is_linear)
                        sctx->framebuffer.any_dst_linear = true;
 
+               if (vi_dcc_enabled(rtex, surf->base.u.tex.level))
+                       sctx->framebuffer.CB_has_shader_readable_metadata = true;
+
                r600_context_add_resource_size(ctx, surf->base.texture);
 
                p_atomic_inc(&rtex->framebuffers_bound);
@@ -2525,6 +2703,10 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
                if (!surf->depth_initialized) {
                        si_init_depth_surface(sctx, surf);
                }
+
+               if (vi_tc_compat_htile_enabled(rtex, surf->base.u.tex.level))
+                       sctx->framebuffer.DB_has_shader_readable_metadata = true;
+
                r600_context_add_resource_size(ctx, surf->base.texture);
        }
 
@@ -2532,6 +2714,9 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
        si_mark_atom_dirty(sctx, &sctx->cb_render_state);
        si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
 
+       if (sctx->screen->dpbb_allowed)
+               si_mark_atom_dirty(sctx, &sctx->dpbb_state);
+
        if (sctx->framebuffer.any_dst_linear != old_any_dst_linear)
                si_mark_atom_dirty(sctx, &sctx->msaa_config);
 
@@ -2567,9 +2752,14 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
                si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
        }
 
-       sctx->need_check_render_feedback = true;
        sctx->do_update_shaders = true;
-       sctx->framebuffer.do_update_surf_dirtiness = true;
+
+       if (!sctx->decompression_enabled) {
+               /* Prevent textures decompression when the framebuffer state
+                * changes come from the decompression passes themselves.
+                */
+               sctx->need_check_render_feedback = true;
+       }
 }
 
 static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom *atom)
@@ -2617,16 +2807,18 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
 
                /* Compute mutable surface parameters. */
                cb_color_base = tex->resource.gpu_address >> 8;
-               cb_color_fmask = cb_color_base;
+               cb_color_fmask = 0;
                cb_dcc_base = 0;
                cb_color_info = cb->cb_color_info | tex->cb_color_info;
                cb_color_attrib = cb->cb_color_attrib;
 
-               if (tex->fmask.size)
+               if (tex->fmask.size) {
                        cb_color_fmask = (tex->resource.gpu_address + tex->fmask.offset) >> 8;
+                       cb_color_fmask |= tex->fmask.tile_swizzle;
+               }
 
                /* Set up DCC. */
-               if (tex->dcc_offset && cb->base.u.tex.level < tex->surface.num_dcc_levels) {
+               if (vi_dcc_enabled(tex, cb->base.u.tex.level)) {
                        bool is_msaa_resolve_dst = state->cbufs[0] &&
                                                   state->cbufs[0]->texture->nr_samples > 1 &&
                                                   state->cbufs[1] == &cb->base &&
@@ -2637,6 +2829,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
 
                        cb_dcc_base = ((!tex->dcc_separate_buffer ? tex->resource.gpu_address : 0) +
                                       tex->dcc_offset) >> 8;
+                       cb_dcc_base |= tex->surface.tile_swizzle;
                }
 
                if (sctx->b.chip_class >= GFX9) {
@@ -2648,6 +2841,10 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
                                meta = tex->surface.u.gfx9.cmask;
 
                        /* Set mutable surface parameters. */
+                       cb_color_base += tex->surface.u.gfx9.surf_offset >> 8;
+                       cb_color_base |= tex->surface.tile_swizzle;
+                       if (!tex->fmask.size)
+                               cb_color_fmask = cb_color_base;
                        cb_color_attrib |= S_028C74_COLOR_SW_MODE(tex->surface.u.gfx9.surf.swizzle_mode) |
                                           S_028C74_FMASK_SW_MODE(tex->surface.u.gfx9.fmask.swizzle_mode) |
                                           S_028C74_RB_ALIGNED(meta.rb_aligned) |
@@ -2680,6 +2877,12 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
                        unsigned cb_color_pitch, cb_color_slice, cb_color_fmask_slice;
 
                        cb_color_base += level_info->offset >> 8;
+                       /* Only macrotiled modes can set tile swizzle. */
+                       if (level_info->mode == RADEON_SURF_MODE_2D)
+                               cb_color_base |= tex->surface.tile_swizzle;
+
+                       if (!tex->fmask.size)
+                               cb_color_fmask = cb_color_base;
                        if (cb_dcc_base)
                                cb_dcc_base += level_info->dcc_offset >> 8;
 
@@ -2740,12 +2943,6 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
                                              RADEON_PRIO_DEPTH_BUFFER_MSAA :
                                              RADEON_PRIO_DEPTH_BUFFER);
 
-               if (zb->db_htile_data_base) {
-                       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-                                             rtex->htile_buffer, RADEON_USAGE_READWRITE,
-                                             RADEON_PRIO_HTILE);
-               }
-
                if (sctx->b.chip_class >= GFX9) {
                        radeon_set_context_reg_seq(cs, R_028014_DB_HTILE_DATA_BASE, 3);
                        radeon_emit(cs, zb->db_htile_data_base);        /* DB_HTILE_DATA_BASE */
@@ -2805,6 +3002,11 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
        radeon_set_context_reg(cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
                               S_028208_BR_X(state->width) | S_028208_BR_Y(state->height));
 
+       if (sctx->screen->dfsm_allowed) {
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
+       }
+
        sctx->framebuffer.dirty_cbufs = 0;
        sctx->framebuffer.dirty_zsbuf = false;
 }
@@ -2814,6 +3016,7 @@ static void si_emit_msaa_sample_locs(struct si_context *sctx,
 {
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        unsigned nr_samples = sctx->framebuffer.nr_samples;
+       bool has_msaa_sample_loc_bug = sctx->screen->has_msaa_sample_loc_bug;
 
        /* Smoothing (only possible with nr_samples == 1) uses the same
         * sample locations as the MSAA it simulates.
@@ -2824,11 +3027,10 @@ static void si_emit_msaa_sample_locs(struct si_context *sctx,
        /* On Polaris, the small primitive filter uses the sample locations
         * even when MSAA is off, so we need to make sure they're set to 0.
         */
-       if (sctx->b.family >= CHIP_POLARIS10)
+       if (has_msaa_sample_loc_bug)
                nr_samples = MAX2(nr_samples, 1);
 
-       if (nr_samples >= 1 &&
-           (nr_samples != sctx->msaa_sample_locs.nr_samples)) {
+       if (nr_samples != sctx->msaa_sample_locs.nr_samples) {
                sctx->msaa_sample_locs.nr_samples = nr_samples;
                cayman_emit_msaa_sample_locs(cs, nr_samples);
        }
@@ -2837,13 +3039,16 @@ static void si_emit_msaa_sample_locs(struct si_context *sctx,
                struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
                unsigned small_prim_filter_cntl =
                        S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
-                       S_028830_LINE_FILTER_DISABLE(sctx->b.chip_class == VI); /* line bug */
+                       /* line bug */
+                       S_028830_LINE_FILTER_DISABLE(sctx->b.family <= CHIP_POLARIS12);
 
                /* The alternative of setting sample locations to 0 would
                 * require a DB flush to avoid Z errors, see
                 * https://bugs.freedesktop.org/show_bug.cgi?id=96908
                 */
-               if (sctx->framebuffer.nr_samples > 1 && rs && !rs->multisample_enable)
+               if (has_msaa_sample_loc_bug &&
+                   sctx->framebuffer.nr_samples > 1 &&
+                   rs && !rs->multisample_enable)
                        small_prim_filter_cntl &= C_028830_SMALL_PRIM_FILTER_ENABLE;
 
                radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL,
@@ -2873,6 +3078,12 @@ static void si_emit_msaa_config(struct si_context *sctx, struct r600_atom *atom)
                                sctx->ps_iter_samples,
                                sctx->smoothing_enabled ? SI_NUM_SMOOTH_AA_SAMPLES : 0,
                                sc_mode_cntl_1);
+
+       /* GFX9: Flush DFSM when the AA mode changes. */
+       if (sctx->screen->dfsm_allowed) {
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
+       }
 }
 
 static void si_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
@@ -2887,6 +3098,8 @@ static void si_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
 
        if (sctx->framebuffer.nr_samples > 1)
                si_mark_atom_dirty(sctx, &sctx->msaa_config);
+       if (sctx->screen->dpbb_allowed)
+               si_mark_atom_dirty(sctx, &sctx->dpbb_state);
 }
 
 /*
@@ -2918,7 +3131,40 @@ si_make_buffer_descriptor(struct si_screen *screen, struct r600_resource *buf,
        num_records = size / stride;
        num_records = MIN2(num_records, (buf->b.b.width0 - offset) / stride);
 
-       if (screen->b.chip_class == VI)
+       /* The NUM_RECORDS field has a different meaning depending on the chip,
+        * instruction type, STRIDE, and SWIZZLE_ENABLE.
+        *
+        * SI-CIK:
+        * - If STRIDE == 0, it's in byte units.
+        * - If STRIDE != 0, it's in units of STRIDE, used with inst.IDXEN.
+        *
+        * VI:
+        * - For SMEM and STRIDE == 0, it's in byte units.
+        * - For SMEM and STRIDE != 0, it's in units of STRIDE.
+        * - For VMEM and STRIDE == 0 or SWIZZLE_ENABLE == 0, it's in byte units.
+        * - For VMEM and STRIDE != 0 and SWIZZLE_ENABLE == 1, it's in units of STRIDE.
+        * NOTE: There is incompatibility between VMEM and SMEM opcodes due to SWIZZLE_-
+        *       ENABLE. The workaround is to set STRIDE = 0 if SWIZZLE_ENABLE == 0 when
+        *       using SMEM. This can be done in the shader by clearing STRIDE with s_and.
+        *       That way the same descriptor can be used by both SMEM and VMEM.
+        *
+        * GFX9:
+        * - For SMEM and STRIDE == 0, it's in byte units.
+        * - For SMEM and STRIDE != 0, it's in units of STRIDE.
+        * - For VMEM and inst.IDXEN == 0 or STRIDE == 0, it's in byte units.
+        * - For VMEM and inst.IDXEN == 1 and STRIDE != 0, it's in units of STRIDE.
+        */
+       if (screen->b.chip_class >= GFX9)
+               /* When vindex == 0, LLVM sets IDXEN = 0, thus changing units
+                * from STRIDE to bytes. This works around it by setting
+                * NUM_RECORDS to at least the size of one element, so that
+                * the first element is readable when IDXEN == 0.
+                *
+                * TODO: Fix this in LLVM, but do we need a new intrinsic where
+                *       IDXEN is enforced?
+                */
+               num_records = num_records ? MAX2(num_records, stride) : 0;
+       else if (screen->b.chip_class == VI)
                num_records *= stride;
 
        state[4] = 0;
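To make the GFX9 NUM_RECORDS workaround above concrete, here is a small standalone sketch (the function name and the MAX2 definition are illustrative, not the driver's): with size = 64 bytes and stride = 16 bytes there are 4 records, but because an IDXEN == 0 load interprets NUM_RECORDS in byte units, the value is raised to one full stride so the first element stays readable.

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

/* Sketch of the unit adjustment described in the comment above. */
static unsigned gfx9_buffer_num_records_sketch(unsigned size, unsigned stride)
{
	unsigned num_records = stride ? size / stride : size;

	/* e.g. size = 64, stride = 16 -> 4 records, raised to 16 so that a
	 * byte-unit interpretation (IDXEN == 0) still covers one element;
	 * an empty buffer stays 0. */
	return num_records ? MAX2(num_records, stride) : 0;
}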
@@ -3085,10 +3331,17 @@ si_make_texture_descriptor(struct si_screen *screen,
                data_format = 0;
        }
 
+       /* S8 with Z32 HTILE needs a special format. */
+       if (screen->b.chip_class >= GFX9 &&
+           pipe_format == PIPE_FORMAT_S8_UINT &&
+           tex->tc_compatible_htile)
+               data_format = V_008F14_IMG_DATA_FORMAT_S8_32;
+
        if (!sampler &&
            (res->target == PIPE_TEXTURE_CUBE ||
             res->target == PIPE_TEXTURE_CUBE_ARRAY ||
-            res->target == PIPE_TEXTURE_3D)) {
+            (screen->b.chip_class <= VI &&
+             res->target == PIPE_TEXTURE_3D))) {
                /* For the purpose of shader images, treat cube maps and 3D
                 * textures as 2D arrays. For 3D textures, the address
                 * calculations for mipmaps are different, so we rely on the
@@ -3098,7 +3351,7 @@ si_make_texture_descriptor(struct si_screen *screen,
 
                assert(res->target != PIPE_TEXTURE_3D || (first_level == 0 && last_level == 0));
        } else {
-               type = si_tex_dim(res->target, target, res->nr_samples);
+               type = si_tex_dim(screen, tex, target, res->nr_samples);
        }
 
        if (type == V_008F1C_SQ_RSRC_IMG_1D_ARRAY) {
@@ -3188,7 +3441,7 @@ si_make_texture_descriptor(struct si_screen *screen,
                                num_format = V_008F14_IMG_FMASK_32_8_8;
                                break;
                        default:
-                               assert(0);
+                               unreachable("invalid nr_samples");
                        }
                } else {
                        switch (res->nr_samples) {
@@ -3202,12 +3455,12 @@ si_make_texture_descriptor(struct si_screen *screen,
                                data_format = V_008F14_IMG_DATA_FORMAT_FMASK32_S8_F8;
                                break;
                        default:
-                               assert(0);
+                               unreachable("invalid nr_samples");
                        }
                        num_format = V_008F14_IMG_NUM_FORMAT_UINT;
                }
 
-               fmask_state[0] = va >> 8;
+               fmask_state[0] = (va >> 8) | tex->fmask.tile_swizzle;
                fmask_state[1] = S_008F14_BASE_ADDRESS_HI(va >> 40) |
                                 S_008F14_DATA_FORMAT_GFX6(data_format) |
                                 S_008F14_NUM_FORMAT_GFX6(num_format);
@@ -3217,7 +3470,7 @@ si_make_texture_descriptor(struct si_screen *screen,
                                 S_008F1C_DST_SEL_Y(V_008F1C_SQ_SEL_X) |
                                 S_008F1C_DST_SEL_Z(V_008F1C_SQ_SEL_X) |
                                 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_X) |
-                                S_008F1C_TYPE(si_tex_dim(res->target, target, 0));
+                                S_008F1C_TYPE(si_tex_dim(screen, tex, target, 0));
                fmask_state[4] = 0;
                fmask_state[5] = S_008F24_BASE_ARRAY(first_layer);
                fmask_state[6] = 0;
@@ -3306,7 +3559,7 @@ si_create_sampler_view_custom(struct pipe_context *ctx,
        height = height0;
        depth = texture->depth0;
 
-       if (force_level) {
+       if (sctx->b.chip_class <= VI && force_level) {
                assert(force_level == first_level &&
                       force_level == last_level);
                base_level = force_level;
@@ -3374,9 +3627,10 @@ si_create_sampler_view_custom(struct pipe_context *ctx,
                }
        }
 
-       vi_dcc_disable_if_incompatible_format(&sctx->b, texture,
-                                             state->u.tex.first_level,
-                                             state->format);
+       view->dcc_incompatible =
+               vi_dcc_formats_are_incompatible(texture,
+                                               state->u.tex.first_level,
+                                               state->format);
 
        si_make_texture_descriptor(sctx->screen, tmp, true,
                                   state->target, pipe_format, state_swizzle,
@@ -3572,7 +3826,7 @@ static void *si_create_vertex_elements(struct pipe_context *ctx,
                                       const struct pipe_vertex_element *elements)
 {
        struct si_screen *sscreen = (struct si_screen*)ctx->screen;
-       struct si_vertex_element *v = CALLOC_STRUCT(si_vertex_element);
+       struct si_vertex_elements *v = CALLOC_STRUCT(si_vertex_elements);
        bool used[SI_NUM_VERTEX_BUFFERS] = {};
        int i;
 
@@ -3596,6 +3850,16 @@ static void *si_create_vertex_elements(struct pipe_context *ctx,
                        return NULL;
                }
 
+               if (elements[i].instance_divisor) {
+                       v->uses_instance_divisors = true;
+                       v->instance_divisors[i] = elements[i].instance_divisor;
+
+                       if (v->instance_divisors[i] == 1)
+                               v->instance_divisor_is_one |= 1u << i;
+                       else
+                               v->instance_divisor_is_fetched |= 1u << i;
+               }
+
                if (!used[vbo_index]) {
                        v->first_vb_use_mask |= 1 << i;
                        used[vbo_index] = true;
@@ -3609,6 +3873,8 @@ static void *si_create_vertex_elements(struct pipe_context *ctx,
                memcpy(swizzle, desc->swizzle, sizeof(swizzle));
 
                v->format_size[i] = desc->block.bits / 8;
+               v->src_offset[i] = elements[i].src_offset;
+               v->vertex_buffer_index[i] = vbo_index;
 
                /* The hardware always treats the 2-bit alpha channel as
                 * unsigned, so a shader workaround is needed. The affected
@@ -3701,19 +3967,35 @@ static void *si_create_vertex_elements(struct pipe_context *ctx,
                                   S_008F0C_NUM_FORMAT(num_format) |
                                   S_008F0C_DATA_FORMAT(data_format);
        }
-       memcpy(v->elements, elements, sizeof(struct pipe_vertex_element) * count);
-
        return v;
 }
 
 static void si_bind_vertex_elements(struct pipe_context *ctx, void *state)
 {
        struct si_context *sctx = (struct si_context *)ctx;
-       struct si_vertex_element *v = (struct si_vertex_element*)state;
+       struct si_vertex_elements *old = sctx->vertex_elements;
+       struct si_vertex_elements *v = (struct si_vertex_elements*)state;
 
        sctx->vertex_elements = v;
        sctx->vertex_buffers_dirty = true;
-       sctx->do_update_shaders = true;
+
+       if (v &&
+           (!old ||
+            old->count != v->count ||
+            old->uses_instance_divisors != v->uses_instance_divisors ||
+            v->uses_instance_divisors || /* we don't check which divisors changed */
+            memcmp(old->fix_fetch, v->fix_fetch, sizeof(v->fix_fetch[0]) * v->count)))
+               sctx->do_update_shaders = true;
+
+       if (v && v->instance_divisor_is_fetched) {
+               struct pipe_constant_buffer cb;
+
+               cb.buffer = NULL;
+               cb.user_buffer = v->instance_divisors;
+               cb.buffer_offset = 0;
+               cb.buffer_size = sizeof(uint32_t) * v->count;
+               si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS, &cb);
+       }
 }
 
 static void si_delete_vertex_element(struct pipe_context *ctx, void *state)
@@ -3739,59 +4021,23 @@ static void si_set_vertex_buffers(struct pipe_context *ctx,
                for (i = 0; i < count; i++) {
                        const struct pipe_vertex_buffer *src = buffers + i;
                        struct pipe_vertex_buffer *dsti = dst + i;
-
-                       if (unlikely(src->user_buffer)) {
-                               /* Zero-stride attribs only. */
-                               assert(src->stride == 0);
-
-                               /* Assume that the user_buffer comes from
-                                * gl_current_attrib, which implies it has
-                                * 4 * 8 bytes (for dvec4 attributes).
-                                *
-                                * Use const_uploader to upload into VRAM directly.
-                                */
-                               u_upload_data(sctx->b.b.const_uploader, 0, 32, 32,
-                                             src->user_buffer,
-                                             &dsti->buffer_offset,
-                                             &dsti->buffer);
-                               dsti->stride = 0;
-                       } else {
-                               struct pipe_resource *buf = src->buffer;
-
-                               pipe_resource_reference(&dsti->buffer, buf);
-                               dsti->buffer_offset = src->buffer_offset;
-                               dsti->stride = src->stride;
-                               r600_context_add_resource_size(ctx, buf);
-                               if (buf)
-                                       r600_resource(buf)->bind_history |= PIPE_BIND_VERTEX_BUFFER;
-                       }
+                       struct pipe_resource *buf = src->buffer.resource;
+
+                       pipe_resource_reference(&dsti->buffer.resource, buf);
+                       dsti->buffer_offset = src->buffer_offset;
+                       dsti->stride = src->stride;
+                       r600_context_add_resource_size(ctx, buf);
+                       if (buf)
+                               r600_resource(buf)->bind_history |= PIPE_BIND_VERTEX_BUFFER;
                }
        } else {
                for (i = 0; i < count; i++) {
-                       pipe_resource_reference(&dst[i].buffer, NULL);
+                       pipe_resource_reference(&dst[i].buffer.resource, NULL);
                }
        }
        sctx->vertex_buffers_dirty = true;
 }
 
-static void si_set_index_buffer(struct pipe_context *ctx,
-                               const struct pipe_index_buffer *ib)
-{
-       struct si_context *sctx = (struct si_context *)ctx;
-
-       if (ib) {
-               struct pipe_resource *buf = ib->buffer;
-
-               pipe_resource_reference(&sctx->index_buffer.buffer, buf);
-               memcpy(&sctx->index_buffer, ib, sizeof(*ib));
-               r600_context_add_resource_size(ctx, buf);
-               if (buf)
-                       r600_resource(buf)->bind_history |= PIPE_BIND_INDEX_BUFFER;
-       } else {
-               pipe_resource_reference(&sctx->index_buffer.buffer, NULL);
-       }
-}
-
 /*
  * Misc
  */
@@ -3823,10 +4069,13 @@ static void si_texture_barrier(struct pipe_context *ctx, unsigned flags)
 {
        struct si_context *sctx = (struct si_context *)ctx;
 
-       sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1 |
-                        SI_CONTEXT_INV_GLOBAL_L2 |
-                        SI_CONTEXT_FLUSH_AND_INV_CB;
-       sctx->framebuffer.do_update_surf_dirtiness = true;
+       si_update_fb_dirtiness_after_rendering(sctx);
+
+       /* Multisample surfaces are flushed in si_decompress_textures. */
+       if (sctx->framebuffer.nr_samples <= 1 &&
+           sctx->framebuffer.state.nr_cbufs)
+               si_make_CB_shader_coherent(sctx, sctx->framebuffer.nr_samples,
+                                          sctx->framebuffer.CB_has_shader_readable_metadata);
 }
 
 /* This only ensures coherency for shader image/buffer stores. */
@@ -3863,12 +4112,21 @@ static void si_memory_barrier(struct pipe_context *ctx, unsigned flags)
                        sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
        }
 
-       if (flags & PIPE_BARRIER_FRAMEBUFFER)
-               sctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_CB |
-                                SI_CONTEXT_FLUSH_AND_INV_DB;
+       /* MSAA color, any depth and any stencil are flushed in
+        * si_decompress_textures when needed.
+        */
+       if (flags & PIPE_BARRIER_FRAMEBUFFER &&
+           sctx->framebuffer.nr_samples <= 1 &&
+           sctx->framebuffer.state.nr_cbufs) {
+               sctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_CB;
+
+               if (sctx->b.chip_class <= VI)
+                       sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+       }
 
-       if (flags & (PIPE_BARRIER_FRAMEBUFFER |
-                    PIPE_BARRIER_INDIRECT_BUFFER))
+       /* Indirect buffers use TC L2 on GFX9, but not older hw. */
+       if (sctx->screen->b.chip_class <= VI &&
+           flags & PIPE_BARRIER_INDIRECT_BUFFER)
                sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
 }
 
@@ -3901,6 +4159,7 @@ void si_init_state_functions(struct si_context *sctx)
        si_init_atom(sctx, &sctx->framebuffer.atom, &sctx->atoms.s.framebuffer, si_emit_framebuffer_state);
        si_init_atom(sctx, &sctx->msaa_sample_locs.atom, &sctx->atoms.s.msaa_sample_locs, si_emit_msaa_sample_locs);
        si_init_atom(sctx, &sctx->db_render_state, &sctx->atoms.s.db_render_state, si_emit_db_render_state);
+       si_init_atom(sctx, &sctx->dpbb_state, &sctx->atoms.s.dpbb_state, si_emit_dpbb_state);
        si_init_atom(sctx, &sctx->msaa_config, &sctx->atoms.s.msaa_config, si_emit_msaa_config);
        si_init_atom(sctx, &sctx->sample_mask.atom, &sctx->atoms.s.sample_mask, si_emit_sample_mask);
        si_init_atom(sctx, &sctx->cb_render_state, &sctx->atoms.s.cb_render_state, si_emit_cb_render_state);
@@ -3924,8 +4183,8 @@ void si_init_state_functions(struct si_context *sctx)
 
        sctx->custom_dsa_flush = si_create_db_flush_dsa(sctx);
        sctx->custom_blend_resolve = si_create_blend_custom(sctx, V_028808_CB_RESOLVE);
-       sctx->custom_blend_decompress = si_create_blend_custom(sctx, V_028808_CB_FMASK_DECOMPRESS);
-       sctx->custom_blend_fastclear = si_create_blend_custom(sctx, V_028808_CB_ELIMINATE_FAST_CLEAR);
+       sctx->custom_blend_fmask_decompress = si_create_blend_custom(sctx, V_028808_CB_FMASK_DECOMPRESS);
+       sctx->custom_blend_eliminate_fastclear = si_create_blend_custom(sctx, V_028808_CB_ELIMINATE_FAST_CLEAR);
        sctx->custom_blend_dcc_decompress = si_create_blend_custom(sctx, V_028808_CB_DCC_DECOMPRESS);
 
        sctx->b.b.set_clip_state = si_set_clip_state;
@@ -3946,7 +4205,6 @@ void si_init_state_functions(struct si_context *sctx)
        sctx->b.b.bind_vertex_elements_state = si_bind_vertex_elements;
        sctx->b.b.delete_vertex_elements_state = si_delete_vertex_element;
        sctx->b.b.set_vertex_buffers = si_set_vertex_buffers;
-       sctx->b.b.set_index_buffer = si_set_index_buffer;
 
        sctx->b.b.texture_barrier = si_texture_barrier;
        sctx->b.b.memory_barrier = si_memory_barrier;
@@ -4022,12 +4280,15 @@ static void si_query_opaque_metadata(struct r600_common_screen *rscreen,
 
        /* Dwords [2:9] contain the image descriptor. */
        memcpy(&md->metadata[2], desc, sizeof(desc));
+       md->size_metadata = 10 * 4;
 
        /* Dwords [10:..] contain the mipmap level offsets. */
-       for (i = 0; i <= res->last_level; i++)
-               md->metadata[10+i] = rtex->surface.u.legacy.level[i].offset >> 8;
+       if (rscreen->chip_class <= VI) {
+               for (i = 0; i <= res->last_level; i++)
+                       md->metadata[10+i] = rtex->surface.u.legacy.level[i].offset >> 8;
 
-       md->size_metadata = (11 + res->last_level) * 4;
+               md->size_metadata += (1 + res->last_level) * 4;
+       }
 }
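
For reference, the size bookkeeping in si_query_opaque_metadata now works out as sketched below: a fixed 10-dword header (dwords [0:1] plus the 8-dword image descriptor in dwords [2:9]), and on pre-GFX9 chips one extra dword per mip level. The function name and the boolean parameter in this sketch are purely illustrative.

#include <stdio.h>

/* Sketch of the metadata sizing above: a fixed 10-dword header, and on VI
 * and older one additional dword per mip level for the level offsets.
 */
static unsigned opaque_metadata_size(int chip_is_vi_or_older, unsigned last_level)
{
        unsigned size = 10 * 4;
        if (chip_is_vi_or_older)
                size += (1 + last_level) * 4;
        return size;
}

int main(void)
{
        printf("VI,   12 mips: %u bytes\n", opaque_metadata_size(1, 11)); /* 88 */
        printf("GFX9, 12 mips: %u bytes\n", opaque_metadata_size(0, 11)); /* 40 */
        return 0;
}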
 
 static void si_apply_opaque_metadata(struct r600_common_screen *rscreen,
@@ -4063,6 +4324,25 @@ void si_init_screen_state_functions(struct si_screen *sscreen)
        sscreen->b.apply_opaque_metadata = si_apply_opaque_metadata;
 }
 
+static void si_set_grbm_gfx_index(struct si_context *sctx,
+                                 struct si_pm4_state *pm4, unsigned value)
+{
+       unsigned reg = sctx->b.chip_class >= CIK ? R_030800_GRBM_GFX_INDEX :
+                                                  GRBM_GFX_INDEX;
+       si_pm4_set_reg(pm4, reg, value);
+}
+
+static void si_set_grbm_gfx_index_se(struct si_context *sctx,
+                                    struct si_pm4_state *pm4, unsigned se)
+{
+       assert(se == ~0 || se < sctx->screen->b.info.max_se);
+       si_set_grbm_gfx_index(sctx, pm4,
+                             (se == ~0 ? S_030800_SE_BROADCAST_WRITES(1) :
+                                         S_030800_SE_INDEX(se)) |
+                             S_030800_SH_BROADCAST_WRITES(1) |
+                             S_030800_INSTANCE_BROADCAST_WRITES(1));
+}
+
 static void
 si_write_harvested_raster_configs(struct si_context *sctx,
                                  struct si_pm4_state *pm4,
@@ -4165,28 +4445,12 @@ si_write_harvested_raster_configs(struct si_context *sctx,
                        }
                }
 
-               /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
-               if (sctx->b.chip_class < CIK)
-                       si_pm4_set_reg(pm4, GRBM_GFX_INDEX,
-                                      SE_INDEX(se) | SH_BROADCAST_WRITES |
-                                      INSTANCE_BROADCAST_WRITES);
-               else
-                       si_pm4_set_reg(pm4, R_030800_GRBM_GFX_INDEX,
-                                      S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
-                                      S_030800_INSTANCE_BROADCAST_WRITES(1));
+               si_set_grbm_gfx_index_se(sctx, pm4, se);
                si_pm4_set_reg(pm4, R_028350_PA_SC_RASTER_CONFIG, raster_config_se);
        }
+       si_set_grbm_gfx_index(sctx, pm4, ~0);
 
-       /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
-       if (sctx->b.chip_class < CIK)
-               si_pm4_set_reg(pm4, GRBM_GFX_INDEX,
-                              SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
-                              INSTANCE_BROADCAST_WRITES);
-       else {
-               si_pm4_set_reg(pm4, R_030800_GRBM_GFX_INDEX,
-                              S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
-                              S_030800_INSTANCE_BROADCAST_WRITES(1));
-
+       if (sctx->b.chip_class >= CIK) {
                if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
                                     (!se_mask[2] && !se_mask[3]))) {
                        raster_config_1 &= C_028354_SE_PAIR_MAP;
@@ -4204,46 +4468,14 @@ si_write_harvested_raster_configs(struct si_context *sctx,
        }
 }
 
-static void si_init_config(struct si_context *sctx)
+static void si_set_raster_config(struct si_context *sctx, struct si_pm4_state *pm4)
 {
        struct si_screen *sscreen = sctx->screen;
        unsigned num_rb = MIN2(sctx->screen->b.info.num_render_backends, 16);
        unsigned rb_mask = sctx->screen->b.info.enabled_rb_mask;
        unsigned raster_config, raster_config_1;
-       uint64_t border_color_va = sctx->border_color_buffer->gpu_address;
-       struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
-
-       if (!pm4)
-               return;
 
-       si_pm4_cmd_begin(pm4, PKT3_CONTEXT_CONTROL);
-       si_pm4_cmd_add(pm4, CONTEXT_CONTROL_LOAD_ENABLE(1));
-       si_pm4_cmd_add(pm4, CONTEXT_CONTROL_SHADOW_ENABLE(1));
-       si_pm4_cmd_end(pm4, false);
-
-       si_pm4_set_reg(pm4, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
-       si_pm4_set_reg(pm4, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
-
-       /* FIXME calculate these values somehow ??? */
-       si_pm4_set_reg(pm4, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
-       si_pm4_set_reg(pm4, R_028A58_VGT_ES_PER_GS, 0x40);
-       si_pm4_set_reg(pm4, R_028A5C_VGT_GS_PER_VS, 0x2);
-
-       si_pm4_set_reg(pm4, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
-       si_pm4_set_reg(pm4, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
-
-       si_pm4_set_reg(pm4, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
-       si_pm4_set_reg(pm4, R_028AB8_VGT_VTX_CNT_EN, 0x0);
-       if (sctx->b.chip_class < CIK)
-               si_pm4_set_reg(pm4, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
-                              S_008A14_CLIP_VTX_REORDER_ENA(1));
-
-       si_pm4_set_reg(pm4, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
-       si_pm4_set_reg(pm4, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);
-
-       si_pm4_set_reg(pm4, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
-
-       switch (sctx->screen->b.family) {
+       switch (sctx->b.family) {
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
                raster_config = 0x2a00126a;
@@ -4315,104 +4547,181 @@ static void si_init_config(struct si_context *sctx)
                raster_config_1 = 0x00000000;
                break;
        default:
-               if (sctx->b.chip_class <= VI) {
-                       fprintf(stderr,
-                               "radeonsi: Unknown GPU, using 0 for raster_config\n");
-                       raster_config = 0x00000000;
-                       raster_config_1 = 0x00000000;
-               }
-               break;
+               fprintf(stderr,
+                       "radeonsi: Unknown GPU, using 0 for raster_config\n");
+               raster_config = 0x00000000;
+               raster_config_1 = 0x00000000;
        }
 
+       if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
+               /* Always use the default config when all backends are enabled
+                * (or when we failed to determine the enabled backends).
+                */
+               si_pm4_set_reg(pm4, R_028350_PA_SC_RASTER_CONFIG,
+                              raster_config);
+               if (sctx->b.chip_class >= CIK)
+                       si_pm4_set_reg(pm4, R_028354_PA_SC_RASTER_CONFIG_1,
+                                      raster_config_1);
+       } else {
+               si_write_harvested_raster_configs(sctx, pm4, raster_config, raster_config_1);
+       }
+}
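
The split-out si_set_raster_config keeps the old decision: if every render backend is enabled (or the enabled-RB mask is unknown), broadcast the per-family default; otherwise fall back to si_write_harvested_raster_configs. A tiny sketch of that check, using the compiler's popcount builtin in place of util_bitcount():

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: mirrors the "use the default config?" check above. */
static bool use_default_raster_config(unsigned rb_mask, unsigned num_rb)
{
        /* No mask known, or at least as many enabled RBs as expected:
         * nothing is harvested, so the broadcast default is correct.
         */
        return !rb_mask || (unsigned)__builtin_popcount(rb_mask) >= num_rb;
}

int main(void)
{
        printf("%d\n", use_default_raster_config(0xf, 4)); /* 1: all RBs enabled */
        printf("%d\n", use_default_raster_config(0x7, 4)); /* 0: one RB harvested */
        return 0;
}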
+
+static void si_init_config(struct si_context *sctx)
+{
+       struct si_screen *sscreen = sctx->screen;
+       uint64_t border_color_va = sctx->border_color_buffer->gpu_address;
+       bool has_clear_state = sscreen->has_clear_state;
+       struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
+
+       /* Only SI can disable CLEAR_STATE for now. */
+       assert(has_clear_state || sscreen->b.chip_class == SI);
+
+       if (!pm4)
+               return;
+
+       si_pm4_cmd_begin(pm4, PKT3_CONTEXT_CONTROL);
+       si_pm4_cmd_add(pm4, CONTEXT_CONTROL_LOAD_ENABLE(1));
+       si_pm4_cmd_add(pm4, CONTEXT_CONTROL_SHADOW_ENABLE(1));
+       si_pm4_cmd_end(pm4, false);
+
+       if (has_clear_state) {
+               si_pm4_cmd_begin(pm4, PKT3_CLEAR_STATE);
+               si_pm4_cmd_add(pm4, 0);
+               si_pm4_cmd_end(pm4, false);
+       }
+
+       if (sctx->b.chip_class <= VI)
+               si_set_raster_config(sctx, pm4);
+
+       si_pm4_set_reg(pm4, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
+       if (!has_clear_state)
+               si_pm4_set_reg(pm4, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
+
+       /* FIXME calculate these values somehow ??? */
        if (sctx->b.chip_class <= VI) {
-               if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
-                       /* Always use the default config when all backends are enabled
-                        * (or when we failed to determine the enabled backends).
-                        */
-                       si_pm4_set_reg(pm4, R_028350_PA_SC_RASTER_CONFIG,
-                                      raster_config);
-                       if (sctx->b.chip_class >= CIK)
-                               si_pm4_set_reg(pm4, R_028354_PA_SC_RASTER_CONFIG_1,
-                                              raster_config_1);
-               } else {
-                       si_write_harvested_raster_configs(sctx, pm4, raster_config, raster_config_1);
-               }
+               si_pm4_set_reg(pm4, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
+               si_pm4_set_reg(pm4, R_028A58_VGT_ES_PER_GS, 0x40);
        }
 
-       si_pm4_set_reg(pm4, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1));
-       si_pm4_set_reg(pm4, R_028240_PA_SC_GENERIC_SCISSOR_TL, S_028240_WINDOW_OFFSET_DISABLE(1));
-       si_pm4_set_reg(pm4, R_028244_PA_SC_GENERIC_SCISSOR_BR,
-                      S_028244_BR_X(16384) | S_028244_BR_Y(16384));
-       si_pm4_set_reg(pm4, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
-       si_pm4_set_reg(pm4, R_028034_PA_SC_SCREEN_SCISSOR_BR,
-                      S_028034_BR_X(16384) | S_028034_BR_Y(16384));
-
-       si_pm4_set_reg(pm4, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
-       si_pm4_set_reg(pm4, R_028230_PA_SC_EDGERULE,
-                      S_028230_ER_TRI(0xA) |
-                      S_028230_ER_POINT(0xA) |
-                      S_028230_ER_RECT(0xA) |
-                      /* Required by DX10_DIAMOND_TEST_ENA: */
-                      S_028230_ER_LINE_LR(0x1A) |
-                      S_028230_ER_LINE_RL(0x26) |
-                      S_028230_ER_LINE_TB(0xA) |
-                      S_028230_ER_LINE_BT(0xA));
-       /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
-       si_pm4_set_reg(pm4, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
-       si_pm4_set_reg(pm4, R_028820_PA_CL_NANINF_CNTL, 0);
-       si_pm4_set_reg(pm4, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
-       si_pm4_set_reg(pm4, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
-       si_pm4_set_reg(pm4, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
-       si_pm4_set_reg(pm4, R_02800C_DB_RENDER_OVERRIDE, 0);
+       if (!has_clear_state) {
+               si_pm4_set_reg(pm4, R_028A5C_VGT_GS_PER_VS, 0x2);
+               si_pm4_set_reg(pm4, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
+               si_pm4_set_reg(pm4, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
+       }
+
+       si_pm4_set_reg(pm4, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
+       if (!has_clear_state)
+               si_pm4_set_reg(pm4, R_028AB8_VGT_VTX_CNT_EN, 0x0);
+       if (sctx->b.chip_class < CIK)
+               si_pm4_set_reg(pm4, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
+                              S_008A14_CLIP_VTX_REORDER_ENA(1));
+
+       si_pm4_set_reg(pm4, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
+       si_pm4_set_reg(pm4, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);
+
+       if (!has_clear_state)
+               si_pm4_set_reg(pm4, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
+
+       /* CLEAR_STATE doesn't clear these correctly on certain generations.
+        * I don't know why. Deduced by trial and error.
+        */
+       if (sctx->b.chip_class <= CIK) {
+               si_pm4_set_reg(pm4, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
+               si_pm4_set_reg(pm4, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1));
+               si_pm4_set_reg(pm4, R_028240_PA_SC_GENERIC_SCISSOR_TL, S_028240_WINDOW_OFFSET_DISABLE(1));
+               si_pm4_set_reg(pm4, R_028244_PA_SC_GENERIC_SCISSOR_BR,
+                              S_028244_BR_X(16384) | S_028244_BR_Y(16384));
+               si_pm4_set_reg(pm4, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
+               si_pm4_set_reg(pm4, R_028034_PA_SC_SCREEN_SCISSOR_BR,
+                              S_028034_BR_X(16384) | S_028034_BR_Y(16384));
+       }
+
+       if (!has_clear_state) {
+               si_pm4_set_reg(pm4, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
+               si_pm4_set_reg(pm4, R_028230_PA_SC_EDGERULE,
+                              S_028230_ER_TRI(0xA) |
+                              S_028230_ER_POINT(0xA) |
+                              S_028230_ER_RECT(0xA) |
+                              /* Required by DX10_DIAMOND_TEST_ENA: */
+                              S_028230_ER_LINE_LR(0x1A) |
+                              S_028230_ER_LINE_RL(0x26) |
+                              S_028230_ER_LINE_TB(0xA) |
+                              S_028230_ER_LINE_BT(0xA));
+               /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
+               si_pm4_set_reg(pm4, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
+               si_pm4_set_reg(pm4, R_028820_PA_CL_NANINF_CNTL, 0);
+               si_pm4_set_reg(pm4, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
+               si_pm4_set_reg(pm4, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
+               si_pm4_set_reg(pm4, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
+               si_pm4_set_reg(pm4, R_02800C_DB_RENDER_OVERRIDE, 0);
+       }
 
        if (sctx->b.chip_class >= GFX9) {
                si_pm4_set_reg(pm4, R_030920_VGT_MAX_VTX_INDX, ~0);
                si_pm4_set_reg(pm4, R_030924_VGT_MIN_VTX_INDX, 0);
                si_pm4_set_reg(pm4, R_030928_VGT_INDX_OFFSET, 0);
        } else {
+               /* These registers, when written, also overwrite the CLEAR_STATE
+                * context, so we can't rely on CLEAR_STATE setting them.
+                * It would be an issue if another UMD were changing them.
+                */
                si_pm4_set_reg(pm4, R_028400_VGT_MAX_VTX_INDX, ~0);
                si_pm4_set_reg(pm4, R_028404_VGT_MIN_VTX_INDX, 0);
                si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET, 0);
        }
 
        if (sctx->b.chip_class >= CIK) {
-               /* If this is 0, Bonaire can hang even if GS isn't being used.
-                * Other chips are unaffected. These are suboptimal values,
-                * but we don't use on-chip GS.
-                */
-               si_pm4_set_reg(pm4, R_028A44_VGT_GS_ONCHIP_CNTL,
-                              S_028A44_ES_VERTS_PER_SUBGRP(64) |
-                              S_028A44_GS_PRIMS_PER_SUBGRP(4));
-
                if (sctx->b.chip_class >= GFX9) {
                        si_pm4_set_reg(pm4, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, S_00B41C_CU_EN(0xffff));
                } else {
                        si_pm4_set_reg(pm4, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xffff));
                        si_pm4_set_reg(pm4, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 0);
                        si_pm4_set_reg(pm4, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, S_00B31C_CU_EN(0xffff));
+
+                       /* If this is 0, Bonaire can hang even if GS isn't being used.
+                        * Other chips are unaffected. These are suboptimal values,
+                        * but we don't use on-chip GS.
+                        */
+                       si_pm4_set_reg(pm4, R_028A44_VGT_GS_ONCHIP_CNTL,
+                                      S_028A44_ES_VERTS_PER_SUBGRP(64) |
+                                      S_028A44_GS_PRIMS_PER_SUBGRP(4));
                }
                si_pm4_set_reg(pm4, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, S_00B21C_CU_EN(0xffff));
 
-               if (sscreen->b.info.num_good_compute_units /
-                   (sscreen->b.info.max_se * sscreen->b.info.max_sh_per_se) <= 4) {
+               /* Compute LATE_ALLOC_VS.LIMIT. */
+               unsigned num_cu_per_sh = sscreen->b.info.num_good_compute_units /
+                                        (sscreen->b.info.max_se *
+                                         sscreen->b.info.max_sh_per_se);
+               unsigned late_alloc_limit; /* The limit is per SH. */
+
+               if (sctx->b.family == CHIP_KABINI) {
+                       late_alloc_limit = 0; /* Potential hang on Kabini. */
+               } else if (num_cu_per_sh <= 4) {
                        /* Too few available compute units per SH. Disallowing
-                        * VS to run on CU0 could hurt us more than late VS
+                        * VS to run on one CU could hurt us more than late VS
                         * allocation would help.
                         *
-                        * LATE_ALLOC_VS = 2 is the highest safe number.
+                        * 2 is the highest safe number that allows us to keep
+                        * all CUs enabled.
                         */
-                       si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xffff));
-                       si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2));
+                       late_alloc_limit = 2;
                } else {
-                       /* Set LATE_ALLOC_VS == 31. It should be less than
-                        * the number of scratch waves. Limitations:
-                        * - VS can't execute on CU0.
-                        * - If HS writes outputs to LDS, LS can't execute on CU0.
+                       /* This is a good initial value, allowing 1 late_alloc
+                        * wave per SIMD on num_cu - 2 CUs.
                         */
-                       si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xfffe));
-                       si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31));
+                       late_alloc_limit = (num_cu_per_sh - 2) * 4;
+
+                       /* The limit is 0-based, so 0 means 1. */
+                       assert(late_alloc_limit > 0 && late_alloc_limit <= 64);
+                       late_alloc_limit -= 1;
                }
 
+               /* VS can't execute on one CU if the limit is > 2. */
+               si_pm4_set_reg(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
+                              S_00B118_CU_EN(late_alloc_limit > 2 ? 0xfffe : 0xffff));
+               si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS,
+                              S_00B11C_LIMIT(late_alloc_limit));
                si_pm4_set_reg(pm4, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, S_00B01C_CU_EN(0xffff));
        }
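
The new LATE_ALLOC_VS limit is derived from the CU count per SH instead of the old fixed 2/31 values. A standalone sketch of that arithmetic (the Kabini flag and the function name are illustrative; the register encoding itself is not reproduced here):

#include <assert.h>
#include <stdio.h>

/* Sketch of the LATE_ALLOC_VS limit derivation above (the limit is per SH). */
static unsigned late_alloc_vs_limit(unsigned num_cu_per_sh, int is_kabini)
{
        if (is_kabini)
                return 0;       /* potential hang on Kabini */
        if (num_cu_per_sh <= 4)
                return 2;       /* keep all CUs enabled for VS */

        /* About one late-alloc wave per SIMD on (num_cu - 2) CUs;
         * the hardware field is 0-based, so subtract 1.
         */
        unsigned limit = (num_cu_per_sh - 2) * 4;
        assert(limit > 0 && limit <= 64);
        return limit - 1;
}

int main(void)
{
        for (unsigned cu = 2; cu <= 16; cu += 2) {
                unsigned limit = late_alloc_vs_limit(cu, 0);
                printf("%2u CUs/SH -> limit %2u, VS CU_EN 0x%04x\n",
                       cu, limit, limit > 2 ? 0xfffe : 0xffff);
        }
        return 0;
}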
 
@@ -4422,9 +4731,6 @@ static void si_init_config(struct si_context *sctx)
                si_pm4_set_reg(pm4, R_028424_CB_DCC_CONTROL,
                               S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
                               S_028424_OVERWRITE_COMBINER_WATERMARK(4));
-               if (sctx->b.family < CHIP_POLARIS10)
-                       si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 30);
-               si_pm4_set_reg(pm4, R_028C5C_VGT_OUT_DEALLOC_CNTL, 32);
 
                vgt_tess_distribution =
                        S_028B50_ACCUM_ISOLINE(32) |
@@ -4440,14 +4746,11 @@ static void si_init_config(struct si_context *sctx)
                        vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);
 
                si_pm4_set_reg(pm4, R_028B50_VGT_TESS_DISTRIBUTION, vgt_tess_distribution);
-       } else {
+       } else if (!has_clear_state) {
                si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
                si_pm4_set_reg(pm4, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
        }
 
-       if (sctx->screen->b.has_rbplus)
-               si_pm4_set_reg(pm4, R_028C40_PA_SC_SHADER_CONTROL, 0);
-
        si_pm4_set_reg(pm4, R_028080_TA_BC_BASE_ADDR, border_color_va >> 8);
        if (sctx->b.chip_class >= CIK)
                si_pm4_set_reg(pm4, R_028084_TA_BC_BASE_ADDR_HI, border_color_va >> 40);
@@ -4455,15 +4758,23 @@ static void si_init_config(struct si_context *sctx)
                      RADEON_PRIO_BORDER_COLORS);
 
        if (sctx->b.chip_class >= GFX9) {
-               si_pm4_set_reg(pm4, R_028060_DB_DFSM_CONTROL, 0);
-               si_pm4_set_reg(pm4, R_028064_DB_RENDER_FILTER, 0);
-               /* TODO: We can use this to disable RBs for rendering to GART: */
-               si_pm4_set_reg(pm4, R_02835C_PA_SC_TILE_STEERING_OVERRIDE, 0);
-               si_pm4_set_reg(pm4, R_02883C_PA_SU_OVER_RASTERIZATION_CNTL, 0);
-               /* TODO: Enable the binner: */
-               si_pm4_set_reg(pm4, R_028C44_PA_SC_BINNER_CNTL_0,
-                              S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC));
-               si_pm4_set_reg(pm4, R_028C48_PA_SC_BINNER_CNTL_1, 0);
+               unsigned num_se = sscreen->b.info.max_se;
+               unsigned pc_lines = 0;
+
+               switch (sctx->b.family) {
+               case CHIP_VEGA10:
+                       pc_lines = 4096;
+                       break;
+               case CHIP_RAVEN:
+                       pc_lines = 1024;
+                       break;
+               default:
+                       assert(0);
+               }
+
+               si_pm4_set_reg(pm4, R_028C48_PA_SC_BINNER_CNTL_1,
+                              S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) |
+                              S_028C48_MAX_PRIM_PER_BATCH(1023));
                si_pm4_set_reg(pm4, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
                               S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
                si_pm4_set_reg(pm4, R_030968_VGT_INSTANCE_BASE_ID, 0);
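
The GFX9 binner setup above derives MAX_ALLOC_COUNT from the chip's parameter-cache line count divided across shader engines, capped at 128. A quick standalone check of that formula (the SE counts used below, 4 for Vega10 and 1 for Raven, are assumptions for illustration):

#include <stdio.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))

/* Sketch of the PA_SC_BINNER_CNTL_1 MAX_ALLOC_COUNT derivation above. */
static unsigned max_alloc_count(unsigned pc_lines, unsigned num_se)
{
        return MIN2(128, pc_lines / (4 * num_se));
}

int main(void)
{
        /* pc_lines per the switch above: Vega10 = 4096, Raven = 1024. */
        printf("Vega10: %u\n", max_alloc_count(4096, 4)); /* 128 */
        printf("Raven:  %u\n", max_alloc_count(1024, 1)); /* 128 */
        return 0;
}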