radv/gfx9: use a bigger hammer to flush cb/db caches.
diff --git a/src/amd/vulkan/si_cmd_buffer.c b/src/amd/vulkan/si_cmd_buffer.c
index d94e23b975f2e38c5f74db2fcec5ebe48ca3478c..a6981c136e70c5ce4ed2b832cb52804378f0f659 100644
--- a/src/amd/vulkan/si_cmd_buffer.c
+++ b/src/amd/vulkan/si_cmd_buffer.c
 /* command buffer handling for SI */
 
 #include "radv_private.h"
+#include "radv_shader.h"
 #include "radv_cs.h"
 #include "sid.h"
+#include "gfx9d.h"
 #include "radv_util.h"
 #include "main/macros.h"
 
-#define SI_GS_PER_ES 128
-
 static void
 si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
                                   struct radeon_winsys_cs *cs,
@@ -147,9 +147,10 @@ si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
 
                /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
                if (physical_device->rad_info.chip_class < CIK)
-                       radeon_set_config_reg(cs, GRBM_GFX_INDEX,
-                                             SE_INDEX(se) | SH_BROADCAST_WRITES |
-                                             INSTANCE_BROADCAST_WRITES);
+                       radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
+                                             S_00802C_SE_INDEX(se) |
+                                             S_00802C_SH_BROADCAST_WRITES(1) |
+                                             S_00802C_INSTANCE_BROADCAST_WRITES(1));
                else
                        radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                                               S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
@@ -161,9 +162,10 @@ si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
 
        /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
        if (physical_device->rad_info.chip_class < CIK)
-               radeon_set_config_reg(cs, GRBM_GFX_INDEX,
-                                     SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
-                                     INSTANCE_BROADCAST_WRITES);
+               radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
+                                     S_00802C_SE_BROADCAST_WRITES(1) |
+                                     S_00802C_SH_BROADCAST_WRITES(1) |
+                                     S_00802C_INSTANCE_BROADCAST_WRITES(1));
        else
                radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                                       S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
@@ -179,7 +181,8 @@ si_emit_compute(struct radv_physical_device *physical_device,
        radeon_emit(cs, 0);
        radeon_emit(cs, 0);
 
-       radeon_set_sh_reg_seq(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, 3);
+       radeon_set_sh_reg_seq(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
+                             S_00B854_WAVES_PER_SH(0x3));
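+       /* Note: the count argument of radeon_set_sh_reg_seq() is the number of
+        * consecutive registers to emit; S_00B854_WAVES_PER_SH(0x3) happens to
+        * evaluate to 3 because the field starts at bit 0, so three dwords follow. */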
        radeon_emit(cs, 0);
        /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
        radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
@@ -216,45 +219,20 @@ si_init_compute(struct radv_cmd_buffer *cmd_buffer)
        si_emit_compute(physical_device, cmd_buffer->cs);
 }
 
+/* 12.4 fixed-point */
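+/* e.g. 1.0 -> 0x10, 0.5 -> 0x08; out-of-range values clamp to 0 or 0xffff. */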
+static unsigned radv_pack_float_12p4(float x)
+{
+       return x <= 0    ? 0 :
+              x >= 4096 ? 0xffff : x * 16;
+}
+
 static void
-si_emit_config(struct radv_physical_device *physical_device,
-              struct radeon_winsys_cs *cs)
+si_set_raster_config(struct radv_physical_device *physical_device,
+                    struct radeon_winsys_cs *cs)
 {
        unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
        unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
        unsigned raster_config, raster_config_1;
-       int i;
-
-       radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
-       radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1));
-       radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1));
-
-       radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
-       radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
-
-       /* FIXME calculate these values somehow ??? */
-       radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
-       radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
-       radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);
-
-       radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
-       radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
-
-       radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
-       radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
-       if (physical_device->rad_info.chip_class < CIK)
-               radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
-                                     S_008A14_CLIP_VTX_REORDER_ENA(1));
-
-       radeon_set_context_reg(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
-       radeon_set_context_reg(cs, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);
-
-       radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
-
-       for (i = 0; i < 16; i++) {
-               radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
-               radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
-       }
 
        switch (physical_device->rad_info.family) {
        case CHIP_TAHITI:
@@ -329,7 +307,7 @@ si_emit_config(struct radv_physical_device *physical_device,
                break;
        default:
                fprintf(stderr,
-                       "radeonsi: Unknown GPU, using 0 for raster_config\n");
+                       "radv: Unknown GPU, using 0 for raster_config\n");
                raster_config = 0x00000000;
                raster_config_1 = 0x00000000;
                break;
@@ -345,47 +323,137 @@ si_emit_config(struct radv_physical_device *physical_device,
                        radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
                                               raster_config_1);
        } else {
-               si_write_harvested_raster_configs(physical_device, cs, raster_config, raster_config_1);
+               si_write_harvested_raster_configs(physical_device, cs,
+                                                 raster_config,
+                                                 raster_config_1);
+       }
+}
+
+static void
+si_emit_config(struct radv_physical_device *physical_device,
+              struct radeon_winsys_cs *cs)
+{
+       int i;
+
+       /* Only SI can disable CLEAR_STATE for now. */
+       assert(physical_device->has_clear_state ||
+              physical_device->rad_info.chip_class == SI);
+
+       radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
+       radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1));
+       radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1));
+
+       if (physical_device->has_clear_state) {
+               radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
+               radeon_emit(cs, 0);
+       }
+
+       if (physical_device->rad_info.chip_class <= VI)
+               si_set_raster_config(physical_device, cs);
+
+       radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
+       if (!physical_device->has_clear_state)
+               radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
+
+       /* FIXME calculate these values somehow ??? */
+       if (physical_device->rad_info.chip_class <= VI) {
+               radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
+               radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
+       }
+
+       if (!physical_device->has_clear_state) {
+               radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);
+               radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
+               radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
+       }
+
+       radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
+       if (!physical_device->has_clear_state)
+               radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
+       if (physical_device->rad_info.chip_class < CIK)
+               radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
+                                     S_008A14_CLIP_VTX_REORDER_ENA(1));
+
+       radeon_set_context_reg(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
+       radeon_set_context_reg(cs, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);
+
+       if (!physical_device->has_clear_state)
+               radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
+
+       /* CLEAR_STATE doesn't clear these correctly on certain generations.
+        * I don't know why. Deduced by trial and error.
+        */
+       if (physical_device->rad_info.chip_class <= CIK) {
+               radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
+               radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL,
+                                      S_028204_WINDOW_OFFSET_DISABLE(1));
+               radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL,
+                                      S_028240_WINDOW_OFFSET_DISABLE(1));
+               radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
+                                      S_028244_BR_X(16384) | S_028244_BR_Y(16384));
+               radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
+               radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
+                                      S_028034_BR_X(16384) | S_028034_BR_Y(16384));
+       }
+
+       if (!physical_device->has_clear_state) {
+               for (i = 0; i < 16; i++) {
+                       radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
+                       radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
+               }
+       }
+
+       if (!physical_device->has_clear_state) {
+               radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
+               radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
+               /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
+               radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
+               radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);
+               radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
+               radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
+               radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
        }
 
-       radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1));
-       radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL, S_028240_WINDOW_OFFSET_DISABLE(1));
-       radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
-                              S_028244_BR_X(16384) | S_028244_BR_Y(16384));
-       radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
-       radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
-                              S_028034_BR_X(16384) | S_028034_BR_Y(16384));
-
-       radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
-       radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
-       /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
-       radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
-       radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);
-
-       radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
-       radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
-       radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
        radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
                               S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
                               S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));
 
-       radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
-       radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
-       radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
-
-       if (physical_device->rad_info.chip_class >= CIK) {
-               /* If this is 0, Bonaire can hang even if GS isn't being used.
-                * Other chips are unaffected. These are suboptimal values,
-                * but we don't use on-chip GS.
+       if (physical_device->rad_info.chip_class >= GFX9) {
+               radeon_set_uconfig_reg(cs, R_030920_VGT_MAX_VTX_INDX, ~0);
+               radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0);
+               radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0);
+       } else {
+               /* These registers, when written, also overwrite the
+                * CLEAR_STATE context, so we can't rely on CLEAR_STATE setting
+                * them.  It would be an issue if there was another UMD
+                * changing them.
                 */
-               radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
-                                      S_028A44_ES_VERTS_PER_SUBGRP(64) |
-                                      S_028A44_GS_PRIMS_PER_SUBGRP(4));
+               radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
+               radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
+               radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
+       }
 
-               radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xffff));
-               radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 0);
-               radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, S_00B31C_CU_EN(0xffff));
-               radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, S_00B21C_CU_EN(0xffff));
+       if (physical_device->rad_info.chip_class >= CIK) {
+               if (physical_device->rad_info.chip_class >= GFX9) {
+                       radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
+                                         S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F));
+               } else {
+                       radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS,
+                                         S_00B51C_CU_EN(0xffff) | S_00B51C_WAVE_LIMIT(0x3F));
+                       radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
+                                         S_00B41C_WAVE_LIMIT(0x3F));
+                       radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES,
+                                         S_00B31C_CU_EN(0xffff) | S_00B31C_WAVE_LIMIT(0x3F));
+                       /* If this is 0, Bonaire can hang even if GS isn't being used.
+                        * Other chips are unaffected. These are suboptimal values,
+                        * but we don't use on-chip GS.
+                        */
+                       radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
+                                              S_028A44_ES_VERTS_PER_SUBGRP(64) |
+                                              S_028A44_GS_PRIMS_PER_SUBGRP(4));
+               }
+               radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
+                                 S_00B21C_CU_EN(0xffff) | S_00B21C_WAVE_LIMIT(0x3F));
 
                if (physical_device->rad_info.num_good_compute_units /
                    (physical_device->rad_info.max_se * physical_device->rad_info.max_sh_per_se) <= 4) {
@@ -395,7 +463,8 @@ si_emit_config(struct radv_physical_device *physical_device,
                         *
                         * LATE_ALLOC_VS = 2 is the highest safe number.
                         */
-                       radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xffff));
+                       radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
+                                         S_00B118_CU_EN(0xffff) | S_00B118_WAVE_LIMIT(0x3F));
                        radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2));
                } else {
                        /* Set LATE_ALLOC_VS == 31. It should be less than
@@ -403,11 +472,13 @@ si_emit_config(struct radv_physical_device *physical_device,
                         * - VS can't execute on CU0.
                         * - If HS writes outputs to LDS, LS can't execute on CU0.
                         */
-                       radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xfffe));
+                       radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
+                                         S_00B118_CU_EN(0xfffe) | S_00B118_WAVE_LIMIT(0x3F));
                        radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31));
                }
 
-               radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, S_00B01C_CU_EN(0xffff));
+               radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
+                                 S_00B01C_CU_EN(0xffff) | S_00B01C_WAVE_LIMIT(0x3F));
        }
 
        if (physical_device->rad_info.chip_class >= VI) {
@@ -415,9 +486,6 @@ si_emit_config(struct radv_physical_device *physical_device,
                radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL,
                                       S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
                                       S_028424_OVERWRITE_COMBINER_WATERMARK(4));
-               if (physical_device->rad_info.family < CHIP_POLARIS10)
-                       radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 30);
-               radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 32);
 
                vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
                        S_028B50_ACCUM_TRI(11) |
@@ -430,13 +498,51 @@ si_emit_config(struct radv_physical_device *physical_device,
 
                radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
                                       vgt_tess_distribution);
-       } else {
+       } else if (!physical_device->has_clear_state) {
                radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
                radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
        }
 
-       if (physical_device->rad_info.family == CHIP_STONEY)
-               radeon_set_context_reg(cs, R_028C40_PA_SC_SHADER_CONTROL, 0);
+       if (physical_device->rad_info.chip_class >= GFX9) {
+               unsigned num_se = physical_device->rad_info.max_se;
+               unsigned pc_lines = 0;
+
+               switch (physical_device->rad_info.family) {
+               case CHIP_VEGA10:
+                       pc_lines = 4096;
+                       break;
+               case CHIP_RAVEN:
+                       pc_lines = 1024;
+                       break;
+               default:
+                       assert(0);
+               }
+
+               radeon_set_context_reg(cs, R_028060_DB_DFSM_CONTROL,
+                                      S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF));
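+               /* e.g. on Vega10: pc_lines = 4096 and max_se = 4, so
+                * MAX_ALLOC_COUNT = MIN2(128, 4096 / (4 * 4)) = 128 below. */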
+               /* TODO: Enable the binner. */
+               radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
+                                      S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
+                                      S_028C44_DISABLE_START_OF_PRIM(1));
+               radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1,
+                                      S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) |
+                                      S_028C48_MAX_PRIM_PER_BATCH(1023));
+               radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
+                                      S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
+               radeon_set_uconfig_reg(cs, R_030968_VGT_INSTANCE_BASE_ID, 0);
+       }
+
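+       /* PA_SU_POINT_SIZE and PA_SU_POINT_MINMAX hold half-sizes in 12.4
+        * fixed point (0.5 = 1 pixel), so a point size of 1.0 becomes
+        * 1.0 * 16 / 2 = 8, and the 8192 maximum is halved below. */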
+       unsigned tmp = (unsigned)(1.0 * 8.0);
+       radeon_set_context_reg_seq(cs, R_028A00_PA_SU_POINT_SIZE, 1);
+       radeon_emit(cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
+       radeon_set_context_reg_seq(cs, R_028A04_PA_SU_POINT_MINMAX, 1);
+       radeon_emit(cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
+                   S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2)));
+
+       if (!physical_device->has_clear_state) {
+               radeon_set_context_reg(cs, R_028004_DB_COUNT_CONTROL,
+                                      S_028004_ZPASS_INCREMENT_DISABLE(1));
+       }
 
        si_emit_compute(physical_device, cs);
 }
@@ -467,7 +573,8 @@ cik_create_gfx_config(struct radv_device *device)
        device->gfx_init = device->ws->buffer_create(device->ws,
                                                     cs->cdw * 4, 4096,
                                                     RADEON_DOMAIN_GTT,
-                                                    RADEON_FLAG_CPU_ACCESS);
+                                                    RADEON_FLAG_CPU_ACCESS|
+                                                    RADEON_FLAG_NO_INTERPROCESS_SHARING);
        if (!device->gfx_init)
                goto fail;
 
@@ -632,71 +739,27 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
        enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
        enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
        struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
-       unsigned prim = cmd_buffer->state.pipeline->graphics.prim;
-       unsigned primgroup_size = 128; /* recommended without a GS */
-       unsigned max_primgroup_in_wave = 2;
+       const unsigned max_primgroup_in_wave = 2;
        /* SWITCH_ON_EOP(0) is always preferable. */
        bool wd_switch_on_eop = false;
        bool ia_switch_on_eop = false;
        bool ia_switch_on_eoi = false;
        bool partial_vs_wave = false;
-       bool partial_es_wave = false;
-       uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
+       bool partial_es_wave = cmd_buffer->state.pipeline->graphics.partial_es_wave;
        bool multi_instances_smaller_than_primgroup;
 
-       if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
-               primgroup_size = cmd_buffer->state.pipeline->graphics.tess.num_patches;
-       else if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
-               primgroup_size = 64;  /* recommended with a GS */
-
-       multi_instances_smaller_than_primgroup = indirect_draw || (instanced_draw &&
-                                                                  num_prims < primgroup_size);
-       if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) {
-               /* SWITCH_ON_EOI must be set if PrimID is used. */
-               if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.uses_prim_id ||
-                   cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.uses_prim_id)
-                       ia_switch_on_eoi = true;
-
-               /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
-               if ((family == CHIP_TAHITI ||
-                    family == CHIP_PITCAIRN ||
-                    family == CHIP_BONAIRE) &&
-                   radv_pipeline_has_gs(cmd_buffer->state.pipeline))
-                       partial_vs_wave = true;
-
-               /* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
-               if (cmd_buffer->device->has_distributed_tess) {
-                       if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
-                               partial_es_wave = true;
-
-                               if (family == CHIP_TONGA ||
-                                   family == CHIP_FIJI ||
-                                   family == CHIP_POLARIS10 ||
-                                   family == CHIP_POLARIS11 ||
-                                   family == CHIP_POLARIS12)
-                                       partial_vs_wave = true;
-                       } else {
-                               partial_vs_wave = true;
-                       }
-               }
+       multi_instances_smaller_than_primgroup = indirect_draw;
+       if (!multi_instances_smaller_than_primgroup && instanced_draw) {
+               uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
+               if (num_prims < cmd_buffer->state.pipeline->graphics.primgroup_size)
+                       multi_instances_smaller_than_primgroup = true;
        }
-       /* TODO linestipple */
+
+       ia_switch_on_eoi = cmd_buffer->state.pipeline->graphics.ia_switch_on_eoi;
+       partial_vs_wave = cmd_buffer->state.pipeline->graphics.partial_vs_wave;
 
        if (chip_class >= CIK) {
-               /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
-                * 4 shader engines. Set 1 to pass the assertion below.
-                * The other cases are hardware requirements. */
-               if (info->max_se < 4 ||
-                   prim == V_008958_DI_PT_POLYGON ||
-                   prim == V_008958_DI_PT_LINELOOP ||
-                   prim == V_008958_DI_PT_TRIFAN ||
-                   prim == V_008958_DI_PT_TRISTRIP_ADJ ||
-                   (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
-                    (family < CHIP_POLARIS10 ||
-                     (prim != V_008958_DI_PT_POINTLIST &&
-                     prim != V_008958_DI_PT_LINESTRIP &&
-                      prim != V_008958_DI_PT_TRISTRIP))))
-                       wd_switch_on_eop = true;
+               wd_switch_on_eop = cmd_buffer->state.pipeline->graphics.wd_switch_on_eop;
 
                /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
                 * We don't know that for indirect drawing, so treat it as
@@ -723,6 +786,7 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
                if (ia_switch_on_eoi &&
                    (family == CHIP_HAWAII ||
                     (chip_class == VI &&
+                     /* max primgroup in wave is always 2 - leave this for documentation */
                      (radv_pipeline_has_gs(cmd_buffer->state.pipeline) || max_primgroup_in_wave != 2))))
                        partial_vs_wave = true;
 
@@ -735,50 +799,119 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
                assert(wd_switch_on_eop || !ia_switch_on_eop);
        }
        /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
-       if (ia_switch_on_eoi)
+       if (chip_class <= VI && ia_switch_on_eoi)
                partial_es_wave = true;
 
        if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
-               /* GS requirement. */
-               if (SI_GS_PER_ES / primgroup_size >= cmd_buffer->device->gs_table_depth - 3)
-                       partial_es_wave = true;
-
-               /* Hw bug with single-primitive instances and SWITCH_ON_EOI
-                * on multi-SE chips. */
-               if (info->max_se >= 2 && ia_switch_on_eoi &&
-                   ((instanced_draw || indirect_draw) &&
-                    num_prims <= 1))
-                       cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
+               /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
+                * The hw doc says all multi-SE chips are affected, but amdgpu-pro Vulkan
+                * only applies it to Hawaii. Do what amdgpu-pro Vulkan does.
+                */
+               if (family == CHIP_HAWAII && ia_switch_on_eoi) {
+                       bool set_vgt_flush = indirect_draw;
+                       if (!set_vgt_flush && instanced_draw) {
+                               uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
+                               if (num_prims <= 1)
+                                       set_vgt_flush = true;
+                       }
+                       if (set_vgt_flush)
+                               cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
+               }
        }
 
-       return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
+       return cmd_buffer->state.pipeline->graphics.base_ia_multi_vgt_param |
+               S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
                S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
                S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
                S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
-               S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
-               S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0) |
-               S_028AA8_MAX_PRIMGRP_IN_WAVE(chip_class >= VI ?
-                                            max_primgroup_in_wave : 0);
+               S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0);
+}
+
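+/* Emit an end-of-pipe event that optionally flushes caches (event_flags) and
+ * then writes new_fence to va, with data_sel choosing what is written. GFX9,
+ * and the MEC on GFX8, use RELEASE_MEM; older gfx rings use EVENT_WRITE_EOP. */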
+void si_cs_emit_write_event_eop(struct radeon_winsys_cs *cs,
+                               bool predicated,
+                               enum chip_class chip_class,
+                               bool is_mec,
+                               unsigned event, unsigned event_flags,
+                               unsigned data_sel,
+                               uint64_t va,
+                               uint32_t old_fence,
+                               uint32_t new_fence)
+{
+       unsigned op = EVENT_TYPE(event) |
+               EVENT_INDEX(5) |
+               event_flags;
+       unsigned is_gfx8_mec = is_mec && chip_class < GFX9;
+
+       if (chip_class >= GFX9 || is_gfx8_mec) {
+               radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, predicated));
+               radeon_emit(cs, op);
+               radeon_emit(cs, EOP_DATA_SEL(data_sel));
+               radeon_emit(cs, va);            /* address lo */
+               radeon_emit(cs, va >> 32);      /* address hi */
+               radeon_emit(cs, new_fence);     /* immediate data lo */
+               radeon_emit(cs, 0); /* immediate data hi */
+               if (!is_gfx8_mec)
+                       radeon_emit(cs, 0); /* unused */
+       } else {
+               if (chip_class == CIK ||
+                   chip_class == VI) {
+                       /* Two EOP events are required to make all engines go idle
+                        * (and optional cache flushes executed) before the timestamp
+                        * is written.
+                        */
+                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, predicated));
+                       radeon_emit(cs, op);
+                       radeon_emit(cs, va);
+                       radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
+                       radeon_emit(cs, old_fence); /* immediate data */
+                       radeon_emit(cs, 0); /* unused */
+               }
 
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, predicated));
+               radeon_emit(cs, op);
+               radeon_emit(cs, va);
+               radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
+               radeon_emit(cs, new_fence); /* immediate data */
+               radeon_emit(cs, 0); /* unused */
+       }
+}
+
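+/* Emit WAIT_REG_MEM on memory: stalls until (*(uint32_t *)va & mask) == ref. */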
+void
+si_emit_wait_fence(struct radeon_winsys_cs *cs,
+                  bool predicated,
+                  uint64_t va, uint32_t ref,
+                  uint32_t mask)
+{
+       radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, predicated));
+       radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
+       radeon_emit(cs, va);
+       radeon_emit(cs, va >> 32);
+       radeon_emit(cs, ref); /* reference value */
+       radeon_emit(cs, mask); /* mask */
+       radeon_emit(cs, 4); /* poll interval */
 }
 
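 /* Emit a full-range cache synchronization: ACQUIRE_MEM on compute rings and
  * GFX9, SURFACE_SYNC elsewhere, acting on the caches named in cp_coher_cntl. */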
 static void
 si_emit_acquire_mem(struct radeon_winsys_cs *cs,
                     bool is_mec,
+                   bool predicated,
+                   bool is_gfx9,
                     unsigned cp_coher_cntl)
 {
-       if (is_mec) {
-               radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) |
-                                           PKT3_SHADER_TYPE_S(1));
+       if (is_mec || is_gfx9) {
+               uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
+               radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, predicated) |
+                                           PKT3_SHADER_TYPE_S(is_mec));
                radeon_emit(cs, cp_coher_cntl);   /* CP_COHER_CNTL */
                radeon_emit(cs, 0xffffffff);      /* CP_COHER_SIZE */
-               radeon_emit(cs, 0xff);            /* CP_COHER_SIZE_HI */
+               radeon_emit(cs, hi_val);          /* CP_COHER_SIZE_HI */
                radeon_emit(cs, 0);               /* CP_COHER_BASE */
                radeon_emit(cs, 0);               /* CP_COHER_BASE_HI */
                radeon_emit(cs, 0x0000000A);      /* POLL_INTERVAL */
        } else {
                /* ACQUIRE_MEM is only required on a compute ring. */
-               radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
+               radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, predicated));
                radeon_emit(cs, cp_coher_cntl);   /* CP_COHER_CNTL */
                radeon_emit(cs, 0xffffffff);      /* CP_COHER_SIZE */
                radeon_emit(cs, 0);               /* CP_COHER_BASE */
@@ -788,109 +921,184 @@ si_emit_acquire_mem(struct radeon_winsys_cs *cs,
 
 void
 si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
+                      bool predicated,
                        enum chip_class chip_class,
+                      uint32_t *flush_cnt,
+                      uint64_t flush_va,
                        bool is_mec,
                        enum radv_cmd_flush_bits flush_bits)
 {
        unsigned cp_coher_cntl = 0;
-
+       uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+                                            RADV_CMD_FLAG_FLUSH_AND_INV_DB);
+
        if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
                cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
        if (flush_bits & RADV_CMD_FLAG_INV_SMEM_L1)
                cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
 
-       if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
-               cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
-                       S_0085F0_CB0_DEST_BASE_ENA(1) |
-                       S_0085F0_CB1_DEST_BASE_ENA(1) |
-                       S_0085F0_CB2_DEST_BASE_ENA(1) |
-                       S_0085F0_CB3_DEST_BASE_ENA(1) |
-                       S_0085F0_CB4_DEST_BASE_ENA(1) |
-                       S_0085F0_CB5_DEST_BASE_ENA(1) |
-                       S_0085F0_CB6_DEST_BASE_ENA(1) |
-                       S_0085F0_CB7_DEST_BASE_ENA(1);
-
-               /* Necessary for DCC */
-               if (chip_class >= VI) {
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
-                       radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_DATA_TS) |
-                                                   EVENT_INDEX(5));
-                       radeon_emit(cs, 0);
-                       radeon_emit(cs, 0);
-                       radeon_emit(cs, 0);
-                       radeon_emit(cs, 0);
+       if (chip_class <= VI) {
+               if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
+                       cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
+                               S_0085F0_CB0_DEST_BASE_ENA(1) |
+                               S_0085F0_CB1_DEST_BASE_ENA(1) |
+                               S_0085F0_CB2_DEST_BASE_ENA(1) |
+                               S_0085F0_CB3_DEST_BASE_ENA(1) |
+                               S_0085F0_CB4_DEST_BASE_ENA(1) |
+                               S_0085F0_CB5_DEST_BASE_ENA(1) |
+                               S_0085F0_CB6_DEST_BASE_ENA(1) |
+                               S_0085F0_CB7_DEST_BASE_ENA(1);
+
+                       /* Necessary for DCC */
+                       if (chip_class >= VI) {
+                               si_cs_emit_write_event_eop(cs,
+                                                          predicated,
+                                                          chip_class,
+                                                          is_mec,
+                                                          V_028A90_FLUSH_AND_INV_CB_DATA_TS,
+                                                          0, 0, 0, 0, 0);
+                       }
+               }
+               if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
+                       cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
+                               S_0085F0_DB_DEST_BASE_ENA(1);
                }
-       }
-
-       if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
-               cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
-                       S_0085F0_DB_DEST_BASE_ENA(1);
        }
 
        if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
                radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
        }
 
        if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
                radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
        }
 
-       if (!(flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
-                                             RADV_CMD_FLAG_FLUSH_AND_INV_DB))) {
-               if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
-                       radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
-               } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
-                       radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
-               }
+       if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+       } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
        }
 
        if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
                radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
        }
 
+       if (chip_class >= GFX9 && flush_cb_db) {
+               unsigned cb_db_event, tc_flags;
+
+#if 0
+               /* This breaks a bunch of:
+                  dEQP-VK.renderpass.dedicated_allocation.formats.d32_sfloat_s8_uint.input*.
+                  Use the big hammer always.
+               */
+               /* Set the CB/DB flush event. */
+               switch (flush_cb_db) {
+               case RADV_CMD_FLAG_FLUSH_AND_INV_CB:
+                       cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
+                       break;
+               case RADV_CMD_FLAG_FLUSH_AND_INV_DB:
+                       cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
+                       break;
+               default:
+                       /* both CB & DB */
+                       cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
+               }
+#else
+               cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
+#endif
+               /* TC    | TC_WB         = invalidate L2 data
+                * TC_MD | TC_WB         = invalidate L2 metadata
+                * TC    | TC_WB | TC_MD = invalidate L2 data & metadata
+                *
+                * The metadata cache must always be invalidated for coherency
+                * between CB/DB and shaders. (metadata = HTILE, CMASK, DCC)
+                *
+                * TC must be invalidated on GFX9 only if the CB/DB surface is
+                * not pipe-aligned. If the surface is RB-aligned, it might not
+                * strictly be pipe-aligned since RB alignment takes precedence.
+                */
+               tc_flags = EVENT_TC_WB_ACTION_ENA |
+                          EVENT_TC_MD_ACTION_ENA;
+
+               /* Ideally flush TC together with CB/DB. */
+               if (flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) {
+                       tc_flags |= EVENT_TC_ACTION_ENA |
+                                   EVENT_TCL1_ACTION_ENA;
+
+                       /* Clear the flags. */
+                       flush_bits &= ~(RADV_CMD_FLAG_INV_GLOBAL_L2 |
+                                        RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2 |
+                                        RADV_CMD_FLAG_INV_VMEM_L1);
+               }
+               assert(flush_cnt);
+               uint32_t old_fence = (*flush_cnt)++;
+
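+               /* The EOP event writes the incremented fence value to flush_va
+                * once the flush completes; waiting on it below guarantees the
+                * CB/DB caches are flushed before execution continues. */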
+               si_cs_emit_write_event_eop(cs, predicated, chip_class, false, cb_db_event, tc_flags, 1,
+                                          flush_va, old_fence, *flush_cnt);
+               si_emit_wait_fence(cs, predicated, flush_va, *flush_cnt, 0xffffffff);
+       }
+
        /* VGT state sync */
        if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
                radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
        }
 
        /* Make sure ME is idle (it executes most packets) before continuing.
         * This prevents read-after-write hazards between PFP and ME.
         */
-       if ((cp_coher_cntl || (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) &&
+       if ((cp_coher_cntl ||
+            (flush_bits & (RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
+                           RADV_CMD_FLAG_INV_VMEM_L1 |
+                           RADV_CMD_FLAG_INV_GLOBAL_L2 |
+                           RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) &&
            !is_mec) {
-               radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+               radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, predicated));
                radeon_emit(cs, 0);
        }
 
        if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
            (chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
-               cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
-               if (chip_class >= VI)
-                       cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
-       } else  if(flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2) {
-               cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1) |
-                                S_0301F0_TC_NC_ACTION_ENA(1);
-
-               /* L2 writeback doesn't combine with L1 invalidate */
-               si_emit_acquire_mem(cs, is_mec, cp_coher_cntl);
-
+               si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9,
+                                   cp_coher_cntl |
+                                   S_0085F0_TC_ACTION_ENA(1) |
+                                   S_0085F0_TCL1_ACTION_ENA(1) |
+                                   S_0301F0_TC_WB_ACTION_ENA(chip_class >= VI));
                cp_coher_cntl = 0;
+       } else {
+               if (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2) {
+                       /* WB = write-back
+                        * NC = apply to non-coherent MTYPEs
+                        *      (i.e. MTYPE <= 1, which is what we use everywhere)
+                        *
+                        * WB doesn't work without NC.
+                        */
+                       si_emit_acquire_mem(cs, is_mec, predicated,
+                                           chip_class >= GFX9,
+                                           cp_coher_cntl |
+                                           S_0301F0_TC_WB_ACTION_ENA(1) |
+                                           S_0301F0_TC_NC_ACTION_ENA(1));
+                       cp_coher_cntl = 0;
+               }
+               if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
+                       si_emit_acquire_mem(cs, is_mec,
+                                           predicated, chip_class >= GFX9,
+                                           cp_coher_cntl |
+                                           S_0085F0_TCL1_ACTION_ENA(1));
+                       cp_coher_cntl = 0;
+               }
        }
 
-       if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1)
-               cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
-
        /* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
         * Therefore, it should be last. Done in PFP.
         */
        if (cp_coher_cntl)
-               si_emit_acquire_mem(cs, is_mec, cp_coher_cntl);
+               si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9, cp_coher_cntl);
 }
 
 void
@@ -907,67 +1115,140 @@ si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
                                                  RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
                                                  RADV_CMD_FLAG_VGT_FLUSH);
 
+       if (!cmd_buffer->state.flush_bits)
+               return;
+
+       enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
        radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);
 
+       uint32_t *ptr = NULL;
+       uint64_t va = 0;
+       if (chip_class == GFX9) {
+               va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset;
+               ptr = &cmd_buffer->gfx9_fence_idx;
+       }
        si_cs_emit_cache_flush(cmd_buffer->cs,
+                              cmd_buffer->state.predicating,
                               cmd_buffer->device->physical_device->rad_info.chip_class,
+                              ptr, va,
                               radv_cmd_buffer_uses_mec(cmd_buffer),
                               cmd_buffer->state.flush_bits);
 
 
-       if (cmd_buffer->state.flush_bits)
+       if (unlikely(cmd_buffer->device->trace_bo))
                radv_cmd_buffer_trace_emit(cmd_buffer);
+
        cmd_buffer->state.flush_bits = 0;
 }
 
+/* sets the CP predication state using a boolean stored at va */
+void
+si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
+{
+       uint32_t op = 0;
+
+       if (va)
+               op = PRED_OP(PREDICATION_OP_BOOL64) | PREDICATION_DRAW_VISIBLE;
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+               radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
+               radeon_emit(cmd_buffer->cs, op);
+               radeon_emit(cmd_buffer->cs, va);
+               radeon_emit(cmd_buffer->cs, va >> 32);
+       } else {
+               radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
+               radeon_emit(cmd_buffer->cs, va);
+               radeon_emit(cmd_buffer->cs, op | ((va >> 32) & 0xFF));
+       }
+}
 
 /* Set this if you want the 3D engine to wait until CP DMA is done.
  * It should be set on the last CP DMA packet. */
-#define R600_CP_DMA_SYNC       (1 << 0) /* R600+ */
+#define CP_DMA_SYNC    (1 << 0)
 
 /* Set this if the source data was used as a destination in a previous CP DMA
  * packet. It's for preventing a read-after-write (RAW) hazard between two
  * CP DMA packets. */
-#define SI_CP_DMA_RAW_WAIT     (1 << 1) /* SI+ */
-#define CIK_CP_DMA_USE_L2      (1 << 2)
+#define CP_DMA_RAW_WAIT        (1 << 1)
+#define CP_DMA_USE_L2  (1 << 2)
+#define CP_DMA_CLEAR   (1 << 3)
 
 /* Alignment for optimal performance. */
-#define CP_DMA_ALIGNMENT       32
-/* The max number of bytes to copy per packet. */
-#define CP_DMA_MAX_BYTE_COUNT  ((1 << 21) - CP_DMA_ALIGNMENT)
+#define SI_CPDMA_ALIGNMENT     32
 
-static void si_emit_cp_dma_copy_buffer(struct radv_cmd_buffer *cmd_buffer,
-                                      uint64_t dst_va, uint64_t src_va,
-                                      unsigned size, unsigned flags)
+/* The max number of bytes that can be copied per packet. */
+static inline unsigned cp_dma_max_byte_count(struct radv_cmd_buffer *cmd_buffer)
+{
+       unsigned max = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 ?
+                              S_414_BYTE_COUNT_GFX9(~0u) :
+                              S_414_BYTE_COUNT_GFX6(~0u);
+
+       /* make it aligned for optimal performance */
+       return max & ~(SI_CPDMA_ALIGNMENT - 1);
+}
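+/* e.g. on GFX6-8 BYTE_COUNT is bits [20:0], so this returns
+ * 0x1fffff & ~31 = 0x1fffe0, matching the old CP_DMA_MAX_BYTE_COUNT. */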
+
+/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
+ * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
+ * clear value.
+ */
+static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer,
+                          uint64_t dst_va, uint64_t src_va,
+                          unsigned size, unsigned flags)
 {
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
-       uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
-       uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM_GFX6(1) : 0;
-       uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
-       uint32_t sel = flags & CIK_CP_DMA_USE_L2 ?
-                          S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) |
-                          S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;
+       uint32_t header = 0, command = 0;
 
        assert(size);
-       assert((size & ((1<<21)-1)) == size);
+       assert(size <= cp_dma_max_byte_count(cmd_buffer));
 
        radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
+               command |= S_414_BYTE_COUNT_GFX9(size);
+       else
+               command |= S_414_BYTE_COUNT_GFX6(size);
+
+       /* Sync flags. */
+       if (flags & CP_DMA_SYNC)
+               header |= S_411_CP_SYNC(1);
+       else {
+               if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
+                       command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
+               else
+                       command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
+       }
+
+       if (flags & CP_DMA_RAW_WAIT)
+               command |= S_414_RAW_WAIT(1);
+
+       /* Src and dst flags. */
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
+           !(flags & CP_DMA_CLEAR) &&
+           src_va == dst_va)
+               header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
+       else if (flags & CP_DMA_USE_L2)
+               header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);
+
+       if (flags & CP_DMA_CLEAR)
+               header |= S_411_SRC_SEL(V_411_DATA);
+       else if (flags & CP_DMA_USE_L2)
+               header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);
 
        if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
-               radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
-               radeon_emit(cs, sync_flag | sel);       /* CP_SYNC [31] */
+               radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, cmd_buffer->state.predicating));
+               radeon_emit(cs, header);
                radeon_emit(cs, src_va);                /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, src_va >> 32);          /* SRC_ADDR_HI [31:0] */
                radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [31:0] */
-               radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
+               radeon_emit(cs, command);
        } else {
-               radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
+               assert(!(flags & CP_DMA_USE_L2));
+               header |= S_411_SRC_ADDR_HI(src_va >> 32);
+               radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, cmd_buffer->state.predicating));
                radeon_emit(cs, src_va);                        /* SRC_ADDR_LO [31:0] */
-               radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
+               radeon_emit(cs, header);                        /* SRC_ADDR_HI [15:0] + flags. */
                radeon_emit(cs, dst_va);                        /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_va >> 32) & 0xffff);       /* DST_ADDR_HI [15:0] */
-               radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
+               radeon_emit(cs, command);
        }
 
        /* CP DMA is executed in ME, but index buffers are read by PFP.
@@ -975,63 +1256,23 @@ static void si_emit_cp_dma_copy_buffer(struct radv_cmd_buffer *cmd_buffer,
         * indices. If we wanted to execute CP DMA in PFP, this packet
         * should precede it.
         */
-       if (sync_flag && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
-               radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+       if ((flags & CP_DMA_SYNC) && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
+               radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
                radeon_emit(cs, 0);
        }
 
-       radv_cmd_buffer_trace_emit(cmd_buffer);
-}
-
-/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
-static void si_emit_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer,
-                                       uint64_t dst_va, unsigned size,
-                                       uint32_t clear_value, unsigned flags)
-{
-       struct radeon_winsys_cs *cs = cmd_buffer->cs;
-       uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
-       uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM_GFX6(1) : 0;
-       uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
-       uint32_t dst_sel = flags & CIK_CP_DMA_USE_L2 ? S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;
-
-       assert(size);
-       assert((size & ((1<<21)-1)) == size);
-
-       radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
-
-       if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
-               radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
-               radeon_emit(cs, sync_flag | dst_sel | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
-               radeon_emit(cs, clear_value);           /* DATA [31:0] */
-               radeon_emit(cs, 0);
-               radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
-               radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [15:0] */
-               radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
-       } else {
-               radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
-               radeon_emit(cs, clear_value);           /* DATA [31:0] */
-               radeon_emit(cs, sync_flag | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
-               radeon_emit(cs, dst_va);                        /* DST_ADDR_LO [31:0] */
-               radeon_emit(cs, (dst_va >> 32) & 0xffff);       /* DST_ADDR_HI [15:0] */
-               radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
-       }
-
-       /* See "copy_buffer" for explanation. */
-       if (sync_flag && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
-               radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
-               radeon_emit(cs, 0);
-       }
-       radv_cmd_buffer_trace_emit(cmd_buffer);
+       if (unlikely(cmd_buffer->device->trace_bo))
+               radv_cmd_buffer_trace_emit(cmd_buffer);
 }
 
 void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
                         unsigned size)
 {
-       uint64_t aligned_va = va & ~(CP_DMA_ALIGNMENT - 1);
-       uint64_t aligned_size = ((va + size + CP_DMA_ALIGNMENT -1) & ~(CP_DMA_ALIGNMENT - 1)) - aligned_va;
+       uint64_t aligned_va = va & ~(SI_CPDMA_ALIGNMENT - 1);
+       uint64_t aligned_size = ((va + size + SI_CPDMA_ALIGNMENT - 1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligned_va;
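+       /* e.g. va = 0x1005, size = 40 -> prefetch [0x1000, 0x1040), i.e. 64 bytes. */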
 
-       si_emit_cp_dma_copy_buffer(cmd_buffer, aligned_va, aligned_va,
-                                  aligned_size, CIK_CP_DMA_USE_L2);
+       si_emit_cp_dma(cmd_buffer, aligned_va, aligned_va,
+                      aligned_size, CP_DMA_USE_L2);
 }
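/*
 * A worked example of the alignment math above, assuming
 * SI_CPDMA_ALIGNMENT is a power of two (32 is used for illustration;
 * treat the value as an assumption):
 *
 *   va = 0x1005, size = 0x30
 *   aligned_va   = 0x1005 & ~31                          = 0x1000
 *   aligned_size = ((0x1005 + 0x30 + 31) & ~31) - 0x1000 = 0x40
 *
 * i.e. the prefetch is widened to cover every 32-byte block that the
 * range [va, va + size) touches.
 */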
 
 static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
@@ -1043,14 +1284,14 @@ static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_
         */
        if (cmd_buffer->state.flush_bits) {
                si_emit_cache_flush(cmd_buffer);
-               *flags |= SI_CP_DMA_RAW_WAIT;
+               *flags |= CP_DMA_RAW_WAIT;
        }
 
        /* Do the synchronization after the last DMA, so that all data
         * is written to memory.
         */
        if (byte_count == remaining_size)
-               *flags |= R600_CP_DMA_SYNC;
+               *flags |= CP_DMA_SYNC;
 }
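/*
 * An illustration, under assumed conditions, of the flag pattern the
 * helper above produces for a copy split into three chunks with cache
 * flushes pending at the start:
 *
 *   chunk 0: CP_DMA_RAW_WAIT  (flush_bits was non-zero; the DMA waits
 *                              for the flush, and si_emit_cache_flush()
 *                              clears flush_bits so later chunks do not
 *                              wait again)
 *   chunk 1: 0
 *   chunk 2: CP_DMA_SYNC      (byte_count == remaining_size)
 */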
 
 static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
@@ -1058,20 +1299,20 @@ static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigne
        uint64_t va;
        uint32_t offset;
        unsigned dma_flags = 0;
-       unsigned buf_size = CP_DMA_ALIGNMENT * 2;
+       unsigned buf_size = SI_CPDMA_ALIGNMENT * 2;
        void *ptr;
 
-       assert(size < CP_DMA_ALIGNMENT);
+       assert(size < SI_CPDMA_ALIGNMENT);
 
-       radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, CP_DMA_ALIGNMENT,  &offset, &ptr);
+       radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr);
 
-       va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+       va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
        va += offset;
 
        si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);
 
-       si_emit_cp_dma_copy_buffer(cmd_buffer, va, va + CP_DMA_ALIGNMENT, size,
-                                  dma_flags);
+       si_emit_cp_dma(cmd_buffer, va, va + SI_CPDMA_ALIGNMENT, size,
+                      dma_flags);
 }
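/*
 * An example under the assumption that SI_CPDMA_ALIGNMENT is 32: for
 * size == 12, a 64-byte scratch region is carved out of the upload BO
 * and a 12-byte DMA from (va + 32) to va is emitted. The bytes moved
 * are throwaway; only the byte count matters, since it pads the
 * engine's internal counter back to a multiple of the alignment.
 */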
 
 void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
@@ -1088,15 +1329,15 @@ void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
                 * just to align the internal counter. Otherwise, the DMA engine
                 * would slow down by an order of magnitude for following copies.
                 */
-               if (size % CP_DMA_ALIGNMENT)
-                       realign_size = CP_DMA_ALIGNMENT - (size % CP_DMA_ALIGNMENT);
+               if (size % SI_CPDMA_ALIGNMENT)
+                       realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);
 
                /* If the copy begins unaligned, we must start copying from the next
                 * aligned block and the skipped part should be copied after everything
                 * else has been copied. Only the src alignment matters, not dst.
                 */
-               if (src_va % CP_DMA_ALIGNMENT) {
-                       skipped_size = CP_DMA_ALIGNMENT - (src_va % CP_DMA_ALIGNMENT);
+               if (src_va % SI_CPDMA_ALIGNMENT) {
+                       skipped_size = SI_CPDMA_ALIGNMENT - (src_va % SI_CPDMA_ALIGNMENT);
                        /* The main part will be skipped if the size is too small. */
                        skipped_size = MIN2(skipped_size, size);
                        size -= skipped_size;
@@ -1107,14 +1348,14 @@ void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
 
        while (size) {
                unsigned dma_flags = 0;
-               unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
+               unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
 
                si_cp_dma_prepare(cmd_buffer, byte_count,
                                  size + skipped_size + realign_size,
                                  &dma_flags);
 
-               si_emit_cp_dma_copy_buffer(cmd_buffer, main_dest_va, main_src_va,
-                                          byte_count, dma_flags);
+               si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
+                              byte_count, dma_flags);
 
                size -= byte_count;
                main_src_va += byte_count;
@@ -1128,8 +1369,8 @@ void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
                                  size + skipped_size + realign_size,
                                  &dma_flags);
 
-               si_emit_cp_dma_copy_buffer(cmd_buffer, dest_va, src_va,
-                                          skipped_size, dma_flags);
+               si_emit_cp_dma(cmd_buffer, dest_va, src_va,
+                              skipped_size, dma_flags);
        }
        if (realign_size)
                si_cp_dma_realign_engine(cmd_buffer, realign_size);
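/*
 * A worked decomposition of the copy above, again assuming
 * SI_CPDMA_ALIGNMENT is 32: for size == 100 and src_va == 0x1008,
 *
 *   realign_size = 32 - (100 % 32)    = 28  (from the original size)
 *   skipped_size = 32 - (0x1008 % 32) = 24  (unaligned head)
 *   main size    = 100 - 24           = 76
 *
 * The 76-byte aligned main part is copied first (from src_va + 24), the
 * 24-byte unaligned head second, and finally a 28-byte throwaway copy
 * realigns the engine: 100 + 28 bytes total, a multiple of 32.
 */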
@@ -1145,14 +1386,14 @@ void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
        assert(va % 4 == 0 && size % 4 == 0);
 
        while (size) {
-               unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
-               unsigned dma_flags = 0;
+               unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
+               unsigned dma_flags = CP_DMA_CLEAR;
 
                si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);
 
                /* Emit the clear packet. */
-               si_emit_cp_dma_clear_buffer(cmd_buffer, va, byte_count, value,
-                                           dma_flags);
+               si_emit_cp_dma(cmd_buffer, va, value, byte_count,
+                              dma_flags);
 
                size -= byte_count;
                va += byte_count;
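/*
 * Note on the packet arguments above: with CP_DMA_CLEAR set, the merged
 * si_emit_cp_dma() takes the 32-bit fill value in the source-address
 * slot, as the call here shows. Assuming a per-packet limit of roughly
 * 2 MiB from cp_dma_max_byte_count() (the exact value is chip
 * dependent), a 5 MiB clear would be emitted as three packets, with
 * CP_DMA_SYNC set only on the last one by si_cp_dma_prepare().
 */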
@@ -1236,25 +1477,25 @@ void radv_cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_sampl
        switch (nr_samples) {
        default:
        case 1:
-               radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0);
-               radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0);
-               radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0);
-               radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0);
+               radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0);
+               radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0);
+               radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0);
+               radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0);
                break;
        case 2:
-               radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
-               radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
-               radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
-               radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
+               radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
+               radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
+               radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
+               radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
                break;
        case 4:
-               radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
-               radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
-               radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
-               radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
+               radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
+               radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
+               radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
+               radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
                break;
        case 8:
-               radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
+               radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
                radeon_emit(cs, cm_sample_locs_8x[0]);
                radeon_emit(cs, cm_sample_locs_8x[4]);
                radeon_emit(cs, 0);
@@ -1271,7 +1512,7 @@ void radv_cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_sampl
                radeon_emit(cs, cm_sample_locs_8x[7]);
                break;
        case 16:
-               radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
+               radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
                radeon_emit(cs, cm_sample_locs_16x[0]);
                radeon_emit(cs, cm_sample_locs_16x[4]);
                radeon_emit(cs, cm_sample_locs_16x[8]);