radv: handle primitive id input into fragment shader with no geom shader
diff --git a/src/amd/vulkan/si_cmd_buffer.c b/src/amd/vulkan/si_cmd_buffer.c
index 6e50f64a29a07b0a6ecf5ebb9bf500b9544a3f0e..bf08214c551a8ef34a16ac48fa7a71e2dc25cd63 100644
--- a/src/amd/vulkan/si_cmd_buffer.c
+++ b/src/amd/vulkan/si_cmd_buffer.c
@@ -30,6 +30,7 @@
 #include "radv_private.h"
 #include "radv_cs.h"
 #include "sid.h"
+#include "gfx9d.h"
 #include "radv_util.h"
 #include "main/macros.h"
 
@@ -241,6 +242,9 @@ si_emit_config(struct radv_physical_device *physical_device,
        radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
 
        radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
+       radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
+       if (physical_device->rad_info.chip_class >= GFX9)
+               radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF, 0);
        radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
        if (physical_device->rad_info.chip_class < CIK)
                radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
@@ -297,6 +301,7 @@ si_emit_config(struct radv_physical_device *physical_device,
                raster_config_1 = 0x0000002a;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                raster_config = 0x16000012;
                raster_config_1 = 0x00000000;
                break;
@@ -327,24 +332,28 @@ si_emit_config(struct radv_physical_device *physical_device,
                raster_config_1 = 0x00000000;
                break;
        default:
-               fprintf(stderr,
-                       "radeonsi: Unknown GPU, using 0 for raster_config\n");
-               raster_config = 0x00000000;
-               raster_config_1 = 0x00000000;
+               if (physical_device->rad_info.chip_class <= VI) {
+                       fprintf(stderr,
+                               "radeonsi: Unknown GPU, using 0 for raster_config\n");
+                       raster_config = 0x00000000;
+                       raster_config_1 = 0x00000000;
+               }
                break;
        }
 
        /* Always use the default config when all backends are enabled
         * (or when we failed to determine the enabled backends).
         */
-       if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
-               radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG,
-                                      raster_config);
-               if (physical_device->rad_info.chip_class >= CIK)
-                       radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
-                                              raster_config_1);
-       } else {
-               si_write_harvested_raster_configs(physical_device, cs, raster_config, raster_config_1);
+       if (physical_device->rad_info.chip_class <= VI) {
+               if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
+                       radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG,
+                                              raster_config);
+                       if (physical_device->rad_info.chip_class >= CIK)
+                               radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
+                                                      raster_config_1);
+               } else {
+                       si_write_harvested_raster_configs(physical_device, cs, raster_config, raster_config_1);
+               }
        }
 
        radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1));
@@ -361,11 +370,6 @@ si_emit_config(struct radv_physical_device *physical_device,
        radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
        radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);
 
-       radeon_set_context_reg(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, fui(1.0));
-       radeon_set_context_reg(cs, R_028BEC_PA_CL_GB_VERT_DISC_ADJ, fui(1.0));
-       radeon_set_context_reg(cs, R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ, fui(1.0));
-       radeon_set_context_reg(cs, R_028BF4_PA_CL_GB_HORZ_DISC_ADJ, fui(1.0));
-
        radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
        radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
        radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
@@ -373,22 +377,31 @@ si_emit_config(struct radv_physical_device *physical_device,
                               S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
                               S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));
 
-       radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
-       radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
-       radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
+       if (physical_device->rad_info.chip_class >= GFX9) {
+               radeon_set_uconfig_reg(cs, R_030920_VGT_MAX_VTX_INDX, ~0);
+               radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0);
+               radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0);
+       } else {
+               radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
+               radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
+               radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
+       }
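+
+       /* GFX9 moved these VGT index registers out of context-register space,
+        * hence radeon_set_uconfig_reg() above. A minimal sketch of such a
+        * helper, assuming the radeonsi-style SET_UCONFIG_REG packet layout
+        * (PKT3_SET_UCONFIG_REG and the CIK_UCONFIG_REG_* bounds come from
+        * sid.h; the exact helper shape is an assumption, not a quote of
+        * radv_cs.h):
+        *
+        *   static inline void
+        *   set_uconfig_reg_sketch(struct radeon_winsys_cs *cs,
+        *                          unsigned reg, uint32_t value)
+        *   {
+        *           assert(reg >= CIK_UCONFIG_REG_OFFSET &&
+        *                  reg < CIK_UCONFIG_REG_END);
+        *           radeon_emit(cs, PKT3(PKT3_SET_UCONFIG_REG, 1, 0));
+        *           radeon_emit(cs, (reg - CIK_UCONFIG_REG_OFFSET) >> 2);
+        *           radeon_emit(cs, value);
+        *   }
+        */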
 
        if (physical_device->rad_info.chip_class >= CIK) {
-               /* If this is 0, Bonaire can hang even if GS isn't being used.
-                * Other chips are unaffected. These are suboptimal values,
-                * but we don't use on-chip GS.
-                */
-               radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
-                                      S_028A44_ES_VERTS_PER_SUBGRP(64) |
-                                      S_028A44_GS_PRIMS_PER_SUBGRP(4));
-
-               radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xffff));
-               radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 0);
-               radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, S_00B31C_CU_EN(0xffff));
+               if (physical_device->rad_info.chip_class >= GFX9) {
+                       radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, S_00B41C_CU_EN(0xffff));
+               } else {
+                       radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xffff));
+                       radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 0);
+                       radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, S_00B31C_CU_EN(0xffff));
+                       /* If this is 0, Bonaire can hang even if GS isn't being used.
+                        * Other chips are unaffected. These are suboptimal values,
+                        * but we don't use on-chip GS.
+                        */
+                       radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
+                                              S_028A44_ES_VERTS_PER_SUBGRP(64) |
+                                              S_028A44_GS_PRIMS_PER_SUBGRP(4));
+               }
                radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, S_00B21C_CU_EN(0xffff));
 
                if (physical_device->rad_info.num_good_compute_units /
@@ -415,24 +428,65 @@ si_emit_config(struct radv_physical_device *physical_device,
        }
 
        if (physical_device->rad_info.chip_class >= VI) {
+               uint32_t vgt_tess_distribution;
                radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL,
                                       S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
                                       S_028424_OVERWRITE_COMBINER_WATERMARK(4));
-               radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 30);
+               if (physical_device->rad_info.family < CHIP_POLARIS10)
+                       radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 30);
                radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 32);
+
+               vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
+                       S_028B50_ACCUM_TRI(11) |
+                       S_028B50_ACCUM_QUAD(11) |
+                       S_028B50_DONUT_SPLIT(16);
+
+               if (physical_device->rad_info.family == CHIP_FIJI ||
+                   physical_device->rad_info.family >= CHIP_POLARIS10)
+                       vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);
+
                radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
-                                      S_028B50_ACCUM_ISOLINE(32) |
-                                      S_028B50_ACCUM_TRI(11) |
-                                      S_028B50_ACCUM_QUAD(11) |
-                                      S_028B50_DONUT_SPLIT(16));
+                                      vgt_tess_distribution);
        } else {
                radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
                radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
        }
 
-       if (physical_device->rad_info.family == CHIP_STONEY)
+       if (physical_device->has_rbplus)
                radeon_set_context_reg(cs, R_028C40_PA_SC_SHADER_CONTROL, 0);
 
+       if (physical_device->rad_info.chip_class >= GFX9) {
+               unsigned num_se = physical_device->rad_info.max_se;
+               unsigned pc_lines = 0;
+
+               switch (physical_device->rad_info.family) {
+               case CHIP_VEGA10:
+                       pc_lines = 4096;
+                       break;
+               case CHIP_RAVEN:
+                       pc_lines = 1024;
+                       break;
+               default:
+                       assert(0);
+               }
+
+               radeon_set_context_reg(cs, R_028060_DB_DFSM_CONTROL,
+                                      S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF));
+               radeon_set_context_reg(cs, R_028064_DB_RENDER_FILTER, 0);
+               /* TODO: We can use this to disable RBs for rendering to GART: */
+               radeon_set_context_reg(cs, R_02835C_PA_SC_TILE_STEERING_OVERRIDE, 0);
+               radeon_set_context_reg(cs, R_02883C_PA_SU_OVER_RASTERIZATION_CNTL, 0);
+               /* TODO: Enable the binner: */
+               radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
+                                      S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
+                                      S_028C44_DISABLE_START_OF_PRIM(1));
+               radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1,
+                                      S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) |
+                                      S_028C48_MAX_PRIM_PER_BATCH(1023));
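+               /* Worked example for MAX_ALLOC_COUNT above, assuming
+                * max_se = 4 on Vega10 and max_se = 1 on Raven:
+                *   Vega10: 4096 / (4 * 4) = 256 -> MIN2(128, 256) = 128
+                *   Raven:  1024 / (4 * 1) = 256 -> MIN2(128, 256) = 128
+                * i.e. both currently supported GFX9 parts land on the 128 cap.
+                */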
+               radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
+                                      S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
+               radeon_set_uconfig_reg(cs, R_030968_VGT_INSTANCE_BASE_ID, 0);
+       }
        si_emit_compute(physical_device, cs);
 }
 
@@ -506,21 +560,7 @@ si_write_viewport(struct radeon_winsys_cs *cs, int first_vp,
 {
        int i;
 
-       if (count == 0) {
-               radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE, 6);
-               radeon_emit(cs, fui(1.0));
-               radeon_emit(cs, fui(0.0));
-               radeon_emit(cs, fui(1.0));
-               radeon_emit(cs, fui(0.0));
-               radeon_emit(cs, fui(1.0));
-               radeon_emit(cs, fui(0.0));
-
-               radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0, 2);
-               radeon_emit(cs, fui(0.0));
-               radeon_emit(cs, fui(1.0));
-
-               return;
-       }
+       assert(count);
        radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
                                   first_vp * 4 * 6, count * 6);
 
@@ -547,22 +587,75 @@ si_write_viewport(struct radeon_winsys_cs *cs, int first_vp,
        }
 }
 
+static VkRect2D si_scissor_from_viewport(const VkViewport *viewport)
+{
+       float scale[3], translate[3];
+       VkRect2D rect;
+
+       get_viewport_xform(viewport, scale, translate);
+
+       rect.offset.x = translate[0] - fabsf(scale[0]);
+       rect.offset.y = translate[1] - fabsf(scale[1]);
+       rect.extent.width = ceilf(translate[0] + fabsf(scale[0])) - rect.offset.x;
+       rect.extent.height = ceilf(translate[1] + fabsf(scale[1])) - rect.offset.y;
+
+       return rect;
+}
+
+static VkRect2D si_intersect_scissor(const VkRect2D *a, const VkRect2D *b) {
+       VkRect2D ret;
+       ret.offset.x = MAX2(a->offset.x, b->offset.x);
+       ret.offset.y = MAX2(a->offset.y, b->offset.y);
+       ret.extent.width = MIN2(a->offset.x + a->extent.width,
+                               b->offset.x + b->extent.width) - ret.offset.x;
+       ret.extent.height = MIN2(a->offset.y + a->extent.height,
+                                b->offset.y + b->extent.height) - ret.offset.y;
+       return ret;
+}
+
 void
 si_write_scissors(struct radeon_winsys_cs *cs, int first,
-                  int count, const VkRect2D *scissors)
+                  int count, const VkRect2D *scissors,
+                  const VkViewport *viewports, bool can_use_guardband)
 {
        int i;
-       if (count == 0)
-               return;
+       float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY;
+       const float max_range = 32767.0f;
+       assert(count);
 
        radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
        for (i = 0; i < count; i++) {
-               radeon_emit(cs, S_028250_TL_X(scissors[i].offset.x) |
-                           S_028250_TL_Y(scissors[i].offset.y) |
+               VkRect2D viewport_scissor = si_scissor_from_viewport(viewports + i);
+               VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);
+
+               get_viewport_xform(viewports + i, scale, translate);
+               scale[0] = fabsf(scale[0]);
+               scale[1] = fabsf(scale[1]);
+
+               if (scale[0] < 0.5)
+                       scale[0] = 0.5;
+               if (scale[1] < 0.5)
+                       scale[1] = 0.5;
+
+               guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
+               guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);
+
+               radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
+                           S_028250_TL_Y(scissor.offset.y) |
                            S_028250_WINDOW_OFFSET_DISABLE(1));
-               radeon_emit(cs, S_028254_BR_X(scissors[i].offset.x + scissors[i].extent.width) |
-                           S_028254_BR_Y(scissors[i].offset.y + scissors[i].extent.height));
+               radeon_emit(cs, S_028254_BR_X(scissor.offset.x + scissor.extent.width) |
+                           S_028254_BR_Y(scissor.offset.y + scissor.extent.height));
        }
+       if (!can_use_guardband) {
+               guardband_x = 1.0;
+               guardband_y = 1.0;
+       }
+
+       radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
+       radeon_emit(cs, fui(guardband_y));
+       radeon_emit(cs, fui(1.0));
+       radeon_emit(cs, fui(guardband_x));
+       radeon_emit(cs, fui(1.0));
 }
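+
+/* Worked example for the guardband computation above, assuming
+ * get_viewport_xform() yields scale = extent / 2 and
+ * translate = offset + extent / 2 per axis:
+ *
+ *   1920x1080 viewport at (0, 0): scale[0] = 960, translate[0] = 960
+ *   guardband_x = (32767 - 960) / 960 ~= 33.1
+ *
+ * Clamping scale to at least 0.5 keeps the division well-defined for
+ * degenerate viewports, and a guardband of 1.0 (i.e. no guardband) is
+ * forced when can_use_guardband is false.
+ */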
 
 static inline unsigned
@@ -600,13 +693,46 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
        uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
        bool multi_instances_smaller_than_primgroup;
 
-       if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
+       if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
+               primgroup_size = cmd_buffer->state.pipeline->graphics.tess.num_patches;
+       else if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
                primgroup_size = 64;  /* recommended with a GS */
 
        multi_instances_smaller_than_primgroup = indirect_draw || (instanced_draw &&
                                                                   num_prims < primgroup_size);
-       /* TODO TES */
+       if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT]->info.fs.prim_id_input)
+               ia_switch_on_eoi = true;
+
+       if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) {
+               /* SWITCH_ON_EOI must be set if PrimID is used. */
+               if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.uses_prim_id ||
+                   cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.uses_prim_id)
+                       ia_switch_on_eoi = true;
 
+               /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
+               if ((family == CHIP_TAHITI ||
+                    family == CHIP_PITCAIRN ||
+                    family == CHIP_BONAIRE) &&
+                   radv_pipeline_has_gs(cmd_buffer->state.pipeline))
+                       partial_vs_wave = true;
+
+               /* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
+               if (cmd_buffer->device->has_distributed_tess) {
+                       if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
+                               if (chip_class <= VI)
+                                       partial_es_wave = true;
+
+                               if (family == CHIP_TONGA ||
+                                   family == CHIP_FIJI ||
+                                   family == CHIP_POLARIS10 ||
+                                   family == CHIP_POLARIS11 ||
+                                   family == CHIP_POLARIS12)
+                                       partial_vs_wave = true;
+                       } else {
+                               partial_vs_wave = true;
+                       }
+               }
+       }
        /* TODO linestipple */
 
        if (chip_class >= CIK) {
@@ -662,10 +788,15 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
                assert(wd_switch_on_eop || !ia_switch_on_eop);
        }
        /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
-       if (ia_switch_on_eoi)
+       if (chip_class <= VI && ia_switch_on_eoi)
                partial_es_wave = true;
 
        if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
+               if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.uses_prim_id)
+                       ia_switch_on_eoi = true;
+
                /* GS requirement. */
                if (SI_GS_PER_ES / primgroup_size >= cmd_buffer->device->gs_table_depth - 3)
                        partial_es_wave = true;
@@ -684,22 +815,88 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
                S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
                S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
                S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0) |
-               S_028AA8_MAX_PRIMGRP_IN_WAVE(chip_class >= VI ?
-                                            max_primgroup_in_wave : 0);
+               /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
+               S_028AA8_MAX_PRIMGRP_IN_WAVE(chip_class == VI ?
+                                            max_primgroup_in_wave : 0) |
+               S_030960_EN_INST_OPT_BASIC(chip_class >= GFX9) |
+               S_030960_EN_INST_OPT_ADV(chip_class >= GFX9);
+
+}
+
+void si_cs_emit_write_event_eop(struct radeon_winsys_cs *cs,
+                               enum chip_class chip_class,
+                               bool is_mec,
+                               unsigned event, unsigned event_flags,
+                               unsigned data_sel,
+                               uint64_t va,
+                               uint32_t old_fence,
+                               uint32_t new_fence)
+{
+       unsigned op = EVENT_TYPE(event) |
+               EVENT_INDEX(5) |
+               event_flags;
+       unsigned is_gfx8_mec = is_mec && chip_class < GFX9;
+
+       if (chip_class >= GFX9 || is_gfx8_mec) {
+               radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, 0));
+               radeon_emit(cs, op);
+               radeon_emit(cs, EOP_DATA_SEL(data_sel));
+               radeon_emit(cs, va);            /* address lo */
+               radeon_emit(cs, va >> 32);      /* address hi */
+               radeon_emit(cs, new_fence);     /* immediate data lo */
+               radeon_emit(cs, 0); /* immediate data hi */
+               if (!is_gfx8_mec)
+                       radeon_emit(cs, 0); /* unused */
+       } else {
+               if (chip_class == CIK ||
+                   chip_class == VI) {
+                       /* Two EOP events are required to make all engines go idle
+                        * (and optional cache flushes executed) before the timestamp
+                        * is written.
+                        */
+                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
+                       radeon_emit(cs, op);
+                       radeon_emit(cs, va);
+                       radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
+                       radeon_emit(cs, old_fence); /* immediate data */
+                       radeon_emit(cs, 0); /* unused */
+               }
+
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
+               radeon_emit(cs, op);
+               radeon_emit(cs, va);
+               radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
+               radeon_emit(cs, new_fence); /* immediate data */
+               radeon_emit(cs, 0); /* unused */
+       }
+}
 
+void
+si_emit_wait_fence(struct radeon_winsys_cs *cs,
+                  uint64_t va, uint32_t ref,
+                  uint32_t mask)
+{
+       radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+       radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
+       radeon_emit(cs, va);
+       radeon_emit(cs, va >> 32);
+       radeon_emit(cs, ref); /* reference value */
+       radeon_emit(cs, mask); /* mask */
+       radeon_emit(cs, 4); /* poll interval */
 }
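+
+/* Usage sketch showing how the two helpers above pair up, mirroring the
+ * GFX9 cache-flush path further down (cs, chip_class, va and fence_cnt
+ * stand in for caller state; data_sel = 1 selects a 32-bit immediate
+ * fence write):
+ *
+ *   uint32_t old_fence = fence_cnt++;
+ *   si_cs_emit_write_event_eop(cs, chip_class, false,
+ *                              V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT,
+ *                              0, 1, va, old_fence, fence_cnt);
+ *   si_emit_wait_fence(cs, va, fence_cnt, 0xffffffff);
+ *
+ * The EOP event writes the bumped counter to memory once the flush
+ * completes, and WAIT_REG_MEM stalls the CP until that value appears.
+ */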
 
 static void
 si_emit_acquire_mem(struct radeon_winsys_cs *cs,
-                    bool is_mec,
+                    bool is_mec, bool is_gfx9,
                     unsigned cp_coher_cntl)
 {
-       if (is_mec) {
+       if (is_mec || is_gfx9) {
+               uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
                radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) |
-                                           PKT3_SHADER_TYPE_S(1));
+                                           PKT3_SHADER_TYPE_S(is_mec));
                radeon_emit(cs, cp_coher_cntl);   /* CP_COHER_CNTL */
                radeon_emit(cs, 0xffffffff);      /* CP_COHER_SIZE */
-               radeon_emit(cs, 0xff);            /* CP_COHER_SIZE_HI */
+               radeon_emit(cs, hi_val);          /* CP_COHER_SIZE_HI */
                radeon_emit(cs, 0);               /* CP_COHER_BASE */
                radeon_emit(cs, 0);               /* CP_COHER_BASE_HI */
                radeon_emit(cs, 0x0000000A);      /* POLL_INTERVAL */
@@ -716,42 +913,45 @@ si_emit_acquire_mem(struct radeon_winsys_cs *cs,
 void
 si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
                        enum chip_class chip_class,
+                      uint32_t *flush_cnt,
+                      uint64_t flush_va,
                        bool is_mec,
                        enum radv_cmd_flush_bits flush_bits)
 {
        unsigned cp_coher_cntl = 0;
-
+       uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+                                            RADV_CMD_FLAG_FLUSH_AND_INV_DB);
+
        if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
                cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
        if (flush_bits & RADV_CMD_FLAG_INV_SMEM_L1)
                cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
 
-       if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
-               cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
-                       S_0085F0_CB0_DEST_BASE_ENA(1) |
-                       S_0085F0_CB1_DEST_BASE_ENA(1) |
-                       S_0085F0_CB2_DEST_BASE_ENA(1) |
-                       S_0085F0_CB3_DEST_BASE_ENA(1) |
-                       S_0085F0_CB4_DEST_BASE_ENA(1) |
-                       S_0085F0_CB5_DEST_BASE_ENA(1) |
-                       S_0085F0_CB6_DEST_BASE_ENA(1) |
-                       S_0085F0_CB7_DEST_BASE_ENA(1);
-
-               /* Necessary for DCC */
-               if (chip_class >= VI) {
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
-                       radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_DATA_TS) |
-                                                   EVENT_INDEX(5));
-                       radeon_emit(cs, 0);
-                       radeon_emit(cs, 0);
-                       radeon_emit(cs, 0);
-                       radeon_emit(cs, 0);
+       if (chip_class <= VI) {
+               if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
+                       cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
+                               S_0085F0_CB0_DEST_BASE_ENA(1) |
+                               S_0085F0_CB1_DEST_BASE_ENA(1) |
+                               S_0085F0_CB2_DEST_BASE_ENA(1) |
+                               S_0085F0_CB3_DEST_BASE_ENA(1) |
+                               S_0085F0_CB4_DEST_BASE_ENA(1) |
+                               S_0085F0_CB5_DEST_BASE_ENA(1) |
+                               S_0085F0_CB6_DEST_BASE_ENA(1) |
+                               S_0085F0_CB7_DEST_BASE_ENA(1);
+
+                       /* Necessary for DCC */
+                       if (chip_class >= VI) {
+                               si_cs_emit_write_event_eop(cs,
+                                                          chip_class,
+                                                          is_mec,
+                                                          V_028A90_FLUSH_AND_INV_CB_DATA_TS,
+                                                          0, 0, 0, 0, 0);
+                       }
+               }
+               if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
+                       cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
+                               S_0085F0_DB_DEST_BASE_ENA(1);
                }
-       }
-
-       if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
-               cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
-                       S_0085F0_DB_DEST_BASE_ENA(1);
        }
 
        if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
@@ -764,8 +964,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
                radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
        }
 
-       if (!(flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
-                                             RADV_CMD_FLAG_FLUSH_AND_INV_DB))) {
+       if (!flush_cb_db) {
                if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
                        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                        radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
@@ -780,6 +979,54 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
                radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
        }
 
+       if (chip_class >= GFX9 && flush_cb_db) {
+               unsigned cb_db_event, tc_flags;
+
+               /* Set the CB/DB flush event. */
+               switch (flush_cb_db) {
+               case RADV_CMD_FLAG_FLUSH_AND_INV_CB:
+                       cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
+                       break;
+               case RADV_CMD_FLAG_FLUSH_AND_INV_DB:
+                       cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
+                       break;
+               default:
+                       /* both CB & DB */
+                       cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
+               }
+
+               /* TC    | TC_WB         = invalidate L2 data
+                * TC_MD | TC_WB         = invalidate L2 metadata
+                * TC    | TC_WB | TC_MD = invalidate L2 data & metadata
+                *
+                * The metadata cache must always be invalidated for coherency
+                * between CB/DB and shaders. (metadata = HTILE, CMASK, DCC)
+                *
+                * TC must be invalidated on GFX9 only if the CB/DB surface is
+                * not pipe-aligned. If the surface is RB-aligned, it might not
+                * strictly be pipe-aligned since RB alignment takes precedence.
+                */
+               tc_flags = EVENT_TC_WB_ACTION_ENA |
+                          EVENT_TC_MD_ACTION_ENA;
+
+               /* Ideally flush TC together with CB/DB. */
+               if (flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) {
+                       tc_flags |= EVENT_TC_ACTION_ENA |
+                                   EVENT_TCL1_ACTION_ENA;
+
+                       /* Clear the flags. */
+                       flush_bits &= ~(RADV_CMD_FLAG_INV_GLOBAL_L2 |
+                                        RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2 |
+                                        RADV_CMD_FLAG_INV_VMEM_L1);
+               }
+               assert(flush_cnt);
+               uint32_t old_fence = (*flush_cnt)++;
+
+               si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags, 1,
+                                          flush_va, old_fence, *flush_cnt);
+               si_emit_wait_fence(cs, flush_va, *flush_cnt, 0xffffffff);
+       }
+
        /* VGT state sync */
        if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
@@ -789,7 +1036,11 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
        /* Make sure ME is idle (it executes most packets) before continuing.
         * This prevents read-after-write hazards between PFP and ME.
         */
-       if ((cp_coher_cntl || (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) &&
+       if ((cp_coher_cntl ||
+            (flush_bits & (RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
+                           RADV_CMD_FLAG_INV_VMEM_L1 |
+                           RADV_CMD_FLAG_INV_GLOBAL_L2 |
+                           RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) &&
            !is_mec) {
                radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
                radeon_emit(cs, 0);
@@ -797,27 +1048,39 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
 
        if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
            (chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
-               cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
-               if (chip_class >= VI)
-                       cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
-       } else  if(flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2) {
-               cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1) |
-                                S_0301F0_TC_NC_ACTION_ENA(1);
-
-               /* L2 writeback doesn't combine with L1 invalidate */
-               si_emit_acquire_mem(cs, is_mec, cp_coher_cntl);
-
+               si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9,
+                                   cp_coher_cntl |
+                                   S_0085F0_TC_ACTION_ENA(1) |
+                                   S_0085F0_TCL1_ACTION_ENA(1) |
+                                   S_0301F0_TC_WB_ACTION_ENA(chip_class >= VI));
                cp_coher_cntl = 0;
+       } else {
+               if (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2) {
+                       /* WB = write-back
+                        * NC = apply to non-coherent MTYPEs
+                        *      (i.e. MTYPE <= 1, which is what we use everywhere)
+                        *
+                        * WB doesn't work without NC.
+                        */
+                       si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9,
+                                           cp_coher_cntl |
+                                           S_0301F0_TC_WB_ACTION_ENA(1) |
+                                           S_0301F0_TC_NC_ACTION_ENA(1));
+                       cp_coher_cntl = 0;
+               }
+               if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
+                       si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9,
+                                           cp_coher_cntl |
+                                           S_0085F0_TCL1_ACTION_ENA(1));
+                       cp_coher_cntl = 0;
+               }
        }
 
-       if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1)
-               cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
-
        /* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
         * Therefore, it should be last. Done in PFP.
         */
        if (cp_coher_cntl)
-               si_emit_acquire_mem(cs, is_mec, cp_coher_cntl);
+               si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, cp_coher_cntl);
 }
 
 void
@@ -834,67 +1097,118 @@ si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
                                                  RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
                                                  RADV_CMD_FLAG_VGT_FLUSH);
 
+       if (!cmd_buffer->state.flush_bits)
+               return;
+
+       enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
        radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);
 
+       uint32_t *ptr = NULL;
+       uint64_t va = 0;
+       if (chip_class == GFX9) {
+               va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset;
+               ptr = &cmd_buffer->gfx9_fence_idx;
+       }
        si_cs_emit_cache_flush(cmd_buffer->cs,
                               cmd_buffer->device->physical_device->rad_info.chip_class,
+                              ptr, va,
                               radv_cmd_buffer_uses_mec(cmd_buffer),
                               cmd_buffer->state.flush_bits);
 
 
-       if (cmd_buffer->state.flush_bits)
-               radv_cmd_buffer_trace_emit(cmd_buffer);
+       radv_cmd_buffer_trace_emit(cmd_buffer);
        cmd_buffer->state.flush_bits = 0;
 }
 
 
 /* Set this if you want the 3D engine to wait until CP DMA is done.
  * It should be set on the last CP DMA packet. */
-#define R600_CP_DMA_SYNC       (1 << 0) /* R600+ */
+#define CP_DMA_SYNC    (1 << 0)
 
 /* Set this if the source data was used as a destination in a previous CP DMA
  * packet. It's for preventing a read-after-write (RAW) hazard between two
  * CP DMA packets. */
-#define SI_CP_DMA_RAW_WAIT     (1 << 1) /* SI+ */
-#define CIK_CP_DMA_USE_L2      (1 << 2)
+#define CP_DMA_RAW_WAIT        (1 << 1)
+#define CP_DMA_USE_L2  (1 << 2)
+#define CP_DMA_CLEAR   (1 << 3)
 
 /* Alignment for optimal performance. */
-#define CP_DMA_ALIGNMENT       32
-/* The max number of bytes to copy per packet. */
-#define CP_DMA_MAX_BYTE_COUNT  ((1 << 21) - CP_DMA_ALIGNMENT)
+#define SI_CPDMA_ALIGNMENT     32
 
-static void si_emit_cp_dma_copy_buffer(struct radv_cmd_buffer *cmd_buffer,
-                                      uint64_t dst_va, uint64_t src_va,
-                                      unsigned size, unsigned flags)
+/* The max number of bytes that can be copied per packet. */
+static inline unsigned cp_dma_max_byte_count(struct radv_cmd_buffer *cmd_buffer)
+{
+       unsigned max = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 ?
+                              S_414_BYTE_COUNT_GFX9(~0u) :
+                              S_414_BYTE_COUNT_GFX6(~0u);
+
+       /* make it aligned for optimal performance */
+       return max & ~(SI_CPDMA_ALIGNMENT - 1);
+}
+
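+/* Worked example for cp_dma_max_byte_count() above, assuming the usual
+ * sid.h field widths (byte count in bits [20:0] on GFX6-8, widened to
+ * [25:0] on GFX9):
+ *   GFX6-8: 0x1fffff  & ~31 = 2097120 bytes (~2 MiB) per packet
+ *   GFX9:   0x3ffffff & ~31 = 67108832 bytes (~64 MiB) per packet
+ */
+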
+/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
+ * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
+ * clear value.
+ */
+static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer,
+                          uint64_t dst_va, uint64_t src_va,
+                          unsigned size, unsigned flags)
 {
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
-       uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
-       uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM(1) : 0;
-       uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
-       uint32_t sel = flags & CIK_CP_DMA_USE_L2 ?
-                          S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) |
-                          S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;
+       uint32_t header = 0, command = 0;
 
        assert(size);
-       assert((size & ((1<<21)-1)) == size);
+       assert(size <= cp_dma_max_byte_count(cmd_buffer));
 
        radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
+               command |= S_414_BYTE_COUNT_GFX9(size);
+       else
+               command |= S_414_BYTE_COUNT_GFX6(size);
+
+       /* Sync flags. */
+       if (flags & CP_DMA_SYNC)
+               header |= S_411_CP_SYNC(1);
+       else {
+               if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
+                       command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
+               else
+                       command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
+       }
+
+       if (flags & CP_DMA_RAW_WAIT)
+               command |= S_414_RAW_WAIT(1);
+
+       /* Src and dst flags. */
+       if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
+           !(flags & CP_DMA_CLEAR) &&
+           src_va == dst_va)
+               header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
+       else if (flags & CP_DMA_USE_L2)
+               header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);
+
+       if (flags & CP_DMA_CLEAR)
+               header |= S_411_SRC_SEL(V_411_DATA);
+       else if (flags & CP_DMA_USE_L2)
+               header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);
 
        if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
                radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
-               radeon_emit(cs, sync_flag | sel);       /* CP_SYNC [31] */
+               radeon_emit(cs, header);
                radeon_emit(cs, src_va);                /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, src_va >> 32);          /* SRC_ADDR_HI [31:0] */
                radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [31:0] */
-               radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
+               radeon_emit(cs, command);
        } else {
+               assert(!(flags & CP_DMA_USE_L2));
+               header |= S_411_SRC_ADDR_HI(src_va >> 32);
                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, src_va);                        /* SRC_ADDR_LO [31:0] */
-               radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
+               radeon_emit(cs, header);                        /* SRC_ADDR_HI [15:0] + flags. */
                radeon_emit(cs, dst_va);                        /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_va >> 32) & 0xffff);       /* DST_ADDR_HI [15:0] */
-               radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
+               radeon_emit(cs, command);
        }
 
        /* CP DMA is executed in ME, but index buffers are read by PFP.
@@ -902,7 +1216,7 @@ static void si_emit_cp_dma_copy_buffer(struct radv_cmd_buffer *cmd_buffer,
         * indices. If we wanted to execute CP DMA in PFP, this packet
         * should precede it.
         */
-       if (sync_flag && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
+       if ((flags & CP_DMA_SYNC) && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
                radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
                radeon_emit(cs, 0);
        }
@@ -910,45 +1224,14 @@ static void si_emit_cp_dma_copy_buffer(struct radv_cmd_buffer *cmd_buffer,
        radv_cmd_buffer_trace_emit(cmd_buffer);
 }
 
-/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
-static void si_emit_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer,
-                                       uint64_t dst_va, unsigned size,
-                                       uint32_t clear_value, unsigned flags)
+void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
+                        unsigned size)
 {
-       struct radeon_winsys_cs *cs = cmd_buffer->cs;
-       uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
-       uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM(1) : 0;
-       uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
-       uint32_t dst_sel = flags & CIK_CP_DMA_USE_L2 ? S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;
-
-       assert(size);
-       assert((size & ((1<<21)-1)) == size);
-
-       radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
+       uint64_t aligned_va = va & ~(SI_CPDMA_ALIGNMENT - 1);
+       uint64_t aligned_size = ((va + size + SI_CPDMA_ALIGNMENT - 1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligned_va;
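+       /* Worked example: va = 0x10005, size = 100
+        *   aligned_va   = 0x10005 & ~31 = 0x10000
+        *   aligned_size = ((0x10005 + 100 + 31) & ~31) - 0x10000
+        *                = 0x10080 - 0x10000 = 128 bytes
+        * so the L2 prefetch covers the whole [va, va + size) range with
+        * both edges on the 32-byte CP DMA alignment.
+        */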
 
-       if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
-               radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
-               radeon_emit(cs, sync_flag | dst_sel | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
-               radeon_emit(cs, clear_value);           /* DATA [31:0] */
-               radeon_emit(cs, 0);
-               radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
-               radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [15:0] */
-               radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
-       } else {
-               radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
-               radeon_emit(cs, clear_value);           /* DATA [31:0] */
-               radeon_emit(cs, sync_flag | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
-               radeon_emit(cs, dst_va);                        /* DST_ADDR_LO [31:0] */
-               radeon_emit(cs, (dst_va >> 32) & 0xffff);       /* DST_ADDR_HI [15:0] */
-               radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
-       }
-
-       /* See "copy_buffer" for explanation. */
-       if (sync_flag && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
-               radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
-               radeon_emit(cs, 0);
-       }
-       radv_cmd_buffer_trace_emit(cmd_buffer);
+       si_emit_cp_dma(cmd_buffer, aligned_va, aligned_va,
+                      aligned_size, CP_DMA_USE_L2);
 }
 
 static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
@@ -960,14 +1243,14 @@ static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_
         */
        if (cmd_buffer->state.flush_bits) {
                si_emit_cache_flush(cmd_buffer);
-               *flags |= SI_CP_DMA_RAW_WAIT;
+               *flags |= CP_DMA_RAW_WAIT;
        }
 
        /* Do the synchronization after the last dma, so that all data
         * is written to memory.
         */
        if (byte_count == remaining_size)
-               *flags |= R600_CP_DMA_SYNC;
+               *flags |= CP_DMA_SYNC;
 }
 
 static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
@@ -975,20 +1258,20 @@ static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigne
        uint64_t va;
        uint32_t offset;
        unsigned dma_flags = 0;
-       unsigned buf_size = CP_DMA_ALIGNMENT * 2;
+       unsigned buf_size = SI_CPDMA_ALIGNMENT * 2;
        void *ptr;
 
-       assert(size < CP_DMA_ALIGNMENT);
+       assert(size < SI_CPDMA_ALIGNMENT);
 
-       radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, CP_DMA_ALIGNMENT,  &offset, &ptr);
+       radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr);
 
        va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
        va += offset;
 
        si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);
 
-       si_emit_cp_dma_copy_buffer(cmd_buffer, va, va + CP_DMA_ALIGNMENT, size,
-                                  dma_flags);
+       si_emit_cp_dma(cmd_buffer, va, va + SI_CPDMA_ALIGNMENT, size,
+                      dma_flags);
 }
 
 void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
@@ -1005,15 +1288,15 @@ void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
                 * just to align the internal counter. Otherwise, the DMA engine
                 * would slow down by an order of magnitude for following copies.
                 */
-               if (size % CP_DMA_ALIGNMENT)
-                       realign_size = CP_DMA_ALIGNMENT - (size % CP_DMA_ALIGNMENT);
+               if (size % SI_CPDMA_ALIGNMENT)
+                       realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);
 
                /* If the copy begins unaligned, we must start copying from the next
                 * aligned block and the skipped part should be copied after everything
                 * else has been copied. Only the src alignment matters, not dst.
                 */
-               if (src_va % CP_DMA_ALIGNMENT) {
-                       skipped_size = CP_DMA_ALIGNMENT - (src_va % CP_DMA_ALIGNMENT);
+               if (src_va % SI_CPDMA_ALIGNMENT) {
+                       skipped_size = SI_CPDMA_ALIGNMENT - (src_va % SI_CPDMA_ALIGNMENT);
                        /* The main part will be skipped if the size is too small. */
                        skipped_size = MIN2(skipped_size, size);
                        size -= skipped_size;
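+                       /* Worked example of this bookkeeping (size = 1000,
+                        * src_va % 32 == 13, on a chip needing the realign
+                        * workaround):
+                        *   realign_size = 32 - (1000 % 32) = 24
+                        *   skipped_size = 32 - 13          = 19
+                        * The main loop copies 1000 - 19 = 981 bytes from the
+                        * aligned source block, the 19 head bytes follow, and
+                        * a 24-byte dummy copy realigns the engine:
+                        * 981 + 19 + 24 = 1024, a multiple of 32.
+                        */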
@@ -1024,14 +1307,14 @@ void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
 
        while (size) {
                unsigned dma_flags = 0;
-               unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
+               unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
 
                si_cp_dma_prepare(cmd_buffer, byte_count,
                                  size + skipped_size + realign_size,
                                  &dma_flags);
 
-               si_emit_cp_dma_copy_buffer(cmd_buffer, main_dest_va, main_src_va,
-                                          byte_count, dma_flags);
+               si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
+                              byte_count, dma_flags);
 
                size -= byte_count;
                main_src_va += byte_count;
@@ -1045,8 +1328,8 @@ void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
                                  size + skipped_size + realign_size,
                                  &dma_flags);
 
-               si_emit_cp_dma_copy_buffer(cmd_buffer, dest_va, src_va,
-                                          skipped_size, dma_flags);
+               si_emit_cp_dma(cmd_buffer, dest_va, src_va,
+                              skipped_size, dma_flags);
        }
        if (realign_size)
                si_cp_dma_realign_engine(cmd_buffer, realign_size);
@@ -1062,14 +1345,14 @@ void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
        assert(va % 4 == 0 && size % 4 == 0);
 
        while (size) {
-               unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
-               unsigned dma_flags = 0;
+               unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
+               unsigned dma_flags = CP_DMA_CLEAR;
 
                si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);
 
                /* Emit the clear packet. */
-               si_emit_cp_dma_clear_buffer(cmd_buffer, va, byte_count, value,
-                                           dma_flags);
+               si_emit_cp_dma(cmd_buffer, va, value, byte_count,
+                              dma_flags);
 
                size -= byte_count;
                va += byte_count;