#include "radv_cs.h"
#include "sid.h"
#include "radv_util.h"
-#include "main/macros.h"
static void
si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
}
void
-si_emit_compute(struct radv_physical_device *physical_device,
+si_emit_compute(struct radv_device *device,
struct radeon_cmdbuf *cs)
{
radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
radeon_emit(cs, 0); /* COMPUTE_START_X */
radeon_emit(cs, 0); /* COMPUTE_START_Y */
radeon_emit(cs, 0); /* COMPUTE_START_Z */
radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
- /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
+ /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
+ * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
- if (physical_device->rad_info.chip_class >= GFX7) {
+ if (device->physical_device->rad_info.chip_class >= GFX7) {
/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
radeon_set_sh_reg_seq(cs,
R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
S_00B858_SH1_CU_EN(0xffff));
radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
S_00B858_SH1_CU_EN(0xffff));
+
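+ /* Program the border color palette address for compute samplers.
+ * TA_CS_BC_BASE_ADDR is presumably the compute-side counterpart of the
+ * graphics TA_BC_BASE_ADDR that si_emit_graphics() programs below. */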
+ if (device->border_color_data.bo) {
+ uint64_t bc_va = radv_buffer_get_va(device->border_color_data.bo);
+
+ radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
+ radeon_emit(cs, bc_va >> 8);
+ radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40));
+ }
+ }
+
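+ /* Assumed: CP_COHER_START_DELAY delays the start of surface-sync
+ * processing in the CP; 0x20 on gfx10 mirrors what radeonsi programs. */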
+ if (device->physical_device->rad_info.chip_class >= GFX9) {
+ radeon_set_uconfig_reg(cs, R_0301EC_CP_COHER_START_DELAY,
+ device->physical_device->rad_info.chip_class >= GFX10 ? 0x20 : 0);
+ }
+
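+ /* Clear the gfx10-only compute state (user accumulators, PGM_RSRC3,
+ * dispatch tunneling) so the queue starts from known values instead of
+ * whatever was left programmed. */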
+ if (device->physical_device->rad_info.chip_class >= GFX10) {
+ radeon_set_sh_reg(cs, R_00B890_COMPUTE_USER_ACCUM_0, 0);
+ radeon_set_sh_reg(cs, R_00B894_COMPUTE_USER_ACCUM_1, 0);
+ radeon_set_sh_reg(cs, R_00B898_COMPUTE_USER_ACCUM_2, 0);
+ radeon_set_sh_reg(cs, R_00B89C_COMPUTE_USER_ACCUM_3, 0);
+ radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, 0);
+ radeon_set_sh_reg(cs, R_00B9F4_COMPUTE_DISPATCH_TUNNEL, 0);
}
/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
* and is now per pipe, so it should be handled in the
* kernel if we want to use something other than the default value,
* which is now 0x22f.
*/
- if (physical_device->rad_info.chip_class <= GFX6) {
+ if (device->physical_device->rad_info.chip_class <= GFX6) {
/* XXX: This should be:
* (number of compute units) * 4 * (waves per simd) - 1 */
radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
0x190 /* Default value */);
+
+ if (device->border_color_data.bo) {
+ uint64_t bc_va = radv_buffer_get_va(device->border_color_data.bo);
+ radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR, bc_va >> 8);
+ }
}
}
}
void
-si_emit_graphics(struct radv_physical_device *physical_device,
+si_emit_graphics(struct radv_device *device,
struct radeon_cmdbuf *cs)
{
- int i;
+ struct radv_physical_device *physical_device = device->physical_device;
- /* Only GFX6 can disable CLEAR_STATE for now. */
- assert(physical_device->has_clear_state ||
- physical_device->rad_info.chip_class == GFX6);
+ bool has_clear_state = physical_device->rad_info.has_clear_state;
+ int i;
radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
- radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1));
- radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1));
+ radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
+ radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
- if (physical_device->has_clear_state) {
+ if (has_clear_state) {
radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
radeon_emit(cs, 0);
}
si_set_raster_config(physical_device, cs);
radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
- if (!physical_device->has_clear_state)
+ if (!has_clear_state)
radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
/* FIXME calculate these values somehow ??? */
radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
}
- if (!physical_device->has_clear_state) {
+ if (!has_clear_state) {
radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);
radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
}
- radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
- if (!physical_device->has_clear_state)
+ if (physical_device->rad_info.chip_class <= GFX9)
+ radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
+ if (!has_clear_state)
radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
if (physical_device->rad_info.chip_class < GFX7)
radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
S_008A14_CLIP_VTX_REORDER_ENA(1));
- if (!physical_device->has_clear_state)
+ if (!has_clear_state)
radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
/* CLEAR_STATE doesn't clear these correctly on certain generations.
* I don't know why. Deduced by trial and error.
*/
- if (physical_device->rad_info.chip_class <= GFX7) {
+ if (physical_device->rad_info.chip_class <= GFX7 || !has_clear_state) {
radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL,
S_028204_WINDOW_OFFSET_DISABLE(1));
radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
S_028034_BR_X(16384) | S_028034_BR_Y(16384));
}
- if (!physical_device->has_clear_state) {
+ if (!has_clear_state) {
for (i = 0; i < 16; i++) {
radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
}
}
- if (!physical_device->has_clear_state) {
+ if (!has_clear_state) {
radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
/* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on GFX6 */
radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
S_02800C_FORCE_HIZ_ENABLE(V_02800C_FORCE_DISABLE) |
S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));
- if (physical_device->rad_info.chip_class >= GFX9) {
+ if (physical_device->rad_info.chip_class >= GFX10) {
+ radeon_set_context_reg(cs, R_028A98_VGT_DRAW_PAYLOAD_CNTL, 0);
+ radeon_set_uconfig_reg(cs, R_030964_GE_MAX_VTX_INDX, ~0);
+ radeon_set_uconfig_reg(cs, R_030924_GE_MIN_VTX_INDX, 0);
+ radeon_set_uconfig_reg(cs, R_030928_GE_INDX_OFFSET, 0);
+ radeon_set_uconfig_reg(cs, R_03097C_GE_STEREO_CNTL, 0);
+ radeon_set_uconfig_reg(cs, R_030988_GE_USER_VGPR_EN, 0);
+ } else if (physical_device->rad_info.chip_class == GFX9) {
radeon_set_uconfig_reg(cs, R_030920_VGT_MAX_VTX_INDX, ~0);
radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0);
radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0);
}
if (physical_device->rad_info.chip_class >= GFX7) {
+ if (physical_device->rad_info.chip_class >= GFX10) {
+ /* Logical CUs 16 - 31 */
+ radeon_set_sh_reg_idx(physical_device, cs, R_00B404_SPI_SHADER_PGM_RSRC4_HS,
+ 3, S_00B404_CU_EN(0xffff));
+ radeon_set_sh_reg_idx(physical_device, cs, R_00B104_SPI_SHADER_PGM_RSRC4_VS,
+ 3, S_00B104_CU_EN(0xffff));
+ radeon_set_sh_reg_idx(physical_device, cs, R_00B004_SPI_SHADER_PGM_RSRC4_PS,
+ 3, S_00B004_CU_EN(0xffff));
+ }
+
if (physical_device->rad_info.chip_class >= GFX9) {
- radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
- S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F));
+ radeon_set_sh_reg_idx(physical_device, cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
+ 3, S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F));
} else {
radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS,
S_00B51C_CU_EN(0xffff) | S_00B51C_WAVE_LIMIT(0x3F));
radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
S_028A44_ES_VERTS_PER_SUBGRP(64) |
S_028A44_GS_PRIMS_PER_SUBGRP(4));
}
- radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
- S_00B21C_CU_EN(0xffff) | S_00B21C_WAVE_LIMIT(0x3F));
- if (physical_device->rad_info.num_good_cu_per_sh <= 4) {
- /* Too few available compute units per SH. Disallowing
- * VS to run on CU0 could hurt us more than late VS
- * allocation would help.
- *
- * LATE_ALLOC_VS = 2 is the highest safe number.
+ /* Compute LATE_ALLOC_VS.LIMIT. */
+ unsigned num_cu_per_sh = physical_device->rad_info.min_good_cu_per_sa;
+ unsigned late_alloc_wave64 = 0; /* The limit is per SA. */
+ unsigned late_alloc_wave64_gs = 0;
+ unsigned cu_mask_vs = 0xffff;
+ unsigned cu_mask_gs = 0xffff;
+
+ if (physical_device->rad_info.chip_class >= GFX10) {
+ /* For Wave32, the hw will launch twice the number of late
+ * alloc waves, so 1 == 2x wave32.
*/
- radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
- S_00B118_CU_EN(0xffff) | S_00B118_WAVE_LIMIT(0x3F) );
- radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2));
- } else {
- /* Set LATE_ALLOC_VS == 31. It should be less than
- * the number of scratch waves. Limitations:
- * - VS can't execute on CU0.
- * - If HS writes outputs to LDS, LS can't execute on CU0.
+ if (!physical_device->rad_info.use_late_alloc) {
+ late_alloc_wave64 = 0;
+ } else if (num_cu_per_sh <= 6) {
+ late_alloc_wave64 = num_cu_per_sh - 2;
+ } else {
+ late_alloc_wave64 = (num_cu_per_sh - 2) * 4;
+
+ /* CU2 & CU3 disabled because of the dual CU design */
+ cu_mask_vs = 0xfff3;
+ cu_mask_gs = 0xfff3; /* NGG only */
+ }
+
+ late_alloc_wave64_gs = late_alloc_wave64;
+
+ /* Don't use late alloc for NGG on Navi14 due to a hw
+ * bug. If NGG is never used, enable all CUs.
*/
- radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
- S_00B118_CU_EN(0xfffe) | S_00B118_WAVE_LIMIT(0x3F));
- radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31));
+ if (!physical_device->use_ngg ||
+ physical_device->rad_info.family == CHIP_NAVI14) {
+ late_alloc_wave64_gs = 0;
+ cu_mask_gs = 0xffff;
+ }
+
+ /* Limit LATE_ALLOC_GS to prevent a hang (hw bug). */
+ if (physical_device->rad_info.chip_class == GFX10)
+ late_alloc_wave64_gs = MIN2(late_alloc_wave64_gs, 64);
+ } else {
+ if (!physical_device->rad_info.use_late_alloc) {
+ late_alloc_wave64 = 0;
+ } else if (num_cu_per_sh <= 4) {
+ /* Too few available compute units per SA.
+ * Disallowing VS to run on one CU could hurt
+ * us more than late VS allocation would help.
+ *
+ * 2 is the highest safe number that allows us
+ * to keep all CUs enabled.
+ */
+ late_alloc_wave64 = 2;
+ } else {
+ /* This is a good initial value, allowing 1
+ * late_alloc wave per SIMD on num_cu - 2.
+ */
+ late_alloc_wave64 = (num_cu_per_sh - 2) * 4;
+ }
+
+ if (late_alloc_wave64 > 2)
+ cu_mask_vs = 0xfffe; /* 1 CU disabled */
+ }
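+
+ /* Worked example (hypothetical part with 10 CUs per SA and late alloc
+ * enabled): both branches above give
+ * late_alloc_wave64 = (10 - 2) * 4 = 32; gfx10 additionally masks off
+ * CU2/CU3 (cu_mask 0xfff3), older chips only CU0 (cu_mask_vs 0xfffe). */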
+
+ radeon_set_sh_reg_idx(physical_device, cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
+ 3, S_00B118_CU_EN(cu_mask_vs) |
+ S_00B118_WAVE_LIMIT(0x3F));
+ radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS,
+ S_00B11C_LIMIT(late_alloc_wave64));
+
+ radeon_set_sh_reg_idx(physical_device, cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
+ 3, S_00B21C_CU_EN(cu_mask_gs) | S_00B21C_WAVE_LIMIT(0x3F));
+
+ if (physical_device->rad_info.chip_class >= GFX10) {
+ radeon_set_sh_reg_idx(physical_device, cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
+ 3, S_00B204_CU_EN(0xffff) |
+ S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(late_alloc_wave64_gs));
+ }
+
+ radeon_set_sh_reg_idx(physical_device, cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
+ 3, S_00B01C_CU_EN(0xffff) | S_00B01C_WAVE_LIMIT(0x3F));
+ }
+
+ if (physical_device->rad_info.chip_class >= GFX10) {
+ /* Break up a pixel wave if it contains deallocs for more than
+ * half the parameter cache.
+ *
+ * To avoid a deadlock where pixel waves aren't launched
+ * because they're waiting for more pixels while the frontend
+ * is stuck waiting for PC space, the maximum allowed value is
+ * the size of the PC minus the largest possible allocation for
+ * a single primitive shader subgroup.
+ */
+ radeon_set_context_reg(cs, R_028C50_PA_SC_NGG_MODE_CNTL,
+ S_028C50_MAX_DEALLOCS_IN_WAVE(512));
+ radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
+
+ /* Enable CMASK/FMASK/HTILE/DCC caching in L2 for small chips. */
+ unsigned meta_write_policy, meta_read_policy;
+
+ /* TODO: investigate whether LRU improves performance on other chips too */
+ if (physical_device->rad_info.num_render_backends <= 4) {
+ meta_write_policy = V_02807C_CACHE_LRU_WR; /* cache writes */
+ meta_read_policy = V_02807C_CACHE_LRU_RD; /* cache reads */
+ } else {
+ meta_write_policy = V_02807C_CACHE_STREAM_WR; /* write combine */
+ meta_read_policy = V_02807C_CACHE_NOA_RD; /* don't cache reads */
+ }
+
+ radeon_set_context_reg(cs, R_02807C_DB_RMI_L2_CACHE_CONTROL,
+ S_02807C_Z_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
+ S_02807C_S_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
+ S_02807C_HTILE_WR_POLICY(meta_write_policy) |
+ S_02807C_ZPCPSD_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
+ S_02807C_Z_RD_POLICY(V_02807C_CACHE_NOA_RD) |
+ S_02807C_S_RD_POLICY(V_02807C_CACHE_NOA_RD) |
+ S_02807C_HTILE_RD_POLICY(meta_read_policy));
+
+ radeon_set_context_reg(cs, R_028410_CB_RMI_GL2_CACHE_CONTROL,
+ S_028410_CMASK_WR_POLICY(meta_write_policy) |
+ S_028410_FMASK_WR_POLICY(meta_write_policy) |
+ S_028410_DCC_WR_POLICY(meta_write_policy) |
+ S_028410_COLOR_WR_POLICY(V_028410_CACHE_STREAM_WR) |
+ S_028410_CMASK_RD_POLICY(meta_read_policy) |
+ S_028410_FMASK_RD_POLICY(meta_read_policy) |
+ S_028410_DCC_RD_POLICY(meta_read_policy) |
+ S_028410_COLOR_RD_POLICY(V_028410_CACHE_NOA_RD));
+ radeon_set_context_reg(cs, R_028428_CB_COVERAGE_OUT_CONTROL, 0);
+
+ radeon_set_sh_reg(cs, R_00B0C8_SPI_SHADER_USER_ACCUM_PS_0, 0);
+ radeon_set_sh_reg(cs, R_00B0CC_SPI_SHADER_USER_ACCUM_PS_1, 0);
+ radeon_set_sh_reg(cs, R_00B0D0_SPI_SHADER_USER_ACCUM_PS_2, 0);
+ radeon_set_sh_reg(cs, R_00B0D4_SPI_SHADER_USER_ACCUM_PS_3, 0);
+ radeon_set_sh_reg(cs, R_00B1C8_SPI_SHADER_USER_ACCUM_VS_0, 0);
+ radeon_set_sh_reg(cs, R_00B1CC_SPI_SHADER_USER_ACCUM_VS_1, 0);
+ radeon_set_sh_reg(cs, R_00B1D0_SPI_SHADER_USER_ACCUM_VS_2, 0);
+ radeon_set_sh_reg(cs, R_00B1D4_SPI_SHADER_USER_ACCUM_VS_3, 0);
+ radeon_set_sh_reg(cs, R_00B2C8_SPI_SHADER_USER_ACCUM_ESGS_0, 0);
+ radeon_set_sh_reg(cs, R_00B2CC_SPI_SHADER_USER_ACCUM_ESGS_1, 0);
+ radeon_set_sh_reg(cs, R_00B2D0_SPI_SHADER_USER_ACCUM_ESGS_2, 0);
+ radeon_set_sh_reg(cs, R_00B2D4_SPI_SHADER_USER_ACCUM_ESGS_3, 0);
+ radeon_set_sh_reg(cs, R_00B4C8_SPI_SHADER_USER_ACCUM_LSHS_0, 0);
+ radeon_set_sh_reg(cs, R_00B4CC_SPI_SHADER_USER_ACCUM_LSHS_1, 0);
+ radeon_set_sh_reg(cs, R_00B4D0_SPI_SHADER_USER_ACCUM_LSHS_2, 0);
+ radeon_set_sh_reg(cs, R_00B4D4_SPI_SHADER_USER_ACCUM_LSHS_3, 0);
+
+ radeon_set_sh_reg(cs, R_00B0C0_SPI_SHADER_REQ_CTRL_PS,
+ S_00B0C0_SOFT_GROUPING_EN(1) |
+ S_00B0C0_NUMBER_OF_REQUESTS_PER_CU(4 - 1));
+ radeon_set_sh_reg(cs, R_00B1C0_SPI_SHADER_REQ_CTRL_VS, 0);
+
+ if (physical_device->rad_info.chip_class >= GFX10_3) {
+ radeon_set_context_reg(cs, R_028750_SX_PS_DOWNCONVERT_CONTROL_GFX103, 0xff);
+ radeon_set_context_reg(cs, 0x28848, 1 << 9); /* This fixes sample shading. */
}
- radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
- S_00B01C_CU_EN(0xffff) | S_00B01C_WAVE_LIMIT(0x3F));
+ if (physical_device->rad_info.chip_class == GFX10) {
+ /* SQ_NON_EVENT must be emitted before GE_PC_ALLOC is written. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
+ }
+
+ /* TODO: For culling, replace 128 with 256. */
+ radeon_set_uconfig_reg(cs, R_030980_GE_PC_ALLOC,
+ S_030980_OVERSUB_EN(physical_device->rad_info.use_late_alloc) |
+ S_030980_NUM_PC_LINES(128 * physical_device->rad_info.max_se - 1));
}
- if (physical_device->rad_info.chip_class >= GFX8) {
+ if (physical_device->rad_info.chip_class >= GFX9) {
+ radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
+ S_028B50_ACCUM_ISOLINE(40) |
+ S_028B50_ACCUM_TRI(30) |
+ S_028B50_ACCUM_QUAD(24) |
+ S_028B50_DONUT_SPLIT(24) |
+ S_028B50_TRAP_SPLIT(6));
+ } else if (physical_device->rad_info.chip_class >= GFX8) {
uint32_t vgt_tess_distribution;
vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
vgt_tess_distribution);
- } else if (!physical_device->has_clear_state) {
+ } else if (!has_clear_state) {
radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
}
- if (physical_device->rad_info.chip_class >= GFX9) {
- unsigned num_se = physical_device->rad_info.max_se;
- unsigned pc_lines = 0;
-
- switch (physical_device->rad_info.family) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- pc_lines = 4096;
- break;
- case CHIP_RAVEN:
- case CHIP_RAVEN2:
- pc_lines = 1024;
- break;
- default:
- assert(0);
+ if (device->border_color_data.bo) {
+ uint64_t border_color_va = radv_buffer_get_va(device->border_color_data.bo);
+
+ radeon_set_context_reg(cs, R_028080_TA_BC_BASE_ADDR, border_color_va >> 8);
+ if (physical_device->rad_info.chip_class >= GFX7) {
+ radeon_set_context_reg(cs, R_028084_TA_BC_BASE_ADDR_HI,
+ S_028084_ADDRESS(border_color_va >> 40));
}
+ }
+ if (physical_device->rad_info.chip_class >= GFX9) {
radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1,
- S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) |
+ S_028C48_MAX_ALLOC_COUNT(physical_device->rad_info.pbb_max_alloc_count - 1) |
S_028C48_MAX_PRIM_PER_BATCH(1023));
radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
radeon_emit(cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
radeon_set_context_reg_seq(cs, R_028A04_PA_SU_POINT_MINMAX, 1);
radeon_emit(cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
- S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2)));
+ S_028A04_MAX_SIZE(radv_pack_float_12p4(8191.875/2)));
- if (!physical_device->has_clear_state) {
+ if (!has_clear_state) {
radeon_set_context_reg(cs, R_028004_DB_COUNT_CONTROL,
S_028004_ZPASS_INCREMENT_DISABLE(1));
}
radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL,
small_prim_filter_cntl);
}
- si_emit_compute(physical_device, cs);
+ radeon_set_context_reg(cs, R_0286D4_SPI_INTERP_CONTROL_0,
+ S_0286D4_FLAT_SHADE_ENA(1) |
+ S_0286D4_PNT_SPRITE_ENA(1) |
+ S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
+ S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
+ S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
+ S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
+ S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
+
+ radeon_set_context_reg(cs, R_028BE4_PA_SU_VTX_CNTL,
+ S_028BE4_PIX_CENTER(1) |
+ S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
+ S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));
+
+ radeon_set_context_reg(cs, R_028818_PA_CL_VTE_CNTL,
+ S_028818_VTX_W0_FMT(1) |
+ S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
+ S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
+ S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
+
+ si_emit_compute(device, cs);
}
void
if (!cs)
return;
- si_emit_graphics(device->physical_device, cs);
+ si_emit_graphics(device, cs);
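+
+ /* Pad the IB to a multiple of 8 DWs with NOPs: 1-DW type-2 NOPs where
+ * the firmware requires them, a type-3 NOP packet otherwise. */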
while (cs->cdw & 7) {
if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
- radeon_emit(cs, 0x80000000);
+ radeon_emit(cs, PKT2_NOP_PAD);
else
- radeon_emit(cs, 0xffff1000);
+ radeon_emit(cs, PKT3_NOP_PAD);
}
device->gfx_init = device->ws->buffer_create(device->ws,
cs->cdw * 4, 4096,
RADEON_DOMAIN_GTT,
RADEON_FLAG_CPU_ACCESS|
RADEON_FLAG_NO_INTERPROCESS_SHARING |
- RADEON_FLAG_READ_ONLY,
+ RADEON_FLAG_READ_ONLY |
+ RADEON_FLAG_GTT_WC,
RADV_BO_PRIORITY_CS);
if (!device->gfx_init)
goto fail;
get_viewport_xform(viewport, scale, translate);
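+ /* The viewport transform maps NDC [-1, 1] to translate ± |scale|, so
+ * that span, rounded outwards at the far edge, is the tightest scissor
+ * rectangle covering the viewport. */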
- rect.offset.x = translate[0] - fabs(scale[0]);
- rect.offset.y = translate[1] - fabs(scale[1]);
- rect.extent.width = ceilf(translate[0] + fabs(scale[0])) - rect.offset.x;
- rect.extent.height = ceilf(translate[1] + fabs(scale[1])) - rect.offset.y;
+ rect.offset.x = translate[0] - fabsf(scale[0]);
+ rect.offset.y = translate[1] - fabsf(scale[1]);
+ rect.extent.width = ceilf(translate[0] + fabsf(scale[0])) - rect.offset.x;
+ rect.extent.height = ceilf(translate[1] + fabsf(scale[1])) - rect.offset.y;
return rect;
}
return 1 + ((num - info->min) / info->incr);
}
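+
+/* {min, incr} per hardware primitive type: the smallest vertex count that
+ * forms one primitive, and how many extra vertices each additional
+ * primitive needs; radv_prims_for_vertices() above converts a vertex
+ * count into a primitive count from these. */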
+static const struct radv_prim_vertex_count prim_size_table[] = {
+ [V_008958_DI_PT_NONE] = {0, 0},
+ [V_008958_DI_PT_POINTLIST] = {1, 1},
+ [V_008958_DI_PT_LINELIST] = {2, 2},
+ [V_008958_DI_PT_LINESTRIP] = {2, 1},
+ [V_008958_DI_PT_TRILIST] = {3, 3},
+ [V_008958_DI_PT_TRIFAN] = {3, 1},
+ [V_008958_DI_PT_TRISTRIP] = {3, 1},
+ [V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
+ [V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1},
+ [V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
+ [V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},
+ [V_008958_DI_PT_RECTLIST] = {3, 3},
+ [V_008958_DI_PT_LINELOOP] = {2, 1},
+ [V_008958_DI_PT_POLYGON] = {3, 1},
+ [V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
+};
+
uint32_t
si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
bool instanced_draw, bool indirect_draw,
bool count_from_stream_output,
- uint32_t draw_vertex_count)
+ uint32_t draw_vertex_count,
+ unsigned topology)
{
enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
bool partial_vs_wave = false;
bool partial_es_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_es_wave;
bool multi_instances_smaller_than_primgroup;
+ struct radv_prim_vertex_count prim_vertex_count = prim_size_table[topology];
+
+ if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) {
+ if (topology == V_008958_DI_PT_PATCH) {
+ prim_vertex_count.min = cmd_buffer->state.pipeline->graphics.tess_patch_control_points;
+ prim_vertex_count.incr = 1;
+ }
+ }
multi_instances_smaller_than_primgroup = indirect_draw;
if (!multi_instances_smaller_than_primgroup && instanced_draw) {
- uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
+ uint32_t num_prims = radv_prims_for_vertices(&prim_vertex_count, draw_vertex_count);
if (num_prims < cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.primgroup_size)
multi_instances_smaller_than_primgroup = true;
}
partial_vs_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_vs_wave;
if (chip_class >= GFX7) {
- wd_switch_on_eop = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.wd_switch_on_eop;
+ /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
+ * 4 shader engines. Set it to 1 to pass the assertion below.
+ * The other cases are hardware requirements. */
+ if (cmd_buffer->device->physical_device->rad_info.max_se < 4 ||
+ topology == V_008958_DI_PT_POLYGON ||
+ topology == V_008958_DI_PT_LINELOOP ||
+ topology == V_008958_DI_PT_TRIFAN ||
+ topology == V_008958_DI_PT_TRISTRIP_ADJ ||
+ (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
+ (cmd_buffer->device->physical_device->rad_info.family < CHIP_POLARIS10 ||
+ (topology != V_008958_DI_PT_POINTLIST &&
+ topology != V_008958_DI_PT_LINESTRIP))))
+ wd_switch_on_eop = true;
/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
* We don't know that for indirect drawing, so treat it as
* always problematic. */
if (family == CHIP_HAWAII && ia_switch_on_eoi) {
bool set_vgt_flush = indirect_draw;
if (!set_vgt_flush && instanced_draw) {
- uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
+ uint32_t num_prims = radv_prims_for_vertices(&prim_vertex_count, draw_vertex_count);
if (num_prims <= 1)
set_vgt_flush = true;
}
}
}
+ /* Workaround for a VGT hang when strip primitive types are used with
+ * primitive restart.
+ */
+ if (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
+ (topology == V_008958_DI_PT_LINESTRIP ||
+ topology == V_008958_DI_PT_TRISTRIP ||
+ topology == V_008958_DI_PT_LINESTRIP_ADJ ||
+ topology == V_008958_DI_PT_TRISTRIP_ADJ)) {
+ partial_vs_wave = true;
+ }
+
return cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.base |
S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
enum chip_class chip_class,
bool is_mec,
unsigned event, unsigned event_flags,
- unsigned data_sel,
+ unsigned dst_sel, unsigned data_sel,
uint64_t va,
uint32_t new_fence,
uint64_t gfx9_eop_bug_va)
{
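+ /* CS_DONE and PS_DONE are reported with EVENT_INDEX = 6; the other
+ * end-of-pipe (*_TS) events keep using EVENT_INDEX = 5. */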
unsigned op = EVENT_TYPE(event) |
- EVENT_INDEX(5) |
+ EVENT_INDEX(event == V_028A90_CS_DONE ||
+ event == V_028A90_PS_DONE ? 6 : 5) |
event_flags;
unsigned is_gfx8_mec = is_mec && chip_class < GFX9;
- unsigned sel = EOP_DATA_SEL(data_sel);
+ unsigned sel = EOP_DST_SEL(dst_sel) |
+ EOP_DATA_SEL(data_sel);
/* Wait for write confirmation before writing data, but don't send
* an interrupt. */
}
}
+static void
+gfx10_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
+ enum chip_class chip_class,
+ uint32_t *flush_cnt,
+ uint64_t flush_va,
+ bool is_mec,
+ enum radv_cmd_flush_bits flush_bits,
+ uint64_t gfx9_eop_bug_va)
+{
+ uint32_t gcr_cntl = 0;
+ unsigned cb_db_event = 0;
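+
+ /* GCR_CNTL cache fields (gfx10 naming): GLI = instruction cache,
+ * GLK = scalar/constant cache, GLV = L0 vector cache, GL1 = per-SA L1,
+ * GL2 = L2, GLM = L2 metadata cache. */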
+
+ /* We don't need these. */
+ assert(!(flush_bits & (RADV_CMD_FLAG_VGT_STREAMOUT_SYNC)));
+
+ if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
+ gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
+ if (flush_bits & RADV_CMD_FLAG_INV_SCACHE) {
+ /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
+ * to FORWARD when both L1 and L2 are written out (WB or INV).
+ */
+ gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
+ }
+ if (flush_bits & RADV_CMD_FLAG_INV_VCACHE)
+ gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
+ if (flush_bits & RADV_CMD_FLAG_INV_L2) {
+ /* Writeback and invalidate everything in L2. */
+ gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) |
+ S_586_GLM_INV(1) | S_586_GLM_WB(1);
+ } else if (flush_bits & RADV_CMD_FLAG_WB_L2) {
+ /* Writeback but do not invalidate.
+ * GLM doesn't support WB alone. If WB is set, INV must be set too.
+ */
+ gcr_cntl |= S_586_GL2_WB(1) |
+ S_586_GLM_WB(1) | S_586_GLM_INV(1);
+ }
+
+ /* TODO: Implement this new flag for GFX9+.
+ else if (flush_bits & RADV_CMD_FLAG_INV_L2_METADATA)
+ gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
+ */
+
+ if (flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
+ /* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_CB_META */
+ if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
+ /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) |
+ EVENT_INDEX(0));
+ }
+
+ /* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_DB_META ? */
+ if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
+ /* Flush HTILE. Will wait for idle later. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) |
+ EVENT_INDEX(0));
+ }
+
+ /* First flush CB/DB, then L1/L2. */
+ gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);
+
+ if ((flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) ==
+ (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
+ cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
+ } else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
+ cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
+ } else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
+ cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
+ } else {
+ assert(0);
+ }
+ } else {
+ /* Wait for graphics shaders to go idle if requested. */
+ if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+ }
+
+ if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
+ if (cb_db_event) {
+ /* CB/DB flush and invalidate (or possibly just a wait for a
+ * meta flush) via RELEASE_MEM.
+ *
+ * Combine this with other cache flushes when possible; this
+ * requires affected shaders to be idle, so do it after the
+ * CS_PARTIAL_FLUSH before (VS/PS partial flushes are always
+ * implied).
+ */
+ /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
+ unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
+ unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
+ unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
+ unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
+ assert(G_586_GL2_US(gcr_cntl) == 0);
+ assert(G_586_GL2_RANGE(gcr_cntl) == 0);
+ assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
+ unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
+ unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
+ unsigned gcr_seq = G_586_SEQ(gcr_cntl);
+
+ gcr_cntl &= C_586_GLM_WB &
+ C_586_GLM_INV &
+ C_586_GLV_INV &
+ C_586_GL1_INV &
+ C_586_GL2_INV &
+ C_586_GL2_WB; /* keep SEQ */
+
+ assert(flush_cnt);
+ (*flush_cnt)++;
+
+ si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event,
+ S_490_GLM_WB(glm_wb) |
+ S_490_GLM_INV(glm_inv) |
+ S_490_GLV_INV(glv_inv) |
+ S_490_GL1_INV(gl1_inv) |
+ S_490_GL2_INV(gl2_inv) |
+ S_490_GL2_WB(gl2_wb) |
+ S_490_SEQ(gcr_seq),
+ EOP_DST_SEL_MEM,
+ EOP_DATA_SEL_VALUE_32BIT,
+ flush_va, *flush_cnt,
+ gfx9_eop_bug_va);
+
+ radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
+ *flush_cnt, 0xffffffff);
+ }
+
+ /* VGT state sync */
+ if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
+ }
+
+ /* Ignore fields that only modify the behavior of other fields. */
+ if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
+ /* Flush caches and wait for the caches to assert idle.
+ * The cache flush is executed in the ME, but the PFP waits
+ * for completion.
+ */
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
+ radeon_emit(cs, 0); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
+ radeon_emit(cs, gcr_cntl); /* GCR_CNTL */
+ } else if ((cb_db_event ||
+ (flush_bits & (RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
+ RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
+ RADV_CMD_FLAG_CS_PARTIAL_FLUSH)))
+ && !is_mec) {
+ /* We need to ensure that PFP waits as well. */
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
+ }
+
+ if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
+ EVENT_INDEX(0));
+ } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
+ EVENT_INDEX(0));
+ }
+}
+
void
si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
enum chip_class chip_class,
unsigned cp_coher_cntl = 0;
uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB);
-
+
+ if (chip_class >= GFX10) {
+ /* GFX10 cache flush handling is quite different. */
+ gfx10_cs_emit_cache_flush(cs, chip_class, flush_cnt, flush_va,
+ is_mec, flush_bits, gfx9_eop_bug_va);
+ return;
+ }
+
if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
if (flush_bits & RADV_CMD_FLAG_INV_SCACHE)
is_mec,
V_028A90_FLUSH_AND_INV_CB_DATA_TS,
0,
+ EOP_DST_SEL_MEM,
EOP_DATA_SEL_DISCARD,
0, 0,
gfx9_eop_bug_va);
radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
- if (chip_class >= GFX9 && flush_cb_db) {
+ if (chip_class == GFX9 && flush_cb_db) {
unsigned cb_db_event, tc_flags;
/* Set the CB/DB flush event. */
(*flush_cnt)++;
si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags,
+ EOP_DST_SEL_MEM,
EOP_DATA_SEL_VALUE_32BIT,
flush_va, *flush_cnt,
gfx9_eop_bug_va);
if ((flush_bits & RADV_CMD_FLAG_INV_L2) ||
(chip_class <= GFX7 && (flush_bits & RADV_CMD_FLAG_WB_L2))) {
- si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9,
+ si_emit_acquire_mem(cs, is_mec, chip_class == GFX9,
cp_coher_cntl |
S_0085F0_TC_ACTION_ENA(1) |
S_0085F0_TCL1_ACTION_ENA(1) |
* WB doesn't work without NC.
*/
si_emit_acquire_mem(cs, is_mec,
- chip_class >= GFX9,
+ chip_class == GFX9,
cp_coher_cntl |
S_0301F0_TC_WB_ACTION_ENA(1) |
S_0301F0_TC_NC_ACTION_ENA(1));
}
if (flush_bits & RADV_CMD_FLAG_INV_VCACHE) {
si_emit_acquire_mem(cs, is_mec,
- chip_class >= GFX9,
+ chip_class == GFX9,
cp_coher_cntl |
S_0085F0_TCL1_ACTION_ENA(1));
cp_coher_cntl = 0;
* Therefore, it should be last. Done in PFP.
*/
if (cp_coher_cntl)
- si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, cp_coher_cntl);
+ si_emit_acquire_mem(cs, is_mec, chip_class == GFX9, cp_coher_cntl);
if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
unsigned dma_flags = 0;
unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ /* DMA operations via L2 are coherent and faster.
+ * TODO: GFX7-GFX9 should also support this but it
+ * requires tests/benchmarks.
+ */
+ dma_flags |= CP_DMA_USE_L2;
+ }
+
si_cp_dma_prepare(cmd_buffer, byte_count,
size + skipped_size + realign_size,
&dma_flags);
unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
unsigned dma_flags = CP_DMA_CLEAR;
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
+ /* DMA operations via L2 are coherent and faster.
+ * TODO: GFX7-GFX9 should also support this but it
+ * requires tests/benchmarks.
+ */
+ dma_flags |= CP_DMA_USE_L2;
+ }
+
si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);
/* Emit the clear packet. */