X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fsi_cmd_buffer.c;h=126cabd390ae281d6bf46cc89a874597272f27d7;hb=e67fc11c26111ae7aa8d140a5f62074b5e0b43c3;hp=86e8e3e4986bbd74254ccf51c934d1c16ee243d0;hpb=a563f611c3d3cdb2ca8a60d5eb4086cab5fa0f31;p=mesa.git diff --git a/src/amd/vulkan/si_cmd_buffer.c b/src/amd/vulkan/si_cmd_buffer.c index 86e8e3e4986..126cabd390a 100644 --- a/src/amd/vulkan/si_cmd_buffer.c +++ b/src/amd/vulkan/si_cmd_buffer.c @@ -25,175 +25,81 @@ * IN THE SOFTWARE. */ -/* command buffer handling for SI */ +/* command buffer handling for AMD GCN */ #include "radv_private.h" +#include "radv_shader.h" #include "radv_cs.h" #include "sid.h" -#include "gfx9d.h" #include "radv_util.h" #include "main/macros.h" -#define SI_GS_PER_ES 128 - static void si_write_harvested_raster_configs(struct radv_physical_device *physical_device, - struct radeon_winsys_cs *cs, + struct radeon_cmdbuf *cs, unsigned raster_config, unsigned raster_config_1) { - unsigned sh_per_se = MAX2(physical_device->rad_info.max_sh_per_se, 1); unsigned num_se = MAX2(physical_device->rad_info.max_se, 1); - unsigned rb_mask = physical_device->rad_info.enabled_rb_mask; - unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16); - unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2); - unsigned rb_per_se = num_rb / num_se; - unsigned se_mask[4]; + unsigned raster_config_se[4]; unsigned se; - se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask; - se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask; - se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask; - se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask; - - assert(num_se == 1 || num_se == 2 || num_se == 4); - assert(sh_per_se == 1 || sh_per_se == 2); - assert(rb_per_pkr == 1 || rb_per_pkr == 2); - - /* XXX: I can't figure out what the *_XSEL and *_YSEL - * fields are for, so I'm leaving them as their default - * values. 
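
For illustration, a minimal standalone sketch of the per-SE render-backend
mask split that the deleted lines above computed by hand (the new code defers
this to ac_get_harvested_configs). The part geometry and harvest mask below
are hypothetical; only the two mask formulas come from the deleted code.

    #include <assert.h>

    int main(void)
    {
        unsigned num_rb = 8, num_se = 2;
        unsigned rb_mask = 0x7f;              /* hypothetical: RB7 fused off */
        unsigned rb_per_se = num_rb / num_se; /* 4 backends per SE */
        unsigned se_mask[2];

        /* same chained shift-and-mask as the deleted se_mask[] setup */
        se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
        se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;

        assert(se_mask[0] == 0x0f); /* SE0 keeps all four backends */
        assert(se_mask[1] == 0x70); /* SE1 lost the harvested RB7 */
        return 0;
    }
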
*/ - - if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) || - (!se_mask[2] && !se_mask[3]))) { - raster_config_1 &= C_028354_SE_PAIR_MAP; - - if (!se_mask[0] && !se_mask[1]) { - raster_config_1 |= - S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3); - } else { - raster_config_1 |= - S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0); - } - } + ac_get_harvested_configs(&physical_device->rad_info, + raster_config, + &raster_config_1, + raster_config_se); for (se = 0; se < num_se; se++) { - unsigned raster_config_se = raster_config; - unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se); - unsigned pkr1_mask = pkr0_mask << rb_per_pkr; - int idx = (se / 2) * 2; - - if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) { - raster_config_se &= C_028350_SE_MAP; - - if (!se_mask[idx]) { - raster_config_se |= - S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3); - } else { - raster_config_se |= - S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0); - } - } - - pkr0_mask &= rb_mask; - pkr1_mask &= rb_mask; - if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) { - raster_config_se &= C_028350_PKR_MAP; - - if (!pkr0_mask) { - raster_config_se |= - S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3); - } else { - raster_config_se |= - S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0); - } - } - - if (rb_per_se >= 2) { - unsigned rb0_mask = 1 << (se * rb_per_se); - unsigned rb1_mask = rb0_mask << 1; - - rb0_mask &= rb_mask; - rb1_mask &= rb_mask; - if (!rb0_mask || !rb1_mask) { - raster_config_se &= C_028350_RB_MAP_PKR0; - - if (!rb0_mask) { - raster_config_se |= - S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3); - } else { - raster_config_se |= - S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0); - } - } - - if (rb_per_se > 2) { - rb0_mask = 1 << (se * rb_per_se + rb_per_pkr); - rb1_mask = rb0_mask << 1; - rb0_mask &= rb_mask; - rb1_mask &= rb_mask; - if (!rb0_mask || !rb1_mask) { - raster_config_se &= C_028350_RB_MAP_PKR1; - - if (!rb0_mask) { - raster_config_se |= - S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3); - } else { - raster_config_se |= - S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0); - } - } - } - } - - /* GRBM_GFX_INDEX has a different offset on SI and CI+ */ - if (physical_device->rad_info.chip_class < CIK) - radeon_set_config_reg(cs, GRBM_GFX_INDEX, - SE_INDEX(se) | SH_BROADCAST_WRITES | - INSTANCE_BROADCAST_WRITES); + /* GRBM_GFX_INDEX has a different offset on GFX6 and GFX7+ */ + if (physical_device->rad_info.chip_class < GFX7) + radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX, + S_00802C_SE_INDEX(se) | + S_00802C_SH_BROADCAST_WRITES(1) | + S_00802C_INSTANCE_BROADCAST_WRITES(1)); else radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX, S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) | S_030800_INSTANCE_BROADCAST_WRITES(1)); - radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se); - if (physical_device->rad_info.chip_class >= CIK) - radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1); + radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se[se]); } - /* GRBM_GFX_INDEX has a different offset on SI and CI+ */ - if (physical_device->rad_info.chip_class < CIK) - radeon_set_config_reg(cs, GRBM_GFX_INDEX, - SE_BROADCAST_WRITES | SH_BROADCAST_WRITES | - INSTANCE_BROADCAST_WRITES); + /* GRBM_GFX_INDEX has a different offset on GFX6 and GFX7+ */ + if (physical_device->rad_info.chip_class < GFX7) + radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX, + 
S_00802C_SE_BROADCAST_WRITES(1) | + S_00802C_SH_BROADCAST_WRITES(1) | + S_00802C_INSTANCE_BROADCAST_WRITES(1)); else radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX, S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) | S_030800_INSTANCE_BROADCAST_WRITES(1)); + + if (physical_device->rad_info.chip_class >= GFX7) + radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1); } -static void +void si_emit_compute(struct radv_physical_device *physical_device, - struct radeon_winsys_cs *cs) + struct radeon_cmdbuf *cs) { radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3); radeon_emit(cs, 0); radeon_emit(cs, 0); radeon_emit(cs, 0); - radeon_set_sh_reg_seq(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, 3); - radeon_emit(cs, 0); + radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2); /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */ radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff)); - radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff)); + radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff)); - if (physical_device->rad_info.chip_class >= CIK) { + if (physical_device->rad_info.chip_class >= GFX7) { /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */ radeon_set_sh_reg_seq(cs, R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2); - radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) | - S_00B864_SH1_CU_EN(0xffff)); - radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) | - S_00B868_SH1_CU_EN(0xffff)); + radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | + S_00B858_SH1_CU_EN(0xffff)); + radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | + S_00B858_SH1_CU_EN(0xffff)); } /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID @@ -201,7 +107,7 @@ si_emit_compute(struct radv_physical_device *physical_device, * kernel if we want to use something other than the default value, * which is now 0x22f. */ - if (physical_device->rad_info.chip_class <= SI) { + if (physical_device->rad_info.chip_class <= GFX6) { /* XXX: This should be: * (number of compute units) * 4 * (waves per simd) - 1 */ @@ -210,169 +116,123 @@ si_emit_compute(struct radv_physical_device *physical_device, } } -void -si_init_compute(struct radv_cmd_buffer *cmd_buffer) +/* 12.4 fixed-point */ +static unsigned radv_pack_float_12p4(float x) { - struct radv_physical_device *physical_device = cmd_buffer->device->physical_device; - si_emit_compute(physical_device, cmd_buffer->cs); + return x <= 0 ? 0 : + x >= 4096 ? 0xffff : x * 16; } static void -si_emit_config(struct radv_physical_device *physical_device, - struct radeon_winsys_cs *cs) +si_set_raster_config(struct radv_physical_device *physical_device, + struct radeon_cmdbuf *cs) { unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16); unsigned rb_mask = physical_device->rad_info.enabled_rb_mask; unsigned raster_config, raster_config_1; + + ac_get_raster_config(&physical_device->rad_info, + &raster_config, + &raster_config_1, NULL); + + /* Always use the default config when all backends are enabled + * (or when we failed to determine the enabled backends). 
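
As a worked example of the 12.4 fixed-point format that radv_pack_float_12p4()
above produces (12 integer bits, 4 fractional bits, clamped to [0, 4096)),
here is a standalone, runnable sketch; pack_12p4() simply mirrors the function
added in this patch.

    #include <assert.h>

    static unsigned pack_12p4(float x)
    {
        return x <= 0 ? 0 : x >= 4096 ? 0xffff : (unsigned)(x * 16);
    }

    int main(void)
    {
        assert(pack_12p4(1.0f)     == 0x10);   /* 1.0 -> 16/16 */
        assert(pack_12p4(0.5f)     == 0x08);   /* 0.5 ->  8/16 */
        assert(pack_12p4(8192 / 2) == 0xffff); /* clamp, as in PA_SU_POINT_MINMAX */
        assert(pack_12p4(-1.0f)    == 0);      /* negative sizes clamp to 0 */
        return 0;
    }
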
+ */ + if (!rb_mask || util_bitcount(rb_mask) >= num_rb) { + radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, + raster_config); + if (physical_device->rad_info.chip_class >= GFX7) + radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, + raster_config_1); + } else { + si_write_harvested_raster_configs(physical_device, cs, + raster_config, + raster_config_1); + } +} + +void +si_emit_graphics(struct radv_physical_device *physical_device, + struct radeon_cmdbuf *cs) +{ int i; + /* Only GFX6 can disable CLEAR_STATE for now. */ + assert(physical_device->has_clear_state || + physical_device->rad_info.chip_class == GFX6); + radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0)); radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1)); radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1)); + if (physical_device->has_clear_state) { + radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0)); + radeon_emit(cs, 0); + } + + if (physical_device->rad_info.chip_class <= GFX8) + si_set_raster_config(physical_device, cs); + radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64)); - radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0)); + if (!physical_device->has_clear_state) + radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0)); /* FIXME calculate these values somehow ??? */ - radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES); - radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40); - radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2); + if (physical_device->rad_info.chip_class <= GFX8) { + radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES); + radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40); + } - radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0); - radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0); + if (!physical_device->has_clear_state) { + radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2); + radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0); + radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0); + } - radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0); radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1); - if (physical_device->rad_info.chip_class >= GFX9) - radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF, 0); - radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0); - if (physical_device->rad_info.chip_class < CIK) + if (!physical_device->has_clear_state) + radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0); + if (physical_device->rad_info.chip_class < GFX7) radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) | S_008A14_CLIP_VTX_REORDER_ENA(1)); - radeon_set_context_reg(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210); - radeon_set_context_reg(cs, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98); - - radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0); + if (!physical_device->has_clear_state) + radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0); - for (i = 0; i < 16; i++) { - radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0); - radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0)); + /* CLEAR_STATE doesn't clear these correctly on certain generations. + * I don't know why. Deduced by trial and error. 
+ */ + if (physical_device->rad_info.chip_class <= GFX7) { + radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0); + radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, + S_028204_WINDOW_OFFSET_DISABLE(1)); + radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL, + S_028240_WINDOW_OFFSET_DISABLE(1)); + radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR, + S_028244_BR_X(16384) | S_028244_BR_Y(16384)); + radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0); + radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR, + S_028034_BR_X(16384) | S_028034_BR_Y(16384)); } - switch (physical_device->rad_info.family) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - raster_config = 0x2a00126a; - raster_config_1 = 0x00000000; - break; - case CHIP_VERDE: - raster_config = 0x0000124a; - raster_config_1 = 0x00000000; - break; - case CHIP_OLAND: - raster_config = 0x00000082; - raster_config_1 = 0x00000000; - break; - case CHIP_HAINAN: - raster_config = 0x00000000; - raster_config_1 = 0x00000000; - break; - case CHIP_BONAIRE: - raster_config = 0x16000012; - raster_config_1 = 0x00000000; - break; - case CHIP_HAWAII: - raster_config = 0x3a00161a; - raster_config_1 = 0x0000002e; - break; - case CHIP_FIJI: - if (physical_device->rad_info.cik_macrotile_mode_array[0] == 0x000000e8) { - /* old kernels with old tiling config */ - raster_config = 0x16000012; - raster_config_1 = 0x0000002a; - } else { - raster_config = 0x3a00161a; - raster_config_1 = 0x0000002e; - } - break; - case CHIP_POLARIS10: - raster_config = 0x16000012; - raster_config_1 = 0x0000002a; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS12: - raster_config = 0x16000012; - raster_config_1 = 0x00000000; - break; - case CHIP_TONGA: - raster_config = 0x16000012; - raster_config_1 = 0x0000002a; - break; - case CHIP_ICELAND: - if (num_rb == 1) - raster_config = 0x00000000; - else - raster_config = 0x00000002; - raster_config_1 = 0x00000000; - break; - case CHIP_CARRIZO: - raster_config = 0x00000002; - raster_config_1 = 0x00000000; - break; - case CHIP_KAVERI: - /* KV should be 0x00000002, but that causes problems with radeon */ - raster_config = 0x00000000; /* 0x00000002 */ - raster_config_1 = 0x00000000; - break; - case CHIP_KABINI: - case CHIP_MULLINS: - case CHIP_STONEY: - raster_config = 0x00000000; - raster_config_1 = 0x00000000; - break; - default: - if (physical_device->rad_info.chip_class <= VI) { - fprintf(stderr, - "radeonsi: Unknown GPU, using 0 for raster_config\n"); - raster_config = 0x00000000; - raster_config_1 = 0x00000000; + if (!physical_device->has_clear_state) { + for (i = 0; i < 16; i++) { + radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0); + radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0)); } - break; } - /* Always use the default config when all backends are enabled - * (or when we failed to determine the enabled backends). 
- */ - if (physical_device->rad_info.chip_class <= VI) { - if (!rb_mask || util_bitcount(rb_mask) >= num_rb) { - radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, - raster_config); - if (physical_device->rad_info.chip_class >= CIK) - radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, - raster_config_1); - } else { - si_write_harvested_raster_configs(physical_device, cs, raster_config, raster_config_1); - } + if (!physical_device->has_clear_state) { + radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF); + radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA); + /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on GFX6 */ + radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0); + radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0); + radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0); + radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0); + radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0); } - radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1)); - radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL, S_028240_WINDOW_OFFSET_DISABLE(1)); - radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR, - S_028244_BR_X(16384) | S_028244_BR_Y(16384)); - radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0); - radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR, - S_028034_BR_X(16384) | S_028034_BR_Y(16384)); - - radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF); - radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA); - /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */ - radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0); - radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0); - - radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0); - radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0); - radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0); radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) | S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE)); @@ -382,18 +242,27 @@ si_emit_config(struct radv_physical_device *physical_device, radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0); radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0); } else { + /* These registers, when written, also overwrite the + * CLEAR_STATE context, so we can't rely on CLEAR_STATE setting + * them. It would be an issue if there was another UMD + * changing them. 
+ */ radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0); radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0); radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0); } - if (physical_device->rad_info.chip_class >= CIK) { + if (physical_device->rad_info.chip_class >= GFX7) { if (physical_device->rad_info.chip_class >= GFX9) { - radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, S_00B41C_CU_EN(0xffff)); + radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, + S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F)); } else { - radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xffff)); - radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 0); - radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, S_00B31C_CU_EN(0xffff)); + radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, + S_00B51C_CU_EN(0xffff) | S_00B51C_WAVE_LIMIT(0x3F)); + radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, + S_00B41C_WAVE_LIMIT(0x3F)); + radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, + S_00B31C_CU_EN(0xffff) | S_00B31C_WAVE_LIMIT(0x3F)); /* If this is 0, Bonaire can hang even if GS isn't being used. * Other chips are unaffected. These are suboptimal values, * but we don't use on-chip GS. @@ -402,17 +271,18 @@ si_emit_config(struct radv_physical_device *physical_device, S_028A44_ES_VERTS_PER_SUBGRP(64) | S_028A44_GS_PRIMS_PER_SUBGRP(4)); } - radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, S_00B21C_CU_EN(0xffff)); + radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, + S_00B21C_CU_EN(0xffff) | S_00B21C_WAVE_LIMIT(0x3F)); - if (physical_device->rad_info.num_good_compute_units / - (physical_device->rad_info.max_se * physical_device->rad_info.max_sh_per_se) <= 4) { + if (physical_device->rad_info.num_good_cu_per_sh <= 4) { /* Too few available compute units per SH. Disallowing * VS to run on CU0 could hurt us more than late VS * allocation would help. * * LATE_ALLOC_VS = 2 is the highest safe number. */ - radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xffff)); + radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, + S_00B118_CU_EN(0xffff) | S_00B118_WAVE_LIMIT(0x3F) ); radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2)); } else { /* Set LATE_ALLOC_VS == 31. It should be less than @@ -420,21 +290,17 @@ si_emit_config(struct radv_physical_device *physical_device, * - VS can't execute on CU0. * - If HS writes outputs to LDS, LS can't execute on CU0. 
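
The CU_EN fields written below are plain per-CU enable bitmasks, so the
late-alloc trick amounts to clearing bit 0. A trivial standalone sketch,
with hypothetical values for a 16-CU shader array:

    #include <assert.h>

    int main(void)
    {
        unsigned all_cus  = 0xffff;        /* hypothetical 16-CU shader array */
        unsigned vs_cu_en = all_cus & ~1u; /* 0xfffe: every CU except CU0 */

        assert(vs_cu_en == 0xfffe);
        assert((vs_cu_en & 1) == 0); /* CU0 stays free for late-alloc waves */
        return 0;
    }
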
*/ - radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xfffe)); + radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, + S_00B118_CU_EN(0xfffe) | S_00B118_WAVE_LIMIT(0x3F)); radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31)); } - radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, S_00B01C_CU_EN(0xffff)); + radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, + S_00B01C_CU_EN(0xffff) | S_00B01C_WAVE_LIMIT(0x3F)); } - if (physical_device->rad_info.chip_class >= VI) { + if (physical_device->rad_info.chip_class >= GFX8) { uint32_t vgt_tess_distribution; - radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL, - S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) | - S_028424_OVERWRITE_COMBINER_WATERMARK(4)); - if (physical_device->rad_info.family < CHIP_POLARIS10) - radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 30); - radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 32); vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) | S_028B50_ACCUM_TRI(11) | @@ -447,39 +313,29 @@ si_emit_config(struct radv_physical_device *physical_device, radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION, vgt_tess_distribution); - } else { + } else if (!physical_device->has_clear_state) { radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14); radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16); } - if (physical_device->has_rbplus) - radeon_set_context_reg(cs, R_028C40_PA_SC_SHADER_CONTROL, 0); - if (physical_device->rad_info.chip_class >= GFX9) { unsigned num_se = physical_device->rad_info.max_se; unsigned pc_lines = 0; switch (physical_device->rad_info.family) { case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_VEGA20: pc_lines = 4096; break; case CHIP_RAVEN: + case CHIP_RAVEN2: pc_lines = 1024; break; default: assert(0); } - radeon_set_context_reg(cs, R_028060_DB_DFSM_CONTROL, - S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF)); - radeon_set_context_reg(cs, R_028064_DB_RENDER_FILTER, 0); - /* TODO: We can use this to disable RBs for rendering to GART: */ - radeon_set_context_reg(cs, R_02835C_PA_SC_TILE_STEERING_OVERRIDE, 0); - radeon_set_context_reg(cs, R_02883C_PA_SU_OVER_RASTERIZATION_CNTL, 0); - /* TODO: Enable the binner: */ - radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0, - S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) | - S_028C44_DISABLE_START_OF_PRIM(1)); radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1, S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) | S_028C48_MAX_PRIM_PER_BATCH(1023)); @@ -487,24 +343,45 @@ si_emit_config(struct radv_physical_device *physical_device, S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1)); radeon_set_uconfig_reg(cs, R_030968_VGT_INSTANCE_BASE_ID, 0); } - si_emit_compute(physical_device, cs); -} -void si_init_config(struct radv_cmd_buffer *cmd_buffer) -{ - struct radv_physical_device *physical_device = cmd_buffer->device->physical_device; + unsigned tmp = (unsigned)(1.0 * 8.0); + radeon_set_context_reg_seq(cs, R_028A00_PA_SU_POINT_SIZE, 1); + radeon_emit(cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp)); + radeon_set_context_reg_seq(cs, R_028A04_PA_SU_POINT_MINMAX, 1); + radeon_emit(cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) | + S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2))); + + if (!physical_device->has_clear_state) { + radeon_set_context_reg(cs, R_028004_DB_COUNT_CONTROL, + S_028004_ZPASS_INCREMENT_DISABLE(1)); + } + + /* Enable the Polaris small primitive filter control. 
+ * XXX: There is possibly an issue when MSAA is off (see RadeonSI + * has_msaa_sample_loc_bug). But this doesn't seem to regress anything, + * and AMDVLK doesn't have a workaround as well. + */ + if (physical_device->rad_info.family >= CHIP_POLARIS10) { + unsigned small_prim_filter_cntl = + S_028830_SMALL_PRIM_FILTER_ENABLE(1) | + /* Workaround for a hw line bug. */ + S_028830_LINE_FILTER_DISABLE(physical_device->rad_info.family <= CHIP_POLARIS12); + + radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL, + small_prim_filter_cntl); + } - si_emit_config(physical_device, cmd_buffer->cs); + si_emit_compute(physical_device, cs); } void cik_create_gfx_config(struct radv_device *device) { - struct radeon_winsys_cs *cs = device->ws->cs_create(device->ws, RING_GFX); + struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, RING_GFX); if (!cs) return; - si_emit_config(device->physical_device, cs); + si_emit_graphics(device->physical_device, cs); while (cs->cdw & 7) { if (device->physical_device->rad_info.gfx_ib_pad_with_type2) @@ -516,7 +393,10 @@ cik_create_gfx_config(struct radv_device *device) device->gfx_init = device->ws->buffer_create(device->ws, cs->cdw * 4, 4096, RADEON_DOMAIN_GTT, - RADEON_FLAG_CPU_ACCESS); + RADEON_FLAG_CPU_ACCESS| + RADEON_FLAG_NO_INTERPROCESS_SHARING | + RADEON_FLAG_READ_ONLY, + RADV_BO_PRIORITY_CS); if (!device->gfx_init) goto fail; @@ -555,7 +435,7 @@ get_viewport_xform(const VkViewport *viewport, } void -si_write_viewport(struct radeon_winsys_cs *cs, int first_vp, +si_write_viewport(struct radeon_cmdbuf *cs, int first_vp, int count, const VkViewport *viewports) { int i; @@ -594,10 +474,10 @@ static VkRect2D si_scissor_from_viewport(const VkViewport *viewport) get_viewport_xform(viewport, scale, translate); - rect.offset.x = translate[0] - abs(scale[0]); - rect.offset.y = translate[1] - abs(scale[1]); - rect.extent.width = ceilf(translate[0] + abs(scale[0])) - rect.offset.x; - rect.extent.height = ceilf(translate[1] + abs(scale[1])) - rect.offset.y; + rect.offset.x = translate[0] - fabs(scale[0]); + rect.offset.y = translate[1] - fabs(scale[1]); + rect.extent.width = ceilf(translate[0] + fabs(scale[0])) - rect.offset.x; + rect.extent.height = ceilf(translate[1] + fabs(scale[1])) - rect.offset.y; return rect; } @@ -614,14 +494,15 @@ static VkRect2D si_intersect_scissor(const VkRect2D *a, const VkRect2D *b) { } void -si_write_scissors(struct radeon_winsys_cs *cs, int first, +si_write_scissors(struct radeon_cmdbuf *cs, int first, int count, const VkRect2D *scissors, const VkViewport *viewports, bool can_use_guardband) { int i; float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY; const float max_range = 32767.0f; - assert(count); + if (!count) + return; radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2); for (i = 0; i < count; i++) { @@ -629,16 +510,16 @@ si_write_scissors(struct radeon_winsys_cs *cs, int first, VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor); get_viewport_xform(viewports + i, scale, translate); - scale[0] = abs(scale[0]); - scale[1] = abs(scale[1]); + scale[0] = fabsf(scale[0]); + scale[1] = fabsf(scale[1]); if (scale[0] < 0.5) scale[0] = 0.5; if (scale[1] < 0.5) scale[1] = 0.5; - guardband_x = MIN2(guardband_x, (max_range - abs(translate[0])) / scale[0]); - guardband_y = MIN2(guardband_y, (max_range - abs(translate[1])) / scale[1]); + guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]); + guardband_y = MIN2(guardband_y, 
(max_range - fabsf(translate[1])) / scale[1]); radeon_emit(cs, S_028250_TL_X(scissor.offset.x) | S_028250_TL_Y(scissor.offset.y) | @@ -676,77 +557,33 @@ radv_prims_for_vertices(struct radv_prim_vertex_count *info, unsigned num) uint32_t si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, bool instanced_draw, bool indirect_draw, + bool count_from_stream_output, uint32_t draw_vertex_count) { enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class; enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family; struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info; - unsigned prim = cmd_buffer->state.pipeline->graphics.prim; - unsigned primgroup_size = 128; /* recommended without a GS */ - unsigned max_primgroup_in_wave = 2; + const unsigned max_primgroup_in_wave = 2; /* SWITCH_ON_EOP(0) is always preferable. */ bool wd_switch_on_eop = false; bool ia_switch_on_eop = false; bool ia_switch_on_eoi = false; bool partial_vs_wave = false; - bool partial_es_wave = false; - uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count); + bool partial_es_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_es_wave; bool multi_instances_smaller_than_primgroup; - if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) - primgroup_size = cmd_buffer->state.pipeline->graphics.tess.num_patches; - else if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) - primgroup_size = 64; /* recommended with a GS */ - - multi_instances_smaller_than_primgroup = indirect_draw || (instanced_draw && - num_prims < primgroup_size); - if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) { - /* SWITCH_ON_EOI must be set if PrimID is used. */ - if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.uses_prim_id || - cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.uses_prim_id) - ia_switch_on_eoi = true; + multi_instances_smaller_than_primgroup = indirect_draw; + if (!multi_instances_smaller_than_primgroup && instanced_draw) { + uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count); + if (num_prims < cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.primgroup_size) + multi_instances_smaller_than_primgroup = true; + } - /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */ - if ((family == CHIP_TAHITI || - family == CHIP_PITCAIRN || - family == CHIP_BONAIRE) && - radv_pipeline_has_gs(cmd_buffer->state.pipeline)) - partial_vs_wave = true; + ia_switch_on_eoi = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.ia_switch_on_eoi; + partial_vs_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_vs_wave; - /* Needed for 028B6C_DISTRIBUTION_MODE != 0 */ - if (cmd_buffer->device->has_distributed_tess) { - if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) { - if (chip_class <= VI) - partial_es_wave = true; - - if (family == CHIP_TONGA || - family == CHIP_FIJI || - family == CHIP_POLARIS10 || - family == CHIP_POLARIS11 || - family == CHIP_POLARIS12) - partial_vs_wave = true; - } else { - partial_vs_wave = true; - } - } - } - /* TODO linestipple */ - - if (chip_class >= CIK) { - /* WD_SWITCH_ON_EOP has no effect on GPUs with less than - * 4 shader engines. Set 1 to pass the assertion below. - * The other cases are hardware requirements. 
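
radv_prims_for_vertices() itself is outside this hunk; below is a plausible
standalone reimplementation, assuming the usual per-topology min/incr
decomposition. The helper and its table values are hypothetical; only the
"compare against primgroup_size" use above comes from the patch.

    #include <assert.h>
    #include <stdint.h>

    struct prim_vertex_count { unsigned min, incr; };

    /* hypothetical stand-in for radv_prims_for_vertices() */
    static uint32_t prims_for_vertices(const struct prim_vertex_count *info,
                                       unsigned num)
    {
        if (num == 0 || info->incr == 0 || num < info->min)
            return 0;
        return 1 + (num - info->min) / info->incr;
    }

    int main(void)
    {
        const struct prim_vertex_count tri_list  = { 3, 3 };
        const struct prim_vertex_count tri_strip = { 3, 1 };

        assert(prims_for_vertices(&tri_list, 6)  == 2);
        assert(prims_for_vertices(&tri_strip, 6) == 4);
        /* two triangles per instance is far below a 128-prim group,
         * so multi_instances_smaller_than_primgroup would be set */
        assert(prims_for_vertices(&tri_list, 6) < 128);
        return 0;
    }
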
*/ - if (info->max_se < 4 || - prim == V_008958_DI_PT_POLYGON || - prim == V_008958_DI_PT_LINELOOP || - prim == V_008958_DI_PT_TRIFAN || - prim == V_008958_DI_PT_TRISTRIP_ADJ || - (cmd_buffer->state.pipeline->graphics.prim_restart_enable && - (family < CHIP_POLARIS10 || - (prim != V_008958_DI_PT_POINTLIST && - prim != V_008958_DI_PT_LINESTRIP && - prim != V_008958_DI_PT_TRISTRIP)))) - wd_switch_on_eop = true; + if (chip_class >= GFX7) { + wd_switch_on_eop = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.wd_switch_on_eop; /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0. * We don't know that for indirect drawing, so treat it as @@ -760,19 +597,20 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, * Assume indirect draws always use small instances. * This is needed for good VS wave utilization. */ - if (chip_class <= VI && + if (chip_class <= GFX8 && info->max_se == 4 && multi_instances_smaller_than_primgroup) wd_switch_on_eop = true; - /* Required on CIK and later. */ + /* Required on GFX7 and later. */ if (info->max_se > 2 && !wd_switch_on_eop) ia_switch_on_eoi = true; - /* Required by Hawaii and, for some special cases, by VI. */ + /* Required by Hawaii and, for some special cases, by GFX8. */ if (ia_switch_on_eoi && (family == CHIP_HAWAII || - (chip_class == VI && + (chip_class == GFX8 && + /* max primgroup in wave is always 2 - leave this for documentation */ (radv_pipeline_has_gs(cmd_buffer->state.pipeline) || max_primgroup_in_wave != 2)))) partial_vs_wave = true; @@ -781,63 +619,80 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, (instanced_draw || indirect_draw)) partial_vs_wave = true; + /* Hardware requirement when drawing primitives from a stream + * output buffer. + */ + if (count_from_stream_output) + wd_switch_on_eop = true; + /* If the WD switch is false, the IA switch must be false too. */ assert(wd_switch_on_eop || !ia_switch_on_eop); } /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */ - if (chip_class <= VI && ia_switch_on_eoi) + if (chip_class <= GFX8 && ia_switch_on_eoi) partial_es_wave = true; if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) { - - if (radv_pipeline_has_gs(cmd_buffer->state.pipeline) && - cmd_buffer->state.pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.uses_prim_id) - ia_switch_on_eoi = true; - - /* GS requirement. */ - if (SI_GS_PER_ES / primgroup_size >= cmd_buffer->device->gs_table_depth - 3) - partial_es_wave = true; - - /* Hw bug with single-primitive instances and SWITCH_ON_EOI - * on multi-SE chips. */ - if (info->max_se >= 2 && ia_switch_on_eoi && - ((instanced_draw || indirect_draw) && - num_prims <= 1)) - cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH; + /* GS hw bug with single-primitive instances and SWITCH_ON_EOI. + * The hw doc says all multi-SE chips are affected, but amdgpu-pro Vulkan + * only applies it to Hawaii. Do what amdgpu-pro Vulkan does. 
+ */ + if (family == CHIP_HAWAII && ia_switch_on_eoi) { + bool set_vgt_flush = indirect_draw; + if (!set_vgt_flush && instanced_draw) { + uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count); + if (num_prims <= 1) + set_vgt_flush = true; + } + if (set_vgt_flush) + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH; + } } - return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | + return cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.base | + S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) | S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) | S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) | - S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) | - S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0) | - /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */ - S_028AA8_MAX_PRIMGRP_IN_WAVE(chip_class == VI ? - max_primgroup_in_wave : 0) | - S_030960_EN_INST_OPT_BASIC(chip_class >= GFX9) | - S_030960_EN_INST_OPT_ADV(chip_class >= GFX9); + S_028AA8_WD_SWITCH_ON_EOP(chip_class >= GFX7 ? wd_switch_on_eop : 0); } -void si_cs_emit_write_event_eop(struct radeon_winsys_cs *cs, +void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs, enum chip_class chip_class, bool is_mec, unsigned event, unsigned event_flags, unsigned data_sel, uint64_t va, - uint32_t old_fence, - uint32_t new_fence) + uint32_t new_fence, + uint64_t gfx9_eop_bug_va) { unsigned op = EVENT_TYPE(event) | EVENT_INDEX(5) | event_flags; unsigned is_gfx8_mec = is_mec && chip_class < GFX9; + unsigned sel = EOP_DATA_SEL(data_sel); + + /* Wait for write confirmation before writing data, but don't send + * an interrupt. */ + if (data_sel != EOP_DATA_SEL_DISCARD) + sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM); if (chip_class >= GFX9 || is_gfx8_mec) { - radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, 0)); + /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion + * counters) must immediately precede every timestamp event to + * prevent a GPU hang on GFX9. + */ + if (chip_class == GFX9 && !is_mec) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0)); + radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1)); + radeon_emit(cs, gfx9_eop_bug_va); + radeon_emit(cs, gfx9_eop_bug_va >> 32); + } + + radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, false)); radeon_emit(cs, op); - radeon_emit(cs, EOP_DATA_SEL(data_sel)); + radeon_emit(cs, sel); radeon_emit(cs, va); /* address lo */ radeon_emit(cs, va >> 32); /* address hi */ radeon_emit(cs, new_fence); /* immediate data lo */ @@ -845,36 +700,39 @@ void si_cs_emit_write_event_eop(struct radeon_winsys_cs *cs, if (!is_gfx8_mec) radeon_emit(cs, 0); /* unused */ } else { - if (chip_class == CIK || - chip_class == VI) { + if (chip_class == GFX7 || + chip_class == GFX8) { /* Two EOP events are required to make all engines go idle * (and optional cache flushes executed) before the timestamp * is written. 
*/ - radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0)); + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false)); radeon_emit(cs, op); radeon_emit(cs, va); - radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel)); - radeon_emit(cs, old_fence); /* immediate data */ + radeon_emit(cs, ((va >> 32) & 0xffff) | sel); + radeon_emit(cs, 0); /* immediate data */ radeon_emit(cs, 0); /* unused */ } - radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0)); + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false)); radeon_emit(cs, op); radeon_emit(cs, va); - radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel)); + radeon_emit(cs, ((va >> 32) & 0xffff) | sel); radeon_emit(cs, new_fence); /* immediate data */ radeon_emit(cs, 0); /* unused */ } } void -si_emit_wait_fence(struct radeon_winsys_cs *cs, - uint64_t va, uint32_t ref, - uint32_t mask) +radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va, + uint32_t ref, uint32_t mask) { - radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0)); - radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1)); + assert(op == WAIT_REG_MEM_EQUAL || + op == WAIT_REG_MEM_NOT_EQUAL || + op == WAIT_REG_MEM_GREATER_OR_EQUAL); + + radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false)); + radeon_emit(cs, op | WAIT_REG_MEM_MEM_SPACE(1)); radeon_emit(cs, va); radeon_emit(cs, va >> 32); radeon_emit(cs, ref); /* reference value */ @@ -883,13 +741,14 @@ si_emit_wait_fence(struct radeon_winsys_cs *cs, } static void -si_emit_acquire_mem(struct radeon_winsys_cs *cs, - bool is_mec, bool is_gfx9, +si_emit_acquire_mem(struct radeon_cmdbuf *cs, + bool is_mec, + bool is_gfx9, unsigned cp_coher_cntl) { if (is_mec || is_gfx9) { uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff; - radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | + radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, false) | PKT3_SHADER_TYPE_S(is_mec)); radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */ @@ -899,7 +758,7 @@ si_emit_acquire_mem(struct radeon_winsys_cs *cs, radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */ } else { /* ACQUIRE_MEM is only required on a compute ring. 
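
For reference, a sketch of the PM4 type-3 header that the PKT3() macro used
below builds. The field layout ([31:30] packet type, [29:16] payload dword
count minus one, [15:8] IT opcode, bit 0 predicate) and the SURFACE_SYNC
opcode 0x43 follow the public PM4 packet format; treat both as stated
assumptions rather than something this patch defines.

    #include <assert.h>
    #include <stdint.h>

    /* hypothetical re-creation of PKT3(); not the driver's macro */
    #define PKT3_SKETCH(op, count, pred) \
        ((3u << 30) | (((unsigned)(count) & 0x3fff) << 16) | \
         (((unsigned)(op) & 0xff) << 8) | ((pred) & 1u))

    int main(void)
    {
        /* SURFACE_SYNC below carries 4 payload dwords -> count field 3 */
        uint32_t hdr = PKT3_SKETCH(0x43, 3, 0);

        assert(hdr >> 30 == 3);              /* type-3 packet */
        assert(((hdr >> 16) & 0x3fff) == 3); /* 3 + 1 payload dwords */
        assert(((hdr >> 8) & 0xff) == 0x43); /* SURFACE_SYNC */
        return 0;
    }
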
*/ - radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0)); + radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, false)); radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */ radeon_emit(cs, 0); /* CP_COHER_BASE */ @@ -908,12 +767,13 @@ si_emit_acquire_mem(struct radeon_winsys_cs *cs, } void -si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, +si_cs_emit_cache_flush(struct radeon_cmdbuf *cs, enum chip_class chip_class, uint32_t *flush_cnt, uint64_t flush_va, bool is_mec, - enum radv_cmd_flush_bits flush_bits) + enum radv_cmd_flush_bits flush_bits, + uint64_t gfx9_eop_bug_va) { unsigned cp_coher_cntl = 0; uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | @@ -924,7 +784,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, if (flush_bits & RADV_CMD_FLAG_INV_SMEM_L1) cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1); - if (chip_class <= VI) { + if (chip_class <= GFX8) { if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) { cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) | @@ -937,12 +797,15 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, S_0085F0_CB7_DEST_BASE_ENA(1); /* Necessary for DCC */ - if (chip_class >= VI) { + if (chip_class >= GFX8) { si_cs_emit_write_event_eop(cs, chip_class, is_mec, V_028A90_FLUSH_AND_INV_CB_DATA_TS, - 0, 0, 0, 0, 0); + 0, + EOP_DATA_SEL_DISCARD, + 0, 0, + gfx9_eop_bug_va); } } if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) { @@ -961,14 +824,12 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0)); } - if (!flush_cb_db) { - if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) { - radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); - radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4)); - } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) { - radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); - radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4)); - } + if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4)); + } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4)); } if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) { @@ -980,36 +841,29 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, unsigned cb_db_event, tc_flags; /* Set the CB/DB flush event. */ - switch (flush_cb_db) { - case RADV_CMD_FLAG_FLUSH_AND_INV_CB: - cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS; - break; - case RADV_CMD_FLAG_FLUSH_AND_INV_DB: - cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS; - break; - default: - /* both CB & DB */ - cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT; - } + cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT; - /* TC | TC_WB = invalidate L2 data - * TC_MD | TC_WB = invalidate L2 metadata - * TC | TC_WB | TC_MD = invalidate L2 data & metadata - * - * The metadata cache must always be invalidated for coherency - * between CB/DB and shaders. (metadata = HTILE, CMASK, DCC) + /* These are the only allowed combinations. If you need to + * do multiple operations at once, do them separately. + * All operations that invalidate L2 also seem to invalidate + * metadata. Volatile (VOL) and WC flushes are not listed here. * - * TC must be invalidated on GFX9 only if the CB/DB surface is - * not pipe-aligned. 
If the surface is RB-aligned, it might not - * strictly be pipe-aligned since RB alignment takes precendence. + * TC | TC_WB = writeback & invalidate L2 & L1 + * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC + * TC_WB | TC_NC = writeback L2 for MTYPE == NC + * TC | TC_NC = invalidate L2 for MTYPE == NC + * TC | TC_MD = writeback & invalidate L2 metadata (DCC, etc.) + * TCL1 = invalidate L1 */ - tc_flags = EVENT_TC_WB_ACTION_ENA | - EVENT_TC_MD_ACTION_ENA; + tc_flags = EVENT_TC_ACTION_ENA | + EVENT_TC_MD_ACTION_ENA; /* Ideally flush TC together with CB/DB. */ if (flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) { - tc_flags |= EVENT_TC_ACTION_ENA | - EVENT_TCL1_ACTION_ENA; + /* Writeback and invalidate everything in L2 & L1. */ + tc_flags = EVENT_TC_ACTION_ENA | + EVENT_TC_WB_ACTION_ENA; + /* Clear the flags. */ flush_bits &= ~(RADV_CMD_FLAG_INV_GLOBAL_L2 | @@ -1017,11 +871,14 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, RADV_CMD_FLAG_INV_VMEM_L1); } assert(flush_cnt); - uint32_t old_fence = (*flush_cnt)++; - - si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags, 1, - flush_va, old_fence, *flush_cnt); - si_emit_wait_fence(cs, flush_va, *flush_cnt, 0xffffffff); + (*flush_cnt)++; + + si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags, + EOP_DATA_SEL_VALUE_32BIT, + flush_va, *flush_cnt, + gfx9_eop_bug_va); + radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va, + *flush_cnt, 0xffffffff); } /* VGT state sync */ @@ -1030,6 +887,12 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0)); } + /* VGT streamout state sync */ + if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0)); + } + /* Make sure ME is idle (it executes most packets) before continuing. * This prevents read-after-write hazards between PFP and ME. */ @@ -1044,12 +907,12 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, } if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) || - (chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) { + (chip_class <= GFX7 && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) { si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) | - S_0301F0_TC_WB_ACTION_ENA(chip_class >= VI)); + S_0301F0_TC_WB_ACTION_ENA(chip_class >= GFX8)); cp_coher_cntl = 0; } else { if(flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2) { @@ -1059,14 +922,16 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, * * WB doesn't work without NC. 
*/ - si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, + si_emit_acquire_mem(cs, is_mec, + chip_class >= GFX9, cp_coher_cntl | S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1)); cp_coher_cntl = 0; } if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) { - si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, + si_emit_acquire_mem(cs, is_mec, + chip_class >= GFX9, cp_coher_cntl | S_0085F0_TCL1_ACTION_ENA(1)); cp_coher_cntl = 0; @@ -1078,6 +943,16 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, */ if (cp_coher_cntl) si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, cp_coher_cntl); + + if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | + EVENT_INDEX(0)); + } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | + EVENT_INDEX(0)); + } } void @@ -1092,31 +967,68 @@ si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer) RADV_CMD_FLAG_FLUSH_AND_INV_DB_META | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_VS_PARTIAL_FLUSH | - RADV_CMD_FLAG_VGT_FLUSH); + RADV_CMD_FLAG_VGT_FLUSH | + RADV_CMD_FLAG_START_PIPELINE_STATS | + RADV_CMD_FLAG_STOP_PIPELINE_STATS); if (!cmd_buffer->state.flush_bits) return; - enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class; radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128); - uint32_t *ptr = NULL; - uint64_t va = 0; - if (chip_class == GFX9) { - va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset; - ptr = &cmd_buffer->gfx9_fence_idx; - } si_cs_emit_cache_flush(cmd_buffer->cs, cmd_buffer->device->physical_device->rad_info.chip_class, - ptr, va, + &cmd_buffer->gfx9_fence_idx, + cmd_buffer->gfx9_fence_va, radv_cmd_buffer_uses_mec(cmd_buffer), - cmd_buffer->state.flush_bits); + cmd_buffer->state.flush_bits, + cmd_buffer->gfx9_eop_bug_va); - radv_cmd_buffer_trace_emit(cmd_buffer); + if (unlikely(cmd_buffer->device->trace_bo)) + radv_cmd_buffer_trace_emit(cmd_buffer); + + /* Clear the caches that have been flushed to avoid syncing too much + * when there is some pending active queries. + */ + cmd_buffer->active_query_flush_bits &= ~cmd_buffer->state.flush_bits; + cmd_buffer->state.flush_bits = 0; + + /* If the driver used a compute shader for resetting a query pool, it + * should be finished at this point. + */ + cmd_buffer->pending_reset_query = false; } +/* sets the CP predication state using a boolean stored at va */ +void +si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer, + bool draw_visible, uint64_t va) +{ + uint32_t op = 0; + + if (va) { + op = PRED_OP(PREDICATION_OP_BOOL64); + + /* PREDICATION_DRAW_VISIBLE means that if the 32-bit value is + * zero, all rendering commands are discarded. Otherwise, they + * are discarded if the value is non zero. + */ + op |= draw_visible ? PREDICATION_DRAW_VISIBLE : + PREDICATION_DRAW_NOT_VISIBLE; + } + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) { + radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0)); + radeon_emit(cmd_buffer->cs, op); + radeon_emit(cmd_buffer->cs, va); + radeon_emit(cmd_buffer->cs, va >> 32); + } else { + radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 1, 0)); + radeon_emit(cmd_buffer->cs, va); + radeon_emit(cmd_buffer->cs, op | ((va >> 32) & 0xFF)); + } +} /* Set this if you want the 3D engine to wait until CP DMA is done. 
* It should be set on the last CP DMA packet. */ @@ -1151,10 +1063,9 @@ static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer, uint64_t dst_va, uint64_t src_va, unsigned size, unsigned flags) { - struct radeon_winsys_cs *cs = cmd_buffer->cs; + struct radeon_cmdbuf *cs = cmd_buffer->cs; uint32_t header = 0, command = 0; - assert(size); assert(size <= cp_dma_max_byte_count(cmd_buffer)); radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9); @@ -1180,17 +1091,17 @@ static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer, if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) && src_va == dst_va) - header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */ + header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */ else if (flags & CP_DMA_USE_L2) - header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2); + header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2); if (flags & CP_DMA_CLEAR) header |= S_411_SRC_SEL(V_411_DATA); else if (flags & CP_DMA_USE_L2) header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2); - if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { - radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0)); + if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) { + radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, cmd_buffer->state.predicating)); radeon_emit(cs, header); radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */ radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */ @@ -1200,7 +1111,7 @@ static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer, } else { assert(!(flags & CP_DMA_USE_L2)); header |= S_411_SRC_ADDR_HI(src_va >> 32); - radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0)); + radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, cmd_buffer->state.predicating)); radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */ radeon_emit(cs, header); /* SRC_ADDR_HI [15:0] + flags. */ radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */ @@ -1213,12 +1124,18 @@ static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer, * indices. If we wanted to execute CP DMA in PFP, this packet * should precede it. */ - if ((flags & CP_DMA_SYNC) && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) { - radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0)); - radeon_emit(cs, 0); + if (flags & CP_DMA_SYNC) { + if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) { + radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating)); + radeon_emit(cs, 0); + } + + /* CP will see the sync flag and wait for all DMAs to complete. */ + cmd_buffer->state.dma_is_busy = false; } - radv_cmd_buffer_trace_emit(cmd_buffer); + if (unlikely(cmd_buffer->device->trace_bo)) + radv_cmd_buffer_trace_emit(cmd_buffer); } void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va, @@ -1262,7 +1179,7 @@ static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigne radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr); - va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo); + va = radv_buffer_get_va(cmd_buffer->upload.upload_bo); va += offset; si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags); @@ -1278,6 +1195,8 @@ void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer, uint64_t main_src_va, main_dest_va; uint64_t skipped_size = 0, realign_size = 0; + /* Assume that we are not going to sync after the last DMA operation. 
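
The busy flag set below implements a deferred-sync protocol: every copy or
clear marks the DMA engine busy, and the cost of a fence is only paid when
something actually needs idle, via the zero-byte DMA that
si_cp_dma_wait_for_idle() (added further down) issues with the sync bit.
A simplified, hypothetical model of that protocol:

    #include <assert.h>
    #include <stdbool.h>

    static bool dma_is_busy;

    static void emit_dma(unsigned size, bool sync)
    {
        if (sync)
            dma_is_busy = false; /* CP waits for ALL outstanding DMAs */
        else if (size)
            dma_is_busy = true;  /* unsynced work leaves the engine busy */
    }

    int main(void)
    {
        emit_dma(4096, false); /* buffer copy without sync */
        assert(dma_is_busy);
        emit_dma(0, true);     /* dummy zero-byte DMA, sync flag only */
        assert(!dma_is_busy);
        return 0;
    }
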
*/ + cmd_buffer->state.dma_is_busy = true; if (cmd_buffer->device->physical_device->rad_info.family <= CHIP_CARRIZO || cmd_buffer->device->physical_device->rad_info.family == CHIP_STONEY) { @@ -1310,6 +1229,8 @@ void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer, size + skipped_size + realign_size, &dma_flags); + dma_flags &= ~CP_DMA_SYNC; + si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va, byte_count, dma_flags); @@ -1341,6 +1262,9 @@ void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va, assert(va % 4 == 0 && size % 4 == 0); + /* Assume that we are not going to sync after the last DMA operation. */ + cmd_buffer->state.dma_is_busy = true; + while (size) { unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer)); unsigned dma_flags = CP_DMA_CLEAR; @@ -1356,196 +1280,159 @@ void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va, } } +void si_cp_dma_wait_for_idle(struct radv_cmd_buffer *cmd_buffer) +{ + if (cmd_buffer->device->physical_device->rad_info.chip_class < GFX7) + return; + + if (!cmd_buffer->state.dma_is_busy) + return; + + /* Issue a dummy DMA that copies zero bytes. + * + * The DMA engine will see that there's no work to do and skip this + * DMA request, however, the CP will see the sync flag and still wait + * for all DMAs to complete. + */ + si_emit_cp_dma(cmd_buffer, 0, 0, 0, CP_DMA_SYNC); + + cmd_buffer->state.dma_is_busy = false; +} + /* For MSAA sample positions. */ #define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y) \ - (((s0x) & 0xf) | (((unsigned)(s0y) & 0xf) << 4) | \ - (((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) | \ - (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) | \ + ((((unsigned)(s0x) & 0xf) << 0) | (((unsigned)(s0y) & 0xf) << 4) | \ + (((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) | \ + (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) | \ (((unsigned)(s3x) & 0xf) << 24) | (((unsigned)(s3y) & 0xf) << 28)) - -/* 2xMSAA - * There are two locations (4, 4), (-4, -4). */ -const uint32_t eg_sample_locs_2x[4] = { - FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4), - FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4), - FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4), - FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4), -}; -const unsigned eg_max_dist_2x = 4; -/* 4xMSAA - * There are 4 locations: (-2, 6), (6, -2), (-6, 2), (2, 6). */ -const uint32_t eg_sample_locs_4x[4] = { - FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6), - FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6), - FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6), - FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6), -}; -const unsigned eg_max_dist_4x = 6; - -/* Cayman 8xMSAA */ -static const uint32_t cm_sample_locs_8x[] = { - FILL_SREG( 1, -3, -1, 3, 5, 1, -3, -5), - FILL_SREG( 1, -3, -1, 3, 5, 1, -3, -5), - FILL_SREG( 1, -3, -1, 3, 5, 1, -3, -5), - FILL_SREG( 1, -3, -1, 3, 5, 1, -3, -5), - FILL_SREG(-5, 5, -7, -1, 3, 7, 7, -7), - FILL_SREG(-5, 5, -7, -1, 3, 7, 7, -7), - FILL_SREG(-5, 5, -7, -1, 3, 7, 7, -7), - FILL_SREG(-5, 5, -7, -1, 3, 7, 7, -7), +/* For obtaining location coordinates from registers */ +#define SEXT4(x) ((int)((x) | ((x) & 0x8 ? 
0xfffffff0 : 0))) +#define GET_SFIELD(reg, index) SEXT4(((reg) >> ((index) * 4)) & 0xf) +#define GET_SX(reg, index) GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2) +#define GET_SY(reg, index) GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2 + 1) + +/* 1x MSAA */ +static const uint32_t sample_locs_1x = + FILL_SREG(0, 0, 0, 0, 0, 0, 0, 0); +static const unsigned max_dist_1x = 0; +static const uint64_t centroid_priority_1x = 0x0000000000000000ull; + +/* 2xMSAA */ +static const uint32_t sample_locs_2x = + FILL_SREG(4,4, -4, -4, 0, 0, 0, 0); +static const unsigned max_dist_2x = 4; +static const uint64_t centroid_priority_2x = 0x1010101010101010ull; + +/* 4xMSAA */ +static const uint32_t sample_locs_4x = + FILL_SREG(-2,-6, 6, -2, -6, 2, 2, 6); +static const unsigned max_dist_4x = 6; +static const uint64_t centroid_priority_4x = 0x3210321032103210ull; + +/* 8xMSAA */ +static const uint32_t sample_locs_8x[] = { + FILL_SREG( 1,-3, -1, 3, 5, 1, -3,-5), + FILL_SREG(-5, 5, -7,-1, 3, 7, 7,-7), + /* The following are unused by hardware, but we emit them to IBs + * instead of multiple SET_CONTEXT_REG packets. */ + 0, + 0, }; -static const unsigned cm_max_dist_8x = 8; -/* Cayman 16xMSAA */ -static const uint32_t cm_sample_locs_16x[] = { - FILL_SREG( 1, 1, -1, -3, -3, 2, 4, -1), - FILL_SREG( 1, 1, -1, -3, -3, 2, 4, -1), - FILL_SREG( 1, 1, -1, -3, -3, 2, 4, -1), - FILL_SREG( 1, 1, -1, -3, -3, 2, 4, -1), - FILL_SREG(-5, -2, 2, 5, 5, 3, 3, -5), - FILL_SREG(-5, -2, 2, 5, 5, 3, 3, -5), - FILL_SREG(-5, -2, 2, 5, 5, 3, 3, -5), - FILL_SREG(-5, -2, 2, 5, 5, 3, 3, -5), - FILL_SREG(-2, 6, 0, -7, -4, -6, -6, 4), - FILL_SREG(-2, 6, 0, -7, -4, -6, -6, 4), - FILL_SREG(-2, 6, 0, -7, -4, -6, -6, 4), - FILL_SREG(-2, 6, 0, -7, -4, -6, -6, 4), - FILL_SREG(-8, 0, 7, -4, 6, 7, -7, -8), - FILL_SREG(-8, 0, 7, -4, 6, 7, -7, -8), - FILL_SREG(-8, 0, 7, -4, 6, 7, -7, -8), - FILL_SREG(-8, 0, 7, -4, 6, 7, -7, -8), -}; -static const unsigned cm_max_dist_16x = 8; +static const unsigned max_dist_8x = 7; +static const uint64_t centroid_priority_8x = 0x7654321076543210ull; -unsigned radv_cayman_get_maxdist(int log_samples) +unsigned radv_get_default_max_sample_dist(int log_samples) { unsigned max_dist[] = { - 0, - eg_max_dist_2x, - eg_max_dist_4x, - cm_max_dist_8x, - cm_max_dist_16x + max_dist_1x, + max_dist_2x, + max_dist_4x, + max_dist_8x, }; return max_dist[log_samples]; } -void radv_cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples) +void radv_emit_default_sample_locations(struct radeon_cmdbuf *cs, int nr_samples) { switch (nr_samples) { default: case 1: - radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0); - radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0); - radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0); - radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0); + radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2); + radeon_emit(cs, (uint32_t)centroid_priority_1x); + radeon_emit(cs, centroid_priority_1x >> 32); + radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_1x); + radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_1x); + radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_1x); + radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_1x); break; case 2: - radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]); - 
 
-void radv_cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples)
+void radv_emit_default_sample_locations(struct radeon_cmdbuf *cs, int nr_samples)
 {
 	switch (nr_samples) {
 	default:
 	case 1:
-		radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0);
-		radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0);
-		radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0);
-		radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0);
+		radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
+		radeon_emit(cs, (uint32_t)centroid_priority_1x);
+		radeon_emit(cs, centroid_priority_1x >> 32);
+		radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_1x);
+		radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_1x);
+		radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_1x);
+		radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_1x);
 		break;
 	case 2:
-		radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
-		radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
-		radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
-		radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
+		radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
+		radeon_emit(cs, (uint32_t)centroid_priority_2x);
+		radeon_emit(cs, centroid_priority_2x >> 32);
+		radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_2x);
+		radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_2x);
+		radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_2x);
+		radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_2x);
 		break;
 	case 4:
-		radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
-		radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
-		radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
-		radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
+		radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
+		radeon_emit(cs, (uint32_t)centroid_priority_4x);
+		radeon_emit(cs, centroid_priority_4x >> 32);
+		radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_4x);
+		radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_4x);
+		radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_4x);
+		radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_4x);
 		break;
 	case 8:
-		radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
-		radeon_emit(cs, cm_sample_locs_8x[0]);
-		radeon_emit(cs, cm_sample_locs_8x[4]);
-		radeon_emit(cs, 0);
-		radeon_emit(cs, 0);
-		radeon_emit(cs, cm_sample_locs_8x[1]);
-		radeon_emit(cs, cm_sample_locs_8x[5]);
-		radeon_emit(cs, 0);
-		radeon_emit(cs, 0);
-		radeon_emit(cs, cm_sample_locs_8x[2]);
-		radeon_emit(cs, cm_sample_locs_8x[6]);
-		radeon_emit(cs, 0);
-		radeon_emit(cs, 0);
-		radeon_emit(cs, cm_sample_locs_8x[3]);
-		radeon_emit(cs, cm_sample_locs_8x[7]);
-		break;
-	case 16:
-		radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
-		radeon_emit(cs, cm_sample_locs_16x[0]);
-		radeon_emit(cs, cm_sample_locs_16x[4]);
-		radeon_emit(cs, cm_sample_locs_16x[8]);
-		radeon_emit(cs, cm_sample_locs_16x[12]);
-		radeon_emit(cs, cm_sample_locs_16x[1]);
-		radeon_emit(cs, cm_sample_locs_16x[5]);
-		radeon_emit(cs, cm_sample_locs_16x[9]);
-		radeon_emit(cs, cm_sample_locs_16x[13]);
-		radeon_emit(cs, cm_sample_locs_16x[2]);
-		radeon_emit(cs, cm_sample_locs_16x[6]);
-		radeon_emit(cs, cm_sample_locs_16x[10]);
-		radeon_emit(cs, cm_sample_locs_16x[14]);
-		radeon_emit(cs, cm_sample_locs_16x[3]);
-		radeon_emit(cs, cm_sample_locs_16x[7]);
-		radeon_emit(cs, cm_sample_locs_16x[11]);
-		radeon_emit(cs, cm_sample_locs_16x[15]);
+		radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
+		radeon_emit(cs, (uint32_t)centroid_priority_8x);
+		radeon_emit(cs, centroid_priority_8x >> 32);
+		radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
+		radeon_emit_array(cs, sample_locs_8x, 4);
+		radeon_emit_array(cs, sample_locs_8x, 4);
+		radeon_emit_array(cs, sample_locs_8x, 4);
+		radeon_emit_array(cs, sample_locs_8x, 2);
 		break;
 	}
 }
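The 14-dword sequence in the 8x case works because the four PA_SC_AA_SAMPLE_LOCS_PIXEL_* banks are consecutive context registers: one SET_CONTEXT_REG_SEQ window starting at X0Y0_0 covers X0Y0_0..3, X1Y0_0..3, X0Y1_0..3 and X1Y1_0..1, and the two zero pads in sample_locs_8x let the same 4-dword array be replayed for each bank. Spelled out without radeon_emit_array, an equivalent sketch (the function name is hypothetical; the patch emits the array form above):

/* Equivalent to the "case 8" emission above, with the array replay
 * made explicit. Sketch only, not part of the patch. */
static void emit_8x_sample_locs_explicit(struct radeon_cmdbuf *cs)
{
	radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);

	for (int bank = 0; bank < 4; bank++) {
		/* Two meaningful dwords per pixel bank: 8 samples x 8 bits. */
		radeon_emit(cs, sample_locs_8x[0]);
		radeon_emit(cs, sample_locs_8x[1]);

		/* Dwords 2-3 of each bank are unused pads; the 14-register
		 * window ends after X1Y1_1, so the last bank omits them. */
		if (bank < 3) {
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
		}
	}
}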
 
-static void radv_cayman_get_sample_position(struct radv_device *device,
-					    unsigned sample_count,
-					    unsigned sample_index, float *out_value)
+static void radv_get_sample_position(struct radv_device *device,
+				     unsigned sample_count,
+				     unsigned sample_index, float *out_value)
 {
-	int offset, index;
-	struct {
-		int idx:4;
-	} val;
+	const uint32_t *sample_locs;
+
 	switch (sample_count) {
 	case 1:
 	default:
-		out_value[0] = out_value[1] = 0.5;
+		sample_locs = &sample_locs_1x;
 		break;
 	case 2:
-		offset = 4 * (sample_index * 2);
-		val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
-		out_value[0] = (float)(val.idx + 8) / 16.0f;
-		val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
-		out_value[1] = (float)(val.idx + 8) / 16.0f;
+		sample_locs = &sample_locs_2x;
 		break;
 	case 4:
-		offset = 4 * (sample_index * 2);
-		val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
-		out_value[0] = (float)(val.idx + 8) / 16.0f;
-		val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
-		out_value[1] = (float)(val.idx + 8) / 16.0f;
+		sample_locs = &sample_locs_4x;
 		break;
 	case 8:
-		offset = 4 * (sample_index % 4 * 2);
-		index = (sample_index / 4) * 4;
-		val.idx = (cm_sample_locs_8x[index] >> offset) & 0xf;
-		out_value[0] = (float)(val.idx + 8) / 16.0f;
-		val.idx = (cm_sample_locs_8x[index] >> (offset + 4)) & 0xf;
-		out_value[1] = (float)(val.idx + 8) / 16.0f;
-		break;
-	case 16:
-		offset = 4 * (sample_index % 4 * 2);
-		index = (sample_index / 4) * 4;
-		val.idx = (cm_sample_locs_16x[index] >> offset) & 0xf;
-		out_value[0] = (float)(val.idx + 8) / 16.0f;
-		val.idx = (cm_sample_locs_16x[index] >> (offset + 4)) & 0xf;
-		out_value[1] = (float)(val.idx + 8) / 16.0f;
+		sample_locs = sample_locs_8x;
 		break;
 	}
+
+	out_value[0] = (GET_SX(sample_locs, sample_index) + 8) / 16.0f;
+	out_value[1] = (GET_SY(sample_locs, sample_index) + 8) / 16.0f;
 }
 
 void radv_device_init_msaa(struct radv_device *device)
 {
 	int i;
-	radv_cayman_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);
+
+	radv_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);
 
 	for (i = 0; i < 2; i++)
-		radv_cayman_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
+		radv_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
 	for (i = 0; i < 4; i++)
-		radv_cayman_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
+		radv_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
 	for (i = 0; i < 8; i++)
-		radv_cayman_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
-	for (i = 0; i < 16; i++)
-		radv_cayman_get_sample_position(device, 16, i, device->sample_locations_16x[i]);
+		radv_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
 }
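The (coord + 8) / 16.0f step at the end of radv_get_sample_position maps the signed 1/16th-pixel offsets onto the [0, 1) pixel space used for Vulkan sample positions. Worked through for the 4x table: sample 0 is (-2, -6), so it lands at ((-2 + 8) / 16, (-6 + 8) / 16) = (0.375, 0.125). A standalone check of the full 4x decode (not part of the patch; the macros are copied from the file, as in the first sketch above):

/* Standalone check of the sample-position decode above. */
#include <stdint.h>
#include <stdio.h>

#define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y) \
	((((unsigned)(s0x) & 0xf) << 0) | (((unsigned)(s0y) & 0xf) << 4) | \
	 (((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) | \
	 (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) | \
	 (((unsigned)(s3x) & 0xf) << 24) | (((unsigned)(s3y) & 0xf) << 28))
#define SEXT4(x)               ((int)((x) | ((x) & 0x8 ? 0xfffffff0 : 0)))
#define GET_SFIELD(reg, index) SEXT4(((reg) >> ((index) * 4)) & 0xf)
#define GET_SX(reg, index)     GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2)
#define GET_SY(reg, index)     GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2 + 1)

int main(void)
{
	/* Same values as sample_locs_4x. */
	const uint32_t locs[1] = { FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6) };

	for (unsigned i = 0; i < 4; i++)
		printf("sample %u: (%.3f, %.3f)\n", i,
		       (GET_SX(locs, i) + 8) / 16.0f,
		       (GET_SY(locs, i) + 8) / 16.0f);

	/* Prints (0.375, 0.125), (0.875, 0.375), (0.125, 0.625) and
	 * (0.625, 0.875) - the standard Vulkan 4-sample pattern. */
	return 0;
}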