/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on si driver which is:
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* command buffer handling for AMD GCN */

#include "radv_private.h"
#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "radv_util.h"
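
/* On parts that ship with some render backends disabled ("harvested"),
 * PA_SC_RASTER_CONFIG must be programmed per shader engine rather than
 * broadcast, so each SE is selected through GRBM_GFX_INDEX before its
 * config is written.
 */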
static void
si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
				  struct radeon_cmdbuf *cs,
				  unsigned raster_config,
				  unsigned raster_config_1)
{
	unsigned num_se = MAX2(physical_device->rad_info.max_se, 1);
	unsigned raster_config_se[4];
	unsigned se;

	ac_get_harvested_configs(&physical_device->rad_info,
				 raster_config,
				 &raster_config_1,
				 raster_config_se);

	for (se = 0; se < num_se; se++) {
		/* GRBM_GFX_INDEX has a different offset on GFX6 and GFX7+ */
		if (physical_device->rad_info.chip_class < GFX7)
			radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
					      S_00802C_SE_INDEX(se) |
					      S_00802C_SH_BROADCAST_WRITES(1) |
					      S_00802C_INSTANCE_BROADCAST_WRITES(1));
		else
			radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
					       S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
					       S_030800_INSTANCE_BROADCAST_WRITES(1));
		radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se[se]);
	}

	/* GRBM_GFX_INDEX has a different offset on GFX6 and GFX7+ */
	if (physical_device->rad_info.chip_class < GFX7)
		radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
				      S_00802C_SE_BROADCAST_WRITES(1) |
				      S_00802C_SH_BROADCAST_WRITES(1) |
				      S_00802C_INSTANCE_BROADCAST_WRITES(1));
	else
		radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
				       S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
				       S_030800_INSTANCE_BROADCAST_WRITES(1));

	if (physical_device->rad_info.chip_class >= GFX7)
		radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
}
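
/* Emit the invariant compute state: dispatch origin, the CU masks for the
 * static thread management registers and, when present, the border color
 * buffer address. Called at the end of si_emit_graphics so the preamble
 * also initializes compute state.
 */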
static void
si_emit_compute(struct radv_device *device,
		struct radeon_cmdbuf *cs)
{
	radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);

	radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
	/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
	 * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));

	if (device->physical_device->rad_info.chip_class >= GFX7) {
		/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
		radeon_set_sh_reg_seq(cs,
				      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
		radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
			    S_00B858_SH1_CU_EN(0xffff));
		radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
			    S_00B858_SH1_CU_EN(0xffff));

		if (device->border_color_data.bo) {
			uint64_t bc_va = radv_buffer_get_va(device->border_color_data.bo);

			radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
			radeon_emit(cs, bc_va >> 8);
			radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40));
		}
	}

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_uconfig_reg(cs, R_0301EC_CP_COHER_START_DELAY,
				       device->physical_device->rad_info.chip_class >= GFX10 ? 0x20 : 0);
	}

	if (device->physical_device->rad_info.chip_class >= GFX10) {
		radeon_set_sh_reg(cs, R_00B890_COMPUTE_USER_ACCUM_0, 0);
		radeon_set_sh_reg(cs, R_00B894_COMPUTE_USER_ACCUM_1, 0);
		radeon_set_sh_reg(cs, R_00B898_COMPUTE_USER_ACCUM_2, 0);
		radeon_set_sh_reg(cs, R_00B89C_COMPUTE_USER_ACCUM_3, 0);
		radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, 0);
		radeon_set_sh_reg(cs, R_00B9F4_COMPUTE_DISPATCH_TUNNEL, 0);
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (device->physical_device->rad_info.chip_class <= GFX6) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */

		radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
				  0x190 /* Default value */);

		if (device->border_color_data.bo) {
			uint64_t bc_va = radv_buffer_get_va(device->border_color_data.bo);

			radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR, bc_va >> 8);
		}
	}
}
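
/* Unsigned 12.4 fixed point stores 12 integer and 4 fractional bits, so the
 * float-to-fixed conversion below is a multiply by 16: e.g. 2.5 becomes 40
 * (0x28), and the largest representable value is 4095.9375 (0xffff).
 */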
/* 12.4 fixed-point */
static unsigned radv_pack_float_12p4(float x)
{
	return x <= 0    ? 0 :
	       x >= 4096 ? 0xffff : x * 16;
}
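
/* Program PA_SC_RASTER_CONFIG(_1) on GFX6-GFX8: use the default config when
 * every render backend is enabled, otherwise take the harvested path above.
 */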
static void
si_set_raster_config(struct radv_physical_device *physical_device,
		     struct radeon_cmdbuf *cs)
{
	unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
	unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
	unsigned raster_config, raster_config_1;

	ac_get_raster_config(&physical_device->rad_info,
			     &raster_config,
			     &raster_config_1, NULL);

	/* Always use the default config when all backends are enabled
	 * (or when we failed to determine the enabled backends).
	 */
	if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
		radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG,
				       raster_config);
		if (physical_device->rad_info.chip_class >= GFX7)
			radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
					       raster_config_1);
	} else {
		si_write_harvested_raster_configs(physical_device, cs,
						  raster_config,
						  raster_config_1);
	}
}
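
/* Emit the graphics preamble: context control, CLEAR_STATE when available,
 * and every register that CLEAR_STATE does not (or does not reliably) set.
 */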
static void
si_emit_graphics(struct radv_device *device,
		 struct radeon_cmdbuf *cs)
{
	struct radv_physical_device *physical_device = device->physical_device;

	bool has_clear_state = physical_device->rad_info.has_clear_state;
	int i;

	radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
	radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));

	if (has_clear_state) {
		radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
		radeon_emit(cs, 0);
	}

	if (physical_device->rad_info.chip_class <= GFX8)
		si_set_raster_config(physical_device, cs);

	radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
	if (!has_clear_state)
		radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));

	/* FIXME calculate these values somehow ??? */
	if (physical_device->rad_info.chip_class <= GFX8) {
		radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
		radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
	}

	if (!has_clear_state) {
		radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);
		radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
		radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
	}

	if (physical_device->rad_info.chip_class <= GFX9)
		radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
	if (!has_clear_state)
		radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
	if (physical_device->rad_info.chip_class < GFX7)
		radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
				      S_008A14_CLIP_VTX_REORDER_ENA(1));

	if (!has_clear_state)
		radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);

	/* CLEAR_STATE doesn't clear these correctly on certain generations.
	 * I don't know why. Deduced by trial and error.
	 */
	if (physical_device->rad_info.chip_class <= GFX7 || !has_clear_state) {
		radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
		radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL,
				       S_028204_WINDOW_OFFSET_DISABLE(1));
		radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL,
				       S_028240_WINDOW_OFFSET_DISABLE(1));
		radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
				       S_028244_BR_X(16384) | S_028244_BR_Y(16384));
		radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
		radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
				       S_028034_BR_X(16384) | S_028034_BR_Y(16384));
	}

	if (!has_clear_state) {
		for (i = 0; i < 16; i++) {
			radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
			radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
		}
	}

	if (!has_clear_state) {
		radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
		radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
		/* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on GFX6 */
		radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
		radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);
		radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
		radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
		radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
	}

	radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
			       S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
			       S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));

	if (physical_device->rad_info.chip_class >= GFX10) {
		radeon_set_context_reg(cs, R_028A98_VGT_DRAW_PAYLOAD_CNTL, 0);
		radeon_set_uconfig_reg(cs, R_030964_GE_MAX_VTX_INDX, ~0);
		radeon_set_uconfig_reg(cs, R_030924_GE_MIN_VTX_INDX, 0);
		radeon_set_uconfig_reg(cs, R_030928_GE_INDX_OFFSET, 0);
		radeon_set_uconfig_reg(cs, R_03097C_GE_STEREO_CNTL, 0);
		radeon_set_uconfig_reg(cs, R_030988_GE_USER_VGPR_EN, 0);
	} else if (physical_device->rad_info.chip_class == GFX9) {
		radeon_set_uconfig_reg(cs, R_030920_VGT_MAX_VTX_INDX, ~0);
		radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0);
		radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0);
	} else {
		/* These registers, when written, also overwrite the
		 * CLEAR_STATE context, so we can't rely on CLEAR_STATE setting
		 * them. It would be an issue if there was another UMD
		 * changing them.
		 */
		radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
		radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
		radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
	}

	if (physical_device->rad_info.chip_class >= GFX7) {
		if (physical_device->rad_info.chip_class >= GFX10) {
			/* Logical CUs 16 - 31 */
			radeon_set_sh_reg_idx(physical_device, cs, R_00B404_SPI_SHADER_PGM_RSRC4_HS,
					      3, S_00B404_CU_EN(0xffff));
			radeon_set_sh_reg_idx(physical_device, cs, R_00B104_SPI_SHADER_PGM_RSRC4_VS,
					      3, S_00B104_CU_EN(0xffff));
			radeon_set_sh_reg_idx(physical_device, cs, R_00B004_SPI_SHADER_PGM_RSRC4_PS,
					      3, S_00B004_CU_EN(0xffff));
		}

		if (physical_device->rad_info.chip_class >= GFX9) {
			radeon_set_sh_reg_idx(physical_device, cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
					      3, S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F));
		} else {
			radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS,
					  S_00B51C_CU_EN(0xffff) | S_00B51C_WAVE_LIMIT(0x3F));
			radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
					  S_00B41C_WAVE_LIMIT(0x3F));
			radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES,
					  S_00B31C_CU_EN(0xffff) | S_00B31C_WAVE_LIMIT(0x3F));
			/* If this is 0, Bonaire can hang even if GS isn't being used.
			 * Other chips are unaffected. These are suboptimal values,
			 * but we don't use on-chip GS.
			 */
			radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
					       S_028A44_ES_VERTS_PER_SUBGRP(64) |
					       S_028A44_GS_PRIMS_PER_SUBGRP(4));
		}
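
		/* Worked example, assuming 10 usable CUs per SA on a GFX10 part
		 * with late alloc enabled: late_alloc_wave64 = (10 - 2) * 4 = 32,
		 * and CU2/CU3 are masked out of the VS/GS CU masks (0xfff3).
		 */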
		/* Compute LATE_ALLOC_VS.LIMIT. */
		unsigned num_cu_per_sh = physical_device->rad_info.min_good_cu_per_sa;
		unsigned late_alloc_wave64 = 0; /* The limit is per SA. */
		unsigned late_alloc_wave64_gs = 0;
		unsigned cu_mask_vs = 0xffff;
		unsigned cu_mask_gs = 0xffff;

		if (physical_device->rad_info.chip_class >= GFX10) {
			/* For Wave32, the hw will launch twice the number of late
			 * alloc waves, so 1 == 2x wave32.
			 */
			if (!physical_device->rad_info.use_late_alloc) {
				late_alloc_wave64 = 0;
			} else if (num_cu_per_sh <= 6) {
				late_alloc_wave64 = num_cu_per_sh - 2;
			} else {
				late_alloc_wave64 = (num_cu_per_sh - 2) * 4;

				/* CU2 & CU3 disabled because of the dual CU design */
				cu_mask_vs = 0xfff3;
				cu_mask_gs = 0xfff3; /* NGG only */
			}

			late_alloc_wave64_gs = late_alloc_wave64;

			/* Don't use late alloc for NGG on Navi14 due to a hw
			 * bug. If NGG is never used, enable all CUs.
			 */
			if (!physical_device->use_ngg ||
			    physical_device->rad_info.family == CHIP_NAVI14) {
				late_alloc_wave64_gs = 0;
				cu_mask_gs = 0xffff;
			}

			/* Limit LATE_ALLOC_GS to prevent a hang (hw bug). */
			if (physical_device->rad_info.chip_class == GFX10)
				late_alloc_wave64_gs = MIN2(late_alloc_wave64_gs, 64);
		} else {
			if (!physical_device->rad_info.use_late_alloc) {
				late_alloc_wave64 = 0;
			} else if (num_cu_per_sh <= 4) {
				/* Too few available compute units per SA.
				 * Disallowing VS to run on one CU could hurt
				 * us more than late VS allocation would help.
				 *
				 * 2 is the highest safe number that allows us
				 * to keep all CUs enabled.
				 */
				late_alloc_wave64 = 2;
			} else {
				/* This is a good initial value, allowing 1
				 * late_alloc wave per SIMD on num_cu - 2.
				 */
				late_alloc_wave64 = (num_cu_per_sh - 2) * 4;
			}

			if (late_alloc_wave64 > 2)
				cu_mask_vs = 0xfffe; /* 1 CU disabled */
		}

		radeon_set_sh_reg_idx(physical_device, cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
				      3, S_00B118_CU_EN(cu_mask_vs) |
				      S_00B118_WAVE_LIMIT(0x3F));
		radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS,
				  S_00B11C_LIMIT(late_alloc_wave64));

		radeon_set_sh_reg_idx(physical_device, cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
				      3, S_00B21C_CU_EN(cu_mask_gs) | S_00B21C_WAVE_LIMIT(0x3F));

		if (physical_device->rad_info.chip_class >= GFX10) {
			radeon_set_sh_reg_idx(physical_device, cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
					      3, S_00B204_CU_EN(0xffff) |
					      S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(late_alloc_wave64_gs));
		}

		radeon_set_sh_reg_idx(physical_device, cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
				      3, S_00B01C_CU_EN(0xffff) | S_00B01C_WAVE_LIMIT(0x3F));
	}

	if (physical_device->rad_info.chip_class >= GFX10) {
		/* Break up a pixel wave if it contains deallocs for more than
		 * half the parameter cache.
		 *
		 * To avoid a deadlock where pixel waves aren't launched
		 * because they're waiting for more pixels while the frontend
		 * is stuck waiting for PC space, the maximum allowed value is
		 * the size of the PC minus the largest possible allocation for
		 * a single primitive shader subgroup.
		 */
		radeon_set_context_reg(cs, R_028C50_PA_SC_NGG_MODE_CNTL,
				       S_028C50_MAX_DEALLOCS_IN_WAVE(512));
		radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);

		/* Enable CMASK/FMASK/HTILE/DCC caching in L2 for small chips. */
		unsigned meta_write_policy, meta_read_policy;

		/* TODO: investigate whether LRU improves performance on other chips too */
		if (physical_device->rad_info.num_render_backends <= 4) {
			meta_write_policy = V_02807C_CACHE_LRU_WR; /* cache writes */
			meta_read_policy = V_02807C_CACHE_LRU_RD; /* cache reads */
		} else {
			meta_write_policy = V_02807C_CACHE_STREAM_WR; /* write combine */
			meta_read_policy = V_02807C_CACHE_NOA_RD; /* don't cache reads */
		}

		radeon_set_context_reg(cs, R_02807C_DB_RMI_L2_CACHE_CONTROL,
				       S_02807C_Z_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
				       S_02807C_S_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
				       S_02807C_HTILE_WR_POLICY(meta_write_policy) |
				       S_02807C_ZPCPSD_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
				       S_02807C_Z_RD_POLICY(V_02807C_CACHE_NOA_RD) |
				       S_02807C_S_RD_POLICY(V_02807C_CACHE_NOA_RD) |
				       S_02807C_HTILE_RD_POLICY(meta_read_policy));

		radeon_set_context_reg(cs, R_028410_CB_RMI_GL2_CACHE_CONTROL,
				       S_028410_CMASK_WR_POLICY(meta_write_policy) |
				       S_028410_FMASK_WR_POLICY(meta_write_policy) |
				       S_028410_DCC_WR_POLICY(meta_write_policy) |
				       S_028410_COLOR_WR_POLICY(V_028410_CACHE_STREAM_WR) |
				       S_028410_CMASK_RD_POLICY(meta_read_policy) |
				       S_028410_FMASK_RD_POLICY(meta_read_policy) |
				       S_028410_DCC_RD_POLICY(meta_read_policy) |
				       S_028410_COLOR_RD_POLICY(V_028410_CACHE_NOA_RD));
		radeon_set_context_reg(cs, R_028428_CB_COVERAGE_OUT_CONTROL, 0);

		radeon_set_sh_reg(cs, R_00B0C8_SPI_SHADER_USER_ACCUM_PS_0, 0);
		radeon_set_sh_reg(cs, R_00B0CC_SPI_SHADER_USER_ACCUM_PS_1, 0);
		radeon_set_sh_reg(cs, R_00B0D0_SPI_SHADER_USER_ACCUM_PS_2, 0);
		radeon_set_sh_reg(cs, R_00B0D4_SPI_SHADER_USER_ACCUM_PS_3, 0);
		radeon_set_sh_reg(cs, R_00B1C8_SPI_SHADER_USER_ACCUM_VS_0, 0);
		radeon_set_sh_reg(cs, R_00B1CC_SPI_SHADER_USER_ACCUM_VS_1, 0);
		radeon_set_sh_reg(cs, R_00B1D0_SPI_SHADER_USER_ACCUM_VS_2, 0);
		radeon_set_sh_reg(cs, R_00B1D4_SPI_SHADER_USER_ACCUM_VS_3, 0);
		radeon_set_sh_reg(cs, R_00B2C8_SPI_SHADER_USER_ACCUM_ESGS_0, 0);
		radeon_set_sh_reg(cs, R_00B2CC_SPI_SHADER_USER_ACCUM_ESGS_1, 0);
		radeon_set_sh_reg(cs, R_00B2D0_SPI_SHADER_USER_ACCUM_ESGS_2, 0);
		radeon_set_sh_reg(cs, R_00B2D4_SPI_SHADER_USER_ACCUM_ESGS_3, 0);
		radeon_set_sh_reg(cs, R_00B4C8_SPI_SHADER_USER_ACCUM_LSHS_0, 0);
		radeon_set_sh_reg(cs, R_00B4CC_SPI_SHADER_USER_ACCUM_LSHS_1, 0);
		radeon_set_sh_reg(cs, R_00B4D0_SPI_SHADER_USER_ACCUM_LSHS_2, 0);
		radeon_set_sh_reg(cs, R_00B4D4_SPI_SHADER_USER_ACCUM_LSHS_3, 0);

		radeon_set_sh_reg(cs, R_00B0C0_SPI_SHADER_REQ_CTRL_PS,
				  S_00B0C0_SOFT_GROUPING_EN(1) |
				  S_00B0C0_NUMBER_OF_REQUESTS_PER_CU(4 - 1));
		radeon_set_sh_reg(cs, R_00B1C0_SPI_SHADER_REQ_CTRL_VS, 0);
	}

	if (physical_device->rad_info.chip_class >= GFX10_3) {
		radeon_set_context_reg(cs, R_028750_SX_PS_DOWNCONVERT_CONTROL_GFX103, 0xff);
		radeon_set_context_reg(cs, 0x28848, 1 << 9); /* This fixes sample shading. */
	}

	if (physical_device->rad_info.chip_class == GFX10) {
		/* SQ_NON_EVENT must be emitted before GE_PC_ALLOC is written. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));

		/* TODO: For culling, replace 128 with 256. */
		radeon_set_uconfig_reg(cs, R_030980_GE_PC_ALLOC,
				       S_030980_OVERSUB_EN(physical_device->rad_info.use_late_alloc) |
				       S_030980_NUM_PC_LINES(128 * physical_device->rad_info.max_se - 1));
	}

	if (physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
				       S_028B50_ACCUM_ISOLINE(40) |
				       S_028B50_ACCUM_TRI(30) |
				       S_028B50_ACCUM_QUAD(24) |
				       S_028B50_DONUT_SPLIT(24) |
				       S_028B50_TRAP_SPLIT(6));
	} else if (physical_device->rad_info.chip_class >= GFX8) {
		uint32_t vgt_tess_distribution;

		vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
			S_028B50_ACCUM_TRI(11) |
			S_028B50_ACCUM_QUAD(11) |
			S_028B50_DONUT_SPLIT(16);

		if (physical_device->rad_info.family == CHIP_FIJI ||
		    physical_device->rad_info.family >= CHIP_POLARIS10)
			vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);

		radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
				       vgt_tess_distribution);
	} else if (!has_clear_state) {
		radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
		radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
	}

	if (device->border_color_data.bo) {
		uint64_t border_color_va = radv_buffer_get_va(device->border_color_data.bo);

		radeon_set_context_reg(cs, R_028080_TA_BC_BASE_ADDR, border_color_va >> 8);
		if (physical_device->rad_info.chip_class >= GFX7) {
			radeon_set_context_reg(cs, R_028084_TA_BC_BASE_ADDR_HI,
					       S_028084_ADDRESS(border_color_va >> 40));
		}
	}

	if (physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1,
				       S_028C48_MAX_ALLOC_COUNT(physical_device->rad_info.pbb_max_alloc_count - 1) |
				       S_028C48_MAX_PRIM_PER_BATCH(1023));
		radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
				       S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
		radeon_set_uconfig_reg(cs, R_030968_VGT_INSTANCE_BASE_ID, 0);
	}

	unsigned tmp = (unsigned)(1.0 * 8.0);
	radeon_set_context_reg_seq(cs, R_028A00_PA_SU_POINT_SIZE, 1);
	radeon_emit(cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	radeon_set_context_reg_seq(cs, R_028A04_PA_SU_POINT_MINMAX, 1);
	radeon_emit(cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
		    S_028A04_MAX_SIZE(radv_pack_float_12p4(8191.875/2)));

	if (!has_clear_state) {
		radeon_set_context_reg(cs, R_028004_DB_COUNT_CONTROL,
				       S_028004_ZPASS_INCREMENT_DISABLE(1));
	}

	/* Enable the Polaris small primitive filter control.
	 * XXX: There is possibly an issue when MSAA is off (see RadeonSI
	 * has_msaa_sample_loc_bug). But this doesn't seem to regress anything,
	 * and AMDVLK doesn't have a workaround either.
	 */
	if (physical_device->rad_info.family >= CHIP_POLARIS10) {
		unsigned small_prim_filter_cntl =
			S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
			/* Workaround for a hw line bug. */
			S_028830_LINE_FILTER_DISABLE(physical_device->rad_info.family <= CHIP_POLARIS12);

		radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL,
				       small_prim_filter_cntl);
	}

	radeon_set_context_reg(cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       S_0286D4_FLAT_SHADE_ENA(1) |
			       S_0286D4_PNT_SPRITE_ENA(1) |
			       S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
			       S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
			       S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
			       S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
			       S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */

	radeon_set_context_reg(cs, R_028BE4_PA_SU_VTX_CNTL,
			       S_028BE4_PIX_CENTER(1) |
			       S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
			       S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));

	radeon_set_context_reg(cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	si_emit_compute(device, cs);
}
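
/* Record the invariant graphics preamble once per device into a read-only
 * buffer (device->gfx_init), padded to an 8-dword boundary, so submissions
 * can reuse it instead of re-emitting the state every time.
 */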
void
cik_create_gfx_config(struct radv_device *device)
{
	struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, RING_GFX);
	if (!cs)
		return;

	si_emit_graphics(device, cs);

	while (cs->cdw & 7) {
		if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
			radeon_emit(cs, PKT2_NOP_PAD);
		else
			radeon_emit(cs, PKT3_NOP_PAD);
	}

	device->gfx_init = device->ws->buffer_create(device->ws,
						     cs->cdw * 4, 4096,
						     RADEON_DOMAIN_GTT,
						     RADEON_FLAG_CPU_ACCESS |
						     RADEON_FLAG_NO_INTERPROCESS_SHARING |
						     RADEON_FLAG_READ_ONLY |
						     RADEON_FLAG_GTT_WC,
						     RADV_BO_PRIORITY_CS);
	if (!device->gfx_init)
		goto fail;

	void *map = device->ws->buffer_map(device->gfx_init);
	if (!map) {
		device->ws->buffer_destroy(device->gfx_init);
		device->gfx_init = NULL;
		goto fail;
	}

	memcpy(map, cs->buf, cs->cdw * 4);

	device->ws->buffer_unmap(device->gfx_init);
	device->gfx_init_size_dw = cs->cdw;
fail:
	device->ws->cs_destroy(cs);
}
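
/* Convert a VkViewport into the hardware scale/translate pair, so that
 * screen = translate + scale * ndc: scale[0] = width/2 and
 * translate[0] = x + width/2 (Y is analogous), while the depth range maps
 * [minDepth, maxDepth] through scale[2] = f - n, translate[2] = n.
 */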
static void
get_viewport_xform(const VkViewport *viewport,
		   float scale[3], float translate[3])
{
	float x = viewport->x;
	float y = viewport->y;
	float half_width = 0.5f * viewport->width;
	float half_height = 0.5f * viewport->height;
	double n = viewport->minDepth;
	double f = viewport->maxDepth;

	scale[0] = half_width;
	translate[0] = half_width + x;
	scale[1] = half_height;
	translate[1] = half_height + y;

	scale[2] = (f - n);
	translate[2] = n;
}
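
/* Write the PA_CL_VPORT_* transforms (six dwords per viewport) and the
 * per-viewport ZMIN/ZMAX pairs for `count` viewports starting at first_vp.
 */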
void
si_write_viewport(struct radeon_cmdbuf *cs, int first_vp,
		  int count, const VkViewport *viewports)
{
	int i;

	assert(count);
	radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
				   first_vp * 4 * 6, count * 6);

	for (i = 0; i < count; i++) {
		float scale[3], translate[3];

		get_viewport_xform(&viewports[i], scale, translate);
		radeon_emit(cs, fui(scale[0]));
		radeon_emit(cs, fui(translate[0]));
		radeon_emit(cs, fui(scale[1]));
		radeon_emit(cs, fui(translate[1]));
		radeon_emit(cs, fui(scale[2]));
		radeon_emit(cs, fui(translate[2]));
	}

	radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 +
				   first_vp * 4 * 2, count * 2);
	for (i = 0; i < count; i++) {
		float zmin = MIN2(viewports[i].minDepth, viewports[i].maxDepth);
		float zmax = MAX2(viewports[i].minDepth, viewports[i].maxDepth);
		radeon_emit(cs, fui(zmin));
		radeon_emit(cs, fui(zmax));
	}
}
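
/* Derive the smallest scissor rectangle that encloses a viewport by
 * evaluating the viewport transform at the NDC extremes (+-1 in X and Y).
 */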
static VkRect2D si_scissor_from_viewport(const VkViewport *viewport)
{
	float scale[3], translate[3];
	VkRect2D rect;

	get_viewport_xform(viewport, scale, translate);

	rect.offset.x = translate[0] - fabsf(scale[0]);
	rect.offset.y = translate[1] - fabsf(scale[1]);
	rect.extent.width = ceilf(translate[0] + fabsf(scale[0])) - rect.offset.x;
	rect.extent.height = ceilf(translate[1] + fabsf(scale[1])) - rect.offset.y;

	return rect;
}

static VkRect2D si_intersect_scissor(const VkRect2D *a, const VkRect2D *b) {
	VkRect2D ret;
	ret.offset.x = MAX2(a->offset.x, b->offset.x);
	ret.offset.y = MAX2(a->offset.y, b->offset.y);
	ret.extent.width = MIN2(a->offset.x + a->extent.width,
				b->offset.x + b->extent.width) - ret.offset.x;
	ret.extent.height = MIN2(a->offset.y + a->extent.height,
				 b->offset.y + b->extent.height) - ret.offset.y;
	return ret;
}
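
/* Write the per-viewport scissors (clamped to the viewport extents) and the
 * guardband: clip-space coordinates may extend to +-guardband before the
 * hardware must clip, limited so that |translate| + guardband * scale stays
 * within the +-32767 screen-space range.
 */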
void
si_write_scissors(struct radeon_cmdbuf *cs, int first,
		  int count, const VkRect2D *scissors,
		  const VkViewport *viewports, bool can_use_guardband)
{
	int i;
	float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY;
	const float max_range = 32767.0f;
	if (!count)
		return;

	radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
	for (i = 0; i < count; i++) {
		VkRect2D viewport_scissor = si_scissor_from_viewport(viewports + i);
		VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);

		get_viewport_xform(viewports + i, scale, translate);
		scale[0] = fabsf(scale[0]);
		scale[1] = fabsf(scale[1]);

		if (scale[0] < 0.5)
			scale[0] = 0.5;
		if (scale[1] < 0.5)
			scale[1] = 0.5;

		guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
		guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);

		radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
			    S_028250_TL_Y(scissor.offset.y) |
			    S_028250_WINDOW_OFFSET_DISABLE(1));
		radeon_emit(cs, S_028254_BR_X(scissor.offset.x + scissor.extent.width) |
			    S_028254_BR_Y(scissor.offset.y + scissor.extent.height));
	}
	if (!can_use_guardband) {
		guardband_x = 1.0;
		guardband_y = 1.0;
	}

	radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
	radeon_emit(cs, fui(guardband_y));
	radeon_emit(cs, fui(1.0));
	radeon_emit(cs, fui(guardband_x));
	radeon_emit(cs, fui(1.0));
}

static inline unsigned
radv_prims_for_vertices(struct radv_prim_vertex_count *info, unsigned num)
{
	if (num == 0)
		return 0;

	if (info->incr == 0)
		return 0;

	if (num < info->min)
		return 0;

	return 1 + ((num - info->min) / info->incr);
}
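
/* {min, incr} per primitive type: a draw of `num` vertices yields
 * 1 + (num - min) / incr primitives, e.g. a 7-vertex triangle strip
 * ({3, 1}) yields 1 + (7 - 3) / 1 = 5 triangles.
 */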
static const struct radv_prim_vertex_count prim_size_table[] = {
	[V_008958_DI_PT_NONE] = {0, 0},
	[V_008958_DI_PT_POINTLIST] = {1, 1},
	[V_008958_DI_PT_LINELIST] = {2, 2},
	[V_008958_DI_PT_LINESTRIP] = {2, 1},
	[V_008958_DI_PT_TRILIST] = {3, 3},
	[V_008958_DI_PT_TRIFAN] = {3, 1},
	[V_008958_DI_PT_TRISTRIP] = {3, 1},
	[V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
	[V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1},
	[V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
	[V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},
	[V_008958_DI_PT_RECTLIST] = {3, 3},
	[V_008958_DI_PT_LINELOOP] = {2, 1},
	[V_008958_DI_PT_POLYGON] = {3, 1},
	[V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
};
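
/* Compute IA_MULTI_VGT_PARAM for a draw. Most of this is per-family
 * workarounds: when the WD/IA blocks must switch on end-of-packet or
 * end-of-instance, and when partial VS/ES waves are required.
 */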
uint32_t
si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
			  bool instanced_draw, bool indirect_draw,
			  bool count_from_stream_output,
			  uint32_t draw_vertex_count,
			  unsigned topology)
{
	enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
	enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
	struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
	const unsigned max_primgroup_in_wave = 2;
	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_es_wave;
	bool multi_instances_smaller_than_primgroup;
	struct radv_prim_vertex_count prim_vertex_count = prim_size_table[topology];

	if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) {
		if (topology == V_008958_DI_PT_PATCH) {
			prim_vertex_count.min = cmd_buffer->state.pipeline->graphics.tess_patch_control_points;
			prim_vertex_count.incr = 1;
		}
	}

	multi_instances_smaller_than_primgroup = indirect_draw;
	if (!multi_instances_smaller_than_primgroup && instanced_draw) {
		uint32_t num_prims = radv_prims_for_vertices(&prim_vertex_count, draw_vertex_count);
		if (num_prims < cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.primgroup_size)
			multi_instances_smaller_than_primgroup = true;
	}

	ia_switch_on_eoi = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.ia_switch_on_eoi;
	partial_vs_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_vs_wave;

	if (chip_class >= GFX7) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (cmd_buffer->device->physical_device->rad_info.max_se < 4 ||
		    topology == V_008958_DI_PT_POLYGON ||
		    topology == V_008958_DI_PT_LINELOOP ||
		    topology == V_008958_DI_PT_TRIFAN ||
		    topology == V_008958_DI_PT_TRISTRIP_ADJ ||
		    (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
		     (cmd_buffer->device->physical_device->rad_info.family < CHIP_POLARIS10 ||
		      (topology != V_008958_DI_PT_POINTLIST &&
		       topology != V_008958_DI_PT_LINESTRIP))))
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (family == CHIP_HAWAII &&
		    (instanced_draw || indirect_draw))
			wd_switch_on_eop = true;

		/* Performance recommendation for 4 SE Gfx7-8 parts if
		 * instances are smaller than a primgroup.
		 * Assume indirect draws always use small instances.
		 * This is needed for good VS wave utilization.
		 */
		if (chip_class <= GFX8 &&
		    info->max_se == 4 &&
		    multi_instances_smaller_than_primgroup)
			wd_switch_on_eop = true;

		/* Required on GFX7 and later. */
		if (info->max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by GFX8. */
		if (ia_switch_on_eoi &&
		    (family == CHIP_HAWAII ||
		     (chip_class == GFX8 &&
		      /* max primgroup in wave is always 2 - leave this for documentation */
		      (radv_pipeline_has_gs(cmd_buffer->state.pipeline) || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    (instanced_draw || indirect_draw))
			partial_vs_wave = true;

		/* Hardware requirement when drawing primitives from a stream
		 * output buffer.
		 */
		if (count_from_stream_output)
			wd_switch_on_eop = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (chip_class <= GFX8 && ia_switch_on_eoi)
		partial_es_wave = true;

	if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
		/* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
		 * The hw doc says all multi-SE chips are affected, but amdgpu-pro Vulkan
		 * only applies it to Hawaii. Do what amdgpu-pro Vulkan does.
		 */
		if (family == CHIP_HAWAII && ia_switch_on_eoi) {
			bool set_vgt_flush = indirect_draw;
			if (!set_vgt_flush && instanced_draw) {
				uint32_t num_prims = radv_prims_for_vertices(&prim_vertex_count, draw_vertex_count);
				if (num_prims <= 1)
					set_vgt_flush = true;
			}
			if (set_vgt_flush)
				cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
		}
	}

	/* Workaround for a VGT hang when strip primitive types are used with
	 * primitive restart.
	 */
	if (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
	    (topology == V_008958_DI_PT_LINESTRIP ||
	     topology == V_008958_DI_PT_TRISTRIP ||
	     topology == V_008958_DI_PT_LINESTRIP_ADJ ||
	     topology == V_008958_DI_PT_TRISTRIP_ADJ)) {
		partial_vs_wave = true;
	}

	return cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.base |
	       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
	       S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
	       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
	       S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
	       S_028AA8_WD_SWITCH_ON_EOP(chip_class >= GFX7 ? wd_switch_on_eop : 0);
}
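
/* Emit an end-of-pipe event that optionally writes a fence value to memory
 * once prior work has drained: RELEASE_MEM on GFX9+ (and GFX8 compute),
 * EVENT_WRITE_EOP on older chips.
 */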
void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
				enum chip_class chip_class,
				bool is_mec,
				unsigned event, unsigned event_flags,
				unsigned dst_sel, unsigned data_sel,
				uint64_t va,
				uint32_t new_fence,
				uint64_t gfx9_eop_bug_va)
{
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(event == V_028A90_CS_DONE ||
				  event == V_028A90_PS_DONE ? 6 : 5) |
		      event_flags;
	unsigned is_gfx8_mec = is_mec && chip_class < GFX9;
	unsigned sel = EOP_DST_SEL(dst_sel) |
		       EOP_DATA_SEL(data_sel);

	/* Wait for write confirmation before writing data, but don't send
	 * an interrupt. */
	if (data_sel != EOP_DATA_SEL_DISCARD)
		sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);

	if (chip_class >= GFX9 || is_gfx8_mec) {
		/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
		 * counters) must immediately precede every timestamp event to
		 * prevent a GPU hang on GFX9.
		 */
		if (chip_class == GFX9 && !is_mec) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
			radeon_emit(cs, gfx9_eop_bug_va);
			radeon_emit(cs, gfx9_eop_bug_va >> 32);
		}

		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, false));
		radeon_emit(cs, op);
		radeon_emit(cs, sel);
		radeon_emit(cs, va);		/* address lo */
		radeon_emit(cs, va >> 32);	/* address hi */
		radeon_emit(cs, new_fence);	/* immediate data lo */
		radeon_emit(cs, 0);		/* immediate data hi */
		if (!is_gfx8_mec)
			radeon_emit(cs, 0);	/* unused */
	} else {
		if (chip_class == GFX7 ||
		    chip_class == GFX8) {
			/* Two EOP events are required to make all engines go idle
			 * (and optional cache flushes executed) before the timestamp
			 * is written.
			 */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
			radeon_emit(cs, op);
			radeon_emit(cs, va);
			radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
			radeon_emit(cs, 0); /* immediate data */
			radeon_emit(cs, 0); /* unused */
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
		radeon_emit(cs, new_fence); /* immediate data */
		radeon_emit(cs, 0); /* unused */
	}
}
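
/* Make the CP poll a memory location until (value & mask) satisfies the
 * comparison against ref (WAIT_REG_MEM with a poll interval of 4).
 */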
void
radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va,
		 uint32_t ref, uint32_t mask)
{
	assert(op == WAIT_REG_MEM_EQUAL ||
	       op == WAIT_REG_MEM_NOT_EQUAL ||
	       op == WAIT_REG_MEM_GREATER_OR_EQUAL);

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
	radeon_emit(cs, op | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref);	/* reference value */
	radeon_emit(cs, mask);	/* mask */
	radeon_emit(cs, 4);	/* poll interval */
}
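
/* Emit a full-range surface sync: ACQUIRE_MEM on compute rings and GFX9
 * (where the packet carries an extra CP_COHER_SIZE_HI/BASE_HI pair),
 * SURFACE_SYNC otherwise.
 */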
static void
si_emit_acquire_mem(struct radeon_cmdbuf *cs,
		    bool is_mec,
		    bool is_gfx9,
		    unsigned cp_coher_cntl)
{
	if (is_mec || is_gfx9) {
		uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, false) |
				PKT3_SHADER_TYPE_S(is_mec));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, hi_val);	/* CP_COHER_SIZE_HI */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	} else {
		/* ACQUIRE_MEM is only required on a compute ring. */
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, false));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	}
}
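
/* GFX10 cache maintenance: cache selection moved into the GCR_CNTL field,
 * so the flags are accumulated here and issued either through RELEASE_MEM
 * (when a CB/DB flush event is needed) or a GCR-carrying ACQUIRE_MEM.
 */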
static void
gfx10_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
			  enum chip_class chip_class,
			  uint32_t *flush_cnt,
			  uint64_t flush_va,
			  bool is_mec,
			  enum radv_cmd_flush_bits flush_bits,
			  uint64_t gfx9_eop_bug_va)
{
	uint32_t gcr_cntl = 0;
	unsigned cb_db_event = 0;

	/* We don't need these. */
	assert(!(flush_bits & (RADV_CMD_FLAG_VGT_STREAMOUT_SYNC)));

	if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
		gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
	if (flush_bits & RADV_CMD_FLAG_INV_SCACHE) {
		/* TODO: When writing to the SMEM L1 cache, we need to set SEQ
		 * to FORWARD when both L1 and L2 are written out (WB or INV).
		 */
		gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
	}
	if (flush_bits & RADV_CMD_FLAG_INV_VCACHE)
		gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
	if (flush_bits & RADV_CMD_FLAG_INV_L2) {
		/* Writeback and invalidate everything in L2. */
		gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) |
			    S_586_GLM_INV(1) | S_586_GLM_WB(1);
	} else if (flush_bits & RADV_CMD_FLAG_WB_L2) {
		/* Writeback but do not invalidate.
		 * GLM doesn't support WB alone. If WB is set, INV must be set too.
		 */
		gcr_cntl |= S_586_GL2_WB(1) |
			    S_586_GLM_WB(1) | S_586_GLM_INV(1);
	}

	/* TODO: Implement this new flag for GFX9+.
	else if (flush_bits & RADV_CMD_FLAG_INV_L2_METADATA)
		gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
	*/

	if (flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
		/* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_CB_META */
		if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
			/* Flush CMASK/FMASK/DCC. Will wait for idle later. */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) |
					EVENT_INDEX(0));
		}

		/* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_DB_META ? */
		if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
			/* Flush HTILE. Will wait for idle later. */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) |
					EVENT_INDEX(0));
		}

		/* First flush CB/DB, then L1/L2. */
		gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);

		if ((flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) ==
		    (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
			cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
		} else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
			cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
		} else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
			cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
		} else {
			assert(0);
		}
	}

	/* Wait for graphics shaders to go idle if requested. */
	if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(4)));
	}

	if (cb_db_event) {
		/* CB/DB flush and invalidate (or possibly just a wait for a
		 * meta flush) via RELEASE_MEM.
		 *
		 * Combine this with other cache flushes when possible; this
		 * requires affected shaders to be idle, so do it after the
		 * CS_PARTIAL_FLUSH before (VS/PS partial flushes are always
		 * executed first).
		 */
		/* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
		unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
		unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
		unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
		unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
		assert(G_586_GL2_US(gcr_cntl) == 0);
		assert(G_586_GL2_RANGE(gcr_cntl) == 0);
		assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
		unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
		unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
		unsigned gcr_seq = G_586_SEQ(gcr_cntl);

		gcr_cntl &= C_586_GLM_WB &
			    C_586_GLM_INV &
			    C_586_GLV_INV &
			    C_586_GL1_INV &
			    C_586_GL2_INV &
			    C_586_GL2_WB; /* keep SEQ */

		assert(flush_cnt);
		(*flush_cnt)++;

		si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event,
					   S_490_GLM_WB(glm_wb) |
					   S_490_GLM_INV(glm_inv) |
					   S_490_GLV_INV(glv_inv) |
					   S_490_GL1_INV(gl1_inv) |
					   S_490_GL2_INV(gl2_inv) |
					   S_490_GL2_WB(gl2_wb) |
					   S_490_SEQ(gcr_seq),
					   EOP_DST_SEL_MEM,
					   EOP_DATA_SEL_VALUE_32BIT,
					   flush_va, *flush_cnt,
					   gfx9_eop_bug_va);

		radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
				 *flush_cnt, 0xffffffff);
	}

	/* VGT state sync */
	if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}

	/* Ignore fields that only modify the behavior of other fields. */
	if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
		/* Flush caches and wait for the caches to assert idle.
		 * The cache flush is executed in the ME, but the PFP waits
		 * for completion.
		 */
		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
		radeon_emit(cs, 0);		/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0xffffff);	/* CP_COHER_SIZE_HI */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		radeon_emit(cs, gcr_cntl);	/* GCR_CNTL */
	} else if ((cb_db_event ||
		    (flush_bits & (RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
				   RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
				   RADV_CMD_FLAG_CS_PARTIAL_FLUSH)))
		   && !is_mec) {
		/* We need to ensure that PFP waits as well. */
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
				EVENT_INDEX(0));
	} else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
				EVENT_INDEX(0));
	}
}
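
/* Pre-GFX10 cache maintenance entry point (GFX10+ is forwarded below):
 * translate RADV_CMD_FLAG_* bits into CP_COHER_CNTL operations, EOP events
 * and partial flushes.
 */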
void
si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
		       enum chip_class chip_class,
		       uint32_t *flush_cnt,
		       uint64_t flush_va,
		       bool is_mec,
		       enum radv_cmd_flush_bits flush_bits,
		       uint64_t gfx9_eop_bug_va)
{
	unsigned cp_coher_cntl = 0;
	uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					     RADV_CMD_FLAG_FLUSH_AND_INV_DB);

	if (chip_class >= GFX10) {
		/* GFX10 cache flush handling is quite different. */
		gfx10_cs_emit_cache_flush(cs, chip_class, flush_cnt, flush_va,
					  is_mec, flush_bits, gfx9_eop_bug_va);
		return;
	}

	if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (flush_bits & RADV_CMD_FLAG_INV_SCACHE)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (chip_class <= GFX8) {
		if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
			cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				S_0085F0_CB0_DEST_BASE_ENA(1) |
				S_0085F0_CB1_DEST_BASE_ENA(1) |
				S_0085F0_CB2_DEST_BASE_ENA(1) |
				S_0085F0_CB3_DEST_BASE_ENA(1) |
				S_0085F0_CB4_DEST_BASE_ENA(1) |
				S_0085F0_CB5_DEST_BASE_ENA(1) |
				S_0085F0_CB6_DEST_BASE_ENA(1) |
				S_0085F0_CB7_DEST_BASE_ENA(1);

			/* Necessary for DCC */
			if (chip_class >= GFX8) {
				si_cs_emit_write_event_eop(cs,
							   chip_class,
							   is_mec,
							   V_028A90_FLUSH_AND_INV_CB_DATA_TS,
							   0,
							   EOP_DST_SEL_MEM,
							   EOP_DATA_SEL_DISCARD,
							   0, 0,
							   gfx9_eop_bug_va);
			}
		}
		if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
			cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				S_0085F0_DB_DEST_BASE_ENA(1);
		}
	}

	if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}

	if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}

	if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (chip_class == GFX9 && flush_cb_db) {
		unsigned cb_db_event, tc_flags;

		/* Set the CB/DB flush event. */
		cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;

		/* These are the only allowed combinations. If you need to
		 * do multiple operations at once, do them separately.
		 * All operations that invalidate L2 also seem to invalidate
		 * metadata. Volatile (VOL) and WC flushes are not listed here.
		 *
		 * TC    | TC_WB         = writeback & invalidate L2 & L1
		 * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
		 *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
		 * TC    | TC_NC         = invalidate L2 for MTYPE == NC
		 * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
		 * TCL1                  = invalidate L1
		 */
		tc_flags = EVENT_TC_ACTION_ENA |
			   EVENT_TC_MD_ACTION_ENA;

		/* Ideally flush TC together with CB/DB. */
		if (flush_bits & RADV_CMD_FLAG_INV_L2) {
			/* Writeback and invalidate everything in L2 & L1. */
			tc_flags = EVENT_TC_ACTION_ENA |
				   EVENT_TC_WB_ACTION_ENA;

			/* Clear the flags. */
			flush_bits &= ~(RADV_CMD_FLAG_INV_L2 |
					RADV_CMD_FLAG_WB_L2 |
					RADV_CMD_FLAG_INV_VCACHE);
		}

		assert(flush_cnt);
		(*flush_cnt)++;

		si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags,
					   EOP_DST_SEL_MEM,
					   EOP_DATA_SEL_VALUE_32BIT,
					   flush_va, *flush_cnt,
					   gfx9_eop_bug_va);
		radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
				 *flush_cnt, 0xffffffff);
	}

	/* VGT state sync */
	if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}

	/* VGT streamout state sync */
	if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if ((cp_coher_cntl ||
	     (flush_bits & (RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
			    RADV_CMD_FLAG_INV_VCACHE |
			    RADV_CMD_FLAG_INV_L2 |
			    RADV_CMD_FLAG_WB_L2))) &&
	    !is_mec) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	if ((flush_bits & RADV_CMD_FLAG_INV_L2) ||
	    (chip_class <= GFX7 && (flush_bits & RADV_CMD_FLAG_WB_L2))) {
		si_emit_acquire_mem(cs, is_mec, chip_class == GFX9,
				    cp_coher_cntl |
				    S_0085F0_TC_ACTION_ENA(1) |
				    S_0085F0_TCL1_ACTION_ENA(1) |
				    S_0301F0_TC_WB_ACTION_ENA(chip_class >= GFX8));
		cp_coher_cntl = 0;
	} else {
		if (flush_bits & RADV_CMD_FLAG_WB_L2) {
			/* WB = write-back
			 * NC = apply to non-coherent MTYPEs
			 *      (i.e. MTYPE <= 1, which is what we use everywhere)
			 *
			 * WB doesn't work without NC.
			 */
			si_emit_acquire_mem(cs, is_mec,
					    chip_class == GFX9,
					    cp_coher_cntl |
					    S_0301F0_TC_WB_ACTION_ENA(1) |
					    S_0301F0_TC_NC_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
		if (flush_bits & RADV_CMD_FLAG_INV_VCACHE) {
			si_emit_acquire_mem(cs, is_mec,
					    chip_class == GFX9,
					    cp_coher_cntl |
					    S_0085F0_TCL1_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
	}

	/* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
	 * Therefore, it should be last. Done in PFP.
	 */
	if (cp_coher_cntl)
		si_emit_acquire_mem(cs, is_mec, chip_class == GFX9, cp_coher_cntl);

	if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
				EVENT_INDEX(0));
	} else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
				EVENT_INDEX(0));
	}
}

void
si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
{
	bool is_compute = cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE;

	if (is_compute)
		cmd_buffer->state.flush_bits &= ~(RADV_CMD_FLAG_FLUSH_AND_INV_CB |
						  RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
						  RADV_CMD_FLAG_FLUSH_AND_INV_DB |
						  RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
						  RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
						  RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
						  RADV_CMD_FLAG_VGT_FLUSH |
						  RADV_CMD_FLAG_START_PIPELINE_STATS |
						  RADV_CMD_FLAG_STOP_PIPELINE_STATS);

	if (!cmd_buffer->state.flush_bits)
		return;

	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);

	si_cs_emit_cache_flush(cmd_buffer->cs,
			       cmd_buffer->device->physical_device->rad_info.chip_class,
			       &cmd_buffer->gfx9_fence_idx,
			       cmd_buffer->gfx9_fence_va,
			       radv_cmd_buffer_uses_mec(cmd_buffer),
			       cmd_buffer->state.flush_bits,
			       cmd_buffer->gfx9_eop_bug_va);

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_cmd_buffer_trace_emit(cmd_buffer);

	/* Clear the caches that have been flushed to avoid syncing too much
	 * when there are pending active queries.
	 */
	cmd_buffer->active_query_flush_bits &= ~cmd_buffer->state.flush_bits;

	cmd_buffer->state.flush_bits = 0;

	/* If the driver used a compute shader for resetting a query pool, it
	 * should be finished at this point.
	 */
	cmd_buffer->pending_reset_query = false;
}

/* sets the CP predication state using a boolean stored at va */
void
si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer,
			      bool draw_visible, uint64_t va)
{
	uint32_t op = 0;

	if (va) {
		op = PRED_OP(PREDICATION_OP_BOOL64);

		/* PREDICATION_DRAW_VISIBLE means that if the 32-bit value is
		 * zero, all rendering commands are discarded. Otherwise, they
		 * are discarded if the value is non zero.
		 */
		op |= draw_visible ? PREDICATION_DRAW_VISIBLE :
				     PREDICATION_DRAW_NOT_VISIBLE;
	}
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
		radeon_emit(cmd_buffer->cs, op);
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);
	} else {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, op | ((va >> 32) & 0xFF));
	}
}

/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC	(1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT	(1 << 1)
#define CP_DMA_USE_L2	(1 << 2)
#define CP_DMA_CLEAR	(1 << 3)

/* Alignment for optimal performance. */
#define SI_CPDMA_ALIGNMENT	32

/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct radv_cmd_buffer *cmd_buffer)
{
	unsigned max = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 ?
		       S_414_BYTE_COUNT_GFX9(~0u) :
		       S_414_BYTE_COUNT_GFX6(~0u);

	/* make it aligned for optimal performance */
	return max & ~(SI_CPDMA_ALIGNMENT - 1);
}

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
 * clear value.
 */
static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer,
			   uint64_t dst_va, uint64_t src_va,
			   unsigned size, unsigned flags)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint32_t header = 0, command = 0;

	assert(size <= cp_dma_max_byte_count(cmd_buffer));

	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
		command |= S_414_BYTE_COUNT_GFX9(size);
	else
		command |= S_414_BYTE_COUNT_GFX6(size);

	/* Sync flags. */
	if (flags & CP_DMA_SYNC)
		header |= S_411_CP_SYNC(1);
	else {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
			command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
		else
			command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
	}

	if (flags & CP_DMA_RAW_WAIT)
		command |= S_414_RAW_WAIT(1);

	/* Src and dst flags. */
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
	    !(flags & CP_DMA_CLEAR) &&
	    src_va == dst_va)
		header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2);

	if (flags & CP_DMA_CLEAR)
		header |= S_411_SRC_SEL(V_411_DATA);
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, cmd_buffer->state.predicating));
		radeon_emit(cs, header);
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);		/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, command);
	} else {
		assert(!(flags & CP_DMA_USE_L2));
		header |= S_411_SRC_ADDR_HI(src_va >> 32);
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, cmd_buffer->state.predicating));
		radeon_emit(cs, src_va);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, header);			/* SRC_ADDR_HI [15:0] + flags. */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, command);
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (flags & CP_DMA_SYNC) {
		if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
			radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
			radeon_emit(cs, 0);
		}

		/* CP will see the sync flag and wait for all DMAs to complete. */
		cmd_buffer->state.dma_is_busy = false;
	}

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_cmd_buffer_trace_emit(cmd_buffer);
}

void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
			unsigned size)
{
	uint64_t aligned_va = va & ~(SI_CPDMA_ALIGNMENT - 1);
	uint64_t aligned_size = ((va + size + SI_CPDMA_ALIGNMENT - 1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligned_va;

	si_emit_cp_dma(cmd_buffer, aligned_va, aligned_va,
		       aligned_size, CP_DMA_USE_L2);
}
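
/* Choose per-packet CP DMA flags: flush caches and wait for earlier DMAs
 * only on the first packet of a transfer, and raise the sync flag only on
 * the packet that completes it.
 */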
static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
			      uint64_t remaining_size, unsigned *flags)
{
	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (cmd_buffer->state.flush_bits) {
		si_emit_cache_flush(cmd_buffer);
		*flags |= CP_DMA_RAW_WAIT;
	}

	/* Do the synchronization after the last dma, so that all data
	 * is written to memory.
	 */
	if (byte_count == remaining_size)
		*flags |= CP_DMA_SYNC;
}
static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
{
	uint64_t va;
	uint32_t offset;
	unsigned dma_flags = 0;
	unsigned buf_size = SI_CPDMA_ALIGNMENT * 2;
	void *ptr;

	assert(size < SI_CPDMA_ALIGNMENT);

	radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr);

	va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);

	si_emit_cp_dma(cmd_buffer, va, va + SI_CPDMA_ALIGNMENT, size,
		       dma_flags);
}
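/* Sketch of how the copy below is split, assuming SI_CPDMA_ALIGNMENT == 32
 * (hypothetical values): src_va % 32 == 5 and size == 100 give
 * realign_size = 32 - (100 % 32) = 28 and skipped_size = 32 - 5 = 27, so
 * the main loop copies 73 bytes starting at the first aligned source
 * address, the 27-byte unaligned head is copied afterwards, and a final
 * 28-byte dummy copy realigns the DMA engine's counter.
 */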
void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
			   uint64_t src_va, uint64_t dest_va,
			   uint64_t size)
{
	uint64_t main_src_va, main_dest_va;
	uint64_t skipped_size = 0, realign_size = 0;

	/* Assume that we are not going to sync after the last DMA operation. */
	cmd_buffer->state.dma_is_busy = true;

	if (cmd_buffer->device->physical_device->rad_info.family <= CHIP_CARRIZO ||
	    cmd_buffer->device->physical_device->rad_info.family == CHIP_STONEY) {
		/* If the size is not aligned, we must add a dummy copy at the end
		 * just to align the internal counter. Otherwise, the DMA engine
		 * would slow down by an order of magnitude for following copies.
		 */
		if (size % SI_CPDMA_ALIGNMENT)
			realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

		/* If the copy begins unaligned, we must start copying from the next
		 * aligned block and the skipped part should be copied after everything
		 * else has been copied. Only the src alignment matters, not dst.
		 */
		if (src_va % SI_CPDMA_ALIGNMENT) {
			skipped_size = SI_CPDMA_ALIGNMENT - (src_va % SI_CPDMA_ALIGNMENT);
			/* The main part will be skipped if the size is too small. */
			skipped_size = MIN2(skipped_size, size);
			size -= skipped_size;
		}
	}

	main_src_va = src_va + skipped_size;
	main_dest_va = dest_va + skipped_size;

	while (size) {
		unsigned dma_flags = 0;
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));

		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
			/* DMA operations via L2 are coherent and faster.
			 * TODO: GFX7-GFX9 should also support this but it
			 * requires tests/benchmarks.
			 */
			dma_flags |= CP_DMA_USE_L2;
		}

		si_cp_dma_prepare(cmd_buffer, byte_count,
				  size + skipped_size + realign_size,
				  &dma_flags);

		dma_flags &= ~CP_DMA_SYNC;

		si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
			       byte_count, dma_flags);

		size -= byte_count;
		main_src_va += byte_count;
		main_dest_va += byte_count;
	}

	if (skipped_size) {
		unsigned dma_flags = 0;

		si_cp_dma_prepare(cmd_buffer, skipped_size,
				  size + skipped_size + realign_size,
				  &dma_flags);

		si_emit_cp_dma(cmd_buffer, dest_va, src_va,
			       skipped_size, dma_flags);
	}

	if (realign_size)
		si_cp_dma_realign_engine(cmd_buffer, realign_size);
}
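/* CP DMA clears replicate the 32-bit `value` across the destination, which
 * is why both the address and the size must be 4-byte aligned (asserted
 * below).
 */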
void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
			    uint64_t size, unsigned value)
{
	if (!size)
		return;

	assert(va % 4 == 0 && size % 4 == 0);

	/* Assume that we are not going to sync after the last DMA operation. */
	cmd_buffer->state.dma_is_busy = true;

	while (size) {
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
		unsigned dma_flags = CP_DMA_CLEAR;

		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
			/* DMA operations via L2 are coherent and faster.
			 * TODO: GFX7-GFX9 should also support this but it
			 * requires tests/benchmarks.
			 */
			dma_flags |= CP_DMA_USE_L2;
		}

		si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);

		/* Emit the clear packet. */
		si_emit_cp_dma(cmd_buffer, va, value, byte_count,
			       dma_flags);

		size -= byte_count;
		va += byte_count;
	}
}
void si_cp_dma_wait_for_idle(struct radv_cmd_buffer *cmd_buffer)
{
	if (cmd_buffer->device->physical_device->rad_info.chip_class < GFX7)
		return;

	if (!cmd_buffer->state.dma_is_busy)
		return;

	/* Issue a dummy DMA that copies zero bytes.
	 *
	 * The DMA engine will see that there's no work to do and skip this
	 * DMA request, however, the CP will see the sync flag and still wait
	 * for all DMAs to complete.
	 */
	si_emit_cp_dma(cmd_buffer, 0, 0, 0, CP_DMA_SYNC);

	cmd_buffer->state.dma_is_busy = false;
}
/* For MSAA sample positions. */
#define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y) \
	((((unsigned)(s0x) & 0xf) << 0)  | (((unsigned)(s0y) & 0xf) << 4)  | \
	 (((unsigned)(s1x) & 0xf) << 8)  | (((unsigned)(s1y) & 0xf) << 12) | \
	 (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) | \
	 (((unsigned)(s3x) & 0xf) << 24) | (((unsigned)(s3y) & 0xf) << 28))
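/* Each coordinate is a signed 4-bit offset in 1/16th-pixel units, so e.g.
 * FILL_SREG(4, 4, -4, -4, 0, 0, 0, 0) packs to
 * (0x4 << 0) | (0x4 << 4) | (0xc << 8) | (0xc << 12) = 0x0000cc44,
 * since (unsigned)-4 & 0xf == 0xc.
 */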
/* For obtaining location coordinates from registers */
#define SEXT4(x)               ((int)((x) | ((x) & 0x8 ? 0xfffffff0 : 0)))
#define GET_SFIELD(reg, index) SEXT4(((reg) >> ((index) * 4)) & 0xf)
#define GET_SX(reg, index)     GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2)
#define GET_SY(reg, index)     GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2 + 1)
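/* SEXT4() sign-extends a 4-bit field: SEXT4(0xc) == -4, SEXT4(0x4) == 4.
 * GET_SX/GET_SY then pick the x or y nibble of sample `index` out of the
 * packed words above, e.g. GET_SX(&sample_locs_2x, 1) reads bits 8..11.
 */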
static const uint32_t sample_locs_1x =
	FILL_SREG(0, 0, 0, 0, 0, 0, 0, 0);
static const unsigned max_dist_1x = 0;
static const uint64_t centroid_priority_1x = 0x0000000000000000ull;

/* 2xMSAA */
static const uint32_t sample_locs_2x =
	FILL_SREG(4, 4, -4, -4, 0, 0, 0, 0);
static const unsigned max_dist_2x = 4;
static const uint64_t centroid_priority_2x = 0x1010101010101010ull;

/* 4xMSAA */
static const uint32_t sample_locs_4x =
	FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6);
static const unsigned max_dist_4x = 6;
static const uint64_t centroid_priority_4x = 0x3210321032103210ull;

/* 8xMSAA */
static const uint32_t sample_locs_8x[] = {
	FILL_SREG( 1,-3, -1, 3, 5, 1, -3,-5),
	FILL_SREG(-5, 5, -7,-1, 3, 7, 7,-7),
	/* The following are unused by hardware, but we emit them to IBs
	 * instead of multiple SET_CONTEXT_REG packets. */
	0,
	0,
};
static const unsigned max_dist_8x = 7;
static const uint64_t centroid_priority_8x = 0x7654321076543210ull;
unsigned radv_get_default_max_sample_dist(int log_samples)
{
	unsigned max_dist[] = {
		max_dist_1x,
		max_dist_2x,
		max_dist_4x,
		max_dist_8x,
	};
	return max_dist[log_samples];
}
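/* Callers index by log2 of the sample count, e.g.
 * radv_get_default_max_sample_dist(3) returns max_dist_8x (7) for 8x MSAA.
 */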
void radv_emit_default_sample_locations(struct radeon_cmdbuf *cs, int nr_samples)
{
	switch (nr_samples) {
	default:
	case 1:
		radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
		radeon_emit(cs, (uint32_t)centroid_priority_1x);
		radeon_emit(cs, centroid_priority_1x >> 32);
		radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_1x);
		radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_1x);
		radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_1x);
		radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_1x);
		break;
	case 2:
		radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
		radeon_emit(cs, (uint32_t)centroid_priority_2x);
		radeon_emit(cs, centroid_priority_2x >> 32);
		radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_2x);
		radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_2x);
		radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_2x);
		radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_2x);
		break;
	case 4:
		radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
		radeon_emit(cs, (uint32_t)centroid_priority_4x);
		radeon_emit(cs, centroid_priority_4x >> 32);
		radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_4x);
		radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_4x);
		radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_4x);
		radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_4x);
		break;
	case 8:
		radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
		radeon_emit(cs, (uint32_t)centroid_priority_8x);
		radeon_emit(cs, centroid_priority_8x >> 32);
		radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
		radeon_emit_array(cs, sample_locs_8x, 4);
		radeon_emit_array(cs, sample_locs_8x, 4);
		radeon_emit_array(cs, sample_locs_8x, 4);
		radeon_emit_array(cs, sample_locs_8x, 2);
		break;
	}
}
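/* For 8x, the four pixel groups (X0Y0/X1Y0/X0Y1/X1Y1) are written as one
 * 14-dword SET_CONTEXT_REG burst: 4 + 4 + 4 + 2 dwords of sample_locs_8x.
 * Eight samples need only two dwords per group, so the last group omits
 * the trailing padding dwords.
 */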
static void radv_get_sample_position(struct radv_device *device,
				     unsigned sample_count,
				     unsigned sample_index, float *out_value)
{
	const uint32_t *sample_locs;

	switch (sample_count) {
	case 1:
	default:
		sample_locs = &sample_locs_1x;
		break;
	case 2:
		sample_locs = &sample_locs_2x;
		break;
	case 4:
		sample_locs = &sample_locs_4x;
		break;
	case 8:
		sample_locs = sample_locs_8x;
		break;
	}

	out_value[0] = (GET_SX(sample_locs, sample_index) + 8) / 16.0f;
	out_value[1] = (GET_SY(sample_locs, sample_index) + 8) / 16.0f;
}
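/* The +8 / 16.0f conversion maps the signed 1/16th-pixel offsets to the
 * [0,1) range Vulkan reports: for 2x MSAA, sample 0 is (4+8)/16 = 0.75 on
 * both axes and sample 1 is (-4+8)/16 = 0.25, i.e. (0.75, 0.75) and
 * (0.25, 0.25).
 */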
void radv_device_init_msaa(struct radv_device *device)
{
	int i;

	radv_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);

	for (i = 0; i < 2; i++)
		radv_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
	for (i = 0; i < 4; i++)
		radv_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
	for (i = 0; i < 8; i++)
		radv_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
}