radv/gfx10: use L2 for DMA copy/fill operations
[mesa.git] / src / amd / vulkan / si_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based on si_state.c
6 * Copyright © 2015 Advanced Micro Devices, Inc.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 /* command buffer handling for AMD GCN */
29
30 #include "radv_private.h"
31 #include "radv_shader.h"
32 #include "radv_cs.h"
33 #include "sid.h"
34 #include "radv_util.h"
35 #include "main/macros.h"
36
37 static void
38 si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
39 struct radeon_cmdbuf *cs,
40 unsigned raster_config,
41 unsigned raster_config_1)
42 {
43 unsigned num_se = MAX2(physical_device->rad_info.max_se, 1);
44 unsigned raster_config_se[4];
45 unsigned se;
46
47 ac_get_harvested_configs(&physical_device->rad_info,
48 raster_config,
49 &raster_config_1,
50 raster_config_se);
51
52 for (se = 0; se < num_se; se++) {
53 /* GRBM_GFX_INDEX has a different offset on GFX6 and GFX7+ */
54 if (physical_device->rad_info.chip_class < GFX7)
55 radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
56 S_00802C_SE_INDEX(se) |
57 S_00802C_SH_BROADCAST_WRITES(1) |
58 S_00802C_INSTANCE_BROADCAST_WRITES(1));
59 else
60 radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
61 S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
62 S_030800_INSTANCE_BROADCAST_WRITES(1));
63 radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se[se]);
64 }
65
66 /* GRBM_GFX_INDEX has a different offset on GFX6 and GFX7+ */
67 if (physical_device->rad_info.chip_class < GFX7)
68 radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
69 S_00802C_SE_BROADCAST_WRITES(1) |
70 S_00802C_SH_BROADCAST_WRITES(1) |
71 S_00802C_INSTANCE_BROADCAST_WRITES(1));
72 else
73 radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
74 S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
75 S_030800_INSTANCE_BROADCAST_WRITES(1));
76
77 if (physical_device->rad_info.chip_class >= GFX7)
78 radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
79 }
80
81 void
82 si_emit_compute(struct radv_physical_device *physical_device,
83 struct radeon_cmdbuf *cs)
84 {
85 radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
86 radeon_emit(cs, 0);
87 radeon_emit(cs, 0);
88 radeon_emit(cs, 0);
89
90 radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
91 /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
92 * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
93 radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
94 radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
95
96 if (physical_device->rad_info.chip_class >= GFX7) {
97 /* Also set R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
98 radeon_set_sh_reg_seq(cs,
99 R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
100 radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
101 S_00B858_SH1_CU_EN(0xffff));
102 radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
103 S_00B858_SH1_CU_EN(0xffff));
104 }
105
106 if (physical_device->rad_info.chip_class >= GFX10)
107 radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, 0);
108
109 /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
110 * and is now per pipe, so it should be handled in the
111 * kernel if we want to use something other than the default value,
112 * which is now 0x22f.
113 */
114 if (physical_device->rad_info.chip_class <= GFX6) {
115 /* XXX: This should be:
116 * (number of compute units) * 4 * (waves per simd) - 1 */
117
118 radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
119 0x190 /* Default value */);
120 }
121 }
122
123 /* 12.4 fixed-point */
124 static unsigned radv_pack_float_12p4(float x)
125 {
126 return x <= 0 ? 0 :
127 x >= 4096 ? 0xffff : x * 16;
128 }
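/* Editorial note (not part of the original file): a worked example of the
 * 12.4 fixed-point packing above. The low 4 bits are a binary fraction, so
 * radv_pack_float_12p4(2.5) = 2.5 * 16 = 40 (0x028); inputs of 4096 or more
 * saturate to 0xffff and negative inputs clamp to 0. */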
129
130 static void
131 si_set_raster_config(struct radv_physical_device *physical_device,
132 struct radeon_cmdbuf *cs)
133 {
134 unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
135 unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
136 unsigned raster_config, raster_config_1;
137
138 ac_get_raster_config(&physical_device->rad_info,
139 &raster_config,
140 &raster_config_1, NULL);
141
142 /* Always use the default config when all backends are enabled
143 * (or when we failed to determine the enabled backends).
144 */
145 if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
146 radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG,
147 raster_config);
148 if (physical_device->rad_info.chip_class >= GFX7)
149 radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
150 raster_config_1);
151 } else {
152 si_write_harvested_raster_configs(physical_device, cs,
153 raster_config,
154 raster_config_1);
155 }
156 }
157
158 void
159 si_emit_graphics(struct radv_physical_device *physical_device,
160 struct radeon_cmdbuf *cs)
161 {
162 int i;
163
164 radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
165 radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1));
166 radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1));
167
168 if (physical_device->has_clear_state) {
169 radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
170 radeon_emit(cs, 0);
171 }
172
173 if (physical_device->rad_info.chip_class <= GFX8)
174 si_set_raster_config(physical_device, cs);
175
176 radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
177 if (!physical_device->has_clear_state)
178 radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
179
180 /* FIXME calculate these values somehow ??? */
181 if (physical_device->rad_info.chip_class <= GFX8) {
182 radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
183 radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
184 }
185
186 if (!physical_device->has_clear_state) {
187 radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);
188 radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
189 radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
190 }
191
192 radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
193 if (!physical_device->has_clear_state)
194 radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
195 if (physical_device->rad_info.chip_class < GFX7)
196 radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
197 S_008A14_CLIP_VTX_REORDER_ENA(1));
198
199 if (!physical_device->has_clear_state)
200 radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
201
202 /* CLEAR_STATE doesn't clear these correctly on certain generations.
203 * I don't know why. Deduced by trial and error.
204 */
205 if (physical_device->rad_info.chip_class <= GFX7 || !physical_device->has_clear_state) {
206 radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
207 radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL,
208 S_028204_WINDOW_OFFSET_DISABLE(1));
209 radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL,
210 S_028240_WINDOW_OFFSET_DISABLE(1));
211 radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
212 S_028244_BR_X(16384) | S_028244_BR_Y(16384));
213 radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
214 radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
215 S_028034_BR_X(16384) | S_028034_BR_Y(16384));
216 }
217
218 if (!physical_device->has_clear_state) {
219 for (i = 0; i < 16; i++) {
220 radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
221 radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
222 }
223 }
224
225 if (!physical_device->has_clear_state) {
226 radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
227 radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
228 /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on GFX6 */
229 radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
230 radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);
231 radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
232 radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
233 radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
234 }
235
236 radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
237 S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
238 S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));
239
240 if (physical_device->rad_info.chip_class >= GFX10) {
241 radeon_set_context_reg(cs, R_028A98_VGT_DRAW_PAYLOAD_CNTL, 0);
242 radeon_set_uconfig_reg(cs, R_030964_GE_MAX_VTX_INDX, ~0);
243 radeon_set_uconfig_reg(cs, R_030924_GE_MIN_VTX_INDX, 0);
244 radeon_set_uconfig_reg(cs, R_030928_GE_INDX_OFFSET, 0);
245 radeon_set_uconfig_reg(cs, R_03097C_GE_STEREO_CNTL, 0);
246 radeon_set_uconfig_reg(cs, R_030988_GE_USER_VGPR_EN, 0);
247 } else if (physical_device->rad_info.chip_class == GFX9) {
248 radeon_set_uconfig_reg(cs, R_030920_VGT_MAX_VTX_INDX, ~0);
249 radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0);
250 radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0);
251 } else {
252 /* These registers, when written, also overwrite the
253 * CLEAR_STATE context, so we can't rely on CLEAR_STATE setting
254 * them. It would be an issue if there was another UMD
255 * changing them.
256 */
257 radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
258 radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
259 radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
260 }
261
262 if (physical_device->rad_info.chip_class >= GFX7) {
263 if (physical_device->rad_info.chip_class >= GFX10) {
264 /* Logical CUs 16 - 31 */
265 radeon_set_sh_reg_idx(physical_device, cs, R_00B404_SPI_SHADER_PGM_RSRC4_HS,
266 3, S_00B404_CU_EN(0xffff));
267 radeon_set_sh_reg_idx(physical_device, cs, R_00B104_SPI_SHADER_PGM_RSRC4_VS,
268 3, S_00B104_CU_EN(0xffff));
269 radeon_set_sh_reg_idx(physical_device, cs, R_00B004_SPI_SHADER_PGM_RSRC4_PS,
270 3, S_00B004_CU_EN(0xffff));
271 }
272
273 if (physical_device->rad_info.chip_class >= GFX9) {
274 radeon_set_sh_reg_idx(physical_device, cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
275 3, S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F));
276 } else {
277 radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS,
278 S_00B51C_CU_EN(0xffff) | S_00B51C_WAVE_LIMIT(0x3F));
279 radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
280 S_00B41C_WAVE_LIMIT(0x3F));
281 radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES,
282 S_00B31C_CU_EN(0xffff) | S_00B31C_WAVE_LIMIT(0x3F));
283 /* If this is 0, Bonaire can hang even if GS isn't being used.
284 * Other chips are unaffected. These are suboptimal values,
285 * but we don't use on-chip GS.
286 */
287 radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
288 S_028A44_ES_VERTS_PER_SUBGRP(64) |
289 S_028A44_GS_PRIMS_PER_SUBGRP(4));
290 }
291
292 /* Compute LATE_ALLOC_VS.LIMIT. */
293 unsigned num_cu_per_sh = physical_device->rad_info.num_good_cu_per_sh;
294 unsigned late_alloc_limit; /* The limit is per SH. */
295
296 if (physical_device->rad_info.family == CHIP_KABINI) {
297 late_alloc_limit = 0; /* Potential hang on Kabini. */
298 } else if (num_cu_per_sh <= 4) {
299 /* Too few available compute units per SH. Disallowing
300 * VS to run on one CU could hurt us more than late VS
301 * allocation would help.
302 *
303 * 2 is the highest safe number that allows us to keep
304 * all CUs enabled.
305 */
306 late_alloc_limit = 2;
307 } else {
308 /* This is a good initial value, allowing 1 late_alloc
309 * wave per SIMD on num_cu - 2.
310 */
311 late_alloc_limit = (num_cu_per_sh - 2) * 4;
312 }
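/* Editorial example (assumed numbers, not from the source): with
 * num_cu_per_sh = 10 the branch above gives late_alloc_limit =
 * (10 - 2) * 4 = 32, i.e. roughly one late-alloc wave per SIMD on
 * eight of the ten CUs, leaving headroom on the remaining two. */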
313
314 unsigned cu_mask_vs = 0xffff;
315 unsigned cu_mask_gs = 0xffff;
316
317 if (late_alloc_limit > 2) {
318 if (physical_device->rad_info.chip_class >= GFX10) {
319 /* CU2 & CU3 disabled because of the dual CU design */
320 cu_mask_vs = 0xfff3;
321 cu_mask_gs = 0xfff3; /* NGG only */
322 } else {
323 cu_mask_vs = 0xfffe; /* 1 CU disabled */
324 }
325 }
326
327 radeon_set_sh_reg_idx(physical_device, cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
328 3, S_00B118_CU_EN(cu_mask_vs) |
329 S_00B118_WAVE_LIMIT(0x3F));
330 radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS,
331 S_00B11C_LIMIT(late_alloc_limit));
332
333 radeon_set_sh_reg_idx(physical_device, cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
334 3, S_00B21C_CU_EN(cu_mask_gs) | S_00B21C_WAVE_LIMIT(0x3F));
335
336 if (physical_device->rad_info.chip_class >= GFX10) {
337 radeon_set_sh_reg_idx(physical_device, cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
338 3, S_00B204_CU_EN(0xffff) |
339 S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(late_alloc_limit));
340 }
341
342 radeon_set_sh_reg_idx(physical_device, cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
343 3, S_00B01C_CU_EN(0xffff) | S_00B01C_WAVE_LIMIT(0x3F));
344 }
345
346 if (physical_device->rad_info.chip_class >= GFX10) {
347 /* Break up a pixel wave if it contains deallocs for more than
348 * half the parameter cache.
349 *
350 * To avoid a deadlock where pixel waves aren't launched
351 * because they're waiting for more pixels while the frontend
352 * is stuck waiting for PC space, the maximum allowed value is
353 * the size of the PC minus the largest possible allocation for
354 * a single primitive shader subgroup.
355 */
356 radeon_set_context_reg(cs, R_028C50_PA_SC_NGG_MODE_CNTL,
357 S_028C50_MAX_DEALLOCS_IN_WAVE(512));
358 radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
359 radeon_set_context_reg(cs, R_02835C_PA_SC_TILE_STEERING_OVERRIDE,
360 physical_device->rad_info.pa_sc_tile_steering_override);
361 radeon_set_context_reg(cs, R_02807C_DB_RMI_L2_CACHE_CONTROL,
362 S_02807C_Z_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
363 S_02807C_S_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
364 S_02807C_HTILE_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
365 S_02807C_ZPCPSD_WR_POLICY(V_02807C_CACHE_STREAM_WR) |
366 S_02807C_Z_RD_POLICY(V_02807C_CACHE_NOA_RD) |
367 S_02807C_S_RD_POLICY(V_02807C_CACHE_NOA_RD) |
368 S_02807C_HTILE_RD_POLICY(V_02807C_CACHE_NOA_RD));
369
370 radeon_set_context_reg(cs, R_028410_CB_RMI_GL2_CACHE_CONTROL,
371 S_028410_CMASK_WR_POLICY(V_028410_CACHE_STREAM_WR) |
372 S_028410_FMASK_WR_POLICY(V_028410_CACHE_STREAM_WR) |
373 S_028410_DCC_WR_POLICY(V_028410_CACHE_STREAM_WR) |
374 S_028410_COLOR_WR_POLICY(V_028410_CACHE_STREAM_WR) |
375 S_028410_CMASK_RD_POLICY(V_028410_CACHE_NOA_RD) |
376 S_028410_FMASK_RD_POLICY(V_028410_CACHE_NOA_RD) |
377 S_028410_DCC_RD_POLICY(V_028410_CACHE_NOA_RD) |
378 S_028410_COLOR_RD_POLICY(V_028410_CACHE_NOA_RD));
379 radeon_set_context_reg(cs, R_028428_CB_COVERAGE_OUT_CONTROL, 0);
380
381 radeon_set_sh_reg(cs, R_00B0C0_SPI_SHADER_REQ_CTRL_PS,
382 S_00B0C0_SOFT_GROUPING_EN(1) |
383 S_00B0C0_NUMBER_OF_REQUESTS_PER_CU(4 - 1));
384 radeon_set_sh_reg(cs, R_00B1C0_SPI_SHADER_REQ_CTRL_VS, 0);
385 }
386
387 if (physical_device->rad_info.chip_class >= GFX8) {
388 uint32_t vgt_tess_distribution;
389
390 vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
391 S_028B50_ACCUM_TRI(11) |
392 S_028B50_ACCUM_QUAD(11) |
393 S_028B50_DONUT_SPLIT(16);
394
395 if (physical_device->rad_info.family == CHIP_FIJI ||
396 physical_device->rad_info.family >= CHIP_POLARIS10)
397 vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);
398
399 radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
400 vgt_tess_distribution);
401 } else if (!physical_device->has_clear_state) {
402 radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
403 radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
404 }
405
406 if (physical_device->rad_info.chip_class >= GFX9) {
407 unsigned num_se = physical_device->rad_info.max_se;
408 unsigned pc_lines = 0;
409 unsigned max_alloc_count = 0;
410
411 switch (physical_device->rad_info.family) {
412 case CHIP_VEGA10:
413 case CHIP_VEGA12:
414 case CHIP_VEGA20:
415 pc_lines = 4096;
416 break;
417 case CHIP_RAVEN:
418 case CHIP_RAVEN2:
419 case CHIP_NAVI10:
420 case CHIP_NAVI12:
421 pc_lines = 1024;
422 break;
423 case CHIP_NAVI14:
424 pc_lines = 512;
425 break;
426 default:
427 assert(0);
428 }
429
430 if (physical_device->rad_info.chip_class >= GFX10) {
431 max_alloc_count = pc_lines / 3;
432 } else {
433 max_alloc_count = MIN2(128, pc_lines / (4 * num_se));
434 }
435
436 radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1,
437 S_028C48_MAX_ALLOC_COUNT(max_alloc_count) |
438 S_028C48_MAX_PRIM_PER_BATCH(1023));
439 radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
440 S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
441 radeon_set_uconfig_reg(cs, R_030968_VGT_INSTANCE_BASE_ID, 0);
442 }
443
444 unsigned tmp = (unsigned)(1.0 * 8.0);
445 radeon_set_context_reg_seq(cs, R_028A00_PA_SU_POINT_SIZE, 1);
446 radeon_emit(cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
447 radeon_set_context_reg_seq(cs, R_028A04_PA_SU_POINT_MINMAX, 1);
448 radeon_emit(cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
449 S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2)));
450
451 if (!physical_device->has_clear_state) {
452 radeon_set_context_reg(cs, R_028004_DB_COUNT_CONTROL,
453 S_028004_ZPASS_INCREMENT_DISABLE(1));
454 }
455
456 /* Enable the Polaris small primitive filter control.
457 * XXX: There is possibly an issue when MSAA is off (see RadeonSI
458 * has_msaa_sample_loc_bug). But this doesn't seem to regress anything,
459 * and AMDVLK doesn't have a workaround either.
460 */
461 if (physical_device->rad_info.family >= CHIP_POLARIS10) {
462 unsigned small_prim_filter_cntl =
463 S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
464 /* Workaround for a hw line bug. */
465 S_028830_LINE_FILTER_DISABLE(physical_device->rad_info.family <= CHIP_POLARIS12);
466
467 radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL,
468 small_prim_filter_cntl);
469 }
470
471 si_emit_compute(physical_device, cs);
472 }
473
474 void
475 cik_create_gfx_config(struct radv_device *device)
476 {
477 struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, RING_GFX);
478 if (!cs)
479 return;
480
481 si_emit_graphics(device->physical_device, cs);
482
483 while (cs->cdw & 7) {
484 if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
485 radeon_emit(cs, 0x80000000);
486 else
487 radeon_emit(cs, 0xffff1000);
488 }
489
490 device->gfx_init = device->ws->buffer_create(device->ws,
491 cs->cdw * 4, 4096,
492 RADEON_DOMAIN_GTT,
493 RADEON_FLAG_CPU_ACCESS|
494 RADEON_FLAG_NO_INTERPROCESS_SHARING |
495 RADEON_FLAG_READ_ONLY,
496 RADV_BO_PRIORITY_CS);
497 if (!device->gfx_init)
498 goto fail;
499
500 void *map = device->ws->buffer_map(device->gfx_init);
501 if (!map) {
502 device->ws->buffer_destroy(device->gfx_init);
503 device->gfx_init = NULL;
504 goto fail;
505 }
506 memcpy(map, cs->buf, cs->cdw * 4);
507
508 device->ws->buffer_unmap(device->gfx_init);
509 device->gfx_init_size_dw = cs->cdw;
510 fail:
511 device->ws->cs_destroy(cs);
512 }
513
514 static void
515 get_viewport_xform(const VkViewport *viewport,
516 float scale[3], float translate[3])
517 {
518 float x = viewport->x;
519 float y = viewport->y;
520 float half_width = 0.5f * viewport->width;
521 float half_height = 0.5f * viewport->height;
522 double n = viewport->minDepth;
523 double f = viewport->maxDepth;
524
525 scale[0] = half_width;
526 translate[0] = half_width + x;
527 scale[1] = half_height;
528 translate[1] = half_height + y;
529
530 scale[2] = (f - n);
531 translate[2] = n;
532 }
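/* Editorial example (values assumed for illustration): for a VkViewport with
 * x = 0, y = 0, width = 1920, height = 1080, minDepth = 0.0, maxDepth = 1.0
 * the transform above yields scale = {960, 540, 1} and translate = {960, 540, 0},
 * i.e. NDC (-1..1, -1..1, 0..1) maps to window coordinates
 * (0..1920, 0..1080, 0..1). */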
533
534 void
535 si_write_viewport(struct radeon_cmdbuf *cs, int first_vp,
536 int count, const VkViewport *viewports)
537 {
538 int i;
539
540 assert(count);
541 radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
542 first_vp * 4 * 6, count * 6);
543
544 for (i = 0; i < count; i++) {
545 float scale[3], translate[3];
546
547
548 get_viewport_xform(&viewports[i], scale, translate);
549 radeon_emit(cs, fui(scale[0]));
550 radeon_emit(cs, fui(translate[0]));
551 radeon_emit(cs, fui(scale[1]));
552 radeon_emit(cs, fui(translate[1]));
553 radeon_emit(cs, fui(scale[2]));
554 radeon_emit(cs, fui(translate[2]));
555 }
556
557 radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 +
558 first_vp * 4 * 2, count * 2);
559 for (i = 0; i < count; i++) {
560 float zmin = MIN2(viewports[i].minDepth, viewports[i].maxDepth);
561 float zmax = MAX2(viewports[i].minDepth, viewports[i].maxDepth);
562 radeon_emit(cs, fui(zmin));
563 radeon_emit(cs, fui(zmax));
564 }
565 }
566
567 static VkRect2D si_scissor_from_viewport(const VkViewport *viewport)
568 {
569 float scale[3], translate[3];
570 VkRect2D rect;
571
572 get_viewport_xform(viewport, scale, translate);
573
574 rect.offset.x = translate[0] - fabs(scale[0]);
575 rect.offset.y = translate[1] - fabs(scale[1]);
576 rect.extent.width = ceilf(translate[0] + fabs(scale[0])) - rect.offset.x;
577 rect.extent.height = ceilf(translate[1] + fabs(scale[1])) - rect.offset.y;
578
579 return rect;
580 }
581
582 static VkRect2D si_intersect_scissor(const VkRect2D *a, const VkRect2D *b) {
583 VkRect2D ret;
584 ret.offset.x = MAX2(a->offset.x, b->offset.x);
585 ret.offset.y = MAX2(a->offset.y, b->offset.y);
586 ret.extent.width = MIN2(a->offset.x + a->extent.width,
587 b->offset.x + b->extent.width) - ret.offset.x;
588 ret.extent.height = MIN2(a->offset.y + a->extent.height,
589 b->offset.y + b->extent.height) - ret.offset.y;
590 return ret;
591 }
592
593 void
594 si_write_scissors(struct radeon_cmdbuf *cs, int first,
595 int count, const VkRect2D *scissors,
596 const VkViewport *viewports, bool can_use_guardband)
597 {
598 int i;
599 float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY;
600 const float max_range = 32767.0f;
601 if (!count)
602 return;
603
604 radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
605 for (i = 0; i < count; i++) {
606 VkRect2D viewport_scissor = si_scissor_from_viewport(viewports + i);
607 VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);
608
609 get_viewport_xform(viewports + i, scale, translate);
610 scale[0] = fabsf(scale[0]);
611 scale[1] = fabsf(scale[1]);
612
613 if (scale[0] < 0.5)
614 scale[0] = 0.5;
615 if (scale[1] < 0.5)
616 scale[1] = 0.5;
617
618 guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
619 guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);
620
621 radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
622 S_028250_TL_Y(scissor.offset.y) |
623 S_028250_WINDOW_OFFSET_DISABLE(1));
624 radeon_emit(cs, S_028254_BR_X(scissor.offset.x + scissor.extent.width) |
625 S_028254_BR_Y(scissor.offset.y + scissor.extent.height));
626 }
627 if (!can_use_guardband) {
628 guardband_x = 1.0;
629 guardband_y = 1.0;
630 }
631
632 radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
633 radeon_emit(cs, fui(guardband_y));
634 radeon_emit(cs, fui(1.0));
635 radeon_emit(cs, fui(guardband_x));
636 radeon_emit(cs, fui(1.0));
637 }
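/* Editorial note: a worked guardband example under assumed values. With a
 * single 1920x1080 viewport at the origin, scale[0] = 960 and translate[0] = 960,
 * so guardband_x = (32767 - 960) / 960 ≈ 33.1, meaning clipping can be skipped
 * for primitives extending up to roughly 33x the viewport half-width outside it.
 * When can_use_guardband is false, both factors fall back to 1.0. */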
638
639 static inline unsigned
640 radv_prims_for_vertices(struct radv_prim_vertex_count *info, unsigned num)
641 {
642 if (num == 0)
643 return 0;
644
645 if (info->incr == 0)
646 return 0;
647
648 if (num < info->min)
649 return 0;
650
651 return 1 + ((num - info->min) / info->incr);
652 }
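/* Editorial example (assumed topology values): for a triangle list,
 * info->min = 3 and info->incr = 3, so 9 vertices give
 * 1 + (9 - 3) / 3 = 3 primitives; for a triangle strip (min = 3, incr = 1),
 * 7 vertices give 1 + (7 - 3) / 1 = 5 primitives. */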
653
654 uint32_t
655 si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
656 bool instanced_draw, bool indirect_draw,
657 bool count_from_stream_output,
658 uint32_t draw_vertex_count)
659 {
660 enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
661 enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
662 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
663 const unsigned max_primgroup_in_wave = 2;
664 /* SWITCH_ON_EOP(0) is always preferable. */
665 bool wd_switch_on_eop = false;
666 bool ia_switch_on_eop = false;
667 bool ia_switch_on_eoi = false;
668 bool partial_vs_wave = false;
669 bool partial_es_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_es_wave;
670 bool multi_instances_smaller_than_primgroup;
671
672 multi_instances_smaller_than_primgroup = indirect_draw;
673 if (!multi_instances_smaller_than_primgroup && instanced_draw) {
674 uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
675 if (num_prims < cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.primgroup_size)
676 multi_instances_smaller_than_primgroup = true;
677 }
678
679 ia_switch_on_eoi = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.ia_switch_on_eoi;
680 partial_vs_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_vs_wave;
681
682 if (chip_class >= GFX7) {
683 wd_switch_on_eop = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.wd_switch_on_eop;
684
685 /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
686 * We don't know that for indirect drawing, so treat it as
687 * always problematic. */
688 if (family == CHIP_HAWAII &&
689 (instanced_draw || indirect_draw))
690 wd_switch_on_eop = true;
691
692 /* Performance recommendation for 4 SE GFX7-8 parts if
693 * instances are smaller than a primgroup.
694 * Assume indirect draws always use small instances.
695 * This is needed for good VS wave utilization.
696 */
697 if (chip_class <= GFX8 &&
698 info->max_se == 4 &&
699 multi_instances_smaller_than_primgroup)
700 wd_switch_on_eop = true;
701
702 /* Required on GFX7 and later. */
703 if (info->max_se > 2 && !wd_switch_on_eop)
704 ia_switch_on_eoi = true;
705
706 /* Required by Hawaii and, for some special cases, by GFX8. */
707 if (ia_switch_on_eoi &&
708 (family == CHIP_HAWAII ||
709 (chip_class == GFX8 &&
710 /* max primgroup in wave is always 2 - leave this for documentation */
711 (radv_pipeline_has_gs(cmd_buffer->state.pipeline) || max_primgroup_in_wave != 2))))
712 partial_vs_wave = true;
713
714 /* Instancing bug on Bonaire. */
715 if (family == CHIP_BONAIRE && ia_switch_on_eoi &&
716 (instanced_draw || indirect_draw))
717 partial_vs_wave = true;
718
719 /* Hardware requirement when drawing primitives from a stream
720 * output buffer.
721 */
722 if (count_from_stream_output)
723 wd_switch_on_eop = true;
724
725 /* If the WD switch is false, the IA switch must be false too. */
726 assert(wd_switch_on_eop || !ia_switch_on_eop);
727 }
728 /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
729 if (chip_class <= GFX8 && ia_switch_on_eoi)
730 partial_es_wave = true;
731
732 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
733 /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
734 * The hw doc says all multi-SE chips are affected, but amdgpu-pro Vulkan
735 * only applies it to Hawaii. Do what amdgpu-pro Vulkan does.
736 */
737 if (family == CHIP_HAWAII && ia_switch_on_eoi) {
738 bool set_vgt_flush = indirect_draw;
739 if (!set_vgt_flush && instanced_draw) {
740 uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
741 if (num_prims <= 1)
742 set_vgt_flush = true;
743 }
744 if (set_vgt_flush)
745 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
746 }
747 }
748
749 return cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.base |
750 S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
751 S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
752 S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
753 S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
754 S_028AA8_WD_SWITCH_ON_EOP(chip_class >= GFX7 ? wd_switch_on_eop : 0);
755
756 }
757
758 void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
759 enum chip_class chip_class,
760 bool is_mec,
761 unsigned event, unsigned event_flags,
762 unsigned dst_sel, unsigned data_sel,
763 uint64_t va,
764 uint32_t new_fence,
765 uint64_t gfx9_eop_bug_va)
766 {
767 unsigned op = EVENT_TYPE(event) |
768 EVENT_INDEX(event == V_028A90_CS_DONE ||
769 event == V_028A90_PS_DONE ? 6 : 5) |
770 event_flags;
771 unsigned is_gfx8_mec = is_mec && chip_class < GFX9;
772 unsigned sel = EOP_DST_SEL(dst_sel) |
773 EOP_DATA_SEL(data_sel);
774
775 /* Wait for write confirmation before writing data, but don't send
776 * an interrupt. */
777 if (data_sel != EOP_DATA_SEL_DISCARD)
778 sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
779
780 if (chip_class >= GFX9 || is_gfx8_mec) {
781 /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
782 * counters) must immediately precede every timestamp event to
783 * prevent a GPU hang on GFX9.
784 */
785 if (chip_class == GFX9 && !is_mec) {
786 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
787 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
788 radeon_emit(cs, gfx9_eop_bug_va);
789 radeon_emit(cs, gfx9_eop_bug_va >> 32);
790 }
791
792 radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, false));
793 radeon_emit(cs, op);
794 radeon_emit(cs, sel);
795 radeon_emit(cs, va); /* address lo */
796 radeon_emit(cs, va >> 32); /* address hi */
797 radeon_emit(cs, new_fence); /* immediate data lo */
798 radeon_emit(cs, 0); /* immediate data hi */
799 if (!is_gfx8_mec)
800 radeon_emit(cs, 0); /* unused */
801 } else {
802 if (chip_class == GFX7 ||
803 chip_class == GFX8) {
804 /* Two EOP events are required to make all engines go idle
805 * (and optional cache flushes executed) before the timestamp
806 * is written.
807 */
808 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
809 radeon_emit(cs, op);
810 radeon_emit(cs, va);
811 radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
812 radeon_emit(cs, 0); /* immediate data */
813 radeon_emit(cs, 0); /* unused */
814 }
815
816 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
817 radeon_emit(cs, op);
818 radeon_emit(cs, va);
819 radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
820 radeon_emit(cs, new_fence); /* immediate data */
821 radeon_emit(cs, 0); /* unused */
822 }
823 }
824
825 void
826 radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va,
827 uint32_t ref, uint32_t mask)
828 {
829 assert(op == WAIT_REG_MEM_EQUAL ||
830 op == WAIT_REG_MEM_NOT_EQUAL ||
831 op == WAIT_REG_MEM_GREATER_OR_EQUAL);
832
833 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
834 radeon_emit(cs, op | WAIT_REG_MEM_MEM_SPACE(1));
835 radeon_emit(cs, va);
836 radeon_emit(cs, va >> 32);
837 radeon_emit(cs, ref); /* reference value */
838 radeon_emit(cs, mask); /* mask */
839 radeon_emit(cs, 4); /* poll interval */
840 }
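/* Editorial usage note: the EOP-fence pattern used later in this file pairs
 * si_cs_emit_write_event_eop() (which has the GPU write *flush_cnt to flush_va
 * once the CB/DB flush completes) with
 * radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va, *flush_cnt, 0xffffffff),
 * so the CP busy-waits on that memory word before executing further packets. */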
841
842 static void
843 si_emit_acquire_mem(struct radeon_cmdbuf *cs,
844 bool is_mec,
845 bool is_gfx9,
846 unsigned cp_coher_cntl)
847 {
848 if (is_mec || is_gfx9) {
849 uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
850 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, false) |
851 PKT3_SHADER_TYPE_S(is_mec));
852 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
853 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
854 radeon_emit(cs, hi_val); /* CP_COHER_SIZE_HI */
855 radeon_emit(cs, 0); /* CP_COHER_BASE */
856 radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
857 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
858 } else {
859 /* ACQUIRE_MEM is only required on a compute ring; the gfx ring uses SURFACE_SYNC. */
860 radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, false));
861 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
862 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
863 radeon_emit(cs, 0); /* CP_COHER_BASE */
864 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
865 }
866 }
867
868 static void
869 gfx10_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
870 enum chip_class chip_class,
871 uint32_t *flush_cnt,
872 uint64_t flush_va,
873 bool is_mec,
874 enum radv_cmd_flush_bits flush_bits,
875 uint64_t gfx9_eop_bug_va)
876 {
877 uint32_t gcr_cntl = 0;
878 unsigned cb_db_event = 0;
879
880 /* We don't need these. */
881 assert(!(flush_bits & (RADV_CMD_FLAG_VGT_FLUSH |
882 RADV_CMD_FLAG_VGT_STREAMOUT_SYNC)));
883
884 if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
885 gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
886 if (flush_bits & RADV_CMD_FLAG_INV_SCACHE) {
887 /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
888 * to FORWARD when both L1 and L2 are written out (WB or INV).
889 */
890 gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
891 }
892 if (flush_bits & RADV_CMD_FLAG_INV_VCACHE)
893 gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
894 if (flush_bits & RADV_CMD_FLAG_INV_L2) {
895 /* Writeback and invalidate everything in L2. */
896 gcr_cntl |= S_586_GL2_INV(1) | S_586_GLM_INV(1);
897 } else if (flush_bits & RADV_CMD_FLAG_WB_L2) {
898 /* Writeback but do not invalidate. */
899 gcr_cntl |= S_586_GL2_WB(1);
900 }
901
902 /* TODO: Implement this new flag for GFX9+.
903 if (flush_bits & RADV_CMD_FLAG_INV_L2_METADATA)
904 gcr_cntl |= S_586_GLM_INV(1);
905 */
906
907 if (flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
908 /* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_CB_META */
909 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
910 /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
911 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
912 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) |
913 EVENT_INDEX(0));
914 }
915
916 /* TODO: trigger on RADV_CMD_FLAG_FLUSH_AND_INV_DB_META ? */
917 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
918 /* Flush HTILE. Will wait for idle later. */
919 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
920 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) |
921 EVENT_INDEX(0));
922 }
923
924 /* First flush CB/DB, then L1/L2. */
925 gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);
926
927 if ((flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) ==
928 (RADV_CMD_FLAG_FLUSH_AND_INV_CB | RADV_CMD_FLAG_FLUSH_AND_INV_DB)) {
929 cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
930 } else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
931 cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
932 } else if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
933 cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
934 } else {
935 assert(0);
936 }
937 } else {
938 /* Wait for graphics shaders to go idle if requested. */
939 if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
940 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
941 radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
942 } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
943 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
944 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
945 }
946 }
947
948 if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
949 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
950 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
951 }
952
953 if (cb_db_event) {
954 /* CB/DB flush and invalidate (or possibly just a wait for a
955 * meta flush) via RELEASE_MEM.
956 *
957 * Combine this with other cache flushes when possible; this
958 * requires affected shaders to be idle, so do it after the
959 * CS_PARTIAL_FLUSH before (VS/PS partial flushes are always
960 * implied).
961 */
962 /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
963 unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
964 unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
965 unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
966 unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
967 assert(G_586_GL2_US(gcr_cntl) == 0);
968 assert(G_586_GL2_RANGE(gcr_cntl) == 0);
969 assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
970 unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
971 unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
972 unsigned gcr_seq = G_586_SEQ(gcr_cntl);
973
974 gcr_cntl &= C_586_GLM_WB &
975 C_586_GLM_INV &
976 C_586_GLV_INV &
977 C_586_GL1_INV &
978 C_586_GL2_INV &
979 C_586_GL2_WB; /* keep SEQ */
980
981 assert(flush_cnt);
982 (*flush_cnt)++;
983
984 si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event,
985 S_490_GLM_WB(glm_wb) |
986 S_490_GLM_INV(glm_inv) |
987 S_490_GLV_INV(glv_inv) |
988 S_490_GL1_INV(gl1_inv) |
989 S_490_GL2_INV(gl2_inv) |
990 S_490_GL2_WB(gl2_wb) |
991 S_490_SEQ(gcr_seq),
992 EOP_DST_SEL_MEM,
993 EOP_DATA_SEL_VALUE_32BIT,
994 flush_va, *flush_cnt,
995 gfx9_eop_bug_va);
996
997 radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
998 *flush_cnt, 0xffffffff);
999 }
1000
1001 /* Ignore fields that only modify the behavior of other fields. */
1002 if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
1003 /* Flush caches and wait for the caches to assert idle.
1004 * The cache flush is executed in the ME, but the PFP waits
1005 * for completion.
1006 */
1007 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
1008 radeon_emit(cs, 0); /* CP_COHER_CNTL */
1009 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
1010 radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
1011 radeon_emit(cs, 0); /* CP_COHER_BASE */
1012 radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
1013 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
1014 radeon_emit(cs, gcr_cntl); /* GCR_CNTL */
1015 } else if ((cb_db_event ||
1016 (flush_bits & (RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
1017 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
1018 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)))
1019 && !is_mec) {
1020 /* We need to ensure that PFP waits as well. */
1021 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1022 radeon_emit(cs, 0);
1023 }
1024
1025 if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
1026 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1027 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
1028 EVENT_INDEX(0));
1029 } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
1030 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1031 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
1032 EVENT_INDEX(0));
1033 }
1034 }
1035
1036 void
1037 si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
1038 enum chip_class chip_class,
1039 uint32_t *flush_cnt,
1040 uint64_t flush_va,
1041 bool is_mec,
1042 enum radv_cmd_flush_bits flush_bits,
1043 uint64_t gfx9_eop_bug_va)
1044 {
1045 unsigned cp_coher_cntl = 0;
1046 uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1047 RADV_CMD_FLAG_FLUSH_AND_INV_DB);
1048
1049 if (chip_class >= GFX10) {
1050 /* GFX10 cache flush handling is quite different. */
1051 gfx10_cs_emit_cache_flush(cs, chip_class, flush_cnt, flush_va,
1052 is_mec, flush_bits, gfx9_eop_bug_va);
1053 return;
1054 }
1055
1056 if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
1057 cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
1058 if (flush_bits & RADV_CMD_FLAG_INV_SCACHE)
1059 cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
1060
1061 if (chip_class <= GFX8) {
1062 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
1063 cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
1064 S_0085F0_CB0_DEST_BASE_ENA(1) |
1065 S_0085F0_CB1_DEST_BASE_ENA(1) |
1066 S_0085F0_CB2_DEST_BASE_ENA(1) |
1067 S_0085F0_CB3_DEST_BASE_ENA(1) |
1068 S_0085F0_CB4_DEST_BASE_ENA(1) |
1069 S_0085F0_CB5_DEST_BASE_ENA(1) |
1070 S_0085F0_CB6_DEST_BASE_ENA(1) |
1071 S_0085F0_CB7_DEST_BASE_ENA(1);
1072
1073 /* Necessary for DCC */
1074 if (chip_class >= GFX8) {
1075 si_cs_emit_write_event_eop(cs,
1076 chip_class,
1077 is_mec,
1078 V_028A90_FLUSH_AND_INV_CB_DATA_TS,
1079 0,
1080 EOP_DST_SEL_MEM,
1081 EOP_DATA_SEL_DISCARD,
1082 0, 0,
1083 gfx9_eop_bug_va);
1084 }
1085 }
1086 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
1087 cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
1088 S_0085F0_DB_DEST_BASE_ENA(1);
1089 }
1090 }
1091
1092 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
1093 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1094 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
1095 }
1096
1097 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
1098 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1099 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
1100 }
1101
1102 if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
1103 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1104 radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1105 } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
1106 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1107 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1108 }
1109
1110 if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
1111 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1112 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1113 }
1114
1115 if (chip_class == GFX9 && flush_cb_db) {
1116 unsigned cb_db_event, tc_flags;
1117
1118 /* Set the CB/DB flush event. */
1119 cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1120
1121 /* These are the only allowed combinations. If you need to
1122 * do multiple operations at once, do them separately.
1123 * All operations that invalidate L2 also seem to invalidate
1124 * metadata. Volatile (VOL) and WC flushes are not listed here.
1125 *
1126 * TC | TC_WB = writeback & invalidate L2 & L1
1127 * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
1128 * TC_WB | TC_NC = writeback L2 for MTYPE == NC
1129 * TC | TC_NC = invalidate L2 for MTYPE == NC
1130 * TC | TC_MD = writeback & invalidate L2 metadata (DCC, etc.)
1131 * TCL1 = invalidate L1
1132 */
1133 tc_flags = EVENT_TC_ACTION_ENA |
1134 EVENT_TC_MD_ACTION_ENA;
1135
1136 /* Ideally flush TC together with CB/DB. */
1137 if (flush_bits & RADV_CMD_FLAG_INV_L2) {
1138 /* Writeback and invalidate everything in L2 & L1. */
1139 tc_flags = EVENT_TC_ACTION_ENA |
1140 EVENT_TC_WB_ACTION_ENA;
1141
1142
1143 /* Clear the flags. */
1144 flush_bits &= ~(RADV_CMD_FLAG_INV_L2 |
1145 RADV_CMD_FLAG_WB_L2 |
1146 RADV_CMD_FLAG_INV_VCACHE);
1147 }
1148 assert(flush_cnt);
1149 (*flush_cnt)++;
1150
1151 si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags,
1152 EOP_DST_SEL_MEM,
1153 EOP_DATA_SEL_VALUE_32BIT,
1154 flush_va, *flush_cnt,
1155 gfx9_eop_bug_va);
1156 radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
1157 *flush_cnt, 0xffffffff);
1158 }
1159
1160 /* VGT state sync */
1161 if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
1162 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1163 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1164 }
1165
1166 /* VGT streamout state sync */
1167 if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) {
1168 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1169 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
1170 }
1171
1172 /* Make sure ME is idle (it executes most packets) before continuing.
1173 * This prevents read-after-write hazards between PFP and ME.
1174 */
1175 if ((cp_coher_cntl ||
1176 (flush_bits & (RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
1177 RADV_CMD_FLAG_INV_VCACHE |
1178 RADV_CMD_FLAG_INV_L2 |
1179 RADV_CMD_FLAG_WB_L2))) &&
1180 !is_mec) {
1181 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1182 radeon_emit(cs, 0);
1183 }
1184
1185 if ((flush_bits & RADV_CMD_FLAG_INV_L2) ||
1186 (chip_class <= GFX7 && (flush_bits & RADV_CMD_FLAG_WB_L2))) {
1187 si_emit_acquire_mem(cs, is_mec, chip_class == GFX9,
1188 cp_coher_cntl |
1189 S_0085F0_TC_ACTION_ENA(1) |
1190 S_0085F0_TCL1_ACTION_ENA(1) |
1191 S_0301F0_TC_WB_ACTION_ENA(chip_class >= GFX8));
1192 cp_coher_cntl = 0;
1193 } else {
1194 if (flush_bits & RADV_CMD_FLAG_WB_L2) {
1195 /* WB = write-back
1196 * NC = apply to non-coherent MTYPEs
1197 * (i.e. MTYPE <= 1, which is what we use everywhere)
1198 *
1199 * WB doesn't work without NC.
1200 */
1201 si_emit_acquire_mem(cs, is_mec,
1202 chip_class == GFX9,
1203 cp_coher_cntl |
1204 S_0301F0_TC_WB_ACTION_ENA(1) |
1205 S_0301F0_TC_NC_ACTION_ENA(1));
1206 cp_coher_cntl = 0;
1207 }
1208 if (flush_bits & RADV_CMD_FLAG_INV_VCACHE) {
1209 si_emit_acquire_mem(cs, is_mec,
1210 chip_class == GFX9,
1211 cp_coher_cntl |
1212 S_0085F0_TCL1_ACTION_ENA(1));
1213 cp_coher_cntl = 0;
1214 }
1215 }
1216
1217 /* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
1218 * Therefore, it should be last. Done in PFP.
1219 */
1220 if (cp_coher_cntl)
1221 si_emit_acquire_mem(cs, is_mec, chip_class == GFX9, cp_coher_cntl);
1222
1223 if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
1224 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1225 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
1226 EVENT_INDEX(0));
1227 } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
1228 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1229 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
1230 EVENT_INDEX(0));
1231 }
1232 }
1233
1234 void
1235 si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
1236 {
1237 bool is_compute = cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE;
1238
1239 if (is_compute)
1240 cmd_buffer->state.flush_bits &= ~(RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1241 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
1242 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1243 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
1244 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
1245 RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
1246 RADV_CMD_FLAG_VGT_FLUSH |
1247 RADV_CMD_FLAG_START_PIPELINE_STATS |
1248 RADV_CMD_FLAG_STOP_PIPELINE_STATS);
1249
1250 if (!cmd_buffer->state.flush_bits)
1251 return;
1252
1253 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);
1254
1255 si_cs_emit_cache_flush(cmd_buffer->cs,
1256 cmd_buffer->device->physical_device->rad_info.chip_class,
1257 &cmd_buffer->gfx9_fence_idx,
1258 cmd_buffer->gfx9_fence_va,
1259 radv_cmd_buffer_uses_mec(cmd_buffer),
1260 cmd_buffer->state.flush_bits,
1261 cmd_buffer->gfx9_eop_bug_va);
1262
1263
1264 if (unlikely(cmd_buffer->device->trace_bo))
1265 radv_cmd_buffer_trace_emit(cmd_buffer);
1266
1267 /* Clear the caches that have been flushed to avoid syncing too much
1268 * when there are pending active queries.
1269 */
1270 cmd_buffer->active_query_flush_bits &= ~cmd_buffer->state.flush_bits;
1271
1272 cmd_buffer->state.flush_bits = 0;
1273
1274 /* If the driver used a compute shader for resetting a query pool, it
1275 * should be finished at this point.
1276 */
1277 cmd_buffer->pending_reset_query = false;
1278 }
1279
1280 /* sets the CP predication state using a boolean stored at va */
1281 void
1282 si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer,
1283 bool draw_visible, uint64_t va)
1284 {
1285 uint32_t op = 0;
1286
1287 if (va) {
1288 op = PRED_OP(PREDICATION_OP_BOOL64);
1289
1290 /* PREDICATION_DRAW_VISIBLE means that if the 32-bit value is
1291 * zero, all rendering commands are discarded. With
1292 * PREDICATION_DRAW_NOT_VISIBLE, they are discarded if the value is non-zero.
1293 */
1294 op |= draw_visible ? PREDICATION_DRAW_VISIBLE :
1295 PREDICATION_DRAW_NOT_VISIBLE;
1296 }
1297 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
1298 radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
1299 radeon_emit(cmd_buffer->cs, op);
1300 radeon_emit(cmd_buffer->cs, va);
1301 radeon_emit(cmd_buffer->cs, va >> 32);
1302 } else {
1303 radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
1304 radeon_emit(cmd_buffer->cs, va);
1305 radeon_emit(cmd_buffer->cs, op | ((va >> 32) & 0xFF));
1306 }
1307 }
1308
1309 /* Set this if you want the 3D engine to wait until CP DMA is done.
1310 * It should be set on the last CP DMA packet. */
1311 #define CP_DMA_SYNC (1 << 0)
1312
1313 /* Set this if the source data was used as a destination in a previous CP DMA
1314 * packet. It's for preventing a read-after-write (RAW) hazard between two
1315 * CP DMA packets. */
1316 #define CP_DMA_RAW_WAIT (1 << 1)
1317 #define CP_DMA_USE_L2 (1 << 2)
1318 #define CP_DMA_CLEAR (1 << 3)
1319
1320 /* Alignment for optimal performance. */
1321 #define SI_CPDMA_ALIGNMENT 32
1322
1323 /* The max number of bytes that can be copied per packet. */
1324 static inline unsigned cp_dma_max_byte_count(struct radv_cmd_buffer *cmd_buffer)
1325 {
1326 unsigned max = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 ?
1327 S_414_BYTE_COUNT_GFX9(~0u) :
1328 S_414_BYTE_COUNT_GFX6(~0u);
1329
1330 /* make it aligned for optimal performance */
1331 return max & ~(SI_CPDMA_ALIGNMENT - 1);
1332 }
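/* Editorial note: assuming the GFX6 byte-count field is 21 bits wide (as the
 * comment below says, "bits [20:0]"), S_414_BYTE_COUNT_GFX6(~0u) is 0x1FFFFF
 * and the alignment mask trims the per-packet maximum to 2097120 bytes; the
 * GFX9 field is wider, so larger chunks are allowed there. */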
1333
1334 /* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
1335 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
1336 * clear value.
1337 */
1338 static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer,
1339 uint64_t dst_va, uint64_t src_va,
1340 unsigned size, unsigned flags)
1341 {
1342 struct radeon_cmdbuf *cs = cmd_buffer->cs;
1343 uint32_t header = 0, command = 0;
1344
1345 assert(size <= cp_dma_max_byte_count(cmd_buffer));
1346
1347 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
1348 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
1349 command |= S_414_BYTE_COUNT_GFX9(size);
1350 else
1351 command |= S_414_BYTE_COUNT_GFX6(size);
1352
1353 /* Sync flags. */
1354 if (flags & CP_DMA_SYNC)
1355 header |= S_411_CP_SYNC(1);
1356 else {
1357 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
1358 command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
1359 else
1360 command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
1361 }
1362
1363 if (flags & CP_DMA_RAW_WAIT)
1364 command |= S_414_RAW_WAIT(1);
1365
1366 /* Src and dst flags. */
1367 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
1368 !(flags & CP_DMA_CLEAR) &&
1369 src_va == dst_va)
1370 header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
1371 else if (flags & CP_DMA_USE_L2)
1372 header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2);
1373
1374 if (flags & CP_DMA_CLEAR)
1375 header |= S_411_SRC_SEL(V_411_DATA);
1376 else if (flags & CP_DMA_USE_L2)
1377 header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);
1378
1379 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX7) {
1380 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, cmd_buffer->state.predicating));
1381 radeon_emit(cs, header);
1382 radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
1383 radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */
1384 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
1385 radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
1386 radeon_emit(cs, command);
1387 } else {
1388 assert(!(flags & CP_DMA_USE_L2));
1389 header |= S_411_SRC_ADDR_HI(src_va >> 32);
1390 radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, cmd_buffer->state.predicating));
1391 radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
1392 radeon_emit(cs, header); /* SRC_ADDR_HI [15:0] + flags. */
1393 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
1394 radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
1395 radeon_emit(cs, command);
1396 }
1397
1398 /* CP DMA is executed in ME, but index buffers are read by PFP.
1399 * This ensures that ME (CP DMA) is idle before PFP starts fetching
1400 * indices. If we wanted to execute CP DMA in PFP, this packet
1401 * should precede it.
1402 */
1403 if (flags & CP_DMA_SYNC) {
1404 if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
1405 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
1406 radeon_emit(cs, 0);
1407 }
1408
1409 /* CP will see the sync flag and wait for all DMAs to complete. */
1410 cmd_buffer->state.dma_is_busy = false;
1411 }
1412
1413 if (unlikely(cmd_buffer->device->trace_bo))
1414 radv_cmd_buffer_trace_emit(cmd_buffer);
1415 }
1416
1417 void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
1418 unsigned size)
1419 {
1420 uint64_t aligned_va = va & ~(SI_CPDMA_ALIGNMENT - 1);
1421 uint64_t aligned_size = ((va + size + SI_CPDMA_ALIGNMENT -1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligned_va;
1422
1423 si_emit_cp_dma(cmd_buffer, aligned_va, aligned_va,
1424 aligned_size, CP_DMA_USE_L2);
1425 }
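/* Editorial example (addresses assumed): prefetching va = 0x1005 with
 * size = 100 rounds the start down to aligned_va = 0x1000 and the end up to
 * the next 32-byte boundary, giving aligned_size = 128, so the whole range is
 * pulled into L2 in SI_CPDMA_ALIGNMENT-sized units. */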
1426
1427 static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
1428 uint64_t remaining_size, unsigned *flags)
1429 {
1430
1431 /* Flush the caches for the first copy only.
1432 * Also wait for the previous CP DMA operations.
1433 */
1434 if (cmd_buffer->state.flush_bits) {
1435 si_emit_cache_flush(cmd_buffer);
1436 *flags |= CP_DMA_RAW_WAIT;
1437 }
1438
1439 /* Do the synchronization after the last DMA, so that all data
1440 * is written to memory.
1441 */
1442 if (byte_count == remaining_size)
1443 *flags |= CP_DMA_SYNC;
1444 }
1445
1446 static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
1447 {
1448 uint64_t va;
1449 uint32_t offset;
1450 unsigned dma_flags = 0;
1451 unsigned buf_size = SI_CPDMA_ALIGNMENT * 2;
1452 void *ptr;
1453
1454 assert(size < SI_CPDMA_ALIGNMENT);
1455
1456 radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr);
1457
1458 va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
1459 va += offset;
1460
1461 si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);
1462
1463 si_emit_cp_dma(cmd_buffer, va, va + SI_CPDMA_ALIGNMENT, size,
1464 dma_flags);
1465 }
1466
1467 void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
1468 uint64_t src_va, uint64_t dest_va,
1469 uint64_t size)
1470 {
1471 uint64_t main_src_va, main_dest_va;
1472 uint64_t skipped_size = 0, realign_size = 0;
1473
1474 /* Assume that we are not going to sync after the last DMA operation. */
1475 cmd_buffer->state.dma_is_busy = true;
1476
1477 if (cmd_buffer->device->physical_device->rad_info.family <= CHIP_CARRIZO ||
1478 cmd_buffer->device->physical_device->rad_info.family == CHIP_STONEY) {
1479 /* If the size is not aligned, we must add a dummy copy at the end
1480 * just to align the internal counter. Otherwise, the DMA engine
1481 * would slow down by an order of magnitude for following copies.
1482 */
1483 if (size % SI_CPDMA_ALIGNMENT)
1484 realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);
1485
1486 /* If the copy begins unaligned, we must start copying from the next
1487 * aligned block and the skipped part should be copied after everything
1488 * else has been copied. Only the src alignment matters, not dst.
1489 */
1490 if (src_va % SI_CPDMA_ALIGNMENT) {
1491 skipped_size = SI_CPDMA_ALIGNMENT - (src_va % SI_CPDMA_ALIGNMENT);
1492 /* The main part will be skipped if the size is too small. */
1493 skipped_size = MIN2(skipped_size, size);
1494 size -= skipped_size;
1495 }
1496 }
1497 main_src_va = src_va + skipped_size;
1498 main_dest_va = dest_va + skipped_size;
1499
1500 while (size) {
1501 unsigned dma_flags = 0;
1502 unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
1503
1504 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
1505 /* DMA operations via L2 are coherent and faster.
1506 * TODO: GFX7-GFX9 should also support this but it
1507 * requires tests/benchmarks.
1508 */
1509 dma_flags |= CP_DMA_USE_L2;
1510 }
1511
1512 si_cp_dma_prepare(cmd_buffer, byte_count,
1513 size + skipped_size + realign_size,
1514 &dma_flags);
1515
1516 dma_flags &= ~CP_DMA_SYNC;
1517
1518 si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
1519 byte_count, dma_flags);
1520
1521 size -= byte_count;
1522 main_src_va += byte_count;
1523 main_dest_va += byte_count;
1524 }
1525
1526 if (skipped_size) {
1527 unsigned dma_flags = 0;
1528
1529 si_cp_dma_prepare(cmd_buffer, skipped_size,
1530 size + skipped_size + realign_size,
1531 &dma_flags);
1532
1533 si_emit_cp_dma(cmd_buffer, dest_va, src_va,
1534 skipped_size, dma_flags);
1535 }
1536 if (realign_size)
1537 si_cp_dma_realign_engine(cmd_buffer, realign_size);
1538 }
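/* Editorial walk-through (assumed values) of the alignment handling above on
 * the affected chips: for src_va with src_va % 32 == 8 and size == 100,
 * skipped_size = 24 and realign_size = 28 (since 100 % 32 == 4). The loop
 * copies the 76-byte aligned main part first, then the 24-byte unaligned head,
 * and finally issues a 28-byte dummy copy via si_cp_dma_realign_engine() to
 * keep the DMA engine's internal counter aligned. */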
1539
1540 void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
1541 uint64_t size, unsigned value)
1542 {
1543
1544 if (!size)
1545 return;
1546
1547 assert(va % 4 == 0 && size % 4 == 0);
1548
1549 /* Assume that we are not going to sync after the last DMA operation. */
1550 cmd_buffer->state.dma_is_busy = true;
1551
1552 while (size) {
1553 unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
1554 unsigned dma_flags = CP_DMA_CLEAR;
1555
1556 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX10) {
1557 /* DMA operations via L2 are coherent and faster.
1558 * TODO: GFX7-GFX9 should also support this but it
1559 * requires tests/benchmarks.
1560 */
1561 dma_flags |= CP_DMA_USE_L2;
1562 }
1563
1564 si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);
1565
1566 /* Emit the clear packet. */
1567 si_emit_cp_dma(cmd_buffer, va, value, byte_count,
1568 dma_flags);
1569
1570 size -= byte_count;
1571 va += byte_count;
1572 }
1573 }
1574
1575 void si_cp_dma_wait_for_idle(struct radv_cmd_buffer *cmd_buffer)
1576 {
1577 if (cmd_buffer->device->physical_device->rad_info.chip_class < GFX7)
1578 return;
1579
1580 if (!cmd_buffer->state.dma_is_busy)
1581 return;
1582
1583 /* Issue a dummy DMA that copies zero bytes.
1584 *
1585 * The DMA engine will see that there's no work to do and skip this
1586 * DMA request; however, the CP will see the sync flag and still wait
1587 * for all DMAs to complete.
1588 */
1589 si_emit_cp_dma(cmd_buffer, 0, 0, 0, CP_DMA_SYNC);
1590
1591 cmd_buffer->state.dma_is_busy = false;
1592 }
1593
1594 /* For MSAA sample positions. */
1595 #define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y) \
1596 ((((unsigned)(s0x) & 0xf) << 0) | (((unsigned)(s0y) & 0xf) << 4) | \
1597 (((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) | \
1598 (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) | \
1599 (((unsigned)(s3x) & 0xf) << 24) | (((unsigned)(s3y) & 0xf) << 28))
1600
1601 /* For obtaining location coordinates from registers */
1602 #define SEXT4(x) ((int)((x) | ((x) & 0x8 ? 0xfffffff0 : 0)))
1603 #define GET_SFIELD(reg, index) SEXT4(((reg) >> ((index) * 4)) & 0xf)
1604 #define GET_SX(reg, index) GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2)
1605 #define GET_SY(reg, index) GET_SFIELD((reg)[(index) / 4], ((index) % 4) * 2 + 1)
1606
1607 /* 1x MSAA */
1608 static const uint32_t sample_locs_1x =
1609 FILL_SREG(0, 0, 0, 0, 0, 0, 0, 0);
1610 static const unsigned max_dist_1x = 0;
1611 static const uint64_t centroid_priority_1x = 0x0000000000000000ull;
1612
1613 /* 2xMSAA */
1614 static const uint32_t sample_locs_2x =
1615 FILL_SREG(4,4, -4, -4, 0, 0, 0, 0);
1616 static const unsigned max_dist_2x = 4;
1617 static const uint64_t centroid_priority_2x = 0x1010101010101010ull;
1618
1619 /* 4xMSAA */
1620 static const uint32_t sample_locs_4x =
1621 FILL_SREG(-2,-6, 6, -2, -6, 2, 2, 6);
1622 static const unsigned max_dist_4x = 6;
1623 static const uint64_t centroid_priority_4x = 0x3210321032103210ull;
1624
1625 /* 8xMSAA */
1626 static const uint32_t sample_locs_8x[] = {
1627 FILL_SREG( 1,-3, -1, 3, 5, 1, -3,-5),
1628 FILL_SREG(-5, 5, -7,-1, 3, 7, 7,-7),
1629 /* The following are unused by hardware, but we emit them to IBs
1630 * instead of multiple SET_CONTEXT_REG packets. */
1631 0,
1632 0,
1633 };
1634 static const unsigned max_dist_8x = 7;
1635 static const uint64_t centroid_priority_8x = 0x7654321076543210ull;
1636
1637 unsigned radv_get_default_max_sample_dist(int log_samples)
1638 {
1639 unsigned max_dist[] = {
1640 max_dist_1x,
1641 max_dist_2x,
1642 max_dist_4x,
1643 max_dist_8x,
1644 };
1645 return max_dist[log_samples];
1646 }
1647
1648 void radv_emit_default_sample_locations(struct radeon_cmdbuf *cs, int nr_samples)
1649 {
1650 switch (nr_samples) {
1651 default:
1652 case 1:
1653 radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
1654 radeon_emit(cs, (uint32_t)centroid_priority_1x);
1655 radeon_emit(cs, centroid_priority_1x >> 32);
1656 radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_1x);
1657 radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_1x);
1658 radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_1x);
1659 radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_1x);
1660 break;
1661 case 2:
1662 radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
1663 radeon_emit(cs, (uint32_t)centroid_priority_2x);
1664 radeon_emit(cs, centroid_priority_2x >> 32);
1665 radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_2x);
1666 radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_2x);
1667 radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_2x);
1668 radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_2x);
1669 break;
1670 case 4:
1671 radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
1672 radeon_emit(cs, (uint32_t)centroid_priority_4x);
1673 radeon_emit(cs, centroid_priority_4x >> 32);
1674 radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, sample_locs_4x);
1675 radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, sample_locs_4x);
1676 radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, sample_locs_4x);
1677 radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, sample_locs_4x);
1678 break;
1679 case 8:
1680 radeon_set_context_reg_seq(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
1681 radeon_emit(cs, (uint32_t)centroid_priority_8x);
1682 radeon_emit(cs, centroid_priority_8x >> 32);
1683 radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
1684 radeon_emit_array(cs, sample_locs_8x, 4);
1685 radeon_emit_array(cs, sample_locs_8x, 4);
1686 radeon_emit_array(cs, sample_locs_8x, 4);
1687 radeon_emit_array(cs, sample_locs_8x, 2);
1688 break;
1689 }
1690 }
1691
1692 static void radv_get_sample_position(struct radv_device *device,
1693 unsigned sample_count,
1694 unsigned sample_index, float *out_value)
1695 {
1696 const uint32_t *sample_locs;
1697
1698 switch (sample_count) {
1699 case 1:
1700 default:
1701 sample_locs = &sample_locs_1x;
1702 break;
1703 case 2:
1704 sample_locs = &sample_locs_2x;
1705 break;
1706 case 4:
1707 sample_locs = &sample_locs_4x;
1708 break;
1709 case 8:
1710 sample_locs = sample_locs_8x;
1711 break;
1712 }
1713
1714 out_value[0] = (GET_SX(sample_locs, sample_index) + 8) / 16.0f;
1715 out_value[1] = (GET_SY(sample_locs, sample_index) + 8) / 16.0f;
1716 }
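/* Editorial example (derived from the tables above): for 2x MSAA,
 * sample_locs_2x packs (4,4) and (-4,-4) as signed 4-bit 1/16th-pixel offsets,
 * so radv_get_sample_position() returns (0.75, 0.75) for sample 0 and
 * (0.25, 0.25) for sample 1, matching the standard Vulkan 2-sample pattern. */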
1717
1718 void radv_device_init_msaa(struct radv_device *device)
1719 {
1720 int i;
1721
1722 radv_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);
1723
1724 for (i = 0; i < 2; i++)
1725 radv_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
1726 for (i = 0; i < 4; i++)
1727 radv_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
1728 for (i = 0; i < 8; i++)
1729 radv_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
1730 }