/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* command buffer handling for SI */

#include "radv_private.h"
#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "radv_util.h"
#include "main/macros.h"

static void
si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
                                  struct radeon_cmdbuf *cs,
                                  unsigned raster_config,
                                  unsigned raster_config_1)
{
    unsigned num_se = MAX2(physical_device->rad_info.max_se, 1);
    unsigned raster_config_se[4];
    unsigned se;

    ac_get_harvested_configs(&physical_device->rad_info,
                             raster_config, &raster_config_1,
                             raster_config_se);

    for (se = 0; se < num_se; se++) {
        /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
        if (physical_device->rad_info.chip_class < CIK)
            radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
                                  S_00802C_SE_INDEX(se) |
                                  S_00802C_SH_BROADCAST_WRITES(1) |
                                  S_00802C_INSTANCE_BROADCAST_WRITES(1));
        else
            radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                                   S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
                                   S_030800_INSTANCE_BROADCAST_WRITES(1));
        radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se[se]);
    }

    /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
    if (physical_device->rad_info.chip_class < CIK)
        radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
                              S_00802C_SE_BROADCAST_WRITES(1) |
                              S_00802C_SH_BROADCAST_WRITES(1) |
                              S_00802C_INSTANCE_BROADCAST_WRITES(1));
    else
        radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                               S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
                               S_030800_INSTANCE_BROADCAST_WRITES(1));

    if (physical_device->rad_info.chip_class >= CIK)
        radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
}

void
si_emit_compute(struct radv_physical_device *physical_device,
                struct radeon_cmdbuf *cs)
{
    radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
    radeon_emit(cs, 0);
    radeon_emit(cs, 0);
    radeon_emit(cs, 0);

    radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
    /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
    radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
    radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

    if (physical_device->rad_info.chip_class >= CIK) {
        /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
        radeon_set_sh_reg_seq(cs,
                              R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
        radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
                    S_00B864_SH1_CU_EN(0xffff));
        radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
                    S_00B868_SH1_CU_EN(0xffff));
    }

    /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
     * and is now per pipe, so it should be handled in the
     * kernel if we want to use something other than the default value,
     * which is now 0x22f.
     */
    if (physical_device->rad_info.chip_class <= SI) {
        /* XXX: This should be:
         * (number of compute units) * 4 * (waves per simd) - 1 */
        radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
                          0x190 /* Default value */);
    }
}

/* 12.4 fixed-point */
static unsigned radv_pack_float_12p4(float x)
{
    return x <= 0    ? 0 :
           x >= 4096 ? 0xffff : x * 16;
}
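
/* Worked example (illustrative only): 8.0f packs to 8 * 16 = 0x0080, i.e. 8.0
 * in 12.4 fixed point; 8192/2 = 4096 hits the upper clamp and packs to 0xffff;
 * anything <= 0 packs to 0. These are the values used for PA_SU_POINT_MINMAX
 * below.
 */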

static void
si_set_raster_config(struct radv_physical_device *physical_device,
                     struct radeon_cmdbuf *cs)
{
    unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
    unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
    unsigned raster_config, raster_config_1;

    ac_get_raster_config(&physical_device->rad_info,
                         &raster_config,
                         &raster_config_1, NULL);

    /* Always use the default config when all backends are enabled
     * (or when we failed to determine the enabled backends).
     */
    if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
        radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG,
                               raster_config);
        if (physical_device->rad_info.chip_class >= CIK)
            radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
                                   raster_config_1);
    } else {
        si_write_harvested_raster_configs(physical_device, cs,
                                          raster_config, raster_config_1);
    }
}

void
si_emit_graphics(struct radv_physical_device *physical_device,
                 struct radeon_cmdbuf *cs)
{
    int i;

    /* Only SI can disable CLEAR_STATE for now. */
    assert(physical_device->has_clear_state ||
           physical_device->rad_info.chip_class == SI);

    radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
    radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1));
    radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1));

    if (physical_device->has_clear_state) {
        radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
        radeon_emit(cs, 0);
    }

    if (physical_device->rad_info.chip_class <= VI)
        si_set_raster_config(physical_device, cs);

    radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
    if (!physical_device->has_clear_state)
        radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));

    /* FIXME calculate these values somehow ??? */
    if (physical_device->rad_info.chip_class <= VI) {
        radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
        radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
    }

    if (!physical_device->has_clear_state) {
        radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);
        radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
        radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
    }

    radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
    if (!physical_device->has_clear_state)
        radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
    if (physical_device->rad_info.chip_class < CIK)
        radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
                              S_008A14_CLIP_VTX_REORDER_ENA(1));

    radeon_set_context_reg(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
    radeon_set_context_reg(cs, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);

    if (!physical_device->has_clear_state)
        radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);

    /* CLEAR_STATE doesn't clear these correctly on certain generations.
     * I don't know why. Deduced by trial and error.
     */
    if (physical_device->rad_info.chip_class <= CIK) {
        radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
        radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL,
                               S_028204_WINDOW_OFFSET_DISABLE(1));
        radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL,
                               S_028240_WINDOW_OFFSET_DISABLE(1));
        radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
                               S_028244_BR_X(16384) | S_028244_BR_Y(16384));
        radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
        radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
                               S_028034_BR_X(16384) | S_028034_BR_Y(16384));
    }

    if (!physical_device->has_clear_state) {
        for (i = 0; i < 16; i++) {
            radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
            radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
        }
    }

    if (!physical_device->has_clear_state) {
        radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
        radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
        /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
        radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
        radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);
        radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
        radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
        radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
    }

    radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
                           S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
                           S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));

    if (physical_device->rad_info.chip_class >= GFX9) {
        radeon_set_uconfig_reg(cs, R_030920_VGT_MAX_VTX_INDX, ~0);
        radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0);
        radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0);
    } else {
        /* These registers, when written, also overwrite the
         * CLEAR_STATE context, so we can't rely on CLEAR_STATE setting
         * them. It would be an issue if there was another UMD
         * changing them.
         */
        radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
        radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
        radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
    }

    if (physical_device->rad_info.chip_class >= CIK) {
        if (physical_device->rad_info.chip_class >= GFX9) {
            radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
                              S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F));
        } else {
            radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS,
                              S_00B51C_CU_EN(0xffff) | S_00B51C_WAVE_LIMIT(0x3F));
            radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
                              S_00B41C_WAVE_LIMIT(0x3F));
            radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES,
                              S_00B31C_CU_EN(0xffff) | S_00B31C_WAVE_LIMIT(0x3F));
            /* If this is 0, Bonaire can hang even if GS isn't being used.
             * Other chips are unaffected. These are suboptimal values,
             * but we don't use on-chip GS.
             */
            radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
                                   S_028A44_ES_VERTS_PER_SUBGRP(64) |
                                   S_028A44_GS_PRIMS_PER_SUBGRP(4));
        }
        radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
                          S_00B21C_CU_EN(0xffff) | S_00B21C_WAVE_LIMIT(0x3F));

        if (physical_device->rad_info.num_good_cu_per_sh <= 4) {
            /* Too few available compute units per SH. Disallowing
             * VS to run on CU0 could hurt us more than late VS
             * allocation would help.
             *
             * LATE_ALLOC_VS = 2 is the highest safe number.
             */
            radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
                              S_00B118_CU_EN(0xffff) | S_00B118_WAVE_LIMIT(0x3F));
            radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2));
        } else {
            /* Set LATE_ALLOC_VS == 31. It should be less than
             * the number of scratch waves. Limitations:
             * - VS can't execute on CU0.
             * - If HS writes outputs to LDS, LS can't execute on CU0.
             */
            radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
                              S_00B118_CU_EN(0xfffe) | S_00B118_WAVE_LIMIT(0x3F));
            radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31));
        }

        radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
                          S_00B01C_CU_EN(0xffff) | S_00B01C_WAVE_LIMIT(0x3F));
    }

    if (physical_device->rad_info.chip_class >= VI) {
        uint32_t vgt_tess_distribution;

        vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
            S_028B50_ACCUM_TRI(11) |
            S_028B50_ACCUM_QUAD(11) |
            S_028B50_DONUT_SPLIT(16);

        if (physical_device->rad_info.family == CHIP_FIJI ||
            physical_device->rad_info.family >= CHIP_POLARIS10)
            vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);

        radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
                               vgt_tess_distribution);
    } else if (!physical_device->has_clear_state) {
        radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
        radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
    }

    if (physical_device->rad_info.chip_class >= GFX9) {
        unsigned num_se = physical_device->rad_info.max_se;
        unsigned pc_lines = 0;

        switch (physical_device->rad_info.family) {
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
            pc_lines = 4096;
            break;
        case CHIP_RAVEN:
            pc_lines = 1024;
            break;
        default:
            assert(0);
        }

        radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1,
                               S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) |
                               S_028C48_MAX_PRIM_PER_BATCH(1023));
        radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
                               S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
        radeon_set_uconfig_reg(cs, R_030968_VGT_INSTANCE_BASE_ID, 0);
    }

    unsigned tmp = (unsigned)(1.0 * 8.0);
    radeon_set_context_reg_seq(cs, R_028A00_PA_SU_POINT_SIZE, 1);
    radeon_emit(cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
    radeon_set_context_reg_seq(cs, R_028A04_PA_SU_POINT_MINMAX, 1);
    radeon_emit(cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
                S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2)));

    if (!physical_device->has_clear_state) {
        radeon_set_context_reg(cs, R_028004_DB_COUNT_CONTROL,
                               S_028004_ZPASS_INCREMENT_DISABLE(1));
    }

    /* Enable the Polaris small primitive filter control.
     * XXX: There is possibly an issue when MSAA is off (see RadeonSI
     * has_msaa_sample_loc_bug). But this doesn't seem to regress anything,
     * and AMDVLK doesn't have a workaround as well.
     */
    if (physical_device->rad_info.family >= CHIP_POLARIS10) {
        unsigned small_prim_filter_cntl =
            S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
            /* Workaround for a hw line bug. */
            S_028830_LINE_FILTER_DISABLE(physical_device->rad_info.family <= CHIP_POLARIS12);

        radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL,
                               small_prim_filter_cntl);
    }

    si_emit_compute(physical_device, cs);
}

void
cik_create_gfx_config(struct radv_device *device)
{
    struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, RING_GFX);
    if (!cs)
        return;

    si_emit_graphics(device->physical_device, cs);

    while (cs->cdw & 7) {
        if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
            radeon_emit(cs, 0x80000000);
        else
            radeon_emit(cs, 0xffff1000);
    }

    device->gfx_init = device->ws->buffer_create(device->ws,
                                                 cs->cdw * 4, 4096,
                                                 RADEON_DOMAIN_GTT,
                                                 RADEON_FLAG_CPU_ACCESS |
                                                 RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                                 RADEON_FLAG_READ_ONLY);
    if (!device->gfx_init)
        goto fail;

    void *map = device->ws->buffer_map(device->gfx_init);
    if (!map) {
        device->ws->buffer_destroy(device->gfx_init);
        device->gfx_init = NULL;
        goto fail;
    }

    memcpy(map, cs->buf, cs->cdw * 4);

    device->ws->buffer_unmap(device->gfx_init);
    device->gfx_init_size_dw = cs->cdw;
fail:
    device->ws->cs_destroy(cs);
}

static void
get_viewport_xform(const VkViewport *viewport,
                   float scale[3], float translate[3])
{
    float x = viewport->x;
    float y = viewport->y;
    float half_width = 0.5f * viewport->width;
    float half_height = 0.5f * viewport->height;
    double n = viewport->minDepth;
    double f = viewport->maxDepth;

    scale[0] = half_width;
    translate[0] = half_width + x;
    scale[1] = half_height;
    translate[1] = half_height + y;

    scale[2] = (f - n);
    translate[2] = n;
}

void
si_write_viewport(struct radeon_cmdbuf *cs, int first_vp,
                  int count, const VkViewport *viewports)
{
    int i;

    assert(count);
    radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
                               first_vp * 4 * 6, count * 6);

    for (i = 0; i < count; i++) {
        float scale[3], translate[3];

        get_viewport_xform(&viewports[i], scale, translate);
        radeon_emit(cs, fui(scale[0]));
        radeon_emit(cs, fui(translate[0]));
        radeon_emit(cs, fui(scale[1]));
        radeon_emit(cs, fui(translate[1]));
        radeon_emit(cs, fui(scale[2]));
        radeon_emit(cs, fui(translate[2]));
    }

    radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 +
                               first_vp * 4 * 2, count * 2);
    for (i = 0; i < count; i++) {
        float zmin = MIN2(viewports[i].minDepth, viewports[i].maxDepth);
        float zmax = MAX2(viewports[i].minDepth, viewports[i].maxDepth);
        radeon_emit(cs, fui(zmin));
        radeon_emit(cs, fui(zmax));
    }
}

static VkRect2D si_scissor_from_viewport(const VkViewport *viewport)
{
    float scale[3], translate[3];
    VkRect2D rect;

    get_viewport_xform(viewport, scale, translate);

    rect.offset.x = translate[0] - fabs(scale[0]);
    rect.offset.y = translate[1] - fabs(scale[1]);
    rect.extent.width = ceilf(translate[0] + fabs(scale[0])) - rect.offset.x;
    rect.extent.height = ceilf(translate[1] + fabs(scale[1])) - rect.offset.y;

    return rect;
}

static VkRect2D si_intersect_scissor(const VkRect2D *a, const VkRect2D *b)
{
    VkRect2D ret;
    ret.offset.x = MAX2(a->offset.x, b->offset.x);
    ret.offset.y = MAX2(a->offset.y, b->offset.y);
    ret.extent.width = MIN2(a->offset.x + a->extent.width,
                            b->offset.x + b->extent.width) - ret.offset.x;
    ret.extent.height = MIN2(a->offset.y + a->extent.height,
                             b->offset.y + b->extent.height) - ret.offset.y;
    return ret;
}

void
si_write_scissors(struct radeon_cmdbuf *cs, int first,
                  int count, const VkRect2D *scissors,
                  const VkViewport *viewports, bool can_use_guardband)
{
    int i;
    float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY;
    const float max_range = 32767.0f;
    if (!count)
        return;

    radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
    for (i = 0; i < count; i++) {
        VkRect2D viewport_scissor = si_scissor_from_viewport(viewports + i);
        VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);

        get_viewport_xform(viewports + i, scale, translate);
        scale[0] = fabsf(scale[0]);
        scale[1] = fabsf(scale[1]);

        guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
        guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);

        radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
                    S_028250_TL_Y(scissor.offset.y) |
                    S_028250_WINDOW_OFFSET_DISABLE(1));
        radeon_emit(cs, S_028254_BR_X(scissor.offset.x + scissor.extent.width) |
                    S_028254_BR_Y(scissor.offset.y + scissor.extent.height));
    }
    if (!can_use_guardband) {
        guardband_x = 1.0;
        guardband_y = 1.0;
    }

    radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
    radeon_emit(cs, fui(guardband_y));
    radeon_emit(cs, fui(1.0));
    radeon_emit(cs, fui(guardband_x));
    radeon_emit(cs, fui(1.0));
}
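
/* Guardband example (illustrative only): for a single 1920x1080 viewport at
 * the origin, scale[0] = 960 and translate[0] = 960, so
 *     guardband_x = (32767 - 960) / 960 ≈ 33.1
 * meaning primitives may extend roughly 33 viewport half-widths past the
 * viewport before clipping becomes necessary, given the max_range used above.
 */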

static inline unsigned
radv_prims_for_vertices(struct radv_prim_vertex_count *info, unsigned num)
{
    if (num == 0)
        return 0;

    if (info->incr == 0)
        return 0;

    if (num < info->min)
        return 0;

    return 1 + ((num - info->min) / info->incr);
}
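
/* Example (illustrative only, assuming the usual encodings of min/incr): a
 * triangle list uses { .min = 3, .incr = 3 }, so 9 vertices give
 * 1 + (9 - 3) / 3 = 3 primitives; a triangle strip uses { .min = 3, .incr = 1 },
 * so 9 vertices give 1 + (9 - 3) / 1 = 7 primitives.
 */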

uint32_t
si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
                          bool instanced_draw, bool indirect_draw,
                          uint32_t draw_vertex_count)
{
    enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
    enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
    struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
    const unsigned max_primgroup_in_wave = 2;
    /* SWITCH_ON_EOP(0) is always preferable. */
    bool wd_switch_on_eop = false;
    bool ia_switch_on_eop = false;
    bool ia_switch_on_eoi = false;
    bool partial_vs_wave = false;
    bool partial_es_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_es_wave;
    bool multi_instances_smaller_than_primgroup;

    multi_instances_smaller_than_primgroup = indirect_draw;
    if (!multi_instances_smaller_than_primgroup && instanced_draw) {
        uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
        if (num_prims < cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.primgroup_size)
            multi_instances_smaller_than_primgroup = true;
    }

    ia_switch_on_eoi = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.ia_switch_on_eoi;
    partial_vs_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_vs_wave;

    if (chip_class >= CIK) {
        wd_switch_on_eop = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.wd_switch_on_eop;

        /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
         * We don't know that for indirect drawing, so treat it as
         * always problematic. */
        if (family == CHIP_HAWAII &&
            (instanced_draw || indirect_draw))
            wd_switch_on_eop = true;

        /* Performance recommendation for 4 SE Gfx7-8 parts if
         * instances are smaller than a primgroup.
         * Assume indirect draws always use small instances.
         * This is needed for good VS wave utilization.
         */
        if (chip_class <= VI &&
            info->max_se == 4 &&
            multi_instances_smaller_than_primgroup)
            wd_switch_on_eop = true;

        /* Required on CIK and later. */
        if (info->max_se > 2 && !wd_switch_on_eop)
            ia_switch_on_eoi = true;

        /* Required by Hawaii and, for some special cases, by VI. */
        if (ia_switch_on_eoi &&
            (family == CHIP_HAWAII ||
             (chip_class == VI &&
              /* max primgroup in wave is always 2 - leave this for documentation */
              (radv_pipeline_has_gs(cmd_buffer->state.pipeline) || max_primgroup_in_wave != 2))))
            partial_vs_wave = true;

        /* Instancing bug on Bonaire. */
        if (family == CHIP_BONAIRE && ia_switch_on_eoi &&
            (instanced_draw || indirect_draw))
            partial_vs_wave = true;

        /* If the WD switch is false, the IA switch must be false too. */
        assert(wd_switch_on_eop || !ia_switch_on_eop);
    }
    /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
    if (chip_class <= VI && ia_switch_on_eoi)
        partial_es_wave = true;

    if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
        /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
         * The hw doc says all multi-SE chips are affected, but amdgpu-pro Vulkan
         * only applies it to Hawaii. Do what amdgpu-pro Vulkan does.
         */
        if (family == CHIP_HAWAII && ia_switch_on_eoi) {
            bool set_vgt_flush = indirect_draw;
            if (!set_vgt_flush && instanced_draw) {
                uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
                if (num_prims <= 1)
                    set_vgt_flush = true;
            }
            if (set_vgt_flush)
                cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
        }
    }

    return cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.base |
           S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
           S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
           S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
           S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
           S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0);
}

void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
                                enum chip_class chip_class,
                                bool is_mec,
                                unsigned event, unsigned event_flags,
                                unsigned data_sel,
                                uint64_t va,
                                uint32_t old_fence,
                                uint32_t new_fence,
                                uint64_t gfx9_eop_bug_va)
{
    unsigned op = EVENT_TYPE(event) |
                  EVENT_INDEX(5) |
                  event_flags;
    unsigned is_gfx8_mec = is_mec && chip_class < GFX9;
    unsigned sel = EOP_DATA_SEL(data_sel);

    /* Wait for write confirmation before writing data, but don't send
     * an interrupt. */
    if (data_sel != EOP_DATA_SEL_DISCARD)
        sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);

    if (chip_class >= GFX9 || is_gfx8_mec) {
        /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
         * counters) must immediately precede every timestamp event to
         * prevent a GPU hang on GFX9.
         */
        if (chip_class == GFX9 && !is_mec) {
            radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
            radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
            radeon_emit(cs, gfx9_eop_bug_va);
            radeon_emit(cs, gfx9_eop_bug_va >> 32);
        }

        radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, false));
        radeon_emit(cs, op);
        radeon_emit(cs, sel);
        radeon_emit(cs, va);            /* address lo */
        radeon_emit(cs, va >> 32);      /* address hi */
        radeon_emit(cs, new_fence);     /* immediate data lo */
        radeon_emit(cs, 0); /* immediate data hi */
        if (!is_gfx8_mec)
            radeon_emit(cs, 0); /* unused */
    } else {
        if (chip_class == CIK ||
            chip_class == VI) {
            /* Two EOP events are required to make all engines go idle
             * (and optional cache flushes executed) before the timestamp
             * is written.
             */
            radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
            radeon_emit(cs, op);
            radeon_emit(cs, va);
            radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
            radeon_emit(cs, old_fence); /* immediate data */
            radeon_emit(cs, 0); /* unused */
        }

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
        radeon_emit(cs, op);
        radeon_emit(cs, va);
        radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
        radeon_emit(cs, new_fence); /* immediate data */
        radeon_emit(cs, 0); /* unused */
    }
}

void
radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va,
                 uint32_t ref, uint32_t mask)
{
    assert(op == WAIT_REG_MEM_EQUAL ||
           op == WAIT_REG_MEM_NOT_EQUAL ||
           op == WAIT_REG_MEM_GREATER_OR_EQUAL);

    radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
    radeon_emit(cs, op | WAIT_REG_MEM_MEM_SPACE(1));
    radeon_emit(cs, va);
    radeon_emit(cs, va >> 32);
    radeon_emit(cs, ref);  /* reference value */
    radeon_emit(cs, mask); /* mask */
    radeon_emit(cs, 4);    /* poll interval */
}

static void
si_emit_acquire_mem(struct radeon_cmdbuf *cs,
                    bool is_mec,
                    bool is_gfx9,
                    unsigned cp_coher_cntl)
{
    if (is_mec || is_gfx9) {
        uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
        radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, false) |
                        PKT3_SHADER_TYPE_S(is_mec));
        radeon_emit(cs, cp_coher_cntl);   /* CP_COHER_CNTL */
        radeon_emit(cs, 0xffffffff);      /* CP_COHER_SIZE */
        radeon_emit(cs, hi_val);          /* CP_COHER_SIZE_HI */
        radeon_emit(cs, 0);               /* CP_COHER_BASE */
        radeon_emit(cs, 0);               /* CP_COHER_BASE_HI */
        radeon_emit(cs, 0x0000000A);      /* POLL_INTERVAL */
    } else {
        /* ACQUIRE_MEM is only required on a compute ring. */
        radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, false));
        radeon_emit(cs, cp_coher_cntl);   /* CP_COHER_CNTL */
        radeon_emit(cs, 0xffffffff);      /* CP_COHER_SIZE */
        radeon_emit(cs, 0);               /* CP_COHER_BASE */
        radeon_emit(cs, 0x0000000A);      /* POLL_INTERVAL */
    }
}

void
si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
                       enum chip_class chip_class,
                       uint32_t *flush_cnt,
                       uint64_t flush_va,
                       bool is_mec,
                       enum radv_cmd_flush_bits flush_bits,
                       uint64_t gfx9_eop_bug_va)
{
    unsigned cp_coher_cntl = 0;
    uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                         RADV_CMD_FLAG_FLUSH_AND_INV_DB);

    if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
        cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
    if (flush_bits & RADV_CMD_FLAG_INV_SMEM_L1)
        cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

    if (chip_class <= VI) {
        if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
            cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
                S_0085F0_CB0_DEST_BASE_ENA(1) |
                S_0085F0_CB1_DEST_BASE_ENA(1) |
                S_0085F0_CB2_DEST_BASE_ENA(1) |
                S_0085F0_CB3_DEST_BASE_ENA(1) |
                S_0085F0_CB4_DEST_BASE_ENA(1) |
                S_0085F0_CB5_DEST_BASE_ENA(1) |
                S_0085F0_CB6_DEST_BASE_ENA(1) |
                S_0085F0_CB7_DEST_BASE_ENA(1);

            /* Necessary for DCC */
            if (chip_class >= VI) {
                si_cs_emit_write_event_eop(cs,
                                           chip_class,
                                           is_mec,
                                           V_028A90_FLUSH_AND_INV_CB_DATA_TS,
                                           0,
                                           EOP_DATA_SEL_DISCARD,
                                           0, 0, 0,
                                           gfx9_eop_bug_va);
            }
        }
        if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
            cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
                S_0085F0_DB_DEST_BASE_ENA(1);
        }
    }

    if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
    }

    if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
    }

    if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
    } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
    }

    if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
    }

    if (chip_class >= GFX9 && flush_cb_db) {
        unsigned cb_db_event, tc_flags;

        /* Set the CB/DB flush event. */
        cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;

        /* These are the only allowed combinations. If you need to
         * do multiple operations at once, do them separately.
         * All operations that invalidate L2 also seem to invalidate
         * metadata. Volatile (VOL) and WC flushes are not listed here.
         *
         * TC    | TC_WB         = writeback & invalidate L2 & L1
         * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
         *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
         * TC            | TC_NC = invalidate L2 for MTYPE == NC
         * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
         * TCL1                  = invalidate L1
         */
        tc_flags = EVENT_TC_ACTION_ENA |
                   EVENT_TC_MD_ACTION_ENA;

        /* Ideally flush TC together with CB/DB. */
        if (flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) {
            /* Writeback and invalidate everything in L2 & L1. */
            tc_flags = EVENT_TC_ACTION_ENA |
                       EVENT_TC_WB_ACTION_ENA;

            /* Clear the flags. */
            flush_bits &= ~(RADV_CMD_FLAG_INV_GLOBAL_L2 |
                            RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2 |
                            RADV_CMD_FLAG_INV_VMEM_L1);
        }
        assert(flush_cnt);
        uint32_t old_fence = (*flush_cnt)++;

        si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags,
                                   EOP_DATA_SEL_VALUE_32BIT,
                                   flush_va, old_fence, *flush_cnt,
                                   gfx9_eop_bug_va);
        radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
                         *flush_cnt, 0xffffffff);
    }

    if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
    }

    /* VGT streamout state sync */
    if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) {
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
    }

    /* Make sure ME is idle (it executes most packets) before continuing.
     * This prevents read-after-write hazards between PFP and ME.
     */
    if ((cp_coher_cntl ||
         (flush_bits & (RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                        RADV_CMD_FLAG_INV_VMEM_L1 |
                        RADV_CMD_FLAG_INV_GLOBAL_L2 |
                        RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) &&
        !is_mec) {
        radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
        radeon_emit(cs, 0);
    }

    if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
        (chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
        si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9,
                            cp_coher_cntl |
                            S_0085F0_TC_ACTION_ENA(1) |
                            S_0085F0_TCL1_ACTION_ENA(1) |
                            S_0301F0_TC_WB_ACTION_ENA(chip_class >= VI));
        cp_coher_cntl = 0;
    } else {
        if (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2) {
            /* WB = write-back
             * NC = apply to non-coherent MTYPEs
             *      (i.e. MTYPE <= 1, which is what we use everywhere)
             *
             * WB doesn't work without NC.
             */
            si_emit_acquire_mem(cs, is_mec,
                                chip_class >= GFX9,
                                cp_coher_cntl |
                                S_0301F0_TC_WB_ACTION_ENA(1) |
                                S_0301F0_TC_NC_ACTION_ENA(1));
            cp_coher_cntl = 0;
        }
        if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
            si_emit_acquire_mem(cs, is_mec,
                                chip_class >= GFX9,
                                cp_coher_cntl |
                                S_0085F0_TCL1_ACTION_ENA(1));
            cp_coher_cntl = 0;
        }
    }

    /* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
     * Therefore, it should be last. Done in PFP.
     */
    if (cp_coher_cntl)
        si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, cp_coher_cntl);

    if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
                    EVENT_INDEX(0));
    } else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
        radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
                    EVENT_INDEX(0));
    }
}

void
si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
{
    bool is_compute = cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE;

    if (is_compute)
        cmd_buffer->state.flush_bits &= ~(RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                          RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
                                          RADV_CMD_FLAG_FLUSH_AND_INV_DB |
                                          RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
                                          RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                          RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
                                          RADV_CMD_FLAG_VGT_FLUSH |
                                          RADV_CMD_FLAG_START_PIPELINE_STATS |
                                          RADV_CMD_FLAG_STOP_PIPELINE_STATS);

    if (!cmd_buffer->state.flush_bits)
        return;

    enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
    radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);

    uint64_t va = 0;
    uint32_t *ptr = NULL;

    if (chip_class == GFX9) {
        va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset;
        ptr = &cmd_buffer->gfx9_fence_idx;
    }
    si_cs_emit_cache_flush(cmd_buffer->cs,
                           cmd_buffer->device->physical_device->rad_info.chip_class,
                           ptr, va,
                           radv_cmd_buffer_uses_mec(cmd_buffer),
                           cmd_buffer->state.flush_bits,
                           cmd_buffer->gfx9_eop_bug_va);

    if (unlikely(cmd_buffer->device->trace_bo))
        radv_cmd_buffer_trace_emit(cmd_buffer);

    cmd_buffer->state.flush_bits = 0;
}

/* sets the CP predication state using a boolean stored at va */
void
si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer,
                              bool draw_visible, uint64_t va)
{
    uint32_t op = 0;

    if (va) {
        op = PRED_OP(PREDICATION_OP_BOOL64);

        /* PREDICATION_DRAW_VISIBLE means that if the 32-bit value is
         * zero, all rendering commands are discarded. Otherwise, they
         * are discarded if the value is non zero.
         */
        op |= draw_visible ? PREDICATION_DRAW_VISIBLE :
                             PREDICATION_DRAW_NOT_VISIBLE;
    }
    if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
        radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
        radeon_emit(cmd_buffer->cs, op);
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, va >> 32);
    } else {
        radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, op | ((va >> 32) & 0xFF));
    }
}

/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC     (1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT (1 << 1)
#define CP_DMA_USE_L2   (1 << 2)
#define CP_DMA_CLEAR    (1 << 3)

/* Alignment for optimal performance. */
#define SI_CPDMA_ALIGNMENT 32

/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct radv_cmd_buffer *cmd_buffer)
{
    unsigned max = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 ?
                       S_414_BYTE_COUNT_GFX9(~0u) :
                       S_414_BYTE_COUNT_GFX6(~0u);

    /* make it aligned for optimal performance */
    return max & ~(SI_CPDMA_ALIGNMENT - 1);
}
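
/* Example (illustrative only): on GFX6-8 the byte count must fit in bits
 * [20:0] (see the comment above si_emit_cp_dma below), so the clamp above
 * yields 0x1fffff & ~(32 - 1) = 0x1fffe0 bytes per packet; GFX9 uses a wider
 * byte-count field and therefore allows correspondingly larger packets.
 */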

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
 * clear value.
 */
static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer,
                           uint64_t dst_va, uint64_t src_va,
                           unsigned size, unsigned flags)
{
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
    uint32_t header = 0, command = 0;

    assert(size <= cp_dma_max_byte_count(cmd_buffer));

    radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
    if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
        command |= S_414_BYTE_COUNT_GFX9(size);
    else
        command |= S_414_BYTE_COUNT_GFX6(size);

    /* Sync flags. */
    if (flags & CP_DMA_SYNC)
        header |= S_411_CP_SYNC(1);
    else {
        if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
            command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
        else
            command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
    }

    if (flags & CP_DMA_RAW_WAIT)
        command |= S_414_RAW_WAIT(1);

    /* Src and dst flags. */
    if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
        !(flags & CP_DMA_CLEAR) &&
        src_va == dst_va)
        header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
    else if (flags & CP_DMA_USE_L2)
        header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2);

    if (flags & CP_DMA_CLEAR)
        header |= S_411_SRC_SEL(V_411_DATA);
    else if (flags & CP_DMA_USE_L2)
        header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);

    if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
        radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, cmd_buffer->state.predicating));
        radeon_emit(cs, header);
        radeon_emit(cs, src_va);                  /* SRC_ADDR_LO [31:0] */
        radeon_emit(cs, src_va >> 32);            /* SRC_ADDR_HI [31:0] */
        radeon_emit(cs, dst_va);                  /* DST_ADDR_LO [31:0] */
        radeon_emit(cs, dst_va >> 32);            /* DST_ADDR_HI [31:0] */
        radeon_emit(cs, command);
    } else {
        assert(!(flags & CP_DMA_USE_L2));
        header |= S_411_SRC_ADDR_HI(src_va >> 32);
        radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, cmd_buffer->state.predicating));
        radeon_emit(cs, src_va);                  /* SRC_ADDR_LO [31:0] */
        radeon_emit(cs, header);                  /* SRC_ADDR_HI [15:0] + flags. */
        radeon_emit(cs, dst_va);                  /* DST_ADDR_LO [31:0] */
        radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
        radeon_emit(cs, command);
    }

    /* CP DMA is executed in ME, but index buffers are read by PFP.
     * This ensures that ME (CP DMA) is idle before PFP starts fetching
     * indices. If we wanted to execute CP DMA in PFP, this packet
     * should precede it.
     */
    if (flags & CP_DMA_SYNC) {
        if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
            radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
            radeon_emit(cs, 0);
        }

        /* CP will see the sync flag and wait for all DMAs to complete. */
        cmd_buffer->state.dma_is_busy = false;
    }

    if (unlikely(cmd_buffer->device->trace_bo))
        radv_cmd_buffer_trace_emit(cmd_buffer);
}

void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
                        unsigned size)
{
    uint64_t aligned_va = va & ~(SI_CPDMA_ALIGNMENT - 1);
    uint64_t aligned_size = ((va + size + SI_CPDMA_ALIGNMENT - 1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligned_va;

    si_emit_cp_dma(cmd_buffer, aligned_va, aligned_va,
                   aligned_size, CP_DMA_USE_L2);
}
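
/* Example (illustrative only): prefetching va = 0x100123, size = 0x40 with
 * SI_CPDMA_ALIGNMENT = 32 gives aligned_va = 0x100120 and
 * aligned_size = ((0x100123 + 0x40 + 31) & ~31) - 0x100120 = 0x60, i.e. the
 * three 32-byte blocks touched by the range are pulled into L2 via a
 * same-source/same-destination CP DMA.
 */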

static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
                              uint64_t remaining_size, unsigned *flags)
{
    /* Flush the caches for the first copy only.
     * Also wait for the previous CP DMA operations.
     */
    if (cmd_buffer->state.flush_bits) {
        si_emit_cache_flush(cmd_buffer);
        *flags |= CP_DMA_RAW_WAIT;
    }

    /* Do the synchronization after the last dma, so that all data
     * is written to memory.
     */
    if (byte_count == remaining_size)
        *flags |= CP_DMA_SYNC;
}

static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
{
    uint64_t va;
    uint32_t offset;
    unsigned dma_flags = 0;
    unsigned buf_size = SI_CPDMA_ALIGNMENT * 2;
    void *ptr;

    assert(size < SI_CPDMA_ALIGNMENT);

    radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr);

    va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
    va += offset;

    si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);

    si_emit_cp_dma(cmd_buffer, va, va + SI_CPDMA_ALIGNMENT, size,
                   dma_flags);
}

void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
                           uint64_t src_va, uint64_t dest_va,
                           uint64_t size)
{
    uint64_t main_src_va, main_dest_va;
    uint64_t skipped_size = 0, realign_size = 0;

    /* Assume that we are not going to sync after the last DMA operation. */
    cmd_buffer->state.dma_is_busy = true;

    if (cmd_buffer->device->physical_device->rad_info.family <= CHIP_CARRIZO ||
        cmd_buffer->device->physical_device->rad_info.family == CHIP_STONEY) {
        /* If the size is not aligned, we must add a dummy copy at the end
         * just to align the internal counter. Otherwise, the DMA engine
         * would slow down by an order of magnitude for following copies.
         */
        if (size % SI_CPDMA_ALIGNMENT)
            realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

        /* If the copy begins unaligned, we must start copying from the next
         * aligned block and the skipped part should be copied after everything
         * else has been copied. Only the src alignment matters, not dst.
         */
        if (src_va % SI_CPDMA_ALIGNMENT) {
            skipped_size = SI_CPDMA_ALIGNMENT - (src_va % SI_CPDMA_ALIGNMENT);
            /* The main part will be skipped if the size is too small. */
            skipped_size = MIN2(skipped_size, size);
            size -= skipped_size;
        }
    }
    main_src_va = src_va + skipped_size;
    main_dest_va = dest_va + skipped_size;

    while (size) {
        unsigned dma_flags = 0;
        unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));

        si_cp_dma_prepare(cmd_buffer, byte_count,
                          size + skipped_size + realign_size,
                          &dma_flags);

        dma_flags &= ~CP_DMA_SYNC;

        si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
                       byte_count, dma_flags);

        size -= byte_count;
        main_src_va += byte_count;
        main_dest_va += byte_count;
    }

    if (skipped_size) {
        unsigned dma_flags = 0;

        si_cp_dma_prepare(cmd_buffer, skipped_size,
                          size + skipped_size + realign_size,
                          &dma_flags);

        si_emit_cp_dma(cmd_buffer, dest_va, src_va,
                       skipped_size, dma_flags);
    }
    if (realign_size)
        si_cp_dma_realign_engine(cmd_buffer, realign_size);
}
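
/* Example (illustrative only, Carrizo/Stoney path): copying size = 100 bytes
 * from src_va = 0x1004 gives skipped_size = 32 - (0x1004 % 32) = 28, so the
 * main loop copies the remaining 72 bytes starting from the aligned address
 * 0x1020, the 28 leading bytes are copied afterwards, and
 * realign_size = 32 - (100 % 32) = 28 triggers the final dummy copy that keeps
 * the engine's internal counter aligned.
 */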

void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
                            uint64_t size, unsigned value)
{
    if (!size)
        return;

    assert(va % 4 == 0 && size % 4 == 0);

    /* Assume that we are not going to sync after the last DMA operation. */
    cmd_buffer->state.dma_is_busy = true;

    while (size) {
        unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
        unsigned dma_flags = CP_DMA_CLEAR;

        si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);

        /* Emit the clear packet. */
        si_emit_cp_dma(cmd_buffer, va, value, byte_count,
                       dma_flags);

        size -= byte_count;
        va += byte_count;
    }
}

void si_cp_dma_wait_for_idle(struct radv_cmd_buffer *cmd_buffer)
{
    if (cmd_buffer->device->physical_device->rad_info.chip_class < CIK)
        return;

    if (!cmd_buffer->state.dma_is_busy)
        return;

    /* Issue a dummy DMA that copies zero bytes.
     *
     * The DMA engine will see that there's no work to do and skip this
     * DMA request, however, the CP will see the sync flag and still wait
     * for all DMAs to complete.
     */
    si_emit_cp_dma(cmd_buffer, 0, 0, 0, CP_DMA_SYNC);

    cmd_buffer->state.dma_is_busy = false;
}

/* For MSAA sample positions. */
#define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y)                      \
    (((s0x) & 0xf) | (((unsigned)(s0y) & 0xf) << 4) |                          \
    (((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) |         \
    (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) |        \
    (((unsigned)(s3x) & 0xf) << 24) | (((unsigned)(s3y) & 0xf) << 28))
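
/* Example (illustrative only): FILL_SREG stores each signed sample offset in a
 * 4-bit two's-complement nibble, so 4 encodes as 0x4 and -4 as 0xc; the first
 * two samples of FILL_SREG(4, 4, -4, -4, ...) therefore pack to 0xcc44 in the
 * low 16 bits of the register value.
 */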

/* 2xMSAA
 * There are two locations (4, 4), (-4, -4). */
const uint32_t eg_sample_locs_2x[4] = {
    FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
    FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
    FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
    FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
};
const unsigned eg_max_dist_2x = 4;
/* 4xMSAA
 * There are 4 locations: (-2, -6), (6, -2), (-6, 2), (2, 6). */
const uint32_t eg_sample_locs_4x[4] = {
    FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
    FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
    FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
    FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
};
const unsigned eg_max_dist_4x = 6;

/* Cayman 8xMSAA */
static const uint32_t cm_sample_locs_8x[] = {
    FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
    FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
    FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
    FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
    FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
    FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
    FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
    FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
};
static const unsigned cm_max_dist_8x = 8;
/* Cayman 16xMSAA */
static const uint32_t cm_sample_locs_16x[] = {
    FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
    FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
    FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
    FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
    FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
    FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
    FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
    FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
    FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
    FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
    FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
    FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
    FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
    FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
    FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
    FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
};
static const unsigned cm_max_dist_16x = 8;

unsigned radv_cayman_get_maxdist(int log_samples)
{
    unsigned max_dist[] = {
        0,
        eg_max_dist_2x,
        eg_max_dist_4x,
        cm_max_dist_8x,
        cm_max_dist_16x
    };
    return max_dist[log_samples];
}

void radv_cayman_emit_msaa_sample_locs(struct radeon_cmdbuf *cs, int nr_samples)
{
    switch (nr_samples) {
    default:
    case 1:
        radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0);
        radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0);
        radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0);
        radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0);
        break;
    case 2:
        radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
        radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
        radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
        radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
        break;
    case 4:
        radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
        radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
        radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
        radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
        break;
    case 8:
        radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
        radeon_emit(cs, cm_sample_locs_8x[0]);
        radeon_emit(cs, cm_sample_locs_8x[4]);
        radeon_emit(cs, 0);
        radeon_emit(cs, 0);
        radeon_emit(cs, cm_sample_locs_8x[1]);
        radeon_emit(cs, cm_sample_locs_8x[5]);
        radeon_emit(cs, 0);
        radeon_emit(cs, 0);
        radeon_emit(cs, cm_sample_locs_8x[2]);
        radeon_emit(cs, cm_sample_locs_8x[6]);
        radeon_emit(cs, 0);
        radeon_emit(cs, 0);
        radeon_emit(cs, cm_sample_locs_8x[3]);
        radeon_emit(cs, cm_sample_locs_8x[7]);
        break;
    case 16:
        radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
        radeon_emit(cs, cm_sample_locs_16x[0]);
        radeon_emit(cs, cm_sample_locs_16x[4]);
        radeon_emit(cs, cm_sample_locs_16x[8]);
        radeon_emit(cs, cm_sample_locs_16x[12]);
        radeon_emit(cs, cm_sample_locs_16x[1]);
        radeon_emit(cs, cm_sample_locs_16x[5]);
        radeon_emit(cs, cm_sample_locs_16x[9]);
        radeon_emit(cs, cm_sample_locs_16x[13]);
        radeon_emit(cs, cm_sample_locs_16x[2]);
        radeon_emit(cs, cm_sample_locs_16x[6]);
        radeon_emit(cs, cm_sample_locs_16x[10]);
        radeon_emit(cs, cm_sample_locs_16x[14]);
        radeon_emit(cs, cm_sample_locs_16x[3]);
        radeon_emit(cs, cm_sample_locs_16x[7]);
        radeon_emit(cs, cm_sample_locs_16x[11]);
        radeon_emit(cs, cm_sample_locs_16x[15]);
        break;
    }
}

static void radv_cayman_get_sample_position(struct radv_device *device,
                                            unsigned sample_count,
                                            unsigned sample_index, float *out_value)
{
    int offset, index;
    struct {
        int idx:4;
    } val;

    switch (sample_count) {
    case 1:
    default:
        out_value[0] = out_value[1] = 0.5;
        break;
    case 2:
        offset = 4 * (sample_index * 2);
        val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
        out_value[0] = (float)(val.idx + 8) / 16.0f;
        val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
        out_value[1] = (float)(val.idx + 8) / 16.0f;
        break;
    case 4:
        offset = 4 * (sample_index * 2);
        val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
        out_value[0] = (float)(val.idx + 8) / 16.0f;
        val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
        out_value[1] = (float)(val.idx + 8) / 16.0f;
        break;
    case 8:
        offset = 4 * (sample_index % 4 * 2);
        index = (sample_index / 4) * 4;
        val.idx = (cm_sample_locs_8x[index] >> offset) & 0xf;
        out_value[0] = (float)(val.idx + 8) / 16.0f;
        val.idx = (cm_sample_locs_8x[index] >> (offset + 4)) & 0xf;
        out_value[1] = (float)(val.idx + 8) / 16.0f;
        break;
    case 16:
        offset = 4 * (sample_index % 4 * 2);
        index = (sample_index / 4) * 4;
        val.idx = (cm_sample_locs_16x[index] >> offset) & 0xf;
        out_value[0] = (float)(val.idx + 8) / 16.0f;
        val.idx = (cm_sample_locs_16x[index] >> (offset + 4)) & 0xf;
        out_value[1] = (float)(val.idx + 8) / 16.0f;
        break;
    }
}
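
/* Example (illustrative only): for 2x MSAA, sample 0 decodes the low byte of
 * eg_sample_locs_2x[0]; the 4-bit bitfield sign-extends to val.idx = 4 for
 * both x and y, so out_value = { (4 + 8) / 16.0f, (4 + 8) / 16.0f } =
 * { 0.75, 0.75 }, and sample 1 decodes -4 to { 0.25, 0.25 }, i.e. the grid
 * offsets expressed as fractions of the pixel.
 */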

void radv_device_init_msaa(struct radv_device *device)
{
    int i;

    radv_cayman_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);

    for (i = 0; i < 2; i++)
        radv_cayman_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
    for (i = 0; i < 4; i++)
        radv_cayman_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
    for (i = 0; i < 8; i++)
        radv_cayman_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
    for (i = 0; i < 16; i++)
        radv_cayman_get_sample_position(device, 16, i, device->sample_locations_16x[i]);
}