/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/* command buffer handling for SI */

#include "radv_private.h"
#include "radv_cs.h"
#include "sid.h"
#include "radv_util.h"
#include "main/macros.h"

#define SI_GS_PER_ES 128
static void
si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
                                  struct radeon_winsys_cs *cs,
                                  unsigned raster_config,
                                  unsigned raster_config_1)
{
        unsigned sh_per_se = MAX2(physical_device->rad_info.max_sh_per_se, 1);
        unsigned num_se = MAX2(physical_device->rad_info.max_se, 1);
        unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
        unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
        unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
        unsigned rb_per_se = num_rb / num_se;
        unsigned se_mask[4];
        unsigned se;

        se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
        se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
        se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
        se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

        assert(num_se == 1 || num_se == 2 || num_se == 4);
        assert(sh_per_se == 1 || sh_per_se == 2);
        assert(rb_per_pkr == 1 || rb_per_pkr == 2);

        /* XXX: I can't figure out what the *_XSEL and *_YSEL
         * fields are for, so I'm leaving them as their default
         * values. */

        if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
                             (!se_mask[2] && !se_mask[3]))) {
                raster_config_1 &= C_028354_SE_PAIR_MAP;

                if (!se_mask[0] && !se_mask[1]) {
                        raster_config_1 |=
                                S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3);
                } else {
                        raster_config_1 |=
                                S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0);
                }
        }

        for (se = 0; se < num_se; se++) {
                unsigned raster_config_se = raster_config;
                unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
                unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
                int idx = (se / 2) * 2;

                if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
                        raster_config_se &= C_028350_SE_MAP;

                        if (!se_mask[idx]) {
                                raster_config_se |=
                                        S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3);
                        } else {
                                raster_config_se |=
                                        S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0);
                        }
                }

                pkr0_mask &= rb_mask;
                pkr1_mask &= rb_mask;
                if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
                        raster_config_se &= C_028350_PKR_MAP;

                        if (!pkr0_mask) {
                                raster_config_se |=
                                        S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3);
                        } else {
                                raster_config_se |=
                                        S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0);
                        }
                }

                if (rb_per_se >= 2) {
                        unsigned rb0_mask = 1 << (se * rb_per_se);
                        unsigned rb1_mask = rb0_mask << 1;

                        rb0_mask &= rb_mask;
                        rb1_mask &= rb_mask;
                        if (!rb0_mask || !rb1_mask) {
                                raster_config_se &= C_028350_RB_MAP_PKR0;

                                if (!rb0_mask) {
                                        raster_config_se |=
                                                S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3);
                                } else {
                                        raster_config_se |=
                                                S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0);
                                }
                        }

                        if (rb_per_se > 2) {
                                rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
                                rb1_mask = rb0_mask << 1;
                                rb0_mask &= rb_mask;
                                rb1_mask &= rb_mask;
                                if (!rb0_mask || !rb1_mask) {
                                        raster_config_se &= C_028350_RB_MAP_PKR1;

                                        if (!rb0_mask) {
                                                raster_config_se |=
                                                        S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3);
                                        } else {
                                                raster_config_se |=
                                                        S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0);
                                        }
                                }
                        }
                }

                /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
                if (physical_device->rad_info.chip_class < CIK)
                        radeon_set_config_reg(cs, GRBM_GFX_INDEX,
                                              SE_INDEX(se) | SH_BROADCAST_WRITES |
                                              INSTANCE_BROADCAST_WRITES);
                else
                        radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                                               S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
                                               S_030800_INSTANCE_BROADCAST_WRITES(1));
                radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se);
                if (physical_device->rad_info.chip_class >= CIK)
                        radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
        }

        /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
        if (physical_device->rad_info.chip_class < CIK)
                radeon_set_config_reg(cs, GRBM_GFX_INDEX,
                                      SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
                                      INSTANCE_BROADCAST_WRITES);
        else
                radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
                                       S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
                                       S_030800_INSTANCE_BROADCAST_WRITES(1));
}
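
/* Worked example of the masks above (hypothetical harvested part, for
 * illustration only): with num_se = 2, num_rb = 4 and rb_mask = 0xb
 * (RB2 disabled), rb_per_se = 2, so se_mask[0] = 0x3 & 0xb = 0x3 and
 * se_mask[1] = (0x3 << 2) & 0xb = 0x8; the per-SE loop then rewrites
 * RASTER_CONFIG for SE1 so rasterization avoids the missing backend. */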
static void
si_emit_compute(struct radv_physical_device *physical_device,
                struct radeon_winsys_cs *cs)
{
        radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
        radeon_emit(cs, 0);
        radeon_emit(cs, 0);
        radeon_emit(cs, 0);

        radeon_set_sh_reg_seq(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, 3);
        radeon_emit(cs, 0);
        /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
        radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
        radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

        if (physical_device->rad_info.chip_class >= CIK) {
                /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
                radeon_set_sh_reg_seq(cs,
                                      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
                radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
                            S_00B864_SH1_CU_EN(0xffff));
                radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
                            S_00B868_SH1_CU_EN(0xffff));
        }

        /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
         * and is now per pipe, so it should be handled in the
         * kernel if we want to use something other than the default value,
         * which is now 0x22f.
         */
        if (physical_device->rad_info.chip_class <= SI) {
                /* XXX: This should be:
                 * (number of compute units) * 4 * (waves per simd) - 1 */
                radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
                                  0x190 /* Default value */);
        }
}
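
/* Note (informal, based on the register fields used above): each
 * SHn_CU_EN value is a 16-bit per-compute-unit enable mask for one
 * shader array, so 0xffff leaves all CUs available to compute waves
 * rather than reserving any of them. */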
void
si_init_compute(struct radv_cmd_buffer *cmd_buffer)
{
        struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
        si_emit_compute(physical_device, cmd_buffer->cs);
}
static void
si_emit_config(struct radv_physical_device *physical_device,
               struct radeon_winsys_cs *cs)
{
        unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
        unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
        unsigned raster_config, raster_config_1;
        int i;

        radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
        radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1));
        radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1));

        radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
        radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));

        /* FIXME calculate these values somehow ??? */
        radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
        radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
        radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);

        radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
        radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);

        radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
        radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
        if (physical_device->rad_info.chip_class < CIK)
                radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
                                      S_008A14_CLIP_VTX_REORDER_ENA(1));

        radeon_set_context_reg(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
        radeon_set_context_reg(cs, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);

        radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);

        for (i = 0; i < 16; i++) {
                radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
                radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
        }
        switch (physical_device->rad_info.family) {
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
                raster_config = 0x2a00126a;
                raster_config_1 = 0x00000000;
                break;
        case CHIP_VERDE:
                raster_config = 0x0000124a;
                raster_config_1 = 0x00000000;
                break;
        case CHIP_OLAND:
                raster_config = 0x00000082;
                raster_config_1 = 0x00000000;
                break;
        case CHIP_HAINAN:
                raster_config = 0x00000000;
                raster_config_1 = 0x00000000;
                break;
        case CHIP_BONAIRE:
                raster_config = 0x16000012;
                raster_config_1 = 0x00000000;
                break;
        case CHIP_HAWAII:
                raster_config = 0x3a00161a;
                raster_config_1 = 0x0000002e;
                break;
        case CHIP_FIJI:
                if (physical_device->rad_info.cik_macrotile_mode_array[0] == 0x000000e8) {
                        /* old kernels with old tiling config */
                        raster_config = 0x16000012;
                        raster_config_1 = 0x0000002a;
                } else {
                        raster_config = 0x3a00161a;
                        raster_config_1 = 0x0000002e;
                }
                break;
        case CHIP_POLARIS10:
                raster_config = 0x16000012;
                raster_config_1 = 0x0000002a;
                break;
        case CHIP_POLARIS11:
                raster_config = 0x16000012;
                raster_config_1 = 0x00000000;
                break;
        case CHIP_TONGA:
                raster_config = 0x16000012;
                raster_config_1 = 0x0000002a;
                break;
        case CHIP_ICELAND:
                if (num_rb == 1)
                        raster_config = 0x00000000;
                else
                        raster_config = 0x00000002;
                raster_config_1 = 0x00000000;
                break;
        case CHIP_CARRIZO:
                raster_config = 0x00000002;
                raster_config_1 = 0x00000000;
                break;
        case CHIP_KAVERI:
                /* KV should be 0x00000002, but that causes problems with radeon */
                raster_config = 0x00000000; /* 0x00000002 */
                raster_config_1 = 0x00000000;
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_STONEY:
                raster_config = 0x00000000;
                raster_config_1 = 0x00000000;
                break;
        default:
                fprintf(stderr,
                        "radeonsi: Unknown GPU, using 0 for raster_config\n");
                raster_config = 0x00000000;
                raster_config_1 = 0x00000000;
                break;
        }
        /* Always use the default config when all backends are enabled
         * (or when we failed to determine the enabled backends).
         */
        if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
                radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG,
                                       raster_config);
                if (physical_device->rad_info.chip_class >= CIK)
                        radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
                                               raster_config_1);
        } else {
                si_write_harvested_raster_configs(physical_device, cs, raster_config, raster_config_1);
        }

        radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1));
        radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL, S_028240_WINDOW_OFFSET_DISABLE(1));
        radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
                               S_028244_BR_X(16384) | S_028244_BR_Y(16384));
        radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
        radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
                               S_028034_BR_X(16384) | S_028034_BR_Y(16384));

        radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
        radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
        /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
        radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
        radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);

        radeon_set_context_reg(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, fui(1.0));
        radeon_set_context_reg(cs, R_028BEC_PA_CL_GB_VERT_DISC_ADJ, fui(1.0));
        radeon_set_context_reg(cs, R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ, fui(1.0));
        radeon_set_context_reg(cs, R_028BF4_PA_CL_GB_HORZ_DISC_ADJ, fui(1.0));

        radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
        radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
        radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
        radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
                               S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
                               S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));

        radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
        radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
        radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);

        if (physical_device->rad_info.chip_class >= CIK) {
                radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 0);
                radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, S_00B31C_CU_EN(0xffff));
                radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, S_00B21C_CU_EN(0xffff));

                if (physical_device->rad_info.num_good_compute_units /
                    (physical_device->rad_info.max_se * physical_device->rad_info.max_sh_per_se) <= 4) {
                        /* Too few available compute units per SH. Disallowing
                         * VS to run on CU0 could hurt us more than late VS
                         * allocation would help.
                         *
                         * LATE_ALLOC_VS = 2 is the highest safe number.
                         */
                        radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xffff));
                        radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xffff));
                        radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2));
                } else {
                        /* Set LATE_ALLOC_VS == 31. It should be less than
                         * the number of scratch waves. Limitations:
                         * - VS can't execute on CU0.
                         * - If HS writes outputs to LDS, LS can't execute on CU0.
                         */
                        radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xfffe));
                        radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xfffe));
                        radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31));
                }

                radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, S_00B01C_CU_EN(0xffff));
        }

        if (physical_device->rad_info.chip_class >= VI) {
                radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL,
                                       S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
                                       S_028424_OVERWRITE_COMBINER_WATERMARK(4));
                radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 30);
                radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 32);
                radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
                                       S_028B50_ACCUM_ISOLINE(32) |
                                       S_028B50_ACCUM_TRI(11) |
                                       S_028B50_ACCUM_QUAD(11) |
                                       S_028B50_DONUT_SPLIT(16));
        } else {
                radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
                radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
        }

        if (physical_device->rad_info.family == CHIP_STONEY)
                radeon_set_context_reg(cs, R_028C40_PA_SC_SHADER_CONTROL, 0);

        si_emit_compute(physical_device, cs);
}
void si_init_config(struct radv_cmd_buffer *cmd_buffer)
{
        struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
        si_emit_config(physical_device, cmd_buffer->cs);
}
static void
get_viewport_xform(const VkViewport *viewport,
                   float scale[3], float translate[3])
{
        float x = viewport->x;
        float y = viewport->y;
        float half_width = 0.5f * viewport->width;
        float half_height = 0.5f * viewport->height;
        double n = viewport->minDepth;
        double f = viewport->maxDepth;

        scale[0] = half_width;
        translate[0] = half_width + x;
        scale[1] = half_height;
        translate[1] = half_height + y;

        scale[2] = (f - n);
        translate[2] = n;
}
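
/* For reference, this is the standard Vulkan viewport transform: with
 * hypothetical values x = 0, y = 0, width = 1920, height = 1080,
 * minDepth = 0 and maxDepth = 1, an NDC point (xd, yd, zd) maps to window
 * coordinates (960 + 960 * xd, 540 + 540 * yd, zd), i.e.
 * xw = translate[0] + scale[0] * xd, and analogously for y and z. */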
void
si_write_viewport(struct radeon_winsys_cs *cs, int first_vp,
                  int count, const VkViewport *viewports)
{
        int i;

        if (count == 0) {
                radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE, 6);
                radeon_emit(cs, fui(1.0));
                radeon_emit(cs, fui(0.0));
                radeon_emit(cs, fui(1.0));
                radeon_emit(cs, fui(0.0));
                radeon_emit(cs, fui(1.0));
                radeon_emit(cs, fui(0.0));

                radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0, 2);
                radeon_emit(cs, fui(0.0));
                radeon_emit(cs, fui(1.0));
                return;
        }
        radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
                                   first_vp * 4 * 6, count * 6);

        for (i = 0; i < count; i++) {
                float scale[3], translate[3];

                get_viewport_xform(&viewports[i], scale, translate);
                radeon_emit(cs, fui(scale[0]));
                radeon_emit(cs, fui(translate[0]));
                radeon_emit(cs, fui(scale[1]));
                radeon_emit(cs, fui(translate[1]));
                radeon_emit(cs, fui(scale[2]));
                radeon_emit(cs, fui(translate[2]));
        }

        radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 +
                                   first_vp * 4 * 2, count * 2);
        for (i = 0; i < count; i++) {
                float zmin = MIN2(viewports[i].minDepth, viewports[i].maxDepth);
                float zmax = MAX2(viewports[i].minDepth, viewports[i].maxDepth);
                radeon_emit(cs, fui(zmin));
                radeon_emit(cs, fui(zmax));
        }
}
void
si_write_scissors(struct radeon_winsys_cs *cs, int first,
                  int count, const VkRect2D *scissors)
{
        int i;

        if (count == 0)
                return;

        radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
        for (i = 0; i < count; i++) {
                radeon_emit(cs, S_028250_TL_X(scissors[i].offset.x) |
                            S_028250_TL_Y(scissors[i].offset.y) |
                            S_028250_WINDOW_OFFSET_DISABLE(1));
                radeon_emit(cs, S_028254_BR_X(scissors[i].offset.x + scissors[i].extent.width) |
                            S_028254_BR_Y(scissors[i].offset.y + scissors[i].extent.height));
        }
}
uint32_t
si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer)
{
        enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
        enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
        struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
        unsigned prim = cmd_buffer->state.pipeline->graphics.prim;
        unsigned primgroup_size = 128; /* recommended without a GS */
        unsigned max_primgroup_in_wave = 2;
        /* SWITCH_ON_EOP(0) is always preferable. */
        bool wd_switch_on_eop = false;
        bool ia_switch_on_eop = false;
        bool ia_switch_on_eoi = false;
        bool partial_vs_wave = false;
        bool partial_es_wave = false;

        if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
                primgroup_size = 64;  /* recommended with a GS */

        /* TODO linestipple */

        if (chip_class >= CIK) {
                /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
                 * 4 shader engines. Set 1 to pass the assertion below.
                 * The other cases are hardware requirements. */
                if (info->max_se < 4 ||
                    prim == V_008958_DI_PT_POLYGON ||
                    prim == V_008958_DI_PT_LINELOOP ||
                    prim == V_008958_DI_PT_TRIFAN ||
                    prim == V_008958_DI_PT_TRISTRIP_ADJ)
                        //          info->primitive_restart ||
                        //          info->count_from_stream_output)
                        wd_switch_on_eop = true;

                /* Required on CIK and later. */
                if (info->max_se > 2 && !wd_switch_on_eop)
                        ia_switch_on_eoi = true;

                /* Required by Hawaii and, for some special cases, by VI. */
                if (ia_switch_on_eoi &&
                    (family == CHIP_HAWAII ||
                     (chip_class == VI &&
                      (radv_pipeline_has_gs(cmd_buffer->state.pipeline) || max_primgroup_in_wave != 2))))
                        partial_vs_wave = true;

#if 0
                /* Instancing bug on Bonaire. */
                if (family == CHIP_BONAIRE && ia_switch_on_eoi &&
                    (info->indirect || info->instance_count > 1))
                        partial_vs_wave = true;
#endif

                /* If the WD switch is false, the IA switch must be false too. */
                assert(wd_switch_on_eop || !ia_switch_on_eop);
        }

        /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
        if (ia_switch_on_eoi)
                partial_es_wave = true;

        /* GS requirement. */
        if (SI_GS_PER_ES / primgroup_size >= cmd_buffer->device->gs_table_depth - 3)
                partial_es_wave = true;

        /* Hw bug with single-primitive instances and SWITCH_ON_EOI
         * on multi-SE chips. */
#if 0
        if (info->max_se >= 2 && ia_switch_on_eoi &&
            (info->indirect ||
             (info->instance_count > 1 &&
              si_num_prims_for_vertices(info) <= 1)))
                sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
#endif

        return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
                S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
                S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
                S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
                S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
                S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0) |
                S_028AA8_MAX_PRIMGRP_IN_WAVE(chip_class >= VI ?
                                             max_primgroup_in_wave : 0);
}
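
/* Worked example (hypothetical configuration, ignoring the gs_table_depth
 * clause): on a 2-SE CIK part running a GS pipeline, primgroup_size = 64
 * and wd_switch_on_eop = true because max_se < 4, so the function returns
 * S_028AA8_PRIMGROUP_SIZE(63) | S_028AA8_WD_SWITCH_ON_EOP(1) with every
 * other field zero. */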
void
si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
{
        enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
        unsigned cp_coher_cntl = 0;
        bool is_compute = cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE;

        if (is_compute)
                cmd_buffer->state.flush_bits &= ~(RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                                  RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
                                                  RADV_CMD_FLAG_FLUSH_AND_INV_DB |
                                                  RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
                                                  RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                                  RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
                                                  RADV_CMD_FLAG_VGT_FLUSH);

        radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);

        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_INV_ICACHE)
                cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_INV_SMEM_L1)
                cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_INV_VMEM_L1)
                cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) {
                cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
                if (chip_class >= VI)
                        cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
        }

        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
                cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
                        S_0085F0_CB0_DEST_BASE_ENA(1) |
                        S_0085F0_CB1_DEST_BASE_ENA(1) |
                        S_0085F0_CB2_DEST_BASE_ENA(1) |
                        S_0085F0_CB3_DEST_BASE_ENA(1) |
                        S_0085F0_CB4_DEST_BASE_ENA(1) |
                        S_0085F0_CB5_DEST_BASE_ENA(1) |
                        S_0085F0_CB6_DEST_BASE_ENA(1) |
                        S_0085F0_CB7_DEST_BASE_ENA(1);

                /* Necessary for DCC */
                if (cmd_buffer->device->physical_device->rad_info.chip_class >= VI) {
                        radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
                        radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_DATA_TS) |
                                                    EVENT_INDEX(5));
                        radeon_emit(cmd_buffer->cs, 0);
                        radeon_emit(cmd_buffer->cs, 0);
                        radeon_emit(cmd_buffer->cs, 0);
                        radeon_emit(cmd_buffer->cs, 0);
                }
        }

        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
                cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
                        S_0085F0_DB_DEST_BASE_ENA(1);
        }

        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
        }

        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
        }

        if (!(cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                              RADV_CMD_FLAG_FLUSH_AND_INV_DB))) {
                if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
                        radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                        radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
                } else if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
                        radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                        radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
                }
        }

        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
        }

        if (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
        }

        /* Make sure ME is idle (it executes most packets) before continuing.
         * This prevents read-after-write hazards between PFP and ME.
         */
        if ((cp_coher_cntl || (cmd_buffer->state.flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) &&
            !radv_cmd_buffer_uses_mec(cmd_buffer)) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
                radeon_emit(cmd_buffer->cs, 0);
        }

        /* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
         * Therefore, it should be last. Done in PFP.
         */
        if (cp_coher_cntl) {
                if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
                        radeon_emit(cmd_buffer->cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) |
                                                    PKT3_SHADER_TYPE_S(1));
                        radeon_emit(cmd_buffer->cs, cp_coher_cntl);   /* CP_COHER_CNTL */
                        radeon_emit(cmd_buffer->cs, 0xffffffff);      /* CP_COHER_SIZE */
                        radeon_emit(cmd_buffer->cs, 0xff);            /* CP_COHER_SIZE_HI */
                        radeon_emit(cmd_buffer->cs, 0);               /* CP_COHER_BASE */
                        radeon_emit(cmd_buffer->cs, 0);               /* CP_COHER_BASE_HI */
                        radeon_emit(cmd_buffer->cs, 0x0000000A);      /* POLL_INTERVAL */
                } else {
                        /* ACQUIRE_MEM is only required on a compute ring. */
                        radeon_emit(cmd_buffer->cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
                        radeon_emit(cmd_buffer->cs, cp_coher_cntl);   /* CP_COHER_CNTL */
                        radeon_emit(cmd_buffer->cs, 0xffffffff);      /* CP_COHER_SIZE */
                        radeon_emit(cmd_buffer->cs, 0);               /* CP_COHER_BASE */
                        radeon_emit(cmd_buffer->cs, 0x0000000A);      /* POLL_INTERVAL */
                }
        }

        if (cmd_buffer->state.flush_bits)
                radv_cmd_buffer_trace_emit(cmd_buffer);
        cmd_buffer->state.flush_bits = 0;
}
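
/* Usage sketch (illustrative, not a call site in this file): barrier code
 * ORs RADV_CMD_FLAG_* bits into cmd_buffer->state.flush_bits, and the next
 * draw/dispatch calls si_emit_cache_flush(cmd_buffer), which converts the
 * accumulated bits into EVENT_WRITE / SURFACE_SYNC packets once and then
 * clears them. */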
/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define R600_CP_DMA_SYNC        (1 << 0) /* R600+ */

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define SI_CP_DMA_RAW_WAIT      (1 << 1) /* SI+ */
#define CIK_CP_DMA_USE_L2       (1 << 2)

/* Alignment for optimal performance. */
#define CP_DMA_ALIGNMENT        32
/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT   ((1 << 21) - CP_DMA_ALIGNMENT)
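
/* Worked example of the chunking this limit implies (illustrative numbers):
 * CP_DMA_MAX_BYTE_COUNT = (1 << 21) - 32 = 2097120 bytes, so a 5 MiB
 * (5242880 byte) copy is emitted as three packets of 2097120, 2097120 and
 * 1048640 bytes. */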
static void si_emit_cp_dma_copy_buffer(struct radv_cmd_buffer *cmd_buffer,
                                       uint64_t dst_va, uint64_t src_va,
                                       unsigned size, unsigned flags)
{
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
        uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
        uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM(1) : 0;
        uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
        uint32_t sel = flags & CIK_CP_DMA_USE_L2 ?
                               S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) |
                               S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;

        assert(size);
        assert((size & ((1<<21)-1)) == size);

        radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);

        if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
                radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
                radeon_emit(cs, sync_flag | sel);       /* CP_SYNC [31] */
                radeon_emit(cs, src_va);                /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, src_va >> 32);          /* SRC_ADDR_HI [31:0] */
                radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [31:0] */
                radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        } else {
                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, src_va);                        /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
                radeon_emit(cs, dst_va);                        /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_va >> 32) & 0xffff);       /* DST_ADDR_HI [15:0] */
                radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        }

        /* CP DMA is executed in ME, but index buffers are read by PFP.
         * This ensures that ME (CP DMA) is idle before PFP starts fetching
         * indices. If we wanted to execute CP DMA in PFP, this packet
         * should precede it.
         */
        if (sync_flag && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
                radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
                radeon_emit(cs, 0);
        }

        radv_cmd_buffer_trace_emit(cmd_buffer);
}
/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
static void si_emit_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer,
                                        uint64_t dst_va, unsigned size,
                                        uint32_t clear_value, unsigned flags)
{
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
        uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
        uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM(1) : 0;
        uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
        uint32_t dst_sel = flags & CIK_CP_DMA_USE_L2 ? S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;

        assert(size);
        assert((size & ((1<<21)-1)) == size);

        radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);

        if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
                radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
                radeon_emit(cs, sync_flag | dst_sel | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
                radeon_emit(cs, clear_value);           /* DATA [31:0] */
                radeon_emit(cs, 0);                     /* unused */
                radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [15:0] */
                radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        } else {
                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, clear_value);           /* DATA [31:0] */
                radeon_emit(cs, sync_flag | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
                radeon_emit(cs, dst_va);                        /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_va >> 32) & 0xffff);       /* DST_ADDR_HI [15:0] */
                radeon_emit(cs, size | wr_confirm | raw_wait);  /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        }

        /* See "copy_buffer" for explanation. */
        if (sync_flag && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
                radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
                radeon_emit(cs, 0);
        }

        radv_cmd_buffer_trace_emit(cmd_buffer);
}
static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
                              uint64_t remaining_size, unsigned *flags)
{
        cmd_buffer->no_draws = false;
        /* Flush the caches for the first copy only.
         * Also wait for the previous CP DMA operations.
         */
        if (cmd_buffer->state.flush_bits) {
                si_emit_cache_flush(cmd_buffer);
                *flags |= SI_CP_DMA_RAW_WAIT;
        }

        /* Do the synchronization after the last dma, so that all data
         * is written to memory.
         */
        if (byte_count == remaining_size)
                *flags |= R600_CP_DMA_SYNC;
}
static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
{
        uint64_t va;
        uint32_t offset;
        unsigned dma_flags = 0;
        unsigned buf_size = CP_DMA_ALIGNMENT * 2;
        void *ptr;

        assert(size < CP_DMA_ALIGNMENT);

        radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, CP_DMA_ALIGNMENT, &offset, &ptr);

        va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
        va += offset;

        si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);

        si_emit_cp_dma_copy_buffer(cmd_buffer, va, va + CP_DMA_ALIGNMENT, size,
                                   dma_flags);
}
void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
                           uint64_t src_va, uint64_t dest_va,
                           uint64_t size)
{
        uint64_t main_src_va, main_dest_va;
        uint64_t skipped_size = 0, realign_size = 0;

        if (cmd_buffer->device->physical_device->rad_info.family <= CHIP_CARRIZO ||
            cmd_buffer->device->physical_device->rad_info.family == CHIP_STONEY) {
                /* If the size is not aligned, we must add a dummy copy at the end
                 * just to align the internal counter. Otherwise, the DMA engine
                 * would slow down by an order of magnitude for following copies.
                 */
                if (size % CP_DMA_ALIGNMENT)
                        realign_size = CP_DMA_ALIGNMENT - (size % CP_DMA_ALIGNMENT);

                /* If the copy begins unaligned, we must start copying from the next
                 * aligned block and the skipped part should be copied after everything
                 * else has been copied. Only the src alignment matters, not dst.
                 */
                if (src_va % CP_DMA_ALIGNMENT) {
                        skipped_size = CP_DMA_ALIGNMENT - (src_va % CP_DMA_ALIGNMENT);
                        /* The main part will be skipped if the size is too small. */
                        skipped_size = MIN2(skipped_size, size);
                        size -= skipped_size;
                }
        }
        main_src_va = src_va + skipped_size;
        main_dest_va = dest_va + skipped_size;

        while (size) {
                unsigned dma_flags = 0;
                unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);

                si_cp_dma_prepare(cmd_buffer, byte_count,
                                  size + skipped_size + realign_size,
                                  &dma_flags);

                si_emit_cp_dma_copy_buffer(cmd_buffer, main_dest_va, main_src_va,
                                           byte_count, dma_flags);

                size -= byte_count;
                main_src_va += byte_count;
                main_dest_va += byte_count;
        }

        /* Copy the part we skipped because src wasn't aligned. */
        if (skipped_size) {
                unsigned dma_flags = 0;

                si_cp_dma_prepare(cmd_buffer, skipped_size,
                                  size + skipped_size + realign_size,
                                  &dma_flags);

                si_emit_cp_dma_copy_buffer(cmd_buffer, dest_va, src_va,
                                           skipped_size, dma_flags);
        }

        /* Finally, realign the engine if the size wasn't aligned. */
        if (realign_size)
                si_cp_dma_realign_engine(cmd_buffer, realign_size);
}
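
/* Worked example (hypothetical addresses, for illustration only): copying
 * size = 100 bytes with src_va % 32 == 19 on one of the affected chips
 * gives skipped_size = 32 - 19 = 13 and realign_size = 32 - (100 % 32) = 28.
 * The main loop copies 100 - 13 = 87 bytes from the first aligned block,
 * the 13 skipped bytes are copied afterwards, and a final 28-byte dummy
 * copy realigns the DMA engine's internal counter. */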
void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
                            uint64_t size, unsigned value)
{
        if (!size)
                return;

        assert(va % 4 == 0 && size % 4 == 0);

        while (size) {
                unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
                unsigned dma_flags = 0;

                si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);

                /* Emit the clear packet. */
                si_emit_cp_dma_clear_buffer(cmd_buffer, va, byte_count, value,
                                            dma_flags);

                size -= byte_count;
                va += byte_count;
        }
}
/* For MSAA sample positions. */
#define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y)  \
        (((s0x) & 0xf) | (((unsigned)(s0y) & 0xf) << 4) |                  \
        (((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) | \
        (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) | \
        (((unsigned)(s3x) & 0xf) << 24) | (((unsigned)(s3y) & 0xf) << 28))
/* 2xMSAA
 * There are two locations (4, 4), (-4, -4). */
const uint32_t eg_sample_locs_2x[4] = {
        FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
        FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
        FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
        FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
};
const unsigned eg_max_dist_2x = 4;
/* 4xMSAA
 * There are 4 locations: (-2, -6), (6, -2), (-6, 2), (2, 6). */
const uint32_t eg_sample_locs_4x[4] = {
        FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
        FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
        FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
        FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
};
const unsigned eg_max_dist_4x = 6;
/* Cayman 8xMSAA */
static const uint32_t cm_sample_locs_8x[] = {
        FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
        FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
        FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
        FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
        FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
        FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
        FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
        FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
};
static const unsigned cm_max_dist_8x = 8;
/* Cayman 16xMSAA */
static const uint32_t cm_sample_locs_16x[] = {
        FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
        FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
        FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
        FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
        FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
        FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
        FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
        FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
        FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
        FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
        FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
        FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
        FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
        FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
        FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
        FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
};
static const unsigned cm_max_dist_16x = 8;
unsigned radv_cayman_get_maxdist(int log_samples)
{
        unsigned max_dist[] = {
                0,
                eg_max_dist_2x,
                eg_max_dist_4x,
                cm_max_dist_8x,
                cm_max_dist_16x
        };
        return max_dist[log_samples];
}
void radv_cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples)
{
        switch (nr_samples) {
        default:
        case 1:
                radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0);
                radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0);
                radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0);
                radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0);
                break;
        case 2:
                radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
                radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
                radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
                radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
                break;
        case 4:
                radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
                radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
                radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
                radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
                break;
        case 8:
                radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
                radeon_emit(cs, cm_sample_locs_8x[0]);
                radeon_emit(cs, cm_sample_locs_8x[4]);
                radeon_emit(cs, 0);
                radeon_emit(cs, 0);
                radeon_emit(cs, cm_sample_locs_8x[1]);
                radeon_emit(cs, cm_sample_locs_8x[5]);
                radeon_emit(cs, 0);
                radeon_emit(cs, 0);
                radeon_emit(cs, cm_sample_locs_8x[2]);
                radeon_emit(cs, cm_sample_locs_8x[6]);
                radeon_emit(cs, 0);
                radeon_emit(cs, 0);
                radeon_emit(cs, cm_sample_locs_8x[3]);
                radeon_emit(cs, cm_sample_locs_8x[7]);
                break;
        case 16:
                radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
                radeon_emit(cs, cm_sample_locs_16x[0]);
                radeon_emit(cs, cm_sample_locs_16x[4]);
                radeon_emit(cs, cm_sample_locs_16x[8]);
                radeon_emit(cs, cm_sample_locs_16x[12]);
                radeon_emit(cs, cm_sample_locs_16x[1]);
                radeon_emit(cs, cm_sample_locs_16x[5]);
                radeon_emit(cs, cm_sample_locs_16x[9]);
                radeon_emit(cs, cm_sample_locs_16x[13]);
                radeon_emit(cs, cm_sample_locs_16x[2]);
                radeon_emit(cs, cm_sample_locs_16x[6]);
                radeon_emit(cs, cm_sample_locs_16x[10]);
                radeon_emit(cs, cm_sample_locs_16x[14]);
                radeon_emit(cs, cm_sample_locs_16x[3]);
                radeon_emit(cs, cm_sample_locs_16x[7]);
                radeon_emit(cs, cm_sample_locs_16x[11]);
                radeon_emit(cs, cm_sample_locs_16x[15]);
                break;
        }
}
static void radv_cayman_get_sample_position(struct radv_device *device,
                                            unsigned sample_count,
                                            unsigned sample_index, float *out_value)
{
        int offset, index;
        struct {
                int idx:4;
        } val;

        switch (sample_count) {
        case 1:
        default:
                out_value[0] = out_value[1] = 0.5;
                break;
        case 2:
                offset = 4 * (sample_index * 2);
                val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
                out_value[0] = (float)(val.idx + 8) / 16.0f;
                val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
                out_value[1] = (float)(val.idx + 8) / 16.0f;
                break;
        case 4:
                offset = 4 * (sample_index * 2);
                val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
                out_value[0] = (float)(val.idx + 8) / 16.0f;
                val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
                out_value[1] = (float)(val.idx + 8) / 16.0f;
                break;
        case 8:
                offset = 4 * (sample_index % 4 * 2);
                index = (sample_index / 4) * 4;
                val.idx = (cm_sample_locs_8x[index] >> offset) & 0xf;
                out_value[0] = (float)(val.idx + 8) / 16.0f;
                val.idx = (cm_sample_locs_8x[index] >> (offset + 4)) & 0xf;
                out_value[1] = (float)(val.idx + 8) / 16.0f;
                break;
        case 16:
                offset = 4 * (sample_index % 4 * 2);
                index = (sample_index / 4) * 4;
                val.idx = (cm_sample_locs_16x[index] >> offset) & 0xf;
                out_value[0] = (float)(val.idx + 8) / 16.0f;
                val.idx = (cm_sample_locs_16x[index] >> (offset + 4)) & 0xf;
                out_value[1] = (float)(val.idx + 8) / 16.0f;
                break;
        }
}
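
/* Decoding illustration (hypothetical nibble value): the 4-bit signed
 * bitfield val.idx sign-extends each packed nibble, and (val.idx + 8) / 16.0f
 * maps the hardware range [-8, 7] into [0, 1). A stored nibble of 0xd thus
 * decodes to idx = -3 and a normalized sample position of (-3 + 8) / 16 =
 * 0.3125. */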
void radv_device_init_msaa(struct radv_device *device)
{
        int i;

        radv_cayman_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);

        for (i = 0; i < 2; i++)
                radv_cayman_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
        for (i = 0; i < 4; i++)
                radv_cayman_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
        for (i = 0; i < 8; i++)
                radv_cayman_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
        for (i = 0; i < 16; i++)
                radv_cayman_get_sample_position(device, 16, i, device->sample_locations_16x[i]);
}