/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based on si_state.c
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/* command buffer handling for SI */

#include "radv_private.h"
#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "gfx9d.h"
#include "radv_util.h"
#include "main/macros.h"

#define SI_GS_PER_ES 128
static void
si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
				  struct radeon_winsys_cs *cs,
				  unsigned raster_config,
				  unsigned raster_config_1)
{
	unsigned sh_per_se = MAX2(physical_device->rad_info.max_sh_per_se, 1);
	unsigned num_se = MAX2(physical_device->rad_info.max_se, 1);
	unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
	unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
	unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	assert(num_se == 1 || num_se == 2 || num_se == 4);
	assert(sh_per_se == 1 || sh_per_se == 2);
	assert(rb_per_pkr == 1 || rb_per_pkr == 2);

	/* XXX: I can't figure out what the *_XSEL and *_YSEL
	 * fields are for, so I'm leaving them as their default
	 * values. */

	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
			     (!se_mask[2] && !se_mask[3]))) {
		raster_config_1 &= C_028354_SE_PAIR_MAP;

		if (!se_mask[0] && !se_mask[1]) {
			raster_config_1 |=
				S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3);
		} else {
			raster_config_1 |=
				S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0);
		}
	}

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= C_028350_SE_MAP;

			if (!se_mask[idx]) {
				raster_config_se |=
					S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |=
					S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0);
			}
		}

		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= C_028350_PKR_MAP;

			if (!pkr0_mask) {
				raster_config_se |=
					S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |=
					S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0);
			}
		}

		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= C_028350_RB_MAP_PKR0;

				if (!rb0_mask) {
					raster_config_se |=
						S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3);
				} else {
					raster_config_se |=
						S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0);
				}
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= C_028350_RB_MAP_PKR1;

					if (!rb0_mask) {
						raster_config_se |=
							S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3);
					} else {
						raster_config_se |=
							S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0);
					}
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
		if (physical_device->rad_info.chip_class < CIK)
			radeon_set_config_reg(cs, GRBM_GFX_INDEX,
					      SE_INDEX(se) | SH_BROADCAST_WRITES |
					      INSTANCE_BROADCAST_WRITES);
		else
			radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
					       S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
					       S_030800_INSTANCE_BROADCAST_WRITES(1));
		radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se);
		if (physical_device->rad_info.chip_class >= CIK)
			radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
	}

	/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
	if (physical_device->rad_info.chip_class < CIK)
		radeon_set_config_reg(cs, GRBM_GFX_INDEX,
				      SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
				      INSTANCE_BROADCAST_WRITES);
	else
		radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
				       S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
				       S_030800_INSTANCE_BROADCAST_WRITES(1));
}
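/* Summary: on harvested parts some render backends are fused off, so
 * enabled_rb_mask is sparse. The per-SE pass above rewrites the
 * SE_MAP/PKR_MAP/RB_MAP_PKR* fields of PA_SC_RASTER_CONFIG so work is only
 * mapped onto backends that actually exist; the broadcast write at the end
 * restores GRBM_GFX_INDEX for all following register writes. */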
static void
si_emit_compute(struct radv_physical_device *physical_device,
		struct radeon_winsys_cs *cs)
{
	radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);

	radeon_set_sh_reg_seq(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, 3);
	radeon_emit(cs, 0);
	/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
	radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

	if (physical_device->rad_info.chip_class >= CIK) {
		/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
		radeon_set_sh_reg_seq(cs,
				      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
		radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
			    S_00B864_SH1_CU_EN(0xffff));
		radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
			    S_00B868_SH1_CU_EN(0xffff));
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (physical_device->rad_info.chip_class <= SI) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */
		radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
				  0x190 /* Default value */);
	}
}
void
si_init_compute(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
	si_emit_compute(physical_device, cmd_buffer->cs);
}
static void
si_emit_config(struct radv_physical_device *physical_device,
	       struct radeon_winsys_cs *cs)
{
	unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
	unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
	unsigned raster_config, raster_config_1;
	int i;

	radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1));
	radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1));

	radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
	radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));

	/* FIXME calculate these values somehow ??? */
	radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
	radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
	radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);

	radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
	radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);

	radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
	radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
	if (physical_device->rad_info.chip_class >= GFX9)
		radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF, 0);
	radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
	if (physical_device->rad_info.chip_class < CIK)
		radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
				      S_008A14_CLIP_VTX_REORDER_ENA(1));

	radeon_set_context_reg(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
	radeon_set_context_reg(cs, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);

	radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);

	for (i = 0; i < 16; i++) {
		radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
		radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
	}

	switch (physical_device->rad_info.family) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
		raster_config = 0x2a00126a;
		raster_config_1 = 0x00000000;
		break;
	case CHIP_VERDE:
		raster_config = 0x0000124a;
		raster_config_1 = 0x00000000;
		break;
	case CHIP_OLAND:
		raster_config = 0x00000082;
		raster_config_1 = 0x00000000;
		break;
	case CHIP_HAINAN:
		raster_config = 0x00000000;
		raster_config_1 = 0x00000000;
		break;
	case CHIP_BONAIRE:
		raster_config = 0x16000012;
		raster_config_1 = 0x00000000;
		break;
	case CHIP_HAWAII:
		raster_config = 0x3a00161a;
		raster_config_1 = 0x0000002e;
		break;
	case CHIP_FIJI:
		if (physical_device->rad_info.cik_macrotile_mode_array[0] == 0x000000e8) {
			/* old kernels with old tiling config */
			raster_config = 0x16000012;
			raster_config_1 = 0x0000002a;
		} else {
			raster_config = 0x3a00161a;
			raster_config_1 = 0x0000002e;
		}
		break;
	case CHIP_POLARIS10:
		raster_config = 0x16000012;
		raster_config_1 = 0x0000002a;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		raster_config = 0x16000012;
		raster_config_1 = 0x00000000;
		break;
	case CHIP_TONGA:
		raster_config = 0x16000012;
		raster_config_1 = 0x0000002a;
		break;
	case CHIP_ICELAND:
		if (num_rb == 1)
			raster_config = 0x00000000;
		else
			raster_config = 0x00000002;
		raster_config_1 = 0x00000000;
		break;
	case CHIP_CARRIZO:
		raster_config = 0x00000002;
		raster_config_1 = 0x00000000;
		break;
	case CHIP_KAVERI:
		/* KV should be 0x00000002, but that causes problems with radeon */
		raster_config = 0x00000000; /* 0x00000002 */
		raster_config_1 = 0x00000000;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_STONEY:
		raster_config = 0x00000000;
		raster_config_1 = 0x00000000;
		break;
	default:
		if (physical_device->rad_info.chip_class <= VI) {
			fprintf(stderr,
				"radeonsi: Unknown GPU, using 0 for raster_config\n");
			raster_config = 0x00000000;
			raster_config_1 = 0x00000000;
		}
		break;
	}

	/* Always use the default config when all backends are enabled
	 * (or when we failed to determine the enabled backends).
	 */
	if (physical_device->rad_info.chip_class <= VI) {
		if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
			radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG,
					       raster_config);
			if (physical_device->rad_info.chip_class >= CIK)
				radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
						       raster_config_1);
		} else
			si_write_harvested_raster_configs(physical_device, cs, raster_config, raster_config_1);
	}

	radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1));
	radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL, S_028240_WINDOW_OFFSET_DISABLE(1));
	radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
			       S_028244_BR_X(16384) | S_028244_BR_Y(16384));
	radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
	radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
			       S_028034_BR_X(16384) | S_028034_BR_Y(16384));

	radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
	radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
	/* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
	radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
	radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);

	radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
	radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
	radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
	radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
			       S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
			       S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));

	if (physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_uconfig_reg(cs, R_030920_VGT_MAX_VTX_INDX, ~0);
		radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0);
		radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0);
	} else {
		radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
		radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
		radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
	}

	if (physical_device->rad_info.chip_class >= CIK) {
		if (physical_device->rad_info.chip_class >= GFX9) {
			radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, S_00B41C_CU_EN(0xffff));
		} else {
			radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xffff));
			radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 0);
			radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, S_00B31C_CU_EN(0xffff));
			/* If this is 0, Bonaire can hang even if GS isn't being used.
			 * Other chips are unaffected. These are suboptimal values,
			 * but we don't use on-chip GS.
			 */
			radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
					       S_028A44_ES_VERTS_PER_SUBGRP(64) |
					       S_028A44_GS_PRIMS_PER_SUBGRP(4));
		}
		radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, S_00B21C_CU_EN(0xffff));

		if (physical_device->rad_info.num_good_compute_units /
		    (physical_device->rad_info.max_se * physical_device->rad_info.max_sh_per_se) <= 4) {
			/* Too few available compute units per SH. Disallowing
			 * VS to run on CU0 could hurt us more than late VS
			 * allocation would help.
			 *
			 * LATE_ALLOC_VS = 2 is the highest safe number.
			 */
			radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xffff));
			radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2));
		} else {
			/* Set LATE_ALLOC_VS == 31. It should be less than
			 * the number of scratch waves. Limitations:
			 * - VS can't execute on CU0.
			 * - If HS writes outputs to LDS, LS can't execute on CU0.
			 */
			radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xfffe));
			radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31));
		}

		radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, S_00B01C_CU_EN(0xffff));
	}

	if (physical_device->rad_info.chip_class >= VI) {
		uint32_t vgt_tess_distribution;
		radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL,
				       S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
				       S_028424_OVERWRITE_COMBINER_WATERMARK(4));
		if (physical_device->rad_info.family < CHIP_POLARIS10)
			radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 30);
		radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 32);

		vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
			S_028B50_ACCUM_TRI(11) |
			S_028B50_ACCUM_QUAD(11) |
			S_028B50_DONUT_SPLIT(16);

		if (physical_device->rad_info.family == CHIP_FIJI ||
		    physical_device->rad_info.family >= CHIP_POLARIS10)
			vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);

		radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
				       vgt_tess_distribution);
	} else {
		radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
		radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
	}

	if (physical_device->has_rbplus)
		radeon_set_context_reg(cs, R_028C40_PA_SC_SHADER_CONTROL, 0);

	if (physical_device->rad_info.chip_class >= GFX9) {
		unsigned num_se = physical_device->rad_info.max_se;
		unsigned pc_lines = 0;

		switch (physical_device->rad_info.family) {
		case CHIP_VEGA10:
			pc_lines = 4096;
			break;
		case CHIP_RAVEN:
			pc_lines = 1024;
			break;
		default:
			assert(0);
		}

		radeon_set_context_reg(cs, R_028060_DB_DFSM_CONTROL,
				       S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF));
		radeon_set_context_reg(cs, R_028064_DB_RENDER_FILTER, 0);
		/* TODO: We can use this to disable RBs for rendering to GART: */
		radeon_set_context_reg(cs, R_02835C_PA_SC_TILE_STEERING_OVERRIDE, 0);
		radeon_set_context_reg(cs, R_02883C_PA_SU_OVER_RASTERIZATION_CNTL, 0);
		/* TODO: Enable the binner: */
		radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
				       S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
				       S_028C44_DISABLE_START_OF_PRIM(1));
		radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1,
				       S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) |
				       S_028C48_MAX_PRIM_PER_BATCH(1023));
		radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
				       S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
		radeon_set_uconfig_reg(cs, R_030968_VGT_INSTANCE_BASE_ID, 0);
	}

	si_emit_compute(physical_device, cs);
}
void si_init_config(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;

	si_emit_config(physical_device, cmd_buffer->cs);
}
void
cik_create_gfx_config(struct radv_device *device)
{
	struct radeon_winsys_cs *cs = device->ws->cs_create(device->ws, RING_GFX);
	if (!cs)
		return;

	si_emit_config(device->physical_device, cs);

	while (cs->cdw & 7) {
		if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
			radeon_emit(cs, 0x80000000);
		else
			radeon_emit(cs, 0xffff1000);
	}

	device->gfx_init = device->ws->buffer_create(device->ws,
						     cs->cdw * 4, 4096,
						     RADEON_DOMAIN_GTT,
						     RADEON_FLAG_CPU_ACCESS);
	if (!device->gfx_init)
		goto fail;

	void *map = device->ws->buffer_map(device->gfx_init);
	if (!map) {
		device->ws->buffer_destroy(device->gfx_init);
		device->gfx_init = NULL;
		goto fail;
	}

	memcpy(map, cs->buf, cs->cdw * 4);

	device->ws->buffer_unmap(device->gfx_init);
	device->gfx_init_size_dw = cs->cdw;
fail:
	device->ws->cs_destroy(cs);
}
static void
get_viewport_xform(const VkViewport *viewport,
		   float scale[3], float translate[3])
{
	float x = viewport->x;
	float y = viewport->y;
	float half_width = 0.5f * viewport->width;
	float half_height = 0.5f * viewport->height;
	double n = viewport->minDepth;
	double f = viewport->maxDepth;

	scale[0] = half_width;
	translate[0] = half_width + x;
	scale[1] = half_height;
	translate[1] = half_height + y;

	scale[2] = (f - n);
	translate[2] = n;
}
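/* With this transform, a clip-space x in [-1, 1] lands at
 * translate[0] + x * scale[0], covering exactly
 * [viewport->x, viewport->x + viewport->width]; y behaves the same way, and
 * a depth z in [0, 1] maps to n + z * (f - n). */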
void
si_write_viewport(struct radeon_winsys_cs *cs, int first_vp,
		  int count, const VkViewport *viewports)
{
	int i;

	assert(count);
	radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
				   first_vp * 4 * 6, count * 6);

	for (i = 0; i < count; i++) {
		float scale[3], translate[3];

		get_viewport_xform(&viewports[i], scale, translate);
		radeon_emit(cs, fui(scale[0]));
		radeon_emit(cs, fui(translate[0]));
		radeon_emit(cs, fui(scale[1]));
		radeon_emit(cs, fui(translate[1]));
		radeon_emit(cs, fui(scale[2]));
		radeon_emit(cs, fui(translate[2]));
	}

	radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 +
				   first_vp * 4 * 2, count * 2);
	for (i = 0; i < count; i++) {
		float zmin = MIN2(viewports[i].minDepth, viewports[i].maxDepth);
		float zmax = MAX2(viewports[i].minDepth, viewports[i].maxDepth);
		radeon_emit(cs, fui(zmin));
		radeon_emit(cs, fui(zmax));
	}
}
static VkRect2D si_scissor_from_viewport(const VkViewport *viewport)
{
	float scale[3], translate[3];
	VkRect2D rect;

	get_viewport_xform(viewport, scale, translate);

	rect.offset.x = translate[0] - fabsf(scale[0]);
	rect.offset.y = translate[1] - fabsf(scale[1]);
	rect.extent.width = ceilf(translate[0] + fabsf(scale[0])) - rect.offset.x;
	rect.extent.height = ceilf(translate[1] + fabsf(scale[1])) - rect.offset.y;

	return rect;
}
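/* A Vulkan viewport may have a negative height (y-flip), making scale[1]
 * negative; the fabsf() above keeps the derived scissor non-degenerate in
 * that case. */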
static VkRect2D si_intersect_scissor(const VkRect2D *a, const VkRect2D *b) {
	VkRect2D ret;
	ret.offset.x = MAX2(a->offset.x, b->offset.x);
	ret.offset.y = MAX2(a->offset.y, b->offset.y);
	ret.extent.width = MIN2(a->offset.x + a->extent.width,
				b->offset.x + b->extent.width) - ret.offset.x;
	ret.extent.height = MIN2(a->offset.y + a->extent.height,
				 b->offset.y + b->extent.height) - ret.offset.y;
	return ret;
}
void
si_write_scissors(struct radeon_winsys_cs *cs, int first,
		  int count, const VkRect2D *scissors,
		  const VkViewport *viewports, bool can_use_guardband)
{
	int i;
	float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY;
	const float max_range = 32767.0f;

	assert(count);
	radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
	for (i = 0; i < count; i++) {
		VkRect2D viewport_scissor = si_scissor_from_viewport(viewports + i);
		VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);

		get_viewport_xform(viewports + i, scale, translate);
		scale[0] = fabsf(scale[0]);
		scale[1] = fabsf(scale[1]);

		if (scale[0] < 0.5)
			scale[0] = 0.5;
		if (scale[1] < 0.5)
			scale[1] = 0.5;

		guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
		guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);

		radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
			    S_028250_TL_Y(scissor.offset.y) |
			    S_028250_WINDOW_OFFSET_DISABLE(1));
		radeon_emit(cs, S_028254_BR_X(scissor.offset.x + scissor.extent.width) |
			    S_028254_BR_Y(scissor.offset.y + scissor.extent.height));
	}
	if (!can_use_guardband) {
		guardband_x = 1.0;
		guardband_y = 1.0;
	}

	radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
	radeon_emit(cs, fui(guardband_y));
	radeon_emit(cs, fui(1.0));
	radeon_emit(cs, fui(guardband_x));
	radeon_emit(cs, fui(1.0));
}
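/* The guardband factor is how far primitives may extend past the viewport,
 * measured in viewport half-sizes, before the hardware has to clip: the
 * rasterizer coordinate range is +/-max_range. Worked example (illustrative):
 * a 1920-wide viewport at x = 0 gives scale[0] = 960 and translate[0] = 960,
 * so guardband_x = (32767 - 960) / 960, roughly 33 viewport half-widths. */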
static inline unsigned
radv_prims_for_vertices(struct radv_prim_vertex_count *info, unsigned num)
{
	if (num == 0)
		return 0;

	if (info->incr == 0)
		return 0;

	if (num < info->min)
		return 0;

	return 1 + ((num - info->min) / info->incr);
}
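/* Worked example (topology values assumed from the pipeline setup, not this
 * file): a triangle list uses prim_vertex_count = { .min = 3, .incr = 3 },
 * so num = 8 vertices give 1 + (8 - 3) / 3 = 2 complete primitives, while a
 * triangle strip ({ .min = 3, .incr = 1 }) gives 1 + (8 - 3) / 1 = 6. */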
uint32_t
si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
			  bool instanced_draw, bool indirect_draw,
			  uint32_t draw_vertex_count)
{
	enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
	enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
	struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
	unsigned prim = cmd_buffer->state.pipeline->graphics.prim;
	unsigned max_primgroup_in_wave = 2;
	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;
	bool multi_instances_smaller_than_primgroup;

	multi_instances_smaller_than_primgroup = indirect_draw;
	if (!multi_instances_smaller_than_primgroup && instanced_draw) {
		uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
		if (num_prims < cmd_buffer->state.pipeline->graphics.primgroup_size)
			multi_instances_smaller_than_primgroup = true;
	}

	if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT]->info.fs.prim_id_input)
		ia_switch_on_eoi = true;

	if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.uses_prim_id ||
		    cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.uses_prim_id)
			ia_switch_on_eoi = true;

		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((family == CHIP_TAHITI ||
		     family == CHIP_PITCAIRN ||
		     family == CHIP_BONAIRE) &&
		    radv_pipeline_has_gs(cmd_buffer->state.pipeline))
			partial_vs_wave = true;

		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (cmd_buffer->device->has_distributed_tess) {
			if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
				if (chip_class <= VI)
					partial_es_wave = true;

				if (family == CHIP_TONGA ||
				    family == CHIP_FIJI ||
				    family == CHIP_POLARIS10 ||
				    family == CHIP_POLARIS11 ||
				    family == CHIP_POLARIS12)
					partial_vs_wave = true;
			} else {
				partial_vs_wave = true;
			}
		}
	}
	/* TODO linestipple */

	if (chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (info->max_se < 4 ||
		    prim == V_008958_DI_PT_POLYGON ||
		    prim == V_008958_DI_PT_LINELOOP ||
		    prim == V_008958_DI_PT_TRIFAN ||
		    prim == V_008958_DI_PT_TRISTRIP_ADJ ||
		    (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
		     (family < CHIP_POLARIS10 ||
		      (prim != V_008958_DI_PT_POINTLIST &&
		       prim != V_008958_DI_PT_LINESTRIP &&
		       prim != V_008958_DI_PT_TRISTRIP))))
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (family == CHIP_HAWAII &&
		    (instanced_draw || indirect_draw))
			wd_switch_on_eop = true;

		/* Performance recommendation for 4 SE Gfx7-8 parts if
		 * instances are smaller than a primgroup.
		 * Assume indirect draws always use small instances.
		 * This is needed for good VS wave utilization.
		 */
		if (chip_class <= VI &&
		    info->max_se == 4 &&
		    multi_instances_smaller_than_primgroup)
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (info->max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (family == CHIP_HAWAII ||
		     (chip_class == VI &&
		      (radv_pipeline_has_gs(cmd_buffer->state.pipeline) || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    (instanced_draw || indirect_draw))
			partial_vs_wave = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}
	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (chip_class <= VI && ia_switch_on_eoi)
		partial_es_wave = true;

	if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (radv_pipeline_has_gs(cmd_buffer->state.pipeline) &&
		    cmd_buffer->state.pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.uses_prim_id)
			ia_switch_on_eoi = true;

		/* GS requirement. */
		if (SI_GS_PER_ES / cmd_buffer->state.pipeline->graphics.primgroup_size >= cmd_buffer->device->gs_table_depth - 3)
			partial_es_wave = true;

		/* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
		 * The hw doc says all multi-SE chips are affected, but amdgpu-pro Vulkan
		 * only applies it to Hawaii. Do what amdgpu-pro Vulkan does.
		 */
		if (family == CHIP_HAWAII && ia_switch_on_eoi) {
			bool set_vgt_flush = indirect_draw;
			if (!set_vgt_flush && instanced_draw) {
				uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
				if (num_prims <= 1)
					set_vgt_flush = true;
			}
			if (set_vgt_flush)
				cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
		}
	}

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
		S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
		S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
		S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
		S_028AA8_PRIMGROUP_SIZE(cmd_buffer->state.pipeline->graphics.primgroup_size - 1) |
		S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0) |
		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
		S_028AA8_MAX_PRIMGRP_IN_WAVE(chip_class == VI ?
					     max_primgroup_in_wave : 0) |
		S_030960_EN_INST_OPT_BASIC(chip_class >= GFX9) |
		S_030960_EN_INST_OPT_ADV(chip_class >= GFX9);
}
void si_cs_emit_write_event_eop(struct radeon_winsys_cs *cs,
				bool predicated,
				enum chip_class chip_class,
				bool is_mec,
				unsigned event, unsigned event_flags,
				unsigned data_sel,
				uint64_t va,
				uint32_t old_fence,
				uint32_t new_fence)
{
	unsigned op = EVENT_TYPE(event) |
		EVENT_INDEX(5) |
		event_flags;
	unsigned is_gfx8_mec = is_mec && chip_class < GFX9;

	if (chip_class >= GFX9 || is_gfx8_mec) {
		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, predicated));
		radeon_emit(cs, op);
		radeon_emit(cs, EOP_DATA_SEL(data_sel));
		radeon_emit(cs, va);		/* address lo */
		radeon_emit(cs, va >> 32);	/* address hi */
		radeon_emit(cs, new_fence);	/* immediate data lo */
		radeon_emit(cs, 0); /* immediate data hi */
		if (!is_gfx8_mec)
			radeon_emit(cs, 0); /* unused */
	} else {
		if (chip_class == CIK ||
		    chip_class == VI) {
			/* Two EOP events are required to make all engines go idle
			 * (and optional cache flushes executed) before the timestamp
			 * is written.
			 */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, predicated));
			radeon_emit(cs, op);
			radeon_emit(cs, va);
			radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
			radeon_emit(cs, old_fence); /* immediate data */
			radeon_emit(cs, 0); /* unused */
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, predicated));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
		radeon_emit(cs, new_fence); /* immediate data */
		radeon_emit(cs, 0); /* unused */
	}
}
static void
si_emit_wait_fence(struct radeon_winsys_cs *cs,
		   bool predicated,
		   uint64_t va, uint32_t ref,
		   uint32_t mask)
{
	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, predicated));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref); /* reference value */
	radeon_emit(cs, mask); /* mask */
	radeon_emit(cs, 4); /* poll interval */
}
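/* WAIT_REG_MEM with MEM_SPACE(1) makes the CP poll the 32-bit word at 'va'
 * until (word & mask) == ref; si_cs_emit_cache_flush() below pairs this with
 * the EOP fence write to wait for a flush to land. */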
static void
si_emit_acquire_mem(struct radeon_winsys_cs *cs,
		    bool is_mec,
		    bool predicated,
		    bool is_gfx9,
		    unsigned cp_coher_cntl)
{
	if (is_mec || is_gfx9) {
		uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, predicated) |
			    PKT3_SHADER_TYPE_S(is_mec));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, hi_val);	/* CP_COHER_SIZE_HI */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	} else {
		/* ACQUIRE_MEM is only required on a compute ring. */
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, predicated));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	}
}
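/* Both encodings carry the same CP_COHER_* payload; ACQUIRE_MEM adds a
 * CP_COHER_SIZE_HI dword and is the form required on compute rings (and used
 * on GFX9), while SURFACE_SYNC is the older gfx-ring packet. */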
void
si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
		       bool predicated,
		       enum chip_class chip_class,
		       uint32_t *flush_cnt,
		       uint64_t flush_va,
		       bool is_mec,
		       enum radv_cmd_flush_bits flush_bits)
{
	unsigned cp_coher_cntl = 0;
	uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					     RADV_CMD_FLAG_FLUSH_AND_INV_DB);

	if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (flush_bits & RADV_CMD_FLAG_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (chip_class <= VI) {
		if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
			cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				S_0085F0_CB0_DEST_BASE_ENA(1) |
				S_0085F0_CB1_DEST_BASE_ENA(1) |
				S_0085F0_CB2_DEST_BASE_ENA(1) |
				S_0085F0_CB3_DEST_BASE_ENA(1) |
				S_0085F0_CB4_DEST_BASE_ENA(1) |
				S_0085F0_CB5_DEST_BASE_ENA(1) |
				S_0085F0_CB6_DEST_BASE_ENA(1) |
				S_0085F0_CB7_DEST_BASE_ENA(1);

			/* Necessary for DCC */
			if (chip_class >= VI) {
				si_cs_emit_write_event_eop(cs,
							   predicated,
							   chip_class,
							   is_mec,
							   V_028A90_FLUSH_AND_INV_CB_DATA_TS,
							   0, 0, 0, 0, 0);
			}
		}
		if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
			cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				S_0085F0_DB_DEST_BASE_ENA(1);
		}
	}

	if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}

	if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}

	if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (chip_class >= GFX9 && flush_cb_db) {
		unsigned cb_db_event, tc_flags;

		/* Set the CB/DB flush event. */
		switch (flush_cb_db) {
		case RADV_CMD_FLAG_FLUSH_AND_INV_CB:
			cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
			break;
		case RADV_CMD_FLAG_FLUSH_AND_INV_DB:
			cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
			break;
		default:
			/* both CB & DB */
			cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
		}

		/* TC | TC_WB = invalidate L2 data
		 * TC_MD | TC_WB = invalidate L2 metadata
		 * TC | TC_WB | TC_MD = invalidate L2 data & metadata
		 *
		 * The metadata cache must always be invalidated for coherency
		 * between CB/DB and shaders. (metadata = HTILE, CMASK, DCC)
		 *
		 * TC must be invalidated on GFX9 only if the CB/DB surface is
		 * not pipe-aligned. If the surface is RB-aligned, it might not
		 * strictly be pipe-aligned since RB alignment takes precedence.
		 */
		tc_flags = EVENT_TC_WB_ACTION_ENA |
			   EVENT_TC_MD_ACTION_ENA;

		/* Ideally flush TC together with CB/DB. */
		if (flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) {
			tc_flags |= EVENT_TC_ACTION_ENA |
				    EVENT_TCL1_ACTION_ENA;

			/* Clear the flags. */
			flush_bits &= ~(RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1);
		}
		assert(flush_cnt);
		uint32_t old_fence = (*flush_cnt)++;

		si_cs_emit_write_event_eop(cs, predicated, chip_class, false, cb_db_event, tc_flags, 1,
					   flush_va, old_fence, *flush_cnt);
		si_emit_wait_fence(cs, predicated, flush_va, *flush_cnt, 0xffffffff);
	}

	/* VGT state sync */
	if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if ((cp_coher_cntl ||
	     (flush_bits & (RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
			    RADV_CMD_FLAG_INV_VMEM_L1 |
			    RADV_CMD_FLAG_INV_GLOBAL_L2 |
			    RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) &&
	    !is_mec) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, predicated));
		radeon_emit(cs, 0);
	}

	if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
	    (chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
		si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9,
				    cp_coher_cntl |
				    S_0085F0_TC_ACTION_ENA(1) |
				    S_0085F0_TCL1_ACTION_ENA(1) |
				    S_0301F0_TC_WB_ACTION_ENA(chip_class >= VI));
		cp_coher_cntl = 0;
	} else {
		if (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2) {
			/* WB = write-back
			 * NC = apply to non-coherent MTYPEs
			 *      (i.e. MTYPE <= 1, which is what we use everywhere)
			 *
			 * WB doesn't work without NC.
			 */
			si_emit_acquire_mem(cs, is_mec, predicated,
					    chip_class >= GFX9,
					    cp_coher_cntl |
					    S_0301F0_TC_WB_ACTION_ENA(1) |
					    S_0301F0_TC_NC_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
		if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
			si_emit_acquire_mem(cs, is_mec,
					    predicated, chip_class >= GFX9,
					    cp_coher_cntl |
					    S_0085F0_TCL1_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
	}

	/* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
	 * Therefore, it should be last. Done in PFP.
	 */
	if (cp_coher_cntl)
		si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9, cp_coher_cntl);
}
void
si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
{
	bool is_compute = cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE;

	if (is_compute)
		cmd_buffer->state.flush_bits &= ~(RADV_CMD_FLAG_FLUSH_AND_INV_CB |
						  RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
						  RADV_CMD_FLAG_FLUSH_AND_INV_DB |
						  RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
						  RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
						  RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
						  RADV_CMD_FLAG_VGT_FLUSH);

	if (!cmd_buffer->state.flush_bits)
		return;

	enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);

	uint64_t va = 0;
	uint32_t *ptr = NULL;

	if (chip_class == GFX9) {
		va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset;
		ptr = &cmd_buffer->gfx9_fence_idx;
	}
	si_cs_emit_cache_flush(cmd_buffer->cs,
			       cmd_buffer->state.predicating,
			       cmd_buffer->device->physical_device->rad_info.chip_class,
			       ptr, va,
			       radv_cmd_buffer_uses_mec(cmd_buffer),
			       cmd_buffer->state.flush_bits);

	radv_cmd_buffer_trace_emit(cmd_buffer);
	cmd_buffer->state.flush_bits = 0;
}
/* sets the CP predication state using a boolean stored at va */
void
si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer, uint64_t va)
{
	uint32_t op = 0;

	if (va)
		op = PRED_OP(PREDICATION_OP_BOOL64) | PREDICATION_DRAW_VISIBLE;
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
		radeon_emit(cmd_buffer->cs, op);
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);
	} else {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, op | ((va >> 32) & 0xFF));
	}
}
/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC	(1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT	(1 << 1)
#define CP_DMA_USE_L2	(1 << 2)
#define CP_DMA_CLEAR	(1 << 3)

/* Alignment for optimal performance. */
#define SI_CPDMA_ALIGNMENT	32
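/* Typical flag usage (the policy implemented by si_cp_dma_prepare() below):
 * the first packet after outstanding CP DMA writes carries CP_DMA_RAW_WAIT,
 * intermediate packets carry no sync flags, and only the last packet of a
 * copy or clear carries CP_DMA_SYNC. */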
/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct radv_cmd_buffer *cmd_buffer)
{
	unsigned max = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 ?
		       S_414_BYTE_COUNT_GFX9(~0u) :
		       S_414_BYTE_COUNT_GFX6(~0u);

	/* make it aligned for optimal performance */
	return max & ~(SI_CPDMA_ALIGNMENT - 1);
}
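/* Rounding the hardware limit down to a SI_CPDMA_ALIGNMENT multiple keeps
 * every full-size packet aligned, so only the final, smaller packet of a
 * large transfer can be unaligned. */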
/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
 * clear value.
 */
static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer,
			   uint64_t dst_va, uint64_t src_va,
			   unsigned size, unsigned flags)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint32_t header = 0, command = 0;

	assert(size);
	assert(size <= cp_dma_max_byte_count(cmd_buffer));

	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
		command |= S_414_BYTE_COUNT_GFX9(size);
	else
		command |= S_414_BYTE_COUNT_GFX6(size);

	/* Sync flags. */
	if (flags & CP_DMA_SYNC)
		header |= S_411_CP_SYNC(1);
	else {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
			command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
		else
			command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
	}

	if (flags & CP_DMA_RAW_WAIT)
		command |= S_414_RAW_WAIT(1);

	/* Src and dst flags. */
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
	    !(flags & CP_DMA_CLEAR) &&
	    src_va == dst_va)
		header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);

	if (flags & CP_DMA_CLEAR)
		header |= S_411_SRC_SEL(V_411_DATA);
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, cmd_buffer->state.predicating));
		radeon_emit(cs, header);
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);		/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, command);
	} else {
		assert(!(flags & CP_DMA_USE_L2));
		header |= S_411_SRC_ADDR_HI(src_va >> 32);
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, cmd_buffer->state.predicating));
		radeon_emit(cs, src_va);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, header);			/* SRC_ADDR_HI [15:0] + flags. */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, command);
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if ((flags & CP_DMA_SYNC) && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
		radeon_emit(cs, 0);
	}

	radv_cmd_buffer_trace_emit(cmd_buffer);
}
void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
			unsigned size)
{
	uint64_t aligned_va = va & ~(SI_CPDMA_ALIGNMENT - 1);
	uint64_t aligned_size = ((va + size + SI_CPDMA_ALIGNMENT - 1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligned_va;

	si_emit_cp_dma(cmd_buffer, aligned_va, aligned_va,
		       aligned_size, CP_DMA_USE_L2);
}
static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
			      uint64_t remaining_size, unsigned *flags)
{
	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (cmd_buffer->state.flush_bits) {
		si_emit_cache_flush(cmd_buffer);
		*flags |= CP_DMA_RAW_WAIT;
	}

	/* Do the synchronization after the last dma, so that all data
	 * is written to memory.
	 */
	if (byte_count == remaining_size)
		*flags |= CP_DMA_SYNC;
}
static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
{
	uint64_t va;
	uint32_t offset;
	unsigned dma_flags = 0;
	unsigned buf_size = SI_CPDMA_ALIGNMENT * 2;
	void *ptr;

	assert(size < SI_CPDMA_ALIGNMENT);

	radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr);

	va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);

	si_emit_cp_dma(cmd_buffer, va, va + SI_CPDMA_ALIGNMENT, size,
		       dma_flags);
}
*cmd_buffer
,
1313 uint64_t src_va
, uint64_t dest_va
,
1316 uint64_t main_src_va
, main_dest_va
;
1317 uint64_t skipped_size
= 0, realign_size
= 0;
1320 if (cmd_buffer
->device
->physical_device
->rad_info
.family
<= CHIP_CARRIZO
||
1321 cmd_buffer
->device
->physical_device
->rad_info
.family
== CHIP_STONEY
) {
1322 /* If the size is not aligned, we must add a dummy copy at the end
1323 * just to align the internal counter. Otherwise, the DMA engine
1324 * would slow down by an order of magnitude for following copies.
1326 if (size
% SI_CPDMA_ALIGNMENT
)
1327 realign_size
= SI_CPDMA_ALIGNMENT
- (size
% SI_CPDMA_ALIGNMENT
);
1329 /* If the copy begins unaligned, we must start copying from the next
1330 * aligned block and the skipped part should be copied after everything
1331 * else has been copied. Only the src alignment matters, not dst.
1333 if (src_va
% SI_CPDMA_ALIGNMENT
) {
1334 skipped_size
= SI_CPDMA_ALIGNMENT
- (src_va
% SI_CPDMA_ALIGNMENT
);
1335 /* The main part will be skipped if the size is too small. */
1336 skipped_size
= MIN2(skipped_size
, size
);
1337 size
-= skipped_size
;
1340 main_src_va
= src_va
+ skipped_size
;
1341 main_dest_va
= dest_va
+ skipped_size
;
1344 unsigned dma_flags
= 0;
1345 unsigned byte_count
= MIN2(size
, cp_dma_max_byte_count(cmd_buffer
));
1347 si_cp_dma_prepare(cmd_buffer
, byte_count
,
1348 size
+ skipped_size
+ realign_size
,
1351 si_emit_cp_dma(cmd_buffer
, main_dest_va
, main_src_va
,
1352 byte_count
, dma_flags
);
1355 main_src_va
+= byte_count
;
1356 main_dest_va
+= byte_count
;
1360 unsigned dma_flags
= 0;
1362 si_cp_dma_prepare(cmd_buffer
, skipped_size
,
1363 size
+ skipped_size
+ realign_size
,
1366 si_emit_cp_dma(cmd_buffer
, dest_va
, src_va
,
1367 skipped_size
, dma_flags
);
1370 si_cp_dma_realign_engine(cmd_buffer
, realign_size
);
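/* Worked example (illustrative): on an affected chip, a copy of size = 100
 * with src_va % 32 == 8 gets realign_size = 32 - (100 % 32) = 28 and
 * skipped_size = 32 - 8 = 24; the loop then copies the 76-byte aligned main
 * part, the tail copy moves the 24 skipped bytes, and
 * si_cp_dma_realign_engine() issues the 28-byte dummy copy. */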
void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
			    uint64_t size, unsigned value)
{
	if (!size)
		return;

	assert(va % 4 == 0 && size % 4 == 0);

	while (size) {
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
		unsigned dma_flags = CP_DMA_CLEAR;

		si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);

		/* Emit the clear packet. */
		si_emit_cp_dma(cmd_buffer, va, value, byte_count,
			       dma_flags);

		size -= byte_count;
		va += byte_count;
	}
}
1398 #define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y) \
1399 (((s0x) & 0xf) | (((unsigned)(s0y) & 0xf) << 4) | \
1400 (((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) | \
1401 (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) | \
1402 (((unsigned)(s3x) & 0xf) << 24) | (((unsigned)(s3y) & 0xf) << 28))
/* 2xMSAA
 * There are two locations (4, 4), (-4, -4). */
const uint32_t eg_sample_locs_2x[4] = {
	FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
	FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
	FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
	FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
};
const unsigned eg_max_dist_2x = 4;
/* 4xMSAA
 * There are 4 locations: (-2, -6), (6, -2), (-6, 2), (2, 6). */
const uint32_t eg_sample_locs_4x[4] = {
	FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
	FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
	FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
	FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
};
const unsigned eg_max_dist_4x = 6;
/* Cayman 8xMSAA */
static const uint32_t cm_sample_locs_8x[] = {
	FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
	FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
	FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
	FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
	FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
	FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
	FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
	FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
};
static const unsigned cm_max_dist_8x = 8;
/* Cayman 16xMSAA */
static const uint32_t cm_sample_locs_16x[] = {
	FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
	FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
	FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
	FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
	FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
	FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
	FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
	FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
	FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
	FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
	FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
	FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
	FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
	FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
	FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
	FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
};
static const unsigned cm_max_dist_16x = 8;
unsigned radv_cayman_get_maxdist(int log_samples)
{
	unsigned max_dist[] = {
		0,
		eg_max_dist_2x,
		eg_max_dist_4x,
		cm_max_dist_8x,
		cm_max_dist_16x
	};
	return max_dist[log_samples];
}
void radv_cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples)
{
	switch (nr_samples) {
	default:
	case 1:
		radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0);
		radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0);
		radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0);
		radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0);
		break;
	case 2:
		radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
		radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
		radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
		radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
		break;
	case 4:
		radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
		radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
		radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
		radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
		break;
	case 8:
		radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
		radeon_emit(cs, cm_sample_locs_8x[0]);
		radeon_emit(cs, cm_sample_locs_8x[4]);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		radeon_emit(cs, cm_sample_locs_8x[1]);
		radeon_emit(cs, cm_sample_locs_8x[5]);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		radeon_emit(cs, cm_sample_locs_8x[2]);
		radeon_emit(cs, cm_sample_locs_8x[6]);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		radeon_emit(cs, cm_sample_locs_8x[3]);
		radeon_emit(cs, cm_sample_locs_8x[7]);
		break;
	case 16:
		radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
		radeon_emit(cs, cm_sample_locs_16x[0]);
		radeon_emit(cs, cm_sample_locs_16x[4]);
		radeon_emit(cs, cm_sample_locs_16x[8]);
		radeon_emit(cs, cm_sample_locs_16x[12]);
		radeon_emit(cs, cm_sample_locs_16x[1]);
		radeon_emit(cs, cm_sample_locs_16x[5]);
		radeon_emit(cs, cm_sample_locs_16x[9]);
		radeon_emit(cs, cm_sample_locs_16x[13]);
		radeon_emit(cs, cm_sample_locs_16x[2]);
		radeon_emit(cs, cm_sample_locs_16x[6]);
		radeon_emit(cs, cm_sample_locs_16x[10]);
		radeon_emit(cs, cm_sample_locs_16x[14]);
		radeon_emit(cs, cm_sample_locs_16x[3]);
		radeon_emit(cs, cm_sample_locs_16x[7]);
		radeon_emit(cs, cm_sample_locs_16x[11]);
		radeon_emit(cs, cm_sample_locs_16x[15]);
		break;
	}
}
static void radv_cayman_get_sample_position(struct radv_device *device,
					    unsigned sample_count,
					    unsigned sample_index, float *out_value)
{
	int offset, index;
	struct {
		int idx:4;
	} val;

	switch (sample_count) {
	case 1:
	default:
		out_value[0] = out_value[1] = 0.5;
		break;
	case 2:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 4:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 8:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4) * 4;
		val.idx = (cm_sample_locs_8x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (cm_sample_locs_8x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 16:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4) * 4;
		val.idx = (cm_sample_locs_16x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (cm_sample_locs_16x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	}
}
void radv_device_init_msaa(struct radv_device *device)
{
	int i;

	radv_cayman_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);

	for (i = 0; i < 2; i++)
		radv_cayman_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
	for (i = 0; i < 4; i++)
		radv_cayman_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
	for (i = 0; i < 8; i++)
		radv_cayman_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
	for (i = 0; i < 16; i++)
		radv_cayman_get_sample_position(device, 16, i, device->sample_locations_16x[i]);
}