mesa.git: src/amd/vulkan/si_cmd_buffer.c @ 604a5e218ad947eef5d5f455eda7d5eeac171611
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based on si_state.c
6 * Copyright © 2015 Advanced Micro Devices, Inc.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 /* command buffer handling for SI */
29
30 #include "radv_private.h"
31 #include "radv_cs.h"
32 #include "sid.h"
33 #include "gfx9d.h"
34 #include "radv_util.h"
35 #include "main/macros.h"
36
37 #define SI_GS_PER_ES 128
38
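/* Write PA_SC_RASTER_CONFIG for "harvested" chips, i.e. parts where some
 * render backends are fused off. The SE_MAP/PKR_MAP/RB_MAP fields are
 * remapped per shader engine based on enabled_rb_mask, and each SE's value
 * is written separately by selecting it through GRBM_GFX_INDEX. */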
39 static void
40 si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
41 struct radeon_winsys_cs *cs,
42 unsigned raster_config,
43 unsigned raster_config_1)
44 {
45 unsigned sh_per_se = MAX2(physical_device->rad_info.max_sh_per_se, 1);
46 unsigned num_se = MAX2(physical_device->rad_info.max_se, 1);
47 unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
48 unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
49 unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
50 unsigned rb_per_se = num_rb / num_se;
51 unsigned se_mask[4];
52 unsigned se;
53
54 se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
55 se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
56 se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
57 se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
58
59 assert(num_se == 1 || num_se == 2 || num_se == 4);
60 assert(sh_per_se == 1 || sh_per_se == 2);
61 assert(rb_per_pkr == 1 || rb_per_pkr == 2);
62
63 /* XXX: I can't figure out what the *_XSEL and *_YSEL
64 * fields are for, so I'm leaving them as their default
65 * values. */
66
67 if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
68 (!se_mask[2] && !se_mask[3]))) {
69 raster_config_1 &= C_028354_SE_PAIR_MAP;
70
71 if (!se_mask[0] && !se_mask[1]) {
72 raster_config_1 |=
73 S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3);
74 } else {
75 raster_config_1 |=
76 S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0);
77 }
78 }
79
80 for (se = 0; se < num_se; se++) {
81 unsigned raster_config_se = raster_config;
82 unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
83 unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
84 int idx = (se / 2) * 2;
85
86 if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
87 raster_config_se &= C_028350_SE_MAP;
88
89 if (!se_mask[idx]) {
90 raster_config_se |=
91 S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3);
92 } else {
93 raster_config_se |=
94 S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0);
95 }
96 }
97
98 pkr0_mask &= rb_mask;
99 pkr1_mask &= rb_mask;
100 if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
101 raster_config_se &= C_028350_PKR_MAP;
102
103 if (!pkr0_mask) {
104 raster_config_se |=
105 S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3);
106 } else {
107 raster_config_se |=
108 S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0);
109 }
110 }
111
112 if (rb_per_se >= 2) {
113 unsigned rb0_mask = 1 << (se * rb_per_se);
114 unsigned rb1_mask = rb0_mask << 1;
115
116 rb0_mask &= rb_mask;
117 rb1_mask &= rb_mask;
118 if (!rb0_mask || !rb1_mask) {
119 raster_config_se &= C_028350_RB_MAP_PKR0;
120
121 if (!rb0_mask) {
122 raster_config_se |=
123 S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3);
124 } else {
125 raster_config_se |=
126 S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0);
127 }
128 }
129
130 if (rb_per_se > 2) {
131 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
132 rb1_mask = rb0_mask << 1;
133 rb0_mask &= rb_mask;
134 rb1_mask &= rb_mask;
135 if (!rb0_mask || !rb1_mask) {
136 raster_config_se &= C_028350_RB_MAP_PKR1;
137
138 if (!rb0_mask) {
139 raster_config_se |=
140 S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3);
141 } else {
142 raster_config_se |=
143 S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0);
144 }
145 }
146 }
147 }
148
149 /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
150 if (physical_device->rad_info.chip_class < CIK)
151 radeon_set_config_reg(cs, GRBM_GFX_INDEX,
152 SE_INDEX(se) | SH_BROADCAST_WRITES |
153 INSTANCE_BROADCAST_WRITES);
154 else
155 radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
156 S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
157 S_030800_INSTANCE_BROADCAST_WRITES(1));
158 radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se);
159 if (physical_device->rad_info.chip_class >= CIK)
160 radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
161 }
162
163 /* GRBM_GFX_INDEX has a different offset on SI and CI+ */
164 if (physical_device->rad_info.chip_class < CIK)
165 radeon_set_config_reg(cs, GRBM_GFX_INDEX,
166 SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
167 INSTANCE_BROADCAST_WRITES);
168 else
169 radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
170 S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
171 S_030800_INSTANCE_BROADCAST_WRITES(1));
172 }
173
174 static void
175 si_emit_compute(struct radv_physical_device *physical_device,
176 struct radeon_winsys_cs *cs)
177 {
178 radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
179 radeon_emit(cs, 0);
180 radeon_emit(cs, 0);
181 radeon_emit(cs, 0);
182
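/* COMPUTE_STATIC_THREAD_MGMT_SE* hold per-SH CU enable masks for each
 * shader engine; 0xffff keeps every CU available to compute. */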
183 radeon_set_sh_reg_seq(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, 3);
184 radeon_emit(cs, 0);
185 /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
186 radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
187 radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));
188
189 if (physical_device->rad_info.chip_class >= CIK) {
190 /* Also set R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
191 radeon_set_sh_reg_seq(cs,
192 R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
193 radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
194 S_00B864_SH1_CU_EN(0xffff));
195 radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
196 S_00B868_SH1_CU_EN(0xffff));
197 }
198
199 /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
200 * and is now per pipe, so it should be handled in the
201 * kernel if we want to use something other than the default value,
202 * which is now 0x22f.
203 */
204 if (physical_device->rad_info.chip_class <= SI) {
205 /* XXX: This should be:
206 * (number of compute units) * 4 * (waves per simd) - 1 */
207
208 radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
209 0x190 /* Default value */);
210 }
211 }
212
213 void
214 si_init_compute(struct radv_cmd_buffer *cmd_buffer)
215 {
216 struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
217 si_emit_compute(physical_device, cmd_buffer->cs);
218 }
219
220 static void
221 si_emit_config(struct radv_physical_device *physical_device,
222 struct radeon_winsys_cs *cs)
223 {
224 unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
225 unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
226 unsigned raster_config, raster_config_1;
227 int i;
228
229 radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
230 radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1));
231 radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1));
232
233 radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
234 radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));
235
236 /* FIXME calculate these values somehow ??? */
237 radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
238 radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
239 radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);
240
241 radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
242 radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
243
244 radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
245 radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
246 if (physical_device->rad_info.chip_class < CIK)
247 radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
248 S_008A14_CLIP_VTX_REORDER_ENA(1));
249
250 radeon_set_context_reg(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
251 radeon_set_context_reg(cs, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);
252
253 radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);
254
255 for (i = 0; i < 16; i++) {
256 radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
257 radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
258 }
259
260 switch (physical_device->rad_info.family) {
261 case CHIP_TAHITI:
262 case CHIP_PITCAIRN:
263 raster_config = 0x2a00126a;
264 raster_config_1 = 0x00000000;
265 break;
266 case CHIP_VERDE:
267 raster_config = 0x0000124a;
268 raster_config_1 = 0x00000000;
269 break;
270 case CHIP_OLAND:
271 raster_config = 0x00000082;
272 raster_config_1 = 0x00000000;
273 break;
274 case CHIP_HAINAN:
275 raster_config = 0x00000000;
276 raster_config_1 = 0x00000000;
277 break;
278 case CHIP_BONAIRE:
279 raster_config = 0x16000012;
280 raster_config_1 = 0x00000000;
281 break;
282 case CHIP_HAWAII:
283 raster_config = 0x3a00161a;
284 raster_config_1 = 0x0000002e;
285 break;
286 case CHIP_FIJI:
287 if (physical_device->rad_info.cik_macrotile_mode_array[0] == 0x000000e8) {
288 /* old kernels with old tiling config */
289 raster_config = 0x16000012;
290 raster_config_1 = 0x0000002a;
291 } else {
292 raster_config = 0x3a00161a;
293 raster_config_1 = 0x0000002e;
294 }
295 break;
296 case CHIP_POLARIS10:
297 raster_config = 0x16000012;
298 raster_config_1 = 0x0000002a;
299 break;
300 case CHIP_POLARIS11:
301 case CHIP_POLARIS12:
302 raster_config = 0x16000012;
303 raster_config_1 = 0x00000000;
304 break;
305 case CHIP_TONGA:
306 raster_config = 0x16000012;
307 raster_config_1 = 0x0000002a;
308 break;
309 case CHIP_ICELAND:
310 if (num_rb == 1)
311 raster_config = 0x00000000;
312 else
313 raster_config = 0x00000002;
314 raster_config_1 = 0x00000000;
315 break;
316 case CHIP_CARRIZO:
317 raster_config = 0x00000002;
318 raster_config_1 = 0x00000000;
319 break;
320 case CHIP_KAVERI:
321 /* KV should be 0x00000002, but that causes problems with radeon */
322 raster_config = 0x00000000; /* 0x00000002 */
323 raster_config_1 = 0x00000000;
324 break;
325 case CHIP_KABINI:
326 case CHIP_MULLINS:
327 case CHIP_STONEY:
328 raster_config = 0x00000000;
329 raster_config_1 = 0x00000000;
330 break;
331 default:
332 fprintf(stderr,
333 "radeonsi: Unknown GPU, using 0 for raster_config\n");
334 raster_config = 0x00000000;
335 raster_config_1 = 0x00000000;
336 break;
337 }
338
339 /* Always use the default config when all backends are enabled
340 * (or when we failed to determine the enabled backends).
341 */
342 if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
343 radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG,
344 raster_config);
345 if (physical_device->rad_info.chip_class >= CIK)
346 radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
347 raster_config_1);
348 } else {
349 si_write_harvested_raster_configs(physical_device, cs, raster_config, raster_config_1);
350 }
351
352 radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, S_028204_WINDOW_OFFSET_DISABLE(1));
353 radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL, S_028240_WINDOW_OFFSET_DISABLE(1));
354 radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
355 S_028244_BR_X(16384) | S_028244_BR_Y(16384));
356 radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
357 radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
358 S_028034_BR_X(16384) | S_028034_BR_Y(16384));
359
360 radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
361 radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
362 /* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
363 radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
364 radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);
365
366 radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
367 radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
368 radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
369 radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
370 S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
371 S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));
372
373 radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
374 radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
375 radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
376
377 if (physical_device->rad_info.chip_class >= CIK) {
378 /* If this is 0, Bonaire can hang even if GS isn't being used.
379 * Other chips are unaffected. These are suboptimal values,
380 * but we don't use on-chip GS.
381 */
382 radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
383 S_028A44_ES_VERTS_PER_SUBGRP(64) |
384 S_028A44_GS_PRIMS_PER_SUBGRP(4));
385
386 radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xffff));
387 radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 0);
388 radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, S_00B31C_CU_EN(0xffff));
389 radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS, S_00B21C_CU_EN(0xffff));
390
391 if (physical_device->rad_info.num_good_compute_units /
392 (physical_device->rad_info.max_se * physical_device->rad_info.max_sh_per_se) <= 4) {
393 /* Too few available compute units per SH. Disallowing
394 * VS to run on CU0 could hurt us more than late VS
395 * allocation would help.
396 *
397 * LATE_ALLOC_VS = 2 is the highest safe number.
398 */
399 radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xffff));
400 radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2));
401 } else {
402 /* Set LATE_ALLOC_VS == 31. It should be less than
403 * the number of scratch waves. Limitations:
404 * - VS can't execute on CU0.
405 * - If HS writes outputs to LDS, LS can't execute on CU0.
406 */
407 radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS, S_00B118_CU_EN(0xfffe));
408 radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31));
409 }
410
411 radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS, S_00B01C_CU_EN(0xffff));
412 }
413
414 if (physical_device->rad_info.chip_class >= VI) {
415 uint32_t vgt_tess_distribution;
416 radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL,
417 S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
418 S_028424_OVERWRITE_COMBINER_WATERMARK(4));
419 if (physical_device->rad_info.family < CHIP_POLARIS10)
420 radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 30);
421 radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 32);
422
423 vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
424 S_028B50_ACCUM_TRI(11) |
425 S_028B50_ACCUM_QUAD(11) |
426 S_028B50_DONUT_SPLIT(16);
427
428 if (physical_device->rad_info.family == CHIP_FIJI ||
429 physical_device->rad_info.family >= CHIP_POLARIS10)
430 vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);
431
432 radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
433 vgt_tess_distribution);
434 } else {
435 radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
436 radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
437 }
438
439 if (physical_device->rad_info.family == CHIP_STONEY)
440 radeon_set_context_reg(cs, R_028C40_PA_SC_SHADER_CONTROL, 0);
441
442 si_emit_compute(physical_device, cs);
443 }
444
445 void si_init_config(struct radv_cmd_buffer *cmd_buffer)
446 {
447 struct radv_physical_device *physical_device = cmd_buffer->device->physical_device;
448
449 si_emit_config(physical_device, cmd_buffer->cs);
450 }
451
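/* Record the static context setup once into a GTT buffer (device->gfx_init)
 * so it can be submitted ahead of application command buffers instead of
 * re-emitting si_emit_config every time. The IB is padded with NOPs to a
 * multiple of 8 dwords before being uploaded. */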
452 void
453 cik_create_gfx_config(struct radv_device *device)
454 {
455 struct radeon_winsys_cs *cs = device->ws->cs_create(device->ws, RING_GFX);
456 if (!cs)
457 return;
458
459 si_emit_config(device->physical_device, cs);
460
461 while (cs->cdw & 7) {
462 if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
463 radeon_emit(cs, 0x80000000);
464 else
465 radeon_emit(cs, 0xffff1000);
466 }
467
468 device->gfx_init = device->ws->buffer_create(device->ws,
469 cs->cdw * 4, 4096,
470 RADEON_DOMAIN_GTT,
471 RADEON_FLAG_CPU_ACCESS);
472 if (!device->gfx_init)
473 goto fail;
474
475 void *map = device->ws->buffer_map(device->gfx_init);
476 if (!map) {
477 device->ws->buffer_destroy(device->gfx_init);
478 device->gfx_init = NULL;
479 goto fail;
480 }
481 memcpy(map, cs->buf, cs->cdw * 4);
482
483 device->ws->buffer_unmap(device->gfx_init);
484 device->gfx_init_size_dw = cs->cdw;
485 fail:
486 device->ws->cs_destroy(cs);
487 }
488
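/* Convert a VkViewport into the hardware viewport transform:
 * window = ndc * scale + translate per axis. With scale = width/2 and
 * translate = x + width/2, NDC x in [-1, 1] maps to [x, x + width];
 * depth maps NDC z in [0, 1] linearly onto [minDepth, maxDepth]. */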
489 static void
490 get_viewport_xform(const VkViewport *viewport,
491 float scale[3], float translate[3])
492 {
493 float x = viewport->x;
494 float y = viewport->y;
495 float half_width = 0.5f * viewport->width;
496 float half_height = 0.5f * viewport->height;
497 double n = viewport->minDepth;
498 double f = viewport->maxDepth;
499
500 scale[0] = half_width;
501 translate[0] = half_width + x;
502 scale[1] = half_height;
503 translate[1] = half_height + y;
504
505 scale[2] = (f - n);
506 translate[2] = n;
507 }
508
509 void
510 si_write_viewport(struct radeon_winsys_cs *cs, int first_vp,
511 int count, const VkViewport *viewports)
512 {
513 int i;
514
515 assert(count);
516 radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
517 first_vp * 4 * 6, count * 6);
518
519 for (i = 0; i < count; i++) {
520 float scale[3], translate[3];
521
522
523 get_viewport_xform(&viewports[i], scale, translate);
524 radeon_emit(cs, fui(scale[0]));
525 radeon_emit(cs, fui(translate[0]));
526 radeon_emit(cs, fui(scale[1]));
527 radeon_emit(cs, fui(translate[1]));
528 radeon_emit(cs, fui(scale[2]));
529 radeon_emit(cs, fui(translate[2]));
530 }
531
532 radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 +
533 first_vp * 4 * 2, count * 2);
534 for (i = 0; i < count; i++) {
535 float zmin = MIN2(viewports[i].minDepth, viewports[i].maxDepth);
536 float zmax = MAX2(viewports[i].minDepth, viewports[i].maxDepth);
537 radeon_emit(cs, fui(zmin));
538 radeon_emit(cs, fui(zmax));
539 }
540 }
541
542 static VkRect2D si_scissor_from_viewport(const VkViewport *viewport)
543 {
544 float scale[3], translate[3];
545 VkRect2D rect;
546
547 get_viewport_xform(viewport, scale, translate);
548
549 rect.offset.x = translate[0] - abs(scale[0]);
550 rect.offset.y = translate[1] - abs(scale[1]);
551 rect.extent.width = ceilf(translate[0] + abs(scale[0])) - rect.offset.x;
552 rect.extent.height = ceilf(translate[1] + abs(scale[1])) - rect.offset.y;
553
554 return rect;
555 }
556
557 static VkRect2D si_intersect_scissor(const VkRect2D *a, const VkRect2D *b) {
558 VkRect2D ret;
559 ret.offset.x = MAX2(a->offset.x, b->offset.x);
560 ret.offset.y = MAX2(a->offset.y, b->offset.y);
561 ret.extent.width = MIN2(a->offset.x + a->extent.width,
562 b->offset.x + b->extent.width) - ret.offset.x;
563 ret.extent.height = MIN2(a->offset.y + a->extent.height,
564 b->offset.y + b->extent.height) - ret.offset.y;
565 return ret;
566 }
567
568 void
569 si_write_scissors(struct radeon_winsys_cs *cs, int first,
570 int count, const VkRect2D *scissors,
571 const VkViewport *viewports, bool can_use_guardband)
572 {
573 int i;
574 float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY;
575 const float max_range = 32767.0f;
576 assert(count);
577
578 radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
579 for (i = 0; i < count; i++) {
580 VkRect2D viewport_scissor = si_scissor_from_viewport(viewports + i);
581 VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);
582
583 get_viewport_xform(viewports + i, scale, translate);
584 scale[0] = abs(scale[0]);
585 scale[1] = abs(scale[1]);
586
587 if (scale[0] < 0.5)
588 scale[0] = 0.5;
589 if (scale[1] < 0.5)
590 scale[1] = 0.5;
591
592 guardband_x = MIN2(guardband_x, (max_range - abs(translate[0])) / scale[0]);
593 guardband_y = MIN2(guardband_y, (max_range - abs(translate[1])) / scale[1]);
594
595 radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
596 S_028250_TL_Y(scissor.offset.y) |
597 S_028250_WINDOW_OFFSET_DISABLE(1));
598 radeon_emit(cs, S_028254_BR_X(scissor.offset.x + scissor.extent.width) |
599 S_028254_BR_Y(scissor.offset.y + scissor.extent.height));
600 }
601 if (!can_use_guardband) {
602 guardband_x = 1.0;
603 guardband_y = 1.0;
604 }
605
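/* PA_CL_GB_{VERT,HORZ}_CLIP_ADJ scale the guard band, i.e. how far beyond
 * the viewport primitives may extend before they really get clipped.
 * guardband_x/y above are chosen so guard-band coordinates stay within the
 * hardware's roughly +/-32k screen-space range; the two discard-adjust
 * values are left at 1.0. */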
606 radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
607 radeon_emit(cs, fui(guardband_y));
608 radeon_emit(cs, fui(1.0));
609 radeon_emit(cs, fui(guardband_x));
610 radeon_emit(cs, fui(1.0));
611 }
612
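/* Number of primitives for a draw of 'num' vertices: prim_vertex_count
 * holds the minimum vertex count (min) and the per-primitive increment
 * (incr) for the current topology. E.g. a triangle list has min = 3,
 * incr = 3, so 6 vertices give 2 primitives; a triangle strip has min = 3,
 * incr = 1, so 6 vertices give 1 + (6 - 3) / 1 = 4 primitives. */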
613 static inline unsigned
614 radv_prims_for_vertices(struct radv_prim_vertex_count *info, unsigned num)
615 {
616 if (num == 0)
617 return 0;
618
619 if (info->incr == 0)
620 return 0;
621
622 if (num < info->min)
623 return 0;
624
625 return 1 + ((num - info->min) / info->incr);
626 }
627
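/* Compute IA_MULTI_VGT_PARAM for the current draw. This balances the
 * primgroup size against the various SWITCH_ON_EOP/SWITCH_ON_EOI and
 * PARTIAL_*_WAVE requirements and workarounds that individual chips need;
 * the logic mirrors radeonsi's si_get_ia_multi_vgt_param. */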
628 uint32_t
629 si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
630 bool instanced_draw, bool indirect_draw,
631 uint32_t draw_vertex_count)
632 {
633 enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
634 enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
635 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
636 unsigned prim = cmd_buffer->state.pipeline->graphics.prim;
637 unsigned primgroup_size = 128; /* recommended without a GS */
638 unsigned max_primgroup_in_wave = 2;
639 /* SWITCH_ON_EOP(0) is always preferable. */
640 bool wd_switch_on_eop = false;
641 bool ia_switch_on_eop = false;
642 bool ia_switch_on_eoi = false;
643 bool partial_vs_wave = false;
644 bool partial_es_wave = false;
645 uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
646 bool multi_instances_smaller_than_primgroup;
647
648 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
649 primgroup_size = cmd_buffer->state.pipeline->graphics.tess.num_patches;
650 else if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
651 primgroup_size = 64; /* recommended with a GS */
652
653 multi_instances_smaller_than_primgroup = indirect_draw || (instanced_draw &&
654 num_prims < primgroup_size);
655 if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) {
656 /* SWITCH_ON_EOI must be set if PrimID is used. */
657 if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.uses_prim_id ||
658 cmd_buffer->state.pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.uses_prim_id)
659 ia_switch_on_eoi = true;
660
661 /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
662 if ((family == CHIP_TAHITI ||
663 family == CHIP_PITCAIRN ||
664 family == CHIP_BONAIRE) &&
665 radv_pipeline_has_gs(cmd_buffer->state.pipeline))
666 partial_vs_wave = true;
667
668 /* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
669 if (cmd_buffer->device->has_distributed_tess) {
670 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
671 partial_es_wave = true;
672
673 if (family == CHIP_TONGA ||
674 family == CHIP_FIJI ||
675 family == CHIP_POLARIS10 ||
676 family == CHIP_POLARIS11 ||
677 family == CHIP_POLARIS12)
678 partial_vs_wave = true;
679 } else {
680 partial_vs_wave = true;
681 }
682 }
683 }
684 /* TODO linestipple */
685
686 if (chip_class >= CIK) {
687 /* WD_SWITCH_ON_EOP has no effect on GPUs with fewer than
688 * 4 shader engines. Set 1 to pass the assertion below.
689 * The other cases are hardware requirements. */
690 if (info->max_se < 4 ||
691 prim == V_008958_DI_PT_POLYGON ||
692 prim == V_008958_DI_PT_LINELOOP ||
693 prim == V_008958_DI_PT_TRIFAN ||
694 prim == V_008958_DI_PT_TRISTRIP_ADJ ||
695 (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
696 (family < CHIP_POLARIS10 ||
697 (prim != V_008958_DI_PT_POINTLIST &&
698 prim != V_008958_DI_PT_LINESTRIP &&
699 prim != V_008958_DI_PT_TRISTRIP))))
700 wd_switch_on_eop = true;
701
702 /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
703 * We don't know that for indirect drawing, so treat it as
704 * always problematic. */
705 if (family == CHIP_HAWAII &&
706 (instanced_draw || indirect_draw))
707 wd_switch_on_eop = true;
708
709 /* Performance recommendation for 4 SE Gfx7-8 parts if
710 * instances are smaller than a primgroup.
711 * Assume indirect draws always use small instances.
712 * This is needed for good VS wave utilization.
713 */
714 if (chip_class <= VI &&
715 info->max_se == 4 &&
716 multi_instances_smaller_than_primgroup)
717 wd_switch_on_eop = true;
718
719 /* Required on CIK and later. */
720 if (info->max_se > 2 && !wd_switch_on_eop)
721 ia_switch_on_eoi = true;
722
723 /* Required by Hawaii and, for some special cases, by VI. */
724 if (ia_switch_on_eoi &&
725 (family == CHIP_HAWAII ||
726 (chip_class == VI &&
727 (radv_pipeline_has_gs(cmd_buffer->state.pipeline) || max_primgroup_in_wave != 2))))
728 partial_vs_wave = true;
729
730 /* Instancing bug on Bonaire. */
731 if (family == CHIP_BONAIRE && ia_switch_on_eoi &&
732 (instanced_draw || indirect_draw))
733 partial_vs_wave = true;
734
735 /* If the WD switch is false, the IA switch must be false too. */
736 assert(wd_switch_on_eop || !ia_switch_on_eop);
737 }
738 /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
739 if (ia_switch_on_eoi)
740 partial_es_wave = true;
741
742 if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
743 /* GS requirement. */
744 if (SI_GS_PER_ES / primgroup_size >= cmd_buffer->device->gs_table_depth - 3)
745 partial_es_wave = true;
746
747 /* Hw bug with single-primitive instances and SWITCH_ON_EOI
748 * on multi-SE chips. */
749 if (info->max_se >= 2 && ia_switch_on_eoi &&
750 ((instanced_draw || indirect_draw) &&
751 num_prims <= 1))
752 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
753 }
754
755 return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
756 S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
757 S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
758 S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
759 S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
760 S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0) |
761 S_028AA8_MAX_PRIMGRP_IN_WAVE(chip_class >= VI ?
762 max_primgroup_in_wave : 0);
763
764 }
765
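/* Emit an end-of-pipe event that writes a fence value to memory once the
 * event has completed. On the async compute queue (MEC) this uses
 * RELEASE_MEM; on the gfx queue it uses EVENT_WRITE_EOP. */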
766 void si_cs_emit_write_event_eop(struct radeon_winsys_cs *cs,
767 enum chip_class chip_class,
768 bool is_mec,
769 unsigned event, unsigned event_flags,
770 unsigned data_sel,
771 uint64_t va,
772 uint32_t old_fence,
773 uint32_t new_fence)
774 {
775 unsigned op = EVENT_TYPE(event) |
776 EVENT_INDEX(5) |
777 event_flags;
778
779 if (is_mec) {
780 radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 5, 0));
781 radeon_emit(cs, op);
782 radeon_emit(cs, EOP_DATA_SEL(data_sel));
783 radeon_emit(cs, va); /* address lo */
784 radeon_emit(cs, va >> 32); /* address hi */
785 radeon_emit(cs, new_fence); /* immediate data lo */
786 radeon_emit(cs, 0); /* immediate data hi */
787 } else {
788 if (chip_class == CIK ||
789 chip_class == VI) {
790 /* Two EOP events are required to make all engines go idle
791 * (and optional cache flushes executed) before the timestamp
792 * is written.
793 */
794 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
795 radeon_emit(cs, op);
796 radeon_emit(cs, va);
797 radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
798 radeon_emit(cs, old_fence); /* immediate data */
799 radeon_emit(cs, 0); /* unused */
800 }
801
802 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
803 radeon_emit(cs, op);
804 radeon_emit(cs, va);
805 radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
806 radeon_emit(cs, new_fence); /* immediate data */
807 radeon_emit(cs, 0); /* unused */
808 }
809 }
810
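/* Make the CP poll the 32-bit value at 'va' until (*va & mask) == ref.
 * WAIT_REG_MEM_MEM_SPACE(1) selects a memory operand, and the last dword
 * is the poll interval. */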
811 void
812 si_emit_wait_fence(struct radeon_winsys_cs *cs,
813 uint64_t va, uint32_t ref,
814 uint32_t mask)
815 {
816 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
817 radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
818 radeon_emit(cs, va);
819 radeon_emit(cs, va >> 32);
820 radeon_emit(cs, ref); /* reference value */
821 radeon_emit(cs, mask); /* mask */
822 radeon_emit(cs, 4); /* poll interval */
823 }
824
825 static void
826 si_emit_acquire_mem(struct radeon_winsys_cs *cs,
827 bool is_mec,
828 unsigned cp_coher_cntl)
829 {
830 if (is_mec) {
831 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) |
832 PKT3_SHADER_TYPE_S(1));
833 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
834 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
835 radeon_emit(cs, 0xff); /* CP_COHER_SIZE_HI */
836 radeon_emit(cs, 0); /* CP_COHER_BASE */
837 radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
838 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
839 } else {
840 /* ACQUIRE_MEM is only required on a compute ring; the gfx ring uses SURFACE_SYNC instead. */
841 radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
842 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
843 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
844 radeon_emit(cs, 0); /* CP_COHER_BASE */
845 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
846 }
847 }
848
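/* Turn RADV_CMD_FLAG_* bits into the matching EVENT_WRITE packets and
 * CP_COHER_CNTL bits; the accumulated CP_COHER_CNTL is flushed at the end
 * through si_emit_acquire_mem (SURFACE_SYNC on gfx, ACQUIRE_MEM on compute). */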
849 void
850 si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
851 enum chip_class chip_class,
852 bool is_mec,
853 enum radv_cmd_flush_bits flush_bits)
854 {
855 unsigned cp_coher_cntl = 0;
856
857 if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
858 cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
859 if (flush_bits & RADV_CMD_FLAG_INV_SMEM_L1)
860 cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
861
862 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
863 cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
864 S_0085F0_CB0_DEST_BASE_ENA(1) |
865 S_0085F0_CB1_DEST_BASE_ENA(1) |
866 S_0085F0_CB2_DEST_BASE_ENA(1) |
867 S_0085F0_CB3_DEST_BASE_ENA(1) |
868 S_0085F0_CB4_DEST_BASE_ENA(1) |
869 S_0085F0_CB5_DEST_BASE_ENA(1) |
870 S_0085F0_CB6_DEST_BASE_ENA(1) |
871 S_0085F0_CB7_DEST_BASE_ENA(1);
872
873 /* Necessary for DCC */
874 if (chip_class >= VI) {
875 si_cs_emit_write_event_eop(cs,
876 chip_class,
877 is_mec,
878 V_028A90_FLUSH_AND_INV_CB_DATA_TS,
879 0, 0, 0, 0, 0);
880 }
881 }
882
883 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
884 cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
885 S_0085F0_DB_DEST_BASE_ENA(1);
886 }
887
888 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
889 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
890 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
891 }
892
893 if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
894 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
895 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
896 }
897
898 if (!(flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
899 RADV_CMD_FLAG_FLUSH_AND_INV_DB))) {
900 if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
901 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
902 radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
903 } else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
904 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
905 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
906 }
907 }
908
909 if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
910 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
911 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
912 }
913
914 /* VGT state sync */
915 if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
916 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
917 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
918 }
919
920 /* Make sure ME is idle (it executes most packets) before continuing.
921 * This prevents read-after-write hazards between PFP and ME.
922 */
923 if ((cp_coher_cntl || (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) &&
924 !is_mec) {
925 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
926 radeon_emit(cs, 0);
927 }
928
929 if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
930 (chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
931 cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
932 if (chip_class >= VI)
933 cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
934 } else if(flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2) {
935 cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1) |
936 S_0301F0_TC_NC_ACTION_ENA(1);
937
938 /* L2 writeback doesn't combine with L1 invalidate */
939 si_emit_acquire_mem(cs, is_mec, cp_coher_cntl);
940
941 cp_coher_cntl = 0;
942 }
943
944 if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1)
945 cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
946
947 /* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
948 * Therefore, it should be last. Done in PFP.
949 */
950 if (cp_coher_cntl)
951 si_emit_acquire_mem(cs, is_mec, cp_coher_cntl);
952 }
953
954 void
955 si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
956 {
957 bool is_compute = cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE;
958
959 if (is_compute)
960 cmd_buffer->state.flush_bits &= ~(RADV_CMD_FLAG_FLUSH_AND_INV_CB |
961 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
962 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
963 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
964 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
965 RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
966 RADV_CMD_FLAG_VGT_FLUSH);
967
968 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);
969
970 si_cs_emit_cache_flush(cmd_buffer->cs,
971 cmd_buffer->device->physical_device->rad_info.chip_class,
972 radv_cmd_buffer_uses_mec(cmd_buffer),
973 cmd_buffer->state.flush_bits);
974
975
976 if (cmd_buffer->state.flush_bits)
977 radv_cmd_buffer_trace_emit(cmd_buffer);
978 cmd_buffer->state.flush_bits = 0;
979 }
980
981
982 /* Set this if you want the 3D engine to wait until CP DMA is done.
983 * It should be set on the last CP DMA packet. */
984 #define CP_DMA_SYNC (1 << 0)
985
986 /* Set this if the source data was used as a destination in a previous CP DMA
987 * packet. It's for preventing a read-after-write (RAW) hazard between two
988 * CP DMA packets. */
989 #define CP_DMA_RAW_WAIT (1 << 1)
990 #define CP_DMA_USE_L2 (1 << 2)
991 #define CP_DMA_CLEAR (1 << 3)
992
993 /* Alignment for optimal performance. */
994 #define SI_CPDMA_ALIGNMENT 32
995
996 /* The max number of bytes that can be copied per packet. */
997 static inline unsigned cp_dma_max_byte_count(struct radv_cmd_buffer *cmd_buffer)
998 {
999 unsigned max = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 ?
1000 S_414_BYTE_COUNT_GFX9(~0u) :
1001 S_414_BYTE_COUNT_GFX6(~0u);
1002
1003 /* make it aligned for optimal performance */
1004 return max & ~(SI_CPDMA_ALIGNMENT - 1);
1005 }
1006
1007 /* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
1008 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va is a 32-bit
1009 * clear value.
1010 */
1011 static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer,
1012 uint64_t dst_va, uint64_t src_va,
1013 unsigned size, unsigned flags)
1014 {
1015 struct radeon_winsys_cs *cs = cmd_buffer->cs;
1016 uint32_t header = 0, command = 0;
1017
1018 assert(size);
1019 assert(size <= cp_dma_max_byte_count(cmd_buffer));
1020
1021 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
1022 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
1023 command |= S_414_BYTE_COUNT_GFX9(size);
1024 else
1025 command |= S_414_BYTE_COUNT_GFX6(size);
1026
1027 /* Sync flags. */
1028 if (flags & CP_DMA_SYNC)
1029 header |= S_411_CP_SYNC(1);
1030 else {
1031 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
1032 command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
1033 else
1034 command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
1035 }
1036
1037 if (flags & CP_DMA_RAW_WAIT)
1038 command |= S_414_RAW_WAIT(1);
1039
1040 /* Src and dst flags. */
1041 if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
1042 !(flags & CP_DMA_CLEAR) &&
1043 src_va == dst_va)
1044 header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
1045 else if (flags & CP_DMA_USE_L2)
1046 header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);
1047
1048 if (flags & CP_DMA_CLEAR)
1049 header |= S_411_SRC_SEL(V_411_DATA);
1050 else if (flags & CP_DMA_USE_L2)
1051 header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);
1052
1053 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
1054 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
1055 radeon_emit(cs, header);
1056 radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
1057 radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */
1058 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
1059 radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
1060 radeon_emit(cs, command);
1061 } else {
1062 header |= S_411_SRC_ADDR_HI(src_va >> 32);
1063 radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
1064 radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
1065 radeon_emit(cs, header); /* SRC_ADDR_HI [15:0] + flags. */
1066 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
1067 radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
1068 radeon_emit(cs, command);
1069 }
1070
1071 /* CP DMA is executed in ME, but index buffers are read by PFP.
1072 * This ensures that ME (CP DMA) is idle before PFP starts fetching
1073 * indices. If we wanted to execute CP DMA in PFP, this packet
1074 * should precede it.
1075 */
1076 if ((flags & CP_DMA_SYNC) && cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
1077 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1078 radeon_emit(cs, 0);
1079 }
1080
1081 radv_cmd_buffer_trace_emit(cmd_buffer);
1082 }
1083
1084 void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
1085 unsigned size)
1086 {
1087 uint64_t aligned_va = va & ~(SI_CPDMA_ALIGNMENT - 1);
1088 uint64_t aligned_size = ((va + size + SI_CPDMA_ALIGNMENT -1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligned_va;
1089
1090 si_emit_cp_dma(cmd_buffer, aligned_va, aligned_va,
1091 aligned_size, CP_DMA_USE_L2);
1092 }
1093
1094 static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
1095 uint64_t remaining_size, unsigned *flags)
1096 {
1097
1098 /* Flush the caches for the first copy only.
1099 * Also wait for the previous CP DMA operations.
1100 */
1101 if (cmd_buffer->state.flush_bits) {
1102 si_emit_cache_flush(cmd_buffer);
1103 *flags |= CP_DMA_RAW_WAIT;
1104 }
1105
1106 /* Do the synchronization after the last dma, so that all data
1107 * is written to memory.
1108 */
1109 if (byte_count == remaining_size)
1110 *flags |= CP_DMA_SYNC;
1111 }
1112
1113 static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
1114 {
1115 uint64_t va;
1116 uint32_t offset;
1117 unsigned dma_flags = 0;
1118 unsigned buf_size = SI_CPDMA_ALIGNMENT * 2;
1119 void *ptr;
1120
1121 assert(size < SI_CPDMA_ALIGNMENT);
1122
1123 radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr);
1124
1125 va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
1126 va += offset;
1127
1128 si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);
1129
1130 si_emit_cp_dma(cmd_buffer, va, va + SI_CPDMA_ALIGNMENT, size,
1131 dma_flags);
1132 }
1133
1134 void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
1135 uint64_t src_va, uint64_t dest_va,
1136 uint64_t size)
1137 {
1138 uint64_t main_src_va, main_dest_va;
1139 uint64_t skipped_size = 0, realign_size = 0;
1140
1141
1142 if (cmd_buffer->device->physical_device->rad_info.family <= CHIP_CARRIZO ||
1143 cmd_buffer->device->physical_device->rad_info.family == CHIP_STONEY) {
1144 /* If the size is not aligned, we must add a dummy copy at the end
1145 * just to align the internal counter. Otherwise, the DMA engine
1146 * would slow down by an order of magnitude for following copies.
1147 */
1148 if (size % SI_CPDMA_ALIGNMENT)
1149 realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);
1150
1151 /* If the copy begins unaligned, we must start copying from the next
1152 * aligned block and the skipped part should be copied after everything
1153 * else has been copied. Only the src alignment matters, not dst.
1154 */
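/* Example: a 100-byte copy whose source starts 8 bytes past an aligned
 * address gets realign_size = 28 (from 100 % 32 == 4) and skipped_size = 24,
 * so it is split into a 76-byte main copy, a 24-byte copy of the skipped
 * head, and a final 28-byte dummy realignment copy. */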
1155 if (src_va % SI_CPDMA_ALIGNMENT) {
1156 skipped_size = SI_CPDMA_ALIGNMENT - (src_va % SI_CPDMA_ALIGNMENT);
1157 /* The main part will be skipped if the size is too small. */
1158 skipped_size = MIN2(skipped_size, size);
1159 size -= skipped_size;
1160 }
1161 }
1162 main_src_va = src_va + skipped_size;
1163 main_dest_va = dest_va + skipped_size;
1164
1165 while (size) {
1166 unsigned dma_flags = 0;
1167 unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
1168
1169 si_cp_dma_prepare(cmd_buffer, byte_count,
1170 size + skipped_size + realign_size,
1171 &dma_flags);
1172
1173 si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
1174 byte_count, dma_flags);
1175
1176 size -= byte_count;
1177 main_src_va += byte_count;
1178 main_dest_va += byte_count;
1179 }
1180
1181 if (skipped_size) {
1182 unsigned dma_flags = 0;
1183
1184 si_cp_dma_prepare(cmd_buffer, skipped_size,
1185 size + skipped_size + realign_size,
1186 &dma_flags);
1187
1188 si_emit_cp_dma(cmd_buffer, dest_va, src_va,
1189 skipped_size, dma_flags);
1190 }
1191 if (realign_size)
1192 si_cp_dma_realign_engine(cmd_buffer, realign_size);
1193 }
1194
1195 void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
1196 uint64_t size, unsigned value)
1197 {
1198
1199 if (!size)
1200 return;
1201
1202 assert(va % 4 == 0 && size % 4 == 0);
1203
1204 while (size) {
1205 unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
1206 unsigned dma_flags = CP_DMA_CLEAR;
1207
1208 si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);
1209
1210 /* Emit the clear packet. */
1211 si_emit_cp_dma(cmd_buffer, va, value, byte_count,
1212 dma_flags);
1213
1214 size -= byte_count;
1215 va += byte_count;
1216 }
1217 }
1218
1219 /* For MSAA sample positions. */
1220 #define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y) \
1221 (((s0x) & 0xf) | (((unsigned)(s0y) & 0xf) << 4) | \
1222 (((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) | \
1223 (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) | \
1224 (((unsigned)(s3x) & 0xf) << 24) | (((unsigned)(s3y) & 0xf) << 28))
1225
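/* Each sample location is a signed 4-bit coordinate in 1/16th-pixel units
 * relative to the pixel center; FILL_SREG packs four (x, y) pairs into one
 * dword of the PA_SC_AA_SAMPLE_LOCS_PIXEL_* registers. */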
1226
1227 /* 2xMSAA
1228 * There are two locations (4, 4), (-4, -4). */
1229 const uint32_t eg_sample_locs_2x[4] = {
1230 FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
1231 FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
1232 FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
1233 FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
1234 };
1235 const unsigned eg_max_dist_2x = 4;
1236 /* 4xMSAA
1237 * There are 4 locations: (-2, -6), (6, -2), (-6, 2), (2, 6). */
1238 const uint32_t eg_sample_locs_4x[4] = {
1239 FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
1240 FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
1241 FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
1242 FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
1243 };
1244 const unsigned eg_max_dist_4x = 6;
1245
1246 /* Cayman 8xMSAA */
1247 static const uint32_t cm_sample_locs_8x[] = {
1248 FILL_SREG( 1, -3, -1, 3, 5, 1, -3, -5),
1249 FILL_SREG( 1, -3, -1, 3, 5, 1, -3, -5),
1250 FILL_SREG( 1, -3, -1, 3, 5, 1, -3, -5),
1251 FILL_SREG( 1, -3, -1, 3, 5, 1, -3, -5),
1252 FILL_SREG(-5, 5, -7, -1, 3, 7, 7, -7),
1253 FILL_SREG(-5, 5, -7, -1, 3, 7, 7, -7),
1254 FILL_SREG(-5, 5, -7, -1, 3, 7, 7, -7),
1255 FILL_SREG(-5, 5, -7, -1, 3, 7, 7, -7),
1256 };
1257 static const unsigned cm_max_dist_8x = 8;
1258 /* Cayman 16xMSAA */
1259 static const uint32_t cm_sample_locs_16x[] = {
1260 FILL_SREG( 1, 1, -1, -3, -3, 2, 4, -1),
1261 FILL_SREG( 1, 1, -1, -3, -3, 2, 4, -1),
1262 FILL_SREG( 1, 1, -1, -3, -3, 2, 4, -1),
1263 FILL_SREG( 1, 1, -1, -3, -3, 2, 4, -1),
1264 FILL_SREG(-5, -2, 2, 5, 5, 3, 3, -5),
1265 FILL_SREG(-5, -2, 2, 5, 5, 3, 3, -5),
1266 FILL_SREG(-5, -2, 2, 5, 5, 3, 3, -5),
1267 FILL_SREG(-5, -2, 2, 5, 5, 3, 3, -5),
1268 FILL_SREG(-2, 6, 0, -7, -4, -6, -6, 4),
1269 FILL_SREG(-2, 6, 0, -7, -4, -6, -6, 4),
1270 FILL_SREG(-2, 6, 0, -7, -4, -6, -6, 4),
1271 FILL_SREG(-2, 6, 0, -7, -4, -6, -6, 4),
1272 FILL_SREG(-8, 0, 7, -4, 6, 7, -7, -8),
1273 FILL_SREG(-8, 0, 7, -4, 6, 7, -7, -8),
1274 FILL_SREG(-8, 0, 7, -4, 6, 7, -7, -8),
1275 FILL_SREG(-8, 0, 7, -4, 6, 7, -7, -8),
1276 };
1277 static const unsigned cm_max_dist_16x = 8;
1278
1279 unsigned radv_cayman_get_maxdist(int log_samples)
1280 {
1281 unsigned max_dist[] = {
1282 0,
1283 eg_max_dist_2x,
1284 eg_max_dist_4x,
1285 cm_max_dist_8x,
1286 cm_max_dist_16x
1287 };
1288 return max_dist[log_samples];
1289 }
1290
1291 void radv_cayman_emit_msaa_sample_locs(struct radeon_winsys_cs *cs, int nr_samples)
1292 {
1293 switch (nr_samples) {
1294 default:
1295 case 1:
1296 radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0);
1297 radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0);
1298 radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0);
1299 radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0);
1300 break;
1301 case 2:
1302 radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
1303 radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
1304 radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
1305 radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
1306 break;
1307 case 4:
1308 radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
1309 radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
1310 radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
1311 radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
1312 break;
1313 case 8:
1314 radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
1315 radeon_emit(cs, cm_sample_locs_8x[0]);
1316 radeon_emit(cs, cm_sample_locs_8x[4]);
1317 radeon_emit(cs, 0);
1318 radeon_emit(cs, 0);
1319 radeon_emit(cs, cm_sample_locs_8x[1]);
1320 radeon_emit(cs, cm_sample_locs_8x[5]);
1321 radeon_emit(cs, 0);
1322 radeon_emit(cs, 0);
1323 radeon_emit(cs, cm_sample_locs_8x[2]);
1324 radeon_emit(cs, cm_sample_locs_8x[6]);
1325 radeon_emit(cs, 0);
1326 radeon_emit(cs, 0);
1327 radeon_emit(cs, cm_sample_locs_8x[3]);
1328 radeon_emit(cs, cm_sample_locs_8x[7]);
1329 break;
1330 case 16:
1331 radeon_set_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
1332 radeon_emit(cs, cm_sample_locs_16x[0]);
1333 radeon_emit(cs, cm_sample_locs_16x[4]);
1334 radeon_emit(cs, cm_sample_locs_16x[8]);
1335 radeon_emit(cs, cm_sample_locs_16x[12]);
1336 radeon_emit(cs, cm_sample_locs_16x[1]);
1337 radeon_emit(cs, cm_sample_locs_16x[5]);
1338 radeon_emit(cs, cm_sample_locs_16x[9]);
1339 radeon_emit(cs, cm_sample_locs_16x[13]);
1340 radeon_emit(cs, cm_sample_locs_16x[2]);
1341 radeon_emit(cs, cm_sample_locs_16x[6]);
1342 radeon_emit(cs, cm_sample_locs_16x[10]);
1343 radeon_emit(cs, cm_sample_locs_16x[14]);
1344 radeon_emit(cs, cm_sample_locs_16x[3]);
1345 radeon_emit(cs, cm_sample_locs_16x[7]);
1346 radeon_emit(cs, cm_sample_locs_16x[11]);
1347 radeon_emit(cs, cm_sample_locs_16x[15]);
1348 break;
1349 }
1350 }
1351
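/* Decode a packed sample location back to a [0, 1) pixel offset: the idx:4
 * bitfield sign-extends the 4-bit value, and (idx + 8) / 16 maps the
 * [-8, 7] range onto [0, 15/16]. */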
1352 static void radv_cayman_get_sample_position(struct radv_device *device,
1353 unsigned sample_count,
1354 unsigned sample_index, float *out_value)
1355 {
1356 int offset, index;
1357 struct {
1358 int idx:4;
1359 } val;
1360 switch (sample_count) {
1361 case 1:
1362 default:
1363 out_value[0] = out_value[1] = 0.5;
1364 break;
1365 case 2:
1366 offset = 4 * (sample_index * 2);
1367 val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
1368 out_value[0] = (float)(val.idx + 8) / 16.0f;
1369 val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
1370 out_value[1] = (float)(val.idx + 8) / 16.0f;
1371 break;
1372 case 4:
1373 offset = 4 * (sample_index * 2);
1374 val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
1375 out_value[0] = (float)(val.idx + 8) / 16.0f;
1376 val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
1377 out_value[1] = (float)(val.idx + 8) / 16.0f;
1378 break;
1379 case 8:
1380 offset = 4 * (sample_index % 4 * 2);
1381 index = (sample_index / 4) * 4;
1382 val.idx = (cm_sample_locs_8x[index] >> offset) & 0xf;
1383 out_value[0] = (float)(val.idx + 8) / 16.0f;
1384 val.idx = (cm_sample_locs_8x[index] >> (offset + 4)) & 0xf;
1385 out_value[1] = (float)(val.idx + 8) / 16.0f;
1386 break;
1387 case 16:
1388 offset = 4 * (sample_index % 4 * 2);
1389 index = (sample_index / 4) * 4;
1390 val.idx = (cm_sample_locs_16x[index] >> offset) & 0xf;
1391 out_value[0] = (float)(val.idx + 8) / 16.0f;
1392 val.idx = (cm_sample_locs_16x[index] >> (offset + 4)) & 0xf;
1393 out_value[1] = (float)(val.idx + 8) / 16.0f;
1394 break;
1395 }
1396 }
1397
1398 void radv_device_init_msaa(struct radv_device *device)
1399 {
1400 int i;
1401 radv_cayman_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);
1402
1403 for (i = 0; i < 2; i++)
1404 radv_cayman_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
1405 for (i = 0; i < 4; i++)
1406 radv_cayman_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
1407 for (i = 0; i < 8; i++)
1408 radv_cayman_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
1409 for (i = 0; i < 16; i++)
1410 radv_cayman_get_sample_position(device, 16, i, device->sample_locations_16x[i]);
1411 }