radv: add support for Raven2
[mesa.git] / src / amd / vulkan / si_cmd_buffer.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based on si_state.c
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* command buffer handling for SI */

#include "radv_private.h"
#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "gfx9d.h"
#include "radv_util.h"
#include "main/macros.h"

static void
si_write_harvested_raster_configs(struct radv_physical_device *physical_device,
				  struct radeon_cmdbuf *cs,
				  unsigned raster_config,
				  unsigned raster_config_1)
{
	unsigned num_se = MAX2(physical_device->rad_info.max_se, 1);
	unsigned raster_config_se[4];
	unsigned se;

	ac_get_harvested_configs(&physical_device->rad_info,
				 raster_config,
				 &raster_config_1,
				 raster_config_se);

	for (se = 0; se < num_se; se++) {
		/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
		if (physical_device->rad_info.chip_class < CIK)
			radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
					      S_00802C_SE_INDEX(se) |
					      S_00802C_SH_BROADCAST_WRITES(1) |
					      S_00802C_INSTANCE_BROADCAST_WRITES(1));
		else
			radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
					       S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
					       S_030800_INSTANCE_BROADCAST_WRITES(1));
		radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se[se]);
	}

	/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
	if (physical_device->rad_info.chip_class < CIK)
		radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
				      S_00802C_SE_BROADCAST_WRITES(1) |
				      S_00802C_SH_BROADCAST_WRITES(1) |
				      S_00802C_INSTANCE_BROADCAST_WRITES(1));
	else
		radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
				       S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
				       S_030800_INSTANCE_BROADCAST_WRITES(1));

	if (physical_device->rad_info.chip_class >= CIK)
		radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
}

void
si_emit_compute(struct radv_physical_device *physical_device,
		struct radeon_cmdbuf *cs)
{
	radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);
	radeon_emit(cs, 0);

	radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
	/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
	radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

	if (physical_device->rad_info.chip_class >= CIK) {
		/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
		radeon_set_sh_reg_seq(cs,
				      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
		radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
			    S_00B864_SH1_CU_EN(0xffff));
		radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
			    S_00B868_SH1_CU_EN(0xffff));
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (physical_device->rad_info.chip_class <= SI) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */

		radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
				  0x190 /* Default value */);
	}
}

/* 12.4 fixed-point */
static unsigned radv_pack_float_12p4(float x)
{
	return x <= 0    ? 0 :
	       x >= 4096 ? 0xffff : x * 16;
}
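
/* Worked example of the 12.4 encoding: radv_pack_float_12p4(1.0f) returns
 * 16, i.e. 1.0 in 12 integer + 4 fractional bits, and anything at or above
 * 4096 saturates to 0xffff. Used below when programming PA_SU_POINT_MINMAX.
 */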

static void
si_set_raster_config(struct radv_physical_device *physical_device,
		     struct radeon_cmdbuf *cs)
{
	unsigned num_rb = MIN2(physical_device->rad_info.num_render_backends, 16);
	unsigned rb_mask = physical_device->rad_info.enabled_rb_mask;
	unsigned raster_config, raster_config_1;

	ac_get_raster_config(&physical_device->rad_info,
			     &raster_config,
			     &raster_config_1, NULL);

	/* Always use the default config when all backends are enabled
	 * (or when we failed to determine the enabled backends).
	 */
	if (!rb_mask || util_bitcount(rb_mask) >= num_rb) {
		radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG,
				       raster_config);
		if (physical_device->rad_info.chip_class >= CIK)
			radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1,
					       raster_config_1);
	} else {
		si_write_harvested_raster_configs(physical_device, cs,
						  raster_config,
						  raster_config_1);
	}
}

void
si_emit_graphics(struct radv_physical_device *physical_device,
		 struct radeon_cmdbuf *cs)
{
	int i;

	/* Only SI can disable CLEAR_STATE for now. */
	assert(physical_device->has_clear_state ||
	       physical_device->rad_info.chip_class == SI);

	radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	radeon_emit(cs, CONTEXT_CONTROL_LOAD_ENABLE(1));
	radeon_emit(cs, CONTEXT_CONTROL_SHADOW_ENABLE(1));

	if (physical_device->has_clear_state) {
		radeon_emit(cs, PKT3(PKT3_CLEAR_STATE, 0, 0));
		radeon_emit(cs, 0);
	}

	if (physical_device->rad_info.chip_class <= VI)
		si_set_raster_config(physical_device, cs);

	radeon_set_context_reg(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, fui(64));
	if (!physical_device->has_clear_state)
		radeon_set_context_reg(cs, R_028A1C_VGT_HOS_MIN_TESS_LEVEL, fui(0));

	/* FIXME calculate these values somehow ??? */
	if (physical_device->rad_info.chip_class <= VI) {
		radeon_set_context_reg(cs, R_028A54_VGT_GS_PER_ES, SI_GS_PER_ES);
		radeon_set_context_reg(cs, R_028A58_VGT_ES_PER_GS, 0x40);
	}

	if (!physical_device->has_clear_state) {
		radeon_set_context_reg(cs, R_028A5C_VGT_GS_PER_VS, 0x2);
		radeon_set_context_reg(cs, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
		radeon_set_context_reg(cs, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
	}

	radeon_set_context_reg(cs, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
	if (!physical_device->has_clear_state)
		radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, 0x0);
	if (physical_device->rad_info.chip_class < CIK)
		radeon_set_config_reg(cs, R_008A14_PA_CL_ENHANCE, S_008A14_NUM_CLIP_SEQ(3) |
				      S_008A14_CLIP_VTX_REORDER_ENA(1));

	radeon_set_context_reg(cs, R_028BD4_PA_SC_CENTROID_PRIORITY_0, 0x76543210);
	radeon_set_context_reg(cs, R_028BD8_PA_SC_CENTROID_PRIORITY_1, 0xfedcba98);

	if (!physical_device->has_clear_state)
		radeon_set_context_reg(cs, R_02882C_PA_SU_PRIM_FILTER_CNTL, 0);

	/* CLEAR_STATE doesn't clear these correctly on certain generations.
	 * I don't know why. Deduced by trial and error.
	 */
	if (physical_device->rad_info.chip_class <= CIK) {
		radeon_set_context_reg(cs, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
		radeon_set_context_reg(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL,
				       S_028204_WINDOW_OFFSET_DISABLE(1));
		radeon_set_context_reg(cs, R_028240_PA_SC_GENERIC_SCISSOR_TL,
				       S_028240_WINDOW_OFFSET_DISABLE(1));
		radeon_set_context_reg(cs, R_028244_PA_SC_GENERIC_SCISSOR_BR,
				       S_028244_BR_X(16384) | S_028244_BR_Y(16384));
		radeon_set_context_reg(cs, R_028030_PA_SC_SCREEN_SCISSOR_TL, 0);
		radeon_set_context_reg(cs, R_028034_PA_SC_SCREEN_SCISSOR_BR,
				       S_028034_BR_X(16384) | S_028034_BR_Y(16384));
	}

	if (!physical_device->has_clear_state) {
		for (i = 0; i < 16; i++) {
			radeon_set_context_reg(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 + i*8, 0);
			radeon_set_context_reg(cs, R_0282D4_PA_SC_VPORT_ZMAX_0 + i*8, fui(1.0));
		}
	}

	if (!physical_device->has_clear_state) {
		radeon_set_context_reg(cs, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
		radeon_set_context_reg(cs, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
		/* PA_SU_HARDWARE_SCREEN_OFFSET must be 0 due to hw bug on SI */
		radeon_set_context_reg(cs, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
		radeon_set_context_reg(cs, R_028820_PA_CL_NANINF_CNTL, 0);
		radeon_set_context_reg(cs, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
		radeon_set_context_reg(cs, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
		radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
	}

	radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE,
			       S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
			       S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));

	if (physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_uconfig_reg(cs, R_030920_VGT_MAX_VTX_INDX, ~0);
		radeon_set_uconfig_reg(cs, R_030924_VGT_MIN_VTX_INDX, 0);
		radeon_set_uconfig_reg(cs, R_030928_VGT_INDX_OFFSET, 0);
	} else {
		/* These registers, when written, also overwrite the
		 * CLEAR_STATE context, so we can't rely on CLEAR_STATE setting
		 * them. It would be an issue if there was another UMD
		 * changing them.
		 */
		radeon_set_context_reg(cs, R_028400_VGT_MAX_VTX_INDX, ~0);
		radeon_set_context_reg(cs, R_028404_VGT_MIN_VTX_INDX, 0);
		radeon_set_context_reg(cs, R_028408_VGT_INDX_OFFSET, 0);
	}

	if (physical_device->rad_info.chip_class >= CIK) {
		if (physical_device->rad_info.chip_class >= GFX9) {
			radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
					  S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F));
		} else {
			radeon_set_sh_reg(cs, R_00B51C_SPI_SHADER_PGM_RSRC3_LS,
					  S_00B51C_CU_EN(0xffff) | S_00B51C_WAVE_LIMIT(0x3F));
			radeon_set_sh_reg(cs, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
					  S_00B41C_WAVE_LIMIT(0x3F));
			radeon_set_sh_reg(cs, R_00B31C_SPI_SHADER_PGM_RSRC3_ES,
					  S_00B31C_CU_EN(0xffff) | S_00B31C_WAVE_LIMIT(0x3F));
			/* If this is 0, Bonaire can hang even if GS isn't being used.
			 * Other chips are unaffected. These are suboptimal values,
			 * but we don't use on-chip GS.
			 */
			radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL,
					       S_028A44_ES_VERTS_PER_SUBGRP(64) |
					       S_028A44_GS_PRIMS_PER_SUBGRP(4));
		}
		radeon_set_sh_reg(cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
				  S_00B21C_CU_EN(0xffff) | S_00B21C_WAVE_LIMIT(0x3F));

		if (physical_device->rad_info.num_good_compute_units /
		    (physical_device->rad_info.max_se * physical_device->rad_info.max_sh_per_se) <= 4) {
			/* Too few available compute units per SH. Disallowing
			 * VS from running on CU0 could hurt us more than late
			 * VS allocation would help.
			 *
			 * LATE_ALLOC_VS = 2 is the highest safe number.
			 */
			radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
					  S_00B118_CU_EN(0xffff) | S_00B118_WAVE_LIMIT(0x3F));
			radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(2));
		} else {
			/* Set LATE_ALLOC_VS == 31. It should be less than
			 * the number of scratch waves. Limitations:
			 * - VS can't execute on CU0.
			 * - If HS writes outputs to LDS, LS can't execute on CU0.
			 */
			radeon_set_sh_reg(cs, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
					  S_00B118_CU_EN(0xfffe) | S_00B118_WAVE_LIMIT(0x3F));
			radeon_set_sh_reg(cs, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(31));
		}

		radeon_set_sh_reg(cs, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
				  S_00B01C_CU_EN(0xffff) | S_00B01C_WAVE_LIMIT(0x3F));
	}

	if (physical_device->rad_info.chip_class >= VI) {
		uint32_t vgt_tess_distribution;
		radeon_set_context_reg(cs, R_028424_CB_DCC_CONTROL,
				       S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(1) |
				       S_028424_OVERWRITE_COMBINER_WATERMARK(4));

		vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) |
					S_028B50_ACCUM_TRI(11) |
					S_028B50_ACCUM_QUAD(11) |
					S_028B50_DONUT_SPLIT(16);

		if (physical_device->rad_info.family == CHIP_FIJI ||
		    physical_device->rad_info.family >= CHIP_POLARIS10)
			vgt_tess_distribution |= S_028B50_TRAP_SPLIT(3);

		radeon_set_context_reg(cs, R_028B50_VGT_TESS_DISTRIBUTION,
				       vgt_tess_distribution);
	} else if (!physical_device->has_clear_state) {
		radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
		radeon_set_context_reg(cs, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
	}

	if (physical_device->rad_info.chip_class >= GFX9) {
		unsigned num_se = physical_device->rad_info.max_se;
		unsigned pc_lines = 0;

		switch (physical_device->rad_info.family) {
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
			pc_lines = 4096;
			break;
		case CHIP_RAVEN:
		case CHIP_RAVEN2:
			pc_lines = 1024;
			break;
		default:
			assert(0);
		}

		radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1,
				       S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) |
				       S_028C48_MAX_PRIM_PER_BATCH(1023));
		radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
				       S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1));
		radeon_set_uconfig_reg(cs, R_030968_VGT_INSTANCE_BASE_ID, 0);
	}

	unsigned tmp = (unsigned)(1.0 * 8.0);
	radeon_set_context_reg_seq(cs, R_028A00_PA_SU_POINT_SIZE, 1);
	radeon_emit(cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	radeon_set_context_reg_seq(cs, R_028A04_PA_SU_POINT_MINMAX, 1);
	radeon_emit(cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
		    S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2)));

	if (!physical_device->has_clear_state) {
		radeon_set_context_reg(cs, R_028004_DB_COUNT_CONTROL,
				       S_028004_ZPASS_INCREMENT_DISABLE(1));
	}

	/* Enable the Polaris small primitive filter control.
	 * XXX: There is possibly an issue when MSAA is off (see RadeonSI's
	 * has_msaa_sample_loc_bug). But this doesn't seem to regress anything,
	 * and AMDVLK doesn't have a workaround either.
	 */
	if (physical_device->rad_info.family >= CHIP_POLARIS10) {
		unsigned small_prim_filter_cntl =
			S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
			/* Workaround for a hw line bug. */
			S_028830_LINE_FILTER_DISABLE(physical_device->rad_info.family <= CHIP_POLARIS12);

		radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL,
				       small_prim_filter_cntl);
	}

	si_emit_compute(physical_device, cs);
}

void
cik_create_gfx_config(struct radv_device *device)
{
	struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, RING_GFX);
	if (!cs)
		return;

	si_emit_graphics(device->physical_device, cs);

	while (cs->cdw & 7) {
		if (device->physical_device->rad_info.gfx_ib_pad_with_type2)
			radeon_emit(cs, 0x80000000);
		else
			radeon_emit(cs, 0xffff1000);
	}

	device->gfx_init = device->ws->buffer_create(device->ws,
						     cs->cdw * 4, 4096,
						     RADEON_DOMAIN_GTT,
						     RADEON_FLAG_CPU_ACCESS |
						     RADEON_FLAG_NO_INTERPROCESS_SHARING |
						     RADEON_FLAG_READ_ONLY);
	if (!device->gfx_init)
		goto fail;

	void *map = device->ws->buffer_map(device->gfx_init);
	if (!map) {
		device->ws->buffer_destroy(device->gfx_init);
		device->gfx_init = NULL;
		goto fail;
	}
	memcpy(map, cs->buf, cs->cdw * 4);

	device->ws->buffer_unmap(device->gfx_init);
	device->gfx_init_size_dw = cs->cdw;
fail:
	device->ws->cs_destroy(cs);
}

static void
get_viewport_xform(const VkViewport *viewport,
		   float scale[3], float translate[3])
{
	float x = viewport->x;
	float y = viewport->y;
	float half_width = 0.5f * viewport->width;
	float half_height = 0.5f * viewport->height;
	double n = viewport->minDepth;
	double f = viewport->maxDepth;

	scale[0] = half_width;
	translate[0] = half_width + x;
	scale[1] = half_height;
	translate[1] = half_height + y;

	scale[2] = (f - n);
	translate[2] = n;
}
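
/* Worked example (illustrative, not from the original source): a VkViewport
 * of x=0, y=0, width=1920, height=1080, minDepth=0.0, maxDepth=1.0 yields
 * scale = (960, 540, 1) and translate = (960, 540, 0), mapping NDC x/y in
 * [-1, 1] to window coordinates [0, 1920] x [0, 1080] and depth to [0, 1].
 */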

void
si_write_viewport(struct radeon_cmdbuf *cs, int first_vp,
		  int count, const VkViewport *viewports)
{
	int i;

	assert(count);
	radeon_set_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE +
				   first_vp * 4 * 6, count * 6);

	for (i = 0; i < count; i++) {
		float scale[3], translate[3];

		get_viewport_xform(&viewports[i], scale, translate);
		radeon_emit(cs, fui(scale[0]));
		radeon_emit(cs, fui(translate[0]));
		radeon_emit(cs, fui(scale[1]));
		radeon_emit(cs, fui(translate[1]));
		radeon_emit(cs, fui(scale[2]));
		radeon_emit(cs, fui(translate[2]));
	}

	radeon_set_context_reg_seq(cs, R_0282D0_PA_SC_VPORT_ZMIN_0 +
				   first_vp * 4 * 2, count * 2);
	for (i = 0; i < count; i++) {
		float zmin = MIN2(viewports[i].minDepth, viewports[i].maxDepth);
		float zmax = MAX2(viewports[i].minDepth, viewports[i].maxDepth);
		radeon_emit(cs, fui(zmin));
		radeon_emit(cs, fui(zmax));
	}
}

static VkRect2D si_scissor_from_viewport(const VkViewport *viewport)
{
	float scale[3], translate[3];
	VkRect2D rect;

	get_viewport_xform(viewport, scale, translate);

	rect.offset.x = translate[0] - fabs(scale[0]);
	rect.offset.y = translate[1] - fabs(scale[1]);
	rect.extent.width = ceilf(translate[0] + fabs(scale[0])) - rect.offset.x;
	rect.extent.height = ceilf(translate[1] + fabs(scale[1])) - rect.offset.y;

	return rect;
}

static VkRect2D si_intersect_scissor(const VkRect2D *a, const VkRect2D *b)
{
	VkRect2D ret;
	ret.offset.x = MAX2(a->offset.x, b->offset.x);
	ret.offset.y = MAX2(a->offset.y, b->offset.y);
	ret.extent.width = MIN2(a->offset.x + a->extent.width,
				b->offset.x + b->extent.width) - ret.offset.x;
	ret.extent.height = MIN2(a->offset.y + a->extent.height,
				 b->offset.y + b->extent.height) - ret.offset.y;
	return ret;
}
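
/* Example: intersecting {0,0, 100x100} with {50,50, 100x100} yields
 * {50,50, 50x50}. The extents are unsigned, so the result is only
 * meaningful when the input rectangles actually overlap.
 */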

void
si_write_scissors(struct radeon_cmdbuf *cs, int first,
		  int count, const VkRect2D *scissors,
		  const VkViewport *viewports, bool can_use_guardband)
{
	int i;
	float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY;
	const float max_range = 32767.0f;
	if (!count)
		return;

	radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
	for (i = 0; i < count; i++) {
		VkRect2D viewport_scissor = si_scissor_from_viewport(viewports + i);
		VkRect2D scissor = si_intersect_scissor(&scissors[i], &viewport_scissor);

		get_viewport_xform(viewports + i, scale, translate);
		scale[0] = fabsf(scale[0]);
		scale[1] = fabsf(scale[1]);

		if (scale[0] < 0.5)
			scale[0] = 0.5;
		if (scale[1] < 0.5)
			scale[1] = 0.5;

		guardband_x = MIN2(guardband_x, (max_range - fabsf(translate[0])) / scale[0]);
		guardband_y = MIN2(guardband_y, (max_range - fabsf(translate[1])) / scale[1]);

		radeon_emit(cs, S_028250_TL_X(scissor.offset.x) |
			    S_028250_TL_Y(scissor.offset.y) |
			    S_028250_WINDOW_OFFSET_DISABLE(1));
		radeon_emit(cs, S_028254_BR_X(scissor.offset.x + scissor.extent.width) |
			    S_028254_BR_Y(scissor.offset.y + scissor.extent.height));
	}
	if (!can_use_guardband) {
		guardband_x = 1.0;
		guardband_y = 1.0;
	}

	radeon_set_context_reg_seq(cs, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, 4);
	radeon_emit(cs, fui(guardband_y));
	radeon_emit(cs, fui(1.0));
	radeon_emit(cs, fui(guardband_x));
	radeon_emit(cs, fui(1.0));
}
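
/* A note on the guardband values (a reading of the code above, not
 * hardware documentation): window coordinates must stay within
 * +/- max_range (32767), so the largest factor by which a viewport's
 * geometry can extend past it while remaining representable is
 * (32767 - |translate|) / scale per axis. E.g. a 1920x1080 viewport at
 * the origin has translate[0] = 960 and scale[0] = 960, giving
 * guardband_x = (32767 - 960) / 960, roughly 33.1.
 */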

static inline unsigned
radv_prims_for_vertices(struct radv_prim_vertex_count *info, unsigned num)
{
	if (num == 0)
		return 0;

	if (info->incr == 0)
		return 0;

	if (num < info->min)
		return 0;

	return 1 + ((num - info->min) / info->incr);
}
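
/* Worked example: for triangle lists (min = 3, incr = 3), num = 8 vertices
 * gives 1 + (8 - 3) / 3 = 2 complete primitives; for triangle strips
 * (min = 3, incr = 1) the same 8 vertices give 6.
 */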

uint32_t
si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
			  bool instanced_draw, bool indirect_draw,
			  uint32_t draw_vertex_count)
{
	enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
	enum radeon_family family = cmd_buffer->device->physical_device->rad_info.family;
	struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
	const unsigned max_primgroup_in_wave = 2;
	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_es_wave;
	bool multi_instances_smaller_than_primgroup;

	multi_instances_smaller_than_primgroup = indirect_draw;
	if (!multi_instances_smaller_than_primgroup && instanced_draw) {
		uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
		if (num_prims < cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.primgroup_size)
			multi_instances_smaller_than_primgroup = true;
	}

	ia_switch_on_eoi = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.ia_switch_on_eoi;
	partial_vs_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_vs_wave;

	if (chip_class >= CIK) {
		wd_switch_on_eop = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.wd_switch_on_eop;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (family == CHIP_HAWAII &&
		    (instanced_draw || indirect_draw))
			wd_switch_on_eop = true;

		/* Performance recommendation for 4 SE Gfx7-8 parts if
		 * instances are smaller than a primgroup.
		 * Assume indirect draws always use small instances.
		 * This is needed for good VS wave utilization.
		 */
		if (chip_class <= VI &&
		    info->max_se == 4 &&
		    multi_instances_smaller_than_primgroup)
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (info->max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (family == CHIP_HAWAII ||
		     (chip_class == VI &&
		      /* max primgroup in wave is always 2 - leave this for documentation */
		      (radv_pipeline_has_gs(cmd_buffer->state.pipeline) || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    (instanced_draw || indirect_draw))
			partial_vs_wave = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}
	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (chip_class <= VI && ia_switch_on_eoi)
		partial_es_wave = true;

	if (radv_pipeline_has_gs(cmd_buffer->state.pipeline)) {
		/* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
		 * The hw doc says all multi-SE chips are affected, but amdgpu-pro Vulkan
		 * only applies it to Hawaii. Do what amdgpu-pro Vulkan does.
		 */
		if (family == CHIP_HAWAII && ia_switch_on_eoi) {
			bool set_vgt_flush = indirect_draw;
			if (!set_vgt_flush && instanced_draw) {
				uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
				if (num_prims <= 1)
					set_vgt_flush = true;
			}
			if (set_vgt_flush)
				cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
		}
	}

	return cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.base |
	       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
	       S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
	       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
	       S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
	       S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0);
}

void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
				enum chip_class chip_class,
				bool is_mec,
				unsigned event, unsigned event_flags,
				unsigned data_sel,
				uint64_t va,
				uint32_t old_fence,
				uint32_t new_fence,
				uint64_t gfx9_eop_bug_va)
{
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;
	unsigned is_gfx8_mec = is_mec && chip_class < GFX9;
	unsigned sel = EOP_DATA_SEL(data_sel);

	/* Wait for write confirmation before writing data, but don't send
	 * an interrupt. */
	if (data_sel != EOP_DATA_SEL_DISCARD)
		sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);

	if (chip_class >= GFX9 || is_gfx8_mec) {
		/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
		 * counters) must immediately precede every timestamp event to
		 * prevent a GPU hang on GFX9.
		 */
		if (chip_class == GFX9 && !is_mec) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
			radeon_emit(cs, gfx9_eop_bug_va);
			radeon_emit(cs, gfx9_eop_bug_va >> 32);
		}

		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, is_gfx8_mec ? 5 : 6, false));
		radeon_emit(cs, op);
		radeon_emit(cs, sel);
		radeon_emit(cs, va);		/* address lo */
		radeon_emit(cs, va >> 32);	/* address hi */
		radeon_emit(cs, new_fence);	/* immediate data lo */
		radeon_emit(cs, 0);		/* immediate data hi */
		if (!is_gfx8_mec)
			radeon_emit(cs, 0);	/* unused */
	} else {
		if (chip_class == CIK ||
		    chip_class == VI) {
			/* Two EOP events are required to make all engines go idle
			 * (and optional cache flushes executed) before the timestamp
			 * is written.
			 */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
			radeon_emit(cs, op);
			radeon_emit(cs, va);
			radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
			radeon_emit(cs, old_fence);	/* immediate data */
			radeon_emit(cs, 0);		/* unused */
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, false));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
		radeon_emit(cs, new_fence);	/* immediate data */
		radeon_emit(cs, 0);		/* unused */
	}
}

void
si_emit_wait_fence(struct radeon_cmdbuf *cs,
		   uint64_t va, uint32_t ref,
		   uint32_t mask)
{
	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref);	/* reference value */
	radeon_emit(cs, mask);	/* mask */
	radeon_emit(cs, 4);	/* poll interval */
}
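
/* WAIT_REG_MEM with MEM_SPACE(1) polls memory: the CP stalls until
 * (*(uint32_t *)va & mask) == ref, re-checking with the poll interval
 * emitted in the last dword. si_cs_emit_cache_flush() below pairs this
 * with si_cs_emit_write_event_eop() to wait for the EOP fence value it
 * just wrote.
 */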

static void
si_emit_acquire_mem(struct radeon_cmdbuf *cs,
		    bool is_mec,
		    bool is_gfx9,
		    unsigned cp_coher_cntl)
{
	if (is_mec || is_gfx9) {
		uint32_t hi_val = is_gfx9 ? 0xffffff : 0xff;
		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, false) |
			    PKT3_SHADER_TYPE_S(is_mec));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, hi_val);	/* CP_COHER_SIZE_HI */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	} else {
		/* ACQUIRE_MEM is only required on a compute ring. */
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, false));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	}
}

void
si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
		       enum chip_class chip_class,
		       uint32_t *flush_cnt,
		       uint64_t flush_va,
		       bool is_mec,
		       enum radv_cmd_flush_bits flush_bits,
		       uint64_t gfx9_eop_bug_va)
{
	unsigned cp_coher_cntl = 0;
	uint32_t flush_cb_db = flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					     RADV_CMD_FLAG_FLUSH_AND_INV_DB);

	if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (flush_bits & RADV_CMD_FLAG_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (chip_class <= VI) {
		if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB) {
			cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
					 S_0085F0_CB0_DEST_BASE_ENA(1) |
					 S_0085F0_CB1_DEST_BASE_ENA(1) |
					 S_0085F0_CB2_DEST_BASE_ENA(1) |
					 S_0085F0_CB3_DEST_BASE_ENA(1) |
					 S_0085F0_CB4_DEST_BASE_ENA(1) |
					 S_0085F0_CB5_DEST_BASE_ENA(1) |
					 S_0085F0_CB6_DEST_BASE_ENA(1) |
					 S_0085F0_CB7_DEST_BASE_ENA(1);

			/* Necessary for DCC */
			if (chip_class >= VI) {
				si_cs_emit_write_event_eop(cs,
							   chip_class,
							   is_mec,
							   V_028A90_FLUSH_AND_INV_CB_DATA_TS,
							   0,
							   EOP_DATA_SEL_DISCARD,
							   0, 0, 0,
							   gfx9_eop_bug_va);
			}
		}
		if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB) {
			cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
					 S_0085F0_DB_DEST_BASE_ENA(1);
		}
	}

	if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}

	if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}

	if (flush_bits & RADV_CMD_FLAG_PS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (flush_bits & RADV_CMD_FLAG_VS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (chip_class >= GFX9 && flush_cb_db) {
		unsigned cb_db_event, tc_flags;

		/* Set the CB/DB flush event. */
		cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;

		/* These are the only allowed combinations. If you need to
		 * do multiple operations at once, do them separately.
		 * All operations that invalidate L2 also seem to invalidate
		 * metadata. Volatile (VOL) and WC flushes are not listed here.
		 *
		 * TC    | TC_WB         = writeback & invalidate L2 & L1
		 * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
		 *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
		 * TC            | TC_NC = invalidate L2 for MTYPE == NC
		 * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
		 * TCL1                  = invalidate L1
		 */
		tc_flags = EVENT_TC_ACTION_ENA |
			   EVENT_TC_MD_ACTION_ENA;

		/* Ideally flush TC together with CB/DB. */
		if (flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) {
			/* Writeback and invalidate everything in L2 & L1. */
			tc_flags = EVENT_TC_ACTION_ENA |
				   EVENT_TC_WB_ACTION_ENA;

			/* Clear the flags. */
			flush_bits &= ~(RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1);
		}
		assert(flush_cnt);
		uint32_t old_fence = (*flush_cnt)++;

		si_cs_emit_write_event_eop(cs, chip_class, false, cb_db_event, tc_flags,
					   EOP_DATA_SEL_VALUE_32BIT,
					   flush_va, old_fence, *flush_cnt,
					   gfx9_eop_bug_va);
		si_emit_wait_fence(cs, flush_va, *flush_cnt, 0xffffffff);
	}

	/* VGT state sync */
	if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}

	/* VGT streamout state sync */
	if (flush_bits & RADV_CMD_FLAG_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if ((cp_coher_cntl ||
	     (flush_bits & (RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
			    RADV_CMD_FLAG_INV_VMEM_L1 |
			    RADV_CMD_FLAG_INV_GLOBAL_L2 |
			    RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) &&
	    !is_mec) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
	    (chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
		si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9,
				    cp_coher_cntl |
				    S_0085F0_TC_ACTION_ENA(1) |
				    S_0085F0_TCL1_ACTION_ENA(1) |
				    S_0301F0_TC_WB_ACTION_ENA(chip_class >= VI));
		cp_coher_cntl = 0;
	} else {
		if (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2) {
			/* WB = write-back
			 * NC = apply to non-coherent MTYPEs
			 *      (i.e. MTYPE <= 1, which is what we use everywhere)
			 *
			 * WB doesn't work without NC.
			 */
			si_emit_acquire_mem(cs, is_mec,
					    chip_class >= GFX9,
					    cp_coher_cntl |
					    S_0301F0_TC_WB_ACTION_ENA(1) |
					    S_0301F0_TC_NC_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
		if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
			si_emit_acquire_mem(cs, is_mec,
					    chip_class >= GFX9,
					    cp_coher_cntl |
					    S_0085F0_TCL1_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
	}

	/* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
	 * Therefore, it should be last. Done in PFP.
	 */
	if (cp_coher_cntl)
		si_emit_acquire_mem(cs, is_mec, chip_class >= GFX9, cp_coher_cntl);

	if (flush_bits & RADV_CMD_FLAG_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
			    EVENT_INDEX(0));
	} else if (flush_bits & RADV_CMD_FLAG_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
			    EVENT_INDEX(0));
	}
}

void
si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
{
	bool is_compute = cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE;

	if (is_compute)
		cmd_buffer->state.flush_bits &= ~(RADV_CMD_FLAG_FLUSH_AND_INV_CB |
						  RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
						  RADV_CMD_FLAG_FLUSH_AND_INV_DB |
						  RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
						  RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
						  RADV_CMD_FLAG_VS_PARTIAL_FLUSH |
						  RADV_CMD_FLAG_VGT_FLUSH |
						  RADV_CMD_FLAG_START_PIPELINE_STATS |
						  RADV_CMD_FLAG_STOP_PIPELINE_STATS);

	if (!cmd_buffer->state.flush_bits)
		return;

	enum chip_class chip_class = cmd_buffer->device->physical_device->rad_info.chip_class;
	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 128);

	uint32_t *ptr = NULL;
	uint64_t va = 0;
	if (chip_class == GFX9) {
		va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset;
		ptr = &cmd_buffer->gfx9_fence_idx;
	}
	si_cs_emit_cache_flush(cmd_buffer->cs,
			       cmd_buffer->device->physical_device->rad_info.chip_class,
			       ptr, va,
			       radv_cmd_buffer_uses_mec(cmd_buffer),
			       cmd_buffer->state.flush_bits,
			       cmd_buffer->gfx9_eop_bug_va);

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_cmd_buffer_trace_emit(cmd_buffer);

	cmd_buffer->state.flush_bits = 0;
}

/* Sets the CP predication state using a boolean stored at va. */
void
si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer,
			      bool draw_visible, uint64_t va)
{
	uint32_t op = 0;

	if (va) {
		op = PRED_OP(PREDICATION_OP_BOOL64);

		/* PREDICATION_DRAW_VISIBLE means that if the 32-bit value is
		 * zero, all rendering commands are discarded. With
		 * PREDICATION_DRAW_NOT_VISIBLE, they are discarded if the
		 * value is non-zero.
		 */
		op |= draw_visible ? PREDICATION_DRAW_VISIBLE :
				     PREDICATION_DRAW_NOT_VISIBLE;
	}
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
		radeon_emit(cmd_buffer->cs, op);
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);
	} else {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, op | ((va >> 32) & 0xFF));
	}
}

/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC	(1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT	(1 << 1)
#define CP_DMA_USE_L2	(1 << 2)
#define CP_DMA_CLEAR	(1 << 3)

/* Alignment for optimal performance. */
#define SI_CPDMA_ALIGNMENT 32

/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct radv_cmd_buffer *cmd_buffer)
{
	unsigned max = cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 ?
		       S_414_BYTE_COUNT_GFX9(~0u) :
		       S_414_BYTE_COUNT_GFX6(~0u);

	/* make it aligned for optimal performance */
	return max & ~(SI_CPDMA_ALIGNMENT - 1);
}
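
/* The mask above keeps each packet's size a multiple of SI_CPDMA_ALIGNMENT.
 * Assuming the usual field widths (21 bits for the GFX6 byte count, 26 bits
 * for GFX9), this returns 0x1fffe0 and 0x3ffffe0 bytes respectively.
 */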

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set, src_va
 * is a 32-bit clear value.
 */
static void si_emit_cp_dma(struct radv_cmd_buffer *cmd_buffer,
			   uint64_t dst_va, uint64_t src_va,
			   unsigned size, unsigned flags)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint32_t header = 0, command = 0;

	assert(size <= cp_dma_max_byte_count(cmd_buffer));

	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
		command |= S_414_BYTE_COUNT_GFX9(size);
	else
		command |= S_414_BYTE_COUNT_GFX6(size);

	/* Sync flags. */
	if (flags & CP_DMA_SYNC)
		header |= S_411_CP_SYNC(1);
	else {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
			command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
		else
			command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
	}

	if (flags & CP_DMA_RAW_WAIT)
		command |= S_414_RAW_WAIT(1);

	/* Src and dst flags. */
	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9 &&
	    !(flags & CP_DMA_CLEAR) &&
	    src_va == dst_va)
		header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_DST_SEL(V_411_DST_ADDR_TC_L2);

	if (flags & CP_DMA_CLEAR)
		header |= S_411_SRC_SEL(V_411_DATA);
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, cmd_buffer->state.predicating));
		radeon_emit(cs, header);
		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);	/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);	/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, command);
	} else {
		assert(!(flags & CP_DMA_USE_L2));
		header |= S_411_SRC_ADDR_HI(src_va >> 32);
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, cmd_buffer->state.predicating));
		radeon_emit(cs, src_va);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, header);			/* SRC_ADDR_HI [15:0] + flags. */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, command);
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (flags & CP_DMA_SYNC) {
		if (cmd_buffer->queue_family_index == RADV_QUEUE_GENERAL) {
			radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
			radeon_emit(cs, 0);
		}

		/* CP will see the sync flag and wait for all DMAs to complete. */
		cmd_buffer->state.dma_is_busy = false;
	}

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_cmd_buffer_trace_emit(cmd_buffer);
}

void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
			unsigned size)
{
	uint64_t aligned_va = va & ~(SI_CPDMA_ALIGNMENT - 1);
	uint64_t aligned_size = ((va + size + SI_CPDMA_ALIGNMENT - 1) & ~(SI_CPDMA_ALIGNMENT - 1)) - aligned_va;

	si_emit_cp_dma(cmd_buffer, aligned_va, aligned_va,
		       aligned_size, CP_DMA_USE_L2);
}
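
/* Alignment example: va = 0x1005, size = 100 prefetches the 32-byte aligned
 * range [0x1000, 0x1080): aligned_va = 0x1000 and aligned_size = 0x80,
 * covering the whole requested range.
 */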

static void si_cp_dma_prepare(struct radv_cmd_buffer *cmd_buffer, uint64_t byte_count,
			      uint64_t remaining_size, unsigned *flags)
{
	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (cmd_buffer->state.flush_bits) {
		si_emit_cache_flush(cmd_buffer);
		*flags |= CP_DMA_RAW_WAIT;
	}

	/* Do the synchronization after the last dma, so that all data
	 * is written to memory.
	 */
	if (byte_count == remaining_size)
		*flags |= CP_DMA_SYNC;
}

static void si_cp_dma_realign_engine(struct radv_cmd_buffer *cmd_buffer, unsigned size)
{
	uint64_t va;
	uint32_t offset;
	unsigned dma_flags = 0;
	unsigned buf_size = SI_CPDMA_ALIGNMENT * 2;
	void *ptr;

	assert(size < SI_CPDMA_ALIGNMENT);

	radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr);

	va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);

	si_emit_cp_dma(cmd_buffer, va, va + SI_CPDMA_ALIGNMENT, size,
		       dma_flags);
}

void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
			   uint64_t src_va, uint64_t dest_va,
			   uint64_t size)
{
	uint64_t main_src_va, main_dest_va;
	uint64_t skipped_size = 0, realign_size = 0;

	/* Assume that we are not going to sync after the last DMA operation. */
	cmd_buffer->state.dma_is_busy = true;

	if (cmd_buffer->device->physical_device->rad_info.family <= CHIP_CARRIZO ||
	    cmd_buffer->device->physical_device->rad_info.family == CHIP_STONEY) {
		/* If the size is not aligned, we must add a dummy copy at the end
		 * just to align the internal counter. Otherwise, the DMA engine
		 * would slow down by an order of magnitude for following copies.
		 */
		if (size % SI_CPDMA_ALIGNMENT)
			realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

		/* If the copy begins unaligned, we must start copying from the next
		 * aligned block and the skipped part should be copied after everything
		 * else has been copied. Only the src alignment matters, not dst.
		 */
		if (src_va % SI_CPDMA_ALIGNMENT) {
			skipped_size = SI_CPDMA_ALIGNMENT - (src_va % SI_CPDMA_ALIGNMENT);
			/* The main part will be skipped if the size is too small. */
			skipped_size = MIN2(skipped_size, size);
			size -= skipped_size;
		}
	}
	main_src_va = src_va + skipped_size;
	main_dest_va = dest_va + skipped_size;

	while (size) {
		unsigned dma_flags = 0;
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));

		si_cp_dma_prepare(cmd_buffer, byte_count,
				  size + skipped_size + realign_size,
				  &dma_flags);

		dma_flags &= ~CP_DMA_SYNC;

		si_emit_cp_dma(cmd_buffer, main_dest_va, main_src_va,
			       byte_count, dma_flags);

		size -= byte_count;
		main_src_va += byte_count;
		main_dest_va += byte_count;
	}

	if (skipped_size) {
		unsigned dma_flags = 0;

		si_cp_dma_prepare(cmd_buffer, skipped_size,
				  size + skipped_size + realign_size,
				  &dma_flags);

		si_emit_cp_dma(cmd_buffer, dest_va, src_va,
			       skipped_size, dma_flags);
	}
	if (realign_size)
		si_cp_dma_realign_engine(cmd_buffer, realign_size);
}

void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
			    uint64_t size, unsigned value)
{
	if (!size)
		return;

	assert(va % 4 == 0 && size % 4 == 0);

	/* Assume that we are not going to sync after the last DMA operation. */
	cmd_buffer->state.dma_is_busy = true;

	while (size) {
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(cmd_buffer));
		unsigned dma_flags = CP_DMA_CLEAR;

		si_cp_dma_prepare(cmd_buffer, byte_count, size, &dma_flags);

		/* Emit the clear packet. */
		si_emit_cp_dma(cmd_buffer, va, value, byte_count,
			       dma_flags);

		size -= byte_count;
		va += byte_count;
	}
}

void si_cp_dma_wait_for_idle(struct radv_cmd_buffer *cmd_buffer)
{
	if (cmd_buffer->device->physical_device->rad_info.chip_class < CIK)
		return;

	if (!cmd_buffer->state.dma_is_busy)
		return;

	/* Issue a dummy DMA that copies zero bytes.
	 *
	 * The DMA engine will see that there's no work to do and skip this
	 * DMA request, however, the CP will see the sync flag and still wait
	 * for all DMAs to complete.
	 */
	si_emit_cp_dma(cmd_buffer, 0, 0, 0, CP_DMA_SYNC);

	cmd_buffer->state.dma_is_busy = false;
}

/* For MSAA sample positions. */
#define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y) \
	(((s0x) & 0xf) | (((unsigned)(s0y) & 0xf) << 4) | \
	 (((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) | \
	 (((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) | \
	 (((unsigned)(s3x) & 0xf) << 24) | (((unsigned)(s3y) & 0xf) << 28))

/* 2xMSAA
 * There are two locations (4, 4), (-4, -4). */
const uint32_t eg_sample_locs_2x[4] = {
	FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
	FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
	FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
	FILL_SREG(4, 4, -4, -4, 4, 4, -4, -4),
};
const unsigned eg_max_dist_2x = 4;
/* 4xMSAA
 * There are 4 locations: (-2, -6), (6, -2), (-6, 2), (2, 6). */
const uint32_t eg_sample_locs_4x[4] = {
	FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
	FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
	FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
	FILL_SREG(-2, -6, 6, -2, -6, 2, 2, 6),
};
const unsigned eg_max_dist_4x = 6;

/* Cayman 8xMSAA */
static const uint32_t cm_sample_locs_8x[] = {
	FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
	FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
	FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
	FILL_SREG( 1, -3, -1,  3,  5,  1, -3, -5),
	FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
	FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
	FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
	FILL_SREG(-5,  5, -7, -1,  3,  7,  7, -7),
};
static const unsigned cm_max_dist_8x = 8;
/* Cayman 16xMSAA */
static const uint32_t cm_sample_locs_16x[] = {
	FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
	FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
	FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
	FILL_SREG( 1,  1, -1, -3, -3,  2,  4, -1),
	FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
	FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
	FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
	FILL_SREG(-5, -2,  2,  5,  5,  3,  3, -5),
	FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
	FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
	FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
	FILL_SREG(-2,  6,  0, -7, -4, -6, -6,  4),
	FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
	FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
	FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
	FILL_SREG(-8,  0,  7, -4,  6,  7, -7, -8),
};
static const unsigned cm_max_dist_16x = 8;

unsigned radv_cayman_get_maxdist(int log_samples)
{
	unsigned max_dist[] = {
		0,
		eg_max_dist_2x,
		eg_max_dist_4x,
		cm_max_dist_8x,
		cm_max_dist_16x
	};
	return max_dist[log_samples];
}

void radv_cayman_emit_msaa_sample_locs(struct radeon_cmdbuf *cs, int nr_samples)
{
	switch (nr_samples) {
	default:
	case 1:
		radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0);
		radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0);
		radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0);
		radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0);
		break;
	case 2:
		radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);
		radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);
		radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);
		radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);
		break;
	case 4:
		radeon_set_context_reg(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);
		radeon_set_context_reg(cs, R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_4x[1]);
		radeon_set_context_reg(cs, R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_4x[2]);
		radeon_set_context_reg(cs, R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_4x[3]);
		break;
	case 8:
		radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14);
		radeon_emit(cs, cm_sample_locs_8x[0]);
		radeon_emit(cs, cm_sample_locs_8x[4]);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		radeon_emit(cs, cm_sample_locs_8x[1]);
		radeon_emit(cs, cm_sample_locs_8x[5]);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		radeon_emit(cs, cm_sample_locs_8x[2]);
		radeon_emit(cs, cm_sample_locs_8x[6]);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		radeon_emit(cs, cm_sample_locs_8x[3]);
		radeon_emit(cs, cm_sample_locs_8x[7]);
		break;
	case 16:
		radeon_set_context_reg_seq(cs, R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16);
		radeon_emit(cs, cm_sample_locs_16x[0]);
		radeon_emit(cs, cm_sample_locs_16x[4]);
		radeon_emit(cs, cm_sample_locs_16x[8]);
		radeon_emit(cs, cm_sample_locs_16x[12]);
		radeon_emit(cs, cm_sample_locs_16x[1]);
		radeon_emit(cs, cm_sample_locs_16x[5]);
		radeon_emit(cs, cm_sample_locs_16x[9]);
		radeon_emit(cs, cm_sample_locs_16x[13]);
		radeon_emit(cs, cm_sample_locs_16x[2]);
		radeon_emit(cs, cm_sample_locs_16x[6]);
		radeon_emit(cs, cm_sample_locs_16x[10]);
		radeon_emit(cs, cm_sample_locs_16x[14]);
		radeon_emit(cs, cm_sample_locs_16x[3]);
		radeon_emit(cs, cm_sample_locs_16x[7]);
		radeon_emit(cs, cm_sample_locs_16x[11]);
		radeon_emit(cs, cm_sample_locs_16x[15]);
		break;
	}
}

static void radv_cayman_get_sample_position(struct radv_device *device,
					    unsigned sample_count,
					    unsigned sample_index, float *out_value)
{
	int offset, index;
	struct {
		int idx:4;
	} val;
	switch (sample_count) {
	case 1:
	default:
		out_value[0] = out_value[1] = 0.5;
		break;
	case 2:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 4:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 8:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4) * 4;
		val.idx = (cm_sample_locs_8x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (cm_sample_locs_8x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 16:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4) * 4;
		val.idx = (cm_sample_locs_16x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (cm_sample_locs_16x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	}
}
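
/* The 4-bit bitfield above sign-extends each packed nibble, and
 * (idx + 8) / 16 remaps the signed offset to a position in [0, 1):
 * e.g. a stored nibble of 0xc decodes to idx = -4, i.e. a sample
 * position of (-4 + 8) / 16 = 0.25 within the pixel.
 */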

void radv_device_init_msaa(struct radv_device *device)
{
	int i;
	radv_cayman_get_sample_position(device, 1, 0, device->sample_locations_1x[0]);

	for (i = 0; i < 2; i++)
		radv_cayman_get_sample_position(device, 2, i, device->sample_locations_2x[i]);
	for (i = 0; i < 4; i++)
		radv_cayman_get_sample_position(device, 4, i, device->sample_locations_4x[i]);
	for (i = 0; i < 8; i++)
		radv_cayman_get_sample_position(device, 8, i, device->sample_locations_8x[i]);
	for (i = 0; i < 16; i++)
		radv_cayman_get_sample_position(device, 16, i, device->sample_locations_16x[i]);
}