radeonsi: rename and re-document cache flush flags
mesa.git: src/gallium/drivers/radeonsi/si_state_draw.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "si_build_pm4.h"
26 #include "sid.h"
27
28 #include "util/u_index_modify.h"
29 #include "util/u_log.h"
30 #include "util/u_upload_mgr.h"
31 #include "util/u_prim.h"
32 #include "util/u_suballoc.h"
33
34 #include "ac_debug.h"
35
36 /* special primitive types */
37 #define SI_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX
38
39 static unsigned si_conv_pipe_prim(unsigned mode)
40 {
41 static const unsigned prim_conv[] = {
42 [PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
43 [PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
44 [PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
45 [PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
46 [PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
47 [PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
48 [PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
49 [PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
50 [PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
51 [PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
52 [PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
53 [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
54 [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
55 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
56 [PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
57 [SI_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
58 };
59 assert(mode < ARRAY_SIZE(prim_conv));
60 return prim_conv[mode];
61 }
62
63 /**
64 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
65 * LS.LDS_SIZE is shared by all 3 shader stages.
66 *
67 * The information about LDS and other non-compile-time parameters is then
68 * written to userdata SGPRs.
69 */
70 static void si_emit_derived_tess_state(struct si_context *sctx,
71 const struct pipe_draw_info *info,
72 unsigned *num_patches)
73 {
74 struct radeon_cmdbuf *cs = sctx->gfx_cs;
75 struct si_shader *ls_current;
76 struct si_shader_selector *ls;
77 /* The TES pointer will only be used for sctx->last_tcs.
78 * It would be wrong to think that TCS = TES. */
79 struct si_shader_selector *tcs =
80 sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
81 unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
82 bool has_primid_instancing_bug = sctx->chip_class == GFX6 &&
83 sctx->screen->info.max_se == 1;
84 unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
85 unsigned num_tcs_input_cp = info->vertices_per_patch;
86 unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
87 unsigned num_tcs_patch_outputs;
88 unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
89 unsigned input_patch_size, output_patch_size, output_patch0_offset;
90 unsigned perpatch_output_offset, lds_size;
91 unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
92 unsigned offchip_layout, hardware_lds_size, ls_hs_config;
93
94 /* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
95 if (sctx->chip_class >= GFX9) {
96 if (sctx->tcs_shader.cso)
97 ls_current = sctx->tcs_shader.current;
98 else
99 ls_current = sctx->fixed_func_tcs_shader.current;
100
101 ls = ls_current->key.part.tcs.ls;
102 } else {
103 ls_current = sctx->vs_shader.current;
104 ls = sctx->vs_shader.cso;
105 }
106
107 if (sctx->last_ls == ls_current &&
108 sctx->last_tcs == tcs &&
109 sctx->last_tes_sh_base == tes_sh_base &&
110 sctx->last_num_tcs_input_cp == num_tcs_input_cp &&
111 (!has_primid_instancing_bug ||
112 (sctx->last_tess_uses_primid == tess_uses_primid))) {
113 *num_patches = sctx->last_num_patches;
114 return;
115 }
116
117 sctx->last_ls = ls_current;
118 sctx->last_tcs = tcs;
119 sctx->last_tes_sh_base = tes_sh_base;
120 sctx->last_num_tcs_input_cp = num_tcs_input_cp;
121 sctx->last_tess_uses_primid = tess_uses_primid;
122
123 /* This calculates how shader inputs and outputs among VS, TCS, and TES
124 * are laid out in LDS. */
125 num_tcs_inputs = util_last_bit64(ls->outputs_written);
126
127 if (sctx->tcs_shader.cso) {
128 num_tcs_outputs = util_last_bit64(tcs->outputs_written);
129 num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
130 num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
131 } else {
132 /* No TCS. Route varyings from LS to TES. */
133 num_tcs_outputs = num_tcs_inputs;
134 num_tcs_output_cp = num_tcs_input_cp;
135 num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
136 }
137
138 input_vertex_size = ls->lshs_vertex_stride;
139 output_vertex_size = num_tcs_outputs * 16;
140
141 input_patch_size = num_tcs_input_cp * input_vertex_size;
142
143 pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
144 output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
145
 146 	 /* Ensure that we only need one wave per SIMD so we don't need to check
 147 	 * resource usage. This also ensures that the number of TCS input and
 148 	 * output vertices per threadgroup is at most 256.
 149 	 */
150 unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
151 *num_patches = 256 / max_verts_per_patch;
152
153 /* Make sure that the data fits in LDS. This assumes the shaders only
154 * use LDS for the inputs and outputs.
155 *
156 * While GFX7 can use 64K per threadgroup, there is a hang on Stoney
157 * with 2 CUs if we use more than 32K. The closed Vulkan driver also
158 * uses 32K at most on all GCN chips.
159 */
160 hardware_lds_size = 32768;
161 *num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
162 output_patch_size));
163
164 /* Make sure the output data fits in the offchip buffer */
165 *num_patches = MIN2(*num_patches,
166 (sctx->screen->tess_offchip_block_dw_size * 4) /
167 output_patch_size);
168
169 /* Not necessary for correctness, but improves performance.
170 * The hardware can do more, but the radeonsi shader constant is
171 * limited to 6 bits.
172 */
173 *num_patches = MIN2(*num_patches, 63); /* triangles: 3 full waves except 3 lanes */
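	 /* A hypothetical worked example of the clamping above (the sizes are
	 * illustrative, not taken from a real shader): triangle patches with
	 * 3 CPs in and out, input_patch_size = 3*48 = 144 bytes and
	 * output_patch_size = 3*64 + 2*16 = 224 bytes give
	 *   256/3 = 85 -> MIN2(85, 32768/368 = 89) = 85 -> capped to 63.
	 * On GFX6, the one-wave workaround below would clamp this further to
	 * 64/3 = 21 patches.
	 */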
174
175 /* When distributed tessellation is unsupported, switch between SEs
176 * at a higher frequency to compensate for it.
177 */
178 if (!sctx->screen->has_distributed_tess && sctx->screen->info.max_se > 1)
179 *num_patches = MIN2(*num_patches, 16); /* recommended */
180
181 /* Make sure that vector lanes are reasonably occupied. It probably
182 * doesn't matter much because this is LS-HS, and TES is likely to
183 * occupy significantly more CUs.
184 */
185 unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
186 if (temp_verts_per_tg > 64 && temp_verts_per_tg % 64 < 48)
187 *num_patches = (temp_verts_per_tg & ~63) / max_verts_per_patch;
188
189 if (sctx->chip_class == GFX6) {
190 /* GFX6 bug workaround, related to power management. Limit LS-HS
191 * threadgroups to only one wave.
192 */
193 unsigned one_wave = 64 / max_verts_per_patch;
194 *num_patches = MIN2(*num_patches, one_wave);
195 }
196
197 /* The VGT HS block increments the patch ID unconditionally
198 * within a single threadgroup. This results in incorrect
199 * patch IDs when instanced draws are used.
200 *
201 * The intended solution is to restrict threadgroups to
202 * a single instance by setting SWITCH_ON_EOI, which
203 * should cause IA to split instances up. However, this
204 * doesn't work correctly on GFX6 when there is no other
205 * SE to switch to.
206 */
207 if (has_primid_instancing_bug && tess_uses_primid)
208 *num_patches = 1;
209
210 sctx->last_num_patches = *num_patches;
211
212 output_patch0_offset = input_patch_size * *num_patches;
213 perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
214
215 /* Compute userdata SGPRs. */
216 assert(((input_vertex_size / 4) & ~0xff) == 0);
217 assert(((output_vertex_size / 4) & ~0xff) == 0);
218 assert(((input_patch_size / 4) & ~0x1fff) == 0);
219 assert(((output_patch_size / 4) & ~0x1fff) == 0);
220 assert(((output_patch0_offset / 16) & ~0xffff) == 0);
221 assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
222 assert(num_tcs_input_cp <= 32);
223 assert(num_tcs_output_cp <= 32);
224
225 uint64_t ring_va = si_resource(sctx->tess_rings)->gpu_address;
226 assert((ring_va & u_bit_consecutive(0, 19)) == 0);
227
228 tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
229 S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
230 tcs_out_layout = (output_patch_size / 4) |
231 (num_tcs_input_cp << 13) |
232 ring_va;
233 tcs_out_offsets = (output_patch0_offset / 16) |
234 ((perpatch_output_offset / 16) << 16);
235 offchip_layout = *num_patches |
236 (num_tcs_output_cp << 6) |
237 (pervertex_output_patch_size * *num_patches << 12);
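	 /* Bit layouts implied by the shifts and asserts above (documentation
	 * only, derived from this function):
	 *   tcs_out_layout:  [12:0] output_patch_size/4, [18:13] num_tcs_input_cp,
	 *                    [31:19] tess ring address (its low 19 bits are
	 *                    asserted to be zero above)
	 *   tcs_out_offsets: [15:0] output_patch0_offset/16,
	 *                    [31:16] perpatch_output_offset/16
	 *   offchip_layout:  [5:0] num_patches, [11:6] num_tcs_output_cp,
	 *                    [31:12] pervertex_output_patch_size * num_patches
	 */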
238
239 /* Compute the LDS size. */
240 lds_size = output_patch0_offset + output_patch_size * *num_patches;
241
242 if (sctx->chip_class >= GFX7) {
243 assert(lds_size <= 65536);
244 lds_size = align(lds_size, 512) / 512;
245 } else {
246 assert(lds_size <= 32768);
247 lds_size = align(lds_size, 256) / 256;
248 }
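	 /* At this point lds_size is no longer in bytes but in the units of
	 * the LDS_SIZE register field written below: 512-byte granules on
	 * GFX7+ and 256-byte granules on GFX6, matching the divisors above.
	 */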
249
250 /* Set SI_SGPR_VS_STATE_BITS. */
251 sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE &
252 C_VS_STATE_LS_OUT_VERTEX_SIZE;
253 sctx->current_vs_state |= tcs_in_layout;
254
255 /* We should be able to support in-shader LDS use with LLVM >= 9
256 * by just adding the lds_sizes together, but it has never
257 * been tested. */
258 assert(ls_current->config.lds_size == 0);
259
260 if (sctx->chip_class >= GFX9) {
261 unsigned hs_rsrc2 = ls_current->config.rsrc2 |
262 S_00B42C_LDS_SIZE(lds_size);
263
264 radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);
265
266 /* Set userdata SGPRs for merged LS-HS. */
267 radeon_set_sh_reg_seq(cs,
268 R_00B430_SPI_SHADER_USER_DATA_LS_0 +
269 GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
270 radeon_emit(cs, offchip_layout);
271 radeon_emit(cs, tcs_out_offsets);
272 radeon_emit(cs, tcs_out_layout);
273 } else {
274 unsigned ls_rsrc2 = ls_current->config.rsrc2;
275
276 si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
277 ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);
278
279 /* Due to a hw bug, RSRC2_LS must be written twice with another
280 * LS register written in between. */
281 if (sctx->chip_class == GFX7 && sctx->family != CHIP_HAWAII)
282 radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
283 radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
284 radeon_emit(cs, ls_current->config.rsrc1);
285 radeon_emit(cs, ls_rsrc2);
286
287 /* Set userdata SGPRs for TCS. */
288 radeon_set_sh_reg_seq(cs,
289 R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
290 radeon_emit(cs, offchip_layout);
291 radeon_emit(cs, tcs_out_offsets);
292 radeon_emit(cs, tcs_out_layout);
293 radeon_emit(cs, tcs_in_layout);
294 }
295
296 /* Set userdata SGPRs for TES. */
297 radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
298 radeon_emit(cs, offchip_layout);
299 radeon_emit(cs, ring_va);
300
301 ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
302 S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
303 S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
304
305 if (sctx->last_ls_hs_config != ls_hs_config) {
306 if (sctx->chip_class >= GFX7) {
307 radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
308 ls_hs_config);
309 } else {
310 radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
311 ls_hs_config);
312 }
313 sctx->last_ls_hs_config = ls_hs_config;
314 sctx->context_roll = true;
315 }
316 }
317
318 static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info,
319 enum pipe_prim_type prim)
320 {
321 switch (prim) {
322 case PIPE_PRIM_PATCHES:
323 return info->count / info->vertices_per_patch;
324 case PIPE_PRIM_POLYGON:
325 return info->count >= 3;
326 case SI_PRIM_RECTANGLE_LIST:
327 return info->count / 3;
328 default:
329 return u_decomposed_prims_for_vertices(prim, info->count);
330 }
331 }
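 /* Examples (illustrative only): PIPE_PRIM_PATCHES with
 * vertices_per_patch = 4 and count = 12 -> 3 patches;
 * SI_PRIM_RECTANGLE_LIST with count = 9 -> 3 rectangles (3 vertices each);
 * PIPE_PRIM_TRIANGLE_STRIP with count = 9 -> 7 triangles via
 * u_decomposed_prims_for_vertices().
 */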
332
333 static unsigned
334 si_get_init_multi_vgt_param(struct si_screen *sscreen,
335 union si_vgt_param_key *key)
336 {
337 STATIC_ASSERT(sizeof(union si_vgt_param_key) == 4);
338 unsigned max_primgroup_in_wave = 2;
339
340 /* SWITCH_ON_EOP(0) is always preferable. */
341 bool wd_switch_on_eop = false;
342 bool ia_switch_on_eop = false;
343 bool ia_switch_on_eoi = false;
344 bool partial_vs_wave = false;
345 bool partial_es_wave = false;
346
347 if (key->u.uses_tess) {
348 /* SWITCH_ON_EOI must be set if PrimID is used. */
349 if (key->u.tess_uses_prim_id)
350 ia_switch_on_eoi = true;
351
352 /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
353 if ((sscreen->info.family == CHIP_TAHITI ||
354 sscreen->info.family == CHIP_PITCAIRN ||
355 sscreen->info.family == CHIP_BONAIRE) &&
356 key->u.uses_gs)
357 partial_vs_wave = true;
358
359 /* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
360 if (sscreen->has_distributed_tess) {
361 if (key->u.uses_gs) {
362 if (sscreen->info.chip_class == GFX8)
363 partial_es_wave = true;
364 } else {
365 partial_vs_wave = true;
366 }
367 }
368 }
369
370 /* This is a hardware requirement. */
371 if (key->u.line_stipple_enabled ||
372 (sscreen->debug_flags & DBG(SWITCH_ON_EOP))) {
373 ia_switch_on_eop = true;
374 wd_switch_on_eop = true;
375 }
376
377 if (sscreen->info.chip_class >= GFX7) {
 378 		 /* WD_SWITCH_ON_EOP has no effect on GPUs with fewer than
 379 		 * 4 shader engines. Set it to 1 to pass the assertion below.
380 * The other cases are hardware requirements.
381 *
382 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
383 * for points, line strips, and tri strips.
384 */
385 if (sscreen->info.max_se <= 2 ||
386 key->u.prim == PIPE_PRIM_POLYGON ||
387 key->u.prim == PIPE_PRIM_LINE_LOOP ||
388 key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
389 key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
390 (key->u.primitive_restart &&
391 (sscreen->info.family < CHIP_POLARIS10 ||
392 (key->u.prim != PIPE_PRIM_POINTS &&
393 key->u.prim != PIPE_PRIM_LINE_STRIP &&
394 key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
395 key->u.count_from_stream_output)
396 wd_switch_on_eop = true;
397
398 /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
399 * We don't know that for indirect drawing, so treat it as
400 * always problematic. */
401 if (sscreen->info.family == CHIP_HAWAII &&
402 key->u.uses_instancing)
403 wd_switch_on_eop = true;
404
 405 		 /* Performance recommendation for 4 SE GFX7-8 parts if
406 * instances are smaller than a primgroup.
407 * Assume indirect draws always use small instances.
408 * This is needed for good VS wave utilization.
409 */
410 if (sscreen->info.chip_class <= GFX8 &&
411 sscreen->info.max_se == 4 &&
412 key->u.multi_instances_smaller_than_primgroup)
413 wd_switch_on_eop = true;
414
415 /* Required on GFX7 and later. */
416 if (sscreen->info.max_se == 4 && !wd_switch_on_eop)
417 ia_switch_on_eoi = true;
418
419 /* HW engineers suggested that PARTIAL_VS_WAVE_ON should be set
420 * to work around a GS hang.
421 */
422 if (key->u.uses_gs &&
423 (sscreen->info.family == CHIP_TONGA ||
424 sscreen->info.family == CHIP_FIJI ||
425 sscreen->info.family == CHIP_POLARIS10 ||
426 sscreen->info.family == CHIP_POLARIS11 ||
427 sscreen->info.family == CHIP_POLARIS12 ||
428 sscreen->info.family == CHIP_VEGAM))
429 partial_vs_wave = true;
430
431 /* Required by Hawaii and, for some special cases, by GFX8. */
432 if (ia_switch_on_eoi &&
433 (sscreen->info.family == CHIP_HAWAII ||
434 (sscreen->info.chip_class == GFX8 &&
435 (key->u.uses_gs || max_primgroup_in_wave != 2))))
436 partial_vs_wave = true;
437
438 /* Instancing bug on Bonaire. */
439 if (sscreen->info.family == CHIP_BONAIRE && ia_switch_on_eoi &&
440 key->u.uses_instancing)
441 partial_vs_wave = true;
442
443 /* This only applies to Polaris10 and later 4 SE chips.
444 * wd_switch_on_eop is already true on all other chips.
445 */
446 if (!wd_switch_on_eop && key->u.primitive_restart)
447 partial_vs_wave = true;
448
449 /* If the WD switch is false, the IA switch must be false too. */
450 assert(wd_switch_on_eop || !ia_switch_on_eop);
451 }
452
453 /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
454 if (sscreen->info.chip_class <= GFX8 && ia_switch_on_eoi)
455 partial_es_wave = true;
456
457 return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
458 S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
459 S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
460 S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
461 S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= GFX7 ? wd_switch_on_eop : 0) |
462 /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
463 S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == GFX8 ?
464 max_primgroup_in_wave : 0) |
465 S_030960_EN_INST_OPT_BASIC(sscreen->info.chip_class >= GFX9) |
466 S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
467 }
468
469 static void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
470 {
471 for (int prim = 0; prim <= SI_PRIM_RECTANGLE_LIST; prim++)
472 for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
473 for (int multi_instances = 0; multi_instances < 2; multi_instances++)
474 for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
475 for (int count_from_so = 0; count_from_so < 2; count_from_so++)
476 for (int line_stipple = 0; line_stipple < 2; line_stipple++)
477 for (int uses_tess = 0; uses_tess < 2; uses_tess++)
478 for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
479 for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
480 union si_vgt_param_key key;
481
482 key.index = 0;
483 key.u.prim = prim;
484 key.u.uses_instancing = uses_instancing;
485 key.u.multi_instances_smaller_than_primgroup = multi_instances;
486 key.u.primitive_restart = primitive_restart;
487 key.u.count_from_stream_output = count_from_so;
488 key.u.line_stipple_enabled = line_stipple;
489 key.u.uses_tess = uses_tess;
490 key.u.tess_uses_prim_id = tess_uses_primid;
491 key.u.uses_gs = uses_gs;
492
493 sctx->ia_multi_vgt_param[key.index] =
494 si_get_init_multi_vgt_param(sctx->screen, &key);
495 }
496 }
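 /* Design note: this table is filled once per context, so the draw path in
 * si_get_ia_multi_vgt_param() below reduces to a single lookup by
 * key.index; only PRIMGROUP_SIZE and a few draw-dependent bits are ORed
 * in per draw.
 */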
497
498 static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
499 const struct pipe_draw_info *info,
500 enum pipe_prim_type prim,
501 unsigned num_patches,
502 unsigned instance_count,
503 bool primitive_restart)
504 {
505 union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
506 unsigned primgroup_size;
507 unsigned ia_multi_vgt_param;
508
509 if (sctx->tes_shader.cso) {
510 primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
511 } else if (sctx->gs_shader.cso) {
512 primgroup_size = 64; /* recommended with a GS */
513 } else {
514 primgroup_size = 128; /* recommended without a GS and tess */
515 }
516
517 key.u.prim = prim;
518 key.u.uses_instancing = info->indirect || instance_count > 1;
519 key.u.multi_instances_smaller_than_primgroup =
520 info->indirect ||
521 (instance_count > 1 &&
522 (info->count_from_stream_output ||
523 si_num_prims_for_vertices(info, prim) < primgroup_size));
524 key.u.primitive_restart = primitive_restart;
525 key.u.count_from_stream_output = info->count_from_stream_output != NULL;
526
527 ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
528 S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);
529
530 if (sctx->gs_shader.cso) {
531 /* GS requirement. */
532 if (sctx->chip_class <= GFX8 &&
533 SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
534 ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);
535
536 /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
537 * The hw doc says all multi-SE chips are affected, but Vulkan
538 * only applies it to Hawaii. Do what Vulkan does.
539 */
540 if (sctx->family == CHIP_HAWAII &&
541 G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
542 (info->indirect ||
543 (instance_count > 1 &&
544 (info->count_from_stream_output ||
545 si_num_prims_for_vertices(info, prim) <= 1))))
546 sctx->flags |= SI_CONTEXT_VGT_FLUSH;
547 }
548
549 return ia_multi_vgt_param;
550 }
551
552 /* rast_prim is the primitive type after GS. */
553 static void si_emit_rasterizer_prim_state(struct si_context *sctx)
554 {
555 struct radeon_cmdbuf *cs = sctx->gfx_cs;
556 enum pipe_prim_type rast_prim = sctx->current_rast_prim;
557 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
558
559 /* Skip this if not rendering lines. */
560 if (!util_prim_is_lines(rast_prim))
561 return;
562
563 if (rast_prim == sctx->last_rast_prim &&
564 rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
565 return;
566
567 /* For lines, reset the stipple pattern at each primitive. Otherwise,
568 * reset the stipple pattern at each packet (line strips, line loops).
569 */
570 radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
571 rs->pa_sc_line_stipple |
572 S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));
573
574 sctx->last_rast_prim = rast_prim;
575 sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
576 sctx->context_roll = true;
577 }
578
579 static void si_emit_vs_state(struct si_context *sctx,
580 const struct pipe_draw_info *info)
581 {
582 sctx->current_vs_state &= C_VS_STATE_INDEXED;
583 sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);
584
585 if (sctx->num_vs_blit_sgprs) {
586 /* Re-emit the state after we leave u_blitter. */
587 sctx->last_vs_state = ~0;
588 return;
589 }
590
591 if (sctx->current_vs_state != sctx->last_vs_state) {
592 struct radeon_cmdbuf *cs = sctx->gfx_cs;
593
594 /* For the API vertex shader (VS_STATE_INDEXED). */
595 radeon_set_sh_reg(cs,
596 sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
597 SI_SGPR_VS_STATE_BITS * 4,
598 sctx->current_vs_state);
599
600 /* For vertex color clamping, which is done in the last stage
601 * before the rasterizer. */
602 if (sctx->gs_shader.cso || sctx->tes_shader.cso) {
603 /* GS copy shader or TES if GS is missing. */
604 radeon_set_sh_reg(cs,
605 R_00B130_SPI_SHADER_USER_DATA_VS_0 +
606 SI_SGPR_VS_STATE_BITS * 4,
607 sctx->current_vs_state);
608 }
609
610 sctx->last_vs_state = sctx->current_vs_state;
611 }
612 }
613
614 static inline bool si_prim_restart_index_changed(struct si_context *sctx,
615 bool primitive_restart,
616 unsigned restart_index)
617 {
618 return primitive_restart &&
619 (restart_index != sctx->last_restart_index ||
620 sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
621 }
622
623 static void si_emit_draw_registers(struct si_context *sctx,
624 const struct pipe_draw_info *info,
625 enum pipe_prim_type prim,
626 unsigned num_patches,
627 unsigned instance_count,
628 bool primitive_restart)
629 {
630 struct radeon_cmdbuf *cs = sctx->gfx_cs;
631 unsigned vgt_prim = si_conv_pipe_prim(prim);
632 unsigned ia_multi_vgt_param;
633
634 ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, prim, num_patches,
635 instance_count, primitive_restart);
636
637 /* Draw state. */
638 if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
639 if (sctx->chip_class >= GFX9)
640 radeon_set_uconfig_reg_idx(cs, sctx->screen,
641 R_030960_IA_MULTI_VGT_PARAM, 4,
642 ia_multi_vgt_param);
643 else if (sctx->chip_class >= GFX7)
644 radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
645 else
646 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
647
648 sctx->last_multi_vgt_param = ia_multi_vgt_param;
649 }
650 if (vgt_prim != sctx->last_prim) {
651 if (sctx->chip_class >= GFX7)
652 radeon_set_uconfig_reg_idx(cs, sctx->screen,
653 R_030908_VGT_PRIMITIVE_TYPE, 1, vgt_prim);
654 else
655 radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, vgt_prim);
656
657 sctx->last_prim = vgt_prim;
658 }
659
660 /* Primitive restart. */
661 if (primitive_restart != sctx->last_primitive_restart_en) {
662 if (sctx->chip_class >= GFX9)
663 radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
664 primitive_restart);
665 else
666 radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
667 primitive_restart);
668
669 sctx->last_primitive_restart_en = primitive_restart;
670
671 }
672 if (si_prim_restart_index_changed(sctx, primitive_restart, info->restart_index)) {
673 radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
674 info->restart_index);
675 sctx->last_restart_index = info->restart_index;
676 sctx->context_roll = true;
677 }
678 }
679
680 static void si_emit_draw_packets(struct si_context *sctx,
681 const struct pipe_draw_info *info,
682 struct pipe_resource *indexbuf,
683 unsigned index_size,
684 unsigned index_offset,
685 unsigned instance_count,
686 bool dispatch_prim_discard_cs,
687 unsigned original_index_size)
688 {
689 struct pipe_draw_indirect_info *indirect = info->indirect;
690 struct radeon_cmdbuf *cs = sctx->gfx_cs;
691 unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
692 bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
693 uint32_t index_max_size = 0;
694 uint64_t index_va = 0;
695
696 if (info->count_from_stream_output) {
697 struct si_streamout_target *t =
698 (struct si_streamout_target*)info->count_from_stream_output;
699
700 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
701 t->stride_in_dw);
702 si_cp_copy_data(sctx, sctx->gfx_cs,
703 COPY_DATA_REG, NULL,
704 R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2,
705 COPY_DATA_SRC_MEM, t->buf_filled_size,
706 t->buf_filled_size_offset);
707 }
708
709 /* draw packet */
710 if (index_size) {
711 if (index_size != sctx->last_index_size) {
712 unsigned index_type;
713
714 /* index type */
715 switch (index_size) {
716 case 1:
717 index_type = V_028A7C_VGT_INDEX_8;
718 break;
719 case 2:
720 index_type = V_028A7C_VGT_INDEX_16 |
721 (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ?
722 V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
723 break;
724 case 4:
725 index_type = V_028A7C_VGT_INDEX_32 |
726 (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ?
727 V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
728 break;
729 default:
730 assert(!"unreachable");
731 return;
732 }
733
734 if (sctx->chip_class >= GFX9) {
735 radeon_set_uconfig_reg_idx(cs, sctx->screen,
736 R_03090C_VGT_INDEX_TYPE, 2,
737 index_type);
738 } else {
739 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
740 radeon_emit(cs, index_type);
741 }
742
743 sctx->last_index_size = index_size;
744 }
745
746 if (original_index_size) {
747 index_max_size = (indexbuf->width0 - index_offset) /
748 original_index_size;
749 index_va = si_resource(indexbuf)->gpu_address + index_offset;
750
751 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
752 si_resource(indexbuf),
753 RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
754 }
755 } else {
756 /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
757 * so the state must be re-emitted before the next indexed draw.
758 */
759 if (sctx->chip_class >= GFX7)
760 sctx->last_index_size = -1;
761 }
762
763 if (indirect) {
764 uint64_t indirect_va = si_resource(indirect->buffer)->gpu_address;
765
766 assert(indirect_va % 8 == 0);
767
768 si_invalidate_draw_sh_constants(sctx);
769
770 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
771 radeon_emit(cs, 1);
772 radeon_emit(cs, indirect_va);
773 radeon_emit(cs, indirect_va >> 32);
774
775 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
776 si_resource(indirect->buffer),
777 RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
778
779 unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
780 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
781
782 assert(indirect->offset % 4 == 0);
783
784 if (index_size) {
785 radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
786 radeon_emit(cs, index_va);
787 radeon_emit(cs, index_va >> 32);
788
789 radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
790 radeon_emit(cs, index_max_size);
791 }
792
793 if (!sctx->screen->has_draw_indirect_multi) {
794 radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT
795 : PKT3_DRAW_INDIRECT,
796 3, render_cond_bit));
797 radeon_emit(cs, indirect->offset);
798 radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
799 radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
800 radeon_emit(cs, di_src_sel);
801 } else {
802 uint64_t count_va = 0;
803
804 if (indirect->indirect_draw_count) {
805 struct si_resource *params_buf =
806 si_resource(indirect->indirect_draw_count);
807
808 radeon_add_to_buffer_list(
809 sctx, sctx->gfx_cs, params_buf,
810 RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
811
812 count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
813 }
814
815 radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
816 PKT3_DRAW_INDIRECT_MULTI,
817 8, render_cond_bit));
818 radeon_emit(cs, indirect->offset);
819 radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
820 radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
821 radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
822 S_2C3_DRAW_INDEX_ENABLE(1) |
823 S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
824 radeon_emit(cs, indirect->draw_count);
825 radeon_emit(cs, count_va);
826 radeon_emit(cs, count_va >> 32);
827 radeon_emit(cs, indirect->stride);
828 radeon_emit(cs, di_src_sel);
829 }
830 } else {
831 int base_vertex;
832
833 if (sctx->last_instance_count == SI_INSTANCE_COUNT_UNKNOWN ||
834 sctx->last_instance_count != instance_count) {
835 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
836 radeon_emit(cs, instance_count);
837 sctx->last_instance_count = instance_count;
838 }
839
840 /* Base vertex and start instance. */
841 base_vertex = original_index_size ? info->index_bias : info->start;
842
843 if (sctx->num_vs_blit_sgprs) {
844 /* Re-emit draw constants after we leave u_blitter. */
845 si_invalidate_draw_sh_constants(sctx);
846
847 /* Blit VS doesn't use BASE_VERTEX, START_INSTANCE, and DRAWID. */
848 radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_VS_BLIT_DATA * 4,
849 sctx->num_vs_blit_sgprs);
850 radeon_emit_array(cs, sctx->vs_blit_sh_data,
851 sctx->num_vs_blit_sgprs);
852 } else if (base_vertex != sctx->last_base_vertex ||
853 sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
854 info->start_instance != sctx->last_start_instance ||
855 info->drawid != sctx->last_drawid ||
856 sh_base_reg != sctx->last_sh_base_reg) {
857 radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
858 radeon_emit(cs, base_vertex);
859 radeon_emit(cs, info->start_instance);
860 radeon_emit(cs, info->drawid);
861
862 sctx->last_base_vertex = base_vertex;
863 sctx->last_start_instance = info->start_instance;
864 sctx->last_drawid = info->drawid;
865 sctx->last_sh_base_reg = sh_base_reg;
866 }
867
868 if (index_size) {
869 if (dispatch_prim_discard_cs) {
870 index_va += info->start * original_index_size;
871 index_max_size = MIN2(index_max_size, info->count);
872
873 si_dispatch_prim_discard_cs_and_draw(sctx, info,
874 original_index_size,
875 base_vertex,
876 index_va, index_max_size);
877 return;
878 }
879
880 index_va += info->start * index_size;
881
882 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
883 radeon_emit(cs, index_max_size);
884 radeon_emit(cs, index_va);
885 radeon_emit(cs, index_va >> 32);
886 radeon_emit(cs, info->count);
887 radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
888 } else {
889 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
890 radeon_emit(cs, info->count);
891 radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
892 S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
893 }
894 }
895 }
896
897 void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs,
898 unsigned cp_coher_cntl)
899 {
900 bool compute_ib = !sctx->has_graphics ||
901 cs == sctx->prim_discard_compute_cs;
902
903 if (sctx->chip_class >= GFX9 || compute_ib) {
904 /* Flush caches and wait for the caches to assert idle. */
905 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
906 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
907 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
908 radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
909 radeon_emit(cs, 0); /* CP_COHER_BASE */
910 radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
911 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
912 } else {
 913 		/* ACQUIRE_MEM is only required on a compute ring, so SURFACE_SYNC suffices here. */
914 radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
915 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
916 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
917 radeon_emit(cs, 0); /* CP_COHER_BASE */
918 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
919 }
920
921 /* ACQUIRE_MEM has an implicit context roll if the current context
922 * is busy. */
923 if (!compute_ib)
924 sctx->context_roll = true;
925 }
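 /* A minimal usage sketch, mirroring the calls in si_emit_cache_flush
 * below: invalidating only the per-CU vector L1 cache would be
 *
 *   si_emit_surface_sync(sctx, sctx->gfx_cs,
 *                        S_0085F0_TCL1_ACTION_ENA(1));
 *
 * and adding S_0085F0_TC_ACTION_ENA(1) extends the invalidation to L2.
 */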
926
927 void si_prim_discard_signal_next_compute_ib_start(struct si_context *sctx)
928 {
929 if (!si_compute_prim_discard_enabled(sctx))
930 return;
931
932 if (!sctx->barrier_buf) {
933 u_suballocator_alloc(sctx->allocator_zeroed_memory, 4, 4,
934 &sctx->barrier_buf_offset,
935 (struct pipe_resource**)&sctx->barrier_buf);
936 }
937
938 /* Emit a placeholder to signal the next compute IB to start.
939 * See si_compute_prim_discard.c for explanation.
940 */
941 uint32_t signal = 1;
942 si_cp_write_data(sctx, sctx->barrier_buf, sctx->barrier_buf_offset,
943 4, V_370_MEM, V_370_ME, &signal);
944
945 sctx->last_pkt3_write_data =
946 &sctx->gfx_cs->current.buf[sctx->gfx_cs->current.cdw - 5];
947
 948 	/* Only the last occurrence of WRITE_DATA will be executed.
949 * The packet will be enabled in si_flush_gfx_cs.
950 */
951 *sctx->last_pkt3_write_data = PKT3(PKT3_NOP, 3, 0);
952 }
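 /* Design note: WRITE_DATA with a 4-byte payload occupies exactly 5 dwords
 * (header, CONTROL, two address dwords, one data dword), which is why
 * last_pkt3_write_data points at current.cdw - 5 above. Overwriting the
 * header with PKT3(NOP, 3, 0) makes the CP skip those same 5 dwords, so
 * the signal stays disabled until si_flush_gfx_cs re-enables it.
 */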
953
954 void si_emit_cache_flush(struct si_context *sctx)
955 {
956 struct radeon_cmdbuf *cs = sctx->gfx_cs;
957 uint32_t flags = sctx->flags;
958
959 if (!sctx->has_graphics) {
960 /* Only process compute flags. */
961 flags &= SI_CONTEXT_INV_ICACHE |
962 SI_CONTEXT_INV_SCACHE |
963 SI_CONTEXT_INV_VCACHE |
964 SI_CONTEXT_INV_L2 |
965 SI_CONTEXT_WB_L2 |
966 SI_CONTEXT_INV_L2_METADATA |
967 SI_CONTEXT_CS_PARTIAL_FLUSH;
968 }
969
970 uint32_t cp_coher_cntl = 0;
971 const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
972 SI_CONTEXT_FLUSH_AND_INV_DB);
973 const bool is_barrier = flush_cb_db ||
974 /* INV_ICACHE == beginning of gfx IB. Checking
975 * INV_ICACHE fixes corruption for DeusExMD with
976 * compute-based culling, but I don't know why.
977 */
978 flags & (SI_CONTEXT_INV_ICACHE |
979 SI_CONTEXT_PS_PARTIAL_FLUSH |
980 SI_CONTEXT_VS_PARTIAL_FLUSH) ||
981 (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
982 sctx->compute_is_busy);
983
984 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
985 sctx->num_cb_cache_flushes++;
986 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
987 sctx->num_db_cache_flushes++;
988
989 /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
990 * bit is set. An alternative way is to write SQC_CACHES, but that
991 * doesn't seem to work reliably. Since the bug doesn't affect
992 * correctness (it only does more work than necessary) and
993 * the performance impact is likely negligible, there is no plan
994 * to add a workaround for it.
995 */
996
997 if (flags & SI_CONTEXT_INV_ICACHE)
998 cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
999 if (flags & SI_CONTEXT_INV_SCACHE)
1000 cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
1001
1002 if (sctx->chip_class <= GFX8) {
1003 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1004 cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
1005 S_0085F0_CB0_DEST_BASE_ENA(1) |
1006 S_0085F0_CB1_DEST_BASE_ENA(1) |
1007 S_0085F0_CB2_DEST_BASE_ENA(1) |
1008 S_0085F0_CB3_DEST_BASE_ENA(1) |
1009 S_0085F0_CB4_DEST_BASE_ENA(1) |
1010 S_0085F0_CB5_DEST_BASE_ENA(1) |
1011 S_0085F0_CB6_DEST_BASE_ENA(1) |
1012 S_0085F0_CB7_DEST_BASE_ENA(1);
1013
1014 /* Necessary for DCC */
1015 if (sctx->chip_class == GFX8)
1016 si_cp_release_mem(sctx, cs,
1017 V_028A90_FLUSH_AND_INV_CB_DATA_TS,
1018 0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
1019 EOP_DATA_SEL_DISCARD, NULL,
1020 0, 0, SI_NOT_QUERY);
1021 }
1022 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
1023 cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
1024 S_0085F0_DB_DEST_BASE_ENA(1);
1025 }
1026
1027 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1028 /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
1029 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1030 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
1031 }
1032 if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB |
1033 SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
1034 /* Flush HTILE. SURFACE_SYNC will wait for idle. */
1035 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1036 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
1037 }
1038
1039 /* Wait for shader engines to go idle.
1040 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
1041 * for everything including CB/DB cache flushes.
1042 */
1043 if (!flush_cb_db) {
1044 if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
1045 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1046 radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1047 /* Only count explicit shader flushes, not implicit ones
1048 * done by SURFACE_SYNC.
1049 */
1050 sctx->num_vs_flushes++;
1051 sctx->num_ps_flushes++;
1052 } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
1053 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1054 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1055 sctx->num_vs_flushes++;
1056 }
1057 }
1058
1059 if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
1060 sctx->compute_is_busy) {
1061 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1062 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1063 sctx->num_cs_flushes++;
1064 sctx->compute_is_busy = false;
1065 }
1066
1067 /* VGT state synchronization. */
1068 if (flags & SI_CONTEXT_VGT_FLUSH) {
1069 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1070 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1071 }
1072 if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
1073 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1074 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
1075 }
1076
1077 /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
1078 * wait for idle on GFX9. We have to use a TS event.
1079 */
1080 if (sctx->chip_class >= GFX9 && flush_cb_db) {
1081 uint64_t va;
1082 unsigned tc_flags, cb_db_event;
1083
1084 /* Set the CB/DB flush event. */
1085 switch (flush_cb_db) {
1086 case SI_CONTEXT_FLUSH_AND_INV_CB:
1087 cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
1088 break;
1089 case SI_CONTEXT_FLUSH_AND_INV_DB:
1090 cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
1091 break;
1092 default:
1093 /* both CB & DB */
1094 cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1095 }
1096
1097 /* These are the only allowed combinations. If you need to
1098 * do multiple operations at once, do them separately.
1099 * All operations that invalidate L2 also seem to invalidate
1100 * metadata. Volatile (VOL) and WC flushes are not listed here.
1101 *
1102 * TC | TC_WB = writeback & invalidate L2 & L1
1103 * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
1104 * TC_WB | TC_NC = writeback L2 for MTYPE == NC
1105 * TC | TC_NC = invalidate L2 for MTYPE == NC
1106 * TC | TC_MD = writeback & invalidate L2 metadata (DCC, etc.)
1107 * TCL1 = invalidate L1
1108 */
1109 tc_flags = 0;
1110
1111 if (flags & SI_CONTEXT_INV_L2_METADATA) {
1112 tc_flags = EVENT_TC_ACTION_ENA |
1113 EVENT_TC_MD_ACTION_ENA;
1114 }
1115
1116 /* Ideally flush TC together with CB/DB. */
1117 if (flags & SI_CONTEXT_INV_L2) {
1118 /* Writeback and invalidate everything in L2 & L1. */
1119 tc_flags = EVENT_TC_ACTION_ENA |
1120 EVENT_TC_WB_ACTION_ENA;
1121
1122 /* Clear the flags. */
1123 flags &= ~(SI_CONTEXT_INV_L2 |
1124 SI_CONTEXT_WB_L2 |
1125 SI_CONTEXT_INV_VCACHE);
1126 sctx->num_L2_invalidates++;
1127 }
1128
1129 /* Do the flush (enqueue the event and wait for it). */
1130 va = sctx->wait_mem_scratch->gpu_address;
1131 sctx->wait_mem_number++;
1132
1133 si_cp_release_mem(sctx, cs, cb_db_event, tc_flags,
1134 EOP_DST_SEL_MEM,
1135 EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
1136 EOP_DATA_SEL_VALUE_32BIT,
1137 sctx->wait_mem_scratch, va,
1138 sctx->wait_mem_number, SI_NOT_QUERY);
1139 si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff,
1140 WAIT_REG_MEM_EQUAL);
1141 }
1142
1143 /* Make sure ME is idle (it executes most packets) before continuing.
1144 * This prevents read-after-write hazards between PFP and ME.
1145 */
1146 if (sctx->has_graphics &&
1147 (cp_coher_cntl ||
1148 (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
1149 SI_CONTEXT_INV_VCACHE |
1150 SI_CONTEXT_INV_L2 |
1151 SI_CONTEXT_WB_L2)))) {
1152 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1153 radeon_emit(cs, 0);
1154 }
1155
1156 /* GFX6-GFX8 only:
1157 * When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
1158 * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
1159 *
1160 * cp_coher_cntl should contain all necessary flags except TC flags
1161 * at this point.
1162 *
1163 * GFX6-GFX7 don't support L2 write-back.
1164 */
1165 if (flags & SI_CONTEXT_INV_L2 ||
1166 (sctx->chip_class <= GFX7 &&
1167 (flags & SI_CONTEXT_WB_L2))) {
1168 /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
1169 * WB must be set on GFX8+ when TC_ACTION is set.
1170 */
1171 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
1172 S_0085F0_TC_ACTION_ENA(1) |
1173 S_0085F0_TCL1_ACTION_ENA(1) |
1174 S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
1175 cp_coher_cntl = 0;
1176 sctx->num_L2_invalidates++;
1177 } else {
1178 /* L1 invalidation and L2 writeback must be done separately,
1179 * because both operations can't be done together.
1180 */
1181 if (flags & SI_CONTEXT_WB_L2) {
1182 /* WB = write-back
1183 * NC = apply to non-coherent MTYPEs
1184 * (i.e. MTYPE <= 1, which is what we use everywhere)
1185 *
1186 * WB doesn't work without NC.
1187 */
1188 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
1189 S_0301F0_TC_WB_ACTION_ENA(1) |
1190 S_0301F0_TC_NC_ACTION_ENA(1));
1191 cp_coher_cntl = 0;
1192 sctx->num_L2_writebacks++;
1193 }
1194 if (flags & SI_CONTEXT_INV_VCACHE) {
1195 /* Invalidate per-CU VMEM L1. */
1196 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
1197 S_0085F0_TCL1_ACTION_ENA(1));
1198 cp_coher_cntl = 0;
1199 }
1200 }
1201
1202 /* If TC flushes haven't cleared this... */
1203 if (cp_coher_cntl)
1204 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl);
1205
1206 if (is_barrier)
1207 si_prim_discard_signal_next_compute_ib_start(sctx);
1208
1209 if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
1210 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1211 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
1212 EVENT_INDEX(0));
1213 } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
1214 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1215 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
1216 EVENT_INDEX(0));
1217 }
1218
1219 sctx->flags = 0;
1220 }
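 /* Summary of the ordering above: explicit EVENT_WRITE flushes come first,
 * then the GFX9 RELEASE_MEM + WAIT_REG_MEM round trip for CB/DB, then
 * PFP_SYNC_ME so the PFP can't run ahead of the ME, and finally the
 * SURFACE_SYNC packets, which must be last on GFX6-GFX8 because they wait
 * for idle when a DEST_BASE flag is set.
 */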
1221
1222 static void si_get_draw_start_count(struct si_context *sctx,
1223 const struct pipe_draw_info *info,
1224 unsigned *start, unsigned *count)
1225 {
1226 struct pipe_draw_indirect_info *indirect = info->indirect;
1227
1228 if (indirect) {
1229 unsigned indirect_count;
1230 struct pipe_transfer *transfer;
1231 unsigned begin, end;
1232 unsigned map_size;
1233 unsigned *data;
1234
1235 if (indirect->indirect_draw_count) {
1236 data = pipe_buffer_map_range(&sctx->b,
1237 indirect->indirect_draw_count,
1238 indirect->indirect_draw_count_offset,
1239 sizeof(unsigned),
1240 PIPE_TRANSFER_READ, &transfer);
1241
1242 indirect_count = *data;
1243
1244 pipe_buffer_unmap(&sctx->b, transfer);
1245 } else {
1246 indirect_count = indirect->draw_count;
1247 }
1248
1249 if (!indirect_count) {
1250 *start = *count = 0;
1251 return;
1252 }
1253
1254 map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
1255 data = pipe_buffer_map_range(&sctx->b, indirect->buffer,
1256 indirect->offset, map_size,
1257 PIPE_TRANSFER_READ, &transfer);
1258
1259 begin = UINT_MAX;
1260 end = 0;
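		/* Each indirect record is assumed to begin with
		 * { count, instance_count, start, ... } (the standard
		 * draw-indirect argument layout), which is why data[0] and
		 * data[2] are read below and why map_size covers three
		 * unsigneds of the last record.
		 */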
1261
1262 for (unsigned i = 0; i < indirect_count; ++i) {
1263 unsigned count = data[0];
1264 unsigned start = data[2];
1265
1266 if (count > 0) {
1267 begin = MIN2(begin, start);
1268 end = MAX2(end, start + count);
1269 }
1270
1271 data += indirect->stride / sizeof(unsigned);
1272 }
1273
1274 pipe_buffer_unmap(&sctx->b, transfer);
1275
1276 if (begin < end) {
1277 *start = begin;
1278 *count = end - begin;
1279 } else {
1280 *start = *count = 0;
1281 }
1282 } else {
1283 *start = info->start;
1284 *count = info->count;
1285 }
1286 }
1287
1288 static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
1289 enum pipe_prim_type prim, unsigned instance_count,
1290 bool primitive_restart, unsigned skip_atom_mask)
1291 {
1292 unsigned num_patches = 0;
1293
1294 si_emit_rasterizer_prim_state(sctx);
1295 if (sctx->tes_shader.cso)
1296 si_emit_derived_tess_state(sctx, info, &num_patches);
1297
1298 /* Emit state atoms. */
1299 unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
1300 while (mask)
1301 sctx->atoms.array[u_bit_scan(&mask)].emit(sctx);
1302
1303 sctx->dirty_atoms &= skip_atom_mask;
1304
1305 /* Emit states. */
1306 mask = sctx->dirty_states;
1307 while (mask) {
1308 unsigned i = u_bit_scan(&mask);
1309 struct si_pm4_state *state = sctx->queued.array[i];
1310
1311 if (!state || sctx->emitted.array[i] == state)
1312 continue;
1313
1314 si_pm4_emit(sctx, state);
1315 sctx->emitted.array[i] = state;
1316 }
1317 sctx->dirty_states = 0;
1318
1319 /* Emit draw states. */
1320 si_emit_vs_state(sctx, info);
1321 si_emit_draw_registers(sctx, info, prim, num_patches, instance_count,
1322 primitive_restart);
1323 }
1324
1325 static bool
1326 si_all_vs_resources_read_only(struct si_context *sctx,
1327 struct pipe_resource *indexbuf)
1328 {
1329 struct radeon_winsys *ws = sctx->ws;
1330 struct radeon_cmdbuf *cs = sctx->gfx_cs;
1331
1332 /* Index buffer. */
1333 if (indexbuf &&
1334 ws->cs_is_buffer_referenced(cs, si_resource(indexbuf)->buf,
1335 RADEON_USAGE_WRITE))
1336 goto has_write_reference;
1337
1338 /* Vertex buffers. */
1339 struct si_vertex_elements *velems = sctx->vertex_elements;
1340 unsigned num_velems = velems->count;
1341
1342 for (unsigned i = 0; i < num_velems; i++) {
1343 if (!((1 << i) & velems->first_vb_use_mask))
1344 continue;
1345
1346 unsigned vb_index = velems->vertex_buffer_index[i];
1347 struct pipe_resource *res = sctx->vertex_buffer[vb_index].buffer.resource;
1348 if (!res)
1349 continue;
1350
1351 if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
1352 RADEON_USAGE_WRITE))
1353 goto has_write_reference;
1354 }
1355
1356 /* Constant and shader buffers. */
1357 struct si_descriptors *buffers =
1358 &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_VERTEX)];
1359 for (unsigned i = 0; i < buffers->num_active_slots; i++) {
1360 unsigned index = buffers->first_active_slot + i;
1361 struct pipe_resource *res =
1362 sctx->const_and_shader_buffers[PIPE_SHADER_VERTEX].buffers[index];
1363 if (!res)
1364 continue;
1365
1366 if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
1367 RADEON_USAGE_WRITE))
1368 goto has_write_reference;
1369 }
1370
1371 /* Samplers. */
1372 struct si_shader_selector *vs = sctx->vs_shader.cso;
1373 if (vs->info.samplers_declared) {
1374 unsigned num_samplers = util_last_bit(vs->info.samplers_declared);
1375
1376 for (unsigned i = 0; i < num_samplers; i++) {
1377 struct pipe_sampler_view *view = sctx->samplers[PIPE_SHADER_VERTEX].views[i];
1378 if (!view)
1379 continue;
1380
1381 if (ws->cs_is_buffer_referenced(cs,
1382 si_resource(view->texture)->buf,
1383 RADEON_USAGE_WRITE))
1384 goto has_write_reference;
1385 }
1386 }
1387
1388 /* Images. */
1389 if (vs->info.images_declared) {
1390 unsigned num_images = util_last_bit(vs->info.images_declared);
1391
1392 for (unsigned i = 0; i < num_images; i++) {
1393 struct pipe_resource *res = sctx->images[PIPE_SHADER_VERTEX].views[i].resource;
1394 if (!res)
1395 continue;
1396
1397 if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
1398 RADEON_USAGE_WRITE))
1399 goto has_write_reference;
1400 }
1401 }
1402
1403 return true;
1404
1405 has_write_reference:
1406 /* If the current gfx IB has enough packets, flush it to remove write
1407 * references to buffers.
1408 */
1409 if (cs->prev_dw + cs->current.cdw > 2048) {
1410 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
1411 assert(si_all_vs_resources_read_only(sctx, indexbuf));
1412 return true;
1413 }
1414 return false;
1415 }
1416
1417 static ALWAYS_INLINE bool pd_msg(const char *s)
1418 {
1419 if (SI_PRIM_DISCARD_DEBUG)
1420 printf("PD failed: %s\n", s);
1421 return false;
1422 }
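 /* pd_msg() always returns false, so it can be ORed into the eligibility
 * chain below: each clause either passes or short-circuits to pd_msg(),
 * which logs the reason for rejecting compute-based primitive discard
 * when SI_PRIM_DISCARD_DEBUG is set.
 */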
1423
1424 static void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
1425 {
1426 struct si_context *sctx = (struct si_context *)ctx;
1427 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
1428 struct pipe_resource *indexbuf = info->index.resource;
1429 unsigned dirty_tex_counter, dirty_buf_counter;
1430 enum pipe_prim_type rast_prim, prim = info->mode;
1431 unsigned index_size = info->index_size;
1432 unsigned index_offset = info->indirect ? info->start * index_size : 0;
1433 unsigned instance_count = info->instance_count;
1434 bool primitive_restart = info->primitive_restart &&
1435 (!sctx->screen->options.prim_restart_tri_strips_only ||
1436 (prim != PIPE_PRIM_TRIANGLE_STRIP &&
1437 prim != PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY));
1438
1439 if (likely(!info->indirect)) {
1440 /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
1441 * no workaround for indirect draws, but we can at least skip
1442 * direct draws.
1443 */
1444 if (unlikely(!instance_count))
1445 return;
1446
1447 /* Handle count == 0. */
1448 if (unlikely(!info->count &&
1449 (index_size || !info->count_from_stream_output)))
1450 return;
1451 }
1452
1453 if (unlikely(!sctx->vs_shader.cso ||
1454 !rs ||
1455 (!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
1456 (!!sctx->tes_shader.cso != (prim == PIPE_PRIM_PATCHES)))) {
1457 assert(0);
1458 return;
1459 }
1460
1461 /* Recompute and re-emit the texture resource states if needed. */
1462 dirty_tex_counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
1463 if (unlikely(dirty_tex_counter != sctx->last_dirty_tex_counter)) {
1464 sctx->last_dirty_tex_counter = dirty_tex_counter;
1465 sctx->framebuffer.dirty_cbufs |=
1466 ((1 << sctx->framebuffer.state.nr_cbufs) - 1);
1467 sctx->framebuffer.dirty_zsbuf = true;
1468 si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
1469 si_update_all_texture_descriptors(sctx);
1470 }
1471
1472 dirty_buf_counter = p_atomic_read(&sctx->screen->dirty_buf_counter);
1473 if (unlikely(dirty_buf_counter != sctx->last_dirty_buf_counter)) {
1474 sctx->last_dirty_buf_counter = dirty_buf_counter;
1475 /* Rebind all buffers unconditionally. */
1476 si_rebind_buffer(sctx, NULL);
1477 }
1478
1479 si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
1480
1481 /* Set the rasterization primitive type.
1482 *
1483 * This must be done after si_decompress_textures, which can call
1484 * draw_vbo recursively, and before si_update_shaders, which uses
1485 * current_rast_prim for this draw_vbo call. */
1486 if (sctx->gs_shader.cso)
1487 rast_prim = sctx->gs_shader.cso->gs_output_prim;
1488 else if (sctx->tes_shader.cso) {
1489 if (sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
1490 rast_prim = PIPE_PRIM_POINTS;
1491 else
1492 rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
1493 } else
1494 rast_prim = prim;
1495
1496 if (rast_prim != sctx->current_rast_prim) {
1497 if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
1498 util_prim_is_points_or_lines(rast_prim))
1499 si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
1500
1501 sctx->current_rast_prim = rast_prim;
1502 sctx->do_update_shaders = true;
1503 }
1504
1505 if (sctx->tes_shader.cso &&
1506 sctx->screen->has_ls_vgpr_init_bug) {
1507 /* Determine whether the LS VGPR fix should be applied.
1508 *
1509 * It is only required when num input CPs > num output CPs,
1510 * which cannot happen with the fixed function TCS. We should
1511 * also update this bit when switching from TCS to fixed
1512 * function TCS.
1513 */
1514 struct si_shader_selector *tcs = sctx->tcs_shader.cso;
1515 bool ls_vgpr_fix =
1516 tcs &&
1517 info->vertices_per_patch >
1518 tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
1519
1520 if (ls_vgpr_fix != sctx->ls_vgpr_fix) {
1521 sctx->ls_vgpr_fix = ls_vgpr_fix;
1522 sctx->do_update_shaders = true;
1523 }
1524 }
1525
1526 if (sctx->gs_shader.cso) {
1527 /* Determine whether the GS triangle strip adjacency fix should
1528 * be applied. Rotate every other triangle if
1529 * - triangle strips with adjacency are fed to the GS and
1530 * - primitive restart is disabled (the rotation doesn't help
1531 * when the restart occurs after an odd number of triangles).
1532 */
1533 bool gs_tri_strip_adj_fix =
1534 !sctx->tes_shader.cso &&
1535 prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
1536 !primitive_restart;
1537
1538 if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
1539 sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
1540 sctx->do_update_shaders = true;
1541 }
1542 }
1543
1544 if (index_size) {
1545 /* Translate or upload, if needed. */
 1546 		/* 8-bit indices are only supported natively on GFX8+; translate on older chips. */
1547 if (sctx->chip_class <= GFX7 && index_size == 1) {
1548 unsigned start, count, start_offset, size, offset;
1549 void *ptr;
1550
1551 si_get_draw_start_count(sctx, info, &start, &count);
1552 start_offset = start * 2;
1553 size = count * 2;
1554
1555 indexbuf = NULL;
1556 u_upload_alloc(ctx->stream_uploader, start_offset,
1557 size,
1558 si_optimal_tcc_alignment(sctx, size),
1559 &offset, &indexbuf, &ptr);
1560 if (!indexbuf)
1561 return;
1562
1563 util_shorten_ubyte_elts_to_userptr(&sctx->b, info, 0, 0,
1564 index_offset + start,
1565 count, ptr);
1566
1567 /* info->start will be added by the drawing code */
1568 index_offset = offset - start_offset;
1569 index_size = 2;
1570 } else if (info->has_user_indices) {
1571 unsigned start_offset;
1572
1573 assert(!info->indirect);
1574 start_offset = info->start * index_size;
1575
1576 indexbuf = NULL;
1577 u_upload_data(ctx->stream_uploader, start_offset,
1578 info->count * index_size,
1579 sctx->screen->info.tcc_cache_line_size,
1580 (char*)info->index.user + start_offset,
1581 &index_offset, &indexbuf);
1582 if (!indexbuf)
1583 return;
1584
1585 /* info->start will be added by the drawing code */
1586 index_offset -= start_offset;
1587 } else if (sctx->chip_class <= GFX7 &&
1588 si_resource(indexbuf)->TC_L2_dirty) {
1589 /* GFX8 reads index buffers through TC L2, so it doesn't
1590 * need this. */
1591 sctx->flags |= SI_CONTEXT_WB_L2;
1592 si_resource(indexbuf)->TC_L2_dirty = false;
1593 }
1594 }

bool dispatch_prim_discard_cs = false;
bool prim_discard_cs_instancing = false;
unsigned original_index_size = index_size;
unsigned direct_count = 0;

if (info->indirect) {
	struct pipe_draw_indirect_info *indirect = info->indirect;

	/* Add the buffer size for memory checking in
	 * si_need_gfx_cs_space. */
	si_context_add_resource_size(sctx, indirect->buffer);

	/* Indirect buffers are read through TC L2 on GFX9, but older
	 * chips need an explicit writeback when the buffer is dirty. */
	if (sctx->chip_class <= GFX8) {
		if (si_resource(indirect->buffer)->TC_L2_dirty) {
			sctx->flags |= SI_CONTEXT_WB_L2;
			si_resource(indirect->buffer)->TC_L2_dirty = false;
		}

		if (indirect->indirect_draw_count &&
		    si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
			sctx->flags |= SI_CONTEXT_WB_L2;
			si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
		}
	}
} else {
	/* Multiply by 3 for strips and fans to get an approximate vertex
	 * count as triangles. */
	direct_count = info->count * instance_count *
		       (prim == PIPE_PRIM_TRIANGLES ? 1 : 3);
}
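/* E.g. a 90-vertex triangle strip drawn twice is estimated as
 * 90 * 2 * 3 = 540 vertices; the exact count as a triangle list would
 * be 3 * (90 - 2) * 2 = 528, so the estimate slightly overshoots,
 * which is fine for a threshold check. */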

/* Determine if we can use the primitive discard compute shader. */
if (si_compute_prim_discard_enabled(sctx) &&
    (direct_count > sctx->prim_discard_vertex_count_threshold ?
     (sctx->compute_num_verts_rejected += direct_count, true) : /* Add, then return true. */
     (sctx->compute_num_verts_ineligible += direct_count, false)) && /* Add, then return false. */
    (!info->count_from_stream_output || pd_msg("draw_opaque")) &&
    (primitive_restart ?
     /* Supported prim types with primitive restart: */
     (prim == PIPE_PRIM_TRIANGLE_STRIP || pd_msg("bad prim type with primitive restart")) &&
     /* Disallow instancing with primitive restart: */
     (instance_count == 1 || pd_msg("instance_count > 1 with primitive restart")) :
     /* Supported prim types without primitive restart + allow instancing: */
     (1 << prim) & ((1 << PIPE_PRIM_TRIANGLES) |
		    (1 << PIPE_PRIM_TRIANGLE_STRIP) |
		    (1 << PIPE_PRIM_TRIANGLE_FAN)) &&
     /* Instancing is limited to 16-bit indices, because InstanceID is packed into VertexID. */
     /* TODO: DrawArraysInstanced sometimes doesn't work, so it's disabled. */
     (instance_count == 1 ||
      (instance_count <= USHRT_MAX && index_size && index_size <= 2) ||
      pd_msg("instance_count too large or index_size == 4 or DrawArraysInstanced"))) &&
    (info->drawid == 0 || !sctx->vs_shader.cso->info.uses_drawid || pd_msg("draw_id > 0")) &&
    (!sctx->render_cond || pd_msg("render condition")) &&
    /* Forced enablement ignores pipeline statistics queries. */
    (sctx->screen->debug_flags & (DBG(PD) | DBG(ALWAYS_PD)) ||
     (!sctx->num_pipeline_stat_queries && !sctx->streamout.prims_gen_query_enabled) ||
     pd_msg("pipestat or primgen query")) &&
    (!sctx->vertex_elements->instance_divisor_is_fetched || pd_msg("loads instance divisors")) &&
    (!sctx->tes_shader.cso || pd_msg("uses tess")) &&
    (!sctx->gs_shader.cso || pd_msg("uses GS")) &&
    (!sctx->ps_shader.cso->info.uses_primid || pd_msg("PS uses PrimID")) &&
#if SI_PRIM_DISCARD_DEBUG /* same as cso->prim_discard_cs_allowed */
    (!sctx->vs_shader.cso->info.uses_bindless_images || pd_msg("uses bindless images")) &&
    (!sctx->vs_shader.cso->info.uses_bindless_samplers || pd_msg("uses bindless samplers")) &&
    (!sctx->vs_shader.cso->info.writes_memory || pd_msg("writes memory")) &&
    (!sctx->vs_shader.cso->info.writes_viewport_index || pd_msg("writes viewport index")) &&
    !sctx->vs_shader.cso->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] &&
    !sctx->vs_shader.cso->so.num_outputs &&
#else
    (sctx->vs_shader.cso->prim_discard_cs_allowed || pd_msg("VS uses unsupported features")) &&
#endif
    /* Check that all buffers are used for read only, because compute
     * dispatches can run ahead. */
    (si_all_vs_resources_read_only(sctx, index_size ? indexbuf : NULL) || pd_msg("write reference"))) {
	switch (si_prepare_prim_discard_or_split_draw(sctx, info, primitive_restart)) {
	case SI_PRIM_DISCARD_ENABLED:
		original_index_size = index_size;
		prim_discard_cs_instancing = instance_count > 1;
		dispatch_prim_discard_cs = true;

		/* The compute shader changes/lowers the following: */
		prim = PIPE_PRIM_TRIANGLES;
		index_size = 4;
		instance_count = 1;
		primitive_restart = false;
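		/* Instancing, if any, is folded into the rewritten draw
		 * by packing InstanceID into VertexID (see the instancing
		 * limitation above), so the lowered draw is a plain,
		 * non-instanced, 32-bit-indexed triangle list. */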
		sctx->compute_num_verts_rejected -= direct_count;
		sctx->compute_num_verts_accepted += direct_count;
		break;
	case SI_PRIM_DISCARD_DISABLED:
		break;
	case SI_PRIM_DISCARD_DRAW_SPLIT:
		sctx->compute_num_verts_rejected -= direct_count;
		goto return_cleanup;
	}
}
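/* Note the counter bookkeeping: the eligibility check above optimistically
 * counted the vertices as rejected, so the ENABLED case moves them to the
 * accepted bucket, and the DRAW_SPLIT case backs them out entirely (the
 * split draws presumably re-enter this function and are counted again). */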

if (prim_discard_cs_instancing != sctx->prim_discard_cs_instancing) {
	sctx->prim_discard_cs_instancing = prim_discard_cs_instancing;
	sctx->do_update_shaders = true;
}

if (sctx->do_update_shaders && !si_update_shaders(sctx))
	goto return_cleanup;

si_need_gfx_cs_space(sctx);

if (sctx->bo_list_add_all_gfx_resources)
	si_gfx_resources_add_all_to_bo_list(sctx);

/* Since we've called si_context_add_resource_size for vertex buffers,
 * this must be called after si_need_gfx_cs_space, because we must let
 * it flush before we add buffers to the buffer list.
 */
if (!si_upload_vertex_buffer_descriptors(sctx))
	goto return_cleanup;

/* Vega10/Raven scissor bug workaround. When any context register is
 * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
 * registers must be written too.
 */
bool has_gfx9_scissor_bug = sctx->screen->has_gfx9_scissor_bug;
unsigned masked_atoms = 0;

if (has_gfx9_scissor_bug) {
	masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);

	if (info->count_from_stream_output ||
	    sctx->dirty_atoms & si_atoms_that_always_roll_context() ||
	    sctx->dirty_states & si_states_that_always_roll_context())
		sctx->context_roll = true;
}

/* Use optimal packet order based on whether we need to sync the pipeline. */
if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
			    SI_CONTEXT_FLUSH_AND_INV_DB |
			    SI_CONTEXT_PS_PARTIAL_FLUSH |
			    SI_CONTEXT_CS_PARTIAL_FLUSH))) {
	/* If we have to wait for idle, set all states first, so that all
	 * SET packets are processed in parallel with previous draw calls.
	 * Then draw and prefetch at the end. This ensures that the time
	 * the CUs are idle is very short.
	 */
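	/* In short: states -> flush -> draw -> prefetch here, versus
	 * flush -> prefetch -> states -> draw in the else branch below. */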
	if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
		masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);

	if (!si_upload_graphics_shader_descriptors(sctx))
		goto return_cleanup;

	/* Emit all states except possibly render condition. */
	si_emit_all_states(sctx, info, prim, instance_count,
			   primitive_restart, masked_atoms);
	si_emit_cache_flush(sctx);
	/* <-- CUs are idle here. */

	if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
		sctx->atoms.s.render_cond.emit(sctx);

	if (has_gfx9_scissor_bug &&
	    (sctx->context_roll ||
	     si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
		sctx->atoms.s.scissors.emit(sctx);

	sctx->dirty_atoms = 0;

	si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset,
			     instance_count, dispatch_prim_discard_cs,
			     original_index_size);
	/* <-- CUs are busy here. */

	/* Start prefetches after the draw has been started. Both will run
	 * in parallel, but starting the draw first is more important.
	 */
	if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
		cik_emit_prefetch_L2(sctx, false);
} else {
	/* If we don't wait for idle, start prefetches first, then set
	 * states, and draw at the end.
	 */
	if (sctx->flags)
		si_emit_cache_flush(sctx);

	/* Only prefetch the API VS and VBO descriptors. */
	if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
		cik_emit_prefetch_L2(sctx, true);

	if (!si_upload_graphics_shader_descriptors(sctx))
		goto return_cleanup;

	si_emit_all_states(sctx, info, prim, instance_count,
			   primitive_restart, masked_atoms);

	if (has_gfx9_scissor_bug &&
	    (sctx->context_roll ||
	     si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
		sctx->atoms.s.scissors.emit(sctx);

	sctx->dirty_atoms = 0;

	si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset,
			     instance_count, dispatch_prim_discard_cs,
			     original_index_size);

	/* Prefetch the remaining shaders after the draw has been
	 * started. */
	if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
		cik_emit_prefetch_L2(sctx, false);
}

/* Clear the context roll flag after the draw call. */
sctx->context_roll = false;

if (unlikely(sctx->current_saved_cs)) {
	si_trace_emit(sctx);
	si_log_draw_state(sctx, sctx->log);
}

/* Workaround for a VGT hang when streamout is enabled.
 * It must be done after drawing. */
if ((sctx->family == CHIP_HAWAII ||
     sctx->family == CHIP_TONGA ||
     sctx->family == CHIP_FIJI) &&
    si_get_strmout_en(sctx)) {
	sctx->flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
}

if (unlikely(sctx->decompression_enabled)) {
	sctx->num_decompress_calls++;
} else {
	sctx->num_draw_calls++;
	if (sctx->framebuffer.state.nr_cbufs > 1)
		sctx->num_mrt_draw_calls++;
	if (primitive_restart)
		sctx->num_prim_restart_calls++;
	if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
		sctx->num_spill_draw_calls++;
}

return_cleanup:
	if (index_size && indexbuf != info->index.resource)
		pipe_resource_reference(&indexbuf, NULL);
}

static void
si_draw_rectangle(struct blitter_context *blitter,
		  void *vertex_elements_cso,
		  blitter_get_vs_func get_vs,
		  int x1, int y1, int x2, int y2,
		  float depth, unsigned num_instances,
		  enum blitter_attrib_type type,
		  const union blitter_attrib *attrib)
{
	struct pipe_context *pipe = util_blitter_get_pipe(blitter);
	struct si_context *sctx = (struct si_context*)pipe;

	/* Pack position coordinates as signed int16. */
	sctx->vs_blit_sh_data[0] = (uint32_t)(x1 & 0xffff) |
				   ((uint32_t)(y1 & 0xffff) << 16);
	sctx->vs_blit_sh_data[1] = (uint32_t)(x2 & 0xffff) |
				   ((uint32_t)(y2 & 0xffff) << 16);
	sctx->vs_blit_sh_data[2] = fui(depth);
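	/* E.g. x1 = -1, y1 = 3 packs as 0xffff | (0x0003 << 16) = 0x0003ffff;
	 * the blit VS presumably sign-extends each 16-bit half when
	 * unpacking. */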

	switch (type) {
	case UTIL_BLITTER_ATTRIB_COLOR:
		memcpy(&sctx->vs_blit_sh_data[3], attrib->color,
		       sizeof(float)*4);
		break;
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
		memcpy(&sctx->vs_blit_sh_data[3], &attrib->texcoord,
		       sizeof(attrib->texcoord));
		break;
	case UTIL_BLITTER_ATTRIB_NONE:;
	}

	pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));

	struct pipe_draw_info info = {};
	info.mode = SI_PRIM_RECTANGLE_LIST;
	info.count = 3;
	info.instance_count = num_instances;
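	/* A rectangle list primitive takes only 3 vertices per rectangle;
	 * the hardware derives the fourth corner. */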

	/* Don't set per-stage shader pointers for VS. */
	sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(VERTEX);
	sctx->vertex_buffer_pointer_dirty = false;
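	/* Rationale (assumed): the blit VS takes its inputs from the
	 * vs_blit_sh_data words set above rather than from vertex buffers,
	 * so re-emitting the VS descriptor pointers for this draw would be
	 * wasted work. */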

	si_draw_vbo(pipe, &info);
}

void si_trace_emit(struct si_context *sctx)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	uint32_t trace_id = ++sctx->current_saved_cs->trace_id;

	si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf,
			 0, 4, V_370_MEM, V_370_ME, &trace_id);
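	/* Also embed the trace ID in the command stream itself as a NOP
	 * payload, so a hang dump can correlate the last ID written to the
	 * trace buffer with a position in the IB. */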

	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(trace_id));

	if (sctx->log)
		u_log_flush(sctx->log);
}

void si_init_draw_functions(struct si_context *sctx)
{
	sctx->b.draw_vbo = si_draw_vbo;

	sctx->blitter->draw_rectangle = si_draw_rectangle;

	si_init_ia_multi_vgt_param_table(sctx);
}