radeonsi: reduce type sizes in si_shader_selector
[mesa.git] / src/gallium/drivers/radeonsi/si_state_draw.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "ac_debug.h"
26 #include "si_build_pm4.h"
27 #include "sid.h"
28 #include "util/u_index_modify.h"
29 #include "util/u_log.h"
30 #include "util/u_prim.h"
31 #include "util/u_suballoc.h"
32 #include "util/u_upload_mgr.h"
33
34 /* special primitive types */
35 #define SI_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX
36
37 static unsigned si_conv_pipe_prim(unsigned mode)
38 {
39 static const unsigned prim_conv[] = {
40 [PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
41 [PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
42 [PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
43 [PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
44 [PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
45 [PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
46 [PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
47 [PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
48 [PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
49 [PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
50 [PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
51 [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
52 [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
53 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
54 [PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
55 [SI_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST};
56 assert(mode < ARRAY_SIZE(prim_conv));
57 return prim_conv[mode];
58 }
59
60 /**
61 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
62 * LS.LDS_SIZE is shared by all 3 shader stages.
63 *
64 * The information about LDS and other non-compile-time parameters is then
65 * written to userdata SGPRs.
66 */
67 static void si_emit_derived_tess_state(struct si_context *sctx, const struct pipe_draw_info *info,
68 unsigned *num_patches)
69 {
70 struct radeon_cmdbuf *cs = sctx->gfx_cs;
71 struct si_shader *ls_current;
72 struct si_shader_selector *ls;
73 /* The TES pointer will only be used for sctx->last_tcs.
74 * It would be wrong to think that TCS = TES. */
75 struct si_shader_selector *tcs =
76 sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
77 unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
78 bool has_primid_instancing_bug = sctx->chip_class == GFX6 && sctx->screen->info.max_se == 1;
79 unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
80 unsigned num_tcs_input_cp = info->vertices_per_patch;
81 unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
82 unsigned num_tcs_patch_outputs;
83 unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
84 unsigned input_patch_size, output_patch_size, output_patch0_offset;
85 unsigned perpatch_output_offset, lds_size;
86 unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
87 unsigned offchip_layout, hardware_lds_size, ls_hs_config;
88
89 /* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
90 if (sctx->chip_class >= GFX9) {
91 if (sctx->tcs_shader.cso)
92 ls_current = sctx->tcs_shader.current;
93 else
94 ls_current = sctx->fixed_func_tcs_shader.current;
95
96 ls = ls_current->key.part.tcs.ls;
97 } else {
98 ls_current = sctx->vs_shader.current;
99 ls = sctx->vs_shader.cso;
100 }
101
102 if (sctx->last_ls == ls_current && sctx->last_tcs == tcs &&
103 sctx->last_tes_sh_base == tes_sh_base && sctx->last_num_tcs_input_cp == num_tcs_input_cp &&
104 (!has_primid_instancing_bug || (sctx->last_tess_uses_primid == tess_uses_primid))) {
105 *num_patches = sctx->last_num_patches;
106 return;
107 }
108
109 sctx->last_ls = ls_current;
110 sctx->last_tcs = tcs;
111 sctx->last_tes_sh_base = tes_sh_base;
112 sctx->last_num_tcs_input_cp = num_tcs_input_cp;
113 sctx->last_tess_uses_primid = tess_uses_primid;
114
115 /* This calculates how shader inputs and outputs among VS, TCS, and TES
116 * are laid out in LDS. */
117 num_tcs_inputs = util_last_bit64(ls->outputs_written);
118
119 if (sctx->tcs_shader.cso) {
120 num_tcs_outputs = util_last_bit64(tcs->outputs_written);
121 num_tcs_output_cp = tcs->info.base.tess.tcs_vertices_out;
122 num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
123 } else {
124 /* No TCS. Route varyings from LS to TES. */
125 num_tcs_outputs = num_tcs_inputs;
126 num_tcs_output_cp = num_tcs_input_cp;
127 num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
128 }
129
130 input_vertex_size = ls->lshs_vertex_stride;
131 output_vertex_size = num_tcs_outputs * 16;
132
133 input_patch_size = num_tcs_input_cp * input_vertex_size;
134
135 pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
136 output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
137
138 /* Ensure that we only need one wave per SIMD so we don't need to check
139 * resource usage. This also ensures that the number of TCS input and output
140 * vertices per threadgroup is at most 256.
141 */
142 unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
143 *num_patches = 256 / max_verts_per_patch;
144
145 /* Make sure that the data fits in LDS. This assumes the shaders only
146 * use LDS for the inputs and outputs.
147 *
148 * While GFX7 can use 64K per threadgroup, there is a hang on Stoney
149 * with 2 CUs if we use more than 32K. The closed Vulkan driver also
150 * uses 32K at most on all GCN chips.
151 */
152 hardware_lds_size = 32768;
153 *num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
154
155 /* Make sure the output data fits in the offchip buffer */
156 *num_patches =
157 MIN2(*num_patches, (sctx->screen->tess_offchip_block_dw_size * 4) / output_patch_size);
158
159 /* Not necessary for correctness, but improves performance.
160 * The hardware can do more, but the radeonsi shader constant is
161 * limited to 6 bits.
162 */
163 *num_patches = MIN2(*num_patches, 63); /* triangles: 3 full waves except 3 lanes */
164
165 /* When distributed tessellation is unsupported, switch between SEs
166 * at a higher frequency to compensate for it.
167 */
168 if (!sctx->screen->info.has_distributed_tess && sctx->screen->info.max_se > 1)
169 *num_patches = MIN2(*num_patches, 16); /* recommended */
170
171 /* Make sure that vector lanes are reasonably occupied. It probably
172 * doesn't matter much because this is LS-HS, and TES is likely to
173 * occupy significantly more CUs.
174 */
175 unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
176 unsigned wave_size = sctx->screen->ge_wave_size;
177
178 if (temp_verts_per_tg > wave_size && temp_verts_per_tg % wave_size < wave_size * 3 / 4)
179 *num_patches = (temp_verts_per_tg & ~(wave_size - 1)) / max_verts_per_patch;
180
181 if (sctx->chip_class == GFX6) {
182 /* GFX6 bug workaround, related to power management. Limit LS-HS
183 * threadgroups to only one wave.
184 */
185 unsigned one_wave = wave_size / max_verts_per_patch;
186 *num_patches = MIN2(*num_patches, one_wave);
187 }
188
189 /* The VGT HS block increments the patch ID unconditionally
190 * within a single threadgroup. This results in incorrect
191 * patch IDs when instanced draws are used.
192 *
193 * The intended solution is to restrict threadgroups to
194 * a single instance by setting SWITCH_ON_EOI, which
195 * should cause IA to split instances up. However, this
196 * doesn't work correctly on GFX6 when there is no other
197 * SE to switch to.
198 */
199 if (has_primid_instancing_bug && tess_uses_primid)
200 *num_patches = 1;
201
202 sctx->last_num_patches = *num_patches;
203
204 output_patch0_offset = input_patch_size * *num_patches;
205 perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
206
207 /* Compute userdata SGPRs. */
208 assert(((input_vertex_size / 4) & ~0xff) == 0);
209 assert(((output_vertex_size / 4) & ~0xff) == 0);
210 assert(((input_patch_size / 4) & ~0x1fff) == 0);
211 assert(((output_patch_size / 4) & ~0x1fff) == 0);
212 assert(((output_patch0_offset / 16) & ~0xffff) == 0);
213 assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
214 assert(num_tcs_input_cp <= 32);
215 assert(num_tcs_output_cp <= 32);
216
217 uint64_t ring_va = si_resource(sctx->tess_rings)->gpu_address;
218 assert((ring_va & u_bit_consecutive(0, 19)) == 0);
219
220 tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
221 S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
222 tcs_out_layout = (output_patch_size / 4) | (num_tcs_input_cp << 13) | ring_va;
223 tcs_out_offsets = (output_patch0_offset / 16) | ((perpatch_output_offset / 16) << 16);
224 offchip_layout =
225 *num_patches | (num_tcs_output_cp << 6) | (pervertex_output_patch_size * *num_patches << 12);
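/* offchip_layout packs *num_patches into bits [5:0], num_tcs_output_cp into
 * bits [11:6], and pervertex_output_patch_size * *num_patches from bit 12 up,
 * which is why *num_patches is clamped to 63 and num_tcs_output_cp is
 * asserted to be <= 32 above.
 */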
226
227 /* Compute the LDS size. */
228 lds_size = output_patch0_offset + output_patch_size * *num_patches;
229
230 if (sctx->chip_class >= GFX7) {
231 assert(lds_size <= 65536);
232 lds_size = align(lds_size, 512) / 512;
233 } else {
234 assert(lds_size <= 32768);
235 lds_size = align(lds_size, 256) / 256;
236 }
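/* Worked example with hypothetical sizes (not taken from a real shader):
 * 3 input CPs, 3 output CPs, an lshs_vertex_stride of 64 bytes, 8 per-vertex
 * TCS outputs (128 bytes) and 2 per-patch outputs (32 bytes) give
 *    input_patch_size  = 3 * 64       = 192 bytes
 *    output_patch_size = 3 * 128 + 32 = 416 bytes.
 * *num_patches starts at 256 / 3 = 85 and the 32K LDS limit lowers it to
 * 32768 / (192 + 416) = 53. Assuming the off-chip and GFX6 limits above
 * don't bind, the wave-occupancy rounding (wave_size = 64) then lowers it
 * to (159 & ~63) / 3 = 42. The LDS footprint is (192 + 416) * 42 = 25536
 * bytes, i.e. lds_size = 50 in 512-byte units on GFX7+.
 */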
237
238 /* Set SI_SGPR_VS_STATE_BITS. */
239 sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE & C_VS_STATE_LS_OUT_VERTEX_SIZE;
240 sctx->current_vs_state |= tcs_in_layout;
241
242 /* We should be able to support in-shader LDS use with LLVM >= 9
243 * by just adding the lds_sizes together, but it has never
244 * been tested. */
245 assert(ls_current->config.lds_size == 0);
246
247 if (sctx->chip_class >= GFX9) {
248 unsigned hs_rsrc2 = ls_current->config.rsrc2;
249
250 if (sctx->chip_class >= GFX10)
251 hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(lds_size);
252 else
253 hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(lds_size);
254
255 radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);
256
257 /* Set userdata SGPRs for merged LS-HS. */
258 radeon_set_sh_reg_seq(
259 cs, R_00B430_SPI_SHADER_USER_DATA_LS_0 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
260 radeon_emit(cs, offchip_layout);
261 radeon_emit(cs, tcs_out_offsets);
262 radeon_emit(cs, tcs_out_layout);
263 } else {
264 unsigned ls_rsrc2 = ls_current->config.rsrc2;
265
266 si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
267 ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);
268
269 /* Due to a hw bug, RSRC2_LS must be written twice with another
270 * LS register written in between. */
271 if (sctx->chip_class == GFX7 && sctx->family != CHIP_HAWAII)
272 radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
273 radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
274 radeon_emit(cs, ls_current->config.rsrc1);
275 radeon_emit(cs, ls_rsrc2);
276
277 /* Set userdata SGPRs for TCS. */
278 radeon_set_sh_reg_seq(
279 cs, R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
280 radeon_emit(cs, offchip_layout);
281 radeon_emit(cs, tcs_out_offsets);
282 radeon_emit(cs, tcs_out_layout);
283 radeon_emit(cs, tcs_in_layout);
284 }
285
286 /* Set userdata SGPRs for TES. */
287 radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
288 radeon_emit(cs, offchip_layout);
289 radeon_emit(cs, ring_va);
290
291 ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) | S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
292 S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
293
294 if (sctx->last_ls_hs_config != ls_hs_config) {
295 if (sctx->chip_class >= GFX7) {
296 radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config);
297 } else {
298 radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
299 }
300 sctx->last_ls_hs_config = ls_hs_config;
301 sctx->context_roll = true;
302 }
303 }
304
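/* Examples: 12 vertices of PIPE_PRIM_PATCHES with vertices_per_patch = 3
 * yield 4 patches, 10 TRIANGLE_STRIP vertices yield 8 triangles via
 * u_decomposed_prims_for_vertices(), and SI_PRIM_RECTANGLE_LIST uses
 * 3 vertices per rectangle.
 */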
305 static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info,
306 enum pipe_prim_type prim)
307 {
308 switch (prim) {
309 case PIPE_PRIM_PATCHES:
310 return info->count / info->vertices_per_patch;
311 case PIPE_PRIM_POLYGON:
312 return info->count >= 3;
313 case SI_PRIM_RECTANGLE_LIST:
314 return info->count / 3;
315 default:
316 return u_decomposed_prims_for_vertices(prim, info->count);
317 }
318 }
319
320 static unsigned si_get_init_multi_vgt_param(struct si_screen *sscreen, union si_vgt_param_key *key)
321 {
322 STATIC_ASSERT(sizeof(union si_vgt_param_key) == 4);
323 unsigned max_primgroup_in_wave = 2;
324
325 /* SWITCH_ON_EOP(0) is always preferable. */
326 bool wd_switch_on_eop = false;
327 bool ia_switch_on_eop = false;
328 bool ia_switch_on_eoi = false;
329 bool partial_vs_wave = false;
330 bool partial_es_wave = false;
331
332 if (key->u.uses_tess) {
333 /* SWITCH_ON_EOI must be set if PrimID is used. */
334 if (key->u.tess_uses_prim_id)
335 ia_switch_on_eoi = true;
336
337 /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
338 if ((sscreen->info.family == CHIP_TAHITI || sscreen->info.family == CHIP_PITCAIRN ||
339 sscreen->info.family == CHIP_BONAIRE) &&
340 key->u.uses_gs)
341 partial_vs_wave = true;
342
343 /* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
344 if (sscreen->info.has_distributed_tess) {
345 if (key->u.uses_gs) {
346 if (sscreen->info.chip_class == GFX8)
347 partial_es_wave = true;
348 } else {
349 partial_vs_wave = true;
350 }
351 }
352 }
353
354 /* This is a hardware requirement. */
355 if (key->u.line_stipple_enabled || (sscreen->debug_flags & DBG(SWITCH_ON_EOP))) {
356 ia_switch_on_eop = true;
357 wd_switch_on_eop = true;
358 }
359
360 if (sscreen->info.chip_class >= GFX7) {
361 /* WD_SWITCH_ON_EOP has no effect on GPUs with fewer than
362 * 4 shader engines. Set it to 1 to pass the assertion below.
363 * The other cases are hardware requirements.
364 *
365 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
366 * for points, line strips, and tri strips.
367 */
368 if (sscreen->info.max_se <= 2 || key->u.prim == PIPE_PRIM_POLYGON ||
369 key->u.prim == PIPE_PRIM_LINE_LOOP || key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
370 key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
371 (key->u.primitive_restart &&
372 (sscreen->info.family < CHIP_POLARIS10 ||
373 (key->u.prim != PIPE_PRIM_POINTS && key->u.prim != PIPE_PRIM_LINE_STRIP &&
374 key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
375 key->u.count_from_stream_output)
376 wd_switch_on_eop = true;
377
378 /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
379 * We don't know that for indirect drawing, so treat it as
380 * always problematic. */
381 if (sscreen->info.family == CHIP_HAWAII && key->u.uses_instancing)
382 wd_switch_on_eop = true;
383
384 /* Performance recommendation for 4 SE Gfx7-8 parts if
385 * instances are smaller than a primgroup.
386 * Assume indirect draws always use small instances.
387 * This is needed for good VS wave utilization.
388 */
389 if (sscreen->info.chip_class <= GFX8 && sscreen->info.max_se == 4 &&
390 key->u.multi_instances_smaller_than_primgroup)
391 wd_switch_on_eop = true;
392
393 /* Required on GFX7 and later. */
394 if (sscreen->info.max_se == 4 && !wd_switch_on_eop)
395 ia_switch_on_eoi = true;
396
397 /* HW engineers suggested that PARTIAL_VS_WAVE_ON should be set
398 * to work around a GS hang.
399 */
400 if (key->u.uses_gs &&
401 (sscreen->info.family == CHIP_TONGA || sscreen->info.family == CHIP_FIJI ||
402 sscreen->info.family == CHIP_POLARIS10 || sscreen->info.family == CHIP_POLARIS11 ||
403 sscreen->info.family == CHIP_POLARIS12 || sscreen->info.family == CHIP_VEGAM))
404 partial_vs_wave = true;
405
406 /* Required by Hawaii and, for some special cases, by GFX8. */
407 if (ia_switch_on_eoi &&
408 (sscreen->info.family == CHIP_HAWAII ||
409 (sscreen->info.chip_class == GFX8 && (key->u.uses_gs || max_primgroup_in_wave != 2))))
410 partial_vs_wave = true;
411
412 /* Instancing bug on Bonaire. */
413 if (sscreen->info.family == CHIP_BONAIRE && ia_switch_on_eoi && key->u.uses_instancing)
414 partial_vs_wave = true;
415
416 /* This only applies to Polaris10 and later 4 SE chips.
417 * wd_switch_on_eop is already true on all other chips.
418 */
419 if (!wd_switch_on_eop && key->u.primitive_restart)
420 partial_vs_wave = true;
421
422 /* If the WD switch is false, the IA switch must be false too. */
423 assert(wd_switch_on_eop || !ia_switch_on_eop);
424 }
425
426 /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
427 if (sscreen->info.chip_class <= GFX8 && ia_switch_on_eoi)
428 partial_es_wave = true;
429
430 return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
431 S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
432 S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
433 S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= GFX7 ? wd_switch_on_eop : 0) |
434 /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
435 S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == GFX8 ? max_primgroup_in_wave
436 : 0) |
437 S_030960_EN_INST_OPT_BASIC(sscreen->info.chip_class >= GFX9) |
438 S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
439 }
440
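/* Precompute IA_MULTI_VGT_PARAM for every si_vgt_param_key combination so
 * that si_get_ia_multi_vgt_param() only has to index this table and OR in
 * the PRIMGROUP_SIZE field at draw time.
 */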
441 static void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
442 {
443 for (int prim = 0; prim <= SI_PRIM_RECTANGLE_LIST; prim++)
444 for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
445 for (int multi_instances = 0; multi_instances < 2; multi_instances++)
446 for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
447 for (int count_from_so = 0; count_from_so < 2; count_from_so++)
448 for (int line_stipple = 0; line_stipple < 2; line_stipple++)
449 for (int uses_tess = 0; uses_tess < 2; uses_tess++)
450 for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
451 for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
452 union si_vgt_param_key key;
453
454 key.index = 0;
455 key.u.prim = prim;
456 key.u.uses_instancing = uses_instancing;
457 key.u.multi_instances_smaller_than_primgroup = multi_instances;
458 key.u.primitive_restart = primitive_restart;
459 key.u.count_from_stream_output = count_from_so;
460 key.u.line_stipple_enabled = line_stipple;
461 key.u.uses_tess = uses_tess;
462 key.u.tess_uses_prim_id = tess_uses_primid;
463 key.u.uses_gs = uses_gs;
464
465 sctx->ia_multi_vgt_param[key.index] =
466 si_get_init_multi_vgt_param(sctx->screen, &key);
467 }
468 }
469
470 static bool si_is_line_stipple_enabled(struct si_context *sctx)
471 {
472 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
473
474 return rs->line_stipple_enable && sctx->current_rast_prim != PIPE_PRIM_POINTS &&
475 (rs->polygon_mode_is_lines || util_prim_is_lines(sctx->current_rast_prim));
476 }
477
478 static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
479 const struct pipe_draw_info *info,
480 enum pipe_prim_type prim, unsigned num_patches,
481 unsigned instance_count, bool primitive_restart)
482 {
483 union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
484 unsigned primgroup_size;
485 unsigned ia_multi_vgt_param;
486
487 if (sctx->tes_shader.cso) {
488 primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
489 } else if (sctx->gs_shader.cso) {
490 primgroup_size = 64; /* recommended with a GS */
491 } else {
492 primgroup_size = 128; /* recommended without a GS and tess */
493 }
494
495 key.u.prim = prim;
496 key.u.uses_instancing = info->indirect || instance_count > 1;
497 key.u.multi_instances_smaller_than_primgroup =
498 info->indirect ||
499 (instance_count > 1 &&
500 (info->count_from_stream_output || si_num_prims_for_vertices(info, prim) < primgroup_size));
501 key.u.primitive_restart = primitive_restart;
502 key.u.count_from_stream_output = info->count_from_stream_output != NULL;
503 key.u.line_stipple_enabled = si_is_line_stipple_enabled(sctx);
504
505 ia_multi_vgt_param =
506 sctx->ia_multi_vgt_param[key.index] | S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);
507
508 if (sctx->gs_shader.cso) {
509 /* GS requirement. */
510 if (sctx->chip_class <= GFX8 &&
511 SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
512 ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);
513
514 /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
515 * The hw doc says all multi-SE chips are affected, but Vulkan
516 * only applies it to Hawaii. Do what Vulkan does.
517 */
518 if (sctx->family == CHIP_HAWAII && G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
519 (info->indirect || (instance_count > 1 && (info->count_from_stream_output ||
520 si_num_prims_for_vertices(info, prim) <= 1))))
521 sctx->flags |= SI_CONTEXT_VGT_FLUSH;
522 }
523
524 return ia_multi_vgt_param;
525 }
526
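/* Map a pipe primitive type to the coarser primitive class that
 * VGT_GS_OUT_PRIM_TYPE expects (pointlist, linestrip, tristrip or rectlist);
 * e.g. both LINE_LOOP and LINES_ADJACENCY map to LINESTRIP here.
 */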
527 static unsigned si_conv_prim_to_gs_out(unsigned mode)
528 {
529 static const int prim_conv[] = {
530 [PIPE_PRIM_POINTS] = V_028A6C_POINTLIST,
531 [PIPE_PRIM_LINES] = V_028A6C_LINESTRIP,
532 [PIPE_PRIM_LINE_LOOP] = V_028A6C_LINESTRIP,
533 [PIPE_PRIM_LINE_STRIP] = V_028A6C_LINESTRIP,
534 [PIPE_PRIM_TRIANGLES] = V_028A6C_TRISTRIP,
535 [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_TRISTRIP,
536 [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_TRISTRIP,
537 [PIPE_PRIM_QUADS] = V_028A6C_TRISTRIP,
538 [PIPE_PRIM_QUAD_STRIP] = V_028A6C_TRISTRIP,
539 [PIPE_PRIM_POLYGON] = V_028A6C_TRISTRIP,
540 [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_LINESTRIP,
541 [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_LINESTRIP,
542 [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_TRISTRIP,
543 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_TRISTRIP,
544 [PIPE_PRIM_PATCHES] = V_028A6C_POINTLIST,
545 [SI_PRIM_RECTANGLE_LIST] = V_028A6C_RECTLIST,
546 };
547 assert(mode < ARRAY_SIZE(prim_conv));
548
549 return prim_conv[mode];
550 }
551
552 /* rast_prim is the primitive type after GS. */
553 static void si_emit_rasterizer_prim_state(struct si_context *sctx)
554 {
555 struct radeon_cmdbuf *cs = sctx->gfx_cs;
556 enum pipe_prim_type rast_prim = sctx->current_rast_prim;
557 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
558 unsigned initial_cdw = cs->current.cdw;
559
560 if (unlikely(si_is_line_stipple_enabled(sctx))) {
561 /* For lines, reset the stipple pattern at each primitive. Otherwise,
562 * reset the stipple pattern at each packet (line strips, line loops).
563 */
564 unsigned value =
565 rs->pa_sc_line_stipple | S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2);
566
567 radeon_opt_set_context_reg(sctx, R_028A0C_PA_SC_LINE_STIPPLE, SI_TRACKED_PA_SC_LINE_STIPPLE,
568 value);
569 }
570
571 unsigned gs_out_prim = si_conv_prim_to_gs_out(rast_prim);
572 if (unlikely(gs_out_prim != sctx->last_gs_out_prim && (sctx->ngg || sctx->gs_shader.cso))) {
573 radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
574 sctx->last_gs_out_prim = gs_out_prim;
575 }
576
577 if (initial_cdw != cs->current.cdw)
578 sctx->context_roll = true;
579
580 if (sctx->ngg) {
581 unsigned vtx_index = rs->flatshade_first ? 0 : gs_out_prim;
582
583 sctx->current_vs_state &= C_VS_STATE_OUTPRIM & C_VS_STATE_PROVOKING_VTX_INDEX;
584 sctx->current_vs_state |=
585 S_VS_STATE_OUTPRIM(gs_out_prim) | S_VS_STATE_PROVOKING_VTX_INDEX(vtx_index);
586 }
587 }
588
589 static void si_emit_vs_state(struct si_context *sctx, const struct pipe_draw_info *info)
590 {
591 sctx->current_vs_state &= C_VS_STATE_INDEXED;
592 sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);
593
594 if (sctx->num_vs_blit_sgprs) {
595 /* Re-emit the state after we leave u_blitter. */
596 sctx->last_vs_state = ~0;
597 return;
598 }
599
600 if (sctx->current_vs_state != sctx->last_vs_state) {
601 struct radeon_cmdbuf *cs = sctx->gfx_cs;
602
603 /* For the API vertex shader (VS_STATE_INDEXED, LS_OUT_*). */
604 radeon_set_sh_reg(
605 cs, sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] + SI_SGPR_VS_STATE_BITS * 4,
606 sctx->current_vs_state);
607
608 /* Set CLAMP_VERTEX_COLOR and OUTPRIM in the last stage
609 * before the rasterizer.
610 *
611 * For TES or the GS copy shader without NGG:
612 */
613 if (sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] != R_00B130_SPI_SHADER_USER_DATA_VS_0) {
614 radeon_set_sh_reg(cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_VS_STATE_BITS * 4,
615 sctx->current_vs_state);
616 }
617
618 /* For NGG: */
619 if (sctx->screen->use_ngg &&
620 sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] != R_00B230_SPI_SHADER_USER_DATA_GS_0) {
621 radeon_set_sh_reg(cs, R_00B230_SPI_SHADER_USER_DATA_GS_0 + SI_SGPR_VS_STATE_BITS * 4,
622 sctx->current_vs_state);
623 }
624
625 sctx->last_vs_state = sctx->current_vs_state;
626 }
627 }
628
629 static inline bool si_prim_restart_index_changed(struct si_context *sctx, bool primitive_restart,
630 unsigned restart_index)
631 {
632 return primitive_restart && (restart_index != sctx->last_restart_index ||
633 sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
634 }
635
636 static void si_emit_ia_multi_vgt_param(struct si_context *sctx, const struct pipe_draw_info *info,
637 enum pipe_prim_type prim, unsigned num_patches,
638 unsigned instance_count, bool primitive_restart)
639 {
640 struct radeon_cmdbuf *cs = sctx->gfx_cs;
641 unsigned ia_multi_vgt_param;
642
643 ia_multi_vgt_param =
644 si_get_ia_multi_vgt_param(sctx, info, prim, num_patches, instance_count, primitive_restart);
645
646 /* Draw state. */
647 if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
648 if (sctx->chip_class == GFX9)
649 radeon_set_uconfig_reg_idx(cs, sctx->screen, R_030960_IA_MULTI_VGT_PARAM, 4,
650 ia_multi_vgt_param);
651 else if (sctx->chip_class >= GFX7)
652 radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
653 else
654 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
655
656 sctx->last_multi_vgt_param = ia_multi_vgt_param;
657 }
658 }
659
660 /* GFX10 removed IA_MULTI_VGT_PARAM in exchange for GE_CNTL.
661 * We overload last_multi_vgt_param.
662 */
663 static void gfx10_emit_ge_cntl(struct si_context *sctx, unsigned num_patches)
664 {
665 union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
666 unsigned ge_cntl;
667
668 if (sctx->ngg) {
669 if (sctx->tes_shader.cso) {
670 ge_cntl = S_03096C_PRIM_GRP_SIZE(num_patches) |
671 S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
672 S_03096C_BREAK_WAVE_AT_EOI(key.u.tess_uses_prim_id);
673 } else {
674 ge_cntl = si_get_vs_state(sctx)->ge_cntl;
675 }
676 } else {
677 unsigned primgroup_size;
678 unsigned vertgroup_size = 256; /* 256 = disable vertex grouping */
679
680
681 if (sctx->tes_shader.cso) {
682 primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
683 } else if (sctx->gs_shader.cso) {
684 unsigned vgt_gs_onchip_cntl = sctx->gs_shader.current->ctx_reg.gs.vgt_gs_onchip_cntl;
685 primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
686 } else {
687 primgroup_size = 128; /* recommended without a GS and tess */
688 }
689
690 ge_cntl = S_03096C_PRIM_GRP_SIZE(primgroup_size) | S_03096C_VERT_GRP_SIZE(vertgroup_size) |
691 S_03096C_BREAK_WAVE_AT_EOI(key.u.uses_tess && key.u.tess_uses_prim_id);
692 }
693
694 ge_cntl |= S_03096C_PACKET_TO_ONE_PA(si_is_line_stipple_enabled(sctx));
695
696 if (ge_cntl != sctx->last_multi_vgt_param) {
697 radeon_set_uconfig_reg(sctx->gfx_cs, R_03096C_GE_CNTL, ge_cntl);
698 sctx->last_multi_vgt_param = ge_cntl;
699 }
700 }
701
702 static void si_emit_draw_registers(struct si_context *sctx, const struct pipe_draw_info *info,
703 enum pipe_prim_type prim, unsigned num_patches,
704 unsigned instance_count, bool primitive_restart)
705 {
706 struct radeon_cmdbuf *cs = sctx->gfx_cs;
707 unsigned vgt_prim = si_conv_pipe_prim(prim);
708
709 if (sctx->chip_class >= GFX10)
710 gfx10_emit_ge_cntl(sctx, num_patches);
711 else
712 si_emit_ia_multi_vgt_param(sctx, info, prim, num_patches, instance_count, primitive_restart);
713
714 if (vgt_prim != sctx->last_prim) {
715 if (sctx->chip_class >= GFX10)
716 radeon_set_uconfig_reg(cs, R_030908_VGT_PRIMITIVE_TYPE, vgt_prim);
717 else if (sctx->chip_class >= GFX7)
718 radeon_set_uconfig_reg_idx(cs, sctx->screen, R_030908_VGT_PRIMITIVE_TYPE, 1, vgt_prim);
719 else
720 radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, vgt_prim);
721
722 sctx->last_prim = vgt_prim;
723 }
724
725 /* Primitive restart. */
726 if (primitive_restart != sctx->last_primitive_restart_en) {
727 if (sctx->chip_class >= GFX9)
728 radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN, primitive_restart);
729 else
730 radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, primitive_restart);
731
732 sctx->last_primitive_restart_en = primitive_restart;
733 }
734 if (si_prim_restart_index_changed(sctx, primitive_restart, info->restart_index)) {
735 radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
736 sctx->last_restart_index = info->restart_index;
737 sctx->context_roll = true;
738 }
739 }
740
741 static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw_info *info,
742 struct pipe_resource *indexbuf, unsigned index_size,
743 unsigned index_offset, unsigned instance_count,
744 bool dispatch_prim_discard_cs, unsigned original_index_size)
745 {
746 struct pipe_draw_indirect_info *indirect = info->indirect;
747 struct radeon_cmdbuf *cs = sctx->gfx_cs;
748 unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
749 bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
750 uint32_t index_max_size = 0;
751 uint64_t index_va = 0;
752
753 if (info->count_from_stream_output) {
754 struct si_streamout_target *t = (struct si_streamout_target *)info->count_from_stream_output;
755
756 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);
757 si_cp_copy_data(sctx, sctx->gfx_cs, COPY_DATA_REG, NULL,
758 R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2, COPY_DATA_SRC_MEM,
759 t->buf_filled_size, t->buf_filled_size_offset);
760 }
761
762 /* draw packet */
763 if (index_size) {
764 /* Register shadowing doesn't shadow INDEX_TYPE. */
765 if (index_size != sctx->last_index_size || sctx->shadowed_regs) {
766 unsigned index_type;
767
768 /* index type */
769 switch (index_size) {
770 case 1:
771 index_type = V_028A7C_VGT_INDEX_8;
772 break;
773 case 2:
774 index_type =
775 V_028A7C_VGT_INDEX_16 |
776 (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ? V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
777 break;
778 case 4:
779 index_type =
780 V_028A7C_VGT_INDEX_32 |
781 (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ? V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
782 break;
783 default:
784 assert(!"unreachable");
785 return;
786 }
787
788 if (sctx->chip_class >= GFX9) {
789 radeon_set_uconfig_reg_idx(cs, sctx->screen, R_03090C_VGT_INDEX_TYPE, 2, index_type);
790 } else {
791 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
792 radeon_emit(cs, index_type);
793 }
794
795 sctx->last_index_size = index_size;
796 }
797
798 if (original_index_size) {
799 index_max_size = (indexbuf->width0 - index_offset) / original_index_size;
800 /* Skip draw calls with 0-sized index buffers.
801 * They cause a hang on some chips, like Navi10-14.
802 */
803 if (!index_max_size)
804 return;
805
806 index_va = si_resource(indexbuf)->gpu_address + index_offset;
807
808 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, si_resource(indexbuf), RADEON_USAGE_READ,
809 RADEON_PRIO_INDEX_BUFFER);
810 }
811 } else {
812 /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
813 * so the state must be re-emitted before the next indexed draw.
814 */
815 if (sctx->chip_class >= GFX7)
816 sctx->last_index_size = -1;
817 }
818
819 if (indirect) {
820 uint64_t indirect_va = si_resource(indirect->buffer)->gpu_address;
821
822 assert(indirect_va % 8 == 0);
823
824 si_invalidate_draw_sh_constants(sctx);
825
826 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
827 radeon_emit(cs, 1);
828 radeon_emit(cs, indirect_va);
829 radeon_emit(cs, indirect_va >> 32);
830
831 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, si_resource(indirect->buffer),
832 RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
833
834 unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
835
836 assert(indirect->offset % 4 == 0);
837
838 if (index_size) {
839 radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
840 radeon_emit(cs, index_va);
841 radeon_emit(cs, index_va >> 32);
842
843 radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
844 radeon_emit(cs, index_max_size);
845 }
846
847 if (!sctx->screen->has_draw_indirect_multi) {
848 radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT : PKT3_DRAW_INDIRECT, 3,
849 render_cond_bit));
850 radeon_emit(cs, indirect->offset);
851 radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
852 radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
853 radeon_emit(cs, di_src_sel);
854 } else {
855 uint64_t count_va = 0;
856
857 if (indirect->indirect_draw_count) {
858 struct si_resource *params_buf = si_resource(indirect->indirect_draw_count);
859
860 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, params_buf, RADEON_USAGE_READ,
861 RADEON_PRIO_DRAW_INDIRECT);
862
863 count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
864 }
865
866 radeon_emit(cs,
867 PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI : PKT3_DRAW_INDIRECT_MULTI, 8,
868 render_cond_bit));
869 radeon_emit(cs, indirect->offset);
870 radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
871 radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
872 radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
873 S_2C3_DRAW_INDEX_ENABLE(1) |
874 S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
875 radeon_emit(cs, indirect->draw_count);
876 radeon_emit(cs, count_va);
877 radeon_emit(cs, count_va >> 32);
878 radeon_emit(cs, indirect->stride);
879 radeon_emit(cs, di_src_sel);
880 }
881 } else {
882 int base_vertex;
883
884 /* Register shadowing requires that we always emit PKT3_NUM_INSTANCES. */
885 if (sctx->shadowed_regs ||
886 sctx->last_instance_count == SI_INSTANCE_COUNT_UNKNOWN ||
887 sctx->last_instance_count != instance_count) {
888 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
889 radeon_emit(cs, instance_count);
890 sctx->last_instance_count = instance_count;
891 }
892
893 /* Base vertex and start instance. */
894 base_vertex = original_index_size ? info->index_bias : info->start;
895
896 if (sctx->num_vs_blit_sgprs) {
897 /* Re-emit draw constants after we leave u_blitter. */
898 si_invalidate_draw_sh_constants(sctx);
899
900 /* Blit VS doesn't use BASE_VERTEX, START_INSTANCE, and DRAWID. */
901 radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_VS_BLIT_DATA * 4, sctx->num_vs_blit_sgprs);
902 radeon_emit_array(cs, sctx->vs_blit_sh_data, sctx->num_vs_blit_sgprs);
903 } else if (base_vertex != sctx->last_base_vertex ||
904 sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
905 info->start_instance != sctx->last_start_instance ||
906 info->drawid != sctx->last_drawid || sh_base_reg != sctx->last_sh_base_reg) {
907 radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
908 radeon_emit(cs, base_vertex);
909 radeon_emit(cs, info->start_instance);
910 radeon_emit(cs, info->drawid);
911
912 sctx->last_base_vertex = base_vertex;
913 sctx->last_start_instance = info->start_instance;
914 sctx->last_drawid = info->drawid;
915 sctx->last_sh_base_reg = sh_base_reg;
916 }
917
918 if (index_size) {
919 if (dispatch_prim_discard_cs) {
920 index_va += info->start * original_index_size;
921 index_max_size = MIN2(index_max_size, info->count);
922
923 si_dispatch_prim_discard_cs_and_draw(sctx, info, original_index_size, base_vertex,
924 index_va, index_max_size);
925 return;
926 }
927
928 index_va += info->start * index_size;
929
930 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
931 radeon_emit(cs, index_max_size); /* MAX_SIZE: max index count that fits in the buffer */
932 radeon_emit(cs, index_va); /* INDEX_BASE_LO */
933 radeon_emit(cs, index_va >> 32); /* INDEX_BASE_HI */
934 radeon_emit(cs, info->count); /* INDEX_COUNT */
935 radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA); /* DRAW_INITIATOR: fetch indices from memory */
936 } else {
937 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
938 radeon_emit(cs, info->count);
939 radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
940 S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
941 }
942 }
943 }
944
945 void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs, unsigned cp_coher_cntl)
946 {
947 bool compute_ib = !sctx->has_graphics || cs == sctx->prim_discard_compute_cs;
948
949 assert(sctx->chip_class <= GFX9);
950
951 if (sctx->chip_class == GFX9 || compute_ib) {
952 /* Flush caches and wait for the caches to assert idle. */
953 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
954 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
955 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
956 radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
957 radeon_emit(cs, 0); /* CP_COHER_BASE */
958 radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
959 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
960 } else {
961 /* ACQUIRE_MEM is only required on a compute ring. */
962 radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
963 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
964 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
965 radeon_emit(cs, 0); /* CP_COHER_BASE */
966 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
967 }
968
969 /* ACQUIRE_MEM has an implicit context roll if the current context
970 * is busy. */
971 if (!compute_ib)
972 sctx->context_roll = true;
973 }
974
975 void si_prim_discard_signal_next_compute_ib_start(struct si_context *sctx)
976 {
977 if (!si_compute_prim_discard_enabled(sctx))
978 return;
979
980 if (!sctx->barrier_buf) {
981 u_suballocator_alloc(sctx->allocator_zeroed_memory, 4, 4, &sctx->barrier_buf_offset,
982 (struct pipe_resource **)&sctx->barrier_buf);
983 }
984
985 /* Emit a placeholder to signal the next compute IB to start.
986 * See si_compute_prim_discard.c for explanation.
987 */
988 uint32_t signal = 1;
989 si_cp_write_data(sctx, sctx->barrier_buf, sctx->barrier_buf_offset, 4, V_370_MEM, V_370_ME,
990 &signal);
991
992 sctx->last_pkt3_write_data = &sctx->gfx_cs->current.buf[sctx->gfx_cs->current.cdw - 5];
993
994 * Only the last occurrence of WRITE_DATA will be executed.
995 * The packet will be enabled in si_flush_gfx_cs.
996 */
997 *sctx->last_pkt3_write_data = PKT3(PKT3_NOP, 3, 0);
998 }
999
1000 void gfx10_emit_cache_flush(struct si_context *ctx)
1001 {
1002 struct radeon_cmdbuf *cs = ctx->gfx_cs;
1003 uint32_t gcr_cntl = 0;
1004 unsigned cb_db_event = 0;
1005 unsigned flags = ctx->flags;
1006
1007 if (!ctx->has_graphics) {
1008 /* Only process compute flags. */
1009 flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
1010 SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
1011 SI_CONTEXT_CS_PARTIAL_FLUSH;
1012 }
1013
1014 /* We don't need these. */
1015 assert(!(flags & (SI_CONTEXT_VGT_STREAMOUT_SYNC | SI_CONTEXT_FLUSH_AND_INV_DB_META)));
1016
1017 if (flags & SI_CONTEXT_VGT_FLUSH) {
1018 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1019 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1020 }
1021
1022 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
1023 ctx->num_cb_cache_flushes++;
1024 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
1025 ctx->num_db_cache_flushes++;
1026
1027 if (flags & SI_CONTEXT_INV_ICACHE)
1028 gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
1029 if (flags & SI_CONTEXT_INV_SCACHE) {
1030 /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
1031 * to FORWARD when both L1 and L2 are written out (WB or INV).
1032 */
1033 gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
1034 }
1035 if (flags & SI_CONTEXT_INV_VCACHE)
1036 gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
1037
1038 /* The L2 cache ops are:
1039 * - INV: - invalidate lines that reflect memory (were loaded from memory)
1040 * - don't touch lines that were overwritten (were stored by gfx clients)
1041 * - WB: - don't touch lines that reflect memory
1042 * - write back lines that were overwritten
1043 * - WB | INV: - invalidate lines that reflect memory
1044 * - write back lines that were overwritten
1045 *
1046 * GLM doesn't support WB alone. If WB is set, INV must be set too.
1047 */
1048 if (flags & SI_CONTEXT_INV_L2) {
1049 /* Writeback and invalidate everything in L2. */
1050 gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) | S_586_GLM_INV(1) | S_586_GLM_WB(1);
1051 ctx->num_L2_invalidates++;
1052 } else if (flags & SI_CONTEXT_WB_L2) {
1053 gcr_cntl |= S_586_GL2_WB(1) | S_586_GLM_WB(1) | S_586_GLM_INV(1);
1054 } else if (flags & SI_CONTEXT_INV_L2_METADATA) {
1055 gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
1056 }
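/* Example: a flush of only SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE
 * ends up as GLK_INV | GL1_INV | GLV_INV in gcr_cntl, with no GL2 action.
 */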
1057
1058 if (flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
1059 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1060 /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
1061 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1062 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
1063 }
1064 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
1065 /* Flush HTILE. Will wait for idle later. */
1066 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1067 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
1068 }
1069
1070 /* First flush CB/DB, then L1/L2. */
1071 gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);
1072
1073 if ((flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) ==
1074 (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
1075 cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1076 } else if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1077 cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
1078 } else if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
1079 cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
1080 } else {
1081 assert(0);
1082 }
1083 } else {
1084 /* Wait for graphics shaders to go idle if requested. */
1085 if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
1086 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1087 radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1088 /* Only count explicit shader flushes, not implicit ones. */
1089 ctx->num_vs_flushes++;
1090 ctx->num_ps_flushes++;
1091 } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
1092 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1093 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1094 ctx->num_vs_flushes++;
1095 }
1096 }
1097
1098 if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && ctx->compute_is_busy) {
1099 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1100 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1101 ctx->num_cs_flushes++;
1102 ctx->compute_is_busy = false;
1103 }
1104
1105 if (cb_db_event) {
1106 /* CB/DB flush and invalidate (or possibly just a wait for a
1107 * meta flush) via RELEASE_MEM.
1108 *
1109 * Combine this with other cache flushes when possible; this
1110 * requires affected shaders to be idle, so do it after the
1111 * CS_PARTIAL_FLUSH emitted above (VS/PS partial flushes are always
1112 * implied by the TS event).
1113 */
1114 uint64_t va;
1115
1116 /* Do the flush (enqueue the event and wait for it). */
1117 va = ctx->wait_mem_scratch->gpu_address;
1118 ctx->wait_mem_number++;
1119
1120 /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
1121 unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
1122 unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
1123 unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
1124 unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
1125 assert(G_586_GL2_US(gcr_cntl) == 0);
1126 assert(G_586_GL2_RANGE(gcr_cntl) == 0);
1127 assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
1128 unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
1129 unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
1130 unsigned gcr_seq = G_586_SEQ(gcr_cntl);
1131
1132 gcr_cntl &= C_586_GLM_WB & C_586_GLM_INV & C_586_GLV_INV & C_586_GL1_INV & C_586_GL2_INV &
1133 C_586_GL2_WB; /* keep SEQ */
1134
1135 si_cp_release_mem(ctx, cs, cb_db_event,
1136 S_490_GLM_WB(glm_wb) | S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) |
1137 S_490_GL1_INV(gl1_inv) | S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) |
1138 S_490_SEQ(gcr_seq),
1139 EOP_DST_SEL_MEM, EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
1140 EOP_DATA_SEL_VALUE_32BIT, ctx->wait_mem_scratch, va, ctx->wait_mem_number,
1141 SI_NOT_QUERY);
1142 si_cp_wait_mem(ctx, ctx->gfx_cs, va, ctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);
1143 }
1144
1145 /* Ignore fields that only modify the behavior of other fields. */
1146 if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
1147 /* Flush caches and wait for the caches to assert idle.
1148 * The cache flush is executed in the ME, but the PFP waits
1149 * for completion.
1150 */
1151 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
1152 radeon_emit(cs, 0); /* CP_COHER_CNTL */
1153 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
1154 radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
1155 radeon_emit(cs, 0); /* CP_COHER_BASE */
1156 radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
1157 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
1158 radeon_emit(cs, gcr_cntl); /* GCR_CNTL */
1159 } else if (cb_db_event || (flags & (SI_CONTEXT_VS_PARTIAL_FLUSH | SI_CONTEXT_PS_PARTIAL_FLUSH |
1160 SI_CONTEXT_CS_PARTIAL_FLUSH))) {
1161 /* We need to ensure that PFP waits as well. */
1162 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1163 radeon_emit(cs, 0);
1164 }
1165
1166 if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
1167 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1168 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
1169 } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
1170 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1171 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
1172 }
1173
1174 ctx->flags = 0;
1175 }
1176
1177 void si_emit_cache_flush(struct si_context *sctx)
1178 {
1179 struct radeon_cmdbuf *cs = sctx->gfx_cs;
1180 uint32_t flags = sctx->flags;
1181
1182 if (!sctx->has_graphics) {
1183 /* Only process compute flags. */
1184 flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
1185 SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
1186 SI_CONTEXT_CS_PARTIAL_FLUSH;
1187 }
1188
1189 uint32_t cp_coher_cntl = 0;
1190 const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB);
1191 const bool is_barrier =
1192 flush_cb_db ||
1193 /* INV_ICACHE == beginning of gfx IB. Checking
1194 * INV_ICACHE fixes corruption for DeusExMD with
1195 * compute-based culling, but I don't know why.
1196 */
1197 flags & (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_VS_PARTIAL_FLUSH) ||
1198 (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && sctx->compute_is_busy);
1199
1200 assert(sctx->chip_class <= GFX9);
1201
1202 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
1203 sctx->num_cb_cache_flushes++;
1204 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
1205 sctx->num_db_cache_flushes++;
1206
1207 /* GFX6 has a bug where it always flushes ICACHE and KCACHE if either
1208 * bit is set. An alternative way is to write SQC_CACHES, but that
1209 * doesn't seem to work reliably. Since the bug doesn't affect
1210 * correctness (it only does more work than necessary) and
1211 * the performance impact is likely negligible, there is no plan
1212 * to add a workaround for it.
1213 */
1214
1215 if (flags & SI_CONTEXT_INV_ICACHE)
1216 cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
1217 if (flags & SI_CONTEXT_INV_SCACHE)
1218 cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
1219
1220 if (sctx->chip_class <= GFX8) {
1221 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1222 cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) |
1223 S_0085F0_CB1_DEST_BASE_ENA(1) | S_0085F0_CB2_DEST_BASE_ENA(1) |
1224 S_0085F0_CB3_DEST_BASE_ENA(1) | S_0085F0_CB4_DEST_BASE_ENA(1) |
1225 S_0085F0_CB5_DEST_BASE_ENA(1) | S_0085F0_CB6_DEST_BASE_ENA(1) |
1226 S_0085F0_CB7_DEST_BASE_ENA(1);
1227
1228 /* Necessary for DCC */
1229 if (sctx->chip_class == GFX8)
1230 si_cp_release_mem(sctx, cs, V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0, EOP_DST_SEL_MEM,
1231 EOP_INT_SEL_NONE, EOP_DATA_SEL_DISCARD, NULL, 0, 0, SI_NOT_QUERY);
1232 }
1233 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
1234 cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
1235 }
1236
1237 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1238 /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
1239 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1240 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
1241 }
1242 if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB | SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
1243 /* Flush HTILE. SURFACE_SYNC will wait for idle. */
1244 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1245 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
1246 }
1247
1248 /* Wait for shader engines to go idle.
1249 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
1250 * for everything including CB/DB cache flushes.
1251 */
1252 if (!flush_cb_db) {
1253 if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
1254 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1255 radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1256 /* Only count explicit shader flushes, not implicit ones
1257 * done by SURFACE_SYNC.
1258 */
1259 sctx->num_vs_flushes++;
1260 sctx->num_ps_flushes++;
1261 } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
1262 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1263 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1264 sctx->num_vs_flushes++;
1265 }
1266 }
1267
1268 if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && sctx->compute_is_busy) {
1269 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1270 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1271 sctx->num_cs_flushes++;
1272 sctx->compute_is_busy = false;
1273 }
1274
1275 /* VGT state synchronization. */
1276 if (flags & SI_CONTEXT_VGT_FLUSH) {
1277 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1278 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1279 }
1280 if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
1281 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1282 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
1283 }
1284
1285 /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
1286 * wait for idle on GFX9. We have to use a TS event.
1287 */
1288 if (sctx->chip_class == GFX9 && flush_cb_db) {
1289 uint64_t va;
1290 unsigned tc_flags, cb_db_event;
1291
1292 /* Set the CB/DB flush event. */
1293 switch (flush_cb_db) {
1294 case SI_CONTEXT_FLUSH_AND_INV_CB:
1295 cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
1296 break;
1297 case SI_CONTEXT_FLUSH_AND_INV_DB:
1298 cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
1299 break;
1300 default:
1301 /* both CB & DB */
1302 cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1303 }
1304
1305 /* These are the only allowed combinations. If you need to
1306 * do multiple operations at once, do them separately.
1307 * All operations that invalidate L2 also seem to invalidate
1308 * metadata. Volatile (VOL) and WC flushes are not listed here.
1309 *
1310 * TC | TC_WB = writeback & invalidate L2 & L1
1311 * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
1312 * TC_WB | TC_NC = writeback L2 for MTYPE == NC
1313 * TC | TC_NC = invalidate L2 for MTYPE == NC
1314 * TC | TC_MD = writeback & invalidate L2 metadata (DCC, etc.)
1315 * TCL1 = invalidate L1
1316 */
1317 tc_flags = 0;
1318
1319 if (flags & SI_CONTEXT_INV_L2_METADATA) {
1320 tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_MD_ACTION_ENA;
1321 }
1322
1323 /* Ideally flush TC together with CB/DB. */
1324 if (flags & SI_CONTEXT_INV_L2) {
1325 /* Writeback and invalidate everything in L2 & L1. */
1326 tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_WB_ACTION_ENA;
1327
1328 /* Clear the flags. */
1329 flags &= ~(SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_VCACHE);
1330 sctx->num_L2_invalidates++;
1331 }
1332
1333 /* Do the flush (enqueue the event and wait for it). */
1334 va = sctx->wait_mem_scratch->gpu_address;
1335 sctx->wait_mem_number++;
1336
1337 si_cp_release_mem(sctx, cs, cb_db_event, tc_flags, EOP_DST_SEL_MEM,
1338 EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM, EOP_DATA_SEL_VALUE_32BIT,
1339 sctx->wait_mem_scratch, va, sctx->wait_mem_number, SI_NOT_QUERY);
1340 si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);
1341 }
1342
1343 /* Make sure ME is idle (it executes most packets) before continuing.
1344 * This prevents read-after-write hazards between PFP and ME.
1345 */
1346 if (sctx->has_graphics &&
1347 (cp_coher_cntl || (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH | SI_CONTEXT_INV_VCACHE |
1348 SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2)))) {
1349 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1350 radeon_emit(cs, 0);
1351 }
1352
1353 /* GFX6-GFX8 only:
1354 * When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
1355 * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
1356 *
1357 * cp_coher_cntl should contain all necessary flags except TC flags
1358 * at this point.
1359 *
1360 * GFX6-GFX7 don't support L2 write-back.
1361 */
1362 if (flags & SI_CONTEXT_INV_L2 || (sctx->chip_class <= GFX7 && (flags & SI_CONTEXT_WB_L2))) {
1363 /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
1364 * WB must be set on GFX8+ when TC_ACTION is set.
1365 */
1366 si_emit_surface_sync(sctx, sctx->gfx_cs,
1367 cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
1368 S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
1369 cp_coher_cntl = 0;
1370 sctx->num_L2_invalidates++;
1371 } else {
1372 /* L1 invalidation and L2 writeback can't be combined into a single
1373 * SURFACE_SYNC, so they are done separately.
1374 */
1375 if (flags & SI_CONTEXT_WB_L2) {
1376 /* WB = write-back
1377 * NC = apply to non-coherent MTYPEs
1378 * (i.e. MTYPE <= 1, which is what we use everywhere)
1379 *
1380 * WB doesn't work without NC.
1381 */
1382 si_emit_surface_sync(
1383 sctx, sctx->gfx_cs,
1384 cp_coher_cntl | S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1));
1385 cp_coher_cntl = 0;
1386 sctx->num_L2_writebacks++;
1387 }
1388 if (flags & SI_CONTEXT_INV_VCACHE) {
1389 /* Invalidate per-CU VMEM L1. */
1390 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl | S_0085F0_TCL1_ACTION_ENA(1));
1391 cp_coher_cntl = 0;
1392 }
1393 }
1394
1395 /* If TC flushes haven't cleared this... */
1396 if (cp_coher_cntl)
1397 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl);
1398
1399 if (is_barrier)
1400 si_prim_discard_signal_next_compute_ib_start(sctx);
1401
1402 if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
1403 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1404 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
1405 } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
1406 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1407 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
1408 }
1409
1410 sctx->flags = 0;
1411 }
1412
1413 static void si_get_draw_start_count(struct si_context *sctx, const struct pipe_draw_info *info,
1414 unsigned *start, unsigned *count)
1415 {
1416 struct pipe_draw_indirect_info *indirect = info->indirect;
1417
1418 if (indirect) {
1419 unsigned indirect_count;
1420 struct pipe_transfer *transfer;
1421 unsigned begin, end;
1422 unsigned map_size;
1423 unsigned *data;
1424
1425 if (indirect->indirect_draw_count) {
1426 data = pipe_buffer_map_range(&sctx->b, indirect->indirect_draw_count,
1427 indirect->indirect_draw_count_offset, sizeof(unsigned),
1428 PIPE_TRANSFER_READ, &transfer);
1429
1430 indirect_count = *data;
1431
1432 pipe_buffer_unmap(&sctx->b, transfer);
1433 } else {
1434 indirect_count = indirect->draw_count;
1435 }
1436
1437 if (!indirect_count) {
1438 *start = *count = 0;
1439 return;
1440 }
1441
1442 map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
1443 data = pipe_buffer_map_range(&sctx->b, indirect->buffer, indirect->offset, map_size,
1444 PIPE_TRANSFER_READ, &transfer);
1445
1446 begin = UINT_MAX;
1447 end = 0;
1448
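/* Each indirect draw packet stores {count, instance_count, start, ...};
 * only count and start matter here. Take the union of all
 * [start, start + count) ranges. */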
1449 for (unsigned i = 0; i < indirect_count; ++i) {
1450 unsigned count = data[0];
1451 unsigned start = data[2];
1452
1453 if (count > 0) {
1454 begin = MIN2(begin, start);
1455 end = MAX2(end, start + count);
1456 }
1457
1458 data += indirect->stride / sizeof(unsigned);
1459 }
1460
1461 pipe_buffer_unmap(&sctx->b, transfer);
1462
1463 if (begin < end) {
1464 *start = begin;
1465 *count = end - begin;
1466 } else {
1467 *start = *count = 0;
1468 }
1469 } else {
1470 *start = info->start;
1471 *count = info->count;
1472 }
1473 }
1474
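/* Emit all dirty render state: the derived tessellation state, dirty
 * state atoms (except those in skip_atom_mask), queued PM4 states that
 * haven't been emitted yet, and the per-draw VS and draw registers.
 */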
1475 static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
1476 enum pipe_prim_type prim, unsigned instance_count,
1477 bool primitive_restart, unsigned skip_atom_mask)
1478 {
1479 unsigned num_patches = 0;
1480
1481 si_emit_rasterizer_prim_state(sctx);
1482 if (sctx->tes_shader.cso)
1483 si_emit_derived_tess_state(sctx, info, &num_patches);
1484
1485 /* Emit state atoms. */
1486 unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
1487 while (mask)
1488 sctx->atoms.array[u_bit_scan(&mask)].emit(sctx);
1489
1490 sctx->dirty_atoms &= skip_atom_mask;
1491
1492 /* Emit states. */
1493 mask = sctx->dirty_states;
1494 while (mask) {
1495 unsigned i = u_bit_scan(&mask);
1496 struct si_pm4_state *state = sctx->queued.array[i];
1497
1498 if (!state || sctx->emitted.array[i] == state)
1499 continue;
1500
1501 si_pm4_emit(sctx, state);
1502 sctx->emitted.array[i] = state;
1503 }
1504 sctx->dirty_states = 0;
1505
1506 /* Emit draw states. */
1507 si_emit_vs_state(sctx, info);
1508 si_emit_draw_registers(sctx, info, prim, num_patches, instance_count, primitive_restart);
1509 }
1510
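/* Return true if no buffer the vertex shader can read (index buffer,
 * vertex buffers, constant/shader buffers, sampler views, images) is
 * referenced for writing by the current gfx IB. This is required by the
 * primitive discard compute shader, which can run ahead of the gfx IB.
 */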
1511 static bool si_all_vs_resources_read_only(struct si_context *sctx, struct pipe_resource *indexbuf)
1512 {
1513 struct radeon_winsys *ws = sctx->ws;
1514 struct radeon_cmdbuf *cs = sctx->gfx_cs;
1515
1516 /* Index buffer. */
1517 if (indexbuf && ws->cs_is_buffer_referenced(cs, si_resource(indexbuf)->buf, RADEON_USAGE_WRITE))
1518 goto has_write_reference;
1519
1520 /* Vertex buffers. */
1521 struct si_vertex_elements *velems = sctx->vertex_elements;
1522 unsigned num_velems = velems->count;
1523
1524 for (unsigned i = 0; i < num_velems; i++) {
1525 if (!((1 << i) & velems->first_vb_use_mask))
1526 continue;
1527
1528 unsigned vb_index = velems->vertex_buffer_index[i];
1529 struct pipe_resource *res = sctx->vertex_buffer[vb_index].buffer.resource;
1530 if (!res)
1531 continue;
1532
1533 if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf, RADEON_USAGE_WRITE))
1534 goto has_write_reference;
1535 }
1536
1537 /* Constant and shader buffers. */
1538 struct si_descriptors *buffers =
1539 &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_VERTEX)];
1540 for (unsigned i = 0; i < buffers->num_active_slots; i++) {
1541 unsigned index = buffers->first_active_slot + i;
1542 struct pipe_resource *res = sctx->const_and_shader_buffers[PIPE_SHADER_VERTEX].buffers[index];
1543 if (!res)
1544 continue;
1545
1546 if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf, RADEON_USAGE_WRITE))
1547 goto has_write_reference;
1548 }
1549
1550 /* Samplers. */
1551 struct si_shader_selector *vs = sctx->vs_shader.cso;
1552 if (vs->info.base.textures_used) {
1553 unsigned num_samplers = util_last_bit(vs->info.base.textures_used);
1554
1555 for (unsigned i = 0; i < num_samplers; i++) {
1556 struct pipe_sampler_view *view = sctx->samplers[PIPE_SHADER_VERTEX].views[i];
1557 if (!view)
1558 continue;
1559
1560 if (ws->cs_is_buffer_referenced(cs, si_resource(view->texture)->buf, RADEON_USAGE_WRITE))
1561 goto has_write_reference;
1562 }
1563 }
1564
1565 /* Images. */
1566 unsigned num_images = vs->info.base.num_images;
1567 if (num_images) {
1568 for (unsigned i = 0; i < num_images; i++) {
1569 struct pipe_resource *res = sctx->images[PIPE_SHADER_VERTEX].views[i].resource;
1570 if (!res)
1571 continue;
1572
1573 if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf, RADEON_USAGE_WRITE))
1574 goto has_write_reference;
1575 }
1576 }
1577
1578 return true;
1579
1580 has_write_reference:
1581 /* If the current gfx IB has enough packets, flush it to remove write
1582 * references to buffers.
1583 */
1584 if (cs->prev_dw + cs->current.cdw > 2048) {
1585 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
1586 assert(si_all_vs_resources_read_only(sctx, indexbuf));
1587 return true;
1588 }
1589 return false;
1590 }
1591
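/* Debug helper for the primitive discard eligibility checks: print the
 * reason why prim discard is skipped and return false, so it can be
 * chained with || inside the big condition in si_draw_vbo.
 */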
1592 static ALWAYS_INLINE bool pd_msg(const char *s)
1593 {
1594 if (SI_PRIM_DISCARD_DEBUG)
1595 printf("PD failed: %s\n", s);
1596 return false;
1597 }
1598
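/* The main draw entry point. It validates state, translates or uploads
 * the index buffer if needed, decides whether to use the primitive
 * discard compute shader and NGG culling, and finally emits states and
 * the draw packets in an order that minimizes the time the CUs are idle.
 */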
1599 static void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
1600 {
1601 struct si_context *sctx = (struct si_context *)ctx;
1602 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
1603 struct pipe_resource *indexbuf = info->index.resource;
1604 unsigned dirty_tex_counter, dirty_buf_counter;
1605 enum pipe_prim_type rast_prim, prim = info->mode;
1606 unsigned index_size = info->index_size;
1607 unsigned index_offset = info->indirect ? info->start * index_size : 0;
1608 unsigned instance_count = info->instance_count;
1609 bool primitive_restart =
1610 info->primitive_restart &&
1611 (!sctx->screen->options.prim_restart_tri_strips_only ||
1612 (prim != PIPE_PRIM_TRIANGLE_STRIP && prim != PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY));
1613
1614 if (likely(!info->indirect)) {
1615 /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
1616 * no workaround for indirect draws, but we can at least skip
1617 * direct draws.
1618 */
1619 if (unlikely(!instance_count))
1620 return;
1621
1622 /* Handle count == 0. */
1623 if (unlikely(!info->count && (index_size || !info->count_from_stream_output)))
1624 return;
1625 }
1626
1627 struct si_shader_selector *vs = sctx->vs_shader.cso;
1628 if (unlikely(!vs || sctx->num_vertex_elements < vs->num_vs_inputs ||
1629 (!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
1630 (!!sctx->tes_shader.cso != (prim == PIPE_PRIM_PATCHES)))) {
1631 assert(0);
1632 return;
1633 }
1634
1635 /* Recompute and re-emit the texture resource states if needed. */
1636 dirty_tex_counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
1637 if (unlikely(dirty_tex_counter != sctx->last_dirty_tex_counter)) {
1638 sctx->last_dirty_tex_counter = dirty_tex_counter;
1639 sctx->framebuffer.dirty_cbufs |= ((1 << sctx->framebuffer.state.nr_cbufs) - 1);
1640 sctx->framebuffer.dirty_zsbuf = true;
1641 si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
1642 si_update_all_texture_descriptors(sctx);
1643 }
1644
1645 dirty_buf_counter = p_atomic_read(&sctx->screen->dirty_buf_counter);
1646 if (unlikely(dirty_buf_counter != sctx->last_dirty_buf_counter)) {
1647 sctx->last_dirty_buf_counter = dirty_buf_counter;
1648 /* Rebind all buffers unconditionally. */
1649 si_rebind_buffer(sctx, NULL);
1650 }
1651
1652 si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
1653
1654 /* Set the rasterization primitive type.
1655 *
1656 * This must be done after si_decompress_textures, which can call
1657 * draw_vbo recursively, and before si_update_shaders, which uses
1658 * current_rast_prim for this draw_vbo call. */
1659 if (sctx->gs_shader.cso) {
1660 /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
1661 rast_prim = sctx->gs_shader.cso->rast_prim;
1662 } else if (sctx->tes_shader.cso) {
1663 /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
1664 rast_prim = sctx->tes_shader.cso->rast_prim;
1665 } else if (util_rast_prim_is_triangles(prim)) {
1666 rast_prim = PIPE_PRIM_TRIANGLES;
1667 } else {
1668 /* Only possibilities: POINTS, LINE*, RECTANGLES */
1669 rast_prim = prim;
1670 }
1671
1672 if (rast_prim != sctx->current_rast_prim) {
1673 if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
1674 util_prim_is_points_or_lines(rast_prim))
1675 si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
1676
1677 sctx->current_rast_prim = rast_prim;
1678 sctx->do_update_shaders = true;
1679 }
1680
1681 if (sctx->tes_shader.cso && sctx->screen->info.has_ls_vgpr_init_bug) {
1682 /* Determine whether the LS VGPR fix should be applied.
1683 *
1684 * It is only required when num input CPs > num output CPs,
1685 * which cannot happen with the fixed function TCS. We should
1686 * also update this bit when switching from TCS to fixed
1687 * function TCS.
1688 */
1689 struct si_shader_selector *tcs = sctx->tcs_shader.cso;
1690 bool ls_vgpr_fix =
1691 tcs && info->vertices_per_patch > tcs->info.base.tess.tcs_vertices_out;
1692
1693 if (ls_vgpr_fix != sctx->ls_vgpr_fix) {
1694 sctx->ls_vgpr_fix = ls_vgpr_fix;
1695 sctx->do_update_shaders = true;
1696 }
1697 }
1698
1699 if (sctx->chip_class <= GFX9 && sctx->gs_shader.cso) {
1700 /* Determine whether the GS triangle strip adjacency fix should
1701 * be applied. Rotate every other triangle if
1702 * - triangle strips with adjacency are fed to the GS and
1703 * - primitive restart is disabled (the rotation doesn't help
1704 * when the restart occurs after an odd number of triangles).
1705 */
1706 bool gs_tri_strip_adj_fix =
1707 !sctx->tes_shader.cso && prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY && !primitive_restart;
1708
1709 if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
1710 sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
1711 sctx->do_update_shaders = true;
1712 }
1713 }
1714
1715 if (index_size) {
1716 /* Translate or upload, if needed. */
1717 /* 8-bit indices are only supported on GFX8+, so convert them to 16-bit on older chips. */
1718 if (sctx->chip_class <= GFX7 && index_size == 1) {
1719 unsigned start, count, start_offset, size, offset;
1720 void *ptr;
1721
1722 si_get_draw_start_count(sctx, info, &start, &count);
1723 start_offset = start * 2;
1724 size = count * 2;
1725
1726 indexbuf = NULL;
1727 u_upload_alloc(ctx->stream_uploader, start_offset, size,
1728 si_optimal_tcc_alignment(sctx, size), &offset, &indexbuf, &ptr);
1729 if (!indexbuf)
1730 return;
1731
1732 util_shorten_ubyte_elts_to_userptr(&sctx->b, info, 0, 0, index_offset + start, count, ptr);
1733
1734 /* info->start will be added by the drawing code */
1735 index_offset = offset - start_offset;
1736 index_size = 2;
1737 } else if (info->has_user_indices) {
1738 unsigned start_offset;
1739
1740 assert(!info->indirect);
1741 start_offset = info->start * index_size;
1742
1743 indexbuf = NULL;
1744 u_upload_data(ctx->stream_uploader, start_offset, info->count * index_size,
1745 sctx->screen->info.tcc_cache_line_size,
1746 (char *)info->index.user + start_offset, &index_offset, &indexbuf);
1747 if (!indexbuf)
1748 return;
1749
1750 /* info->start will be added by the drawing code */
1751 index_offset -= start_offset;
1752 } else if (sctx->chip_class <= GFX7 && si_resource(indexbuf)->TC_L2_dirty) {
1753 /* GFX8 and later read index buffers through TC L2, so they don't
1754 * need this. */
1755 sctx->flags |= SI_CONTEXT_WB_L2;
1756 si_resource(indexbuf)->TC_L2_dirty = false;
1757 }
1758 }
1759
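/* State for the primitive discard compute path, filled in below. */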
1760 bool dispatch_prim_discard_cs = false;
1761 bool prim_discard_cs_instancing = false;
1762 unsigned original_index_size = index_size;
1763 unsigned direct_count = 0;
1764
1765 if (info->indirect) {
1766 struct pipe_draw_indirect_info *indirect = info->indirect;
1767
1768 /* Add the buffer size for memory checking in need_cs_space. */
1769 si_context_add_resource_size(sctx, indirect->buffer);
1770
1771 /* Indirect buffers use TC L2 on GFX9, but not older hw. */
1772 if (sctx->chip_class <= GFX8) {
1773 if (si_resource(indirect->buffer)->TC_L2_dirty) {
1774 sctx->flags |= SI_CONTEXT_WB_L2;
1775 si_resource(indirect->buffer)->TC_L2_dirty = false;
1776 }
1777
1778 if (indirect->indirect_draw_count &&
1779 si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
1780 sctx->flags |= SI_CONTEXT_WB_L2;
1781 si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
1782 }
1783 }
1784 } else {
1785 /* Multiply by 3 for strips and fans to get an approximate vertex
1786 * count as triangles. */
1787 direct_count = info->count * instance_count * (prim == PIPE_PRIM_TRIANGLES ? 1 : 3);
1788 }
1789
1790 /* Determine if we can use the primitive discard compute shader. */
1791 if (si_compute_prim_discard_enabled(sctx) &&
1792 (direct_count > sctx->prim_discard_vertex_count_threshold
1793 ? (sctx->compute_num_verts_rejected += direct_count, true)
1794 : /* Add, then return true. */
1795 (sctx->compute_num_verts_ineligible += direct_count,
1796 false)) && /* Add, then return false. */
1797 (!info->count_from_stream_output || pd_msg("draw_opaque")) &&
1798 (primitive_restart ?
1799 /* Supported prim types with primitive restart: */
1800 (prim == PIPE_PRIM_TRIANGLE_STRIP || pd_msg("bad prim type with primitive restart")) &&
1801 /* Disallow instancing with primitive restart: */
1802 (instance_count == 1 || pd_msg("instance_count > 1 with primitive restart"))
1803 :
1804 /* Supported prim types without primitive restart + allow instancing: */
1805 (1 << prim) & ((1 << PIPE_PRIM_TRIANGLES) | (1 << PIPE_PRIM_TRIANGLE_STRIP) |
1806 (1 << PIPE_PRIM_TRIANGLE_FAN)) &&
1807 /* Instancing is limited to 16-bit indices, because InstanceID is packed into
1808 VertexID. */
1809 /* TODO: DrawArraysInstanced sometimes doesn't work, so it's disabled. */
1810 (instance_count == 1 ||
1811 (instance_count <= USHRT_MAX && index_size && index_size <= 2) ||
1812 pd_msg("instance_count too large or index_size == 4 or DrawArraysInstanced"))) &&
1813 (info->drawid == 0 || !sctx->vs_shader.cso->info.uses_drawid || pd_msg("draw_id > 0")) &&
1814 (!sctx->render_cond || pd_msg("render condition")) &&
1815 /* Forced enablement ignores pipeline statistics queries. */
1816 (sctx->screen->debug_flags & (DBG(PD) | DBG(ALWAYS_PD)) ||
1817 (!sctx->num_pipeline_stat_queries && !sctx->streamout.prims_gen_query_enabled) ||
1818 pd_msg("pipestat or primgen query")) &&
1819 (!sctx->vertex_elements->instance_divisor_is_fetched || pd_msg("loads instance divisors")) &&
1820 (!sctx->tes_shader.cso || pd_msg("uses tess")) &&
1821 (!sctx->gs_shader.cso || pd_msg("uses GS")) &&
1822 (!sctx->ps_shader.cso->info.uses_primid || pd_msg("PS uses PrimID")) &&
1823 !rs->polygon_mode_enabled &&
1824 #if SI_PRIM_DISCARD_DEBUG /* same as cso->prim_discard_cs_allowed */
1825 (!sctx->vs_shader.cso->info.uses_bindless_images || pd_msg("uses bindless images")) &&
1826 (!sctx->vs_shader.cso->info.uses_bindless_samplers || pd_msg("uses bindless samplers")) &&
1827 (!sctx->vs_shader.cso->info.writes_memory || pd_msg("writes memory")) &&
1828 (!sctx->vs_shader.cso->info.writes_viewport_index || pd_msg("writes viewport index")) &&
1829 !sctx->vs_shader.cso->info.base.vs.window_space_position &&
1830 !sctx->vs_shader.cso->so.num_outputs &&
1831 #else
1832 (sctx->vs_shader.cso->prim_discard_cs_allowed ||
1833 pd_msg("VS shader uses unsupported features")) &&
1834 #endif
1835 /* Check that all buffers are used as read-only, because compute
1836 * dispatches can run ahead. */
1837 (si_all_vs_resources_read_only(sctx, index_size ? indexbuf : NULL) ||
1838 pd_msg("write reference"))) {
1839 switch (si_prepare_prim_discard_or_split_draw(sctx, info, primitive_restart)) {
1840 case SI_PRIM_DISCARD_ENABLED:
1841 original_index_size = index_size;
1842 prim_discard_cs_instancing = instance_count > 1;
1843 dispatch_prim_discard_cs = true;
1844
1845 /* The compute shader changes/lowers the following: */
1846 prim = PIPE_PRIM_TRIANGLES;
1847 index_size = 4;
1848 instance_count = 1;
1849 primitive_restart = false;
1850 sctx->compute_num_verts_rejected -= direct_count;
1851 sctx->compute_num_verts_accepted += direct_count;
1852 break;
1853 case SI_PRIM_DISCARD_DISABLED:
1854 break;
1855 case SI_PRIM_DISCARD_DRAW_SPLIT:
1856 sctx->compute_num_verts_rejected -= direct_count;
1857 goto return_cleanup;
1858 }
1859 }
1860
1861 if (prim_discard_cs_instancing != sctx->prim_discard_cs_instancing) {
1862 sctx->prim_discard_cs_instancing = prim_discard_cs_instancing;
1863 sctx->do_update_shaders = true;
1864 }
1865
1866 /* Update NGG culling settings. */
1867 if (sctx->ngg && !dispatch_prim_discard_cs && rast_prim == PIPE_PRIM_TRIANGLES &&
1868 !sctx->gs_shader.cso && /* GS doesn't support NGG culling. */
1869 (sctx->screen->always_use_ngg_culling_all ||
1870 (sctx->tes_shader.cso && sctx->screen->always_use_ngg_culling_tess) ||
1871 /* At least 1024 non-indexed vertices (8 subgroups) are needed
1872 * per draw call (no TES/GS) to enable NGG culling.
1873 */
1874 (!index_size && direct_count >= 1024 &&
1875 (prim == PIPE_PRIM_TRIANGLES || prim == PIPE_PRIM_TRIANGLE_STRIP) &&
1876 !sctx->tes_shader.cso)) &&
1877 si_get_vs(sctx)->cso->ngg_culling_allowed) {
1878 unsigned ngg_culling = 0;
1879
1880 if (rs->rasterizer_discard) {
1881 ngg_culling |= SI_NGG_CULL_FRONT_FACE | SI_NGG_CULL_BACK_FACE;
1882 } else {
1883 /* Polygon mode can't use view and small primitive culling,
1884 * because it draws points or lines where the culling depends
1885 * on the point or line width.
1886 */
1887 if (!rs->polygon_mode_enabled)
1888 ngg_culling |= SI_NGG_CULL_VIEW_SMALLPRIMS;
1889
1890 if (sctx->viewports.y_inverted ? rs->cull_back : rs->cull_front)
1891 ngg_culling |= SI_NGG_CULL_FRONT_FACE;
1892 if (sctx->viewports.y_inverted ? rs->cull_front : rs->cull_back)
1893 ngg_culling |= SI_NGG_CULL_BACK_FACE;
1894 }
1895
1896 /* Use NGG fast launch for certain non-indexed primitive types.
1897 * A draw must have at least 1 full primitive.
1898 */
1899 if (ngg_culling && !index_size && direct_count >= 3 && !sctx->tes_shader.cso &&
1900 !sctx->gs_shader.cso) {
1901 if (prim == PIPE_PRIM_TRIANGLES)
1902 ngg_culling |= SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST;
1903 else if (prim == PIPE_PRIM_TRIANGLE_STRIP)
1904 ngg_culling |= SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP;
1905 }
1906
1907 if (ngg_culling != sctx->ngg_culling) {
1908 /* Insert a VGT_FLUSH when the culling settings change and fast
1909 * launch is used, to prevent hangs. See issues #2418, #2426, #2434
1910 */
1911 if (ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL)
1912 sctx->flags |= SI_CONTEXT_VGT_FLUSH;
1913 sctx->ngg_culling = ngg_culling;
1914 sctx->do_update_shaders = true;
1915 }
1916 } else if (sctx->ngg_culling) {
1917 sctx->ngg_culling = false;
1918 sctx->do_update_shaders = true;
1919 }
1920
1921 if (sctx->do_update_shaders && !si_update_shaders(sctx))
1922 goto return_cleanup;
1923
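/* Make sure the gfx CS has enough space for the draw; this may flush. */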
1924 si_need_gfx_cs_space(sctx);
1925
1926 /* If we're using a secure context, determine whether this CS must be secure. */
1927 if (unlikely(sctx->ws->ws_is_secure(sctx->ws))) {
1928 bool secure = si_gfx_resources_check_encrypted(sctx);
1929 if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
1930 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
1931 sctx->ws->cs_set_secure(sctx->gfx_cs, secure);
1932 }
1933 }
1934
1935 if (sctx->bo_list_add_all_gfx_resources)
1936 si_gfx_resources_add_all_to_bo_list(sctx);
1937
1938 /* Since we've called si_context_add_resource_size for vertex buffers,
1939 * this must be called after si_need_gfx_cs_space, because we must let
1940 * it flush before we add buffers to the buffer list.
1941 */
1942 if (!si_upload_vertex_buffer_descriptors(sctx))
1943 goto return_cleanup;
1944
1945 /* Vega10/Raven scissor bug workaround. When any context register is
1946 * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
1947 * registers must be written too.
1948 */
1949 unsigned masked_atoms = 0;
1950
1951 if (sctx->screen->info.has_gfx9_scissor_bug) {
1952 masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
1953
1954 if (info->count_from_stream_output ||
1955 sctx->dirty_atoms & si_atoms_that_always_roll_context() ||
1956 sctx->dirty_states & si_states_that_always_roll_context())
1957 sctx->context_roll = true;
1958 }
1959
1960 /* Use optimal packet order based on whether we need to sync the pipeline. */
1961 if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB |
1962 SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH))) {
1963 /* If we have to wait for idle, set all states first, so that all
1964 * SET packets are processed in parallel with previous draw calls.
1965 * Then draw and prefetch at the end. This ensures that the time
1966 * the CUs are idle is very short.
1967 */
1968 if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
1969 masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);
1970
1971 if (!si_upload_graphics_shader_descriptors(sctx))
1972 goto return_cleanup;
1973
1974 /* Emit all states except possibly render condition. */
1975 si_emit_all_states(sctx, info, prim, instance_count, primitive_restart, masked_atoms);
1976 sctx->emit_cache_flush(sctx);
1977 /* <-- CUs are idle here. */
1978
1979 if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
1980 sctx->atoms.s.render_cond.emit(sctx);
1981
1982 if (sctx->screen->info.has_gfx9_scissor_bug &&
1983 (sctx->context_roll || si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
1984 sctx->atoms.s.scissors.emit(sctx);
1985
1986 sctx->dirty_atoms = 0;
1987
1988 si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset, instance_count,
1989 dispatch_prim_discard_cs, original_index_size);
1990 /* <-- CUs are busy here. */
1991
1992 /* Start prefetches after the draw has been started. Both will run
1993 * in parallel, but starting the draw first is more important.
1994 */
1995 if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
1996 cik_emit_prefetch_L2(sctx, false);
1997 } else {
1998 /* If we don't wait for idle, start prefetches first, then set
1999 * states, and draw at the end.
2000 */
2001 if (sctx->flags)
2002 sctx->emit_cache_flush(sctx);
2003
2004 /* Only prefetch the API VS and VBO descriptors. */
2005 if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
2006 cik_emit_prefetch_L2(sctx, true);
2007
2008 if (!si_upload_graphics_shader_descriptors(sctx))
2009 goto return_cleanup;
2010
2011 si_emit_all_states(sctx, info, prim, instance_count, primitive_restart, masked_atoms);
2012
2013 if (sctx->screen->info.has_gfx9_scissor_bug &&
2014 (sctx->context_roll || si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
2015 sctx->atoms.s.scissors.emit(sctx);
2016
2017 sctx->dirty_atoms = 0;
2018
2019 si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset, instance_count,
2020 dispatch_prim_discard_cs, original_index_size);
2021
2022 /* Prefetch the remaining shaders after the draw has been
2023 * started. */
2024 if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
2025 cik_emit_prefetch_L2(sctx, false);
2026 }
2027
2028 /* Mark the displayable DCC buffer as dirty in order to update
2029 * it on the next call to si_flush_resource. */
2030 if (sctx->screen->info.use_display_dcc_with_retile_blit) {
2031 /* Don't use si_update_fb_dirtiness_after_rendering because it'll
2032 * cause unnecessary texture decompressions on each draw. */
2033 unsigned displayable_dcc_cb_mask = sctx->framebuffer.displayable_dcc_cb_mask;
2034 while (displayable_dcc_cb_mask) {
2035 unsigned i = u_bit_scan(&displayable_dcc_cb_mask);
2036 struct pipe_surface *surf = sctx->framebuffer.state.cbufs[i];
2037 struct si_texture *tex = (struct si_texture *)surf->texture;
2038 tex->displayable_dcc_dirty = true;
2039 }
2040 }
2041
2042 /* Clear the context roll flag after the draw call. */
2043 sctx->context_roll = false;
2044
2045 if (unlikely(sctx->current_saved_cs)) {
2046 si_trace_emit(sctx);
2047 si_log_draw_state(sctx, sctx->log);
2048 }
2049
2050 /* Workaround for a VGT hang when streamout is enabled.
2051 * It must be done after drawing. */
2052 if ((sctx->family == CHIP_HAWAII || sctx->family == CHIP_TONGA || sctx->family == CHIP_FIJI) &&
2053 si_get_strmout_en(sctx)) {
2054 sctx->flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
2055 }
2056
2057 if (unlikely(sctx->decompression_enabled)) {
2058 sctx->num_decompress_calls++;
2059 } else {
2060 sctx->num_draw_calls++;
2061 if (sctx->framebuffer.state.nr_cbufs > 1)
2062 sctx->num_mrt_draw_calls++;
2063 if (primitive_restart)
2064 sctx->num_prim_restart_calls++;
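/* A non-zero scratch wave size means at least one bound shader spills
 * registers to scratch memory. */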
2065 if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
2066 sctx->num_spill_draw_calls++;
2067 }
2068
2069 return_cleanup:
2070 if (index_size && indexbuf != info->index.resource)
2071 pipe_resource_reference(&indexbuf, NULL);
2072 }
2073
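/* u_blitter callback that draws a screen-aligned rectangle. The corner
 * coordinates, depth, and the optional color/texcoord attribute are
 * passed to the blit vertex shader via the vs_blit_sh_data user data,
 * and the rectangle is drawn as a 3-vertex RECTANGLE_LIST primitive.
 */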
2074 static void si_draw_rectangle(struct blitter_context *blitter, void *vertex_elements_cso,
2075 blitter_get_vs_func get_vs, int x1, int y1, int x2, int y2,
2076 float depth, unsigned num_instances, enum blitter_attrib_type type,
2077 const union blitter_attrib *attrib)
2078 {
2079 struct pipe_context *pipe = util_blitter_get_pipe(blitter);
2080 struct si_context *sctx = (struct si_context *)pipe;
2081
2082 /* Pack position coordinates as signed int16. */
2083 sctx->vs_blit_sh_data[0] = (uint32_t)(x1 & 0xffff) | ((uint32_t)(y1 & 0xffff) << 16);
2084 sctx->vs_blit_sh_data[1] = (uint32_t)(x2 & 0xffff) | ((uint32_t)(y2 & 0xffff) << 16);
2085 sctx->vs_blit_sh_data[2] = fui(depth);
2086
2087 switch (type) {
2088 case UTIL_BLITTER_ATTRIB_COLOR:
2089 memcpy(&sctx->vs_blit_sh_data[3], attrib->color, sizeof(float) * 4);
2090 break;
2091 case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
2092 case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
2093 memcpy(&sctx->vs_blit_sh_data[3], &attrib->texcoord, sizeof(attrib->texcoord));
2094 break;
2095 case UTIL_BLITTER_ATTRIB_NONE:;
2096 }
2097
2098 pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));
2099
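/* A RECTLIST primitive needs only 3 vertices; the hardware derives the
 * 4th corner. */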
2100 struct pipe_draw_info info = {};
2101 info.mode = SI_PRIM_RECTANGLE_LIST;
2102 info.count = 3;
2103 info.instance_count = num_instances;
2104
2105 /* Don't set per-stage shader pointers for VS. */
2106 sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(VERTEX);
2107 sctx->vertex_buffer_pointer_dirty = false;
2108 sctx->vertex_buffer_user_sgprs_dirty = false;
2109
2110 si_draw_vbo(pipe, &info);
2111 }
2112
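/* Emit an incrementing trace ID: write it into the trace buffer through
 * the CP and encode it into a NOP packet in the IB, so inspecting a
 * saved CS (e.g. after a GPU hang) shows how far execution got.
 */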
2113 void si_trace_emit(struct si_context *sctx)
2114 {
2115 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2116 uint32_t trace_id = ++sctx->current_saved_cs->trace_id;
2117
2118 si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf, 0, 4, V_370_MEM, V_370_ME, &trace_id);
2119
2120 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2121 radeon_emit(cs, AC_ENCODE_TRACE_POINT(trace_id));
2122
2123 if (sctx->log)
2124 u_log_flush(sctx->log);
2125 }
2126
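/* Hook up the draw entry points and build the IA_MULTI_VGT_PARAM table. */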
2127 void si_init_draw_functions(struct si_context *sctx)
2128 {
2129 sctx->b.draw_vbo = si_draw_vbo;
2130
2131 sctx->blitter->draw_rectangle = si_draw_rectangle;
2132
2133 si_init_ia_multi_vgt_param_table(sctx);
2134 }