radeonsi: disallow compute-based culling if polygon mode is enabled
[mesa.git] src/gallium/drivers/radeonsi/si_state_draw.c
1 /*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "si_build_pm4.h"
26 #include "sid.h"
27
28 #include "util/u_index_modify.h"
29 #include "util/u_log.h"
30 #include "util/u_upload_mgr.h"
31 #include "util/u_prim.h"
32 #include "util/u_suballoc.h"
33
34 #include "ac_debug.h"
35
36 /* special primitive types */
37 #define SI_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX
38
39 static unsigned si_conv_pipe_prim(unsigned mode)
40 {
41 static const unsigned prim_conv[] = {
42 [PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
43 [PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
44 [PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
45 [PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
46 [PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
47 [PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
48 [PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
49 [PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
50 [PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
51 [PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
52 [PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
53 [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
54 [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
55 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
56 [PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
57 [SI_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
58 };
59 assert(mode < ARRAY_SIZE(prim_conv));
60 return prim_conv[mode];
61 }
62
63 /**
64 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
65 * LS.LDS_SIZE is shared by all 3 shader stages.
66 *
67 * The information about LDS and other non-compile-time parameters is then
68 * written to userdata SGPRs.
69 */
70 static void si_emit_derived_tess_state(struct si_context *sctx,
71 const struct pipe_draw_info *info,
72 unsigned *num_patches)
73 {
74 struct radeon_cmdbuf *cs = sctx->gfx_cs;
75 struct si_shader *ls_current;
76 struct si_shader_selector *ls;
77 /* The TES pointer will only be used for sctx->last_tcs.
78 * It would be wrong to think that TCS = TES. */
79 struct si_shader_selector *tcs =
80 sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
81 unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
82 bool has_primid_instancing_bug = sctx->chip_class == GFX6 &&
83 sctx->screen->info.max_se == 1;
84 unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
85 unsigned num_tcs_input_cp = info->vertices_per_patch;
86 unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
87 unsigned num_tcs_patch_outputs;
88 unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
89 unsigned input_patch_size, output_patch_size, output_patch0_offset;
90 unsigned perpatch_output_offset, lds_size;
91 unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
92 unsigned offchip_layout, hardware_lds_size, ls_hs_config;
93
94 /* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
95 if (sctx->chip_class >= GFX9) {
96 if (sctx->tcs_shader.cso)
97 ls_current = sctx->tcs_shader.current;
98 else
99 ls_current = sctx->fixed_func_tcs_shader.current;
100
101 ls = ls_current->key.part.tcs.ls;
102 } else {
103 ls_current = sctx->vs_shader.current;
104 ls = sctx->vs_shader.cso;
105 }
106
107 if (sctx->last_ls == ls_current &&
108 sctx->last_tcs == tcs &&
109 sctx->last_tes_sh_base == tes_sh_base &&
110 sctx->last_num_tcs_input_cp == num_tcs_input_cp &&
111 (!has_primid_instancing_bug ||
112 (sctx->last_tess_uses_primid == tess_uses_primid))) {
113 *num_patches = sctx->last_num_patches;
114 return;
115 }
116
117 sctx->last_ls = ls_current;
118 sctx->last_tcs = tcs;
119 sctx->last_tes_sh_base = tes_sh_base;
120 sctx->last_num_tcs_input_cp = num_tcs_input_cp;
121 sctx->last_tess_uses_primid = tess_uses_primid;
122
123 /* This calculates how shader inputs and outputs among VS, TCS, and TES
124 * are laid out in LDS. */
125 num_tcs_inputs = util_last_bit64(ls->outputs_written);
126
127 if (sctx->tcs_shader.cso) {
128 num_tcs_outputs = util_last_bit64(tcs->outputs_written);
129 num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
130 num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
131 } else {
132 /* No TCS. Route varyings from LS to TES. */
133 num_tcs_outputs = num_tcs_inputs;
134 num_tcs_output_cp = num_tcs_input_cp;
135 num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
136 }
137
138 input_vertex_size = ls->lshs_vertex_stride;
139 output_vertex_size = num_tcs_outputs * 16;
140
141 input_patch_size = num_tcs_input_cp * input_vertex_size;
142
143 pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
144 output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
145
146 /* Ensure that we only need one wave per SIMD so we don't need to check
147 	 * resource usage. This also ensures that the number of TCS input and
148 	 * output vertices per threadgroup is at most 256.
149 */
150 unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
151 *num_patches = 256 / max_verts_per_patch;
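    /* Worked example (illustration only, not new policy): for triangle
     * patches with 3 input and 3 output control points,
     * max_verts_per_patch = 3, so this starts at 256 / 3 = 85 patches per
     * threadgroup; every adjustment below can only lower that number.
     */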
152
153 /* Make sure that the data fits in LDS. This assumes the shaders only
154 * use LDS for the inputs and outputs.
155 *
156 * While GFX7 can use 64K per threadgroup, there is a hang on Stoney
157 * with 2 CUs if we use more than 32K. The closed Vulkan driver also
158 * uses 32K at most on all GCN chips.
159 */
160 hardware_lds_size = 32768;
161 *num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
162 output_patch_size));
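    /* Hypothetical numbers for illustration: if input_patch_size +
     * output_patch_size were 1024 bytes, the 32K budget above would cap
     * *num_patches at 32.
     */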
163
164 /* Make sure the output data fits in the offchip buffer */
165 *num_patches = MIN2(*num_patches,
166 (sctx->screen->tess_offchip_block_dw_size * 4) /
167 output_patch_size);
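    /* Note: tess_offchip_block_dw_size is in dwords; the "* 4" converts it
     * to bytes so the division matches output_patch_size, which is in bytes.
     */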
168
169 /* Not necessary for correctness, but improves performance.
170 * The hardware can do more, but the radeonsi shader constant is
171 * limited to 6 bits.
172 */
173 *num_patches = MIN2(*num_patches, 63); /* triangles: 3 full waves except 3 lanes */
174
175 /* When distributed tessellation is unsupported, switch between SEs
176 * at a higher frequency to compensate for it.
177 */
178 if (!sctx->screen->info.has_distributed_tess && sctx->screen->info.max_se > 1)
179 *num_patches = MIN2(*num_patches, 16); /* recommended */
180
181 /* Make sure that vector lanes are reasonably occupied. It probably
182 * doesn't matter much because this is LS-HS, and TES is likely to
183 * occupy significantly more CUs.
184 */
185 unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
186 unsigned wave_size = sctx->screen->ge_wave_size;
187
188 if (temp_verts_per_tg > wave_size && temp_verts_per_tg % wave_size < wave_size*3/4)
189 *num_patches = (temp_verts_per_tg & ~(wave_size - 1)) / max_verts_per_patch;
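    /* Restating the check above: if the trailing wave would be less than
     * 3/4 full, round the threadgroup down to a whole number of waves and
     * recompute the patch count from that.
     */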
190
191 if (sctx->chip_class == GFX6) {
192 /* GFX6 bug workaround, related to power management. Limit LS-HS
193 * threadgroups to only one wave.
194 */
195 unsigned one_wave = wave_size / max_verts_per_patch;
196 *num_patches = MIN2(*num_patches, one_wave);
197 }
198
199 /* The VGT HS block increments the patch ID unconditionally
200 * within a single threadgroup. This results in incorrect
201 * patch IDs when instanced draws are used.
202 *
203 * The intended solution is to restrict threadgroups to
204 * a single instance by setting SWITCH_ON_EOI, which
205 * should cause IA to split instances up. However, this
206 * doesn't work correctly on GFX6 when there is no other
207 * SE to switch to.
208 */
209 if (has_primid_instancing_bug && tess_uses_primid)
210 *num_patches = 1;
211
212 sctx->last_num_patches = *num_patches;
213
214 output_patch0_offset = input_patch_size * *num_patches;
215 perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
216
217 /* Compute userdata SGPRs. */
218 assert(((input_vertex_size / 4) & ~0xff) == 0);
219 assert(((output_vertex_size / 4) & ~0xff) == 0);
220 assert(((input_patch_size / 4) & ~0x1fff) == 0);
221 assert(((output_patch_size / 4) & ~0x1fff) == 0);
222 assert(((output_patch0_offset / 16) & ~0xffff) == 0);
223 assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
224 assert(num_tcs_input_cp <= 32);
225 assert(num_tcs_output_cp <= 32);
226
227 uint64_t ring_va = si_resource(sctx->tess_rings)->gpu_address;
228 assert((ring_va & u_bit_consecutive(0, 19)) == 0);
229
230 tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
231 S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
232 tcs_out_layout = (output_patch_size / 4) |
233 (num_tcs_input_cp << 13) |
234 ring_va;
235 tcs_out_offsets = (output_patch0_offset / 16) |
236 ((perpatch_output_offset / 16) << 16);
237 offchip_layout = *num_patches |
238 (num_tcs_output_cp << 6) |
239 (pervertex_output_patch_size * *num_patches << 12);
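    /* Rough sketch of the packing above, as implied by the shifts and the
     * asserts earlier in this function (bit ranges, LSB first):
     *   offchip_layout:  [5:0] num_patches, [11:6] num_tcs_output_cp,
     *                    [31:12] per-vertex output size of all patches
     *   tcs_out_offsets: [15:0] output_patch0_offset / 16,
     *                    [31:16] perpatch_output_offset / 16
     */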
240
241 /* Compute the LDS size. */
242 lds_size = output_patch0_offset + output_patch_size * *num_patches;
243
244 if (sctx->chip_class >= GFX7) {
245 assert(lds_size <= 65536);
246 lds_size = align(lds_size, 512) / 512;
247 } else {
248 assert(lds_size <= 32768);
249 lds_size = align(lds_size, 256) / 256;
250 }
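    /* After the divisions above, lds_size is no longer in bytes but in the
     * granule the LDS_SIZE register fields below use: 512-byte units on
     * GFX7+ and 256-byte units on GFX6.
     */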
251
252 /* Set SI_SGPR_VS_STATE_BITS. */
253 sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE &
254 C_VS_STATE_LS_OUT_VERTEX_SIZE;
255 sctx->current_vs_state |= tcs_in_layout;
256
257 /* We should be able to support in-shader LDS use with LLVM >= 9
258 * by just adding the lds_sizes together, but it has never
259 * been tested. */
260 assert(ls_current->config.lds_size == 0);
261
262 if (sctx->chip_class >= GFX9) {
263 unsigned hs_rsrc2 = ls_current->config.rsrc2;
264
265 if (sctx->chip_class >= GFX10)
266 hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(lds_size);
267 else
268 hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(lds_size);
269
270 radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);
271
272 /* Set userdata SGPRs for merged LS-HS. */
273 radeon_set_sh_reg_seq(cs,
274 R_00B430_SPI_SHADER_USER_DATA_LS_0 +
275 GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
276 radeon_emit(cs, offchip_layout);
277 radeon_emit(cs, tcs_out_offsets);
278 radeon_emit(cs, tcs_out_layout);
279 } else {
280 unsigned ls_rsrc2 = ls_current->config.rsrc2;
281
282 si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
283 ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);
284
285 /* Due to a hw bug, RSRC2_LS must be written twice with another
286 * LS register written in between. */
287 if (sctx->chip_class == GFX7 && sctx->family != CHIP_HAWAII)
288 radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
289 radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
290 radeon_emit(cs, ls_current->config.rsrc1);
291 radeon_emit(cs, ls_rsrc2);
292
293 /* Set userdata SGPRs for TCS. */
294 radeon_set_sh_reg_seq(cs,
295 R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
296 radeon_emit(cs, offchip_layout);
297 radeon_emit(cs, tcs_out_offsets);
298 radeon_emit(cs, tcs_out_layout);
299 radeon_emit(cs, tcs_in_layout);
300 }
301
302 /* Set userdata SGPRs for TES. */
303 radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
304 radeon_emit(cs, offchip_layout);
305 radeon_emit(cs, ring_va);
306
307 ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
308 S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
309 S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
310
311 if (sctx->last_ls_hs_config != ls_hs_config) {
312 if (sctx->chip_class >= GFX7) {
313 radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
314 ls_hs_config);
315 } else {
316 radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
317 ls_hs_config);
318 }
319 sctx->last_ls_hs_config = ls_hs_config;
320 sctx->context_roll = true;
321 }
322 }
323
324 static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info,
325 enum pipe_prim_type prim)
326 {
327 switch (prim) {
328 case PIPE_PRIM_PATCHES:
329 return info->count / info->vertices_per_patch;
330 case PIPE_PRIM_POLYGON:
331 return info->count >= 3;
332 case SI_PRIM_RECTANGLE_LIST:
333 return info->count / 3;
334 default:
335 return u_decomposed_prims_for_vertices(prim, info->count);
336 }
337 }
338
339 static unsigned
340 si_get_init_multi_vgt_param(struct si_screen *sscreen,
341 union si_vgt_param_key *key)
342 {
343 STATIC_ASSERT(sizeof(union si_vgt_param_key) == 4);
344 unsigned max_primgroup_in_wave = 2;
345
346 /* SWITCH_ON_EOP(0) is always preferable. */
347 bool wd_switch_on_eop = false;
348 bool ia_switch_on_eop = false;
349 bool ia_switch_on_eoi = false;
350 bool partial_vs_wave = false;
351 bool partial_es_wave = false;
352
353 if (key->u.uses_tess) {
354 /* SWITCH_ON_EOI must be set if PrimID is used. */
355 if (key->u.tess_uses_prim_id)
356 ia_switch_on_eoi = true;
357
358 /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
359 if ((sscreen->info.family == CHIP_TAHITI ||
360 sscreen->info.family == CHIP_PITCAIRN ||
361 sscreen->info.family == CHIP_BONAIRE) &&
362 key->u.uses_gs)
363 partial_vs_wave = true;
364
365 /* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
366 if (sscreen->info.has_distributed_tess) {
367 if (key->u.uses_gs) {
368 if (sscreen->info.chip_class == GFX8)
369 partial_es_wave = true;
370 } else {
371 partial_vs_wave = true;
372 }
373 }
374 }
375
376 /* This is a hardware requirement. */
377 if (key->u.line_stipple_enabled ||
378 (sscreen->debug_flags & DBG(SWITCH_ON_EOP))) {
379 ia_switch_on_eop = true;
380 wd_switch_on_eop = true;
381 }
382
383 if (sscreen->info.chip_class >= GFX7) {
384 /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
385 * 4 shader engines. Set 1 to pass the assertion below.
386 * The other cases are hardware requirements.
387 *
388 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
389 * for points, line strips, and tri strips.
390 */
391 if (sscreen->info.max_se <= 2 ||
392 key->u.prim == PIPE_PRIM_POLYGON ||
393 key->u.prim == PIPE_PRIM_LINE_LOOP ||
394 key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
395 key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
396 (key->u.primitive_restart &&
397 (sscreen->info.family < CHIP_POLARIS10 ||
398 (key->u.prim != PIPE_PRIM_POINTS &&
399 key->u.prim != PIPE_PRIM_LINE_STRIP &&
400 key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
401 key->u.count_from_stream_output)
402 wd_switch_on_eop = true;
403
404 /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
405 * We don't know that for indirect drawing, so treat it as
406 * always problematic. */
407 if (sscreen->info.family == CHIP_HAWAII &&
408 key->u.uses_instancing)
409 wd_switch_on_eop = true;
410
411 /* Performance recommendation for 4 SE Gfx7-8 parts if
412 * instances are smaller than a primgroup.
413 * Assume indirect draws always use small instances.
414 * This is needed for good VS wave utilization.
415 */
416 if (sscreen->info.chip_class <= GFX8 &&
417 sscreen->info.max_se == 4 &&
418 key->u.multi_instances_smaller_than_primgroup)
419 wd_switch_on_eop = true;
420
421 /* Required on GFX7 and later. */
422 if (sscreen->info.max_se == 4 && !wd_switch_on_eop)
423 ia_switch_on_eoi = true;
424
425 /* HW engineers suggested that PARTIAL_VS_WAVE_ON should be set
426 * to work around a GS hang.
427 */
428 if (key->u.uses_gs &&
429 (sscreen->info.family == CHIP_TONGA ||
430 sscreen->info.family == CHIP_FIJI ||
431 sscreen->info.family == CHIP_POLARIS10 ||
432 sscreen->info.family == CHIP_POLARIS11 ||
433 sscreen->info.family == CHIP_POLARIS12 ||
434 sscreen->info.family == CHIP_VEGAM))
435 partial_vs_wave = true;
436
437 /* Required by Hawaii and, for some special cases, by GFX8. */
438 if (ia_switch_on_eoi &&
439 (sscreen->info.family == CHIP_HAWAII ||
440 (sscreen->info.chip_class == GFX8 &&
441 (key->u.uses_gs || max_primgroup_in_wave != 2))))
442 partial_vs_wave = true;
443
444 /* Instancing bug on Bonaire. */
445 if (sscreen->info.family == CHIP_BONAIRE && ia_switch_on_eoi &&
446 key->u.uses_instancing)
447 partial_vs_wave = true;
448
449 /* This only applies to Polaris10 and later 4 SE chips.
450 * wd_switch_on_eop is already true on all other chips.
451 */
452 if (!wd_switch_on_eop && key->u.primitive_restart)
453 partial_vs_wave = true;
454
455 /* If the WD switch is false, the IA switch must be false too. */
456 assert(wd_switch_on_eop || !ia_switch_on_eop);
457 }
458
459 /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
460 if (sscreen->info.chip_class <= GFX8 && ia_switch_on_eoi)
461 partial_es_wave = true;
462
463 return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
464 S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
465 S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
466 S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
467 S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= GFX7 ? wd_switch_on_eop : 0) |
468 /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
469 S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == GFX8 ?
470 max_primgroup_in_wave : 0) |
471 S_030960_EN_INST_OPT_BASIC(sscreen->info.chip_class >= GFX9) |
472 S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
473 }
474
475 static void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
476 {
477 for (int prim = 0; prim <= SI_PRIM_RECTANGLE_LIST; prim++)
478 for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
479 for (int multi_instances = 0; multi_instances < 2; multi_instances++)
480 for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
481 for (int count_from_so = 0; count_from_so < 2; count_from_so++)
482 for (int line_stipple = 0; line_stipple < 2; line_stipple++)
483 for (int uses_tess = 0; uses_tess < 2; uses_tess++)
484 for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
485 for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
486 union si_vgt_param_key key;
487
488 key.index = 0;
489 key.u.prim = prim;
490 key.u.uses_instancing = uses_instancing;
491 key.u.multi_instances_smaller_than_primgroup = multi_instances;
492 key.u.primitive_restart = primitive_restart;
493 key.u.count_from_stream_output = count_from_so;
494 key.u.line_stipple_enabled = line_stipple;
495 key.u.uses_tess = uses_tess;
496 key.u.tess_uses_prim_id = tess_uses_primid;
497 key.u.uses_gs = uses_gs;
498
499 sctx->ia_multi_vgt_param[key.index] =
500 si_get_init_multi_vgt_param(sctx->screen, &key);
501 }
502 }
503
504 static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
505 const struct pipe_draw_info *info,
506 enum pipe_prim_type prim,
507 unsigned num_patches,
508 unsigned instance_count,
509 bool primitive_restart)
510 {
511 union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
512 unsigned primgroup_size;
513 unsigned ia_multi_vgt_param;
514
515 if (sctx->tes_shader.cso) {
516 primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
517 } else if (sctx->gs_shader.cso) {
518 primgroup_size = 64; /* recommended with a GS */
519 } else {
520 primgroup_size = 128; /* recommended without a GS and tess */
521 }
522
523 key.u.prim = prim;
524 key.u.uses_instancing = info->indirect || instance_count > 1;
525 key.u.multi_instances_smaller_than_primgroup =
526 info->indirect ||
527 (instance_count > 1 &&
528 (info->count_from_stream_output ||
529 si_num_prims_for_vertices(info, prim) < primgroup_size));
530 key.u.primitive_restart = primitive_restart;
531 key.u.count_from_stream_output = info->count_from_stream_output != NULL;
532
533 ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
534 S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);
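    /* The "- 1" reflects that PRIMGROUP_SIZE appears to be a minus-one
     * encoded field (a primgroup size of 128 is programmed as 127).
     */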
535
536 if (sctx->gs_shader.cso) {
537 /* GS requirement. */
538 if (sctx->chip_class <= GFX8 &&
539 SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
540 ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);
541
542 /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
543 * The hw doc says all multi-SE chips are affected, but Vulkan
544 * only applies it to Hawaii. Do what Vulkan does.
545 */
546 if (sctx->family == CHIP_HAWAII &&
547 G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
548 (info->indirect ||
549 (instance_count > 1 &&
550 (info->count_from_stream_output ||
551 si_num_prims_for_vertices(info, prim) <= 1))))
552 sctx->flags |= SI_CONTEXT_VGT_FLUSH;
553 }
554
555 return ia_multi_vgt_param;
556 }
557
558 static unsigned si_conv_prim_to_gs_out(unsigned mode)
559 {
560 static const int prim_conv[] = {
561 [PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
562 [PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
563 [PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
564 [PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
565 [PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
566 [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
567 [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
568 [PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
569 [PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
570 [PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
571 [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
572 [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
573 [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
574 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
575 [PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
576 [SI_PRIM_RECTANGLE_LIST] = V_028A6C_VGT_OUT_RECT_V0,
577 };
578 assert(mode < ARRAY_SIZE(prim_conv));
579
580 return prim_conv[mode];
581 }
582
583 /* rast_prim is the primitive type after GS. */
584 static void si_emit_rasterizer_prim_state(struct si_context *sctx)
585 {
586 struct radeon_cmdbuf *cs = sctx->gfx_cs;
587 enum pipe_prim_type rast_prim = sctx->current_rast_prim;
588 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
589 bool use_ngg = sctx->screen->use_ngg;
590
591 if (likely(rast_prim == sctx->last_rast_prim &&
592 rs->pa_sc_line_stipple == sctx->last_sc_line_stipple &&
593 (!use_ngg ||
594 rs->flatshade_first == sctx->last_flatshade_first)))
595 return;
596
597 if (util_prim_is_lines(rast_prim)) {
598 /* For lines, reset the stipple pattern at each primitive. Otherwise,
599 * reset the stipple pattern at each packet (line strips, line loops).
600 */
601 radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
602 rs->pa_sc_line_stipple |
603 S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));
604 sctx->context_roll = true;
605 }
606
607 unsigned gs_out = si_conv_prim_to_gs_out(sctx->current_rast_prim);
608
609 if (rast_prim != sctx->last_rast_prim &&
610 (sctx->ngg || sctx->gs_shader.cso)) {
611 radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);
612 sctx->context_roll = true;
613
614 if (use_ngg) {
615 sctx->current_vs_state &= C_VS_STATE_OUTPRIM;
616 sctx->current_vs_state |= S_VS_STATE_OUTPRIM(gs_out);
617 }
618 }
619
620 if (use_ngg) {
621 unsigned vtx_index = rs->flatshade_first ? 0 : gs_out;
622 sctx->current_vs_state &= C_VS_STATE_PROVOKING_VTX_INDEX;
623 sctx->current_vs_state |= S_VS_STATE_PROVOKING_VTX_INDEX(vtx_index);
624 }
625
626 sctx->last_rast_prim = rast_prim;
627 sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
628 sctx->last_flatshade_first = rs->flatshade_first;
629 }
630
631 static void si_emit_vs_state(struct si_context *sctx,
632 const struct pipe_draw_info *info)
633 {
634 sctx->current_vs_state &= C_VS_STATE_INDEXED;
635 sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);
636
637 if (sctx->num_vs_blit_sgprs) {
638 /* Re-emit the state after we leave u_blitter. */
639 sctx->last_vs_state = ~0;
640 return;
641 }
642
643 if (sctx->current_vs_state != sctx->last_vs_state) {
644 struct radeon_cmdbuf *cs = sctx->gfx_cs;
645
646 /* For the API vertex shader (VS_STATE_INDEXED, LS_OUT_*). */
647 radeon_set_sh_reg(cs,
648 sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
649 SI_SGPR_VS_STATE_BITS * 4,
650 sctx->current_vs_state);
651
652 /* Set CLAMP_VERTEX_COLOR and OUTPRIM in the last stage
653 * before the rasterizer.
654 *
655 * For TES or the GS copy shader without NGG:
656 */
657 if (sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] !=
658 R_00B130_SPI_SHADER_USER_DATA_VS_0) {
659 radeon_set_sh_reg(cs,
660 R_00B130_SPI_SHADER_USER_DATA_VS_0 +
661 SI_SGPR_VS_STATE_BITS * 4,
662 sctx->current_vs_state);
663 }
664
665 /* For NGG: */
666 if (sctx->screen->use_ngg &&
667 sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] !=
668 R_00B230_SPI_SHADER_USER_DATA_GS_0) {
669 radeon_set_sh_reg(cs,
670 R_00B230_SPI_SHADER_USER_DATA_GS_0 +
671 SI_SGPR_VS_STATE_BITS * 4,
672 sctx->current_vs_state);
673 }
674
675 sctx->last_vs_state = sctx->current_vs_state;
676 }
677 }
678
679 static inline bool si_prim_restart_index_changed(struct si_context *sctx,
680 bool primitive_restart,
681 unsigned restart_index)
682 {
683 return primitive_restart &&
684 (restart_index != sctx->last_restart_index ||
685 sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
686 }
687
688 static void si_emit_ia_multi_vgt_param(struct si_context *sctx,
689 const struct pipe_draw_info *info,
690 enum pipe_prim_type prim,
691 unsigned num_patches,
692 unsigned instance_count,
693 bool primitive_restart)
694 {
695 struct radeon_cmdbuf *cs = sctx->gfx_cs;
696 unsigned ia_multi_vgt_param;
697
698 ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, prim, num_patches,
699 instance_count, primitive_restart);
700
701 /* Draw state. */
702 if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
703 if (sctx->chip_class == GFX9)
704 radeon_set_uconfig_reg_idx(cs, sctx->screen,
705 R_030960_IA_MULTI_VGT_PARAM, 4,
706 ia_multi_vgt_param);
707 else if (sctx->chip_class >= GFX7)
708 radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
709 else
710 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
711
712 sctx->last_multi_vgt_param = ia_multi_vgt_param;
713 }
714 }
715
716 /* GFX10 removed IA_MULTI_VGT_PARAM in exchange for GE_CNTL.
717 * We overload last_multi_vgt_param.
718 */
719 static void gfx10_emit_ge_cntl(struct si_context *sctx, unsigned num_patches)
720 {
721 union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
722 unsigned ge_cntl;
723
724 if (sctx->ngg) {
725 if (sctx->tes_shader.cso) {
726 ge_cntl = S_03096C_PRIM_GRP_SIZE(num_patches) |
727 S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
728 S_03096C_BREAK_WAVE_AT_EOI(key.u.tess_uses_prim_id);
729 } else {
730 ge_cntl = si_get_vs_state(sctx)->ge_cntl;
731 }
732 } else {
733 unsigned primgroup_size;
734 		unsigned vertgroup_size = 256; /* 256 = disable vertex grouping */
735
736 if (sctx->tes_shader.cso) {
737 primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
738 } else if (sctx->gs_shader.cso) {
739 unsigned vgt_gs_onchip_cntl = sctx->gs_shader.current->ctx_reg.gs.vgt_gs_onchip_cntl;
740 primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
741 } else {
742 primgroup_size = 128; /* recommended without a GS and tess */
743 }
744
745 ge_cntl = S_03096C_PRIM_GRP_SIZE(primgroup_size) |
746 S_03096C_VERT_GRP_SIZE(vertgroup_size) |
747 S_03096C_BREAK_WAVE_AT_EOI(key.u.uses_tess && key.u.tess_uses_prim_id);
748 }
749
750 ge_cntl |= S_03096C_PACKET_TO_ONE_PA(key.u.line_stipple_enabled);
751
752 if (ge_cntl != sctx->last_multi_vgt_param) {
753 radeon_set_uconfig_reg(sctx->gfx_cs, R_03096C_GE_CNTL, ge_cntl);
754 sctx->last_multi_vgt_param = ge_cntl;
755 }
756 }
757
758 static void si_emit_draw_registers(struct si_context *sctx,
759 const struct pipe_draw_info *info,
760 enum pipe_prim_type prim,
761 unsigned num_patches,
762 unsigned instance_count,
763 bool primitive_restart)
764 {
765 struct radeon_cmdbuf *cs = sctx->gfx_cs;
766 unsigned vgt_prim = si_conv_pipe_prim(prim);
767
768 if (sctx->chip_class >= GFX10)
769 gfx10_emit_ge_cntl(sctx, num_patches);
770 else
771 si_emit_ia_multi_vgt_param(sctx, info, prim, num_patches,
772 instance_count, primitive_restart);
773
774 if (vgt_prim != sctx->last_prim) {
775 if (sctx->chip_class >= GFX10)
776 radeon_set_uconfig_reg(cs, R_030908_VGT_PRIMITIVE_TYPE, vgt_prim);
777 else if (sctx->chip_class >= GFX7)
778 radeon_set_uconfig_reg_idx(cs, sctx->screen,
779 R_030908_VGT_PRIMITIVE_TYPE, 1, vgt_prim);
780 else
781 radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, vgt_prim);
782
783 sctx->last_prim = vgt_prim;
784 }
785
786 /* Primitive restart. */
787 if (primitive_restart != sctx->last_primitive_restart_en) {
788 if (sctx->chip_class >= GFX9)
789 radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
790 primitive_restart);
791 else
792 radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
793 primitive_restart);
794
795 sctx->last_primitive_restart_en = primitive_restart;
796
797 }
798 if (si_prim_restart_index_changed(sctx, primitive_restart, info->restart_index)) {
799 radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
800 info->restart_index);
801 sctx->last_restart_index = info->restart_index;
802 sctx->context_roll = true;
803 }
804 }
805
806 static void si_emit_draw_packets(struct si_context *sctx,
807 const struct pipe_draw_info *info,
808 struct pipe_resource *indexbuf,
809 unsigned index_size,
810 unsigned index_offset,
811 unsigned instance_count,
812 bool dispatch_prim_discard_cs,
813 unsigned original_index_size)
814 {
815 struct pipe_draw_indirect_info *indirect = info->indirect;
816 struct radeon_cmdbuf *cs = sctx->gfx_cs;
817 unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
818 bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
819 uint32_t index_max_size = 0;
820 uint64_t index_va = 0;
821
822 if (info->count_from_stream_output) {
823 struct si_streamout_target *t =
824 (struct si_streamout_target*)info->count_from_stream_output;
825
826 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
827 t->stride_in_dw);
828 si_cp_copy_data(sctx, sctx->gfx_cs,
829 COPY_DATA_REG, NULL,
830 R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2,
831 COPY_DATA_SRC_MEM, t->buf_filled_size,
832 t->buf_filled_size_offset);
833 }
834
835 /* draw packet */
836 if (index_size) {
837 if (index_size != sctx->last_index_size) {
838 unsigned index_type;
839
840 /* index type */
841 switch (index_size) {
842 case 1:
843 index_type = V_028A7C_VGT_INDEX_8;
844 break;
845 case 2:
846 index_type = V_028A7C_VGT_INDEX_16 |
847 (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ?
848 V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
849 break;
850 case 4:
851 index_type = V_028A7C_VGT_INDEX_32 |
852 (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ?
853 V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
854 break;
855 default:
856 assert(!"unreachable");
857 return;
858 }
859
860 if (sctx->chip_class >= GFX9) {
861 radeon_set_uconfig_reg_idx(cs, sctx->screen,
862 R_03090C_VGT_INDEX_TYPE, 2,
863 index_type);
864 } else {
865 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
866 radeon_emit(cs, index_type);
867 }
868
869 sctx->last_index_size = index_size;
870 }
871
872 if (original_index_size) {
873 index_max_size = (indexbuf->width0 - index_offset) /
874 original_index_size;
875 /* Skip draw calls with 0-sized index buffers.
876 * They cause a hang on some chips, like Navi10-14.
877 */
878 if (!index_max_size)
879 return;
880
881 index_va = si_resource(indexbuf)->gpu_address + index_offset;
882
883 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
884 si_resource(indexbuf),
885 RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
886 }
887 } else {
888 /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
889 * so the state must be re-emitted before the next indexed draw.
890 */
891 if (sctx->chip_class >= GFX7)
892 sctx->last_index_size = -1;
893 }
894
895 if (indirect) {
896 uint64_t indirect_va = si_resource(indirect->buffer)->gpu_address;
897
898 assert(indirect_va % 8 == 0);
899
900 si_invalidate_draw_sh_constants(sctx);
901
902 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
903 radeon_emit(cs, 1);
904 radeon_emit(cs, indirect_va);
905 radeon_emit(cs, indirect_va >> 32);
906
907 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
908 si_resource(indirect->buffer),
909 RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
910
911 unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
912 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
913
914 assert(indirect->offset % 4 == 0);
915
916 if (index_size) {
917 radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
918 radeon_emit(cs, index_va);
919 radeon_emit(cs, index_va >> 32);
920
921 radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
922 radeon_emit(cs, index_max_size);
923 }
924
925 if (!sctx->screen->has_draw_indirect_multi) {
926 radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT
927 : PKT3_DRAW_INDIRECT,
928 3, render_cond_bit));
929 radeon_emit(cs, indirect->offset);
930 radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
931 radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
932 radeon_emit(cs, di_src_sel);
933 } else {
934 uint64_t count_va = 0;
935
936 if (indirect->indirect_draw_count) {
937 struct si_resource *params_buf =
938 si_resource(indirect->indirect_draw_count);
939
940 radeon_add_to_buffer_list(
941 sctx, sctx->gfx_cs, params_buf,
942 RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
943
944 count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
945 }
946
947 radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
948 PKT3_DRAW_INDIRECT_MULTI,
949 8, render_cond_bit));
950 radeon_emit(cs, indirect->offset);
951 radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
952 radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
953 radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
954 S_2C3_DRAW_INDEX_ENABLE(1) |
955 S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
956 radeon_emit(cs, indirect->draw_count);
957 radeon_emit(cs, count_va);
958 radeon_emit(cs, count_va >> 32);
959 radeon_emit(cs, indirect->stride);
960 radeon_emit(cs, di_src_sel);
961 }
962 } else {
963 int base_vertex;
964
965 if (sctx->last_instance_count == SI_INSTANCE_COUNT_UNKNOWN ||
966 sctx->last_instance_count != instance_count) {
967 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
968 radeon_emit(cs, instance_count);
969 sctx->last_instance_count = instance_count;
970 }
971
972 /* Base vertex and start instance. */
973 base_vertex = original_index_size ? info->index_bias : info->start;
974
975 if (sctx->num_vs_blit_sgprs) {
976 /* Re-emit draw constants after we leave u_blitter. */
977 si_invalidate_draw_sh_constants(sctx);
978
979 /* Blit VS doesn't use BASE_VERTEX, START_INSTANCE, and DRAWID. */
980 radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_VS_BLIT_DATA * 4,
981 sctx->num_vs_blit_sgprs);
982 radeon_emit_array(cs, sctx->vs_blit_sh_data,
983 sctx->num_vs_blit_sgprs);
984 } else if (base_vertex != sctx->last_base_vertex ||
985 sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
986 info->start_instance != sctx->last_start_instance ||
987 info->drawid != sctx->last_drawid ||
988 sh_base_reg != sctx->last_sh_base_reg) {
989 radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
990 radeon_emit(cs, base_vertex);
991 radeon_emit(cs, info->start_instance);
992 radeon_emit(cs, info->drawid);
993
994 sctx->last_base_vertex = base_vertex;
995 sctx->last_start_instance = info->start_instance;
996 sctx->last_drawid = info->drawid;
997 sctx->last_sh_base_reg = sh_base_reg;
998 }
999
1000 if (index_size) {
1001 if (dispatch_prim_discard_cs) {
1002 index_va += info->start * original_index_size;
1003 index_max_size = MIN2(index_max_size, info->count);
1004
1005 si_dispatch_prim_discard_cs_and_draw(sctx, info,
1006 original_index_size,
1007 base_vertex,
1008 index_va, index_max_size);
1009 return;
1010 }
1011
1012 index_va += info->start * index_size;
1013
1014 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
1015 radeon_emit(cs, index_max_size);
1016 radeon_emit(cs, index_va);
1017 radeon_emit(cs, index_va >> 32);
1018 radeon_emit(cs, info->count);
1019 radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
1020 } else {
1021 radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
1022 radeon_emit(cs, info->count);
1023 radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
1024 S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
1025 }
1026 }
1027 }
1028
1029 void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs,
1030 unsigned cp_coher_cntl)
1031 {
1032 bool compute_ib = !sctx->has_graphics ||
1033 cs == sctx->prim_discard_compute_cs;
1034
1035 assert(sctx->chip_class <= GFX9);
1036
1037 if (sctx->chip_class == GFX9 || compute_ib) {
1038 /* Flush caches and wait for the caches to assert idle. */
1039 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
1040 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
1041 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
1042 radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
1043 radeon_emit(cs, 0); /* CP_COHER_BASE */
1044 radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
1045 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
1046 } else {
1047 /* ACQUIRE_MEM is only required on a compute ring. */
1048 radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
1049 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
1050 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
1051 radeon_emit(cs, 0); /* CP_COHER_BASE */
1052 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
1053 }
1054
1055 /* ACQUIRE_MEM has an implicit context roll if the current context
1056 * is busy. */
1057 if (!compute_ib)
1058 sctx->context_roll = true;
1059 }
1060
1061 void si_prim_discard_signal_next_compute_ib_start(struct si_context *sctx)
1062 {
1063 if (!si_compute_prim_discard_enabled(sctx))
1064 return;
1065
1066 if (!sctx->barrier_buf) {
1067 u_suballocator_alloc(sctx->allocator_zeroed_memory, 4, 4,
1068 &sctx->barrier_buf_offset,
1069 (struct pipe_resource**)&sctx->barrier_buf);
1070 }
1071
1072 /* Emit a placeholder to signal the next compute IB to start.
1073 * See si_compute_prim_discard.c for explanation.
1074 */
1075 uint32_t signal = 1;
1076 si_cp_write_data(sctx, sctx->barrier_buf, sctx->barrier_buf_offset,
1077 4, V_370_MEM, V_370_ME, &signal);
1078
1079 sctx->last_pkt3_write_data =
1080 &sctx->gfx_cs->current.buf[sctx->gfx_cs->current.cdw - 5];
1081
1082 	/* Only the last occurrence of WRITE_DATA will be executed.
1083 * The packet will be enabled in si_flush_gfx_cs.
1084 */
1085 *sctx->last_pkt3_write_data = PKT3(PKT3_NOP, 3, 0);
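    /* Informal note on the trick above: the WRITE_DATA packet emitted by
     * si_cp_write_data is 5 dwords, and last_pkt3_write_data points at its
     * header (cdw - 5). Rewriting that header as a NOP with count = 3 makes
     * the CP skip the 4 body dwords, so the write has no effect until
     * si_flush_gfx_cs restores the real opcode.
     */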
1086 }
1087
1088 void gfx10_emit_cache_flush(struct si_context *ctx)
1089 {
1090 struct radeon_cmdbuf *cs = ctx->gfx_cs;
1091 uint32_t gcr_cntl = 0;
1092 unsigned cb_db_event = 0;
1093 unsigned flags = ctx->flags;
1094
1095 if (!ctx->has_graphics) {
1096 /* Only process compute flags. */
1097 flags &= SI_CONTEXT_INV_ICACHE |
1098 SI_CONTEXT_INV_SCACHE |
1099 SI_CONTEXT_INV_VCACHE |
1100 SI_CONTEXT_INV_L2 |
1101 SI_CONTEXT_WB_L2 |
1102 SI_CONTEXT_INV_L2_METADATA |
1103 SI_CONTEXT_CS_PARTIAL_FLUSH;
1104 }
1105
1106 /* We don't need these. */
1107 assert(!(flags & (SI_CONTEXT_VGT_STREAMOUT_SYNC |
1108 SI_CONTEXT_FLUSH_AND_INV_DB_META)));
1109
1110 if (flags & SI_CONTEXT_VGT_FLUSH) {
1111 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1112 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1113 }
1114
1115 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
1116 ctx->num_cb_cache_flushes++;
1117 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
1118 ctx->num_db_cache_flushes++;
1119
1120 if (flags & SI_CONTEXT_INV_ICACHE)
1121 gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
1122 if (flags & SI_CONTEXT_INV_SCACHE) {
1123 /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
1124 * to FORWARD when both L1 and L2 are written out (WB or INV).
1125 */
1126 gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
1127 }
1128 if (flags & SI_CONTEXT_INV_VCACHE)
1129 gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
1130
1131 /* The L2 cache ops are:
1132 * - INV: - invalidate lines that reflect memory (were loaded from memory)
1133 * - don't touch lines that were overwritten (were stored by gfx clients)
1134 * - WB: - don't touch lines that reflect memory
1135 * - write back lines that were overwritten
1136 * - WB | INV: - invalidate lines that reflect memory
1137 * - write back lines that were overwritten
1138 *
1139 * GLM doesn't support WB alone. If WB is set, INV must be set too.
1140 */
1141 if (flags & SI_CONTEXT_INV_L2) {
1142 /* Writeback and invalidate everything in L2. */
1143 gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) |
1144 S_586_GLM_INV(1) | S_586_GLM_WB(1);
1145 ctx->num_L2_invalidates++;
1146 } else if (flags & SI_CONTEXT_WB_L2) {
1147 gcr_cntl |= S_586_GL2_WB(1) |
1148 S_586_GLM_WB(1) | S_586_GLM_INV(1);
1149 } else if (flags & SI_CONTEXT_INV_L2_METADATA) {
1150 gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
1151 }
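    /* Note that the WB_L2 case above also sets GLM_INV: as the comment
     * before this block says, GLM doesn't support WB alone.
     */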
1152
1153 if (flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
1154 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1155 /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
1156 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1157 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) |
1158 EVENT_INDEX(0));
1159 }
1160 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
1161 /* Flush HTILE. Will wait for idle later. */
1162 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1163 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) |
1164 EVENT_INDEX(0));
1165 }
1166
1167 /* First flush CB/DB, then L1/L2. */
1168 gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);
1169
1170 if ((flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) ==
1171 (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
1172 cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1173 } else if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1174 cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
1175 } else if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
1176 cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
1177 } else {
1178 assert(0);
1179 }
1180 } else {
1181 /* Wait for graphics shaders to go idle if requested. */
1182 if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
1183 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1184 radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1185 /* Only count explicit shader flushes, not implicit ones. */
1186 ctx->num_vs_flushes++;
1187 ctx->num_ps_flushes++;
1188 } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
1189 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1190 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1191 ctx->num_vs_flushes++;
1192 }
1193 }
1194
1195 if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && ctx->compute_is_busy) {
1196 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1197 		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1198 ctx->num_cs_flushes++;
1199 ctx->compute_is_busy = false;
1200 }
1201
1202 if (cb_db_event) {
1203 /* CB/DB flush and invalidate (or possibly just a wait for a
1204 * meta flush) via RELEASE_MEM.
1205 *
1206 * Combine this with other cache flushes when possible; this
1207 		 * requires affected shaders to be idle, so do it after the
1208 		 * CS_PARTIAL_FLUSH emitted above (VS/PS partial flushes are
1209 		 * always implied).
1210 */
1211 uint64_t va;
1212
1213 /* Do the flush (enqueue the event and wait for it). */
1214 va = ctx->wait_mem_scratch->gpu_address;
1215 ctx->wait_mem_number++;
1216
1217 /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
1218 unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
1219 unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
1220 unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
1221 unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
1222 assert(G_586_GL2_US(gcr_cntl) == 0);
1223 assert(G_586_GL2_RANGE(gcr_cntl) == 0);
1224 assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
1225 unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
1226 unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
1227 unsigned gcr_seq = G_586_SEQ(gcr_cntl);
1228
1229 gcr_cntl &= C_586_GLM_WB &
1230 C_586_GLM_INV &
1231 C_586_GLV_INV &
1232 C_586_GL1_INV &
1233 C_586_GL2_INV &
1234 C_586_GL2_WB; /* keep SEQ */
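        /* Informal note: the fields cleared here were just extracted into
         * the S_490_* arguments of the RELEASE_MEM below, so only SEQ (and
         * the untouched RANGE bits) can remain for the ACQUIRE_MEM path
         * further down.
         */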
1235
1236 si_cp_release_mem(ctx, cs, cb_db_event,
1237 S_490_GLM_WB(glm_wb) |
1238 S_490_GLM_INV(glm_inv) |
1239 S_490_GLV_INV(glv_inv) |
1240 S_490_GL1_INV(gl1_inv) |
1241 S_490_GL2_INV(gl2_inv) |
1242 S_490_GL2_WB(gl2_wb) |
1243 S_490_SEQ(gcr_seq),
1244 EOP_DST_SEL_MEM,
1245 EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
1246 EOP_DATA_SEL_VALUE_32BIT,
1247 ctx->wait_mem_scratch, va,
1248 ctx->wait_mem_number, SI_NOT_QUERY);
1249 si_cp_wait_mem(ctx, ctx->gfx_cs, va, ctx->wait_mem_number, 0xffffffff,
1250 WAIT_REG_MEM_EQUAL);
1251 }
1252
1253 /* Ignore fields that only modify the behavior of other fields. */
1254 if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
1255 /* Flush caches and wait for the caches to assert idle.
1256 * The cache flush is executed in the ME, but the PFP waits
1257 * for completion.
1258 */
1259 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
1260 radeon_emit(cs, 0); /* CP_COHER_CNTL */
1261 radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
1262 radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
1263 radeon_emit(cs, 0); /* CP_COHER_BASE */
1264 radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
1265 radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
1266 radeon_emit(cs, gcr_cntl); /* GCR_CNTL */
1267 } else if (cb_db_event ||
1268 (flags & (SI_CONTEXT_VS_PARTIAL_FLUSH |
1269 SI_CONTEXT_PS_PARTIAL_FLUSH |
1270 SI_CONTEXT_CS_PARTIAL_FLUSH))) {
1271 /* We need to ensure that PFP waits as well. */
1272 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1273 radeon_emit(cs, 0);
1274 }
1275
1276 if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
1277 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1278 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
1279 EVENT_INDEX(0));
1280 } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
1281 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1282 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
1283 EVENT_INDEX(0));
1284 }
1285
1286 ctx->flags = 0;
1287 }
1288
1289 void si_emit_cache_flush(struct si_context *sctx)
1290 {
1291 struct radeon_cmdbuf *cs = sctx->gfx_cs;
1292 uint32_t flags = sctx->flags;
1293
1294 if (!sctx->has_graphics) {
1295 /* Only process compute flags. */
1296 flags &= SI_CONTEXT_INV_ICACHE |
1297 SI_CONTEXT_INV_SCACHE |
1298 SI_CONTEXT_INV_VCACHE |
1299 SI_CONTEXT_INV_L2 |
1300 SI_CONTEXT_WB_L2 |
1301 SI_CONTEXT_INV_L2_METADATA |
1302 SI_CONTEXT_CS_PARTIAL_FLUSH;
1303 }
1304
1305 uint32_t cp_coher_cntl = 0;
1306 const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
1307 SI_CONTEXT_FLUSH_AND_INV_DB);
1308 const bool is_barrier = flush_cb_db ||
1309 /* INV_ICACHE == beginning of gfx IB. Checking
1310 * INV_ICACHE fixes corruption for DeusExMD with
1311 * compute-based culling, but I don't know why.
1312 */
1313 flags & (SI_CONTEXT_INV_ICACHE |
1314 SI_CONTEXT_PS_PARTIAL_FLUSH |
1315 SI_CONTEXT_VS_PARTIAL_FLUSH) ||
1316 (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
1317 sctx->compute_is_busy);
1318
1319 assert(sctx->chip_class <= GFX9);
1320
1321 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
1322 sctx->num_cb_cache_flushes++;
1323 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
1324 sctx->num_db_cache_flushes++;
1325
1326 /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
1327 * bit is set. An alternative way is to write SQC_CACHES, but that
1328 * doesn't seem to work reliably. Since the bug doesn't affect
1329 * correctness (it only does more work than necessary) and
1330 * the performance impact is likely negligible, there is no plan
1331 * to add a workaround for it.
1332 */
1333
1334 if (flags & SI_CONTEXT_INV_ICACHE)
1335 cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
1336 if (flags & SI_CONTEXT_INV_SCACHE)
1337 cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
1338
1339 if (sctx->chip_class <= GFX8) {
1340 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1341 cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
1342 S_0085F0_CB0_DEST_BASE_ENA(1) |
1343 S_0085F0_CB1_DEST_BASE_ENA(1) |
1344 S_0085F0_CB2_DEST_BASE_ENA(1) |
1345 S_0085F0_CB3_DEST_BASE_ENA(1) |
1346 S_0085F0_CB4_DEST_BASE_ENA(1) |
1347 S_0085F0_CB5_DEST_BASE_ENA(1) |
1348 S_0085F0_CB6_DEST_BASE_ENA(1) |
1349 S_0085F0_CB7_DEST_BASE_ENA(1);
1350
1351 /* Necessary for DCC */
1352 if (sctx->chip_class == GFX8)
1353 si_cp_release_mem(sctx, cs,
1354 V_028A90_FLUSH_AND_INV_CB_DATA_TS,
1355 0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
1356 EOP_DATA_SEL_DISCARD, NULL,
1357 0, 0, SI_NOT_QUERY);
1358 }
1359 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
1360 cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
1361 S_0085F0_DB_DEST_BASE_ENA(1);
1362 }
1363
1364 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
1365 /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
1366 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1367 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
1368 }
1369 if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB |
1370 SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
1371 /* Flush HTILE. SURFACE_SYNC will wait for idle. */
1372 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1373 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
1374 }
1375
1376 /* Wait for shader engines to go idle.
1377 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
1378 * for everything including CB/DB cache flushes.
1379 */
1380 if (!flush_cb_db) {
1381 if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
1382 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1383 radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1384 /* Only count explicit shader flushes, not implicit ones
1385 * done by SURFACE_SYNC.
1386 */
1387 sctx->num_vs_flushes++;
1388 sctx->num_ps_flushes++;
1389 } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
1390 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1391 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1392 sctx->num_vs_flushes++;
1393 }
1394 }
1395
1396 if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
1397 sctx->compute_is_busy) {
1398 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1399 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1400 sctx->num_cs_flushes++;
1401 sctx->compute_is_busy = false;
1402 }
1403
1404 /* VGT state synchronization. */
1405 if (flags & SI_CONTEXT_VGT_FLUSH) {
1406 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1407 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
1408 }
1409 if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
1410 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1411 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
1412 }
1413
1414 /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
1415 * wait for idle on GFX9. We have to use a TS event.
1416 */
1417 if (sctx->chip_class == GFX9 && flush_cb_db) {
1418 uint64_t va;
1419 unsigned tc_flags, cb_db_event;
1420
1421 /* Set the CB/DB flush event. */
1422 switch (flush_cb_db) {
1423 case SI_CONTEXT_FLUSH_AND_INV_CB:
1424 cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
1425 break;
1426 case SI_CONTEXT_FLUSH_AND_INV_DB:
1427 cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
1428 break;
1429 default:
1430 /* both CB & DB */
1431 cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1432 }
1433
1434 /* These are the only allowed combinations. If you need to
1435 * do multiple operations at once, do them separately.
1436 * All operations that invalidate L2 also seem to invalidate
1437 * metadata. Volatile (VOL) and WC flushes are not listed here.
1438 *
1439 * TC | TC_WB = writeback & invalidate L2 & L1
1440 * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
1441 * TC_WB | TC_NC = writeback L2 for MTYPE == NC
1442 * TC | TC_NC = invalidate L2 for MTYPE == NC
1443 * TC | TC_MD = writeback & invalidate L2 metadata (DCC, etc.)
1444 * TCL1 = invalidate L1
1445 */
1446 tc_flags = 0;
1447
1448 if (flags & SI_CONTEXT_INV_L2_METADATA) {
1449 tc_flags = EVENT_TC_ACTION_ENA |
1450 EVENT_TC_MD_ACTION_ENA;
1451 }
1452
1453 /* Ideally flush TC together with CB/DB. */
1454 if (flags & SI_CONTEXT_INV_L2) {
1455 /* Writeback and invalidate everything in L2 & L1. */
1456 tc_flags = EVENT_TC_ACTION_ENA |
1457 EVENT_TC_WB_ACTION_ENA;
1458
1459 /* Clear the flags. */
1460 flags &= ~(SI_CONTEXT_INV_L2 |
1461 SI_CONTEXT_WB_L2 |
1462 SI_CONTEXT_INV_VCACHE);
1463 sctx->num_L2_invalidates++;
1464 }
1465
1466 /* Do the flush (enqueue the event and wait for it). */
1467 va = sctx->wait_mem_scratch->gpu_address;
1468 sctx->wait_mem_number++;
1469
1470 si_cp_release_mem(sctx, cs, cb_db_event, tc_flags,
1471 EOP_DST_SEL_MEM,
1472 EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
1473 EOP_DATA_SEL_VALUE_32BIT,
1474 sctx->wait_mem_scratch, va,
1475 sctx->wait_mem_number, SI_NOT_QUERY);
1476 si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff,
1477 WAIT_REG_MEM_EQUAL);
1478 }
1479
1480 /* Make sure ME is idle (it executes most packets) before continuing.
1481 * This prevents read-after-write hazards between PFP and ME.
1482 */
1483 if (sctx->has_graphics &&
1484 (cp_coher_cntl ||
1485 (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
1486 SI_CONTEXT_INV_VCACHE |
1487 SI_CONTEXT_INV_L2 |
1488 SI_CONTEXT_WB_L2)))) {
1489 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1490 radeon_emit(cs, 0);
1491 }
1492
1493 /* GFX6-GFX8 only:
1494 * When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
1495 * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
1496 *
1497 * cp_coher_cntl should contain all necessary flags except TC flags
1498 * at this point.
1499 *
1500 * GFX6-GFX7 don't support L2 write-back.
1501 */
1502 if (flags & SI_CONTEXT_INV_L2 ||
1503 (sctx->chip_class <= GFX7 &&
1504 (flags & SI_CONTEXT_WB_L2))) {
1505 /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
1506 * WB must be set on GFX8+ when TC_ACTION is set.
1507 */
1508 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
1509 S_0085F0_TC_ACTION_ENA(1) |
1510 S_0085F0_TCL1_ACTION_ENA(1) |
1511 S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
1512 cp_coher_cntl = 0;
1513 sctx->num_L2_invalidates++;
1514 } else {
1515 /* L1 invalidation and L2 writeback must be done separately,
1516 * because both operations can't be done together.
1517 */
1518 if (flags & SI_CONTEXT_WB_L2) {
1519 /* WB = write-back
1520 * NC = apply to non-coherent MTYPEs
1521 * (i.e. MTYPE <= 1, which is what we use everywhere)
1522 *
1523 * WB doesn't work without NC.
1524 */
1525 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
1526 S_0301F0_TC_WB_ACTION_ENA(1) |
1527 S_0301F0_TC_NC_ACTION_ENA(1));
1528 cp_coher_cntl = 0;
1529 sctx->num_L2_writebacks++;
1530 }
1531 if (flags & SI_CONTEXT_INV_VCACHE) {
1532 /* Invalidate per-CU VMEM L1. */
1533 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
1534 S_0085F0_TCL1_ACTION_ENA(1));
1535 cp_coher_cntl = 0;
1536 }
1537 }
1538
1539 /* If TC flushes haven't cleared this... */
1540 if (cp_coher_cntl)
1541 si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl);
1542
1543 if (is_barrier)
1544 si_prim_discard_signal_next_compute_ib_start(sctx);
1545
1546 if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
1547 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1548 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
1549 EVENT_INDEX(0));
1550 } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
1551 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1552 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
1553 EVENT_INDEX(0));
1554 }
1555
1556 sctx->flags = 0;
1557 }
1558
1559 static void si_get_draw_start_count(struct si_context *sctx,
1560 const struct pipe_draw_info *info,
1561 unsigned *start, unsigned *count)
1562 {
1563 struct pipe_draw_indirect_info *indirect = info->indirect;
1564
1565 if (indirect) {
1566 unsigned indirect_count;
1567 struct pipe_transfer *transfer;
1568 unsigned begin, end;
1569 unsigned map_size;
1570 unsigned *data;
1571
1572 if (indirect->indirect_draw_count) {
1573 data = pipe_buffer_map_range(&sctx->b,
1574 indirect->indirect_draw_count,
1575 indirect->indirect_draw_count_offset,
1576 sizeof(unsigned),
1577 PIPE_TRANSFER_READ, &transfer);
1578
1579 indirect_count = *data;
1580
1581 pipe_buffer_unmap(&sctx->b, transfer);
1582 } else {
1583 indirect_count = indirect->draw_count;
1584 }
1585
1586 if (!indirect_count) {
1587 *start = *count = 0;
1588 return;
1589 }
1590
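/* Both the arrays and elements indirect layouts start with
 * {count, instance_count, start, ...}, so data[0]/data[2] below are the
 * per-draw count and start; mapping up to the first 3 dwords of the last
 * packet is therefore enough.
 */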
1591 map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
1592 data = pipe_buffer_map_range(&sctx->b, indirect->buffer,
1593 indirect->offset, map_size,
1594 PIPE_TRANSFER_READ, &transfer);
1595
1596 begin = UINT_MAX;
1597 end = 0;
1598
1599 for (unsigned i = 0; i < indirect_count; ++i) {
1600 unsigned count = data[0];
1601 unsigned start = data[2];
1602
1603 if (count > 0) {
1604 begin = MIN2(begin, start);
1605 end = MAX2(end, start + count);
1606 }
1607
1608 data += indirect->stride / sizeof(unsigned);
1609 }
1610
1611 pipe_buffer_unmap(&sctx->b, transfer);
1612
1613 if (begin < end) {
1614 *start = begin;
1615 *count = end - begin;
1616 } else {
1617 *start = *count = 0;
1618 }
1619 } else {
1620 *start = info->start;
1621 *count = info->count;
1622 }
1623 }
1624
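/* Emit everything the draw needs except the draw packet itself: derived
 * tess state, dirty state atoms (minus skip_atom_mask, which the caller
 * emits later), queued PM4 states, the VS state and the draw registers.
 */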
1625 static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
1626 enum pipe_prim_type prim, unsigned instance_count,
1627 bool primitive_restart, unsigned skip_atom_mask)
1628 {
1629 unsigned num_patches = 0;
1630
1631 si_emit_rasterizer_prim_state(sctx);
1632 if (sctx->tes_shader.cso)
1633 si_emit_derived_tess_state(sctx, info, &num_patches);
1634
1635 /* Emit state atoms. */
1636 unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
1637 while (mask)
1638 sctx->atoms.array[u_bit_scan(&mask)].emit(sctx);
1639
1640 sctx->dirty_atoms &= skip_atom_mask;
1641
1642 /* Emit states. */
1643 mask = sctx->dirty_states;
1644 while (mask) {
1645 unsigned i = u_bit_scan(&mask);
1646 struct si_pm4_state *state = sctx->queued.array[i];
1647
1648 if (!state || sctx->emitted.array[i] == state)
1649 continue;
1650
1651 si_pm4_emit(sctx, state);
1652 sctx->emitted.array[i] = state;
1653 }
1654 sctx->dirty_states = 0;
1655
1656 /* Emit draw states. */
1657 si_emit_vs_state(sctx, info);
1658 si_emit_draw_registers(sctx, info, prim, num_patches, instance_count,
1659 primitive_restart);
1660 }
1661
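/* Check whether any buffer the vertex stage can read (index buffer, vertex
 * buffers, const/shader buffers, sampler textures, images) is referenced
 * for write by the current gfx IB. The prim discard compute shader may run
 * ahead of the gfx IB, so it's only safe when everything is read-only.
 */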
1662 static bool
1663 si_all_vs_resources_read_only(struct si_context *sctx,
1664 struct pipe_resource *indexbuf)
1665 {
1666 struct radeon_winsys *ws = sctx->ws;
1667 struct radeon_cmdbuf *cs = sctx->gfx_cs;
1668
1669 /* Index buffer. */
1670 if (indexbuf &&
1671 ws->cs_is_buffer_referenced(cs, si_resource(indexbuf)->buf,
1672 RADEON_USAGE_WRITE))
1673 goto has_write_reference;
1674
1675 /* Vertex buffers. */
1676 struct si_vertex_elements *velems = sctx->vertex_elements;
1677 unsigned num_velems = velems->count;
1678
1679 for (unsigned i = 0; i < num_velems; i++) {
1680 if (!((1 << i) & velems->first_vb_use_mask))
1681 continue;
1682
1683 unsigned vb_index = velems->vertex_buffer_index[i];
1684 struct pipe_resource *res = sctx->vertex_buffer[vb_index].buffer.resource;
1685 if (!res)
1686 continue;
1687
1688 if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
1689 RADEON_USAGE_WRITE))
1690 goto has_write_reference;
1691 }
1692
1693 /* Constant and shader buffers. */
1694 struct si_descriptors *buffers =
1695 &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_VERTEX)];
1696 for (unsigned i = 0; i < buffers->num_active_slots; i++) {
1697 unsigned index = buffers->first_active_slot + i;
1698 struct pipe_resource *res =
1699 sctx->const_and_shader_buffers[PIPE_SHADER_VERTEX].buffers[index];
1700 if (!res)
1701 continue;
1702
1703 if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
1704 RADEON_USAGE_WRITE))
1705 goto has_write_reference;
1706 }
1707
1708 /* Samplers. */
1709 struct si_shader_selector *vs = sctx->vs_shader.cso;
1710 if (vs->info.samplers_declared) {
1711 unsigned num_samplers = util_last_bit(vs->info.samplers_declared);
1712
1713 for (unsigned i = 0; i < num_samplers; i++) {
1714 struct pipe_sampler_view *view = sctx->samplers[PIPE_SHADER_VERTEX].views[i];
1715 if (!view)
1716 continue;
1717
1718 if (ws->cs_is_buffer_referenced(cs,
1719 si_resource(view->texture)->buf,
1720 RADEON_USAGE_WRITE))
1721 goto has_write_reference;
1722 }
1723 }
1724
1725 /* Images. */
1726 if (vs->info.images_declared) {
1727 unsigned num_images = util_last_bit(vs->info.images_declared);
1728
1729 for (unsigned i = 0; i < num_images; i++) {
1730 struct pipe_resource *res = sctx->images[PIPE_SHADER_VERTEX].views[i].resource;
1731 if (!res)
1732 continue;
1733
1734 if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
1735 RADEON_USAGE_WRITE))
1736 goto has_write_reference;
1737 }
1738 }
1739
1740 return true;
1741
1742 has_write_reference:
1743 /* If the current gfx IB has enough packets, flush it to remove write
1744 * references to buffers.
1745 */
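/* The 2048-dword threshold is presumably a heuristic so that nearly empty
 * IBs aren't flushed just to drop a write reference.
 */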
1746 if (cs->prev_dw + cs->current.cdw > 2048) {
1747 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
1748 assert(si_all_vs_resources_read_only(sctx, indexbuf));
1749 return true;
1750 }
1751 return false;
1752 }
1753
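/* Log why the prim discard compute path was rejected. It returns false so
 * that the eligibility checks below can chain it with ||: the message only
 * prints (when SI_PRIM_DISCARD_DEBUG is set) if the preceding condition
 * failed, and the overall check then evaluates to false.
 */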
1754 static ALWAYS_INLINE bool pd_msg(const char *s)
1755 {
1756 if (SI_PRIM_DISCARD_DEBUG)
1757 printf("PD failed: %s\n", s);
1758 return false;
1759 }
1760
1761 static void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
1762 {
1763 struct si_context *sctx = (struct si_context *)ctx;
1764 struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
1765 struct pipe_resource *indexbuf = info->index.resource;
1766 unsigned dirty_tex_counter, dirty_buf_counter;
1767 enum pipe_prim_type rast_prim, prim = info->mode;
1768 unsigned index_size = info->index_size;
1769 unsigned index_offset = info->indirect ? info->start * index_size : 0;
1770 unsigned instance_count = info->instance_count;
1771 bool primitive_restart = info->primitive_restart &&
1772 (!sctx->screen->options.prim_restart_tri_strips_only ||
1773 (prim != PIPE_PRIM_TRIANGLE_STRIP &&
1774 prim != PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY));
1775
1776 if (likely(!info->indirect)) {
1777 /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
1778 * no workaround for indirect draws, but we can at least skip
1779 * direct draws.
1780 */
1781 if (unlikely(!instance_count))
1782 return;
1783
1784 /* Handle count == 0. */
1785 if (unlikely(!info->count &&
1786 (index_size || !info->count_from_stream_output)))
1787 return;
1788 }
1789
1790 if (unlikely(!sctx->vs_shader.cso ||
1791 (!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
1792 (!!sctx->tes_shader.cso != (prim == PIPE_PRIM_PATCHES)))) {
1793 assert(0);
1794 return;
1795 }
1796
1797 /* Recompute and re-emit the texture resource states if needed. */
1798 dirty_tex_counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
1799 if (unlikely(dirty_tex_counter != sctx->last_dirty_tex_counter)) {
1800 sctx->last_dirty_tex_counter = dirty_tex_counter;
1801 sctx->framebuffer.dirty_cbufs |=
1802 ((1 << sctx->framebuffer.state.nr_cbufs) - 1);
1803 sctx->framebuffer.dirty_zsbuf = true;
1804 si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
1805 si_update_all_texture_descriptors(sctx);
1806 }
1807
1808 dirty_buf_counter = p_atomic_read(&sctx->screen->dirty_buf_counter);
1809 if (unlikely(dirty_buf_counter != sctx->last_dirty_buf_counter)) {
1810 sctx->last_dirty_buf_counter = dirty_buf_counter;
1811 /* Rebind all buffers unconditionally. */
1812 si_rebind_buffer(sctx, NULL);
1813 }
1814
1815 si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
1816
1817 /* Set the rasterization primitive type.
1818 *
1819 * This must be done after si_decompress_textures, which can call
1820 * draw_vbo recursively, and before si_update_shaders, which uses
1821 * current_rast_prim for this draw_vbo call. */
1822 if (sctx->gs_shader.cso) {
1823 /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
1824 rast_prim = sctx->gs_shader.cso->rast_prim;
1825 } else if (sctx->tes_shader.cso) {
1826 /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
1827 rast_prim = sctx->tes_shader.cso->rast_prim;
1828 } else if (util_rast_prim_is_triangles(prim)) {
1829 rast_prim = PIPE_PRIM_TRIANGLES;
1830 } else {
1831 /* Only possibilities: POINTS, LINE*, RECTANGLES */
1832 rast_prim = prim;
1833 }
1834
1835 if (rast_prim != sctx->current_rast_prim) {
1836 if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
1837 util_prim_is_points_or_lines(rast_prim))
1838 si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
1839
1840 sctx->current_rast_prim = rast_prim;
1841 sctx->do_update_shaders = true;
1842 }
1843
1844 if (sctx->tes_shader.cso &&
1845 sctx->screen->info.has_ls_vgpr_init_bug) {
1846 /* Determine whether the LS VGPR fix should be applied.
1847 *
1848 * It is only required when num input CPs > num output CPs,
1849 * which cannot happen with the fixed function TCS. We should
1850 * also update this bit when switching from TCS to fixed
1851 * function TCS.
1852 */
1853 struct si_shader_selector *tcs = sctx->tcs_shader.cso;
1854 bool ls_vgpr_fix =
1855 tcs &&
1856 info->vertices_per_patch >
1857 tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
1858
1859 if (ls_vgpr_fix != sctx->ls_vgpr_fix) {
1860 sctx->ls_vgpr_fix = ls_vgpr_fix;
1861 sctx->do_update_shaders = true;
1862 }
1863 }
1864
1865 if (sctx->chip_class <= GFX9 && sctx->gs_shader.cso) {
1866 /* Determine whether the GS triangle strip adjacency fix should
1867 * be applied. Rotate every other triangle if
1868 * - triangle strips with adjacency are fed to the GS and
1869 * - primitive restart is disabled (the rotation doesn't help
1870 * when the restart occurs after an odd number of triangles).
1871 */
1872 bool gs_tri_strip_adj_fix =
1873 !sctx->tes_shader.cso &&
1874 prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
1875 !primitive_restart;
1876
1877 if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
1878 sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
1879 sctx->do_update_shaders = true;
1880 }
1881 }
1882
1883 if (index_size) {
1884 /* Translate or upload, if needed. */
1885 /* 8-bit indices are only supported natively on GFX8+; convert them to 16-bit on older chips. */
1886 if (sctx->chip_class <= GFX7 && index_size == 1) {
1887 unsigned start, count, start_offset, size, offset;
1888 void *ptr;
1889
1890 si_get_draw_start_count(sctx, info, &start, &count);
1891 start_offset = start * 2;
1892 size = count * 2;
1893
1894 indexbuf = NULL;
1895 u_upload_alloc(ctx->stream_uploader, start_offset,
1896 size,
1897 si_optimal_tcc_alignment(sctx, size),
1898 &offset, &indexbuf, &ptr);
1899 if (!indexbuf)
1900 return;
1901
1902 util_shorten_ubyte_elts_to_userptr(&sctx->b, info, 0, 0,
1903 index_offset + start,
1904 count, ptr);
1905
1906 /* info->start will be added by the drawing code */
1907 index_offset = offset - start_offset;
1908 index_size = 2;
1909 } else if (info->has_user_indices) {
1910 unsigned start_offset;
1911
1912 assert(!info->indirect);
1913 start_offset = info->start * index_size;
1914
1915 indexbuf = NULL;
1916 u_upload_data(ctx->stream_uploader, start_offset,
1917 info->count * index_size,
1918 sctx->screen->info.tcc_cache_line_size,
1919 (char*)info->index.user + start_offset,
1920 &index_offset, &indexbuf);
1921 if (!indexbuf)
1922 return;
1923
1924 /* info->start will be added by the drawing code */
1925 index_offset -= start_offset;
1926 } else if (sctx->chip_class <= GFX7 &&
1927 si_resource(indexbuf)->TC_L2_dirty) {
1928 /* GFX8+ reads index buffers through TC L2, so it doesn't
1929 * need this. */
1930 sctx->flags |= SI_CONTEXT_WB_L2;
1931 si_resource(indexbuf)->TC_L2_dirty = false;
1932 }
1933 }
1934
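/* Per-draw state for the compute-based primitive discard (culling) path;
 * whether it's actually used is decided further below.
 */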
1935 bool dispatch_prim_discard_cs = false;
1936 bool prim_discard_cs_instancing = false;
1937 unsigned original_index_size = index_size;
1938 unsigned direct_count = 0;
1939
1940 if (info->indirect) {
1941 struct pipe_draw_indirect_info *indirect = info->indirect;
1942
1943 /* Add the buffer size for memory checking in need_cs_space. */
1944 si_context_add_resource_size(sctx, indirect->buffer);
1945
1946 /* Indirect buffers are read through TC L2 on GFX9+, but not on older hw. */
1947 if (sctx->chip_class <= GFX8) {
1948 if (si_resource(indirect->buffer)->TC_L2_dirty) {
1949 sctx->flags |= SI_CONTEXT_WB_L2;
1950 si_resource(indirect->buffer)->TC_L2_dirty = false;
1951 }
1952
1953 if (indirect->indirect_draw_count &&
1954 si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
1955 sctx->flags |= SI_CONTEXT_WB_L2;
1956 si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
1957 }
1958 }
1959 } else {
1960 /* Multiply by 3 for strips and fans to get an approximate vertex
1961 * count as triangles. */
1962 direct_count = info->count * instance_count *
1963 (prim == PIPE_PRIM_TRIANGLES ? 1 : 3);
1964 }
1965
1966 /* Determine if we can use the primitive discard compute shader. */
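/* In short: the draw is only routed to the compute path when it's big
 * enough to be worth it, uses a plain triangle topology without features
 * the compute shader can't handle (tess, GS, polygon mode, stream output,
 * certain queries, etc.), and none of the VS-visible buffers are being
 * written by the current IB.
 */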
1967 if (si_compute_prim_discard_enabled(sctx) &&
1968 (direct_count > sctx->prim_discard_vertex_count_threshold ?
1969 (sctx->compute_num_verts_rejected += direct_count, true) : /* Add, then return true. */
1970 (sctx->compute_num_verts_ineligible += direct_count, false)) && /* Add, then return false. */
1971 (!info->count_from_stream_output || pd_msg("draw_opaque")) &&
1972 (primitive_restart ?
1973 /* Supported prim types with primitive restart: */
1974 (prim == PIPE_PRIM_TRIANGLE_STRIP || pd_msg("bad prim type with primitive restart")) &&
1975 /* Disallow instancing with primitive restart: */
1976 (instance_count == 1 || pd_msg("instance_count > 1 with primitive restart")) :
1977 /* Supported prim types without primitive restart + allow instancing: */
1978 (1 << prim) & ((1 << PIPE_PRIM_TRIANGLES) |
1979 (1 << PIPE_PRIM_TRIANGLE_STRIP) |
1980 (1 << PIPE_PRIM_TRIANGLE_FAN)) &&
1981 /* Instancing is limited to 16-bit indices, because InstanceID is packed into VertexID. */
1982 /* TODO: DrawArraysInstanced sometimes doesn't work, so it's disabled. */
1983 (instance_count == 1 ||
1984 (instance_count <= USHRT_MAX && index_size && index_size <= 2) ||
1985 pd_msg("instance_count too large or index_size == 4 or DrawArraysInstanced"))) &&
1986 (info->drawid == 0 || !sctx->vs_shader.cso->info.uses_drawid || pd_msg("draw_id > 0")) &&
1987 (!sctx->render_cond || pd_msg("render condition")) &&
1988 /* Forced enablement ignores pipeline statistics queries. */
1989 (sctx->screen->debug_flags & (DBG(PD) | DBG(ALWAYS_PD)) ||
1990 (!sctx->num_pipeline_stat_queries && !sctx->streamout.prims_gen_query_enabled) ||
1991 pd_msg("pipestat or primgen query")) &&
1992 (!sctx->vertex_elements->instance_divisor_is_fetched || pd_msg("loads instance divisors")) &&
1993 (!sctx->tes_shader.cso || pd_msg("uses tess")) &&
1994 (!sctx->gs_shader.cso || pd_msg("uses GS")) &&
1995 (!sctx->ps_shader.cso->info.uses_primid || pd_msg("PS uses PrimID")) &&
1996 !rs->polygon_mode_enabled &&
1997 #if SI_PRIM_DISCARD_DEBUG /* same as cso->prim_discard_cs_allowed */
1998 (!sctx->vs_shader.cso->info.uses_bindless_images || pd_msg("uses bindless images")) &&
1999 (!sctx->vs_shader.cso->info.uses_bindless_samplers || pd_msg("uses bindless samplers")) &&
2000 (!sctx->vs_shader.cso->info.writes_memory || pd_msg("writes memory")) &&
2001 (!sctx->vs_shader.cso->info.writes_viewport_index || pd_msg("writes viewport index")) &&
2002 !sctx->vs_shader.cso->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] &&
2003 !sctx->vs_shader.cso->so.num_outputs &&
2004 #else
2005 (sctx->vs_shader.cso->prim_discard_cs_allowed || pd_msg("VS shader uses unsupported features")) &&
2006 #endif
2007 /* Check that all buffers are only used for reads, because the compute
2008 * dispatch can run ahead of the gfx IB. */
2009 (si_all_vs_resources_read_only(sctx, index_size ? indexbuf : NULL) || pd_msg("write reference"))) {
2010 switch (si_prepare_prim_discard_or_split_draw(sctx, info, primitive_restart)) {
2011 case SI_PRIM_DISCARD_ENABLED:
2012 original_index_size = index_size;
2013 prim_discard_cs_instancing = instance_count > 1;
2014 dispatch_prim_discard_cs = true;
2015
2016 /* The compute shader changes/lowers the following: */
2017 prim = PIPE_PRIM_TRIANGLES;
2018 index_size = 4;
2019 instance_count = 1;
2020 primitive_restart = false;
2021 sctx->compute_num_verts_rejected -= direct_count;
2022 sctx->compute_num_verts_accepted += direct_count;
2023 break;
2024 case SI_PRIM_DISCARD_DISABLED:
2025 break;
2026 case SI_PRIM_DISCARD_DRAW_SPLIT:
2027 sctx->compute_num_verts_rejected -= direct_count;
2028 goto return_cleanup;
2029 }
2030 }
2031
2032 if (prim_discard_cs_instancing != sctx->prim_discard_cs_instancing) {
2033 sctx->prim_discard_cs_instancing = prim_discard_cs_instancing;
2034 sctx->do_update_shaders = true;
2035 }
2036
2037 if (sctx->do_update_shaders && !si_update_shaders(sctx))
2038 goto return_cleanup;
2039
2040 si_need_gfx_cs_space(sctx);
2041
2042 if (sctx->bo_list_add_all_gfx_resources)
2043 si_gfx_resources_add_all_to_bo_list(sctx);
2044
2045 /* Since we've called si_context_add_resource_size for vertex buffers,
2046 * this must be called after si_need_gfx_cs_space, because we must let
2047 * that flush happen before we add buffers to the buffer list.
2048 */
2049 if (!si_upload_vertex_buffer_descriptors(sctx))
2050 goto return_cleanup;
2051
2052 /* Vega10/Raven scissor bug workaround. When any context register is
2053 * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
2054 * registers must be written too.
2055 */
2056 unsigned masked_atoms = 0;
2057
2058 if (sctx->screen->info.has_gfx9_scissor_bug) {
2059 masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
2060
2061 if (info->count_from_stream_output ||
2062 sctx->dirty_atoms & si_atoms_that_always_roll_context() ||
2063 sctx->dirty_states & si_states_that_always_roll_context())
2064 sctx->context_roll = true;
2065 }
2066
2067 /* Use optimal packet order based on whether we need to sync the pipeline. */
2068 if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
2069 SI_CONTEXT_FLUSH_AND_INV_DB |
2070 SI_CONTEXT_PS_PARTIAL_FLUSH |
2071 SI_CONTEXT_CS_PARTIAL_FLUSH))) {
2072 /* If we have to wait for idle, set all states first, so that all
2073 * SET packets are processed in parallel with previous draw calls.
2074 * Then draw and prefetch at the end. This ensures that the time
2075 * the CUs are idle is very short.
2076 */
2077 if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
2078 masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);
2079
2080 if (!si_upload_graphics_shader_descriptors(sctx))
2081 goto return_cleanup;
2082
2083 /* Emit all states except possibly render condition. */
2084 si_emit_all_states(sctx, info, prim, instance_count,
2085 primitive_restart, masked_atoms);
2086 sctx->emit_cache_flush(sctx);
2087 /* <-- CUs are idle here. */
2088
2089 if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
2090 sctx->atoms.s.render_cond.emit(sctx);
2091
2092 if (sctx->screen->info.has_gfx9_scissor_bug &&
2093 (sctx->context_roll ||
2094 si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
2095 sctx->atoms.s.scissors.emit(sctx);
2096
2097 sctx->dirty_atoms = 0;
2098
2099 si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset,
2100 instance_count, dispatch_prim_discard_cs,
2101 original_index_size);
2102 /* <-- CUs are busy here. */
2103
2104 /* Start prefetches after the draw has been started. Both will run
2105 * in parallel, but starting the draw first is more important.
2106 */
2107 if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
2108 cik_emit_prefetch_L2(sctx, false);
2109 } else {
2110 /* If we don't wait for idle, start prefetches first, then set
2111 * states, and draw at the end.
2112 */
2113 if (sctx->flags)
2114 sctx->emit_cache_flush(sctx);
2115
2116 /* Only prefetch the API VS and VBO descriptors. */
2117 if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
2118 cik_emit_prefetch_L2(sctx, true);
2119
2120 if (!si_upload_graphics_shader_descriptors(sctx))
2121 goto return_cleanup;
2122
2123 si_emit_all_states(sctx, info, prim, instance_count,
2124 primitive_restart, masked_atoms);
2125
2126 if (sctx->screen->info.has_gfx9_scissor_bug &&
2127 (sctx->context_roll ||
2128 si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
2129 sctx->atoms.s.scissors.emit(sctx);
2130
2131 sctx->dirty_atoms = 0;
2132
2133 si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset,
2134 instance_count, dispatch_prim_discard_cs,
2135 original_index_size);
2136
2137 /* Prefetch the remaining shaders after the draw has been
2138 * started. */
2139 if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
2140 cik_emit_prefetch_L2(sctx, false);
2141 }
2142
2143 /* Mark the displayable DCC buffers as dirty in order to update
2144 * them on the next call to si_flush_resource. */
2145 if (sctx->screen->info.use_display_dcc_with_retile_blit) {
2146 /* Don't use si_update_fb_dirtiness_after_rendering because it'll
2147 * cause unnecessary texture decompressions on each draw. */
2148 unsigned displayable_dcc_cb_mask = sctx->framebuffer.displayable_dcc_cb_mask;
2149 while (displayable_dcc_cb_mask) {
2150 unsigned i = u_bit_scan(&displayable_dcc_cb_mask);
2151 struct pipe_surface *surf = sctx->framebuffer.state.cbufs[i];
2152 struct si_texture *tex = (struct si_texture*) surf->texture;
2153 tex->displayable_dcc_dirty = true;
2154 }
2155 }
2156
2157 /* Clear the context roll flag after the draw call. */
2158 sctx->context_roll = false;
2159
2160 if (unlikely(sctx->current_saved_cs)) {
2161 si_trace_emit(sctx);
2162 si_log_draw_state(sctx, sctx->log);
2163 }
2164
2165 /* Workaround for a VGT hang when streamout is enabled.
2166 * It must be done after drawing. */
2167 if ((sctx->family == CHIP_HAWAII ||
2168 sctx->family == CHIP_TONGA ||
2169 sctx->family == CHIP_FIJI) &&
2170 si_get_strmout_en(sctx)) {
2171 sctx->flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
2172 }
2173
2174 if (unlikely(sctx->decompression_enabled)) {
2175 sctx->num_decompress_calls++;
2176 } else {
2177 sctx->num_draw_calls++;
2178 if (sctx->framebuffer.state.nr_cbufs > 1)
2179 sctx->num_mrt_draw_calls++;
2180 if (primitive_restart)
2181 sctx->num_prim_restart_calls++;
2182 if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
2183 sctx->num_spill_draw_calls++;
2184 }
2185
2186 return_cleanup:
2187 if (index_size && indexbuf != info->index.resource)
2188 pipe_resource_reference(&indexbuf, NULL);
2189 }
2190
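/* Blitter callback: draw a screen-aligned rectangle as a RECTLIST
 * primitive. Position, depth and the color/texcoord attribute are passed
 * to the blit vertex shader through user SGPRs (vs_blit_sh_data) instead
 * of vertex buffers.
 */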
2191 static void
2192 si_draw_rectangle(struct blitter_context *blitter,
2193 void *vertex_elements_cso,
2194 blitter_get_vs_func get_vs,
2195 int x1, int y1, int x2, int y2,
2196 float depth, unsigned num_instances,
2197 enum blitter_attrib_type type,
2198 const union blitter_attrib *attrib)
2199 {
2200 struct pipe_context *pipe = util_blitter_get_pipe(blitter);
2201 struct si_context *sctx = (struct si_context*)pipe;
2202
2203 /* Pack position coordinates as signed int16. */
2204 sctx->vs_blit_sh_data[0] = (uint32_t)(x1 & 0xffff) |
2205 ((uint32_t)(y1 & 0xffff) << 16);
2206 sctx->vs_blit_sh_data[1] = (uint32_t)(x2 & 0xffff) |
2207 ((uint32_t)(y2 & 0xffff) << 16);
2208 sctx->vs_blit_sh_data[2] = fui(depth);
2209
2210 switch (type) {
2211 case UTIL_BLITTER_ATTRIB_COLOR:
2212 memcpy(&sctx->vs_blit_sh_data[3], attrib->color,
2213 sizeof(float)*4);
2214 break;
2215 case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
2216 case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
2217 memcpy(&sctx->vs_blit_sh_data[3], &attrib->texcoord,
2218 sizeof(attrib->texcoord));
2219 break;
2220 case UTIL_BLITTER_ATTRIB_NONE:;
2221 }
2222
2223 pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));
2224
2225 struct pipe_draw_info info = {};
2226 info.mode = SI_PRIM_RECTANGLE_LIST;
2227 info.count = 3;
2228 info.instance_count = num_instances;
2229
2230 /* Don't set per-stage shader pointers for VS. */
2231 sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(VERTEX);
2232 sctx->vertex_buffer_pointer_dirty = false;
2233
2234 si_draw_vbo(pipe, &info);
2235 }
2236
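/* Write an incrementing trace ID to the trace buffer via the CP and embed
 * the same ID in the IB as a NOP-encoded trace point, so that after a hang
 * the last ID that reached memory shows how far the CP got.
 */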
2237 void si_trace_emit(struct si_context *sctx)
2238 {
2239 struct radeon_cmdbuf *cs = sctx->gfx_cs;
2240 uint32_t trace_id = ++sctx->current_saved_cs->trace_id;
2241
2242 si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf,
2243 0, 4, V_370_MEM, V_370_ME, &trace_id);
2244
2245 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
2246 radeon_emit(cs, AC_ENCODE_TRACE_POINT(trace_id));
2247
2248 if (sctx->log)
2249 u_log_flush(sctx->log);
2250 }
2251
2252 void si_init_draw_functions(struct si_context *sctx)
2253 {
2254 sctx->b.draw_vbo = si_draw_vbo;
2255
2256 sctx->blitter->draw_rectangle = si_draw_rectangle;
2257
2258 si_init_ia_multi_vgt_param_table(sctx);
2259 }