/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "si_pipe.h"
#include "radeon/r600_cs.h"
#include "sid.h"
#include "gfx9d.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"

#include "ac_debug.h"

static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
	};
	assert(mode < ARRAY_SIZE(prim_conv));
	return prim_conv[mode];
}

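/* Map a primitive type to the GS output primitive type. Only three output
 * encodings appear in the table below (point list, line strip, tri strip),
 * so every input topology collapses to one of them; patches use the
 * point-list encoding.
 */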
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
static void si_emit_derived_tess_state(struct si_context *sctx,
				       const struct pipe_draw_info *info,
				       unsigned *num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ls_current;
	struct si_shader_selector *ls;
	/* The TES pointer will only be used for sctx->last_tcs.
	 * It would be wrong to think that TCS = TES. */
	struct si_shader_selector *tcs =
		sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
	unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
	unsigned num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
	unsigned input_patch_size, output_patch_size, output_patch0_offset;
	unsigned perpatch_output_offset, lds_size;
	unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
	unsigned offchip_layout, hardware_lds_size, ls_hs_config;

	/* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
	if (sctx->b.chip_class >= GFX9) {
		if (sctx->tcs_shader.cso)
			ls_current = sctx->tcs_shader.current;
		else
			ls_current = sctx->fixed_func_tcs_shader.current;

		ls = ls_current->key.part.tcs.ls;
	} else {
		ls_current = sctx->vs_shader.current;
		ls = sctx->vs_shader.cso;
	}

	if (sctx->last_ls == ls_current &&
	    sctx->last_tcs == tcs &&
	    sctx->last_tes_sh_base == tes_sh_base &&
	    sctx->last_num_tcs_input_cp == num_tcs_input_cp) {
		*num_patches = sctx->last_num_patches;
		return;
	}

	sctx->last_ls = ls_current;
	sctx->last_tcs = tcs;
	sctx->last_tes_sh_base = tes_sh_base;
	sctx->last_num_tcs_input_cp = num_tcs_input_cp;

	/* This calculates how shader inputs and outputs among VS, TCS, and TES
	 * are laid out in LDS. */
	num_tcs_inputs = util_last_bit64(ls->outputs_written);

	if (sctx->tcs_shader.cso) {
		num_tcs_outputs = util_last_bit64(tcs->outputs_written);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
	} else {
		/* No TCS. Route varyings from LS to TES. */
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
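
	/* A worked example with hypothetical numbers: 4 inputs and 4 outputs
	 * per vertex (16 bytes each), 3 control points in and out, and 2
	 * patch outputs (e.g. TESSINNER + TESSOUTER) give:
	 *   input_vertex_size  = 4 * 16 = 64 bytes
	 *   input_patch_size   = 3 * 64 = 192 bytes
	 *   pervertex_output_patch_size = 3 * 64 = 192 bytes
	 *   output_patch_size  = 192 + 2 * 16 = 224 bytes
	 */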

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of TCS input and output
	 * vertices per threadgroup is at most 256.
	 */
	*num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;

	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = sctx->b.chip_class >= CIK ? 65536 : 32768;
	*num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
							       output_patch_size));

	/* Make sure the output data fits in the offchip buffer. */
	*num_patches = MIN2(*num_patches,
			    (sctx->screen->tess_offchip_block_dw_size * 4) /
			    output_patch_size);

	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	*num_patches = MIN2(*num_patches, 40);
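
	/* Continuing the hypothetical example above: the wave limit gives
	 * 64 / MAX2(3, 3) * 4 = 84 patches, 64 KB of LDS allows
	 * 65536 / (192 + 224) = 157 patches, and assuming an offchip block of
	 * 8192 dwords, 8192 * 4 / 224 = 146 patches, so the binding
	 * constraint here would be the performance cap of 40.
	 */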

	if (sctx->b.chip_class == SI) {
		/* SI bug workaround, related to power management. Limit LS-HS
		 * threadgroups to only one wave.
		 */
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		*num_patches = MIN2(*num_patches, one_wave);

		if (sctx->screen->b.info.max_se == 1) {
			/* The VGT HS block increments the patch ID unconditionally
			 * within a single threadgroup. This results in incorrect
			 * patch IDs when instanced draws are used.
			 *
			 * The intended solution is to restrict threadgroups to
			 * a single instance by setting SWITCH_ON_EOI, which
			 * should cause IA to split instances up. However, this
			 * doesn't work correctly on SI when there is no other
			 * SE to switch to.
			 */
			*num_patches = 1;
		}
	}

	sctx->last_num_patches = *num_patches;

	output_patch0_offset = input_patch_size * *num_patches;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	/* Compute userdata SGPRs. */
	assert(((input_vertex_size / 4) & ~0xff) == 0);
	assert(((output_vertex_size / 4) & ~0xff) == 0);
	assert(((input_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch0_offset / 16) & ~0xffff) == 0);
	assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
	assert(num_tcs_input_cp <= 32);
	assert(num_tcs_output_cp <= 32);

	tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
			S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
	tcs_out_layout = (output_patch_size / 4) |
			 ((output_vertex_size / 4) << 13);
	tcs_out_offsets = (output_patch0_offset / 16) |
			  ((perpatch_output_offset / 16) << 16);
	offchip_layout = *num_patches |
			 (num_tcs_output_cp << 6) |
			 (pervertex_output_patch_size * *num_patches << 12);
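
	/* The asserts above match the bitfield widths used here: 8 bits for
	 * the vertex sizes, 13 bits for the patch sizes, 16 bits for the two
	 * offsets, 6 bits for each control point count. A sketch of the
	 * resulting packing, derived from the shifts above:
	 *   tcs_out_layout:  [12:0] output_patch_size / 4,
	 *                    [25:13] output_vertex_size / 4
	 *   tcs_out_offsets: [15:0] output_patch0_offset / 16,
	 *                    [31:16] perpatch_output_offset / 16
	 *   offchip_layout:  [5:0] num_patches, [11:6] num_tcs_output_cp,
	 *                    [31:12] pervertex_output_patch_size * num_patches
	 */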

	/* Compute the LDS size. */
	lds_size = output_patch0_offset + output_patch_size * *num_patches;

	if (sctx->b.chip_class >= CIK) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}

	/* Set SI_SGPR_VS_STATE_BITS. */
	sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE &
				  C_VS_STATE_LS_OUT_VERTEX_SIZE;
	sctx->current_vs_state |= tcs_in_layout;

	if (sctx->b.chip_class >= GFX9) {
		unsigned hs_rsrc2 = ls_current->config.rsrc2 |
				    S_00B42C_LDS_SIZE(lds_size);

		radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);

		/* Set userdata SGPRs for merged LS-HS. */
		radeon_set_sh_reg_seq(cs,
				      R_00B430_SPI_SHADER_USER_DATA_LS_0 +
				      GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
		radeon_emit(cs, offchip_layout);
		radeon_emit(cs, tcs_out_offsets);
		radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
	} else {
		unsigned ls_rsrc2 = ls_current->config.rsrc2;

		si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);

		/* Due to a hw bug, RSRC2_LS must be written twice with another
		 * LS register written in between. */
		if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
			radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
		radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
		radeon_emit(cs, ls_current->config.rsrc1);
		radeon_emit(cs, ls_rsrc2);

		/* Set userdata SGPRs for TCS. */
		radeon_set_sh_reg_seq(cs,
			R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
		radeon_emit(cs, offchip_layout);
		radeon_emit(cs, tcs_out_offsets);
		radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
		radeon_emit(cs, tcs_in_layout);
	}

	/* Set userdata SGPRs for TES. */
	radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
	radeon_emit(cs, offchip_layout);
	radeon_emit(cs, r600_resource(sctx->tess_offchip_ring)->gpu_address >> 16);

	ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
		       S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		       S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);

	if (sctx->b.chip_class >= CIK)
		radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   ls_hs_config);
	else
		radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
				       ls_hs_config);
}

static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
{
	switch (info->mode) {
	case PIPE_PRIM_PATCHES:
		return info->count / info->vertices_per_patch;
	case R600_PRIM_RECTANGLE_LIST:
		return info->count / 3;
	default:
		return u_prims_for_vertices(info->mode, info->count);
	}
}
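
/* A quick sanity check of the mapping above: PIPE_PRIM_PATCHES with
 * vertices_per_patch = 4 and count = 16 yields 4 patches,
 * R600_PRIM_RECTANGLE_LIST with count = 3 yields 1 rectangle, and
 * PIPE_PRIM_TRIANGLES with count = 9 yields 3 primitives via
 * u_prims_for_vertices.
 */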

static unsigned
si_get_init_multi_vgt_param(struct si_screen *sscreen,
			    union si_vgt_param_key *key)
{
	STATIC_ASSERT(sizeof(union si_vgt_param_key) == 4);
	unsigned max_primgroup_in_wave = 2;

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;

	if (key->u.uses_tess) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (key->u.tcs_tes_uses_prim_id)
			ia_switch_on_eoi = true;

		/* Bug with tessellation and GS on Bonaire and older 2-SE chips. */
		if ((sscreen->b.family == CHIP_TAHITI ||
		     sscreen->b.family == CHIP_PITCAIRN ||
		     sscreen->b.family == CHIP_BONAIRE) &&
		    key->u.uses_gs)
			partial_vs_wave = true;

		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (sscreen->has_distributed_tess) {
			if (key->u.uses_gs) {
				if (sscreen->b.chip_class <= VI)
					partial_es_wave = true;

				/* GPU hang workaround. */
				if (sscreen->b.family == CHIP_TONGA ||
				    sscreen->b.family == CHIP_FIJI ||
				    sscreen->b.family == CHIP_POLARIS10 ||
				    sscreen->b.family == CHIP_POLARIS11 ||
				    sscreen->b.family == CHIP_POLARIS12)
					partial_vs_wave = true;
			} else {
				partial_vs_wave = true;
			}
		}
	}

	/* This is a hardware requirement. */
	if (key->u.line_stipple_enabled ||
	    (sscreen->b.debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sscreen->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements.
		 *
		 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
		 * for points, line strips, and tri strips.
		 */
		if (sscreen->b.info.max_se < 4 ||
		    key->u.prim == PIPE_PRIM_POLYGON ||
		    key->u.prim == PIPE_PRIM_LINE_LOOP ||
		    key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
		    key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    (key->u.primitive_restart &&
		     (sscreen->b.family < CHIP_POLARIS10 ||
		      (key->u.prim != PIPE_PRIM_POINTS &&
		       key->u.prim != PIPE_PRIM_LINE_STRIP &&
		       key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
		    key->u.count_from_stream_output)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know whether instancing is used for indirect draws,
		 * so treat them as always problematic. */
		if (sscreen->b.family == CHIP_HAWAII &&
		    key->u.uses_instancing)
			wd_switch_on_eop = true;

		/* Performance recommendation for 4-SE Gfx7-8 parts if
		 * instances are smaller than a primgroup.
		 * Assume indirect draws always use small instances.
		 * This is needed for good VS wave utilization.
		 */
		if (sscreen->b.chip_class <= VI &&
		    sscreen->b.info.max_se == 4 &&
		    key->u.multi_instances_smaller_than_primgroup)
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (sscreen->b.info.max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (sscreen->b.family == CHIP_HAWAII ||
		     (sscreen->b.chip_class == VI &&
		      (key->u.uses_gs || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (sscreen->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    key->u.uses_instancing)
			partial_vs_wave = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (sscreen->b.chip_class <= VI && ia_switch_on_eoi)
		partial_es_wave = true;

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
	       S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
	       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
	       S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
	       S_028AA8_WD_SWITCH_ON_EOP(sscreen->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
	       /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
	       S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->b.chip_class == VI ?
					    max_primgroup_in_wave : 0) |
	       S_030960_EN_INST_OPT_BASIC(sscreen->b.chip_class >= GFX9) |
	       S_030960_EN_INST_OPT_ADV(sscreen->b.chip_class >= GFX9);
}

void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
{
	for (int prim = 0; prim <= R600_PRIM_RECTANGLE_LIST; prim++)
	for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
	for (int multi_instances = 0; multi_instances < 2; multi_instances++)
	for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
	for (int count_from_so = 0; count_from_so < 2; count_from_so++)
	for (int line_stipple = 0; line_stipple < 2; line_stipple++)
	for (int uses_tess = 0; uses_tess < 2; uses_tess++)
	for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
	for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
		union si_vgt_param_key key;

		key.index = 0;
		key.u.prim = prim;
		key.u.uses_instancing = uses_instancing;
		key.u.multi_instances_smaller_than_primgroup = multi_instances;
		key.u.primitive_restart = primitive_restart;
		key.u.count_from_stream_output = count_from_so;
		key.u.line_stipple_enabled = line_stipple;
		key.u.uses_tess = uses_tess;
		key.u.tcs_tes_uses_prim_id = tess_uses_primid;
		key.u.uses_gs = uses_gs;

		sctx->ia_multi_vgt_param[key.index] =
			si_get_init_multi_vgt_param(sctx->screen, &key);
	}
}
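
/* The loop nest above enumerates every possible key:
 * (R600_PRIM_RECTANGLE_LIST + 1) primitive types times 2^8 combinations of
 * the eight boolean flags. Each draw can then fetch its precomputed
 * IA_MULTI_VGT_PARAM base value with a single lookup on key.index in
 * si_get_ia_multi_vgt_param below.
 */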

static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned num_patches)
{
	union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
	unsigned primgroup_size;
	unsigned ia_multi_vgt_param;

	if (sctx->tes_shader.cso) {
		primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
	} else if (sctx->gs_shader.cso) {
		primgroup_size = 64; /* recommended with a GS */
	} else {
		primgroup_size = 128; /* recommended without a GS and tess */
	}

	key.u.prim = info->mode;
	key.u.uses_instancing = info->indirect || info->instance_count > 1;
	key.u.multi_instances_smaller_than_primgroup =
		info->indirect ||
		(info->instance_count > 1 &&
		 (info->count_from_stream_output ||
		  si_num_prims_for_vertices(info) < primgroup_size));
	key.u.primitive_restart = info->primitive_restart;
	key.u.count_from_stream_output = info->count_from_stream_output != NULL;

	ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
			     S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);

	if (sctx->gs_shader.cso) {
		/* GS requirement. */
		if (sctx->b.chip_class <= VI &&
		    SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
			ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);

		/* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
		 * The hw doc says all multi-SE chips are affected, but the
		 * Vulkan driver only applies it to Hawaii. Do what Vulkan does.
		 */
		if (sctx->b.family == CHIP_HAWAII &&
		    G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
		    (info->indirect ||
		     (info->instance_count > 1 &&
		      (info->count_from_stream_output ||
		       si_num_prims_for_vertices(info) <= 1))))
			sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
	}

	return ia_multi_vgt_param;
}

/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	enum pipe_prim_type rast_prim = sctx->current_rast_prim;
	struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == sctx->last_rast_prim &&
	    rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
		return;

	/* For lines, reset the stipple pattern at each primitive. Otherwise,
	 * reset the stipple pattern at each packet (line strips, line loops).
	 */
	radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
			       rs->pa_sc_line_stipple |
			       S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));
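	/* Our reading of the AUTO_RESET_CNTL encoding used above: 1 resets
	 * the stipple pattern after every line, 2 after every packet
	 * (0 would mean no automatic reset).
	 */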

	sctx->last_rast_prim = rast_prim;
	sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}

static void si_emit_vs_state(struct si_context *sctx,
			     const struct pipe_draw_info *info)
{
	sctx->current_vs_state &= C_VS_STATE_INDEXED;
	sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->indexed);

	if (sctx->current_vs_state != sctx->last_vs_state) {
		struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

		radeon_set_sh_reg(cs,
			sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX] +
			SI_SGPR_VS_STATE_BITS * 4,
			sctx->current_vs_state);

		sctx->last_vs_state = sctx->current_vs_state;
	}
}

static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info,
				   unsigned num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
	unsigned ia_multi_vgt_param;

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);

	/* Draw state. */
	if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
		if (sctx->b.chip_class >= GFX9)
			radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param);
		else if (sctx->b.chip_class >= CIK)
			radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
		else
			radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);

		sctx->last_multi_vgt_param = ia_multi_vgt_param;
	}
	if (prim != sctx->last_prim) {
		if (sctx->b.chip_class >= CIK)
			radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
		else
			radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);

		sctx->last_prim = prim;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		if (sctx->b.chip_class >= GFX9)
			radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
					       info->primitive_restart);
		else
			radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
					       info->primitive_restart);

		sctx->last_primitive_restart_en = info->primitive_restart;
	}
	if (info->primitive_restart &&
	    (info->restart_index != sctx->last_restart_index ||
	     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
		radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       info->restart_index);
		sctx->last_restart_index = info->restart_index;
	}
}

static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 const struct pipe_index_buffer *ib)
{
	struct pipe_draw_indirect_info *indirect = info->indirect;
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
	uint32_t index_max_size = 0;
	uint64_t index_va = 0;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			    COPY_DATA_DST_SEL(COPY_DATA_REG) |
			    COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va);       /* src address lo */
		radeon_emit(cs, va >> 32); /* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0); /* unused */

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  t->buf_filled_size, RADEON_USAGE_READ,
					  RADEON_PRIO_SO_FILLED_SIZE);
	}

	/* draw packet */
	if (info->indexed) {
		if (ib->index_size != sctx->last_index_size) {
			unsigned index_type;

			/* index type */
			switch (ib->index_size) {
			case 1:
				index_type = V_028A7C_VGT_INDEX_8;
				break;
			case 2:
				index_type = V_028A7C_VGT_INDEX_16 |
					     (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						      V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
				break;
			case 4:
				index_type = V_028A7C_VGT_INDEX_32 |
					     (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						      V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
				break;
			default:
				assert(!"unreachable");
				return;
			}

			if (sctx->b.chip_class >= GFX9) {
				radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
							   2, index_type);
			} else {
				radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
				radeon_emit(cs, index_type);
			}

			sctx->last_index_size = ib->index_size;
		}

		index_max_size = (ib->buffer->width0 - ib->offset) /
				 ib->index_size;
		index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)ib->buffer,
					  RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
	} else {
		/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
		 * so the state must be re-emitted before the next indexed draw.
		 */
		if (sctx->b.chip_class >= CIK)
			sctx->last_index_size = -1;
	}

	if (indirect) {
		uint64_t indirect_va = r600_resource(indirect->buffer)->gpu_address;

		assert(indirect_va % 8 == 0);

		si_invalidate_draw_sh_constants(sctx);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, 1);
		radeon_emit(cs, indirect_va);
		radeon_emit(cs, indirect_va >> 32);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)indirect->buffer,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		unsigned di_src_sel = info->indexed ? V_0287F0_DI_SRC_SEL_DMA
						    : V_0287F0_DI_SRC_SEL_AUTO_INDEX;

		assert(indirect->offset % 4 == 0);

		if (info->indexed) {
			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);
		}

		if (!sctx->screen->has_draw_indirect_multi) {
			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT
							   : PKT3_DRAW_INDIRECT,
					     3, render_cond_bit));
			radeon_emit(cs, indirect->offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, di_src_sel);
		} else {
			uint64_t count_va = 0;

			if (indirect->indirect_draw_count) {
				struct r600_resource *params_buf =
					(struct r600_resource *)indirect->indirect_draw_count;

				radeon_add_to_buffer_list(
					&sctx->b, &sctx->b.gfx, params_buf,
					RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

				count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
			}

			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
							     PKT3_DRAW_INDIRECT_MULTI,
					     8, render_cond_bit));
			radeon_emit(cs, indirect->offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
				    S_2C3_DRAW_INDEX_ENABLE(1) |
				    S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
			radeon_emit(cs, indirect->draw_count);
			radeon_emit(cs, count_va);
			radeon_emit(cs, count_va >> 32);
			radeon_emit(cs, indirect->stride);
			radeon_emit(cs, di_src_sel);
		}
	} else {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = info->indexed ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    info->drawid != sctx->last_drawid ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);
			radeon_emit(cs, info->drawid);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_drawid = info->drawid;
			sctx->last_sh_base_reg = sh_base_reg;
		}

		if (info->indexed) {
			index_va += info->start * ib->index_size;

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
			radeon_emit(cs, index_max_size);
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		} else {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
		}
	}
}

static void si_emit_surface_sync(struct r600_common_context *rctx,
				 unsigned cp_coher_cntl)
{
	struct radeon_winsys_cs *cs = rctx->gfx.cs;

	if (rctx->chip_class >= GFX9) {
		/* Flush caches and wait for the caches to assert idle. */
		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0xffffff);	/* CP_COHER_SIZE_HI */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	} else {
		/* ACQUIRE_MEM is only required on a compute ring. */
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	}
}

void si_emit_cache_flush(struct si_context *sctx)
{
	struct r600_common_context *rctx = &sctx->b;
	struct radeon_winsys_cs *cs = rctx->gfx.cs;
	uint32_t cp_coher_cntl = 0;
	uint32_t flush_cb_db = rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
					      SI_CONTEXT_FLUSH_AND_INV_DB);

	if (rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
			   SI_CONTEXT_FLUSH_AND_INV_DB))
		sctx->b.num_fb_cache_flushes++;

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES, but that
	 * doesn't seem to work reliably. Since the bug doesn't affect
	 * correctness (it only does more work than necessary) and
	 * the performance impact is likely negligible, there is no plan
	 * to add a workaround for it.
	 */

	if (rctx->flags & SI_CONTEXT_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (rctx->flags & SI_CONTEXT_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (rctx->chip_class <= VI) {
		if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
			cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
					 S_0085F0_CB0_DEST_BASE_ENA(1) |
					 S_0085F0_CB1_DEST_BASE_ENA(1) |
					 S_0085F0_CB2_DEST_BASE_ENA(1) |
					 S_0085F0_CB3_DEST_BASE_ENA(1) |
					 S_0085F0_CB4_DEST_BASE_ENA(1) |
					 S_0085F0_CB5_DEST_BASE_ENA(1) |
					 S_0085F0_CB6_DEST_BASE_ENA(1) |
					 S_0085F0_CB7_DEST_BASE_ENA(1);

			/* Necessary for DCC */
			if (rctx->chip_class == VI)
				r600_gfx_write_event_eop(rctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
							 0, 0, NULL, 0, 0, 0);
		}
		if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB)
			cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
					 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		/* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		/* Flush HTILE. SURFACE_SYNC will wait for idle. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}

	/* Wait for shader engines to go idle.
	 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
	 * for everything including CB/DB cache flushes.
	 */
	if (!flush_cb_db) {
		if (rctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			/* Only count explicit shader flushes, not implicit ones
			 * done by SURFACE_SYNC.
			 */
			rctx->num_vs_flushes++;
			rctx->num_ps_flushes++;
		} else if (rctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			rctx->num_vs_flushes++;
		}
	}

	if (rctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
	    sctx->compute_is_busy) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
		rctx->num_cs_flushes++;
		sctx->compute_is_busy = false;
	}

	/* VGT state synchronization. */
	if (rctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (rctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
	 * wait for idle on GFX9. We have to use a TS event.
	 */
	if (sctx->b.chip_class >= GFX9 && flush_cb_db) {
		struct r600_resource *rbuf = NULL;
		uint64_t va;
		unsigned offset = 0, tc_flags, cb_db_event;

		/* Set the CB/DB flush event. */
		switch (flush_cb_db) {
		case SI_CONTEXT_FLUSH_AND_INV_CB:
			cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
			break;
		case SI_CONTEXT_FLUSH_AND_INV_DB:
			cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
			break;
		default:
			/* both CB & DB */
			cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
		}

		/* TC    | TC_WB         = invalidate L2 data
		 * TC_MD | TC_WB         = invalidate L2 metadata
		 * TC    | TC_WB | TC_MD = invalidate L2 data & metadata
		 *
		 * The metadata cache must always be invalidated for coherency
		 * between CB/DB and shaders. (metadata = HTILE, CMASK, DCC)
		 *
		 * TC must be invalidated on GFX9 only if the CB/DB surface is
		 * not pipe-aligned. If the surface is RB-aligned, it might not
		 * strictly be pipe-aligned since RB alignment takes precedence.
		 */
		tc_flags = EVENT_TC_WB_ACTION_ENA |
			   EVENT_TC_MD_ACTION_ENA;

		/* Ideally flush TC together with CB/DB. */
		if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2) {
			tc_flags |= EVENT_TC_ACTION_ENA |
				    EVENT_TCL1_ACTION_ENA;

			/* Clear the flags. */
			rctx->flags &= ~(SI_CONTEXT_INV_GLOBAL_L2 |
					 SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
					 SI_CONTEXT_INV_VMEM_L1);
		}

		/* Allocate memory for the fence. */
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 4, 4,
				     &offset, (struct pipe_resource**)&rbuf);
		va = rbuf->gpu_address + offset;

		r600_gfx_write_event_eop(rctx, cb_db_event, tc_flags, 1,
					 rbuf, va, 0, 1);
		r600_gfx_wait_fence(rctx, va, 1, 0xffffffff);
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if (cp_coher_cntl ||
	    (rctx->flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
			    SI_CONTEXT_INV_VMEM_L1 |
			    SI_CONTEXT_INV_GLOBAL_L2 |
			    SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	/* SI-CI-VI only:
	 * When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
	 * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
	 *
	 * cp_coher_cntl should contain all necessary flags except TC flags
	 * at this point.
	 *
	 * SI-CIK don't support L2 write-back.
	 */
	if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2 ||
	    (rctx->chip_class <= CIK &&
	     (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		/* Invalidate L1 & L2. (L1 is always invalidated on SI)
		 * WB must be set on VI+ when TC_ACTION is set.
		 */
		si_emit_surface_sync(rctx, cp_coher_cntl |
				     S_0085F0_TC_ACTION_ENA(1) |
				     S_0085F0_TCL1_ACTION_ENA(1) |
				     S_0301F0_TC_WB_ACTION_ENA(rctx->chip_class >= VI));
		cp_coher_cntl = 0;
		sctx->b.num_L2_invalidates++;
	} else {
		/* L1 invalidation and L2 writeback must be done separately,
		 * because both operations can't be done together.
		 */
		if (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
			/* WB = write-back
			 * NC = apply to non-coherent MTYPEs
			 *      (i.e. MTYPE <= 1, which is what we use everywhere)
			 *
			 * WB doesn't work without NC.
			 */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0301F0_TC_WB_ACTION_ENA(1) |
					     S_0301F0_TC_NC_ACTION_ENA(1));
			cp_coher_cntl = 0;
			sctx->b.num_L2_writebacks++;
		}
		if (rctx->flags & SI_CONTEXT_INV_VMEM_L1) {
			/* Invalidate per-CU VMEM L1. */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0085F0_TCL1_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
	}

	/* If TC flushes haven't cleared this... */
	if (cp_coher_cntl)
		si_emit_surface_sync(rctx, cp_coher_cntl);

	if (rctx->flags & R600_CONTEXT_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
			    EVENT_INDEX(0));
	} else if (rctx->flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
			    EVENT_INDEX(0));
	}

	rctx->flags = 0;
}

static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	struct pipe_draw_indirect_info *indirect = info->indirect;

	if (indirect) {
		unsigned indirect_count;
		struct pipe_transfer *transfer;
		unsigned begin, end;
		unsigned map_size;
		unsigned *data;

		if (indirect->indirect_draw_count) {
			data = pipe_buffer_map_range(&sctx->b.b,
					indirect->indirect_draw_count,
					indirect->indirect_draw_count_offset,
					sizeof(unsigned),
					PIPE_TRANSFER_READ, &transfer);

			indirect_count = *data;

			pipe_buffer_unmap(&sctx->b.b, transfer);
		} else {
			indirect_count = indirect->draw_count;
		}

		if (!indirect_count) {
			*start = *count = 0;
			return;
		}

		map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
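		/* Each indirect command is assumed to follow the standard
		 * layout in which the first three dwords are (count,
		 * instance_count, start) for non-indexed draws and (count,
		 * instance_count, start_index) for indexed ones; that is why
		 * the loop below reads data[0] as the count and data[2] as
		 * the start, and why map_size only needs to cover the first
		 * three dwords of the last command.
		 */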
		data = pipe_buffer_map_range(&sctx->b.b, indirect->buffer,
					     indirect->offset, map_size,
					     PIPE_TRANSFER_READ, &transfer);

		begin = UINT_MAX;
		end = 0;

		for (unsigned i = 0; i < indirect_count; ++i) {
			unsigned count = data[0];
			unsigned start = data[2];

			if (count > 0) {
				begin = MIN2(begin, start);
				end = MAX2(end, start + count);
			}

			data += indirect->stride / sizeof(unsigned);
		}

		pipe_buffer_unmap(&sctx->b.b, transfer);

		if (begin < end) {
			*start = begin;
			*count = end - begin;
		} else {
			*start = *count = 0;
		}
	} else {
		*start = info->start;
		*count = info->count;
	}
}

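/* A sketch of the CE/DE handshake implemented below: the constant engine
 * (CE) runs ahead of the draw engine (DE). Before a draw that depends on
 * CE-uploaded data, the CE bumps its counter and the DE waits for it;
 * after the draw, the DE counter is bumped so the CE knows when it is
 * safe to reuse the memory.
 */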
void si_ce_pre_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->ce_ib, PKT3(PKT3_INCREMENT_CE_COUNTER, 0, 0));
		radeon_emit(sctx->ce_ib, 1);

		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_WAIT_ON_CE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 1);
	}
}

void si_ce_post_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_INCREMENT_DE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 0);

		sctx->ce_need_synchronization = false;
	}
}

void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	const struct pipe_index_buffer *ib = &sctx->index_buffer;
	struct pipe_index_buffer ib_tmp; /* for index buffer uploads only */
	unsigned mask, dirty_tex_counter;
	enum pipe_prim_type rast_prim;
	unsigned num_patches = 0;

	if (likely(!info->indirect)) {
		/* SI-CI treat instance_count==0 as instance_count==1. There is
		 * no workaround for indirect draws, but we can at least skip
		 * direct draws.
		 */
		if (unlikely(!info->instance_count))
			return;

		/* Handle count == 0. */
		if (unlikely(!info->count &&
			     (info->indexed || !info->count_from_stream_output)))
			return;
	}

	if (unlikely(!sctx->vs_shader.cso)) {
		assert(0);
		return;
	}
	if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
		assert(0);
		return;
	}
	if (unlikely(!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES))) {
		assert(0);
		return;
	}

	/* Recompute and re-emit the texture resource states if needed. */
	dirty_tex_counter = p_atomic_read(&sctx->b.screen->dirty_tex_counter);
	if (unlikely(dirty_tex_counter != sctx->b.last_dirty_tex_counter)) {
		sctx->b.last_dirty_tex_counter = dirty_tex_counter;
		sctx->framebuffer.dirty_cbufs |=
			((1 << sctx->framebuffer.state.nr_cbufs) - 1);
		sctx->framebuffer.dirty_zsbuf = true;
		sctx->framebuffer.do_update_surf_dirtiness = true;
		si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
		si_update_all_texture_descriptors(sctx);
	}

	si_decompress_graphics_textures(sctx);

	/* Set the rasterization primitive type.
	 *
	 * This must be done after si_decompress_graphics_textures, which can
	 * call draw_vbo recursively, and before si_update_shaders, which uses
	 * current_rast_prim for this draw_vbo call. */
	if (sctx->gs_shader.cso)
		rast_prim = sctx->gs_shader.cso->gs_output_prim;
	else if (sctx->tes_shader.cso)
		rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
	else
		rast_prim = info->mode;

	if (rast_prim != sctx->current_rast_prim) {
		sctx->current_rast_prim = rast_prim;
		sctx->do_update_shaders = true;
	}

	if (sctx->gs_shader.cso) {
		/* Determine whether the GS triangle strip adjacency fix should
		 * be applied. Rotate every other triangle if
		 * - triangle strips with adjacency are fed to the GS and
		 * - primitive restart is disabled (the rotation doesn't help
		 *   when the restart occurs after an odd number of triangles).
		 */
		bool gs_tri_strip_adj_fix =
			!sctx->tes_shader.cso &&
			info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
			!info->primitive_restart;

		if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
			sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
			sctx->do_update_shaders = true;
		}
	}

	if (sctx->do_update_shaders && !si_update_shaders(sctx))
		return;

	if (!si_upload_graphics_shader_descriptors(sctx))
		return;

	ib_tmp.buffer = NULL;

	if (info->indexed) {
		/* Translate or upload, if needed. */
		/* 8-bit indices are supported on VI. */
		if (sctx->b.chip_class <= CIK && ib->index_size == 1) {
			unsigned start, count, start_offset, size;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * 2;
			size = count * 2;

			u_upload_alloc(ctx->stream_uploader, start_offset,
				       size,
				       si_optimal_tcc_alignment(sctx, size),
				       &ib_tmp.offset, &ib_tmp.buffer, &ptr);
			if (!ib_tmp.buffer)
				return;

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, ib, 0, 0,
							   ib->offset + start,
							   count, ptr);

			/* info->start will be added by the drawing code */
			ib_tmp.offset -= start_offset;
			ib_tmp.index_size = 2;
			ib = &ib_tmp;
		} else if (ib->user_buffer && !ib->buffer) {
			unsigned start_offset;

			assert(!info->indirect);
			start_offset = info->start * ib->index_size;

			u_upload_data(ctx->stream_uploader, start_offset,
				      info->count * ib->index_size,
				      sctx->screen->b.info.tcc_cache_line_size,
				      (char*)ib->user_buffer + start_offset,
				      &ib_tmp.offset, &ib_tmp.buffer);
			if (!ib_tmp.buffer)
				return;

			/* info->start will be added by the drawing code */
			ib_tmp.offset -= start_offset;
			ib_tmp.index_size = ib->index_size;
			ib = &ib_tmp;
		} else if (sctx->b.chip_class <= CIK &&
			   r600_resource(ib->buffer)->TC_L2_dirty) {
			/* VI reads index buffers through TC L2, so it doesn't
			 * need this. */
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(ib->buffer)->TC_L2_dirty = false;
		}
	}

	if (info->indirect) {
		struct pipe_draw_indirect_info *indirect = info->indirect;

		/* Add the buffer size for memory checking in need_cs_space. */
		r600_context_add_resource_size(ctx, indirect->buffer);

		if (r600_resource(indirect->buffer)->TC_L2_dirty) {
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(indirect->buffer)->TC_L2_dirty = false;
		}

		if (indirect->indirect_draw_count &&
		    r600_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
		}
	}

	si_need_cs_space(sctx);

	/* Since we've called r600_context_add_resource_size for vertex buffers,
	 * this must be called after si_need_cs_space, because we must let
	 * need_cs_space flush before we add buffers to the buffer list.
	 */
	if (!si_upload_vertex_buffer_descriptors(sctx))
		return;

	/* GFX9 scissor bug workaround. There is also a more efficient but
	 * more involved alternative workaround. */
	if (sctx->b.chip_class == GFX9 &&
	    si_is_atom_dirty(sctx, &sctx->b.scissors.atom))
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;

	/* Flush caches before the first state atom, which does L2 prefetches. */
	if (sctx->b.flags)
		si_emit_cache_flush(sctx);

	/* Emit state atoms. */
	mask = sctx->dirty_atoms;
	while (mask) {
		struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];

		atom->emit(&sctx->b, atom);
	}
	sctx->dirty_atoms = 0;

	/* Emit states. */
	mask = sctx->dirty_states;
	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct si_pm4_state *state = sctx->queued.array[i];

		if (!state || sctx->emitted.array[i] == state)
			continue;

		si_pm4_emit(sctx, state);
		sctx->emitted.array[i] = state;
	}
	sctx->dirty_states = 0;

	si_emit_rasterizer_prim_state(sctx);
	if (sctx->tes_shader.cso)
		si_emit_derived_tess_state(sctx, info, &num_patches);
	si_emit_vs_state(sctx, info);
	si_emit_draw_registers(sctx, info, num_patches);

	si_ce_pre_draw_synchronization(sctx);
	si_emit_draw_packets(sctx, info, ib);
	si_ce_post_draw_synchronization(sctx);

	if (sctx->trace_buf)
		si_trace_emit(sctx);

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if ((sctx->b.family == CHIP_HAWAII ||
	     sctx->b.family == CHIP_TONGA ||
	     sctx->b.family == CHIP_FIJI) &&
	    r600_get_strmout_en(&sctx->b)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	if (sctx->framebuffer.do_update_surf_dirtiness) {
		/* Set the depth buffer as dirty. */
		if (sctx->framebuffer.state.zsbuf) {
			struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
			struct r600_texture *rtex = (struct r600_texture *)surf->texture;

			if (!rtex->tc_compatible_htile)
				rtex->dirty_level_mask |= 1 << surf->u.tex.level;

			if (rtex->surface.flags & RADEON_SURF_SBUFFER)
				rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
		}
		if (sctx->framebuffer.compressed_cb_mask) {
			struct pipe_surface *surf;
			struct r600_texture *rtex;
			unsigned mask = sctx->framebuffer.compressed_cb_mask;

			do {
				unsigned i = u_bit_scan(&mask);
				surf = sctx->framebuffer.state.cbufs[i];
				rtex = (struct r600_texture*)surf->texture;

				if (rtex->fmask.size)
					rtex->dirty_level_mask |= 1 << surf->u.tex.level;
				if (rtex->dcc_gather_statistics)
					rtex->separate_dcc_dirty = true;
			} while (mask);
		}
		sctx->framebuffer.do_update_surf_dirtiness = false;
	}

	pipe_resource_reference(&ib_tmp.buffer, NULL);
	sctx->b.num_draw_calls++;
	if (info->primitive_restart)
		sctx->b.num_prim_restart_calls++;
	if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
		sctx->b.num_spill_draw_calls++;
}

void si_trace_emit(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	sctx->trace_id++;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, sctx->trace_buf->gpu_address);
	radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
	radeon_emit(cs, sctx->trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(sctx->trace_id));
}