2 * Copyright 2012 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 #include "si_build_pm4.h"
28 #include "util/u_index_modify.h"
29 #include "util/u_log.h"
30 #include "util/u_prim.h"
31 #include "util/u_suballoc.h"
32 #include "util/u_upload_mgr.h"
34 /* special primitive types */
35 #define SI_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX
37 static unsigned si_conv_pipe_prim(unsigned mode
)
39 static const unsigned prim_conv
[] = {
40 [PIPE_PRIM_POINTS
] = V_008958_DI_PT_POINTLIST
,
41 [PIPE_PRIM_LINES
] = V_008958_DI_PT_LINELIST
,
42 [PIPE_PRIM_LINE_LOOP
] = V_008958_DI_PT_LINELOOP
,
43 [PIPE_PRIM_LINE_STRIP
] = V_008958_DI_PT_LINESTRIP
,
44 [PIPE_PRIM_TRIANGLES
] = V_008958_DI_PT_TRILIST
,
45 [PIPE_PRIM_TRIANGLE_STRIP
] = V_008958_DI_PT_TRISTRIP
,
46 [PIPE_PRIM_TRIANGLE_FAN
] = V_008958_DI_PT_TRIFAN
,
47 [PIPE_PRIM_QUADS
] = V_008958_DI_PT_QUADLIST
,
48 [PIPE_PRIM_QUAD_STRIP
] = V_008958_DI_PT_QUADSTRIP
,
49 [PIPE_PRIM_POLYGON
] = V_008958_DI_PT_POLYGON
,
50 [PIPE_PRIM_LINES_ADJACENCY
] = V_008958_DI_PT_LINELIST_ADJ
,
51 [PIPE_PRIM_LINE_STRIP_ADJACENCY
] = V_008958_DI_PT_LINESTRIP_ADJ
,
52 [PIPE_PRIM_TRIANGLES_ADJACENCY
] = V_008958_DI_PT_TRILIST_ADJ
,
53 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY
] = V_008958_DI_PT_TRISTRIP_ADJ
,
54 [PIPE_PRIM_PATCHES
] = V_008958_DI_PT_PATCH
,
55 [SI_PRIM_RECTANGLE_LIST
] = V_008958_DI_PT_RECTLIST
};
56 assert(mode
< ARRAY_SIZE(prim_conv
));
57 return prim_conv
[mode
];
61 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
62 * LS.LDS_SIZE is shared by all 3 shader stages.
64 * The information about LDS and other non-compile-time parameters is then
65 * written to userdata SGPRs.
67 static void si_emit_derived_tess_state(struct si_context
*sctx
, const struct pipe_draw_info
*info
,
68 unsigned *num_patches
)
70 struct radeon_cmdbuf
*cs
= sctx
->gfx_cs
;
71 struct si_shader
*ls_current
;
72 struct si_shader_selector
*ls
;
73 /* The TES pointer will only be used for sctx->last_tcs.
74 * It would be wrong to think that TCS = TES. */
75 struct si_shader_selector
*tcs
=
76 sctx
->tcs_shader
.cso
? sctx
->tcs_shader
.cso
: sctx
->tes_shader
.cso
;
77 unsigned tess_uses_primid
= sctx
->ia_multi_vgt_param_key
.u
.tess_uses_prim_id
;
78 bool has_primid_instancing_bug
= sctx
->chip_class
== GFX6
&& sctx
->screen
->info
.max_se
== 1;
79 unsigned tes_sh_base
= sctx
->shader_pointers
.sh_base
[PIPE_SHADER_TESS_EVAL
];
80 unsigned num_tcs_input_cp
= info
->vertices_per_patch
;
81 unsigned num_tcs_output_cp
, num_tcs_inputs
, num_tcs_outputs
;
82 unsigned num_tcs_patch_outputs
;
83 unsigned input_vertex_size
, output_vertex_size
, pervertex_output_patch_size
;
84 unsigned input_patch_size
, output_patch_size
, output_patch0_offset
;
85 unsigned perpatch_output_offset
, lds_size
;
86 unsigned tcs_in_layout
, tcs_out_layout
, tcs_out_offsets
;
87 unsigned offchip_layout
, hardware_lds_size
, ls_hs_config
;
89 /* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
90 if (sctx
->chip_class
>= GFX9
) {
91 if (sctx
->tcs_shader
.cso
)
92 ls_current
= sctx
->tcs_shader
.current
;
94 ls_current
= sctx
->fixed_func_tcs_shader
.current
;
96 ls
= ls_current
->key
.part
.tcs
.ls
;
98 ls_current
= sctx
->vs_shader
.current
;
99 ls
= sctx
->vs_shader
.cso
;
102 if (sctx
->last_ls
== ls_current
&& sctx
->last_tcs
== tcs
&&
103 sctx
->last_tes_sh_base
== tes_sh_base
&& sctx
->last_num_tcs_input_cp
== num_tcs_input_cp
&&
104 (!has_primid_instancing_bug
|| (sctx
->last_tess_uses_primid
== tess_uses_primid
))) {
105 *num_patches
= sctx
->last_num_patches
;
109 sctx
->last_ls
= ls_current
;
110 sctx
->last_tcs
= tcs
;
111 sctx
->last_tes_sh_base
= tes_sh_base
;
112 sctx
->last_num_tcs_input_cp
= num_tcs_input_cp
;
113 sctx
->last_tess_uses_primid
= tess_uses_primid
;
115 /* This calculates how shader inputs and outputs among VS, TCS, and TES
116 * are laid out in LDS. */
117 num_tcs_inputs
= util_last_bit64(ls
->outputs_written
);
119 if (sctx
->tcs_shader
.cso
) {
120 num_tcs_outputs
= util_last_bit64(tcs
->outputs_written
);
121 num_tcs_output_cp
= tcs
->info
.properties
[TGSI_PROPERTY_TCS_VERTICES_OUT
];
122 num_tcs_patch_outputs
= util_last_bit64(tcs
->patch_outputs_written
);
124 /* No TCS. Route varyings from LS to TES. */
125 num_tcs_outputs
= num_tcs_inputs
;
126 num_tcs_output_cp
= num_tcs_input_cp
;
127 num_tcs_patch_outputs
= 2; /* TESSINNER + TESSOUTER */
130 input_vertex_size
= ls
->lshs_vertex_stride
;
131 output_vertex_size
= num_tcs_outputs
* 16;
133 input_patch_size
= num_tcs_input_cp
* input_vertex_size
;
135 pervertex_output_patch_size
= num_tcs_output_cp
* output_vertex_size
;
136 output_patch_size
= pervertex_output_patch_size
+ num_tcs_patch_outputs
* 16;
138 /* Ensure that we only need one wave per SIMD so we don't need to check
139 * resource usage. Also ensures that the number of tcs in and out
140 * vertices per threadgroup are at most 256.
142 unsigned max_verts_per_patch
= MAX2(num_tcs_input_cp
, num_tcs_output_cp
);
143 *num_patches
= 256 / max_verts_per_patch
;
145 /* Make sure that the data fits in LDS. This assumes the shaders only
146 * use LDS for the inputs and outputs.
148 * While GFX7 can use 64K per threadgroup, there is a hang on Stoney
149 * with 2 CUs if we use more than 32K. The closed Vulkan driver also
150 * uses 32K at most on all GCN chips.
152 hardware_lds_size
= 32768;
153 *num_patches
= MIN2(*num_patches
, hardware_lds_size
/ (input_patch_size
+ output_patch_size
));
155 /* Make sure the output data fits in the offchip buffer */
157 MIN2(*num_patches
, (sctx
->screen
->tess_offchip_block_dw_size
* 4) / output_patch_size
);
159 /* Not necessary for correctness, but improves performance.
160 * The hardware can do more, but the radeonsi shader constant is
163 *num_patches
= MIN2(*num_patches
, 63); /* triangles: 3 full waves except 3 lanes */
165 /* When distributed tessellation is unsupported, switch between SEs
166 * at a higher frequency to compensate for it.
168 if (!sctx
->screen
->info
.has_distributed_tess
&& sctx
->screen
->info
.max_se
> 1)
169 *num_patches
= MIN2(*num_patches
, 16); /* recommended */
171 /* Make sure that vector lanes are reasonably occupied. It probably
172 * doesn't matter much because this is LS-HS, and TES is likely to
173 * occupy significantly more CUs.
175 unsigned temp_verts_per_tg
= *num_patches
* max_verts_per_patch
;
176 unsigned wave_size
= sctx
->screen
->ge_wave_size
;
178 if (temp_verts_per_tg
> wave_size
&& temp_verts_per_tg
% wave_size
< wave_size
* 3 / 4)
179 *num_patches
= (temp_verts_per_tg
& ~(wave_size
- 1)) / max_verts_per_patch
;
181 if (sctx
->chip_class
== GFX6
) {
182 /* GFX6 bug workaround, related to power management. Limit LS-HS
183 * threadgroups to only one wave.
185 unsigned one_wave
= wave_size
/ max_verts_per_patch
;
186 *num_patches
= MIN2(*num_patches
, one_wave
);
189 /* The VGT HS block increments the patch ID unconditionally
190 * within a single threadgroup. This results in incorrect
191 * patch IDs when instanced draws are used.
193 * The intended solution is to restrict threadgroups to
194 * a single instance by setting SWITCH_ON_EOI, which
195 * should cause IA to split instances up. However, this
196 * doesn't work correctly on GFX6 when there is no other
199 if (has_primid_instancing_bug
&& tess_uses_primid
)
202 sctx
->last_num_patches
= *num_patches
;
204 output_patch0_offset
= input_patch_size
* *num_patches
;
205 perpatch_output_offset
= output_patch0_offset
+ pervertex_output_patch_size
;
207 /* Compute userdata SGPRs. */
208 assert(((input_vertex_size
/ 4) & ~0xff) == 0);
209 assert(((output_vertex_size
/ 4) & ~0xff) == 0);
210 assert(((input_patch_size
/ 4) & ~0x1fff) == 0);
211 assert(((output_patch_size
/ 4) & ~0x1fff) == 0);
212 assert(((output_patch0_offset
/ 16) & ~0xffff) == 0);
213 assert(((perpatch_output_offset
/ 16) & ~0xffff) == 0);
214 assert(num_tcs_input_cp
<= 32);
215 assert(num_tcs_output_cp
<= 32);
217 uint64_t ring_va
= si_resource(sctx
->tess_rings
)->gpu_address
;
218 assert((ring_va
& u_bit_consecutive(0, 19)) == 0);
220 tcs_in_layout
= S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size
/ 4) |
221 S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size
/ 4);
222 tcs_out_layout
= (output_patch_size
/ 4) | (num_tcs_input_cp
<< 13) | ring_va
;
223 tcs_out_offsets
= (output_patch0_offset
/ 16) | ((perpatch_output_offset
/ 16) << 16);
225 *num_patches
| (num_tcs_output_cp
<< 6) | (pervertex_output_patch_size
* *num_patches
<< 12);
227 /* Compute the LDS size. */
228 lds_size
= output_patch0_offset
+ output_patch_size
* *num_patches
;
230 if (sctx
->chip_class
>= GFX7
) {
231 assert(lds_size
<= 65536);
232 lds_size
= align(lds_size
, 512) / 512;
234 assert(lds_size
<= 32768);
235 lds_size
= align(lds_size
, 256) / 256;
238 /* Set SI_SGPR_VS_STATE_BITS. */
239 sctx
->current_vs_state
&= C_VS_STATE_LS_OUT_PATCH_SIZE
& C_VS_STATE_LS_OUT_VERTEX_SIZE
;
240 sctx
->current_vs_state
|= tcs_in_layout
;
242 /* We should be able to support in-shader LDS use with LLVM >= 9
243 * by just adding the lds_sizes together, but it has never
245 assert(ls_current
->config
.lds_size
== 0);
247 if (sctx
->chip_class
>= GFX9
) {
248 unsigned hs_rsrc2
= ls_current
->config
.rsrc2
;
250 if (sctx
->chip_class
>= GFX10
)
251 hs_rsrc2
|= S_00B42C_LDS_SIZE_GFX10(lds_size
);
253 hs_rsrc2
|= S_00B42C_LDS_SIZE_GFX9(lds_size
);
255 radeon_set_sh_reg(cs
, R_00B42C_SPI_SHADER_PGM_RSRC2_HS
, hs_rsrc2
);
257 /* Set userdata SGPRs for merged LS-HS. */
258 radeon_set_sh_reg_seq(
259 cs
, R_00B430_SPI_SHADER_USER_DATA_LS_0
+ GFX9_SGPR_TCS_OFFCHIP_LAYOUT
* 4, 3);
260 radeon_emit(cs
, offchip_layout
);
261 radeon_emit(cs
, tcs_out_offsets
);
262 radeon_emit(cs
, tcs_out_layout
);
264 unsigned ls_rsrc2
= ls_current
->config
.rsrc2
;
266 si_multiwave_lds_size_workaround(sctx
->screen
, &lds_size
);
267 ls_rsrc2
|= S_00B52C_LDS_SIZE(lds_size
);
269 /* Due to a hw bug, RSRC2_LS must be written twice with another
270 * LS register written in between. */
271 if (sctx
->chip_class
== GFX7
&& sctx
->family
!= CHIP_HAWAII
)
272 radeon_set_sh_reg(cs
, R_00B52C_SPI_SHADER_PGM_RSRC2_LS
, ls_rsrc2
);
273 radeon_set_sh_reg_seq(cs
, R_00B528_SPI_SHADER_PGM_RSRC1_LS
, 2);
274 radeon_emit(cs
, ls_current
->config
.rsrc1
);
275 radeon_emit(cs
, ls_rsrc2
);
277 /* Set userdata SGPRs for TCS. */
278 radeon_set_sh_reg_seq(
279 cs
, R_00B430_SPI_SHADER_USER_DATA_HS_0
+ GFX6_SGPR_TCS_OFFCHIP_LAYOUT
* 4, 4);
280 radeon_emit(cs
, offchip_layout
);
281 radeon_emit(cs
, tcs_out_offsets
);
282 radeon_emit(cs
, tcs_out_layout
);
283 radeon_emit(cs
, tcs_in_layout
);
286 /* Set userdata SGPRs for TES. */
287 radeon_set_sh_reg_seq(cs
, tes_sh_base
+ SI_SGPR_TES_OFFCHIP_LAYOUT
* 4, 2);
288 radeon_emit(cs
, offchip_layout
);
289 radeon_emit(cs
, ring_va
);
291 ls_hs_config
= S_028B58_NUM_PATCHES(*num_patches
) | S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp
) |
292 S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp
);
294 if (sctx
->last_ls_hs_config
!= ls_hs_config
) {
295 if (sctx
->chip_class
>= GFX7
) {
296 radeon_set_context_reg_idx(cs
, R_028B58_VGT_LS_HS_CONFIG
, 2, ls_hs_config
);
298 radeon_set_context_reg(cs
, R_028B58_VGT_LS_HS_CONFIG
, ls_hs_config
);
300 sctx
->last_ls_hs_config
= ls_hs_config
;
301 sctx
->context_roll
= true;
305 static unsigned si_num_prims_for_vertices(const struct pipe_draw_info
*info
,
306 enum pipe_prim_type prim
)
309 case PIPE_PRIM_PATCHES
:
310 return info
->count
/ info
->vertices_per_patch
;
311 case PIPE_PRIM_POLYGON
:
312 return info
->count
>= 3;
313 case SI_PRIM_RECTANGLE_LIST
:
314 return info
->count
/ 3;
316 return u_decomposed_prims_for_vertices(prim
, info
->count
);
320 static unsigned si_get_init_multi_vgt_param(struct si_screen
*sscreen
, union si_vgt_param_key
*key
)
322 STATIC_ASSERT(sizeof(union si_vgt_param_key
) == 4);
323 unsigned max_primgroup_in_wave
= 2;
325 /* SWITCH_ON_EOP(0) is always preferable. */
326 bool wd_switch_on_eop
= false;
327 bool ia_switch_on_eop
= false;
328 bool ia_switch_on_eoi
= false;
329 bool partial_vs_wave
= false;
330 bool partial_es_wave
= false;
332 if (key
->u
.uses_tess
) {
333 /* SWITCH_ON_EOI must be set if PrimID is used. */
334 if (key
->u
.tess_uses_prim_id
)
335 ia_switch_on_eoi
= true;
337 /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
338 if ((sscreen
->info
.family
== CHIP_TAHITI
|| sscreen
->info
.family
== CHIP_PITCAIRN
||
339 sscreen
->info
.family
== CHIP_BONAIRE
) &&
341 partial_vs_wave
= true;
343 /* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
344 if (sscreen
->info
.has_distributed_tess
) {
345 if (key
->u
.uses_gs
) {
346 if (sscreen
->info
.chip_class
== GFX8
)
347 partial_es_wave
= true;
349 partial_vs_wave
= true;
354 /* This is a hardware requirement. */
355 if (key
->u
.line_stipple_enabled
|| (sscreen
->debug_flags
& DBG(SWITCH_ON_EOP
))) {
356 ia_switch_on_eop
= true;
357 wd_switch_on_eop
= true;
360 if (sscreen
->info
.chip_class
>= GFX7
) {
361 /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
362 * 4 shader engines. Set 1 to pass the assertion below.
363 * The other cases are hardware requirements.
365 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
366 * for points, line strips, and tri strips.
368 if (sscreen
->info
.max_se
<= 2 || key
->u
.prim
== PIPE_PRIM_POLYGON
||
369 key
->u
.prim
== PIPE_PRIM_LINE_LOOP
|| key
->u
.prim
== PIPE_PRIM_TRIANGLE_FAN
||
370 key
->u
.prim
== PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY
||
371 (key
->u
.primitive_restart
&&
372 (sscreen
->info
.family
< CHIP_POLARIS10
||
373 (key
->u
.prim
!= PIPE_PRIM_POINTS
&& key
->u
.prim
!= PIPE_PRIM_LINE_STRIP
&&
374 key
->u
.prim
!= PIPE_PRIM_TRIANGLE_STRIP
))) ||
375 key
->u
.count_from_stream_output
)
376 wd_switch_on_eop
= true;
378 /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
379 * We don't know that for indirect drawing, so treat it as
380 * always problematic. */
381 if (sscreen
->info
.family
== CHIP_HAWAII
&& key
->u
.uses_instancing
)
382 wd_switch_on_eop
= true;
384 /* Performance recommendation for 4 SE Gfx7-8 parts if
385 * instances are smaller than a primgroup.
386 * Assume indirect draws always use small instances.
387 * This is needed for good VS wave utilization.
389 if (sscreen
->info
.chip_class
<= GFX8
&& sscreen
->info
.max_se
== 4 &&
390 key
->u
.multi_instances_smaller_than_primgroup
)
391 wd_switch_on_eop
= true;
393 /* Required on GFX7 and later. */
394 if (sscreen
->info
.max_se
== 4 && !wd_switch_on_eop
)
395 ia_switch_on_eoi
= true;
397 /* HW engineers suggested that PARTIAL_VS_WAVE_ON should be set
398 * to work around a GS hang.
400 if (key
->u
.uses_gs
&&
401 (sscreen
->info
.family
== CHIP_TONGA
|| sscreen
->info
.family
== CHIP_FIJI
||
402 sscreen
->info
.family
== CHIP_POLARIS10
|| sscreen
->info
.family
== CHIP_POLARIS11
||
403 sscreen
->info
.family
== CHIP_POLARIS12
|| sscreen
->info
.family
== CHIP_VEGAM
))
404 partial_vs_wave
= true;
406 /* Required by Hawaii and, for some special cases, by GFX8. */
407 if (ia_switch_on_eoi
&&
408 (sscreen
->info
.family
== CHIP_HAWAII
||
409 (sscreen
->info
.chip_class
== GFX8
&& (key
->u
.uses_gs
|| max_primgroup_in_wave
!= 2))))
410 partial_vs_wave
= true;
412 /* Instancing bug on Bonaire. */
413 if (sscreen
->info
.family
== CHIP_BONAIRE
&& ia_switch_on_eoi
&& key
->u
.uses_instancing
)
414 partial_vs_wave
= true;
416 /* This only applies to Polaris10 and later 4 SE chips.
417 * wd_switch_on_eop is already true on all other chips.
419 if (!wd_switch_on_eop
&& key
->u
.primitive_restart
)
420 partial_vs_wave
= true;
422 /* If the WD switch is false, the IA switch must be false too. */
423 assert(wd_switch_on_eop
|| !ia_switch_on_eop
);
426 /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
427 if (sscreen
->info
.chip_class
<= GFX8
&& ia_switch_on_eoi
)
428 partial_es_wave
= true;
430 return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop
) | S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi
) |
431 S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave
) |
432 S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave
) |
433 S_028AA8_WD_SWITCH_ON_EOP(sscreen
->info
.chip_class
>= GFX7
? wd_switch_on_eop
: 0) |
434 /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
435 S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen
->info
.chip_class
== GFX8
? max_primgroup_in_wave
437 S_030960_EN_INST_OPT_BASIC(sscreen
->info
.chip_class
>= GFX9
) |
438 S_030960_EN_INST_OPT_ADV(sscreen
->info
.chip_class
>= GFX9
);
441 static void si_init_ia_multi_vgt_param_table(struct si_context
*sctx
)
443 for (int prim
= 0; prim
<= SI_PRIM_RECTANGLE_LIST
; prim
++)
444 for (int uses_instancing
= 0; uses_instancing
< 2; uses_instancing
++)
445 for (int multi_instances
= 0; multi_instances
< 2; multi_instances
++)
446 for (int primitive_restart
= 0; primitive_restart
< 2; primitive_restart
++)
447 for (int count_from_so
= 0; count_from_so
< 2; count_from_so
++)
448 for (int line_stipple
= 0; line_stipple
< 2; line_stipple
++)
449 for (int uses_tess
= 0; uses_tess
< 2; uses_tess
++)
450 for (int tess_uses_primid
= 0; tess_uses_primid
< 2; tess_uses_primid
++)
451 for (int uses_gs
= 0; uses_gs
< 2; uses_gs
++) {
452 union si_vgt_param_key key
;
456 key
.u
.uses_instancing
= uses_instancing
;
457 key
.u
.multi_instances_smaller_than_primgroup
= multi_instances
;
458 key
.u
.primitive_restart
= primitive_restart
;
459 key
.u
.count_from_stream_output
= count_from_so
;
460 key
.u
.line_stipple_enabled
= line_stipple
;
461 key
.u
.uses_tess
= uses_tess
;
462 key
.u
.tess_uses_prim_id
= tess_uses_primid
;
463 key
.u
.uses_gs
= uses_gs
;
465 sctx
->ia_multi_vgt_param
[key
.index
] =
466 si_get_init_multi_vgt_param(sctx
->screen
, &key
);
470 static bool si_is_line_stipple_enabled(struct si_context
*sctx
)
472 struct si_state_rasterizer
*rs
= sctx
->queued
.named
.rasterizer
;
474 return rs
->line_stipple_enable
&& sctx
->current_rast_prim
!= PIPE_PRIM_POINTS
&&
475 (rs
->polygon_mode_is_lines
|| util_prim_is_lines(sctx
->current_rast_prim
));
478 static unsigned si_get_ia_multi_vgt_param(struct si_context
*sctx
,
479 const struct pipe_draw_info
*info
,
480 enum pipe_prim_type prim
, unsigned num_patches
,
481 unsigned instance_count
, bool primitive_restart
)
483 union si_vgt_param_key key
= sctx
->ia_multi_vgt_param_key
;
484 unsigned primgroup_size
;
485 unsigned ia_multi_vgt_param
;
487 if (sctx
->tes_shader
.cso
) {
488 primgroup_size
= num_patches
; /* must be a multiple of NUM_PATCHES */
489 } else if (sctx
->gs_shader
.cso
) {
490 primgroup_size
= 64; /* recommended with a GS */
492 primgroup_size
= 128; /* recommended without a GS and tess */
496 key
.u
.uses_instancing
= info
->indirect
|| instance_count
> 1;
497 key
.u
.multi_instances_smaller_than_primgroup
=
499 (instance_count
> 1 &&
500 (info
->count_from_stream_output
|| si_num_prims_for_vertices(info
, prim
) < primgroup_size
));
501 key
.u
.primitive_restart
= primitive_restart
;
502 key
.u
.count_from_stream_output
= info
->count_from_stream_output
!= NULL
;
503 key
.u
.line_stipple_enabled
= si_is_line_stipple_enabled(sctx
);
506 sctx
->ia_multi_vgt_param
[key
.index
] | S_028AA8_PRIMGROUP_SIZE(primgroup_size
- 1);
508 if (sctx
->gs_shader
.cso
) {
509 /* GS requirement. */
510 if (sctx
->chip_class
<= GFX8
&&
511 SI_GS_PER_ES
/ primgroup_size
>= sctx
->screen
->gs_table_depth
- 3)
512 ia_multi_vgt_param
|= S_028AA8_PARTIAL_ES_WAVE_ON(1);
514 /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
515 * The hw doc says all multi-SE chips are affected, but Vulkan
516 * only applies it to Hawaii. Do what Vulkan does.
518 if (sctx
->family
== CHIP_HAWAII
&& G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param
) &&
519 (info
->indirect
|| (instance_count
> 1 && (info
->count_from_stream_output
||
520 si_num_prims_for_vertices(info
, prim
) <= 1))))
521 sctx
->flags
|= SI_CONTEXT_VGT_FLUSH
;
524 return ia_multi_vgt_param
;
527 static unsigned si_conv_prim_to_gs_out(unsigned mode
)
529 static const int prim_conv
[] = {
530 [PIPE_PRIM_POINTS
] = V_028A6C_POINTLIST
,
531 [PIPE_PRIM_LINES
] = V_028A6C_LINESTRIP
,
532 [PIPE_PRIM_LINE_LOOP
] = V_028A6C_LINESTRIP
,
533 [PIPE_PRIM_LINE_STRIP
] = V_028A6C_LINESTRIP
,
534 [PIPE_PRIM_TRIANGLES
] = V_028A6C_TRISTRIP
,
535 [PIPE_PRIM_TRIANGLE_STRIP
] = V_028A6C_TRISTRIP
,
536 [PIPE_PRIM_TRIANGLE_FAN
] = V_028A6C_TRISTRIP
,
537 [PIPE_PRIM_QUADS
] = V_028A6C_TRISTRIP
,
538 [PIPE_PRIM_QUAD_STRIP
] = V_028A6C_TRISTRIP
,
539 [PIPE_PRIM_POLYGON
] = V_028A6C_TRISTRIP
,
540 [PIPE_PRIM_LINES_ADJACENCY
] = V_028A6C_LINESTRIP
,
541 [PIPE_PRIM_LINE_STRIP_ADJACENCY
] = V_028A6C_LINESTRIP
,
542 [PIPE_PRIM_TRIANGLES_ADJACENCY
] = V_028A6C_TRISTRIP
,
543 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY
] = V_028A6C_TRISTRIP
,
544 [PIPE_PRIM_PATCHES
] = V_028A6C_POINTLIST
,
545 [SI_PRIM_RECTANGLE_LIST
] = V_028A6C_RECTLIST
,
547 assert(mode
< ARRAY_SIZE(prim_conv
));
549 return prim_conv
[mode
];
552 /* rast_prim is the primitive type after GS. */
553 static void si_emit_rasterizer_prim_state(struct si_context
*sctx
)
555 struct radeon_cmdbuf
*cs
= sctx
->gfx_cs
;
556 enum pipe_prim_type rast_prim
= sctx
->current_rast_prim
;
557 struct si_state_rasterizer
*rs
= sctx
->queued
.named
.rasterizer
;
558 unsigned initial_cdw
= cs
->current
.cdw
;
560 if (unlikely(si_is_line_stipple_enabled(sctx
))) {
561 /* For lines, reset the stipple pattern at each primitive. Otherwise,
562 * reset the stipple pattern at each packet (line strips, line loops).
565 rs
->pa_sc_line_stipple
| S_028A0C_AUTO_RESET_CNTL(rast_prim
== PIPE_PRIM_LINES
? 1 : 2);
567 radeon_opt_set_context_reg(sctx
, R_028A0C_PA_SC_LINE_STIPPLE
, SI_TRACKED_PA_SC_LINE_STIPPLE
,
571 unsigned gs_out_prim
= si_conv_prim_to_gs_out(rast_prim
);
572 if (unlikely(gs_out_prim
!= sctx
->last_gs_out_prim
&& (sctx
->ngg
|| sctx
->gs_shader
.cso
))) {
573 radeon_set_context_reg(cs
, R_028A6C_VGT_GS_OUT_PRIM_TYPE
, gs_out_prim
);
574 sctx
->last_gs_out_prim
= gs_out_prim
;
577 if (initial_cdw
!= cs
->current
.cdw
)
578 sctx
->context_roll
= true;
581 unsigned vtx_index
= rs
->flatshade_first
? 0 : gs_out_prim
;
583 sctx
->current_vs_state
&= C_VS_STATE_OUTPRIM
& C_VS_STATE_PROVOKING_VTX_INDEX
;
584 sctx
->current_vs_state
|=
585 S_VS_STATE_OUTPRIM(gs_out_prim
) | S_VS_STATE_PROVOKING_VTX_INDEX(vtx_index
);
589 static void si_emit_vs_state(struct si_context
*sctx
, const struct pipe_draw_info
*info
)
591 sctx
->current_vs_state
&= C_VS_STATE_INDEXED
;
592 sctx
->current_vs_state
|= S_VS_STATE_INDEXED(!!info
->index_size
);
594 if (sctx
->num_vs_blit_sgprs
) {
595 /* Re-emit the state after we leave u_blitter. */
596 sctx
->last_vs_state
= ~0;
600 if (sctx
->current_vs_state
!= sctx
->last_vs_state
) {
601 struct radeon_cmdbuf
*cs
= sctx
->gfx_cs
;
603 /* For the API vertex shader (VS_STATE_INDEXED, LS_OUT_*). */
605 cs
, sctx
->shader_pointers
.sh_base
[PIPE_SHADER_VERTEX
] + SI_SGPR_VS_STATE_BITS
* 4,
606 sctx
->current_vs_state
);
608 /* Set CLAMP_VERTEX_COLOR and OUTPRIM in the last stage
609 * before the rasterizer.
611 * For TES or the GS copy shader without NGG:
613 if (sctx
->shader_pointers
.sh_base
[PIPE_SHADER_VERTEX
] != R_00B130_SPI_SHADER_USER_DATA_VS_0
) {
614 radeon_set_sh_reg(cs
, R_00B130_SPI_SHADER_USER_DATA_VS_0
+ SI_SGPR_VS_STATE_BITS
* 4,
615 sctx
->current_vs_state
);
619 if (sctx
->screen
->use_ngg
&&
620 sctx
->shader_pointers
.sh_base
[PIPE_SHADER_VERTEX
] != R_00B230_SPI_SHADER_USER_DATA_GS_0
) {
621 radeon_set_sh_reg(cs
, R_00B230_SPI_SHADER_USER_DATA_GS_0
+ SI_SGPR_VS_STATE_BITS
* 4,
622 sctx
->current_vs_state
);
625 sctx
->last_vs_state
= sctx
->current_vs_state
;
629 static inline bool si_prim_restart_index_changed(struct si_context
*sctx
, bool primitive_restart
,
630 unsigned restart_index
)
632 return primitive_restart
&& (restart_index
!= sctx
->last_restart_index
||
633 sctx
->last_restart_index
== SI_RESTART_INDEX_UNKNOWN
);
636 static void si_emit_ia_multi_vgt_param(struct si_context
*sctx
, const struct pipe_draw_info
*info
,
637 enum pipe_prim_type prim
, unsigned num_patches
,
638 unsigned instance_count
, bool primitive_restart
)
640 struct radeon_cmdbuf
*cs
= sctx
->gfx_cs
;
641 unsigned ia_multi_vgt_param
;
644 si_get_ia_multi_vgt_param(sctx
, info
, prim
, num_patches
, instance_count
, primitive_restart
);
647 if (ia_multi_vgt_param
!= sctx
->last_multi_vgt_param
) {
648 if (sctx
->chip_class
== GFX9
)
649 radeon_set_uconfig_reg_idx(cs
, sctx
->screen
, R_030960_IA_MULTI_VGT_PARAM
, 4,
651 else if (sctx
->chip_class
>= GFX7
)
652 radeon_set_context_reg_idx(cs
, R_028AA8_IA_MULTI_VGT_PARAM
, 1, ia_multi_vgt_param
);
654 radeon_set_context_reg(cs
, R_028AA8_IA_MULTI_VGT_PARAM
, ia_multi_vgt_param
);
656 sctx
->last_multi_vgt_param
= ia_multi_vgt_param
;
660 /* GFX10 removed IA_MULTI_VGT_PARAM in exchange for GE_CNTL.
661 * We overload last_multi_vgt_param.
663 static void gfx10_emit_ge_cntl(struct si_context
*sctx
, unsigned num_patches
)
665 union si_vgt_param_key key
= sctx
->ia_multi_vgt_param_key
;
669 if (sctx
->tes_shader
.cso
) {
670 ge_cntl
= S_03096C_PRIM_GRP_SIZE(num_patches
) |
671 S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
672 S_03096C_BREAK_WAVE_AT_EOI(key
.u
.tess_uses_prim_id
);
674 ge_cntl
= si_get_vs_state(sctx
)->ge_cntl
;
677 unsigned primgroup_size
;
678 unsigned vertgroup_size
= 256; /* 256 = disable vertex grouping */
681 if (sctx
->tes_shader
.cso
) {
682 primgroup_size
= num_patches
; /* must be a multiple of NUM_PATCHES */
683 } else if (sctx
->gs_shader
.cso
) {
684 unsigned vgt_gs_onchip_cntl
= sctx
->gs_shader
.current
->ctx_reg
.gs
.vgt_gs_onchip_cntl
;
685 primgroup_size
= G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl
);
687 primgroup_size
= 128; /* recommended without a GS and tess */
690 ge_cntl
= S_03096C_PRIM_GRP_SIZE(primgroup_size
) | S_03096C_VERT_GRP_SIZE(vertgroup_size
) |
691 S_03096C_BREAK_WAVE_AT_EOI(key
.u
.uses_tess
&& key
.u
.tess_uses_prim_id
);
694 ge_cntl
|= S_03096C_PACKET_TO_ONE_PA(si_is_line_stipple_enabled(sctx
));
696 if (ge_cntl
!= sctx
->last_multi_vgt_param
) {
697 radeon_set_uconfig_reg(sctx
->gfx_cs
, R_03096C_GE_CNTL
, ge_cntl
);
698 sctx
->last_multi_vgt_param
= ge_cntl
;
702 static void si_emit_draw_registers(struct si_context
*sctx
, const struct pipe_draw_info
*info
,
703 enum pipe_prim_type prim
, unsigned num_patches
,
704 unsigned instance_count
, bool primitive_restart
)
706 struct radeon_cmdbuf
*cs
= sctx
->gfx_cs
;
707 unsigned vgt_prim
= si_conv_pipe_prim(prim
);
709 if (sctx
->chip_class
>= GFX10
)
710 gfx10_emit_ge_cntl(sctx
, num_patches
);
712 si_emit_ia_multi_vgt_param(sctx
, info
, prim
, num_patches
, instance_count
, primitive_restart
);
714 if (vgt_prim
!= sctx
->last_prim
) {
715 if (sctx
->chip_class
>= GFX10
)
716 radeon_set_uconfig_reg(cs
, R_030908_VGT_PRIMITIVE_TYPE
, vgt_prim
);
717 else if (sctx
->chip_class
>= GFX7
)
718 radeon_set_uconfig_reg_idx(cs
, sctx
->screen
, R_030908_VGT_PRIMITIVE_TYPE
, 1, vgt_prim
);
720 radeon_set_config_reg(cs
, R_008958_VGT_PRIMITIVE_TYPE
, vgt_prim
);
722 sctx
->last_prim
= vgt_prim
;
725 /* Primitive restart. */
726 if (primitive_restart
!= sctx
->last_primitive_restart_en
) {
727 if (sctx
->chip_class
>= GFX9
)
728 radeon_set_uconfig_reg(cs
, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN
, primitive_restart
);
730 radeon_set_context_reg(cs
, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN
, primitive_restart
);
732 sctx
->last_primitive_restart_en
= primitive_restart
;
734 if (si_prim_restart_index_changed(sctx
, primitive_restart
, info
->restart_index
)) {
735 radeon_set_context_reg(cs
, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX
, info
->restart_index
);
736 sctx
->last_restart_index
= info
->restart_index
;
737 sctx
->context_roll
= true;
741 static void si_emit_draw_packets(struct si_context
*sctx
, const struct pipe_draw_info
*info
,
742 struct pipe_resource
*indexbuf
, unsigned index_size
,
743 unsigned index_offset
, unsigned instance_count
,
744 bool dispatch_prim_discard_cs
, unsigned original_index_size
)
746 struct pipe_draw_indirect_info
*indirect
= info
->indirect
;
747 struct radeon_cmdbuf
*cs
= sctx
->gfx_cs
;
748 unsigned sh_base_reg
= sctx
->shader_pointers
.sh_base
[PIPE_SHADER_VERTEX
];
749 bool render_cond_bit
= sctx
->render_cond
&& !sctx
->render_cond_force_off
;
750 uint32_t index_max_size
= 0;
751 uint64_t index_va
= 0;
753 if (info
->count_from_stream_output
) {
754 struct si_streamout_target
*t
= (struct si_streamout_target
*)info
->count_from_stream_output
;
756 radeon_set_context_reg(cs
, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
, t
->stride_in_dw
);
757 si_cp_copy_data(sctx
, sctx
->gfx_cs
, COPY_DATA_REG
, NULL
,
758 R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
>> 2, COPY_DATA_SRC_MEM
,
759 t
->buf_filled_size
, t
->buf_filled_size_offset
);
764 /* Register shadowing doesn't shadow INDEX_TYPE. */
765 if (index_size
!= sctx
->last_index_size
|| sctx
->shadowed_regs
) {
769 switch (index_size
) {
771 index_type
= V_028A7C_VGT_INDEX_8
;
775 V_028A7C_VGT_INDEX_16
|
776 (SI_BIG_ENDIAN
&& sctx
->chip_class
<= GFX7
? V_028A7C_VGT_DMA_SWAP_16_BIT
: 0);
780 V_028A7C_VGT_INDEX_32
|
781 (SI_BIG_ENDIAN
&& sctx
->chip_class
<= GFX7
? V_028A7C_VGT_DMA_SWAP_32_BIT
: 0);
784 assert(!"unreachable");
788 if (sctx
->chip_class
>= GFX9
) {
789 radeon_set_uconfig_reg_idx(cs
, sctx
->screen
, R_03090C_VGT_INDEX_TYPE
, 2, index_type
);
791 radeon_emit(cs
, PKT3(PKT3_INDEX_TYPE
, 0, 0));
792 radeon_emit(cs
, index_type
);
795 sctx
->last_index_size
= index_size
;
798 if (original_index_size
) {
799 index_max_size
= (indexbuf
->width0
- index_offset
) / original_index_size
;
800 /* Skip draw calls with 0-sized index buffers.
801 * They cause a hang on some chips, like Navi10-14.
806 index_va
= si_resource(indexbuf
)->gpu_address
+ index_offset
;
808 radeon_add_to_buffer_list(sctx
, sctx
->gfx_cs
, si_resource(indexbuf
), RADEON_USAGE_READ
,
809 RADEON_PRIO_INDEX_BUFFER
);
812 /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
813 * so the state must be re-emitted before the next indexed draw.
815 if (sctx
->chip_class
>= GFX7
)
816 sctx
->last_index_size
= -1;
820 uint64_t indirect_va
= si_resource(indirect
->buffer
)->gpu_address
;
822 assert(indirect_va
% 8 == 0);
824 si_invalidate_draw_sh_constants(sctx
);
826 radeon_emit(cs
, PKT3(PKT3_SET_BASE
, 2, 0));
828 radeon_emit(cs
, indirect_va
);
829 radeon_emit(cs
, indirect_va
>> 32);
831 radeon_add_to_buffer_list(sctx
, sctx
->gfx_cs
, si_resource(indirect
->buffer
),
832 RADEON_USAGE_READ
, RADEON_PRIO_DRAW_INDIRECT
);
834 unsigned di_src_sel
= index_size
? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX
;
836 assert(indirect
->offset
% 4 == 0);
839 radeon_emit(cs
, PKT3(PKT3_INDEX_BASE
, 1, 0));
840 radeon_emit(cs
, index_va
);
841 radeon_emit(cs
, index_va
>> 32);
843 radeon_emit(cs
, PKT3(PKT3_INDEX_BUFFER_SIZE
, 0, 0));
844 radeon_emit(cs
, index_max_size
);
847 if (!sctx
->screen
->has_draw_indirect_multi
) {
848 radeon_emit(cs
, PKT3(index_size
? PKT3_DRAW_INDEX_INDIRECT
: PKT3_DRAW_INDIRECT
, 3,
850 radeon_emit(cs
, indirect
->offset
);
851 radeon_emit(cs
, (sh_base_reg
+ SI_SGPR_BASE_VERTEX
* 4 - SI_SH_REG_OFFSET
) >> 2);
852 radeon_emit(cs
, (sh_base_reg
+ SI_SGPR_START_INSTANCE
* 4 - SI_SH_REG_OFFSET
) >> 2);
853 radeon_emit(cs
, di_src_sel
);
855 uint64_t count_va
= 0;
857 if (indirect
->indirect_draw_count
) {
858 struct si_resource
*params_buf
= si_resource(indirect
->indirect_draw_count
);
860 radeon_add_to_buffer_list(sctx
, sctx
->gfx_cs
, params_buf
, RADEON_USAGE_READ
,
861 RADEON_PRIO_DRAW_INDIRECT
);
863 count_va
= params_buf
->gpu_address
+ indirect
->indirect_draw_count_offset
;
867 PKT3(index_size
? PKT3_DRAW_INDEX_INDIRECT_MULTI
: PKT3_DRAW_INDIRECT_MULTI
, 8,
869 radeon_emit(cs
, indirect
->offset
);
870 radeon_emit(cs
, (sh_base_reg
+ SI_SGPR_BASE_VERTEX
* 4 - SI_SH_REG_OFFSET
) >> 2);
871 radeon_emit(cs
, (sh_base_reg
+ SI_SGPR_START_INSTANCE
* 4 - SI_SH_REG_OFFSET
) >> 2);
872 radeon_emit(cs
, ((sh_base_reg
+ SI_SGPR_DRAWID
* 4 - SI_SH_REG_OFFSET
) >> 2) |
873 S_2C3_DRAW_INDEX_ENABLE(1) |
874 S_2C3_COUNT_INDIRECT_ENABLE(!!indirect
->indirect_draw_count
));
875 radeon_emit(cs
, indirect
->draw_count
);
876 radeon_emit(cs
, count_va
);
877 radeon_emit(cs
, count_va
>> 32);
878 radeon_emit(cs
, indirect
->stride
);
879 radeon_emit(cs
, di_src_sel
);
884 /* Register shadowing requires that we always emit PKT3_NUM_INSTANCES. */
885 if (sctx
->shadowed_regs
||
886 sctx
->last_instance_count
== SI_INSTANCE_COUNT_UNKNOWN
||
887 sctx
->last_instance_count
!= instance_count
) {
888 radeon_emit(cs
, PKT3(PKT3_NUM_INSTANCES
, 0, 0));
889 radeon_emit(cs
, instance_count
);
890 sctx
->last_instance_count
= instance_count
;
893 /* Base vertex and start instance. */
894 base_vertex
= original_index_size
? info
->index_bias
: info
->start
;
896 if (sctx
->num_vs_blit_sgprs
) {
897 /* Re-emit draw constants after we leave u_blitter. */
898 si_invalidate_draw_sh_constants(sctx
);
900 /* Blit VS doesn't use BASE_VERTEX, START_INSTANCE, and DRAWID. */
901 radeon_set_sh_reg_seq(cs
, sh_base_reg
+ SI_SGPR_VS_BLIT_DATA
* 4, sctx
->num_vs_blit_sgprs
);
902 radeon_emit_array(cs
, sctx
->vs_blit_sh_data
, sctx
->num_vs_blit_sgprs
);
903 } else if (base_vertex
!= sctx
->last_base_vertex
||
904 sctx
->last_base_vertex
== SI_BASE_VERTEX_UNKNOWN
||
905 info
->start_instance
!= sctx
->last_start_instance
||
906 info
->drawid
!= sctx
->last_drawid
|| sh_base_reg
!= sctx
->last_sh_base_reg
) {
907 radeon_set_sh_reg_seq(cs
, sh_base_reg
+ SI_SGPR_BASE_VERTEX
* 4, 3);
908 radeon_emit(cs
, base_vertex
);
909 radeon_emit(cs
, info
->start_instance
);
910 radeon_emit(cs
, info
->drawid
);
912 sctx
->last_base_vertex
= base_vertex
;
913 sctx
->last_start_instance
= info
->start_instance
;
914 sctx
->last_drawid
= info
->drawid
;
915 sctx
->last_sh_base_reg
= sh_base_reg
;
919 if (dispatch_prim_discard_cs
) {
920 index_va
+= info
->start
* original_index_size
;
921 index_max_size
= MIN2(index_max_size
, info
->count
);
923 si_dispatch_prim_discard_cs_and_draw(sctx
, info
, original_index_size
, base_vertex
,
924 index_va
, index_max_size
);
928 index_va
+= info
->start
* index_size
;
930 radeon_emit(cs
, PKT3(PKT3_DRAW_INDEX_2
, 4, render_cond_bit
));
931 radeon_emit(cs
, index_max_size
);
932 radeon_emit(cs
, index_va
);
933 radeon_emit(cs
, index_va
>> 32);
934 radeon_emit(cs
, info
->count
);
935 radeon_emit(cs
, V_0287F0_DI_SRC_SEL_DMA
);
937 radeon_emit(cs
, PKT3(PKT3_DRAW_INDEX_AUTO
, 1, render_cond_bit
));
938 radeon_emit(cs
, info
->count
);
939 radeon_emit(cs
, V_0287F0_DI_SRC_SEL_AUTO_INDEX
|
940 S_0287F0_USE_OPAQUE(!!info
->count_from_stream_output
));
945 void si_emit_surface_sync(struct si_context
*sctx
, struct radeon_cmdbuf
*cs
, unsigned cp_coher_cntl
)
947 bool compute_ib
= !sctx
->has_graphics
|| cs
== sctx
->prim_discard_compute_cs
;
949 assert(sctx
->chip_class
<= GFX9
);
951 if (sctx
->chip_class
== GFX9
|| compute_ib
) {
952 /* Flush caches and wait for the caches to assert idle. */
953 radeon_emit(cs
, PKT3(PKT3_ACQUIRE_MEM
, 5, 0));
954 radeon_emit(cs
, cp_coher_cntl
); /* CP_COHER_CNTL */
955 radeon_emit(cs
, 0xffffffff); /* CP_COHER_SIZE */
956 radeon_emit(cs
, 0xffffff); /* CP_COHER_SIZE_HI */
957 radeon_emit(cs
, 0); /* CP_COHER_BASE */
958 radeon_emit(cs
, 0); /* CP_COHER_BASE_HI */
959 radeon_emit(cs
, 0x0000000A); /* POLL_INTERVAL */
961 /* ACQUIRE_MEM is only required on a compute ring. */
962 radeon_emit(cs
, PKT3(PKT3_SURFACE_SYNC
, 3, 0));
963 radeon_emit(cs
, cp_coher_cntl
); /* CP_COHER_CNTL */
964 radeon_emit(cs
, 0xffffffff); /* CP_COHER_SIZE */
965 radeon_emit(cs
, 0); /* CP_COHER_BASE */
966 radeon_emit(cs
, 0x0000000A); /* POLL_INTERVAL */
969 /* ACQUIRE_MEM has an implicit context roll if the current context
972 sctx
->context_roll
= true;
975 void si_prim_discard_signal_next_compute_ib_start(struct si_context
*sctx
)
977 if (!si_compute_prim_discard_enabled(sctx
))
980 if (!sctx
->barrier_buf
) {
981 u_suballocator_alloc(sctx
->allocator_zeroed_memory
, 4, 4, &sctx
->barrier_buf_offset
,
982 (struct pipe_resource
**)&sctx
->barrier_buf
);
985 /* Emit a placeholder to signal the next compute IB to start.
986 * See si_compute_prim_discard.c for explanation.
989 si_cp_write_data(sctx
, sctx
->barrier_buf
, sctx
->barrier_buf_offset
, 4, V_370_MEM
, V_370_ME
,
992 sctx
->last_pkt3_write_data
= &sctx
->gfx_cs
->current
.buf
[sctx
->gfx_cs
->current
.cdw
- 5];
994 /* Only the last occurence of WRITE_DATA will be executed.
995 * The packet will be enabled in si_flush_gfx_cs.
997 *sctx
->last_pkt3_write_data
= PKT3(PKT3_NOP
, 3, 0);
1000 void gfx10_emit_cache_flush(struct si_context
*ctx
)
1002 struct radeon_cmdbuf
*cs
= ctx
->gfx_cs
;
1003 uint32_t gcr_cntl
= 0;
1004 unsigned cb_db_event
= 0;
1005 unsigned flags
= ctx
->flags
;
1007 if (!ctx
->has_graphics
) {
1008 /* Only process compute flags. */
1009 flags
&= SI_CONTEXT_INV_ICACHE
| SI_CONTEXT_INV_SCACHE
| SI_CONTEXT_INV_VCACHE
|
1010 SI_CONTEXT_INV_L2
| SI_CONTEXT_WB_L2
| SI_CONTEXT_INV_L2_METADATA
|
1011 SI_CONTEXT_CS_PARTIAL_FLUSH
;
1014 /* We don't need these. */
1015 assert(!(flags
& (SI_CONTEXT_VGT_STREAMOUT_SYNC
| SI_CONTEXT_FLUSH_AND_INV_DB_META
)));
1017 if (flags
& SI_CONTEXT_VGT_FLUSH
) {
1018 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1019 radeon_emit(cs
, EVENT_TYPE(V_028A90_VGT_FLUSH
) | EVENT_INDEX(0));
1022 if (flags
& SI_CONTEXT_FLUSH_AND_INV_CB
)
1023 ctx
->num_cb_cache_flushes
++;
1024 if (flags
& SI_CONTEXT_FLUSH_AND_INV_DB
)
1025 ctx
->num_db_cache_flushes
++;
1027 if (flags
& SI_CONTEXT_INV_ICACHE
)
1028 gcr_cntl
|= S_586_GLI_INV(V_586_GLI_ALL
);
1029 if (flags
& SI_CONTEXT_INV_SCACHE
) {
1030 /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
1031 * to FORWARD when both L1 and L2 are written out (WB or INV).
1033 gcr_cntl
|= S_586_GL1_INV(1) | S_586_GLK_INV(1);
1035 if (flags
& SI_CONTEXT_INV_VCACHE
)
1036 gcr_cntl
|= S_586_GL1_INV(1) | S_586_GLV_INV(1);
1038 /* The L2 cache ops are:
1039 * - INV: - invalidate lines that reflect memory (were loaded from memory)
1040 * - don't touch lines that were overwritten (were stored by gfx clients)
1041 * - WB: - don't touch lines that reflect memory
1042 * - write back lines that were overwritten
1043 * - WB | INV: - invalidate lines that reflect memory
1044 * - write back lines that were overwritten
1046 * GLM doesn't support WB alone. If WB is set, INV must be set too.
1048 if (flags
& SI_CONTEXT_INV_L2
) {
1049 /* Writeback and invalidate everything in L2. */
1050 gcr_cntl
|= S_586_GL2_INV(1) | S_586_GL2_WB(1) | S_586_GLM_INV(1) | S_586_GLM_WB(1);
1051 ctx
->num_L2_invalidates
++;
1052 } else if (flags
& SI_CONTEXT_WB_L2
) {
1053 gcr_cntl
|= S_586_GL2_WB(1) | S_586_GLM_WB(1) | S_586_GLM_INV(1);
1054 } else if (flags
& SI_CONTEXT_INV_L2_METADATA
) {
1055 gcr_cntl
|= S_586_GLM_INV(1) | S_586_GLM_WB(1);
1058 if (flags
& (SI_CONTEXT_FLUSH_AND_INV_CB
| SI_CONTEXT_FLUSH_AND_INV_DB
)) {
1059 if (flags
& SI_CONTEXT_FLUSH_AND_INV_CB
) {
1060 /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
1061 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1062 radeon_emit(cs
, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META
) | EVENT_INDEX(0));
1064 if (flags
& SI_CONTEXT_FLUSH_AND_INV_DB
) {
1065 /* Flush HTILE. Will wait for idle later. */
1066 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1067 radeon_emit(cs
, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META
) | EVENT_INDEX(0));
1070 /* First flush CB/DB, then L1/L2. */
1071 gcr_cntl
|= S_586_SEQ(V_586_SEQ_FORWARD
);
1073 if ((flags
& (SI_CONTEXT_FLUSH_AND_INV_CB
| SI_CONTEXT_FLUSH_AND_INV_DB
)) ==
1074 (SI_CONTEXT_FLUSH_AND_INV_CB
| SI_CONTEXT_FLUSH_AND_INV_DB
)) {
1075 cb_db_event
= V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT
;
1076 } else if (flags
& SI_CONTEXT_FLUSH_AND_INV_CB
) {
1077 cb_db_event
= V_028A90_FLUSH_AND_INV_CB_DATA_TS
;
1078 } else if (flags
& SI_CONTEXT_FLUSH_AND_INV_DB
) {
1079 cb_db_event
= V_028A90_FLUSH_AND_INV_DB_DATA_TS
;
1084 /* Wait for graphics shaders to go idle if requested. */
1085 if (flags
& SI_CONTEXT_PS_PARTIAL_FLUSH
) {
1086 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1087 radeon_emit(cs
, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH
) | EVENT_INDEX(4));
1088 /* Only count explicit shader flushes, not implicit ones. */
1089 ctx
->num_vs_flushes
++;
1090 ctx
->num_ps_flushes
++;
1091 } else if (flags
& SI_CONTEXT_VS_PARTIAL_FLUSH
) {
1092 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1093 radeon_emit(cs
, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH
) | EVENT_INDEX(4));
1094 ctx
->num_vs_flushes
++;
1098 if (flags
& SI_CONTEXT_CS_PARTIAL_FLUSH
&& ctx
->compute_is_busy
) {
1099 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1100 radeon_emit(cs
, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH
| EVENT_INDEX(4)));
1101 ctx
->num_cs_flushes
++;
1102 ctx
->compute_is_busy
= false;
1106 /* CB/DB flush and invalidate (or possibly just a wait for a
1107 * meta flush) via RELEASE_MEM.
1109 * Combine this with other cache flushes when possible; this
1110 * requires affected shaders to be idle, so do it after the
1111 * CS_PARTIAL_FLUSH before (VS/PS partial flushes are always
1116 /* Do the flush (enqueue the event and wait for it). */
1117 va
= ctx
->wait_mem_scratch
->gpu_address
;
1118 ctx
->wait_mem_number
++;
1120 /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
1121 unsigned glm_wb
= G_586_GLM_WB(gcr_cntl
);
1122 unsigned glm_inv
= G_586_GLM_INV(gcr_cntl
);
1123 unsigned glv_inv
= G_586_GLV_INV(gcr_cntl
);
1124 unsigned gl1_inv
= G_586_GL1_INV(gcr_cntl
);
1125 assert(G_586_GL2_US(gcr_cntl
) == 0);
1126 assert(G_586_GL2_RANGE(gcr_cntl
) == 0);
1127 assert(G_586_GL2_DISCARD(gcr_cntl
) == 0);
1128 unsigned gl2_inv
= G_586_GL2_INV(gcr_cntl
);
1129 unsigned gl2_wb
= G_586_GL2_WB(gcr_cntl
);
1130 unsigned gcr_seq
= G_586_SEQ(gcr_cntl
);
1132 gcr_cntl
&= C_586_GLM_WB
& C_586_GLM_INV
& C_586_GLV_INV
& C_586_GL1_INV
& C_586_GL2_INV
&
1133 C_586_GL2_WB
; /* keep SEQ */
1135 si_cp_release_mem(ctx
, cs
, cb_db_event
,
1136 S_490_GLM_WB(glm_wb
) | S_490_GLM_INV(glm_inv
) | S_490_GLV_INV(glv_inv
) |
1137 S_490_GL1_INV(gl1_inv
) | S_490_GL2_INV(gl2_inv
) | S_490_GL2_WB(gl2_wb
) |
1139 EOP_DST_SEL_MEM
, EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM
,
1140 EOP_DATA_SEL_VALUE_32BIT
, ctx
->wait_mem_scratch
, va
, ctx
->wait_mem_number
,
1142 si_cp_wait_mem(ctx
, ctx
->gfx_cs
, va
, ctx
->wait_mem_number
, 0xffffffff, WAIT_REG_MEM_EQUAL
);
1145 /* Ignore fields that only modify the behavior of other fields. */
1146 if (gcr_cntl
& C_586_GL1_RANGE
& C_586_GL2_RANGE
& C_586_SEQ
) {
1147 /* Flush caches and wait for the caches to assert idle.
1148 * The cache flush is executed in the ME, but the PFP waits
1151 radeon_emit(cs
, PKT3(PKT3_ACQUIRE_MEM
, 6, 0));
1152 radeon_emit(cs
, 0); /* CP_COHER_CNTL */
1153 radeon_emit(cs
, 0xffffffff); /* CP_COHER_SIZE */
1154 radeon_emit(cs
, 0xffffff); /* CP_COHER_SIZE_HI */
1155 radeon_emit(cs
, 0); /* CP_COHER_BASE */
1156 radeon_emit(cs
, 0); /* CP_COHER_BASE_HI */
1157 radeon_emit(cs
, 0x0000000A); /* POLL_INTERVAL */
1158 radeon_emit(cs
, gcr_cntl
); /* GCR_CNTL */
1159 } else if (cb_db_event
|| (flags
& (SI_CONTEXT_VS_PARTIAL_FLUSH
| SI_CONTEXT_PS_PARTIAL_FLUSH
|
1160 SI_CONTEXT_CS_PARTIAL_FLUSH
))) {
1161 /* We need to ensure that PFP waits as well. */
1162 radeon_emit(cs
, PKT3(PKT3_PFP_SYNC_ME
, 0, 0));
1166 if (flags
& SI_CONTEXT_START_PIPELINE_STATS
) {
1167 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1168 radeon_emit(cs
, EVENT_TYPE(V_028A90_PIPELINESTAT_START
) | EVENT_INDEX(0));
1169 } else if (flags
& SI_CONTEXT_STOP_PIPELINE_STATS
) {
1170 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1171 radeon_emit(cs
, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP
) | EVENT_INDEX(0));
1177 void si_emit_cache_flush(struct si_context
*sctx
)
1179 struct radeon_cmdbuf
*cs
= sctx
->gfx_cs
;
1180 uint32_t flags
= sctx
->flags
;
1182 if (!sctx
->has_graphics
) {
1183 /* Only process compute flags. */
1184 flags
&= SI_CONTEXT_INV_ICACHE
| SI_CONTEXT_INV_SCACHE
| SI_CONTEXT_INV_VCACHE
|
1185 SI_CONTEXT_INV_L2
| SI_CONTEXT_WB_L2
| SI_CONTEXT_INV_L2_METADATA
|
1186 SI_CONTEXT_CS_PARTIAL_FLUSH
;
1189 uint32_t cp_coher_cntl
= 0;
1190 const uint32_t flush_cb_db
= flags
& (SI_CONTEXT_FLUSH_AND_INV_CB
| SI_CONTEXT_FLUSH_AND_INV_DB
);
1191 const bool is_barrier
=
1193 /* INV_ICACHE == beginning of gfx IB. Checking
1194 * INV_ICACHE fixes corruption for DeusExMD with
1195 * compute-based culling, but I don't know why.
1197 flags
& (SI_CONTEXT_INV_ICACHE
| SI_CONTEXT_PS_PARTIAL_FLUSH
| SI_CONTEXT_VS_PARTIAL_FLUSH
) ||
1198 (flags
& SI_CONTEXT_CS_PARTIAL_FLUSH
&& sctx
->compute_is_busy
);
1200 assert(sctx
->chip_class
<= GFX9
);
1202 if (flags
& SI_CONTEXT_FLUSH_AND_INV_CB
)
1203 sctx
->num_cb_cache_flushes
++;
1204 if (flags
& SI_CONTEXT_FLUSH_AND_INV_DB
)
1205 sctx
->num_db_cache_flushes
++;
1207 /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
1208 * bit is set. An alternative way is to write SQC_CACHES, but that
1209 * doesn't seem to work reliably. Since the bug doesn't affect
1210 * correctness (it only does more work than necessary) and
1211 * the performance impact is likely negligible, there is no plan
1212 * to add a workaround for it.
1215 if (flags
& SI_CONTEXT_INV_ICACHE
)
1216 cp_coher_cntl
|= S_0085F0_SH_ICACHE_ACTION_ENA(1);
1217 if (flags
& SI_CONTEXT_INV_SCACHE
)
1218 cp_coher_cntl
|= S_0085F0_SH_KCACHE_ACTION_ENA(1);
1220 if (sctx
->chip_class
<= GFX8
) {
1221 if (flags
& SI_CONTEXT_FLUSH_AND_INV_CB
) {
1222 cp_coher_cntl
|= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) |
1223 S_0085F0_CB1_DEST_BASE_ENA(1) | S_0085F0_CB2_DEST_BASE_ENA(1) |
1224 S_0085F0_CB3_DEST_BASE_ENA(1) | S_0085F0_CB4_DEST_BASE_ENA(1) |
1225 S_0085F0_CB5_DEST_BASE_ENA(1) | S_0085F0_CB6_DEST_BASE_ENA(1) |
1226 S_0085F0_CB7_DEST_BASE_ENA(1);
1228 /* Necessary for DCC */
1229 if (sctx
->chip_class
== GFX8
)
1230 si_cp_release_mem(sctx
, cs
, V_028A90_FLUSH_AND_INV_CB_DATA_TS
, 0, EOP_DST_SEL_MEM
,
1231 EOP_INT_SEL_NONE
, EOP_DATA_SEL_DISCARD
, NULL
, 0, 0, SI_NOT_QUERY
);
1233 if (flags
& SI_CONTEXT_FLUSH_AND_INV_DB
)
1234 cp_coher_cntl
|= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
1237 if (flags
& SI_CONTEXT_FLUSH_AND_INV_CB
) {
1238 /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
1239 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1240 radeon_emit(cs
, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META
) | EVENT_INDEX(0));
1242 if (flags
& (SI_CONTEXT_FLUSH_AND_INV_DB
| SI_CONTEXT_FLUSH_AND_INV_DB_META
)) {
1243 /* Flush HTILE. SURFACE_SYNC will wait for idle. */
1244 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1245 radeon_emit(cs
, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META
) | EVENT_INDEX(0));
1248 /* Wait for shader engines to go idle.
1249 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
1250 * for everything including CB/DB cache flushes.
1253 if (flags
& SI_CONTEXT_PS_PARTIAL_FLUSH
) {
1254 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1255 radeon_emit(cs
, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH
) | EVENT_INDEX(4));
1256 /* Only count explicit shader flushes, not implicit ones
1257 * done by SURFACE_SYNC.
1259 sctx
->num_vs_flushes
++;
1260 sctx
->num_ps_flushes
++;
1261 } else if (flags
& SI_CONTEXT_VS_PARTIAL_FLUSH
) {
1262 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1263 radeon_emit(cs
, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH
) | EVENT_INDEX(4));
1264 sctx
->num_vs_flushes
++;
1268 if (flags
& SI_CONTEXT_CS_PARTIAL_FLUSH
&& sctx
->compute_is_busy
) {
1269 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1270 radeon_emit(cs
, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH
) | EVENT_INDEX(4));
1271 sctx
->num_cs_flushes
++;
1272 sctx
->compute_is_busy
= false;
1275 /* VGT state synchronization. */
1276 if (flags
& SI_CONTEXT_VGT_FLUSH
) {
1277 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1278 radeon_emit(cs
, EVENT_TYPE(V_028A90_VGT_FLUSH
) | EVENT_INDEX(0));
1280 if (flags
& SI_CONTEXT_VGT_STREAMOUT_SYNC
) {
1281 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1282 radeon_emit(cs
, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC
) | EVENT_INDEX(0));
1285 /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
1286 * wait for idle on GFX9. We have to use a TS event.
1288 if (sctx
->chip_class
== GFX9
&& flush_cb_db
) {
1290 unsigned tc_flags
, cb_db_event
;
1292 /* Set the CB/DB flush event. */
1293 switch (flush_cb_db
) {
1294 case SI_CONTEXT_FLUSH_AND_INV_CB
:
1295 cb_db_event
= V_028A90_FLUSH_AND_INV_CB_DATA_TS
;
1297 case SI_CONTEXT_FLUSH_AND_INV_DB
:
1298 cb_db_event
= V_028A90_FLUSH_AND_INV_DB_DATA_TS
;
1302 cb_db_event
= V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT
;
1305 /* These are the only allowed combinations. If you need to
1306 * do multiple operations at once, do them separately.
1307 * All operations that invalidate L2 also seem to invalidate
1308 * metadata. Volatile (VOL) and WC flushes are not listed here.
1310 * TC | TC_WB = writeback & invalidate L2 & L1
1311 * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
1312 * TC_WB | TC_NC = writeback L2 for MTYPE == NC
1313 * TC | TC_NC = invalidate L2 for MTYPE == NC
1314 * TC | TC_MD = writeback & invalidate L2 metadata (DCC, etc.)
1315 * TCL1 = invalidate L1
1319 if (flags
& SI_CONTEXT_INV_L2_METADATA
) {
1320 tc_flags
= EVENT_TC_ACTION_ENA
| EVENT_TC_MD_ACTION_ENA
;
1323 /* Ideally flush TC together with CB/DB. */
1324 if (flags
& SI_CONTEXT_INV_L2
) {
1325 /* Writeback and invalidate everything in L2 & L1. */
1326 tc_flags
= EVENT_TC_ACTION_ENA
| EVENT_TC_WB_ACTION_ENA
;
1328 /* Clear the flags. */
1329 flags
&= ~(SI_CONTEXT_INV_L2
| SI_CONTEXT_WB_L2
| SI_CONTEXT_INV_VCACHE
);
1330 sctx
->num_L2_invalidates
++;
1333 /* Do the flush (enqueue the event and wait for it). */
1334 va
= sctx
->wait_mem_scratch
->gpu_address
;
1335 sctx
->wait_mem_number
++;
1337 si_cp_release_mem(sctx
, cs
, cb_db_event
, tc_flags
, EOP_DST_SEL_MEM
,
1338 EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM
, EOP_DATA_SEL_VALUE_32BIT
,
1339 sctx
->wait_mem_scratch
, va
, sctx
->wait_mem_number
, SI_NOT_QUERY
);
1340 si_cp_wait_mem(sctx
, cs
, va
, sctx
->wait_mem_number
, 0xffffffff, WAIT_REG_MEM_EQUAL
);
1343 /* Make sure ME is idle (it executes most packets) before continuing.
1344 * This prevents read-after-write hazards between PFP and ME.
1346 if (sctx
->has_graphics
&&
1347 (cp_coher_cntl
|| (flags
& (SI_CONTEXT_CS_PARTIAL_FLUSH
| SI_CONTEXT_INV_VCACHE
|
1348 SI_CONTEXT_INV_L2
| SI_CONTEXT_WB_L2
)))) {
1349 radeon_emit(cs
, PKT3(PKT3_PFP_SYNC_ME
, 0, 0));
1354 * When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
1355 * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
1357 * cp_coher_cntl should contain all necessary flags except TC flags
1360 * GFX6-GFX7 don't support L2 write-back.
1362 if (flags
& SI_CONTEXT_INV_L2
|| (sctx
->chip_class
<= GFX7
&& (flags
& SI_CONTEXT_WB_L2
))) {
1363 /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
1364 * WB must be set on GFX8+ when TC_ACTION is set.
1366 si_emit_surface_sync(sctx
, sctx
->gfx_cs
,
1367 cp_coher_cntl
| S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
1368 S_0301F0_TC_WB_ACTION_ENA(sctx
->chip_class
>= GFX8
));
1370 sctx
->num_L2_invalidates
++;
1372 /* L1 invalidation and L2 writeback must be done separately,
1373 * because both operations can't be done together.
1375 if (flags
& SI_CONTEXT_WB_L2
) {
1377 * NC = apply to non-coherent MTYPEs
1378 * (i.e. MTYPE <= 1, which is what we use everywhere)
1380 * WB doesn't work without NC.
1382 si_emit_surface_sync(
1384 cp_coher_cntl
| S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1));
1386 sctx
->num_L2_writebacks
++;
1388 if (flags
& SI_CONTEXT_INV_VCACHE
) {
1389 /* Invalidate per-CU VMEM L1. */
1390 si_emit_surface_sync(sctx
, sctx
->gfx_cs
, cp_coher_cntl
| S_0085F0_TCL1_ACTION_ENA(1));
1395 /* If TC flushes haven't cleared this... */
1397 si_emit_surface_sync(sctx
, sctx
->gfx_cs
, cp_coher_cntl
);
1400 si_prim_discard_signal_next_compute_ib_start(sctx
);
1402 if (flags
& SI_CONTEXT_START_PIPELINE_STATS
) {
1403 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1404 radeon_emit(cs
, EVENT_TYPE(V_028A90_PIPELINESTAT_START
) | EVENT_INDEX(0));
1405 } else if (flags
& SI_CONTEXT_STOP_PIPELINE_STATS
) {
1406 radeon_emit(cs
, PKT3(PKT3_EVENT_WRITE
, 0, 0));
1407 radeon_emit(cs
, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP
) | EVENT_INDEX(0));
1413 static void si_get_draw_start_count(struct si_context
*sctx
, const struct pipe_draw_info
*info
,
1414 unsigned *start
, unsigned *count
)
1416 struct pipe_draw_indirect_info
*indirect
= info
->indirect
;
1419 unsigned indirect_count
;
1420 struct pipe_transfer
*transfer
;
1421 unsigned begin
, end
;
1425 if (indirect
->indirect_draw_count
) {
1426 data
= pipe_buffer_map_range(&sctx
->b
, indirect
->indirect_draw_count
,
1427 indirect
->indirect_draw_count_offset
, sizeof(unsigned),
1428 PIPE_TRANSFER_READ
, &transfer
);
1430 indirect_count
= *data
;
1432 pipe_buffer_unmap(&sctx
->b
, transfer
);
1434 indirect_count
= indirect
->draw_count
;
1437 if (!indirect_count
) {
1438 *start
= *count
= 0;
1442 map_size
= (indirect_count
- 1) * indirect
->stride
+ 3 * sizeof(unsigned);
1443 data
= pipe_buffer_map_range(&sctx
->b
, indirect
->buffer
, indirect
->offset
, map_size
,
1444 PIPE_TRANSFER_READ
, &transfer
);
1449 for (unsigned i
= 0; i
< indirect_count
; ++i
) {
1450 unsigned count
= data
[0];
1451 unsigned start
= data
[2];
1454 begin
= MIN2(begin
, start
);
1455 end
= MAX2(end
, start
+ count
);
1458 data
+= indirect
->stride
/ sizeof(unsigned);
1461 pipe_buffer_unmap(&sctx
->b
, transfer
);
1465 *count
= end
- begin
;
1467 *start
= *count
= 0;
1470 *start
= info
->start
;
1471 *count
= info
->count
;
1475 static void si_emit_all_states(struct si_context
*sctx
, const struct pipe_draw_info
*info
,
1476 enum pipe_prim_type prim
, unsigned instance_count
,
1477 bool primitive_restart
, unsigned skip_atom_mask
)
1479 unsigned num_patches
= 0;
1481 si_emit_rasterizer_prim_state(sctx
);
1482 if (sctx
->tes_shader
.cso
)
1483 si_emit_derived_tess_state(sctx
, info
, &num_patches
);
1485 /* Emit state atoms. */
1486 unsigned mask
= sctx
->dirty_atoms
& ~skip_atom_mask
;
1488 sctx
->atoms
.array
[u_bit_scan(&mask
)].emit(sctx
);
1490 sctx
->dirty_atoms
&= skip_atom_mask
;
1493 mask
= sctx
->dirty_states
;
1495 unsigned i
= u_bit_scan(&mask
);
1496 struct si_pm4_state
*state
= sctx
->queued
.array
[i
];
1498 if (!state
|| sctx
->emitted
.array
[i
] == state
)
1501 si_pm4_emit(sctx
, state
);
1502 sctx
->emitted
.array
[i
] = state
;
1504 sctx
->dirty_states
= 0;
1506 /* Emit draw states. */
1507 si_emit_vs_state(sctx
, info
);
1508 si_emit_draw_registers(sctx
, info
, prim
, num_patches
, instance_count
, primitive_restart
);
static bool si_all_vs_resources_read_only(struct si_context *sctx, struct pipe_resource *indexbuf)
{
   struct radeon_winsys *ws = sctx->ws;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   /* Index buffer. */
   if (indexbuf && ws->cs_is_buffer_referenced(cs, si_resource(indexbuf)->buf, RADEON_USAGE_WRITE))
      goto has_write_reference;

   /* Vertex buffers. */
   struct si_vertex_elements *velems = sctx->vertex_elements;
   unsigned num_velems = velems->count;

   for (unsigned i = 0; i < num_velems; i++) {
      if (!((1 << i) & velems->first_vb_use_mask))
         continue;

      unsigned vb_index = velems->vertex_buffer_index[i];
      struct pipe_resource *res = sctx->vertex_buffer[vb_index].buffer.resource;
      if (!res)
         continue;

      if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf, RADEON_USAGE_WRITE))
         goto has_write_reference;
   }

   /* Constant and shader buffers. */
   struct si_descriptors *buffers =
      &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_VERTEX)];
   for (unsigned i = 0; i < buffers->num_active_slots; i++) {
      unsigned index = buffers->first_active_slot + i;
      struct pipe_resource *res = sctx->const_and_shader_buffers[PIPE_SHADER_VERTEX].buffers[index];
      if (!res)
         continue;

      if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf, RADEON_USAGE_WRITE))
         goto has_write_reference;
   }

   /* Samplers. */
   struct si_shader_selector *vs = sctx->vs_shader.cso;
   if (vs->info.samplers_declared) {
      unsigned num_samplers = util_last_bit(vs->info.samplers_declared);

      for (unsigned i = 0; i < num_samplers; i++) {
         struct pipe_sampler_view *view = sctx->samplers[PIPE_SHADER_VERTEX].views[i];
         if (!view)
            continue;

         if (ws->cs_is_buffer_referenced(cs, si_resource(view->texture)->buf, RADEON_USAGE_WRITE))
            goto has_write_reference;
      }
   }

   /* Images. */
   if (vs->info.images_declared) {
      unsigned num_images = util_last_bit(vs->info.images_declared);

      for (unsigned i = 0; i < num_images; i++) {
         struct pipe_resource *res = sctx->images[PIPE_SHADER_VERTEX].views[i].resource;
         if (!res)
            continue;

         if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf, RADEON_USAGE_WRITE))
            goto has_write_reference;
      }
   }

   return true;

has_write_reference:
   /* If the current gfx IB has enough packets, flush it to remove write
    * references to buffers. */
   if (cs->prev_dw + cs->current.cdw > 2048) {
      si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
      assert(si_all_vs_resources_read_only(sctx, indexbuf));
      return true;
   }
   return false;
}
static ALWAYS_INLINE bool pd_msg(const char *s)
{
   if (SI_PRIM_DISCARD_DEBUG)
      printf("PD failed: %s\n", s);
   return false;
}
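/* pd_msg() logs why a draw was rejected for the prim discard compute path
 * (when SI_PRIM_DISCARD_DEBUG is enabled) and returns false, so it can be
 * chained as "(check || pd_msg("reason"))" inside the big eligibility
 * condition in si_draw_vbo below: when a check fails, the reason is printed
 * and the whole condition still evaluates to false.
 */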
static void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
   struct pipe_resource *indexbuf = info->index.resource;
   unsigned dirty_tex_counter, dirty_buf_counter;
   enum pipe_prim_type rast_prim, prim = info->mode;
   unsigned index_size = info->index_size;
   unsigned index_offset = info->indirect ? info->start * index_size : 0;
   unsigned instance_count = info->instance_count;
   bool primitive_restart =
      info->primitive_restart &&
      (!sctx->screen->options.prim_restart_tri_strips_only ||
       (prim != PIPE_PRIM_TRIANGLE_STRIP && prim != PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY));
   if (likely(!info->indirect)) {
      /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
       * no workaround for indirect draws, but we can at least skip
       * direct draws. */
      if (unlikely(!instance_count))
         return;

      /* Handle count == 0. */
      if (unlikely(!info->count && (index_size || !info->count_from_stream_output)))
         return;
   }

   struct si_shader_selector *vs = sctx->vs_shader.cso;
   if (unlikely(!vs || sctx->num_vertex_elements < vs->num_vs_inputs ||
                (!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
                (!!sctx->tes_shader.cso != (prim == PIPE_PRIM_PATCHES)))) {
      assert(0);
      return;
   }
   /* Recompute and re-emit the texture resource states if needed. */
   dirty_tex_counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
   if (unlikely(dirty_tex_counter != sctx->last_dirty_tex_counter)) {
      sctx->last_dirty_tex_counter = dirty_tex_counter;
      sctx->framebuffer.dirty_cbufs |= ((1 << sctx->framebuffer.state.nr_cbufs) - 1);
      sctx->framebuffer.dirty_zsbuf = true;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
      si_update_all_texture_descriptors(sctx);
   }

   dirty_buf_counter = p_atomic_read(&sctx->screen->dirty_buf_counter);
   if (unlikely(dirty_buf_counter != sctx->last_dirty_buf_counter)) {
      sctx->last_dirty_buf_counter = dirty_buf_counter;
      /* Rebind all buffers unconditionally. */
      si_rebind_buffer(sctx, NULL);
   }

   si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
   /* Set the rasterization primitive type.
    *
    * This must be done after si_decompress_textures, which can call
    * draw_vbo recursively, and before si_update_shaders, which uses
    * current_rast_prim for this draw_vbo call. */
   if (sctx->gs_shader.cso) {
      /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
      rast_prim = sctx->gs_shader.cso->rast_prim;
   } else if (sctx->tes_shader.cso) {
      /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
      rast_prim = sctx->tes_shader.cso->rast_prim;
   } else if (util_rast_prim_is_triangles(prim)) {
      rast_prim = PIPE_PRIM_TRIANGLES;
   } else {
      /* Only possibilities: POINTS, LINE*, RECTANGLES */
      rast_prim = prim;
   }

   if (rast_prim != sctx->current_rast_prim) {
      if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
          util_prim_is_points_or_lines(rast_prim))
         si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);

      sctx->current_rast_prim = rast_prim;
      sctx->do_update_shaders = true;
   }
   if (sctx->tes_shader.cso && sctx->screen->info.has_ls_vgpr_init_bug) {
      /* Determine whether the LS VGPR fix should be applied.
       *
       * It is only required when num input CPs > num output CPs,
       * which cannot happen with the fixed-function TCS. We should
       * also update this bit when switching from TCS to fixed
       * function TCS. */
      struct si_shader_selector *tcs = sctx->tcs_shader.cso;
      bool ls_vgpr_fix =
         tcs && info->vertices_per_patch > tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];

      if (ls_vgpr_fix != sctx->ls_vgpr_fix) {
         sctx->ls_vgpr_fix = ls_vgpr_fix;
         sctx->do_update_shaders = true;
      }
   }

   if (sctx->chip_class <= GFX9 && sctx->gs_shader.cso) {
      /* Determine whether the GS triangle strip adjacency fix should
       * be applied. Rotate every other triangle if
       * - triangle strips with adjacency are fed to the GS and
       * - primitive restart is disabled (the rotation doesn't help
       *   when the restart occurs after an odd number of triangles). */
      bool gs_tri_strip_adj_fix =
         !sctx->tes_shader.cso && prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY && !primitive_restart;

      if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
         sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
         sctx->do_update_shaders = true;
      }
   }
   if (index_size) {
      /* Translate or upload the index buffer, if needed.
       * 8-bit indices are supported natively on GFX8 and newer; translate
       * them to 16-bit on older chips. */
      if (sctx->chip_class <= GFX7 && index_size == 1) {
         unsigned start, count, start_offset, size, offset;
         void *ptr;

         si_get_draw_start_count(sctx, info, &start, &count);
         start_offset = start * 2;
         size = count * 2;

         indexbuf = NULL;
         u_upload_alloc(ctx->stream_uploader, start_offset, size,
                        si_optimal_tcc_alignment(sctx, size), &offset, &indexbuf, &ptr);
         if (!indexbuf)
            return;

         util_shorten_ubyte_elts_to_userptr(&sctx->b, info, 0, 0, index_offset + start, count, ptr);

         /* info->start will be added by the drawing code */
         index_offset = offset - start_offset;
         index_size = 2;
      } else if (info->has_user_indices) {
         unsigned start_offset;

         assert(!info->indirect);
         start_offset = info->start * index_size;

         indexbuf = NULL;
         u_upload_data(ctx->stream_uploader, start_offset, info->count * index_size,
                       sctx->screen->info.tcc_cache_line_size,
                       (char *)info->index.user + start_offset, &index_offset, &indexbuf);
         if (!indexbuf)
            return;

         /* info->start will be added by the drawing code */
         index_offset -= start_offset;
      } else if (sctx->chip_class <= GFX7 && si_resource(indexbuf)->TC_L2_dirty) {
         /* GFX8 reads index buffers through TC L2, so it doesn't
          * need this. */
         sctx->flags |= SI_CONTEXT_WB_L2;
         si_resource(indexbuf)->TC_L2_dirty = false;
      }
   }
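   /* On GFX7 and older, util_shorten_ubyte_elts_to_userptr() above widens
    * each 8-bit index to 16 bits while copying into the upload buffer, e.g.
    * indices {0, 1, 2, 255} (4 bytes) become the same values stored as
    * uint16_t (8 bytes), which is why start_offset and size are computed
    * with "* 2". */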
   bool dispatch_prim_discard_cs = false;
   bool prim_discard_cs_instancing = false;
   unsigned original_index_size = index_size;
   unsigned direct_count = 0;

   if (info->indirect) {
      struct pipe_draw_indirect_info *indirect = info->indirect;

      /* Add the buffer size for memory checking in need_cs_space. */
      si_context_add_resource_size(sctx, indirect->buffer);

      /* Indirect buffers use TC L2 on GFX9, but not older hw. */
      if (sctx->chip_class <= GFX8) {
         if (si_resource(indirect->buffer)->TC_L2_dirty) {
            sctx->flags |= SI_CONTEXT_WB_L2;
            si_resource(indirect->buffer)->TC_L2_dirty = false;
         }

         if (indirect->indirect_draw_count &&
             si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
            sctx->flags |= SI_CONTEXT_WB_L2;
            si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
         }
      }
   } else {
      /* Multiply by 3 for strips and fans to get an approximate vertex
       * count as triangles. */
      direct_count = info->count * instance_count * (prim == PIPE_PRIM_TRIANGLES ? 1 : 3);
   }
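   /* Example of the estimate above: a 100-vertex triangle strip drawn with
    * instance_count == 2 gives direct_count = 100 * 2 * 3 = 600, roughly the
    * vertex count the strip would expand to as independent triangles
    * (98 triangles * 3 vertices per instance, rounded up by the heuristic). */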
   /* Determine if we can use the primitive discard compute shader. */
   if (si_compute_prim_discard_enabled(sctx) &&
       (direct_count > sctx->prim_discard_vertex_count_threshold
           ? (sctx->compute_num_verts_rejected += direct_count, true) /* add, then return true */
           : (sctx->compute_num_verts_ineligible += direct_count,
              false)) && /* add, then return false */
       (!info->count_from_stream_output || pd_msg("draw_opaque")) &&
       (primitive_restart ?
           /* Supported prim types with primitive restart: */
           (prim == PIPE_PRIM_TRIANGLE_STRIP || pd_msg("bad prim type with primitive restart")) &&
              /* Disallow instancing with primitive restart: */
              (instance_count == 1 || pd_msg("instance_count > 1 with primitive restart"))
           :
           /* Supported prim types without primitive restart + allow instancing: */
           (1 << prim) & ((1 << PIPE_PRIM_TRIANGLES) | (1 << PIPE_PRIM_TRIANGLE_STRIP) |
                          (1 << PIPE_PRIM_TRIANGLE_FAN)) &&
              /* Instancing is limited to 16-bit indices, because InstanceID
               * is packed into VertexID. */
              /* TODO: DrawArraysInstanced sometimes doesn't work, so it's disabled. */
              (instance_count == 1 ||
               (instance_count <= USHRT_MAX && index_size && index_size <= 2) ||
               pd_msg("instance_count too large or index_size == 4 or DrawArraysInstanced"))) &&
       (info->drawid == 0 || !sctx->vs_shader.cso->info.uses_drawid || pd_msg("draw_id > 0")) &&
       (!sctx->render_cond || pd_msg("render condition")) &&
       /* Forced enablement ignores pipeline statistics queries. */
       (sctx->screen->debug_flags & (DBG(PD) | DBG(ALWAYS_PD)) ||
        (!sctx->num_pipeline_stat_queries && !sctx->streamout.prims_gen_query_enabled) ||
        pd_msg("pipestat or primgen query")) &&
       (!sctx->vertex_elements->instance_divisor_is_fetched || pd_msg("loads instance divisors")) &&
       (!sctx->tes_shader.cso || pd_msg("uses tess")) &&
       (!sctx->gs_shader.cso || pd_msg("uses GS")) &&
       (!sctx->ps_shader.cso->info.uses_primid || pd_msg("PS uses PrimID")) &&
       !rs->polygon_mode_enabled &&
#if SI_PRIM_DISCARD_DEBUG /* same as cso->prim_discard_cs_allowed */
       (!sctx->vs_shader.cso->info.uses_bindless_images || pd_msg("uses bindless images")) &&
       (!sctx->vs_shader.cso->info.uses_bindless_samplers || pd_msg("uses bindless samplers")) &&
       (!sctx->vs_shader.cso->info.writes_memory || pd_msg("writes memory")) &&
       (!sctx->vs_shader.cso->info.writes_viewport_index || pd_msg("writes viewport index")) &&
       !sctx->vs_shader.cso->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] &&
       !sctx->vs_shader.cso->so.num_outputs &&
#else
       (sctx->vs_shader.cso->prim_discard_cs_allowed ||
        pd_msg("VS shader uses unsupported features")) &&
#endif
       /* Check that all buffers are used for read only, because compute
        * dispatches can run ahead. */
       (si_all_vs_resources_read_only(sctx, index_size ? indexbuf : NULL) ||
        pd_msg("write reference"))) {
      switch (si_prepare_prim_discard_or_split_draw(sctx, info, primitive_restart)) {
      case SI_PRIM_DISCARD_ENABLED:
         original_index_size = index_size;
         prim_discard_cs_instancing = instance_count > 1;
         dispatch_prim_discard_cs = true;

         /* The compute shader changes/lowers the following: */
         prim = PIPE_PRIM_TRIANGLES;
         index_size = 4;
         instance_count = 1;
         primitive_restart = false;
         sctx->compute_num_verts_rejected -= direct_count;
         sctx->compute_num_verts_accepted += direct_count;
         break;
      case SI_PRIM_DISCARD_DISABLED:
         break;
      case SI_PRIM_DISCARD_DRAW_SPLIT:
         sctx->compute_num_verts_rejected -= direct_count;
         goto return_cleanup;
      }
   }
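   /* The counter updates inside the condition above use the comma operator:
    * "(sctx->compute_num_verts_rejected += direct_count, true)" bumps the
    * statistic and then evaluates to true/false, so the eligibility chain
    * both gathers stats and short-circuits in a single expression. If the
    * draw is later accepted, the rejected count is moved to the accepted
    * count in the SI_PRIM_DISCARD_ENABLED case. */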
   if (prim_discard_cs_instancing != sctx->prim_discard_cs_instancing) {
      sctx->prim_discard_cs_instancing = prim_discard_cs_instancing;
      sctx->do_update_shaders = true;
   }
   /* Update NGG culling settings. */
   if (sctx->ngg && !dispatch_prim_discard_cs && rast_prim == PIPE_PRIM_TRIANGLES &&
       !sctx->gs_shader.cso && /* GS doesn't support NGG culling. */
       (sctx->screen->always_use_ngg_culling_all ||
        (sctx->tes_shader.cso && sctx->screen->always_use_ngg_culling_tess) ||
        /* At least 1024 non-indexed vertices (8 subgroups) are needed
         * per draw call (no TES/GS) to enable NGG culling. */
        (!index_size && direct_count >= 1024 &&
         (prim == PIPE_PRIM_TRIANGLES || prim == PIPE_PRIM_TRIANGLE_STRIP) &&
         !sctx->tes_shader.cso)) &&
       si_get_vs(sctx)->cso->ngg_culling_allowed) {
      unsigned ngg_culling = 0;

      if (rs->rasterizer_discard) {
         ngg_culling |= SI_NGG_CULL_FRONT_FACE | SI_NGG_CULL_BACK_FACE;
      } else {
         /* Polygon mode can't use view and small primitive culling,
          * because it draws points or lines where the culling depends
          * on the point or line width. */
         if (!rs->polygon_mode_enabled)
            ngg_culling |= SI_NGG_CULL_VIEW_SMALLPRIMS;

         if (sctx->viewports.y_inverted ? rs->cull_back : rs->cull_front)
            ngg_culling |= SI_NGG_CULL_FRONT_FACE;
         if (sctx->viewports.y_inverted ? rs->cull_front : rs->cull_back)
            ngg_culling |= SI_NGG_CULL_BACK_FACE;
      }
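      /* When the viewport Y axis is inverted, the screen-space winding of
       * triangles flips, so the API's front/back cull state has to be
       * swapped before being translated into the NGG cull bits; that is
       * what the two y_inverted ternaries above do. */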
      /* Use NGG fast launch for certain non-indexed primitive types.
       * A draw must have at least 1 full primitive. */
      if (ngg_culling && !index_size && direct_count >= 3 && !sctx->tes_shader.cso &&
          !sctx->gs_shader.cso) {
         if (prim == PIPE_PRIM_TRIANGLES)
            ngg_culling |= SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST;
         else if (prim == PIPE_PRIM_TRIANGLE_STRIP)
            ngg_culling |= SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP;
      }

      if (ngg_culling != sctx->ngg_culling) {
         /* Insert a VGT_FLUSH when enabling fast launch changes to prevent hangs.
          * See issues #2418, #2426, #2434. */
         if (ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL)
            sctx->flags |= SI_CONTEXT_VGT_FLUSH;
         sctx->ngg_culling = ngg_culling;
         sctx->do_update_shaders = true;
      }
   } else if (sctx->ngg_culling) {
      sctx->ngg_culling = false;
      sctx->do_update_shaders = true;
   }
   if (sctx->do_update_shaders && !si_update_shaders(sctx))
      goto return_cleanup;

   si_need_gfx_cs_space(sctx);

   /* If we're using a secure context, determine whether the cs must be secure or not. */
   if (unlikely(sctx->ws->ws_is_secure(sctx->ws))) {
      bool secure = si_gfx_resources_check_encrypted(sctx);
      if (secure != sctx->ws->cs_is_secure(sctx->gfx_cs)) {
         si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
         sctx->ws->cs_set_secure(sctx->gfx_cs, secure);
      }
   }

   if (sctx->bo_list_add_all_gfx_resources)
      si_gfx_resources_add_all_to_bo_list(sctx);

   /* Since we've called si_context_add_resource_size for vertex buffers,
    * this must be called after si_need_cs_space, because we must let
    * need_cs_space flush before we add buffers to the buffer list. */
   if (!si_upload_vertex_buffer_descriptors(sctx))
      goto return_cleanup;
   /* Vega10/Raven scissor bug workaround. When any context register is
    * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
    * registers must be written too. */
   unsigned masked_atoms = 0;

   if (sctx->screen->info.has_gfx9_scissor_bug) {
      masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);

      if (info->count_from_stream_output ||
          sctx->dirty_atoms & si_atoms_that_always_roll_context() ||
          sctx->dirty_states & si_states_that_always_roll_context())
         sctx->context_roll = true;
   }
   /* Use optimal packet order based on whether we need to sync the pipeline. */
   if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB |
                               SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH))) {
      /* If we have to wait for idle, set all states first, so that all
       * SET packets are processed in parallel with previous draw calls.
       * Then draw and prefetch at the end. This ensures that the time
       * the CUs are idle is very short. */
      if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
         masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);

      if (!si_upload_graphics_shader_descriptors(sctx))
         goto return_cleanup;

      /* Emit all states except possibly render condition. */
      si_emit_all_states(sctx, info, prim, instance_count, primitive_restart, masked_atoms);
      sctx->emit_cache_flush(sctx);
      /* <-- CUs are idle here. */

      if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
         sctx->atoms.s.render_cond.emit(sctx);

      if (sctx->screen->info.has_gfx9_scissor_bug &&
          (sctx->context_roll || si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
         sctx->atoms.s.scissors.emit(sctx);

      sctx->dirty_atoms = 0;

      si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset, instance_count,
                           dispatch_prim_discard_cs, original_index_size);
      /* <-- CUs are busy here. */

      /* Start prefetches after the draw has been started. Both will run
       * in parallel, but starting the draw first is more important. */
      if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
         cik_emit_prefetch_L2(sctx, false);
   } else {
      /* If we don't wait for idle, start prefetches first, then set
       * states, and draw at the end. */
      if (sctx->flags)
         sctx->emit_cache_flush(sctx);

      /* Only prefetch the API VS and VBO descriptors. */
      if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
         cik_emit_prefetch_L2(sctx, true);

      if (!si_upload_graphics_shader_descriptors(sctx))
         goto return_cleanup;

      si_emit_all_states(sctx, info, prim, instance_count, primitive_restart, masked_atoms);

      if (sctx->screen->info.has_gfx9_scissor_bug &&
          (sctx->context_roll || si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
         sctx->atoms.s.scissors.emit(sctx);

      sctx->dirty_atoms = 0;

      si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset, instance_count,
                           dispatch_prim_discard_cs, original_index_size);

      /* Prefetch the remaining shaders after the draw has been
       * started. */
      if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
         cik_emit_prefetch_L2(sctx, false);
   }
   /* Mark the displayable DCC buffer as dirty in order to update
    * it on the next call to si_flush_resource. */
   if (sctx->screen->info.use_display_dcc_with_retile_blit) {
      /* Don't use si_update_fb_dirtiness_after_rendering because it'll
       * cause unnecessary texture decompressions on each draw. */
      unsigned displayable_dcc_cb_mask = sctx->framebuffer.displayable_dcc_cb_mask;
      while (displayable_dcc_cb_mask) {
         unsigned i = u_bit_scan(&displayable_dcc_cb_mask);
         struct pipe_surface *surf = sctx->framebuffer.state.cbufs[i];
         struct si_texture *tex = (struct si_texture *)surf->texture;
         tex->displayable_dcc_dirty = true;
      }
   }

   /* Clear the context roll flag after the draw call. */
   sctx->context_roll = false;

   if (unlikely(sctx->current_saved_cs)) {
      si_trace_emit(sctx);
      si_log_draw_state(sctx, sctx->log);
   }

   /* Workaround for a VGT hang when streamout is enabled.
    * It must be done after drawing. */
   if ((sctx->family == CHIP_HAWAII || sctx->family == CHIP_TONGA || sctx->family == CHIP_FIJI) &&
       si_get_strmout_en(sctx)) {
      sctx->flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
   }

   if (unlikely(sctx->decompression_enabled)) {
      sctx->num_decompress_calls++;
   } else {
      sctx->num_draw_calls++;
      if (sctx->framebuffer.state.nr_cbufs > 1)
         sctx->num_mrt_draw_calls++;
      if (primitive_restart)
         sctx->num_prim_restart_calls++;
      if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
         sctx->num_spill_draw_calls++;
   }

return_cleanup:
   if (index_size && indexbuf != info->index.resource)
      pipe_resource_reference(&indexbuf, NULL);
}
static void si_draw_rectangle(struct blitter_context *blitter, void *vertex_elements_cso,
                              blitter_get_vs_func get_vs, int x1, int y1, int x2, int y2,
                              float depth, unsigned num_instances, enum blitter_attrib_type type,
                              const union blitter_attrib *attrib)
{
   struct pipe_context *pipe = util_blitter_get_pipe(blitter);
   struct si_context *sctx = (struct si_context *)pipe;

   /* Pack position coordinates as signed int16. */
   sctx->vs_blit_sh_data[0] = (uint32_t)(x1 & 0xffff) | ((uint32_t)(y1 & 0xffff) << 16);
   sctx->vs_blit_sh_data[1] = (uint32_t)(x2 & 0xffff) | ((uint32_t)(y2 & 0xffff) << 16);
   sctx->vs_blit_sh_data[2] = fui(depth);
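   /* Packing example: (x1, y1) = (16, 32) gives
    * vs_blit_sh_data[0] = 0x0010 | (0x0020 << 16) = 0x00200010. Negative
    * coordinates rely on two's complement, e.g. x1 = -1 packs as 0xffff in
    * the low half and is interpreted as a signed int16 by the blit shader. */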
   switch (type) {
   case UTIL_BLITTER_ATTRIB_COLOR:
      memcpy(&sctx->vs_blit_sh_data[3], attrib->color, sizeof(float) * 4);
      break;
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
      memcpy(&sctx->vs_blit_sh_data[3], &attrib->texcoord, sizeof(attrib->texcoord));
      break;
   case UTIL_BLITTER_ATTRIB_NONE:;
   }

   pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));

   struct pipe_draw_info info = {};
   info.mode = SI_PRIM_RECTANGLE_LIST;
   info.count = 3;
   info.instance_count = num_instances;

   /* Don't set per-stage shader pointers for VS. */
   sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(VERTEX);
   sctx->vertex_buffer_pointer_dirty = false;
   sctx->vertex_buffer_user_sgprs_dirty = false;

   si_draw_vbo(pipe, &info);
}
void si_trace_emit(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   uint32_t trace_id = ++sctx->current_saved_cs->trace_id;

   si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf, 0, 4, V_370_MEM, V_370_ME, &trace_id);

   radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
   radeon_emit(cs, AC_ENCODE_TRACE_POINT(trace_id));

   if (sctx->log)
      u_log_flush(sctx->log);
}
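/* In broad strokes, the trace works in two parts: the CP writes the
 * incremented trace_id to the trace buffer in memory (so after a hang, the
 * last value shows how far the GPU got), and the same id is embedded in the
 * IB as a NOP marker so the two can be correlated when the saved command
 * stream is dumped.
 */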
void si_init_draw_functions(struct si_context *sctx)
{
   sctx->b.draw_vbo = si_draw_vbo;

   sctx->blitter->draw_rectangle = si_draw_rectangle;

   si_init_ia_multi_vgt_param_table(sctx);
}