/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
28 #include "radeon/r600_cs.h"
32 #include "util/u_index_modify.h"
33 #include "util/u_log.h"
34 #include "util/u_upload_mgr.h"
35 #include "util/u_prim.h"
static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST
	};
	assert(mode < ARRAY_SIZE(prim_conv));
	return prim_conv[mode];
}
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}
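
/* Illustrative examples (added; not part of the original file): every
 * topology that rasterizes as filled triangles reports TRISTRIP as the
 * GS output type, regardless of the input primitive, e.g.:
 *
 *   si_conv_pipe_prim(PIPE_PRIM_QUADS)        == V_008958_DI_PT_QUADLIST
 *   si_conv_prim_to_gs_out(PIPE_PRIM_QUADS)   == V_028A6C_OUTPRIM_TYPE_TRISTRIP
 *   si_conv_prim_to_gs_out(PIPE_PRIM_POLYGON) == V_028A6C_OUTPRIM_TYPE_TRISTRIP
 */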
/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
static void si_emit_derived_tess_state(struct si_context *sctx,
				       const struct pipe_draw_info *info,
				       unsigned *num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ls_current;
	struct si_shader_selector *ls;
	/* The TES pointer will only be used for sctx->last_tcs.
	 * It would be wrong to think that TCS = TES. */
	struct si_shader_selector *tcs =
		sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
	unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
	bool has_primid_instancing_bug = sctx->b.chip_class == SI &&
					 sctx->b.screen->info.max_se == 1;
	unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
	unsigned num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
	unsigned input_patch_size, output_patch_size, output_patch0_offset;
	unsigned perpatch_output_offset, lds_size;
	unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
	unsigned offchip_layout, hardware_lds_size, ls_hs_config;

	/* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
	if (sctx->b.chip_class >= GFX9) {
		if (sctx->tcs_shader.cso)
			ls_current = sctx->tcs_shader.current;
		else
			ls_current = sctx->fixed_func_tcs_shader.current;

		ls = ls_current->key.part.tcs.ls;
	} else {
		ls_current = sctx->vs_shader.current;
		ls = sctx->vs_shader.cso;
	}

	if (sctx->last_ls == ls_current &&
	    sctx->last_tcs == tcs &&
	    sctx->last_tes_sh_base == tes_sh_base &&
	    sctx->last_num_tcs_input_cp == num_tcs_input_cp &&
	    (!has_primid_instancing_bug ||
	     (sctx->last_tess_uses_primid == tess_uses_primid))) {
		*num_patches = sctx->last_num_patches;
		return;
	}

	sctx->last_ls = ls_current;
	sctx->last_tcs = tcs;
	sctx->last_tes_sh_base = tes_sh_base;
	sctx->last_num_tcs_input_cp = num_tcs_input_cp;
	sctx->last_tess_uses_primid = tess_uses_primid;
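
	/* A sketch (added for illustration; it follows from the
	 * output_patch0_offset and perpatch_output_offset computations below)
	 * of how the LDS sized by this function is laid out, with
	 * N = *num_patches:
	 *
	 *   [input patch 0 .. input patch N-1]     LS/VS outputs
	 *   [output patch 0 .. output patch N-1]   TCS outputs:
	 *       per-vertex outputs first, then per-patch outputs
	 */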
	/* This calculates how shader inputs and outputs among VS, TCS, and TES
	 * are laid out in LDS. */
	num_tcs_inputs = util_last_bit64(ls->outputs_written);

	if (sctx->tcs_shader.cso) {
		num_tcs_outputs = util_last_bit64(tcs->outputs_written);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
	} else {
		/* No TCS. Route varyings from LS to TES. */
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	*num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
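
	/* Worked example (illustrative values only): for triangle patches with
	 * num_tcs_input_cp == num_tcs_output_cp == 3, this starts at
	 * 64 / 3 * 4 = 84 patches, which the clamps below cut down further
	 * (LDS size, offchip buffer size, and the fixed limit of 40).
	 */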
	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 *
	 * While CIK can use 64K per threadgroup, there is a hang on Stoney
	 * with 2 CUs if we use more than 32K. The closed Vulkan driver also
	 * uses 32K at most on all GCN chips.
	 */
	hardware_lds_size = 32768;
	*num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
							       output_patch_size));

	/* Make sure the output data fits in the offchip buffer */
	*num_patches = MIN2(*num_patches,
			    (sctx->screen->tess_offchip_block_dw_size * 4) /
			    output_patch_size);

	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	*num_patches = MIN2(*num_patches, 40);

	if (sctx->b.chip_class == SI ||
	    /* TODO: fix GFX9 where a threadgroup contains more than 1 wave and
	     * LS vertices per patch > HS vertices per patch. Piglit: 16in-1out */
	    (sctx->b.chip_class == GFX9 &&
	     num_tcs_input_cp > num_tcs_output_cp)) {
		/* SI bug workaround, related to power management. Limit LS-HS
		 * threadgroups to only one wave.
		 */
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		*num_patches = MIN2(*num_patches, one_wave);
	}

	/* The VGT HS block increments the patch ID unconditionally
	 * within a single threadgroup. This results in incorrect
	 * patch IDs when instanced draws are used.
	 *
	 * The intended solution is to restrict threadgroups to
	 * a single instance by setting SWITCH_ON_EOI, which
	 * should cause IA to split instances up. However, this
	 * doesn't work correctly on SI when there is no other
	 * SE to switch to.
	 */
	if (has_primid_instancing_bug)
		*num_patches = 1;

	sctx->last_num_patches = *num_patches;
	output_patch0_offset = input_patch_size * *num_patches;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	/* Compute userdata SGPRs. */
	assert(((input_vertex_size / 4) & ~0xff) == 0);
	assert(((output_vertex_size / 4) & ~0xff) == 0);
	assert(((input_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch0_offset / 16) & ~0xffff) == 0);
	assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
	assert(num_tcs_input_cp <= 32);
	assert(num_tcs_output_cp <= 32);

	tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
			S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
	tcs_out_layout = (output_patch_size / 4) |
			 ((output_vertex_size / 4) << 13);
	tcs_out_offsets = (output_patch0_offset / 16) |
			  ((perpatch_output_offset / 16) << 16);
	offchip_layout = *num_patches |
			 (num_tcs_output_cp << 6) |
			 (pervertex_output_patch_size * *num_patches << 12);
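
	/* How the packed words decode (added illustration; bit positions
	 * follow from the shifts above):
	 *
	 *   tcs_out_layout:  [12:0] output_patch_size/4, [31:13] output_vertex_size/4
	 *   tcs_out_offsets: [15:0] output_patch0_offset/16, [31:16] perpatch_output_offset/16
	 *   offchip_layout:  [5:0] *num_patches, [11:6] num_tcs_output_cp,
	 *                    [31:12] pervertex_output_patch_size * *num_patches
	 */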
	/* Compute the LDS size. */
	lds_size = output_patch0_offset + output_patch_size * *num_patches;

	if (sctx->b.chip_class >= CIK) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}

	/* Set SI_SGPR_VS_STATE_BITS. */
	sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE &
				  C_VS_STATE_LS_OUT_VERTEX_SIZE;
	sctx->current_vs_state |= tcs_in_layout;

	if (sctx->b.chip_class >= GFX9) {
		unsigned hs_rsrc2 = ls_current->config.rsrc2 |
				    S_00B42C_LDS_SIZE(lds_size);

		radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);

		/* Set userdata SGPRs for merged LS-HS. */
		radeon_set_sh_reg_seq(cs,
				      R_00B430_SPI_SHADER_USER_DATA_LS_0 +
				      GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
		radeon_emit(cs, offchip_layout);
		radeon_emit(cs, tcs_out_offsets);
		radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
	} else {
		unsigned ls_rsrc2 = ls_current->config.rsrc2;

		si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);

		/* Due to a hw bug, RSRC2_LS must be written twice with another
		 * LS register written in between. */
		if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
			radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
		radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
		radeon_emit(cs, ls_current->config.rsrc1);
		radeon_emit(cs, ls_rsrc2);

		/* Set userdata SGPRs for TCS. */
		radeon_set_sh_reg_seq(cs,
			R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
		radeon_emit(cs, offchip_layout);
		radeon_emit(cs, tcs_out_offsets);
		radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
		radeon_emit(cs, tcs_in_layout);
	}

	/* Set userdata SGPRs for TES. */
	radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
	radeon_emit(cs, offchip_layout);
	radeon_emit(cs, r600_resource(sctx->tess_offchip_ring)->gpu_address >> 16);

	ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
		       S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		       S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);

	if (sctx->b.chip_class >= CIK)
		radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   ls_hs_config);
	else
		radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
				       ls_hs_config);
}
static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
{
	switch (info->mode) {
	case PIPE_PRIM_PATCHES:
		return info->count / info->vertices_per_patch;
	case R600_PRIM_RECTANGLE_LIST:
		return info->count / 3;
	default:
		return u_prims_for_vertices(info->mode, info->count);
	}
}
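
/* Example (illustrative): a draw of 12 vertices with PIPE_PRIM_TRIANGLES
 * gives u_prims_for_vertices(PIPE_PRIM_TRIANGLES, 12) == 4 primitives,
 * and 12 vertices of R600_PRIM_RECTANGLE_LIST give 12 / 3 == 4 rectangles.
 */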
static unsigned
si_get_init_multi_vgt_param(struct si_screen *sscreen,
			    union si_vgt_param_key *key)
{
	STATIC_ASSERT(sizeof(union si_vgt_param_key) == 4);
	unsigned max_primgroup_in_wave = 2;

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;

	if (key->u.uses_tess) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (key->u.tess_uses_prim_id)
			ia_switch_on_eoi = true;

		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((sscreen->b.family == CHIP_TAHITI ||
		     sscreen->b.family == CHIP_PITCAIRN ||
		     sscreen->b.family == CHIP_BONAIRE) &&
		    key->u.uses_gs)
			partial_vs_wave = true;

		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (sscreen->has_distributed_tess) {
			if (key->u.uses_gs) {
				if (sscreen->b.chip_class <= VI)
					partial_es_wave = true;

				/* GPU hang workaround. */
				if (sscreen->b.family == CHIP_TONGA ||
				    sscreen->b.family == CHIP_FIJI ||
				    sscreen->b.family == CHIP_POLARIS10 ||
				    sscreen->b.family == CHIP_POLARIS11 ||
				    sscreen->b.family == CHIP_POLARIS12)
					partial_vs_wave = true;
			} else {
				partial_vs_wave = true;
			}
		}
	}

	/* This is a hardware requirement. */
	if (key->u.line_stipple_enabled ||
	    (sscreen->b.debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sscreen->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements.
		 *
		 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
		 * for points, line strips, and tri strips.
		 */
		if (sscreen->b.info.max_se < 4 ||
		    key->u.prim == PIPE_PRIM_POLYGON ||
		    key->u.prim == PIPE_PRIM_LINE_LOOP ||
		    key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
		    key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    (key->u.primitive_restart &&
		     (sscreen->b.family < CHIP_POLARIS10 ||
		      (key->u.prim != PIPE_PRIM_POINTS &&
		       key->u.prim != PIPE_PRIM_LINE_STRIP &&
		       key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
		    key->u.count_from_stream_output)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sscreen->b.family == CHIP_HAWAII &&
		    key->u.uses_instancing)
			wd_switch_on_eop = true;

		/* Performance recommendation for 4 SE Gfx7-8 parts if
		 * instances are smaller than a primgroup.
		 * Assume indirect draws always use small instances.
		 * This is needed for good VS wave utilization.
		 */
		if (sscreen->b.chip_class <= VI &&
		    sscreen->b.info.max_se == 4 &&
		    key->u.multi_instances_smaller_than_primgroup)
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (sscreen->b.info.max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (sscreen->b.family == CHIP_HAWAII ||
		     (sscreen->b.chip_class == VI &&
		      (key->u.uses_gs || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (sscreen->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    key->u.uses_instancing)
			partial_vs_wave = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (sscreen->b.chip_class <= VI && ia_switch_on_eoi)
		partial_es_wave = true;

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
	       S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
	       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
	       S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
	       S_028AA8_WD_SWITCH_ON_EOP(sscreen->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
	       /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
	       S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->b.chip_class == VI ?
					    max_primgroup_in_wave : 0) |
	       S_030960_EN_INST_OPT_BASIC(sscreen->b.chip_class >= GFX9) |
	       S_030960_EN_INST_OPT_ADV(sscreen->b.chip_class >= GFX9);
}
void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
{
	for (int prim = 0; prim <= R600_PRIM_RECTANGLE_LIST; prim++)
	for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
	for (int multi_instances = 0; multi_instances < 2; multi_instances++)
	for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
	for (int count_from_so = 0; count_from_so < 2; count_from_so++)
	for (int line_stipple = 0; line_stipple < 2; line_stipple++)
	for (int uses_tess = 0; uses_tess < 2; uses_tess++)
	for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
	for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
		union si_vgt_param_key key;

		key.index = 0;
		key.u.prim = prim;
		key.u.uses_instancing = uses_instancing;
		key.u.multi_instances_smaller_than_primgroup = multi_instances;
		key.u.primitive_restart = primitive_restart;
		key.u.count_from_stream_output = count_from_so;
		key.u.line_stipple_enabled = line_stipple;
		key.u.uses_tess = uses_tess;
		key.u.tess_uses_prim_id = tess_uses_primid;
		key.u.uses_gs = uses_gs;

		sctx->ia_multi_vgt_param[key.index] =
			si_get_init_multi_vgt_param(sctx->screen, &key);
	}
}
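
/* Illustrative note (added): key.index is the packed form of the bitfields
 * set above, so at draw time si_get_ia_multi_vgt_param() fills the same
 * fields and reads sctx->ia_multi_vgt_param[key.index]; the per-chip
 * workaround logic therefore runs once at context creation rather than on
 * every draw.
 */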
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned num_patches)
{
	union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
	unsigned primgroup_size;
	unsigned ia_multi_vgt_param;

	if (sctx->tes_shader.cso) {
		primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
	} else if (sctx->gs_shader.cso) {
		primgroup_size = 64; /* recommended with a GS */
	} else {
		primgroup_size = 128; /* recommended without a GS and tess */
	}

	key.u.prim = info->mode;
	key.u.uses_instancing = info->indirect || info->instance_count > 1;
	key.u.multi_instances_smaller_than_primgroup =
		info->indirect ||
		(info->instance_count > 1 &&
		 (info->count_from_stream_output ||
		  si_num_prims_for_vertices(info) < primgroup_size));
	key.u.primitive_restart = info->primitive_restart;
	key.u.count_from_stream_output = info->count_from_stream_output != NULL;

	ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
			     S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);

	if (sctx->gs_shader.cso) {
		/* GS requirement. */
		if (sctx->b.chip_class <= VI &&
		    SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
			ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);

		/* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
		 * The hw doc says all multi-SE chips are affected, but Vulkan
		 * only applies it to Hawaii. Do what Vulkan does.
		 */
		if (sctx->b.family == CHIP_HAWAII &&
		    G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
		    (info->indirect ||
		     (info->instance_count > 1 &&
		      (info->count_from_stream_output ||
		       si_num_prims_for_vertices(info) <= 1))))
			sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
	}

	return ia_multi_vgt_param;
}
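
/* Example of the PRIMGROUP_SIZE term (illustrative): without tessellation
 * and GS, primgroup_size is 128, so the S_028AA8_PRIMGROUP_SIZE field above
 * is programmed as 128 - 1 = 127; with tessellation the value must stay a
 * multiple of num_patches, which is why num_patches is used directly.
 */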
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	enum pipe_prim_type rast_prim = sctx->current_rast_prim;
	struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == sctx->last_rast_prim &&
	    rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
		return;

	/* For lines, reset the stipple pattern at each primitive. Otherwise,
	 * reset the stipple pattern at each packet (line strips, line loops).
	 */
	radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
			       rs->pa_sc_line_stipple |
			       S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));

	sctx->last_rast_prim = rast_prim;
	sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}
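
/* Example (illustrative): for PIPE_PRIM_LINE_STRIP the write above sets
 * AUTO_RESET_CNTL to 2, i.e. the stipple pattern resets once per packet,
 * while plain PIPE_PRIM_LINES uses 1, resetting at every line segment.
 */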
static void si_emit_vs_state(struct si_context *sctx,
			     const struct pipe_draw_info *info)
{
	sctx->current_vs_state &= C_VS_STATE_INDEXED;
	sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);

	if (sctx->current_vs_state != sctx->last_vs_state) {
		struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

		radeon_set_sh_reg(cs,
			sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
			SI_SGPR_VS_STATE_BITS * 4,
			sctx->current_vs_state);

		sctx->last_vs_state = sctx->current_vs_state;
	}
}
static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info,
				   unsigned num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
	unsigned ia_multi_vgt_param;

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);

	if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
		if (sctx->b.chip_class >= GFX9)
			radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param);
		else if (sctx->b.chip_class >= CIK)
			radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
		else
			radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);

		sctx->last_multi_vgt_param = ia_multi_vgt_param;
	}
	if (prim != sctx->last_prim) {
		if (sctx->b.chip_class >= CIK)
			radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
		else
			radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);

		sctx->last_prim = prim;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		if (sctx->b.chip_class >= GFX9)
			radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
					       info->primitive_restart);
		else
			radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
					       info->primitive_restart);

		sctx->last_primitive_restart_en = info->primitive_restart;
	}
	if (info->primitive_restart &&
	    (info->restart_index != sctx->last_restart_index ||
	     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
		radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       info->restart_index);
		sctx->last_restart_index = info->restart_index;
	}
}
static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 struct pipe_resource *indexbuf,
				 unsigned index_size,
				 unsigned index_offset)
{
	struct pipe_draw_indirect_info *indirect = info->indirect;
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
	uint32_t index_max_size = 0;
	uint64_t index_va = 0;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
				COPY_DATA_DST_SEL(COPY_DATA_REG) |
				COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va);       /* src address lo */
		radeon_emit(cs, va >> 32); /* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0); /* unused */

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  t->buf_filled_size, RADEON_USAGE_READ,
					  RADEON_PRIO_SO_FILLED_SIZE);
	}

	if (index_size) {
		if (index_size != sctx->last_index_size) {
			unsigned index_type;

			switch (index_size) {
			case 1:
				index_type = V_028A7C_VGT_INDEX_8;
				break;
			case 2:
				index_type = V_028A7C_VGT_INDEX_16 |
					     (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						      V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
				break;
			case 4:
				index_type = V_028A7C_VGT_INDEX_32 |
					     (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						      V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
				break;
			default:
				assert(!"unreachable");
				return;
			}

			if (sctx->b.chip_class >= GFX9) {
				radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
							   2, index_type);
			} else {
				radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
				radeon_emit(cs, index_type);
			}

			sctx->last_index_size = index_size;
		}

		index_max_size = (indexbuf->width0 - index_offset) /
				 index_size;
		index_va = r600_resource(indexbuf)->gpu_address + index_offset;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)indexbuf,
					  RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
	} else {
		/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
		 * so the state must be re-emitted before the next indexed draw.
		 */
		if (sctx->b.chip_class >= CIK)
			sctx->last_index_size = -1;
	}

	if (indirect) {
		uint64_t indirect_va = r600_resource(indirect->buffer)->gpu_address;

		assert(indirect_va % 8 == 0);

		si_invalidate_draw_sh_constants(sctx);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, 1);
		radeon_emit(cs, indirect_va);
		radeon_emit(cs, indirect_va >> 32);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)indirect->buffer,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
						 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;

		assert(indirect->offset % 4 == 0);

		if (index_size) {
			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);
		}

		if (!sctx->screen->has_draw_indirect_multi) {
			radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT
							: PKT3_DRAW_INDIRECT,
					     3, render_cond_bit));
			radeon_emit(cs, indirect->offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, di_src_sel);
		} else {
			uint64_t count_va = 0;

			if (indirect->indirect_draw_count) {
				struct r600_resource *params_buf =
					(struct r600_resource *)indirect->indirect_draw_count;

				radeon_add_to_buffer_list(
					&sctx->b, &sctx->b.gfx, params_buf,
					RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

				count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
			}

			radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
							  PKT3_DRAW_INDIRECT_MULTI,
					     8, render_cond_bit));
			radeon_emit(cs, indirect->offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
					S_2C3_DRAW_INDEX_ENABLE(1) |
					S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
			radeon_emit(cs, indirect->draw_count);
			radeon_emit(cs, count_va);
			radeon_emit(cs, count_va >> 32);
			radeon_emit(cs, indirect->stride);
			radeon_emit(cs, di_src_sel);
		}
	} else {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = index_size ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    info->drawid != sctx->last_drawid ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);
			radeon_emit(cs, info->drawid);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_drawid = info->drawid;
			sctx->last_sh_base_reg = sh_base_reg;
		}

		if (index_size) {
			index_va += info->start * index_size;

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
			radeon_emit(cs, index_max_size);
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		} else {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
					S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
		}
	}
}
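
/* Example of the emitted packets (illustrative): a direct, non-indexed,
 * 3-vertex draw ends as
 *   PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit), 3,
 *   V_0287F0_DI_SRC_SEL_AUTO_INDEX
 * with USE_OPAQUE set only when the vertex count comes from streamout.
 */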
static void si_emit_surface_sync(struct r600_common_context *rctx,
				 unsigned cp_coher_cntl)
{
	struct radeon_winsys_cs *cs = rctx->gfx.cs;

	if (rctx->chip_class >= GFX9) {
		/* Flush caches and wait for the caches to assert idle. */
		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0xffffff);	/* CP_COHER_SIZE_HI */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	} else {
		/* ACQUIRE_MEM is only required on a compute ring. */
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	}
}
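
/* Note (added for clarity): both paths above request a full-range sync,
 * CP_COHER_SIZE = 0xffffffff (plus CP_COHER_SIZE_HI = 0xffffff on GFX9)
 * with CP_COHER_BASE = 0, so the flush covers the entire address space.
 */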
void si_emit_cache_flush(struct si_context *sctx)
{
	struct r600_common_context *rctx = &sctx->b;
	struct radeon_winsys_cs *cs = rctx->gfx.cs;
	uint32_t cp_coher_cntl = 0;
	uint32_t flush_cb_db = rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
					      SI_CONTEXT_FLUSH_AND_INV_DB);

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB)
		sctx->b.num_cb_cache_flushes++;
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB)
		sctx->b.num_db_cache_flushes++;

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES, but that
	 * doesn't seem to work reliably. Since the bug doesn't affect
	 * correctness (it only does more work than necessary) and
	 * the performance impact is likely negligible, there is no plan
	 * to add a workaround for it.
	 */
	if (rctx->flags & SI_CONTEXT_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (rctx->flags & SI_CONTEXT_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (rctx->chip_class <= VI) {
		if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
			cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
					 S_0085F0_CB0_DEST_BASE_ENA(1) |
					 S_0085F0_CB1_DEST_BASE_ENA(1) |
					 S_0085F0_CB2_DEST_BASE_ENA(1) |
					 S_0085F0_CB3_DEST_BASE_ENA(1) |
					 S_0085F0_CB4_DEST_BASE_ENA(1) |
					 S_0085F0_CB5_DEST_BASE_ENA(1) |
					 S_0085F0_CB6_DEST_BASE_ENA(1) |
					 S_0085F0_CB7_DEST_BASE_ENA(1);

			/* Necessary for DCC */
			if (rctx->chip_class == VI)
				r600_gfx_write_event_eop(rctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
							 0, 0, NULL, 0, 0, 0);
		}
		if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB)
			cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
					 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		/* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		/* Flush HTILE. SURFACE_SYNC will wait for idle. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}

	/* Wait for shader engines to go idle.
	 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
	 * for everything including CB/DB cache flushes.
	 */
	if (!flush_cb_db) {
		if (rctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			/* Only count explicit shader flushes, not implicit ones
			 * done by SURFACE_SYNC.
			 */
			rctx->num_vs_flushes++;
			rctx->num_ps_flushes++;
		} else if (rctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			rctx->num_vs_flushes++;
		}
	}

	if (rctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
	    sctx->compute_is_busy) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
		rctx->num_cs_flushes++;
		sctx->compute_is_busy = false;
	}

	/* VGT state synchronization. */
	if (rctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (rctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
	 * wait for idle on GFX9. We have to use a TS event.
	 */
	if (sctx->b.chip_class >= GFX9 && flush_cb_db) {
		uint64_t va;
		unsigned tc_flags, cb_db_event;

		/* Set the CB/DB flush event. */
		switch (flush_cb_db) {
		case SI_CONTEXT_FLUSH_AND_INV_CB:
			cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
			break;
		case SI_CONTEXT_FLUSH_AND_INV_DB:
			cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
			break;
		default:
			/* both CB & DB */
			cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
		}

		/* TC | TC_WB = invalidate L2 data
		 * TC_MD | TC_WB = invalidate L2 metadata (DCC, etc.)
		 * TC | TC_WB | TC_MD = invalidate L2 data & metadata
		 */
		tc_flags = 0;

		/* Ideally flush TC together with CB/DB. */
		if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2) {
			tc_flags |= EVENT_TC_ACTION_ENA |
				    EVENT_TC_WB_ACTION_ENA |
				    EVENT_TCL1_ACTION_ENA;

			/* Clear the flags. */
			rctx->flags &= ~(SI_CONTEXT_INV_GLOBAL_L2 |
					 SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
					 SI_CONTEXT_INV_VMEM_L1);
			sctx->b.num_L2_invalidates++;
		}

		/* Do the flush (enqueue the event and wait for it). */
		va = sctx->wait_mem_scratch->gpu_address;
		sctx->wait_mem_number++;

		r600_gfx_write_event_eop(rctx, cb_db_event, tc_flags, 1,
					 sctx->wait_mem_scratch, va,
					 sctx->wait_mem_number, 0);
		r600_gfx_wait_fence(rctx, va, sctx->wait_mem_number, 0xffffffff);
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if (cp_coher_cntl ||
	    (rctx->flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
			    SI_CONTEXT_INV_VMEM_L1 |
			    SI_CONTEXT_INV_GLOBAL_L2 |
			    SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	/* When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
	 * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
	 *
	 * cp_coher_cntl should contain all necessary flags except TC flags
	 * at this point.
	 *
	 * SI-CIK don't support L2 write-back.
	 */
	if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2 ||
	    (rctx->chip_class <= CIK &&
	     (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		/* Invalidate L1 & L2. (L1 is always invalidated on SI)
		 * WB must be set on VI+ when TC_ACTION is set.
		 */
		si_emit_surface_sync(rctx, cp_coher_cntl |
				     S_0085F0_TC_ACTION_ENA(1) |
				     S_0085F0_TCL1_ACTION_ENA(1) |
				     S_0301F0_TC_WB_ACTION_ENA(rctx->chip_class >= VI));
		cp_coher_cntl = 0;
		sctx->b.num_L2_invalidates++;
	} else {
		/* L1 invalidation and L2 writeback must be done separately,
		 * because both operations can't be done together.
		 */
		if (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
			/* WB = write-back
			 * NC = apply to non-coherent MTYPEs
			 *      (i.e. MTYPE <= 1, which is what we use everywhere)
			 *
			 * WB doesn't work without NC.
			 */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0301F0_TC_WB_ACTION_ENA(1) |
					     S_0301F0_TC_NC_ACTION_ENA(1));
			cp_coher_cntl = 0;
			sctx->b.num_L2_writebacks++;
		}
		if (rctx->flags & SI_CONTEXT_INV_VMEM_L1) {
			/* Invalidate per-CU VMEM L1. */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0085F0_TCL1_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
	}

	/* If TC flushes haven't cleared this... */
	if (cp_coher_cntl)
		si_emit_surface_sync(rctx, cp_coher_cntl);

	if (rctx->flags & R600_CONTEXT_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
				EVENT_INDEX(0));
	} else if (rctx->flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
				EVENT_INDEX(0));
	}

	rctx->flags = 0;
}
static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	struct pipe_draw_indirect_info *indirect = info->indirect;

	if (indirect) {
		unsigned indirect_count;
		struct pipe_transfer *transfer;
		unsigned begin, end;
		unsigned map_size;
		unsigned *data;

		if (indirect->indirect_draw_count) {
			data = pipe_buffer_map_range(&sctx->b.b,
					indirect->indirect_draw_count,
					indirect->indirect_draw_count_offset,
					sizeof(unsigned),
					PIPE_TRANSFER_READ, &transfer);

			indirect_count = *data;

			pipe_buffer_unmap(&sctx->b.b, transfer);
		} else {
			indirect_count = indirect->draw_count;
		}

		if (!indirect_count) {
			*start = *count = 0;
			return;
		}

		map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
		data = pipe_buffer_map_range(&sctx->b.b, indirect->buffer,
					     indirect->offset, map_size,
					     PIPE_TRANSFER_READ, &transfer);

		begin = UINT_MAX;
		end = 0;

		for (unsigned i = 0; i < indirect_count; ++i) {
			unsigned count = data[0];
			unsigned start = data[2];

			if (count > 0) {
				begin = MIN2(begin, start);
				end = MAX2(end, start + count);
			}

			data += indirect->stride / sizeof(unsigned);
		}

		pipe_buffer_unmap(&sctx->b.b, transfer);

		if (begin < end) {
			*start = begin;
			*count = end - begin;
		} else {
			*start = *count = 0;
		}
	} else {
		*start = info->start;
		*count = info->count;
	}
}
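
/* Worked example (illustrative): two indirect draws {count=6, start=0}
 * and {count=3, start=100} merge to begin=0, end=103, so *start=0 and
 * *count=103, a single range covering both draws for the index upload.
 */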
static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
			       unsigned skip_atom_mask)
{
	/* Emit state atoms. */
	unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
	while (mask) {
		struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];

		atom->emit(&sctx->b, atom);
	}
	sctx->dirty_atoms &= skip_atom_mask;

	/* Emit states. */
	mask = sctx->dirty_states;
	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct si_pm4_state *state = sctx->queued.array[i];

		if (!state || sctx->emitted.array[i] == state)
			continue;

		si_pm4_emit(sctx, state);
		sctx->emitted.array[i] = state;
	}
	sctx->dirty_states = 0;

	/* Emit draw states. */
	unsigned num_patches = 0;

	si_emit_rasterizer_prim_state(sctx);
	if (sctx->tes_shader.cso)
		si_emit_derived_tess_state(sctx, info, &num_patches);
	si_emit_vs_state(sctx, info);
	si_emit_draw_registers(sctx, info, num_patches);
}
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct pipe_resource *indexbuf = info->index.resource;
	unsigned dirty_tex_counter;
	enum pipe_prim_type rast_prim;
	unsigned index_size = info->index_size;
	unsigned index_offset = info->indirect ? info->start * index_size : 0;

	if (likely(!info->indirect)) {
		/* SI-CI treat instance_count==0 as instance_count==1. There is
		 * no workaround for indirect draws, but we can at least skip
		 * direct draws.
		 */
		if (unlikely(!info->instance_count))
			return;

		/* Handle count == 0. */
		if (unlikely(!info->count &&
			     (index_size || !info->count_from_stream_output)))
			return;
	}

	if (unlikely(!sctx->vs_shader.cso)) {
		assert(0);
		return;
	}
	if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
		assert(0);
		return;
	}
	if (unlikely(!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES))) {
		assert(0);
		return;
	}

	/* Recompute and re-emit the texture resource states if needed. */
	dirty_tex_counter = p_atomic_read(&sctx->b.screen->dirty_tex_counter);
	if (unlikely(dirty_tex_counter != sctx->b.last_dirty_tex_counter)) {
		sctx->b.last_dirty_tex_counter = dirty_tex_counter;
		sctx->framebuffer.dirty_cbufs |=
			((1 << sctx->framebuffer.state.nr_cbufs) - 1);
		sctx->framebuffer.dirty_zsbuf = true;
		si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
		si_update_all_texture_descriptors(sctx);
	}

	si_decompress_graphics_textures(sctx);

	/* Set the rasterization primitive type.
	 *
	 * This must be done after si_decompress_textures, which can call
	 * draw_vbo recursively, and before si_update_shaders, which uses
	 * current_rast_prim for this draw_vbo call. */
	if (sctx->gs_shader.cso)
		rast_prim = sctx->gs_shader.cso->gs_output_prim;
	else if (sctx->tes_shader.cso)
		rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
	else
		rast_prim = info->mode;

	if (rast_prim != sctx->current_rast_prim) {
		sctx->current_rast_prim = rast_prim;
		sctx->do_update_shaders = true;
	}

	if (sctx->gs_shader.cso) {
		/* Determine whether the GS triangle strip adjacency fix should
		 * be applied. Rotate every other triangle if
		 * - triangle strips with adjacency are fed to the GS and
		 * - primitive restart is disabled (the rotation doesn't help
		 *   when the restart occurs after an odd number of triangles).
		 */
		bool gs_tri_strip_adj_fix =
			!sctx->tes_shader.cso &&
			info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
			!info->primitive_restart;

		if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
			sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
			sctx->do_update_shaders = true;
		}
	}

	if (sctx->do_update_shaders && !si_update_shaders(sctx))
		return;

	if (index_size) {
		/* Translate or upload, if needed. */
		/* 8-bit indices are supported on VI. */
		if (sctx->b.chip_class <= CIK && index_size == 1) {
			unsigned start, count, start_offset, size, offset;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * 2;
			size = count * 2;

			indexbuf = NULL;
			u_upload_alloc(ctx->stream_uploader, start_offset,
				       size,
				       si_optimal_tcc_alignment(sctx, size),
				       &offset, &indexbuf, &ptr);
			if (!indexbuf)
				return;

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, info, 0, 0,
							   index_offset + start,
							   count, ptr);

			/* info->start will be added by the drawing code */
			index_offset = offset - start_offset;
			index_size = 2;
		} else if (info->has_user_indices) {
			unsigned start_offset;

			assert(!info->indirect);
			start_offset = info->start * index_size;

			indexbuf = NULL;
			u_upload_data(ctx->stream_uploader, start_offset,
				      info->count * index_size,
				      sctx->screen->b.info.tcc_cache_line_size,
				      (char*)info->index.user + start_offset,
				      &index_offset, &indexbuf);
			if (!indexbuf)
				return;

			/* info->start will be added by the drawing code */
			index_offset -= start_offset;
		} else if (sctx->b.chip_class <= CIK &&
			   r600_resource(indexbuf)->TC_L2_dirty) {
			/* VI reads index buffers through TC L2, so it doesn't
			 * need this. */
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(indexbuf)->TC_L2_dirty = false;
		}
	}

	if (info->indirect) {
		struct pipe_draw_indirect_info *indirect = info->indirect;

		/* Add the buffer size for memory checking in need_cs_space. */
		r600_context_add_resource_size(ctx, indirect->buffer);

		/* Indirect buffers use TC L2 on GFX9, but not older hw. */
		if (sctx->b.chip_class <= VI) {
			if (r600_resource(indirect->buffer)->TC_L2_dirty) {
				sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
				r600_resource(indirect->buffer)->TC_L2_dirty = false;
			}

			if (indirect->indirect_draw_count &&
			    r600_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
				sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
				r600_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
			}
		}
	}

	si_need_cs_space(sctx);

	if (unlikely(sctx->b.log))
		si_log_draw_state(sctx, sctx->b.log);

	/* Since we've called r600_context_add_resource_size for vertex buffers,
	 * this must be called after si_need_cs_space, because we must let
	 * need_cs_space flush before we add buffers to the buffer list.
	 */
	if (!si_upload_vertex_buffer_descriptors(sctx))
		return;

	/* GFX9 scissor bug workaround. This must be done before VPORT scissor
	 * registers are changed. There is also a more efficient but more
	 * involved alternative workaround.
	 */
	if (sctx->b.chip_class == GFX9 &&
	    si_is_atom_dirty(sctx, &sctx->b.scissors.atom)) {
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
		si_emit_cache_flush(sctx);
	}

	/* Use optimal packet order based on whether we need to sync the pipeline. */
	if (unlikely(sctx->b.flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
				      SI_CONTEXT_FLUSH_AND_INV_DB |
				      SI_CONTEXT_PS_PARTIAL_FLUSH |
				      SI_CONTEXT_CS_PARTIAL_FLUSH))) {
		/* If we have to wait for idle, set all states first, so that all
		 * SET packets are processed in parallel with previous draw calls.
		 * Then upload descriptors, set shader pointers, and draw, and
		 * prefetch at the end. This ensures that the time the CUs
		 * are idle is very short. (there are only SET_SH packets between
		 * the wait and the draw)
		 */
		struct r600_atom *shader_pointers = &sctx->shader_pointers.atom;

		/* Emit all states except shader pointers. */
		si_emit_all_states(sctx, info, 1 << shader_pointers->id);
		si_emit_cache_flush(sctx);

		/* <-- CUs are idle here. */
		if (!si_upload_graphics_shader_descriptors(sctx))
			return;

		/* Set shader pointers after descriptors are uploaded. */
		if (si_is_atom_dirty(sctx, shader_pointers)) {
			shader_pointers->emit(&sctx->b, NULL);
			sctx->dirty_atoms = 0;
		}

		si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
		/* <-- CUs are busy here. */

		/* Start prefetches after the draw has been started. Both will run
		 * in parallel, but starting the draw first is more important.
		 */
		if (sctx->b.chip_class >= CIK && sctx->prefetch_L2_mask)
			cik_emit_prefetch_L2(sctx);
	} else {
		/* If we don't wait for idle, start prefetches first, then set
		 * states, and draw at the end.
		 */
		if (sctx->b.flags)
			si_emit_cache_flush(sctx);

		if (sctx->b.chip_class >= CIK && sctx->prefetch_L2_mask)
			cik_emit_prefetch_L2(sctx);

		if (!si_upload_graphics_shader_descriptors(sctx))
			return;

		si_emit_all_states(sctx, info, 0);
		si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
	}

	if (unlikely(sctx->current_saved_cs))
		si_trace_emit(sctx);

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if ((sctx->b.family == CHIP_HAWAII ||
	     sctx->b.family == CHIP_TONGA ||
	     sctx->b.family == CHIP_FIJI) &&
	    r600_get_strmout_en(&sctx->b)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	if (unlikely(sctx->decompression_enabled)) {
		sctx->b.num_decompress_calls++;
	} else {
		sctx->b.num_draw_calls++;
		if (sctx->framebuffer.state.nr_cbufs > 1)
			sctx->b.num_mrt_draw_calls++;
		if (info->primitive_restart)
			sctx->b.num_prim_restart_calls++;
		if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
			sctx->b.num_spill_draw_calls++;
	}

	if (index_size && indexbuf != info->index.resource)
		pipe_resource_reference(&indexbuf, NULL);
}
void si_trace_emit(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint64_t va = sctx->current_saved_cs->trace_buf->gpu_address;
	uint32_t trace_id = ++sctx->current_saved_cs->trace_id;

	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(trace_id));

	if (sctx->b.log)
		u_log_flush(sctx->b.log);
}