/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
#include "si_pipe.h"
#include "sid.h"
#include "gfx9d.h"
#include "radeon/r600_cs.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"

#include "ac_debug.h"

static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST
	};
	assert(mode < ARRAY_SIZE(prim_conv));
	return prim_conv[mode];
}
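
/* The VGT_GS_OUT_PRIM_TYPE register only distinguishes points, line strips
 * and triangle strips, so every gallium primitive type collapses to one of
 * those output types in the table below.
 */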
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
static void si_emit_derived_tess_state(struct si_context *sctx,
				       const struct pipe_draw_info *info,
				       unsigned *num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_ctx_state *ls = &sctx->vs_shader;
	/* The TES pointer will only be used for sctx->last_tcs.
	 * It would be wrong to think that TCS = TES. */
	struct si_shader_selector *tcs =
		sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
	unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
	unsigned num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
	unsigned input_patch_size, output_patch_size, output_patch0_offset;
	unsigned perpatch_output_offset, lds_size, ls_rsrc2;
	unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
	unsigned offchip_layout, hardware_lds_size, ls_hs_config;

	if (sctx->last_ls == ls->current &&
	    sctx->last_tcs == tcs &&
	    sctx->last_tes_sh_base == tes_sh_base &&
	    sctx->last_num_tcs_input_cp == num_tcs_input_cp) {
		*num_patches = sctx->last_num_patches;
		return;
	}

	sctx->last_ls = ls->current;
	sctx->last_tcs = tcs;
	sctx->last_tes_sh_base = tes_sh_base;
	sctx->last_num_tcs_input_cp = num_tcs_input_cp;

	/* This calculates how shader inputs and outputs among VS, TCS, and TES
	 * are laid out in LDS. */
	num_tcs_inputs = util_last_bit64(ls->cso->outputs_written);

	if (sctx->tcs_shader.cso) {
		num_tcs_outputs = util_last_bit64(tcs->outputs_written);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
	} else {
		/* No TCS. Route varyings from LS to TES. */
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	*num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
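
	/* E.g. with triangle patches (3 input CPs) and a TCS writing 3 output
	 * CPs, this starts at 64 / 3 * 4 = 84 patches; the LDS, offchip and
	 * wave limits below can only lower this number.
	 */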

	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = sctx->b.chip_class >= CIK ? 65536 : 32768;
	*num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
							       output_patch_size));

	/* Make sure the output data fits in the offchip buffer */
	*num_patches = MIN2(*num_patches,
			    (sctx->screen->tess_offchip_block_dw_size * 4) /
			    output_patch_size);

	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	*num_patches = MIN2(*num_patches, 40);

	/* SI bug workaround - limit LS-HS threadgroups to only one wave. */
	if (sctx->b.chip_class == SI) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		*num_patches = MIN2(*num_patches, one_wave);
	}

	sctx->last_num_patches = *num_patches;

	output_patch0_offset = input_patch_size * *num_patches;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	lds_size = output_patch0_offset + output_patch_size * *num_patches;
	ls_rsrc2 = ls->current->config.rsrc2;

	if (sctx->b.chip_class >= CIK) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}
	si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
	ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);

	/* Due to a hw bug, RSRC2_LS must be written twice with another
	 * LS register written in between. */
	if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, ls->current->config.rsrc1);
	radeon_emit(cs, ls_rsrc2);

	/* Compute userdata SGPRs. */
	assert(((input_vertex_size / 4) & ~0xff) == 0);
	assert(((output_vertex_size / 4) & ~0xff) == 0);
	assert(((input_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch0_offset / 16) & ~0xffff) == 0);
	assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
	assert(num_tcs_input_cp <= 32);
	assert(num_tcs_output_cp <= 32);
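
	/* The layout words below are what the shaders read from user SGPRs;
	 * the sizes are in dwords and the offsets in tcs_out_offsets are in
	 * 16-byte units, which is why the divisions by 4 and 16 above have
	 * to fit into the asserted bitfield widths.
	 */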
	tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
			S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
	tcs_out_layout = (output_patch_size / 4) |
			 ((output_vertex_size / 4) << 13);
	tcs_out_offsets = (output_patch0_offset / 16) |
			  ((perpatch_output_offset / 16) << 16);
	offchip_layout = (pervertex_output_patch_size * *num_patches << 16) |
			 (num_tcs_output_cp << 9) | *num_patches;

	/* Set them for LS. */
	sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE &
				  C_VS_STATE_LS_OUT_VERTEX_SIZE;
	sctx->current_vs_state |= tcs_in_layout;

	/* Set them for TCS. */
	radeon_set_sh_reg_seq(cs,
		R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
	radeon_emit(cs, offchip_layout);
	radeon_emit(cs, tcs_out_offsets);
	radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
	radeon_emit(cs, tcs_in_layout);

	/* Set them for TES. */
	radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 1);
	radeon_emit(cs, offchip_layout);

	ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
		       S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		       S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);

	if (sctx->b.chip_class >= CIK)
		radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   ls_hs_config);
	else
		radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
				       ls_hs_config);
}

static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
{
	switch (info->mode) {
	case PIPE_PRIM_PATCHES:
		return info->count / info->vertices_per_patch;
	case R600_PRIM_RECTANGLE_LIST:
		return info->count / 3;
	default:
		return u_prims_for_vertices(info->mode, info->count);
	}
}
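
/* The initial IA_MULTI_VGT_PARAM value is precomputed for every combination
 * of draw properties (union si_vgt_param_key) at context creation, so the
 * draw path only needs a table lookup plus a few runtime adjustments.
 */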
static unsigned
si_get_init_multi_vgt_param(struct si_screen *sscreen,
			    union si_vgt_param_key *key)
{
	STATIC_ASSERT(sizeof(union si_vgt_param_key) == 4);
	unsigned max_primgroup_in_wave = 2;

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;

	if (key->u.uses_tess) {
		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if (key->u.tcs_tes_uses_prim_id)
			ia_switch_on_eoi = true;

		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((sscreen->b.family == CHIP_TAHITI ||
		     sscreen->b.family == CHIP_PITCAIRN ||
		     sscreen->b.family == CHIP_BONAIRE) &&
		    key->u.uses_gs)
			partial_vs_wave = true;

		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (sscreen->has_distributed_tess) {
			if (key->u.uses_gs) {
				partial_es_wave = true;

				/* GPU hang workaround. */
				if (sscreen->b.family == CHIP_TONGA ||
				    sscreen->b.family == CHIP_FIJI ||
				    sscreen->b.family == CHIP_POLARIS10 ||
				    sscreen->b.family == CHIP_POLARIS11)
					partial_vs_wave = true;
			} else {
				partial_vs_wave = true;
			}
		}
	}

	/* This is a hardware requirement. */
	if (key->u.line_stipple_enabled ||
	    (sscreen->b.debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sscreen->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements.
		 *
		 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
		 * for points, line strips, and tri strips.
		 */
		if (sscreen->b.info.max_se < 4 ||
		    key->u.prim == PIPE_PRIM_POLYGON ||
		    key->u.prim == PIPE_PRIM_LINE_LOOP ||
		    key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
		    key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    (key->u.primitive_restart &&
		     (sscreen->b.family < CHIP_POLARIS10 ||
		      (key->u.prim != PIPE_PRIM_POINTS &&
		       key->u.prim != PIPE_PRIM_LINE_STRIP &&
		       key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
		    key->u.count_from_stream_output)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sscreen->b.family == CHIP_HAWAII &&
		    key->u.uses_instancing)
			wd_switch_on_eop = true;

		/* Performance recommendation for 4 SE Gfx7-8 parts if
		 * instances are smaller than a primgroup.
		 * Assume indirect draws always use small instances.
		 * This is needed for good VS wave utilization.
		 */
		if (sscreen->b.chip_class <= VI &&
		    sscreen->b.info.max_se == 4 &&
		    key->u.multi_instances_smaller_than_primgroup)
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (sscreen->b.info.max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (sscreen->b.family == CHIP_HAWAII ||
		     (sscreen->b.chip_class == VI &&
		      (key->u.uses_gs || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (sscreen->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    key->u.uses_instancing)
			partial_vs_wave = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (ia_switch_on_eoi)
		partial_es_wave = true;

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
		S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
		S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
		S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
		S_028AA8_WD_SWITCH_ON_EOP(sscreen->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
		S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->b.chip_class == VI ?
					     max_primgroup_in_wave : 0) |
		S_030960_EN_INST_OPT_BASIC(sscreen->b.chip_class >= GFX9) |
		S_030960_EN_INST_OPT_ADV(sscreen->b.chip_class >= GFX9);
}
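
/* key.index is the raw 32-bit view of the key.u bitfields (hence the
 * STATIC_ASSERT above), which is what lets the exhaustive enumeration below
 * double as a direct index into sctx->ia_multi_vgt_param.
 */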
void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
{
	for (int prim = 0; prim <= R600_PRIM_RECTANGLE_LIST; prim++)
	for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
	for (int multi_instances = 0; multi_instances < 2; multi_instances++)
	for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
	for (int count_from_so = 0; count_from_so < 2; count_from_so++)
	for (int line_stipple = 0; line_stipple < 2; line_stipple++)
	for (int uses_tess = 0; uses_tess < 2; uses_tess++)
	for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
	for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
		union si_vgt_param_key key;

		key.index = 0;
		key.u.prim = prim;
		key.u.uses_instancing = uses_instancing;
		key.u.multi_instances_smaller_than_primgroup = multi_instances;
		key.u.primitive_restart = primitive_restart;
		key.u.count_from_stream_output = count_from_so;
		key.u.line_stipple_enabled = line_stipple;
		key.u.uses_tess = uses_tess;
		key.u.tcs_tes_uses_prim_id = tess_uses_primid;
		key.u.uses_gs = uses_gs;

		sctx->ia_multi_vgt_param[key.index] =
			si_get_init_multi_vgt_param(sctx->screen, &key);
	}
}

static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned num_patches)
{
	union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
	unsigned primgroup_size;
	unsigned ia_multi_vgt_param;

	if (sctx->tes_shader.cso) {
		primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
	} else if (sctx->gs_shader.cso) {
		primgroup_size = 64; /* recommended with a GS */
	} else {
		primgroup_size = 128; /* recommended without a GS and tess */
	}

	key.u.prim = info->mode;
	key.u.uses_instancing = info->indirect || info->instance_count > 1;
	key.u.multi_instances_smaller_than_primgroup =
		info->indirect ||
		(info->instance_count > 1 &&
		 (info->count_from_stream_output ||
		  si_num_prims_for_vertices(info) < primgroup_size));
	key.u.primitive_restart = info->primitive_restart;
	key.u.count_from_stream_output = info->count_from_stream_output != NULL;

	ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
			     S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);

	if (sctx->gs_shader.cso) {
		/* GS requirement. */
		if (SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
			ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);

		/* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
		 * The hw doc says all multi-SE chips are affected, but Vulkan
		 * only applies it to Hawaii. Do what Vulkan does.
		 */
		if (sctx->b.family == CHIP_HAWAII &&
		    G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
		    (info->indirect ||
		     (info->instance_count > 1 &&
		      (info->count_from_stream_output ||
		       si_num_prims_for_vertices(info) <= 1))))
			sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
	}

	return ia_multi_vgt_param;
}

/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	enum pipe_prim_type rast_prim = sctx->current_rast_prim;
	struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == sctx->last_rast_prim &&
	    rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
		return;

	/* For lines, reset the stipple pattern at each primitive. Otherwise,
	 * reset the stipple pattern at each packet (line strips, line loops).
	 */
	radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
		rs->pa_sc_line_stipple |
		S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));

	sctx->last_rast_prim = rast_prim;
	sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}

static void si_emit_vs_state(struct si_context *sctx,
			     const struct pipe_draw_info *info)
{
	sctx->current_vs_state &= C_VS_STATE_INDEXED;
	sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->indexed);

	if (sctx->current_vs_state != sctx->last_vs_state) {
		struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

		radeon_set_sh_reg(cs,
			sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX] +
			SI_SGPR_VS_STATE_BITS * 4,
			sctx->current_vs_state);

		sctx->last_vs_state = sctx->current_vs_state;
	}
}

static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info,
				   unsigned num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
	unsigned ia_multi_vgt_param;

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);

	/* Draw state. */
	if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
		if (sctx->b.chip_class >= GFX9)
			radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param);
		else if (sctx->b.chip_class >= CIK)
			radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
		else
			radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);

		sctx->last_multi_vgt_param = ia_multi_vgt_param;
	}
	if (prim != sctx->last_prim) {
		if (sctx->b.chip_class >= CIK)
			radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
		else
			radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);

		sctx->last_prim = prim;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		if (sctx->b.chip_class >= GFX9)
			radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
					       info->primitive_restart);
		else
			radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
					       info->primitive_restart);

		sctx->last_primitive_restart_en = info->primitive_restart;
	}
	if (info->primitive_restart &&
	    (info->restart_index != sctx->last_restart_index ||
	     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
		radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       info->restart_index);
		sctx->last_restart_index = info->restart_index;
	}
}

static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 const struct pipe_index_buffer *ib)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
	uint32_t index_max_size = 0;
	uint64_t index_va = 0;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			    COPY_DATA_DST_SEL(COPY_DATA_REG) |
			    COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va); /* src address lo */
		radeon_emit(cs, va >> 32); /* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0); /* unused */

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  t->buf_filled_size, RADEON_USAGE_READ,
					  RADEON_PRIO_SO_FILLED_SIZE);
	}
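
	/* For opaque draws (DRAW_INDEX_AUTO with USE_OPAQUE at the bottom of
	 * this function), the VGT divides the filled size copied above by
	 * VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE to derive the vertex count.
	 */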

	/* index buffer */
	if (info->indexed) {
		if (ib->index_size != sctx->last_index_size) {
			unsigned index_type;

			/* index type */
			switch (ib->index_size) {
			case 1:
				index_type = V_028A7C_VGT_INDEX_8;
				break;
			case 2:
				index_type = V_028A7C_VGT_INDEX_16 |
					     (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						      V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
				break;
			case 4:
				index_type = V_028A7C_VGT_INDEX_32 |
					     (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						      V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
				break;
			default:
				assert(!"unreachable");
				return;
			}

			if (sctx->b.chip_class >= GFX9) {
				radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
							   2, index_type);
			} else {
				radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
				radeon_emit(cs, index_type);
			}

			sctx->last_index_size = ib->index_size;
		}

		index_max_size = (ib->buffer->width0 - ib->offset) /
				  ib->index_size;
		index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      (struct r600_resource *)ib->buffer,
				      RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
	} else {
		/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
		 * so the state must be re-emitted before the next indexed draw.
		 */
		if (sctx->b.chip_class >= CIK)
			sctx->last_index_size = -1;
	}

	if (info->indirect) {
		uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

		assert(indirect_va % 8 == 0);

		si_invalidate_draw_sh_constants(sctx);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, 1);
		radeon_emit(cs, indirect_va);
		radeon_emit(cs, indirect_va >> 32);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)info->indirect,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		unsigned di_src_sel = info->indexed ? V_0287F0_DI_SRC_SEL_DMA
						    : V_0287F0_DI_SRC_SEL_AUTO_INDEX;

		assert(info->indirect_offset % 4 == 0);

		if (info->indexed) {
			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);
		}

		if (!sctx->screen->has_draw_indirect_multi) {
			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT
							   : PKT3_DRAW_INDIRECT,
					     3, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, di_src_sel);
		} else {
			uint64_t count_va = 0;

			if (info->indirect_params) {
				struct r600_resource *params_buf =
					(struct r600_resource *)info->indirect_params;

				radeon_add_to_buffer_list(
					&sctx->b, &sctx->b.gfx, params_buf,
					RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

				count_va = params_buf->gpu_address + info->indirect_params_offset;
			}

			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
							     PKT3_DRAW_INDIRECT_MULTI,
					     8, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
					S_2C3_DRAW_INDEX_ENABLE(1) |
					S_2C3_COUNT_INDIRECT_ENABLE(!!info->indirect_params));
			radeon_emit(cs, info->indirect_count);
			radeon_emit(cs, count_va);
			radeon_emit(cs, count_va >> 32);
			radeon_emit(cs, info->indirect_stride);
			radeon_emit(cs, di_src_sel);
		}
	} else {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = info->indexed ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    info->drawid != sctx->last_drawid ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);
			radeon_emit(cs, info->drawid);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_drawid = info->drawid;
			sctx->last_sh_base_reg = sh_base_reg;
		}

		if (info->indexed) {
			index_va += info->start * ib->index_size;

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
			radeon_emit(cs, index_max_size);
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		} else {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
		}
	}
}

static void si_emit_surface_sync(struct r600_common_context *rctx,
				 unsigned cp_coher_cntl)
{
	struct radeon_winsys_cs *cs = rctx->gfx.cs;

	if (rctx->chip_class >= GFX9) {
		/* Flush caches and wait for the caches to assert idle. */
		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0xffffff);	/* CP_COHER_SIZE_HI */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	} else {
		/* ACQUIRE_MEM is only required on a compute ring. */
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
		radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
		radeon_emit(cs, 0);		/* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
	}
}
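
/* Emit all pending flushes recorded in sctx->b.flags: EVENT_WRITE-based
 * cache flushes and idle waits first, SURFACE_SYNC / ACQUIRE_MEM last,
 * because it is executed by the PFP and itself waits for idle when any
 * DEST_BASE bit is set.
 */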
void si_emit_cache_flush(struct si_context *sctx)
{
	struct r600_common_context *rctx = &sctx->b;
	struct radeon_winsys_cs *cs = rctx->gfx.cs;
	uint32_t cp_coher_cntl = 0;
	uint32_t flush_cb_db = rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
					      SI_CONTEXT_FLUSH_AND_INV_DB);

	if (rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
			   SI_CONTEXT_FLUSH_AND_INV_DB))
		sctx->b.num_fb_cache_flushes++;

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES, but that
	 * doesn't seem to work reliably. Since the bug doesn't affect
	 * correctness (it only does more work than necessary) and
	 * the performance impact is likely negligible, there is no plan
	 * to add a workaround for it.
	 */

	if (rctx->flags & SI_CONTEXT_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (rctx->flags & SI_CONTEXT_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (rctx->chip_class <= VI) {
		if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
			cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
					 S_0085F0_CB0_DEST_BASE_ENA(1) |
					 S_0085F0_CB1_DEST_BASE_ENA(1) |
					 S_0085F0_CB2_DEST_BASE_ENA(1) |
					 S_0085F0_CB3_DEST_BASE_ENA(1) |
					 S_0085F0_CB4_DEST_BASE_ENA(1) |
					 S_0085F0_CB5_DEST_BASE_ENA(1) |
					 S_0085F0_CB6_DEST_BASE_ENA(1) |
					 S_0085F0_CB7_DEST_BASE_ENA(1);

			/* Necessary for DCC */
			if (rctx->chip_class == VI)
				r600_gfx_write_event_eop(rctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
							 0, 0, NULL, 0, 0, 0);
		}
		if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB)
			cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
					 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		/* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		/* Flush HTILE. SURFACE_SYNC will wait for idle. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}

	/* Wait for shader engines to go idle.
	 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
	 * for everything including CB/DB cache flushes.
	 */
	if (!flush_cb_db) {
		if (rctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			/* Only count explicit shader flushes, not implicit ones
			 * done by SURFACE_SYNC.
			 */
			rctx->num_vs_flushes++;
			rctx->num_ps_flushes++;
		} else if (rctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			rctx->num_vs_flushes++;
		}
	}

	if (rctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
	    sctx->compute_is_busy) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(4)));
		rctx->num_cs_flushes++;
		sctx->compute_is_busy = false;
	}

	/* VGT state synchronization. */
	if (rctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (rctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
	 * wait for idle on GFX9. We have to use a TS event.
	 */
	if (sctx->b.chip_class >= GFX9 && flush_cb_db) {
		struct r600_resource *rbuf = NULL;
		uint64_t va;
		unsigned offset = 0, tc_flags, cb_db_event;

		/* Set the CB/DB flush event. */
		switch (flush_cb_db) {
		case SI_CONTEXT_FLUSH_AND_INV_CB:
			cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
			break;
		case SI_CONTEXT_FLUSH_AND_INV_DB:
			cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
			break;
		default:
			/* both CB & DB */
			cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
		}

		/* TC | TC_WB = invalidate L2 data
		 * TC_MD | TC_WB = invalidate L2 metadata
		 * TC | TC_WB | TC_MD = invalidate L2 data & metadata
		 *
		 * The metadata cache must always be invalidated for coherency
		 * between CB/DB and shaders. (metadata = HTILE, CMASK, DCC)
		 *
		 * TC must be invalidated on GFX9 only if the CB/DB surface is
		 * not pipe-aligned. If the surface is RB-aligned, it might not
		 * strictly be pipe-aligned since RB alignment takes precendence.
		 */
		tc_flags = EVENT_TC_WB_ACTION_ENA |
			   EVENT_TC_MD_ACTION_ENA;

		/* Ideally flush TC together with CB/DB. */
		if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2) {
			tc_flags |= EVENT_TC_ACTION_ENA |
				    EVENT_TCL1_ACTION_ENA;

			/* Clear the flags. */
			rctx->flags &= ~(SI_CONTEXT_INV_GLOBAL_L2 |
					 SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
					 SI_CONTEXT_INV_VMEM_L1);
		}

		/* Allocate memory for the fence. */
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 4, 4,
				     &offset, (struct pipe_resource**)&rbuf);
		va = rbuf->gpu_address + offset;

		r600_gfx_write_event_eop(rctx, cb_db_event, tc_flags, 1,
					 rbuf, va, 0, 1);
		r600_gfx_wait_fence(rctx, va, 1, 0xffffffff);
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if (cp_coher_cntl ||
	    (rctx->flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
			    SI_CONTEXT_INV_VMEM_L1 |
			    SI_CONTEXT_INV_GLOBAL_L2 |
			    SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	/* When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
	 * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
	 *
	 * cp_coher_cntl should contain all necessary flags except TC flags
	 * at this point.
	 *
	 * SI-CIK don't support L2 write-back.
	 */
	if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2 ||
	    (rctx->chip_class <= CIK &&
	     (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		/* Invalidate L1 & L2. (L1 is always invalidated on SI)
		 * WB must be set on VI+ when TC_ACTION is set.
		 */
		si_emit_surface_sync(rctx, cp_coher_cntl |
				     S_0085F0_TC_ACTION_ENA(1) |
				     S_0085F0_TCL1_ACTION_ENA(1) |
				     S_0301F0_TC_WB_ACTION_ENA(rctx->chip_class >= VI));
		cp_coher_cntl = 0;
		sctx->b.num_L2_invalidates++;
	} else {
		/* L1 invalidation and L2 writeback must be done separately,
		 * because both operations can't be done together.
		 */
		if (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
			/* WB = write-back
			 * NC = apply to non-coherent MTYPEs
			 *      (i.e. MTYPE <= 1, which is what we use everywhere)
			 *
			 * WB doesn't work without NC.
			 */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0301F0_TC_WB_ACTION_ENA(1) |
					     S_0301F0_TC_NC_ACTION_ENA(1));
			cp_coher_cntl = 0;
			sctx->b.num_L2_writebacks++;
		}
		if (rctx->flags & SI_CONTEXT_INV_VMEM_L1) {
			/* Invalidate per-CU VMEM L1. */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0085F0_TCL1_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
	}

	/* If TC flushes haven't cleared this... */
	if (cp_coher_cntl)
		si_emit_surface_sync(rctx, cp_coher_cntl);

	if (rctx->flags & R600_CONTEXT_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
				EVENT_INDEX(0));
	} else if (rctx->flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
				EVENT_INDEX(0));
	}

	rctx->flags = 0;
}
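
/* Compute the vertex range [*start, *start + *count) covered by a draw.
 * For indirect draws, this maps the indirect (and optional draw-count)
 * buffer on the CPU and takes the union of all sub-draw ranges.
 */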
static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	if (info->indirect) {
		unsigned indirect_count;
		struct pipe_transfer *transfer;
		unsigned begin, end;
		unsigned map_size;
		unsigned *data;

		if (info->indirect_params) {
			data = pipe_buffer_map_range(&sctx->b.b,
					info->indirect_params,
					info->indirect_params_offset,
					sizeof(unsigned),
					PIPE_TRANSFER_READ, &transfer);

			indirect_count = *data;

			pipe_buffer_unmap(&sctx->b.b, transfer);
		} else {
			indirect_count = info->indirect_count;
		}

		if (!indirect_count) {
			*start = *count = 0;
			return;
		}

		map_size = (indirect_count - 1) * info->indirect_stride + 3 * sizeof(unsigned);
		data = pipe_buffer_map_range(&sctx->b.b, info->indirect,
					     info->indirect_offset, map_size,
					     PIPE_TRANSFER_READ, &transfer);

		begin = UINT_MAX;
		end = 0;

		for (unsigned i = 0; i < indirect_count; ++i) {
			unsigned count = data[0];
			unsigned start = data[2];

			if (count > 0) {
				begin = MIN2(begin, start);
				end = MAX2(end, start + count);
			}

			data += info->indirect_stride / sizeof(unsigned);
		}

		pipe_buffer_unmap(&sctx->b.b, transfer);

		if (begin < end) {
			*start = begin;
			*count = end - begin;
		} else {
			*start = *count = 0;
		}
	} else {
		*start = info->start;
		*count = info->count;
	}
}
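
/* The CE (constant engine) runs ahead of the DE (draw engine) and uploads
 * descriptors asynchronously. Before a draw, the DE has to wait until the
 * CE data it depends on has actually landed (WAIT_ON_CE_COUNTER); after the
 * draw, incrementing the DE counter lets the CE reuse that memory.
 */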
void si_ce_pre_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->ce_ib, PKT3(PKT3_INCREMENT_CE_COUNTER, 0, 0));
		radeon_emit(sctx->ce_ib, 1);

		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_WAIT_ON_CE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 1);
	}
}

void si_ce_post_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_INCREMENT_DE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 0);

		sctx->ce_need_synchronization = false;
	}
}

void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	const struct pipe_index_buffer *ib = &sctx->index_buffer;
	struct pipe_index_buffer ib_tmp; /* for index buffer uploads only */
	unsigned mask, dirty_tex_counter;
	enum pipe_prim_type rast_prim;
	unsigned num_patches = 0;

	if (likely(!info->indirect)) {
		/* SI-CI treat instance_count==0 as instance_count==1. There is
		 * no workaround for indirect draws, but we can at least skip
		 * direct draws.
		 */
		if (unlikely(!info->instance_count))
			return;

		/* Handle count == 0. */
		if (unlikely(!info->count &&
			     (info->indexed || !info->count_from_stream_output)))
			return;
	}

	if (unlikely(!sctx->vs_shader.cso)) {
		assert(0);
		return;
	}
	if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
		assert(0);
		return;
	}
	if (unlikely(!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES))) {
		assert(0);
		return;
	}

	/* Recompute and re-emit the texture resource states if needed. */
	dirty_tex_counter = p_atomic_read(&sctx->b.screen->dirty_tex_counter);
	if (unlikely(dirty_tex_counter != sctx->b.last_dirty_tex_counter)) {
		sctx->b.last_dirty_tex_counter = dirty_tex_counter;
		sctx->framebuffer.dirty_cbufs |=
			((1 << sctx->framebuffer.state.nr_cbufs) - 1);
		sctx->framebuffer.dirty_zsbuf = true;
		sctx->framebuffer.do_update_surf_dirtiness = true;
		si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
		si_update_all_texture_descriptors(sctx);
	}

	si_decompress_graphics_textures(sctx);

	/* Set the rasterization primitive type.
	 *
	 * This must be done after si_decompress_textures, which can call
	 * draw_vbo recursively, and before si_update_shaders, which uses
	 * current_rast_prim for this draw_vbo call. */
	if (sctx->gs_shader.cso)
		rast_prim = sctx->gs_shader.cso->gs_output_prim;
	else if (sctx->tes_shader.cso)
		rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
	else
		rast_prim = info->mode;

	if (rast_prim != sctx->current_rast_prim) {
		sctx->current_rast_prim = rast_prim;
		sctx->do_update_shaders = true;
	}

	if (sctx->gs_shader.cso) {
		/* Determine whether the GS triangle strip adjacency fix should
		 * be applied. Rotate every other triangle if
		 * - triangle strips with adjacency are fed to the GS and
		 * - primitive restart is disabled (the rotation doesn't help
		 *   when the restart occurs after an odd number of triangles).
		 */
		bool gs_tri_strip_adj_fix =
			!sctx->tes_shader.cso &&
			info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
			!info->primitive_restart;

		if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
			sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
			sctx->do_update_shaders = true;
		}
	}

	if (sctx->do_update_shaders && !si_update_shaders(sctx))
		return;

	if (!si_upload_graphics_shader_descriptors(sctx))
		return;

	ib_tmp.buffer = NULL;

	if (info->indexed) {
		/* Translate or upload, if needed. */
		/* 8-bit indices are supported on VI. */
		if (sctx->b.chip_class <= CIK && ib->index_size == 1) {
			unsigned start, count, start_offset, size;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * 2;
			size = count * 2;

			u_upload_alloc(ctx->stream_uploader, start_offset,
				       size,
				       si_optimal_tcc_alignment(sctx, size),
				       &ib_tmp.offset, &ib_tmp.buffer, &ptr);
			if (!ib_tmp.buffer)
				return;

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, ib, 0, 0,
							   ib->offset + start,
							   count, ptr);

			/* info->start will be added by the drawing code */
			ib_tmp.offset -= start_offset;
			ib_tmp.index_size = 2;
			ib = &ib_tmp;
		} else if (ib->user_buffer && !ib->buffer) {
			unsigned start_offset;

			assert(!info->indirect);
			start_offset = info->start * ib->index_size;

			u_upload_data(ctx->stream_uploader, start_offset,
				      info->count * ib->index_size,
				      sctx->screen->b.info.tcc_cache_line_size,
				      (char*)ib->user_buffer + start_offset,
				      &ib_tmp.offset, &ib_tmp.buffer);
			if (!ib_tmp.buffer)
				return;

			/* info->start will be added by the drawing code */
			ib_tmp.offset -= start_offset;
			ib_tmp.index_size = ib->index_size;
			ib = &ib_tmp;
		} else if (sctx->b.chip_class <= CIK &&
			   r600_resource(ib->buffer)->TC_L2_dirty) {
			/* VI reads index buffers through TC L2, so it doesn't
			 * need this. */
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(ib->buffer)->TC_L2_dirty = false;
		}
	}

	if (info->indirect) {
		/* Add the buffer size for memory checking in need_cs_space. */
		r600_context_add_resource_size(ctx, info->indirect);

		if (r600_resource(info->indirect)->TC_L2_dirty) {
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(info->indirect)->TC_L2_dirty = false;
		}

		if (info->indirect_params &&
		    r600_resource(info->indirect_params)->TC_L2_dirty) {
			sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			r600_resource(info->indirect_params)->TC_L2_dirty = false;
		}
	}

	si_need_cs_space(sctx);

	/* Since we've called r600_context_add_resource_size for vertex buffers,
	 * this must be called after si_need_cs_space, because we must let
	 * need_cs_space flush before we add buffers to the buffer list.
	 */
	if (!si_upload_vertex_buffer_descriptors(sctx))
		return;

	/* GFX9 scissor bug workaround. There is also a more efficient but
	 * more involved alternative workaround. */
	if (sctx->b.chip_class == GFX9 &&
	    si_is_atom_dirty(sctx, &sctx->b.scissors.atom))
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;

	/* Flush caches before the first state atom, which does L2 prefetches. */
	if (sctx->b.flags)
		si_emit_cache_flush(sctx);

	/* Emit state atoms. */
	mask = sctx->dirty_atoms;
	while (mask) {
		struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];

		atom->emit(&sctx->b, atom);
	}
	sctx->dirty_atoms = 0;

	/* Emit states. */
	mask = sctx->dirty_states;
	while (mask) {
		unsigned i = u_bit_scan(&mask);
		struct si_pm4_state *state = sctx->queued.array[i];

		if (!state || sctx->emitted.array[i] == state)
			continue;

		si_pm4_emit(sctx, state);
		sctx->emitted.array[i] = state;
	}
	sctx->dirty_states = 0;

	si_emit_rasterizer_prim_state(sctx);
	if (sctx->tes_shader.cso)
		si_emit_derived_tess_state(sctx, info, &num_patches);
	si_emit_vs_state(sctx, info);
	si_emit_draw_registers(sctx, info, num_patches);

	si_ce_pre_draw_synchronization(sctx);
	si_emit_draw_packets(sctx, info, ib);
	si_ce_post_draw_synchronization(sctx);

	if (sctx->trace_buf)
		si_trace_emit(sctx);

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if ((sctx->b.family == CHIP_HAWAII ||
	     sctx->b.family == CHIP_TONGA ||
	     sctx->b.family == CHIP_FIJI) &&
	    r600_get_strmout_en(&sctx->b)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	if (sctx->framebuffer.do_update_surf_dirtiness) {
		/* Set the depth buffer as dirty. */
		if (sctx->framebuffer.state.zsbuf) {
			struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
			struct r600_texture *rtex = (struct r600_texture *)surf->texture;

			if (!rtex->tc_compatible_htile)
				rtex->dirty_level_mask |= 1 << surf->u.tex.level;

			if (rtex->surface.flags & RADEON_SURF_SBUFFER)
				rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
		}
		if (sctx->framebuffer.compressed_cb_mask) {
			struct pipe_surface *surf;
			struct r600_texture *rtex;
			unsigned mask = sctx->framebuffer.compressed_cb_mask;

			do {
				unsigned i = u_bit_scan(&mask);
				surf = sctx->framebuffer.state.cbufs[i];
				rtex = (struct r600_texture*)surf->texture;

				if (rtex->fmask.size)
					rtex->dirty_level_mask |= 1 << surf->u.tex.level;
				if (rtex->dcc_gather_statistics)
					rtex->separate_dcc_dirty = true;
			} while (mask);
		}
		sctx->framebuffer.do_update_surf_dirtiness = false;
	}

	pipe_resource_reference(&ib_tmp.buffer, NULL);
	sctx->b.num_draw_calls++;
	if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
		sctx->b.num_spill_draw_calls++;
}
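
/* Write an incrementing trace ID to the trace buffer after each draw and
 * also encode it into a NOP packet, so that a GPU hang dump can show both
 * the last ID that reached memory and the position of each draw in the IB.
 */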
void si_trace_emit(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	sctx->trace_id++;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, sctx->trace_buf->gpu_address);
	radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
	radeon_emit(cs, sctx->trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(sctx->trace_id));
}