/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */
#include "si_pipe.h"
#include "si_shader.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"
#include "util/u_memory.h"
static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST
	};
	assert(mode < ARRAY_SIZE(prim_conv));
	return prim_conv[mode];
}
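
/* Map a Gallium primitive type to the corresponding VGT_GS_OUT_PRIM_TYPE
 * value, i.e. the primitive class the rasterizer sees after geometry
 * processing: anything line-like becomes a line strip, anything
 * triangle-like a triangle strip, and points and patches stay points. */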
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}
/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
static void si_emit_derived_tess_state(struct si_context *sctx,
				       const struct pipe_draw_info *info,
				       unsigned *num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_ctx_state *ls = &sctx->vs_shader;
	/* The TES pointer will only be used for sctx->last_tcs.
	 * It would be wrong to think that TCS = TES. */
	struct si_shader_selector *tcs =
		sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
	unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
	unsigned num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
	unsigned input_patch_size, output_patch_size, output_patch0_offset;
	unsigned perpatch_output_offset, lds_size, ls_rsrc2;
	unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
	unsigned offchip_layout;

	*num_patches = 1; /* TODO: calculate this */

	if (sctx->last_ls == ls->current &&
	    sctx->last_tcs == tcs &&
	    sctx->last_tes_sh_base == tes_sh_base &&
	    sctx->last_num_tcs_input_cp == num_tcs_input_cp)
		return;

	sctx->last_ls = ls->current;
	sctx->last_tcs = tcs;
	sctx->last_tes_sh_base = tes_sh_base;
	sctx->last_num_tcs_input_cp = num_tcs_input_cp;

	/* This calculates how shader inputs and outputs among VS, TCS, and TES
	 * are laid out in LDS. */
	num_tcs_inputs = util_last_bit64(ls->cso->outputs_written);

	if (sctx->tcs_shader.cso) {
		num_tcs_outputs = util_last_bit64(tcs->outputs_written);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
	} else {
		/* No TCS. Route varyings from LS to TES. */
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	output_patch0_offset = sctx->tcs_shader.cso ? input_patch_size * *num_patches : 0;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	lds_size = output_patch0_offset + output_patch_size * *num_patches;
	ls_rsrc2 = ls->current->config.rsrc2;

	if (sctx->b.chip_class >= CIK) {
		assert(lds_size <= 65536);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 512) / 512);
	} else {
		assert(lds_size <= 32768);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 256) / 256);
	}

	/* Due to a hw bug, RSRC2_LS must be written twice with another
	 * LS register written in between. */
	if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, ls->current->config.rsrc1);
	radeon_emit(cs, ls_rsrc2);

	/* Compute userdata SGPRs. */
	assert(((input_vertex_size / 4) & ~0xff) == 0);
	assert(((output_vertex_size / 4) & ~0xff) == 0);
	assert(((input_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch0_offset / 16) & ~0xffff) == 0);
	assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
	assert(num_tcs_input_cp <= 32);
	assert(num_tcs_output_cp <= 32);

	tcs_in_layout = (input_patch_size / 4) |
			((input_vertex_size / 4) << 13);
	tcs_out_layout = (output_patch_size / 4) |
			 ((output_vertex_size / 4) << 13);
	tcs_out_offsets = (output_patch0_offset / 16) |
			  ((perpatch_output_offset / 16) << 16);
	offchip_layout = (pervertex_output_patch_size * *num_patches << 16) |
			 (num_tcs_output_cp << 9) | *num_patches;

	/* Set them for LS. */
	radeon_set_sh_reg(cs,
			  R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
			  tcs_in_layout);

	/* Set them for TCS. */
	radeon_set_sh_reg_seq(cs,
			      R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
	radeon_emit(cs, offchip_layout);
	radeon_emit(cs, tcs_out_offsets);
	radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
	radeon_emit(cs, tcs_in_layout);

	/* Set them for TES. */
	radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 1);
	radeon_emit(cs, offchip_layout);
}
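
/* Return the number of primitives produced by a given vertex count. Used by
 * si_get_ia_multi_vgt_param below to detect draws that yield at most one
 * primitive per instance. */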
static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
{
	switch (info->mode) {
	case PIPE_PRIM_PATCHES:
		return info->count / info->vertices_per_patch;
	case R600_PRIM_RECTANGLE_LIST:
		return info->count / 3;
	default:
		return u_prims_for_vertices(info->mode, info->count);
	}
}
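
/* Compute IA_MULTI_VGT_PARAM. Most of the logic below consists of hardware
 * requirements and bug workarounds that depend on the chip family, the
 * primitive type, and whether tessellation, a GS, instancing, or indirect
 * draws are involved. */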
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned num_patches)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	unsigned prim = info->mode;
	unsigned primgroup_size = 128; /* recommended without a GS */
	unsigned max_primgroup_in_wave = 2;

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;

	if (sctx->gs_shader.cso)
		primgroup_size = 64; /* recommended with a GS */

	if (sctx->tes_shader.cso) {
		unsigned num_cp_out =
			sctx->tcs_shader.cso ?
			sctx->tcs_shader.cso->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
			info->vertices_per_patch;
		unsigned max_size = 256 / MAX2(info->vertices_per_patch, num_cp_out);

		primgroup_size = MIN2(primgroup_size, max_size);

		/* primgroup_size must be set to a multiple of NUM_PATCHES */
		primgroup_size = (primgroup_size / num_patches) * num_patches;

		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if ((sctx->tcs_shader.cso && sctx->tcs_shader.cso->info.uses_primid) ||
		    sctx->tes_shader.cso->info.uses_primid)
			ia_switch_on_eoi = true;

		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((sctx->b.family == CHIP_TAHITI ||
		     sctx->b.family == CHIP_PITCAIRN ||
		     sctx->b.family == CHIP_BONAIRE) &&
		    sctx->gs_shader.cso)
			partial_vs_wave = true;
	}

	/* This is a hardware requirement. */
	if ((rs && rs->line_stipple_enable) ||
	    (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sctx->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (sctx->b.screen->info.max_se < 4 ||
		    prim == PIPE_PRIM_POLYGON ||
		    prim == PIPE_PRIM_LINE_LOOP ||
		    prim == PIPE_PRIM_TRIANGLE_FAN ||
		    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    info->primitive_restart ||
		    info->count_from_stream_output)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sctx->b.family == CHIP_HAWAII &&
		    (info->indirect || info->instance_count > 1))
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (sctx->b.screen->info.max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (sctx->b.family == CHIP_HAWAII ||
		     (sctx->b.chip_class == VI &&
		      (sctx->gs_shader.cso || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (sctx->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    (info->indirect || info->instance_count > 1))
			partial_vs_wave = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (ia_switch_on_eoi)
		partial_es_wave = true;

	/* GS requirement. */
	if (SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
		partial_es_wave = true;

	/* Hw bug with single-primitive instances and SWITCH_ON_EOI
	 * on multi-SE chips. */
	if (sctx->b.screen->info.max_se >= 2 && ia_switch_on_eoi &&
	    (info->indirect ||
	     (info->instance_count > 1 &&
	      si_num_prims_for_vertices(info) <= 1)))
		sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
		S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
		S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
		S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
		S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
		S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
		S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ?
					     max_primgroup_in_wave : 0);
}
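
/* Compute VGT_LS_HS_CONFIG: the number of patches in flight and the number
 * of control points per patch before and after the TCS. */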
static unsigned si_get_ls_hs_config(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned num_patches)
{
	unsigned num_output_cp;

	if (!sctx->tes_shader.cso)
		return 0;

	num_output_cp = sctx->tcs_shader.cso ?
		sctx->tcs_shader.cso->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
		info->vertices_per_patch;

	return S_028B58_NUM_PATCHES(num_patches) |
		S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) |
		S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
}
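
/* Re-emit SPI_TMPRING_SIZE and re-add the scratch buffer to the buffer list
 * after the scratch state has changed, so register-spilling shaders keep
 * pointing at valid memory. */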
static void si_emit_scratch_reloc(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	if (!sctx->emit_scratch_reloc)
		return;

	radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
			       sctx->spi_tmpring_size);

	if (sctx->scratch_buffer) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  sctx->scratch_buffer, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}
	sctx->emit_scratch_reloc = false;
}
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned rast_prim = sctx->current_rast_prim;
	struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == sctx->last_rast_prim &&
	    rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
		return;

	radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
		rs->pa_sc_line_stipple |
		S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 :
					 rast_prim == PIPE_PRIM_LINE_STRIP ? 2 : 0));

	sctx->last_rast_prim = rast_prim;
	sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}
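
/* Emit the VGT draw state registers: primitive type, IA_MULTI_VGT_PARAM,
 * LS_HS_CONFIG, the GS output primitive type, and primitive restart state.
 * Registers whose values haven't changed since the last draw are skipped. */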
static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
	unsigned ia_multi_vgt_param, ls_hs_config, num_patches = 0;

	if (sctx->tes_shader.cso)
		si_emit_derived_tess_state(sctx, info, &num_patches);

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
	ls_hs_config = si_get_ls_hs_config(sctx, info, num_patches);

	/* Draw state. */
	if (prim != sctx->last_prim ||
	    ia_multi_vgt_param != sctx->last_multi_vgt_param ||
	    ls_hs_config != sctx->last_ls_hs_config) {
		if (sctx->b.chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_DRAW_PREAMBLE, 2, 0));
			radeon_emit(cs, prim); /* VGT_PRIMITIVE_TYPE */
			radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
			radeon_emit(cs, ls_hs_config); /* VGT_LS_HS_CONFIG */
		} else {
			radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
			radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
			radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
		}
		sctx->last_prim = prim;
		sctx->last_multi_vgt_param = ia_multi_vgt_param;
		sctx->last_ls_hs_config = ls_hs_config;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
		sctx->last_primitive_restart_en = info->primitive_restart;
	}
	if (info->primitive_restart &&
	    (info->restart_index != sctx->last_restart_index ||
	     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
		radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       info->restart_index);
		sctx->last_restart_index = info->restart_index;
	}
}
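
/* Emit the draw packets themselves: the source of the vertex count (stream
 * output or an indirect buffer, if any), the index type, NUM_INSTANCES, the
 * base vertex / start instance user SGPRs, and finally the DRAW_* packet. */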
static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 const struct pipe_index_buffer *ib)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			    COPY_DATA_DST_SEL(COPY_DATA_REG) |
			    COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va);       /* src address lo */
		radeon_emit(cs, va >> 32); /* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0); /* unused */

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  t->buf_filled_size, RADEON_USAGE_READ,
					  RADEON_PRIO_SO_FILLED_SIZE);
	}

	/* draw packet */
	if (info->indexed) {
		radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));

		/* index type */
		switch (ib->index_size) {
		case 1:
			radeon_emit(cs, V_028A7C_VGT_INDEX_8);
			break;
		case 2:
			radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
				    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
					     V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
			break;
		case 4:
			radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
				    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
					     V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
			break;
		default:
			assert(!"unreachable");
			return;
		}
	}

	if (!info->indirect) {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = info->indexed ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_sh_base_reg = sh_base_reg;
		}
	} else {
		si_invalidate_draw_sh_constants(sctx);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)info->indirect,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
	}

	if (info->indexed) {
		uint32_t index_max_size = (ib->buffer->width0 - ib->offset) /
					  ib->index_size;
		uint64_t index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)ib->buffer,
					  RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);

		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

			assert(indirect_va % 8 == 0);
			assert(index_va % 2 == 0);
			assert(info->indirect_offset % 4 == 0);

			radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
			radeon_emit(cs, 1);
			radeon_emit(cs, indirect_va);
			radeon_emit(cs, indirect_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_INDIRECT, 3, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		} else {
			index_va += info->start * ib->index_size;

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
			radeon_emit(cs, index_max_size);
			radeon_emit(cs, index_va);
			radeon_emit(cs, (index_va >> 32UL) & 0xFF);
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		}
	} else {
		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

			assert(indirect_va % 8 == 0);
			assert(info->indirect_offset % 4 == 0);

			radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
			radeon_emit(cs, 1);
			radeon_emit(cs, indirect_va);
			radeon_emit(cs, indirect_va >> 32);

			radeon_emit(cs, PKT3(PKT3_DRAW_INDIRECT, 3, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX);
		} else {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
		}
	}
}
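
/* Flush and invalidate the caches requested in sctx->flags. The EVENT_WRITE
 * packets wait for the relevant blocks to go idle; SURFACE_SYNC then
 * performs the cache actions accumulated in cp_coher_cntl and must come
 * last. */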
void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
{
	struct r600_common_context *sctx = &si_ctx->b;
	struct radeon_winsys_cs *cs = sctx->gfx.cs;
	uint32_t cp_coher_cntl = 0;

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES, but that
	 * doesn't seem to work reliably. Since the bug doesn't affect
	 * correctness (it only does more work than necessary) and
	 * the performance impact is likely negligible, there is no plan
	 * to add a workaround for it.
	 */

	if (sctx->flags & SI_CONTEXT_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (sctx->flags & SI_CONTEXT_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (sctx->flags & SI_CONTEXT_INV_VMEM_L1)
		cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
	if (sctx->flags & SI_CONTEXT_INV_GLOBAL_L2) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);

		if (sctx->chip_class >= VI)
			cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
	}

	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);

		/* Necessary for DCC */
		if (sctx->chip_class >= VI) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_DATA_TS) |
					EVENT_INDEX(5));
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
			radeon_emit(cs, 0);
		}
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
		/* needed for wait for idle in SURFACE_SYNC */
		assert(sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB);
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
		/* needed for wait for idle in SURFACE_SYNC */
		assert(sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB);
	}

	/* Wait for shader engines to go idle.
	 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
	 * for everything including CB/DB cache flushes.
	 */
	if (!(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
			     SI_CONTEXT_FLUSH_AND_INV_DB))) {
		if (sctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
		} else if (sctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
		}
	}
	if (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	/* VGT state synchronization. */
	if (sctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if (cp_coher_cntl || (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH)) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	/* When one of the DEST_BASE flags is set, SURFACE_SYNC waits for idle.
	 * Therefore, it should be last. Done in PFP.
	 */
	if (cp_coher_cntl) {
		/* ACQUIRE_MEM is only required on a compute ring. */
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
		radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
		radeon_emit(cs, 0);             /* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
	}

	if (sctx->flags & R600_CONTEXT_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
				EVENT_INDEX(0));
	} else if (sctx->flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
				EVENT_INDEX(0));
	}

	sctx->flags = 0;
}
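
/* Return the draw's start vertex and vertex count. For indirect draws the
 * parameters live in a GPU buffer laid out as {count, instance_count,
 * start, ...}, so map the buffer (stalling until it's idle) and read them
 * back. */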
static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	if (info->indirect) {
		struct r600_resource *indirect =
			(struct r600_resource*)info->indirect;
		int *data = r600_buffer_map_sync_with_rings(&sctx->b,
					indirect, PIPE_TRANSFER_READ);
		data += info->indirect_offset/sizeof(int);
		*start = data[2];
		*count = data[0];
	} else {
		*start = info->start;
		*count = info->count;
	}
}
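
/* The constant engine (CE) runs ahead of the draw engine (DE). When the CE
 * may overwrite memory the DE hasn't consumed yet, the counter packets
 * below are used to make the two engines wait for each other around the
 * draw. */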
void si_ce_pre_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->ce_ib, PKT3(PKT3_INCREMENT_CE_COUNTER, 0, 0));
		radeon_emit(sctx->ce_ib, 1);

		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_WAIT_ON_CE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 1);
	}
}
void si_ce_post_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_INCREMENT_DE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 0);

		sctx->ce_need_synchronization = false;
	}
}
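
/* The pipe_context draw entry point: validates shaders and state, uploads
 * or translates the index buffer if necessary, emits all dirty atoms and
 * the draw packets, then applies post-draw workarounds and dirty
 * tracking. */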
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct pipe_index_buffer ib = {};
	unsigned mask, dirty_fb_counter;

	if (!info->count && !info->indirect &&
	    (info->indexed || !info->count_from_stream_output))
		return;

	if (!sctx->vs_shader.cso) {
		assert(0);
		return;
	}
	if (!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard)) {
		assert(0);
		return;
	}
	if (!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES)) {
		assert(0);
		return;
	}

	/* Re-emit the framebuffer state if needed. */
	dirty_fb_counter = p_atomic_read(&sctx->b.screen->dirty_fb_counter);
	if (dirty_fb_counter != sctx->b.last_dirty_fb_counter) {
		sctx->b.last_dirty_fb_counter = dirty_fb_counter;
		sctx->framebuffer.dirty_cbufs |=
			((1 << sctx->framebuffer.state.nr_cbufs) - 1);
		sctx->framebuffer.dirty_zsbuf = true;
		si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
	}

	si_decompress_graphics_textures(sctx);

	/* Set the rasterization primitive type.
	 *
	 * This must be done after si_decompress_textures, which can call
	 * draw_vbo recursively, and before si_update_shaders, which uses
	 * current_rast_prim for this draw_vbo call. */
	if (sctx->gs_shader.cso)
		sctx->current_rast_prim = sctx->gs_shader.cso->gs_output_prim;
	else if (sctx->tes_shader.cso)
		sctx->current_rast_prim =
			sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
	else
		sctx->current_rast_prim = info->mode;

	if (!si_update_shaders(sctx) ||
	    !si_upload_graphics_shader_descriptors(sctx))
		return;

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
		ib.user_buffer = sctx->index_buffer.user_buffer;
		ib.index_size = sctx->index_buffer.index_size;
		ib.offset = sctx->index_buffer.offset;

		/* Translate or upload, if needed. */
		/* 8-bit indices are supported on VI. */
		if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset, start, count, start_offset;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_alloc(sctx->b.uploader, start_offset, count * 2, 256,
				       &out_offset, &out_buffer, &ptr);
			if (!out_buffer) {
				pipe_resource_reference(&ib.buffer, NULL);
				return;
			}

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
							   ib.offset + start_offset,
							   count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			/* info->start will be added by the drawing code */
			ib.offset = out_offset - start_offset;
			ib.index_size = 2;
		} else if (ib.user_buffer && !ib.buffer) {
			unsigned start, count, start_offset;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
				      256, (char*)ib.user_buffer + start_offset,
				      &ib.offset, &ib.buffer);
			if (!ib.buffer)
				return;
			/* info->start will be added by the drawing code */
			ib.offset -= start_offset;
		}
	}

	/* VI reads index buffers through TC L2. */
	if (info->indexed && sctx->b.chip_class <= CIK &&
	    r600_resource(ib.buffer)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
		r600_resource(ib.buffer)->TC_L2_dirty = false;
	}

	/* Check flush flags. */
	if (sctx->b.flags)
		si_mark_atom_dirty(sctx, sctx->atoms.s.cache_flush);

	si_need_cs_space(sctx);

	/* Emit states. */
	mask = sctx->dirty_atoms;
	while (mask) {
		struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];

		atom->emit(&sctx->b, atom);
	}
	sctx->dirty_atoms = 0;

	si_pm4_emit_dirty(sctx);
	si_emit_scratch_reloc(sctx);
	si_emit_rasterizer_prim_state(sctx);
	si_emit_draw_registers(sctx, info);

	si_ce_pre_draw_synchronization(sctx);

	si_emit_draw_packets(sctx, info, &ib);

	si_ce_post_draw_synchronization(sctx);

	if (sctx->trace_buf)
		si_trace_emit(sctx);

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if ((sctx->b.family == CHIP_HAWAII ||
	     sctx->b.family == CHIP_TONGA ||
	     sctx->b.family == CHIP_FIJI) &&
	    r600_get_strmout_en(&sctx->b)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;

		if (rtex->surface.flags & RADEON_SURF_SBUFFER)
			rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture*)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
}
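
/* Write an incremented trace ID both to the trace buffer and, as a NOP
 * payload, into the command stream itself, so a hung command buffer can be
 * matched against the last trace point the GPU actually processed. */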
void si_trace_emit(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	sctx->trace_id++;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, sctx->trace_buf->gpu_address);
	radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
	radeon_emit(cs, sctx->trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, SI_ENCODE_TRACE_POINT(sctx->trace_id));
}