/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_build_pm4.h"

#include "util/u_index_modify.h"
#include "util/u_log.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"

/* special primitive types */
#define SI_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX

static unsigned si_conv_pipe_prim(unsigned mode)
{
   static const unsigned prim_conv[] = {
      [PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
      [PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
      [PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
      [PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
      [PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
      [PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
      [PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
      [PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
      [PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
      [PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
      [PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
      [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
      [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
      [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
      [PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
      [SI_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST,
   };
   assert(mode < ARRAY_SIZE(prim_conv));
   return prim_conv[mode];
}

/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
static void si_emit_derived_tess_state(struct si_context *sctx,
                                       const struct pipe_draw_info *info,
                                       unsigned *num_patches)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   struct si_shader *ls_current;
   struct si_shader_selector *ls;
   /* The TES pointer will only be used for sctx->last_tcs.
    * It would be wrong to think that TCS = TES. */
   struct si_shader_selector *tcs =
      sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
   unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
   bool has_primid_instancing_bug = sctx->chip_class == GFX6 &&
                                    sctx->screen->info.max_se == 1;
   unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
   unsigned num_tcs_input_cp = info->vertices_per_patch;
   unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
   unsigned num_tcs_patch_outputs;
   unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
   unsigned input_patch_size, output_patch_size, output_patch0_offset;
   unsigned perpatch_output_offset, lds_size;
   unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
   unsigned offchip_layout, hardware_lds_size, ls_hs_config;

   /* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
   if (sctx->chip_class >= GFX9) {
      if (sctx->tcs_shader.cso)
         ls_current = sctx->tcs_shader.current;
      else
         ls_current = sctx->fixed_func_tcs_shader.current;

      ls = ls_current->key.part.tcs.ls;
   } else {
      ls_current = sctx->vs_shader.current;
      ls = sctx->vs_shader.cso;
   }

   if (sctx->last_ls == ls_current &&
       sctx->last_tcs == tcs &&
       sctx->last_tes_sh_base == tes_sh_base &&
       sctx->last_num_tcs_input_cp == num_tcs_input_cp &&
       (!has_primid_instancing_bug ||
        (sctx->last_tess_uses_primid == tess_uses_primid))) {
      *num_patches = sctx->last_num_patches;
      return;
   }

   sctx->last_ls = ls_current;
   sctx->last_tcs = tcs;
   sctx->last_tes_sh_base = tes_sh_base;
   sctx->last_num_tcs_input_cp = num_tcs_input_cp;
   sctx->last_tess_uses_primid = tess_uses_primid;

   /* This calculates how shader inputs and outputs among VS, TCS, and TES
    * are laid out in LDS. */
   num_tcs_inputs = util_last_bit64(ls->outputs_written);

   if (sctx->tcs_shader.cso) {
      num_tcs_outputs = util_last_bit64(tcs->outputs_written);
      num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
      num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
   } else {
      /* No TCS. Route varyings from LS to TES. */
      num_tcs_outputs = num_tcs_inputs;
      num_tcs_output_cp = num_tcs_input_cp;
      num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
   }

   input_vertex_size = ls->lshs_vertex_stride;
   output_vertex_size = num_tcs_outputs * 16;

   input_patch_size = num_tcs_input_cp * input_vertex_size;

   pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
   output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
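
   /* Illustrative example (hypothetical shader, not from any app): with
    * 3 input CPs, 3 output CPs, 4 per-vertex TCS outputs and 2 patch
    * outputs, output_vertex_size = 4*16 = 64 bytes,
    * pervertex_output_patch_size = 3*64 = 192 bytes and
    * output_patch_size = 192 + 2*16 = 224 bytes. */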

   /* Ensure that we only need one wave per SIMD so we don't need to check
    * resource usage. Also ensures that the number of tcs in and out
    * vertices per threadgroup are at most 256.
    */
   unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
   *num_patches = 256 / max_verts_per_patch;

   /* Make sure that the data fits in LDS. This assumes the shaders only
    * use LDS for the inputs and outputs.
    *
    * While GFX7 can use 64K per threadgroup, there is a hang on Stoney
    * with 2 CUs if we use more than 32K. The closed Vulkan driver also
    * uses 32K at most on all GCN chips.
    */
   hardware_lds_size = 32768;
   *num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
                                                          output_patch_size));
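
   /* Continuing the example above: max_verts_per_patch = 3 allows
    * 256/3 = 85 patches, which the 32K LDS budget divided by
    * (input_patch_size + output_patch_size) may reduce further. */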

   /* Make sure the output data fits in the offchip buffer */
   *num_patches = MIN2(*num_patches,
                       (sctx->screen->tess_offchip_block_dw_size * 4) /
                       output_patch_size);

   /* Not necessary for correctness, but improves performance.
    * The hardware can do more, but the radeonsi shader constant is
    * limited to 6 bits.
    */
   *num_patches = MIN2(*num_patches, 63); /* triangles: 3 full waves except 3 lanes */

   /* When distributed tessellation is unsupported, switch between SEs
    * at a higher frequency to compensate for it.
    */
   if (!sctx->screen->has_distributed_tess && sctx->screen->info.max_se > 1)
      *num_patches = MIN2(*num_patches, 16); /* recommended */

   /* Make sure that vector lanes are reasonably occupied. It probably
    * doesn't matter much because this is LS-HS, and TES is likely to
    * occupy significantly more CUs.
    */
   unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
   if (temp_verts_per_tg > 64 && temp_verts_per_tg % 64 < 48)
      *num_patches = (temp_verts_per_tg & ~63) / max_verts_per_patch;
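
   /* Example of the rounding: 33 patches of 4 vertices give
    * temp_verts_per_tg = 132; since 132 % 64 = 4 < 48, the count is rounded
    * down to 128 verts = 32 patches, avoiding a mostly empty trailing wave. */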

   if (sctx->chip_class == GFX6) {
      /* GFX6 bug workaround, related to power management. Limit LS-HS
       * threadgroups to only one wave.
       */
      unsigned one_wave = 64 / max_verts_per_patch;
      *num_patches = MIN2(*num_patches, one_wave);
   }

   /* The VGT HS block increments the patch ID unconditionally
    * within a single threadgroup. This results in incorrect
    * patch IDs when instanced draws are used.
    *
    * The intended solution is to restrict threadgroups to
    * a single instance by setting SWITCH_ON_EOI, which
    * should cause IA to split instances up. However, this
    * doesn't work correctly on GFX6 when there is no other
    * SE to switch to.
    */
   if (has_primid_instancing_bug && tess_uses_primid)
      *num_patches = 1;

   sctx->last_num_patches = *num_patches;

   output_patch0_offset = input_patch_size * *num_patches;
   perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

   /* Compute userdata SGPRs. */
   assert(((input_vertex_size / 4) & ~0xff) == 0);
   assert(((output_vertex_size / 4) & ~0xff) == 0);
   assert(((input_patch_size / 4) & ~0x1fff) == 0);
   assert(((output_patch_size / 4) & ~0x1fff) == 0);
   assert(((output_patch0_offset / 16) & ~0xffff) == 0);
   assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
   assert(num_tcs_input_cp <= 32);
   assert(num_tcs_output_cp <= 32);

   uint64_t ring_va = si_resource(sctx->tess_rings)->gpu_address;
   assert((ring_va & u_bit_consecutive(0, 19)) == 0);

   tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
                   S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
   tcs_out_layout = (output_patch_size / 4) |
                    (num_tcs_input_cp << 13) |
                    ring_va;
   tcs_out_offsets = (output_patch0_offset / 16) |
                     ((perpatch_output_offset / 16) << 16);
   offchip_layout = *num_patches |
                    (num_tcs_output_cp << 6) |
                    (pervertex_output_patch_size * *num_patches << 12);
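
   /* The asserts above guarantee these values fit their bit-packed SGPR
    * encodings: offchip_layout keeps the patch count (<= 63) in bits [5:0]
    * and the output control-point count (<= 32) in bits [11:6], with the
    * per-vertex output size of all patches above bit 12; tcs_out_layout
    * relies on ring_va having its low 19 bits cleared so the ring address
    * can be OR'd in above the size and input-CP fields. */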

   /* Compute the LDS size. */
   lds_size = output_patch0_offset + output_patch_size * *num_patches;

   if (sctx->chip_class >= GFX7) {
      assert(lds_size <= 65536);
      lds_size = align(lds_size, 512) / 512;
   } else {
      assert(lds_size <= 32768);
      lds_size = align(lds_size, 256) / 256;
   }
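
   /* lds_size is now expressed in the granularity of the LDS_SIZE register
    * field: units of 512 bytes on GFX7+ and 256 bytes on GFX6, which is
    * what the rsrc2 fields programmed below expect. */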

   /* Set SI_SGPR_VS_STATE_BITS. */
   sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE &
                             C_VS_STATE_LS_OUT_VERTEX_SIZE;
   sctx->current_vs_state |= tcs_in_layout;

   if (sctx->chip_class >= GFX9) {
      unsigned hs_rsrc2 = ls_current->config.rsrc2 |
                          S_00B42C_LDS_SIZE(lds_size);

      radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);

      /* Set userdata SGPRs for merged LS-HS. */
      radeon_set_sh_reg_seq(cs,
                            R_00B430_SPI_SHADER_USER_DATA_LS_0 +
                            GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
      radeon_emit(cs, offchip_layout);
      radeon_emit(cs, tcs_out_offsets);
      radeon_emit(cs, tcs_out_layout);
   } else {
      unsigned ls_rsrc2 = ls_current->config.rsrc2;

      si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
      ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);

      /* Due to a hw bug, RSRC2_LS must be written twice with another
       * LS register written in between. */
      if (sctx->chip_class == GFX7 && sctx->family != CHIP_HAWAII)
         radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
      radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
      radeon_emit(cs, ls_current->config.rsrc1);
      radeon_emit(cs, ls_rsrc2);

      /* Set userdata SGPRs for TCS. */
      radeon_set_sh_reg_seq(cs,
                            R_00B430_SPI_SHADER_USER_DATA_HS_0 +
                            GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
      radeon_emit(cs, offchip_layout);
      radeon_emit(cs, tcs_out_offsets);
      radeon_emit(cs, tcs_out_layout);
      radeon_emit(cs, tcs_in_layout);
   }

   /* Set userdata SGPRs for TES. */
   radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
   radeon_emit(cs, offchip_layout);
   radeon_emit(cs, ring_va);

   ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
                  S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
                  S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);

   if (sctx->last_ls_hs_config != ls_hs_config) {
      if (sctx->chip_class >= GFX7) {
         radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
                                    ls_hs_config);
      } else {
         radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
                                ls_hs_config);
      }
      sctx->last_ls_hs_config = ls_hs_config;
      sctx->context_roll = true;
   }
}

static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info,
                                          enum pipe_prim_type prim)
{
   switch (prim) {
   case PIPE_PRIM_PATCHES:
      return info->count / info->vertices_per_patch;
   case PIPE_PRIM_POLYGON:
      return info->count >= 3; /* a polygon is a single primitive */
   case SI_PRIM_RECTANGLE_LIST:
      return info->count / 3; /* 3 vertices per rectangle */
   default:
      return u_decomposed_prims_for_vertices(prim, info->count);
   }
}

static unsigned
si_get_init_multi_vgt_param(struct si_screen *sscreen,
                            union si_vgt_param_key *key)
{
   STATIC_ASSERT(sizeof(union si_vgt_param_key) == 4);
   unsigned max_primgroup_in_wave = 2;

   /* SWITCH_ON_EOP(0) is always preferable. */
   bool wd_switch_on_eop = false;
   bool ia_switch_on_eop = false;
   bool ia_switch_on_eoi = false;
   bool partial_vs_wave = false;
   bool partial_es_wave = false;

   if (key->u.uses_tess) {
      /* SWITCH_ON_EOI must be set if PrimID is used. */
      if (key->u.tess_uses_prim_id)
         ia_switch_on_eoi = true;

      /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
      if ((sscreen->info.family == CHIP_TAHITI ||
           sscreen->info.family == CHIP_PITCAIRN ||
           sscreen->info.family == CHIP_BONAIRE) &&
          key->u.uses_gs)
         partial_vs_wave = true;

      /* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
      if (sscreen->has_distributed_tess) {
         if (key->u.uses_gs) {
            if (sscreen->info.chip_class == GFX8)
               partial_es_wave = true;
         } else {
            partial_vs_wave = true;
         }
      }
   }
365 if (key
->u
.line_stipple_enabled
||
366 (sscreen
->debug_flags
& DBG(SWITCH_ON_EOP
))) {
367 ia_switch_on_eop
= true;
368 wd_switch_on_eop
= true;
371 if (sscreen
->info
.chip_class
>= GFX7
) {
372 /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
373 * 4 shader engines. Set 1 to pass the assertion below.
374 * The other cases are hardware requirements.
376 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
377 * for points, line strips, and tri strips.
379 if (sscreen
->info
.max_se
<= 2 ||
380 key
->u
.prim
== PIPE_PRIM_POLYGON
||
381 key
->u
.prim
== PIPE_PRIM_LINE_LOOP
||
382 key
->u
.prim
== PIPE_PRIM_TRIANGLE_FAN
||
383 key
->u
.prim
== PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY
||
384 (key
->u
.primitive_restart
&&
385 (sscreen
->info
.family
< CHIP_POLARIS10
||
386 (key
->u
.prim
!= PIPE_PRIM_POINTS
&&
387 key
->u
.prim
!= PIPE_PRIM_LINE_STRIP
&&
388 key
->u
.prim
!= PIPE_PRIM_TRIANGLE_STRIP
))) ||
389 key
->u
.count_from_stream_output
)
390 wd_switch_on_eop
= true;

      /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
       * We don't know that for indirect drawing, so treat it as
       * always problematic. */
      if (sscreen->info.family == CHIP_HAWAII &&
          key->u.uses_instancing)
         wd_switch_on_eop = true;

      /* Performance recommendation for 4 SE Gfx7-8 parts if
       * instances are smaller than a primgroup.
       * Assume indirect draws always use small instances.
       * This is needed for good VS wave utilization.
       */
      if (sscreen->info.chip_class <= GFX8 &&
          sscreen->info.max_se == 4 &&
          key->u.multi_instances_smaller_than_primgroup)
         wd_switch_on_eop = true;

      /* Required on GFX7 and later. */
      if (sscreen->info.max_se == 4 && !wd_switch_on_eop)
         ia_switch_on_eoi = true;

      /* HW engineers suggested that PARTIAL_VS_WAVE_ON should be set
       * to work around a GS hang.
       */
      if (key->u.uses_gs &&
          (sscreen->info.family == CHIP_TONGA ||
           sscreen->info.family == CHIP_FIJI ||
           sscreen->info.family == CHIP_POLARIS10 ||
           sscreen->info.family == CHIP_POLARIS11 ||
           sscreen->info.family == CHIP_POLARIS12 ||
           sscreen->info.family == CHIP_VEGAM))
         partial_vs_wave = true;

      /* Required by Hawaii and, for some special cases, by GFX8. */
      if (ia_switch_on_eoi &&
          (sscreen->info.family == CHIP_HAWAII ||
           (sscreen->info.chip_class == GFX8 &&
            (key->u.uses_gs || max_primgroup_in_wave != 2))))
         partial_vs_wave = true;

      /* Instancing bug on Bonaire. */
      if (sscreen->info.family == CHIP_BONAIRE && ia_switch_on_eoi &&
          key->u.uses_instancing)
         partial_vs_wave = true;

      /* This only applies to Polaris10 and later 4 SE chips.
       * wd_switch_on_eop is already true on all other chips.
       */
      if (!wd_switch_on_eop && key->u.primitive_restart)
         partial_vs_wave = true;

      /* If the WD switch is false, the IA switch must be false too. */
      assert(wd_switch_on_eop || !ia_switch_on_eop);
   }

   /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
   if (sscreen->info.chip_class <= GFX8 && ia_switch_on_eoi)
      partial_es_wave = true;

   return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
          S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
          S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
          S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
          S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= GFX7 ? wd_switch_on_eop : 0) |
          /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
          S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == GFX8 ?
                                       max_primgroup_in_wave : 0) |
          S_030960_EN_INST_OPT_BASIC(sscreen->info.chip_class >= GFX9) |
          S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
}

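/* Precompute IA_MULTI_VGT_PARAM for all si_vgt_param_key combinations at
 * context creation, so that si_get_ia_multi_vgt_param only has to look the
 * value up and OR in the per-draw fields. */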
static void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
{
   for (int prim = 0; prim <= SI_PRIM_RECTANGLE_LIST; prim++)
   for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
   for (int multi_instances = 0; multi_instances < 2; multi_instances++)
   for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
   for (int count_from_so = 0; count_from_so < 2; count_from_so++)
   for (int line_stipple = 0; line_stipple < 2; line_stipple++)
   for (int uses_tess = 0; uses_tess < 2; uses_tess++)
   for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
   for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
      union si_vgt_param_key key;

      key.index = 0;
      key.u.prim = prim;
      key.u.uses_instancing = uses_instancing;
      key.u.multi_instances_smaller_than_primgroup = multi_instances;
      key.u.primitive_restart = primitive_restart;
      key.u.count_from_stream_output = count_from_so;
      key.u.line_stipple_enabled = line_stipple;
      key.u.uses_tess = uses_tess;
      key.u.tess_uses_prim_id = tess_uses_primid;
      key.u.uses_gs = uses_gs;

      sctx->ia_multi_vgt_param[key.index] =
         si_get_init_multi_vgt_param(sctx->screen, &key);
   }
}

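/* Select the precomputed IA_MULTI_VGT_PARAM value for this draw and add the
 * fields that can only be known at draw time (primgroup size, instancing,
 * primitive restart, stream-output-based draws). */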
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
                                          const struct pipe_draw_info *info,
                                          enum pipe_prim_type prim,
                                          unsigned num_patches,
                                          unsigned instance_count,
                                          bool primitive_restart)
{
   union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
   unsigned primgroup_size;
   unsigned ia_multi_vgt_param;

   if (sctx->tes_shader.cso) {
      primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
   } else if (sctx->gs_shader.cso) {
      primgroup_size = 64; /* recommended with a GS */
   } else {
      primgroup_size = 128; /* recommended without a GS and tess */
   }

   key.u.prim = prim;
   key.u.uses_instancing = info->indirect || instance_count > 1;
   key.u.multi_instances_smaller_than_primgroup =
      info->indirect ||
      (instance_count > 1 &&
       (info->count_from_stream_output ||
        si_num_prims_for_vertices(info, prim) < primgroup_size));
   key.u.primitive_restart = primitive_restart;
   key.u.count_from_stream_output = info->count_from_stream_output != NULL;

   ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
                        S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);

   if (sctx->gs_shader.cso) {
      /* GS requirement. */
      if (sctx->chip_class <= GFX8 &&
          SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
         ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);

      /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
       * The hw doc says all multi-SE chips are affected, but Vulkan
       * only applies it to Hawaii. Do what Vulkan does.
       */
      if (sctx->family == CHIP_HAWAII &&
          G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
          (info->indirect ||
           (instance_count > 1 &&
            (info->count_from_stream_output ||
             si_num_prims_for_vertices(info, prim) <= 1))))
         sctx->flags |= SI_CONTEXT_VGT_FLUSH;
   }

   return ia_multi_vgt_param;
}

/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   enum pipe_prim_type rast_prim = sctx->current_rast_prim;
   struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;

   /* Skip this if not rendering lines. */
   if (!util_prim_is_lines(rast_prim))
      return;

   if (rast_prim == sctx->last_rast_prim &&
       rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
      return;

   /* For lines, reset the stipple pattern at each primitive. Otherwise,
    * reset the stipple pattern at each packet (line strips, line loops).
    */
   radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
                          rs->pa_sc_line_stipple |
                          S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));
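
   /* AUTO_RESET_CNTL: 1 resets the stipple pattern for each primitive,
    * 2 resets it for each packet, matching the comment above. */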

   sctx->last_rast_prim = rast_prim;
   sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
   sctx->context_roll = true;
}

static void si_emit_vs_state(struct si_context *sctx,
                             const struct pipe_draw_info *info)
{
   sctx->current_vs_state &= C_VS_STATE_INDEXED;
   sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);

   if (sctx->num_vs_blit_sgprs) {
      /* Re-emit the state after we leave u_blitter. */
      sctx->last_vs_state = ~0;
      return;
   }

   if (sctx->current_vs_state != sctx->last_vs_state) {
      struct radeon_cmdbuf *cs = sctx->gfx_cs;

      /* For the API vertex shader (VS_STATE_INDEXED). */
      radeon_set_sh_reg(cs,
                        sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
                        SI_SGPR_VS_STATE_BITS * 4,
                        sctx->current_vs_state);

      /* For vertex color clamping, which is done in the last stage
       * before the rasterizer. */
      if (sctx->gs_shader.cso || sctx->tes_shader.cso) {
         /* GS copy shader or TES if GS is missing. */
         radeon_set_sh_reg(cs,
                           R_00B130_SPI_SHADER_USER_DATA_VS_0 +
                           SI_SGPR_VS_STATE_BITS * 4,
                           sctx->current_vs_state);
      }

      sctx->last_vs_state = sctx->current_vs_state;
   }
}

static inline bool si_prim_restart_index_changed(struct si_context *sctx,
                                                 bool primitive_restart,
                                                 unsigned restart_index)
{
   return primitive_restart &&
          (restart_index != sctx->last_restart_index ||
           sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
}

static void si_emit_draw_registers(struct si_context *sctx,
                                   const struct pipe_draw_info *info,
                                   enum pipe_prim_type prim,
                                   unsigned num_patches,
                                   unsigned instance_count,
                                   bool primitive_restart)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   unsigned vgt_prim = si_conv_pipe_prim(prim);
   unsigned ia_multi_vgt_param;

   ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, prim, num_patches,
                                                  instance_count, primitive_restart);

   if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
      if (sctx->chip_class >= GFX9)
         radeon_set_uconfig_reg_idx(cs, sctx->screen,
                                    R_030960_IA_MULTI_VGT_PARAM, 4,
                                    ia_multi_vgt_param);
      else if (sctx->chip_class >= GFX7)
         radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
      else
         radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);

      sctx->last_multi_vgt_param = ia_multi_vgt_param;
   }
   if (vgt_prim != sctx->last_prim) {
      if (sctx->chip_class >= GFX7)
         radeon_set_uconfig_reg_idx(cs, sctx->screen,
                                    R_030908_VGT_PRIMITIVE_TYPE, 1, vgt_prim);
      else
         radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, vgt_prim);

      sctx->last_prim = vgt_prim;
   }

   /* Primitive restart. */
   if (primitive_restart != sctx->last_primitive_restart_en) {
      if (sctx->chip_class >= GFX9)
         radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
                                primitive_restart);
      else
         radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
                                primitive_restart);

      sctx->last_primitive_restart_en = primitive_restart;
   }
   if (si_prim_restart_index_changed(sctx, primitive_restart, info->restart_index)) {
      radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
                             info->restart_index);
      sctx->last_restart_index = info->restart_index;
      sctx->context_roll = true;
   }
}

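/* Emit the draw itself: index type and buffer state, NUM_INSTANCES, the
 * base-vertex/start-instance/draw-ID user SGPRs, and a direct or indirect
 * draw packet. */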
static void si_emit_draw_packets(struct si_context *sctx,
                                 const struct pipe_draw_info *info,
                                 struct pipe_resource *indexbuf,
                                 unsigned index_size,
                                 unsigned index_offset,
                                 unsigned instance_count)
{
   struct pipe_draw_indirect_info *indirect = info->indirect;
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
   bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
   uint32_t index_max_size = 0;
   uint64_t index_va = 0;

   if (info->count_from_stream_output) {
      struct si_streamout_target *t =
         (struct si_streamout_target*)info->count_from_stream_output;

      radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
                             t->stride_in_dw);
      si_cp_copy_data(sctx, sctx->gfx_cs,
                      COPY_DATA_REG, NULL,
                      R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2,
                      COPY_DATA_SRC_MEM, t->buf_filled_size,
                      t->buf_filled_size_offset);
   }

   if (index_size) {
      if (index_size != sctx->last_index_size) {
         unsigned index_type;

         /* index type */
         switch (index_size) {
         case 1:
            index_type = V_028A7C_VGT_INDEX_8;
            break;
         case 2:
            index_type = V_028A7C_VGT_INDEX_16 |
                         (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ?
                          V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
            break;
         case 4:
            index_type = V_028A7C_VGT_INDEX_32 |
                         (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ?
                          V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
            break;
         default:
            assert(!"unreachable");
            return;
         }

         if (sctx->chip_class >= GFX9) {
            radeon_set_uconfig_reg_idx(cs, sctx->screen,
                                       R_03090C_VGT_INDEX_TYPE, 2,
                                       index_type);
         } else {
            radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
            radeon_emit(cs, index_type);
         }

         sctx->last_index_size = index_size;
      }

      index_max_size = (indexbuf->width0 - index_offset) /
                       index_size;
      index_va = si_resource(indexbuf)->gpu_address + index_offset;

      radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
                                si_resource(indexbuf),
                                RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
   } else {
      /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
       * so the state must be re-emitted before the next indexed draw.
       */
      if (sctx->chip_class >= GFX7)
         sctx->last_index_size = -1; /* never matches a valid index size */
   }

   if (indirect) {
      uint64_t indirect_va = si_resource(indirect->buffer)->gpu_address;

      assert(indirect_va % 8 == 0);

      si_invalidate_draw_sh_constants(sctx);

      radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
      radeon_emit(cs, 1); /* 1 = indirect draw base address */
      radeon_emit(cs, indirect_va);
      radeon_emit(cs, indirect_va >> 32);

      radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
                                si_resource(indirect->buffer),
                                RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

      unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
                                       : V_0287F0_DI_SRC_SEL_AUTO_INDEX;

      assert(indirect->offset % 4 == 0);

      if (index_size) {
         radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
         radeon_emit(cs, index_va);
         radeon_emit(cs, index_va >> 32);

         radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
         radeon_emit(cs, index_max_size);
      }

      if (!sctx->screen->has_draw_indirect_multi) {
         radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT
                                         : PKT3_DRAW_INDIRECT,
                              3, render_cond_bit));
         radeon_emit(cs, indirect->offset);
         radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
         radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
         radeon_emit(cs, di_src_sel);
      } else {
         uint64_t count_va = 0;

         if (indirect->indirect_draw_count) {
            struct si_resource *params_buf =
               si_resource(indirect->indirect_draw_count);

            radeon_add_to_buffer_list(
               sctx, sctx->gfx_cs, params_buf,
               RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

            count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
         }

         radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
                                           PKT3_DRAW_INDIRECT_MULTI,
                              8, render_cond_bit));
         radeon_emit(cs, indirect->offset);
         radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
         radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
         radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
                         S_2C3_DRAW_INDEX_ENABLE(1) |
                         S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
         radeon_emit(cs, indirect->draw_count);
         radeon_emit(cs, count_va);
         radeon_emit(cs, count_va >> 32);
         radeon_emit(cs, indirect->stride);
         radeon_emit(cs, di_src_sel);
      }
   } else {
      int base_vertex;

      if (sctx->last_instance_count == SI_INSTANCE_COUNT_UNKNOWN ||
          sctx->last_instance_count != instance_count) {
         radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
         radeon_emit(cs, instance_count);
         sctx->last_instance_count = instance_count;
      }

      /* Base vertex and start instance. */
      base_vertex = index_size ? info->index_bias : info->start;

      if (sctx->num_vs_blit_sgprs) {
         /* Re-emit draw constants after we leave u_blitter. */
         si_invalidate_draw_sh_constants(sctx);

         /* Blit VS doesn't use BASE_VERTEX, START_INSTANCE, and DRAWID. */
         radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_VS_BLIT_DATA * 4,
                               sctx->num_vs_blit_sgprs);
         radeon_emit_array(cs, sctx->vs_blit_sh_data,
                           sctx->num_vs_blit_sgprs);
      } else if (base_vertex != sctx->last_base_vertex ||
                 sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
                 info->start_instance != sctx->last_start_instance ||
                 info->drawid != sctx->last_drawid ||
                 sh_base_reg != sctx->last_sh_base_reg) {
         radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
         radeon_emit(cs, base_vertex);
         radeon_emit(cs, info->start_instance);
         radeon_emit(cs, info->drawid);

         sctx->last_base_vertex = base_vertex;
         sctx->last_start_instance = info->start_instance;
         sctx->last_drawid = info->drawid;
         sctx->last_sh_base_reg = sh_base_reg;
      }

      if (index_size) {
         index_va += info->start * index_size;

         radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
         radeon_emit(cs, index_max_size);
         radeon_emit(cs, index_va);
         radeon_emit(cs, index_va >> 32);
         radeon_emit(cs, info->count);
         radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
      } else {
         radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
         radeon_emit(cs, info->count);
         radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
                         S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
      }
   }
}

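/* Flush and invalidate caches and wait for the caches to go idle:
 * ACQUIRE_MEM on GFX9+ and on compute-only contexts, SURFACE_SYNC on older
 * graphics rings. */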
static void si_emit_surface_sync(struct si_context *sctx,
                                 unsigned cp_coher_cntl)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   if (sctx->chip_class >= GFX9 || !sctx->has_graphics) {
      /* Flush caches and wait for the caches to assert idle. */
      radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(cs, 0xffffff);      /* CP_COHER_SIZE_HI */
      radeon_emit(cs, 0);             /* CP_COHER_BASE */
      radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
      radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
   } else {
      /* ACQUIRE_MEM is only required on a compute ring. */
      radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
      radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(cs, 0);             /* CP_COHER_BASE */
      radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
   }

   /* ACQUIRE_MEM has an implicit context roll if the current context
    * is busy. */
   if (sctx->has_graphics)
      sctx->context_roll = true;
}

void si_emit_cache_flush(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   uint32_t flags = sctx->flags;

   if (!sctx->has_graphics) {
      /* Only process compute flags. */
      flags &= SI_CONTEXT_INV_ICACHE |
               SI_CONTEXT_INV_SMEM_L1 |
               SI_CONTEXT_INV_VMEM_L1 |
               SI_CONTEXT_INV_GLOBAL_L2 |
               SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
               SI_CONTEXT_INV_L2_METADATA |
               SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   uint32_t cp_coher_cntl = 0;
   uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
                                   SI_CONTEXT_FLUSH_AND_INV_DB);

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
      sctx->num_cb_cache_flushes++;
   if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
      sctx->num_db_cache_flushes++;

   /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
    * bit is set. An alternative way is to write SQC_CACHES, but that
    * doesn't seem to work reliably. Since the bug doesn't affect
    * correctness (it only does more work than necessary) and
    * the performance impact is likely negligible, there is no plan
    * to add a workaround for it.
    */
   if (flags & SI_CONTEXT_INV_ICACHE)
      cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
   if (flags & SI_CONTEXT_INV_SMEM_L1)
      cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

   if (sctx->chip_class <= GFX8) {
      if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
                          S_0085F0_CB0_DEST_BASE_ENA(1) |
                          S_0085F0_CB1_DEST_BASE_ENA(1) |
                          S_0085F0_CB2_DEST_BASE_ENA(1) |
                          S_0085F0_CB3_DEST_BASE_ENA(1) |
                          S_0085F0_CB4_DEST_BASE_ENA(1) |
                          S_0085F0_CB5_DEST_BASE_ENA(1) |
                          S_0085F0_CB6_DEST_BASE_ENA(1) |
                          S_0085F0_CB7_DEST_BASE_ENA(1);

         /* Necessary for DCC */
         if (sctx->chip_class == GFX8)
            si_cp_release_mem(sctx, cs,
                              V_028A90_FLUSH_AND_INV_CB_DATA_TS,
                              0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                              EOP_DATA_SEL_DISCARD, NULL,
                              0, 0, SI_NOT_QUERY);
      }
      if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
         cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
                          S_0085F0_DB_DEST_BASE_ENA(1);
   }

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
      /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
   }
   if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB |
                SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
      /* Flush HTILE. SURFACE_SYNC will wait for idle. */
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
   }

   /* Wait for shader engines to go idle.
    * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
    * for everything including CB/DB cache flushes.
    */
   if (!flush_cb_db) {
      if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         /* Only count explicit shader flushes, not implicit ones
          * done by SURFACE_SYNC.
          */
         sctx->num_vs_flushes++;
         sctx->num_ps_flushes++;
      } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         sctx->num_vs_flushes++;
      }
   }

   if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
       sctx->compute_is_busy) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
      sctx->num_cs_flushes++;
      sctx->compute_is_busy = false;
   }

   /* VGT state synchronization. */
   if (flags & SI_CONTEXT_VGT_FLUSH) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   }
   if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
   }

   /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
    * wait for idle on GFX9. We have to use a TS event.
    */
   if (sctx->chip_class >= GFX9 && flush_cb_db) {
      uint64_t va;
      unsigned tc_flags, cb_db_event;

      /* Set the CB/DB flush event. */
      switch (flush_cb_db) {
      case SI_CONTEXT_FLUSH_AND_INV_CB:
         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
         break;
      case SI_CONTEXT_FLUSH_AND_INV_DB:
         cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
         break;
      default:
         /* both CB & DB */
         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
      }

      /* These are the only allowed combinations. If you need to
       * do multiple operations at once, do them separately.
       * All operations that invalidate L2 also seem to invalidate
       * metadata. Volatile (VOL) and WC flushes are not listed here.
       *
       * TC | TC_WB = writeback & invalidate L2 & L1
       * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
       * TC_WB | TC_NC = writeback L2 for MTYPE == NC
       * TC | TC_NC = invalidate L2 for MTYPE == NC
       * TC | TC_MD = writeback & invalidate L2 metadata (DCC, etc.)
       * TCL1 = invalidate L1
       */
      tc_flags = 0;

      if (flags & SI_CONTEXT_INV_L2_METADATA) {
         tc_flags = EVENT_TC_ACTION_ENA |
                    EVENT_TC_MD_ACTION_ENA;
      }

      /* Ideally flush TC together with CB/DB. */
      if (flags & SI_CONTEXT_INV_GLOBAL_L2) {
         /* Writeback and invalidate everything in L2 & L1. */
         tc_flags = EVENT_TC_ACTION_ENA |
                    EVENT_TC_WB_ACTION_ENA;

         /* Clear the flags. */
         flags &= ~(SI_CONTEXT_INV_GLOBAL_L2 |
                    SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
                    SI_CONTEXT_INV_VMEM_L1);
         sctx->num_L2_invalidates++;
      }

      /* Do the flush (enqueue the event and wait for it). */
      va = sctx->wait_mem_scratch->gpu_address;
      sctx->wait_mem_number++;

      si_cp_release_mem(sctx, cs, cb_db_event, tc_flags,
                        EOP_DST_SEL_MEM,
                        EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
                        EOP_DATA_SEL_VALUE_32BIT,
                        sctx->wait_mem_scratch, va,
                        sctx->wait_mem_number, SI_NOT_QUERY);
      si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff,
                     WAIT_REG_MEM_EQUAL);
   }

   /* Make sure ME is idle (it executes most packets) before continuing.
    * This prevents read-after-write hazards between PFP and ME.
    */
   if (sctx->has_graphics &&
       (cp_coher_cntl ||
        (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
                  SI_CONTEXT_INV_VMEM_L1 |
                  SI_CONTEXT_INV_GLOBAL_L2 |
                  SI_CONTEXT_WRITEBACK_GLOBAL_L2)))) {
      radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(cs, 0);
   }

   /* When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
    * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
    *
    * cp_coher_cntl should contain all necessary flags except TC flags
    * at this point.
    *
    * GFX6-GFX7 don't support L2 write-back.
    */
   if (flags & SI_CONTEXT_INV_GLOBAL_L2 ||
       (sctx->chip_class <= GFX7 &&
        (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
      /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
       * WB must be set on GFX8+ when TC_ACTION is set.
       */
      si_emit_surface_sync(sctx, cp_coher_cntl |
                           S_0085F0_TC_ACTION_ENA(1) |
                           S_0085F0_TCL1_ACTION_ENA(1) |
                           S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
      cp_coher_cntl = 0;
      sctx->num_L2_invalidates++;
   } else {
      /* L1 invalidation and L2 writeback must be done separately,
       * because both operations can't be done together.
       */
      if (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
         /* WB = write-back
          * NC = apply to non-coherent MTYPEs
          *      (i.e. MTYPE <= 1, which is what we use everywhere)
          *
          * WB doesn't work without NC.
          */
         si_emit_surface_sync(sctx, cp_coher_cntl |
                              S_0301F0_TC_WB_ACTION_ENA(1) |
                              S_0301F0_TC_NC_ACTION_ENA(1));
         cp_coher_cntl = 0;
         sctx->num_L2_writebacks++;
      }
      if (flags & SI_CONTEXT_INV_VMEM_L1) {
         /* Invalidate per-CU VMEM L1. */
         si_emit_surface_sync(sctx, cp_coher_cntl |
                              S_0085F0_TCL1_ACTION_ENA(1));
         cp_coher_cntl = 0;
      }
   }

   /* If TC flushes haven't cleared this... */
   if (cp_coher_cntl)
      si_emit_surface_sync(sctx, cp_coher_cntl);

   if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
                      EVENT_INDEX(0));
   } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
                      EVENT_INDEX(0));
   }

   sctx->flags = 0;
}

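/* Return the [start, start+count) vertex range of a draw. For indirect
 * draws, the indirect buffer (and the optional draw-count buffer) is mapped
 * on the CPU and the union of all sub-draw ranges is returned. */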
static void si_get_draw_start_count(struct si_context *sctx,
                                    const struct pipe_draw_info *info,
                                    unsigned *start, unsigned *count)
{
   struct pipe_draw_indirect_info *indirect = info->indirect;

   if (indirect) {
      unsigned indirect_count;
      struct pipe_transfer *transfer;
      unsigned begin, end;
      unsigned map_size;
      unsigned *data;

      if (indirect->indirect_draw_count) {
         data = pipe_buffer_map_range(&sctx->b,
                                      indirect->indirect_draw_count,
                                      indirect->indirect_draw_count_offset,
                                      sizeof(unsigned),
                                      PIPE_TRANSFER_READ, &transfer);

         indirect_count = *data;

         pipe_buffer_unmap(&sctx->b, transfer);
      } else {
         indirect_count = indirect->draw_count;
      }

      if (!indirect_count) {
         *start = *count = 0;
         return;
      }

      map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
      data = pipe_buffer_map_range(&sctx->b, indirect->buffer,
                                   indirect->offset, map_size,
                                   PIPE_TRANSFER_READ, &transfer);

      begin = UINT_MAX;
      end = 0;
= 0; i
< indirect_count
; ++i
) {
1201 unsigned count
= data
[0];
1202 unsigned start
= data
[2];
1205 begin
= MIN2(begin
, start
);
1206 end
= MAX2(end
, start
+ count
);
1209 data
+= indirect
->stride
/ sizeof(unsigned);
1212 pipe_buffer_unmap(&sctx
->b
, transfer
);
1216 *count
= end
- begin
;
1218 *start
= *count
= 0;
1221 *start
= info
->start
;
1222 *count
= info
->count
;
static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
                               enum pipe_prim_type prim, unsigned instance_count,
                               bool primitive_restart, unsigned skip_atom_mask)
{
   unsigned num_patches = 0;

   si_emit_rasterizer_prim_state(sctx);
   if (sctx->tes_shader.cso)
      si_emit_derived_tess_state(sctx, info, &num_patches);

   /* Emit state atoms. */
   unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
   while (mask)
      sctx->atoms.array[u_bit_scan(&mask)].emit(sctx);

   sctx->dirty_atoms &= skip_atom_mask;

   /* Emit states. */
   mask = sctx->dirty_states;
   while (mask) {
      unsigned i = u_bit_scan(&mask);
      struct si_pm4_state *state = sctx->queued.array[i];

      if (!state || sctx->emitted.array[i] == state)
         continue;

      si_pm4_emit(sctx, state);
      sctx->emitted.array[i] = state;
   }
   sctx->dirty_states = 0;

   /* Emit draw states. */
   si_emit_vs_state(sctx, info);
   si_emit_draw_registers(sctx, info, prim, num_patches, instance_count,
                          primitive_restart);
}

static void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
   struct pipe_resource *indexbuf = info->index.resource;
   unsigned dirty_tex_counter;
   enum pipe_prim_type rast_prim, prim = info->mode;
   unsigned index_size = info->index_size;
   unsigned index_offset = info->indirect ? info->start * index_size : 0;
   unsigned instance_count = info->instance_count;
   bool primitive_restart = info->primitive_restart;

   if (likely(!info->indirect)) {
      /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
       * no workaround for indirect draws, but we can at least skip
       * direct draws.
       */
      if (unlikely(!instance_count))
         return;

      /* Handle count == 0. */
      if (unlikely(!info->count &&
                   (index_size || !info->count_from_stream_output)))
         return;
   }

   if (unlikely(!sctx->vs_shader.cso ||
                !rs ||
                (!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
                (!!sctx->tes_shader.cso != (prim == PIPE_PRIM_PATCHES)))) {
      assert(0);
      return;
   }

   /* Recompute and re-emit the texture resource states if needed. */
   dirty_tex_counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
   if (unlikely(dirty_tex_counter != sctx->last_dirty_tex_counter)) {
      sctx->last_dirty_tex_counter = dirty_tex_counter;
      sctx->framebuffer.dirty_cbufs |=
         ((1 << sctx->framebuffer.state.nr_cbufs) - 1);
      sctx->framebuffer.dirty_zsbuf = true;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
      si_update_all_texture_descriptors(sctx);
   }

   si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));

   /* Set the rasterization primitive type.
    *
    * This must be done after si_decompress_textures, which can call
    * draw_vbo recursively, and before si_update_shaders, which uses
    * current_rast_prim for this draw_vbo call. */
   if (sctx->gs_shader.cso)
      rast_prim = sctx->gs_shader.cso->gs_output_prim;
   else if (sctx->tes_shader.cso) {
      if (sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
         rast_prim = PIPE_PRIM_POINTS;
      else
         rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
   } else
      rast_prim = prim;

   if (rast_prim != sctx->current_rast_prim) {
      if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
          util_prim_is_points_or_lines(rast_prim))
         si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);

      sctx->current_rast_prim = rast_prim;
      sctx->do_update_shaders = true;
   }

   if (sctx->tes_shader.cso &&
       sctx->screen->has_ls_vgpr_init_bug) {
      /* Determine whether the LS VGPR fix should be applied.
       *
       * It is only required when num input CPs > num output CPs,
       * which cannot happen with the fixed function TCS. We should
       * also update this bit when switching from TCS to fixed
       * function TCS.
       */
      struct si_shader_selector *tcs = sctx->tcs_shader.cso;
      bool ls_vgpr_fix =
         tcs &&
         info->vertices_per_patch >
         tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];

      if (ls_vgpr_fix != sctx->ls_vgpr_fix) {
         sctx->ls_vgpr_fix = ls_vgpr_fix;
         sctx->do_update_shaders = true;
      }
   }

   if (sctx->gs_shader.cso) {
      /* Determine whether the GS triangle strip adjacency fix should
       * be applied. Rotate every other triangle if
       * - triangle strips with adjacency are fed to the GS and
       * - primitive restart is disabled (the rotation doesn't help
       *   when the restart occurs after an odd number of triangles).
       */
      bool gs_tri_strip_adj_fix =
         !sctx->tes_shader.cso &&
         prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
         !info->primitive_restart;

      if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
         sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
         sctx->do_update_shaders = true;
      }
   }

   if (sctx->do_update_shaders && !si_update_shaders(sctx))
      goto return_cleanup;

   if (index_size) {
      /* Translate or upload, if needed. */
      /* 8-bit indices are supported on GFX8. */
      if (sctx->chip_class <= GFX7 && index_size == 1) {
         unsigned start, count, start_offset, size, offset;
         void *ptr;

         si_get_draw_start_count(sctx, info, &start, &count);
         start_offset = start * 2;
         size = count * 2;

         indexbuf = NULL;
         u_upload_alloc(ctx->stream_uploader, start_offset,
                        size,
                        si_optimal_tcc_alignment(sctx, size),
                        &offset, &indexbuf, &ptr);
         if (!indexbuf)
            return;

         util_shorten_ubyte_elts_to_userptr(&sctx->b, info, 0, 0,
                                            index_offset + start,
                                            count, ptr);

         /* info->start will be added by the drawing code */
         index_offset = offset - start_offset;
         index_size = 2;
      } else if (info->has_user_indices) {
         unsigned start_offset;

         assert(!info->indirect);
         start_offset = info->start * index_size;

         indexbuf = NULL;
         u_upload_data(ctx->stream_uploader, start_offset,
                       info->count * index_size,
                       sctx->screen->info.tcc_cache_line_size,
                       (char*)info->index.user + start_offset,
                       &index_offset, &indexbuf);
         if (!indexbuf)
            return;

         /* info->start will be added by the drawing code */
         index_offset -= start_offset;
      } else if (sctx->chip_class <= GFX7 &&
                 si_resource(indexbuf)->TC_L2_dirty) {
         /* GFX8 reads index buffers through TC L2, so it doesn't
          * need this. */
         sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
         si_resource(indexbuf)->TC_L2_dirty = false;
      }
   }

   if (info->indirect) {
      struct pipe_draw_indirect_info *indirect = info->indirect;

      /* Add the buffer size for memory checking in need_cs_space. */
      si_context_add_resource_size(sctx, indirect->buffer);

      /* Indirect buffers use TC L2 on GFX9, but not older hw. */
      if (sctx->chip_class <= GFX8) {
         if (si_resource(indirect->buffer)->TC_L2_dirty) {
            sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
            si_resource(indirect->buffer)->TC_L2_dirty = false;
         }

         if (indirect->indirect_draw_count &&
             si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
            sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
            si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
         }
      }
   }

   si_need_gfx_cs_space(sctx);

   if (sctx->bo_list_add_all_gfx_resources)
      si_gfx_resources_add_all_to_bo_list(sctx);

   /* Since we've called si_context_add_resource_size for vertex buffers,
    * this must be called after si_need_cs_space, because we must let
    * need_cs_space flush before we add buffers to the buffer list.
    */
   if (!si_upload_vertex_buffer_descriptors(sctx))
      goto return_cleanup;

   /* Vega10/Raven scissor bug workaround. When any context register is
    * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
    * registers must be written too.
    */
   bool has_gfx9_scissor_bug = sctx->screen->has_gfx9_scissor_bug;
   unsigned masked_atoms = 0;

   if (has_gfx9_scissor_bug) {
      masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);

      if (info->count_from_stream_output ||
          sctx->dirty_atoms & si_atoms_that_always_roll_context() ||
          sctx->dirty_states & si_states_that_always_roll_context())
         sctx->context_roll = true;
   }

   /* Use optimal packet order based on whether we need to sync the pipeline. */
   if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
                               SI_CONTEXT_FLUSH_AND_INV_DB |
                               SI_CONTEXT_PS_PARTIAL_FLUSH |
                               SI_CONTEXT_CS_PARTIAL_FLUSH))) {
      /* If we have to wait for idle, set all states first, so that all
       * SET packets are processed in parallel with previous draw calls.
       * Then draw and prefetch at the end. This ensures that the time
       * the CUs are idle is very short.
       */
      if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
         masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);

      if (!si_upload_graphics_shader_descriptors(sctx))
         goto return_cleanup;

      /* Emit all states except possibly render condition. */
      si_emit_all_states(sctx, info, prim, instance_count,
                         primitive_restart, masked_atoms);
      si_emit_cache_flush(sctx);
      /* <-- CUs are idle here. */

      if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
         sctx->atoms.s.render_cond.emit(sctx);

      if (has_gfx9_scissor_bug &&
          (sctx->context_roll ||
           si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
         sctx->atoms.s.scissors.emit(sctx);

      sctx->dirty_atoms = 0;

      si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset,
                           instance_count);
      /* <-- CUs are busy here. */

      /* Start prefetches after the draw has been started. Both will run
       * in parallel, but starting the draw first is more important.
       */
      if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
         cik_emit_prefetch_L2(sctx, false);
   } else {
      /* If we don't wait for idle, start prefetches first, then set
       * states, and draw at the end.
       */
      si_emit_cache_flush(sctx);

      /* Only prefetch the API VS and VBO descriptors. */
      if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
         cik_emit_prefetch_L2(sctx, true);

      if (!si_upload_graphics_shader_descriptors(sctx))
         goto return_cleanup;

      si_emit_all_states(sctx, info, prim, instance_count,
                         primitive_restart, masked_atoms);

      if (has_gfx9_scissor_bug &&
          (sctx->context_roll ||
           si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
         sctx->atoms.s.scissors.emit(sctx);

      sctx->dirty_atoms = 0;

      si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset,
                           instance_count);

      /* Prefetch the remaining shaders after the draw has been
       * started. */
      if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
         cik_emit_prefetch_L2(sctx, false);
   }

   /* Clear the context roll flag after the draw call. */
   sctx->context_roll = false;

   if (unlikely(sctx->current_saved_cs)) {
      si_trace_emit(sctx);
      si_log_draw_state(sctx, sctx->log);
   }

   /* Workaround for a VGT hang when streamout is enabled.
    * It must be done after drawing. */
   if ((sctx->family == CHIP_HAWAII ||
        sctx->family == CHIP_TONGA ||
        sctx->family == CHIP_FIJI) &&
       si_get_strmout_en(sctx)) {
      sctx->flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
   }

   if (unlikely(sctx->decompression_enabled)) {
      sctx->num_decompress_calls++;
   } else {
      sctx->num_draw_calls++;
      if (sctx->framebuffer.state.nr_cbufs > 1)
         sctx->num_mrt_draw_calls++;
      if (primitive_restart)
         sctx->num_prim_restart_calls++;
      if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
         sctx->num_spill_draw_calls++;
   }

return_cleanup:
   if (index_size && indexbuf != info->index.resource)
      pipe_resource_reference(&indexbuf, NULL);
}

static void
si_draw_rectangle(struct blitter_context *blitter,
                  void *vertex_elements_cso,
                  blitter_get_vs_func get_vs,
                  int x1, int y1, int x2, int y2,
                  float depth, unsigned num_instances,
                  enum blitter_attrib_type type,
                  const union blitter_attrib *attrib)
{
   struct pipe_context *pipe = util_blitter_get_pipe(blitter);
   struct si_context *sctx = (struct si_context *)pipe;

   /* Pack position coordinates as signed int16. */
   sctx->vs_blit_sh_data[0] = (uint32_t)(x1 & 0xffff) |
                              ((uint32_t)(y1 & 0xffff) << 16);
   sctx->vs_blit_sh_data[1] = (uint32_t)(x2 & 0xffff) |
                              ((uint32_t)(y2 & 0xffff) << 16);
   sctx->vs_blit_sh_data[2] = fui(depth);

   switch (type) {
   case UTIL_BLITTER_ATTRIB_COLOR:
      memcpy(&sctx->vs_blit_sh_data[3], attrib->color,
             sizeof(attrib->color));
      break;
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
      memcpy(&sctx->vs_blit_sh_data[3], &attrib->texcoord,
             sizeof(attrib->texcoord));
      break;
   case UTIL_BLITTER_ATTRIB_NONE:;
   }

   pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));

   struct pipe_draw_info info = {};
   info.mode = SI_PRIM_RECTANGLE_LIST;
   info.count = 3; /* a rectangle is described by 3 vertices */
   info.instance_count = num_instances;

   /* Don't set per-stage shader pointers for VS. */
   sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(VERTEX);
   sctx->vertex_buffer_pointer_dirty = false;

   si_draw_vbo(pipe, &info);
}

void si_trace_emit(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   uint32_t trace_id = ++sctx->current_saved_cs->trace_id;

   si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf,
                    0, 4, V_370_MEM, V_370_ME, &trace_id);

   radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
   radeon_emit(cs, AC_ENCODE_TRACE_POINT(trace_id));

   if (sctx->log)
      u_log_flush(sctx->log);
}

void si_init_draw_functions(struct si_context *sctx)
{
   sctx->b.draw_vbo = si_draw_vbo;

   sctx->blitter->draw_rectangle = si_draw_rectangle;

   si_init_ia_multi_vgt_param_table(sctx);
}