/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include "si_pipe.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"

static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST,
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
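/* Editor's note on the LDS layout implied by the offset math below: all
 * input patches come first ([0, input_patch_size * num_patches)), followed
 * by all output patches; within each output patch, the per-vertex outputs
 * precede the per-patch outputs. */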
static void si_emit_derived_tess_state(struct si_context *sctx,
				       const struct pipe_draw_info *info,
				       unsigned *num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_ctx_state *ls = &sctx->vs_shader;
	/* The TES pointer will only be used for sctx->last_tcs.
	 * It would be wrong to think that TCS = TES. */
	struct si_shader_selector *tcs =
		sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
	unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
	unsigned num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
	unsigned input_patch_size, output_patch_size, output_patch0_offset;
	unsigned perpatch_output_offset, lds_size, ls_rsrc2;
	unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
	unsigned offchip_layout, hardware_lds_size, ls_hs_config;

	if (sctx->last_ls == ls->current &&
	    sctx->last_tcs == tcs &&
	    sctx->last_tes_sh_base == tes_sh_base &&
	    sctx->last_num_tcs_input_cp == num_tcs_input_cp) {
		*num_patches = sctx->last_num_patches;
		return;
	}

	sctx->last_ls = ls->current;
	sctx->last_tcs = tcs;
	sctx->last_tes_sh_base = tes_sh_base;
	sctx->last_num_tcs_input_cp = num_tcs_input_cp;

	/* This calculates how shader inputs and outputs among VS, TCS, and TES
	 * are laid out in LDS. */
	num_tcs_inputs = util_last_bit64(ls->cso->outputs_written);

	if (sctx->tcs_shader.cso) {
		num_tcs_outputs = util_last_bit64(tcs->outputs_written);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
	} else {
		/* No TCS. Route varyings from LS to TES. */
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	*num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
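
	/* E.g. 3 input and 3 output control points per patch yield
	 * 64 / 3 * 4 = 84 patches here, before the LDS, offchip and
	 * one-wave clamps below reduce the number further. */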

	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = sctx->b.chip_class >= CIK ? 65536 : 32768;
	*num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
							       output_patch_size));

	/* Make sure the output data fits in the offchip buffer */
	*num_patches = MIN2(*num_patches,
			    (sctx->screen->tess_offchip_block_dw_size * 4) /
			    output_patch_size);

	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	*num_patches = MIN2(*num_patches, 40);

	/* SI bug workaround - limit LS-HS threadgroups to only one wave. */
	if (sctx->b.chip_class == SI) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		*num_patches = MIN2(*num_patches, one_wave);
	}

	sctx->last_num_patches = *num_patches;

	output_patch0_offset = input_patch_size * *num_patches;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	lds_size = output_patch0_offset + output_patch_size * *num_patches;
	ls_rsrc2 = ls->current->config.rsrc2;

	if (sctx->b.chip_class >= CIK) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}
	si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
	ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);

	/* Due to a hw bug, RSRC2_LS must be written twice with another
	 * LS register written in between. */
	if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, ls->current->config.rsrc1);
	radeon_emit(cs, ls_rsrc2);

	/* Compute userdata SGPRs. */
	assert(((input_vertex_size / 4) & ~0xff) == 0);
	assert(((output_vertex_size / 4) & ~0xff) == 0);
	assert(((input_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch0_offset / 16) & ~0xffff) == 0);
	assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
	assert(num_tcs_input_cp <= 32);
	assert(num_tcs_output_cp <= 32);

	tcs_in_layout = (input_patch_size / 4) |
			((input_vertex_size / 4) << 13);
	tcs_out_layout = (output_patch_size / 4) |
			 ((output_vertex_size / 4) << 13);
	tcs_out_offsets = (output_patch0_offset / 16) |
			  ((perpatch_output_offset / 16) << 16);
	offchip_layout = (pervertex_output_patch_size * *num_patches << 16) |
			 (num_tcs_output_cp << 9) | *num_patches;
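
	/* The four dwords computed above are the non-compile-time parameters
	 * mentioned in the function comment: they land in user SGPRs (set
	 * below for LS, TCS and TES), where the shaders read the LDS and
	 * offchip layouts at run time. */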

	/* Set them for LS. */
	radeon_set_sh_reg(cs,
		R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
		tcs_in_layout);

	/* Set them for TCS. */
	radeon_set_sh_reg_seq(cs,
		R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
	radeon_emit(cs, offchip_layout);
	radeon_emit(cs, tcs_out_offsets);
	radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
	radeon_emit(cs, tcs_in_layout);

	/* Set them for TES. */
	radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 1);
	radeon_emit(cs, offchip_layout);

	ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
		       S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		       S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);

	if (sctx->b.chip_class >= CIK)
		radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   ls_hs_config);
	else
		radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
				       ls_hs_config);
}

static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
{
	switch (info->mode) {
	case PIPE_PRIM_PATCHES:
		return info->count / info->vertices_per_patch;
	case R600_PRIM_RECTANGLE_LIST:
		return info->count / 3;
	default:
		return u_prims_for_vertices(info->mode, info->count);
	}
}

static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned num_patches)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	unsigned prim = info->mode;
	unsigned primgroup_size = 128; /* recommended without a GS */
	unsigned max_primgroup_in_wave = 2;

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;

	if (sctx->gs_shader.cso)
		primgroup_size = 64; /* recommended with a GS */

	if (sctx->tes_shader.cso) {
		/* primgroup_size must be set to a multiple of NUM_PATCHES */
		primgroup_size = num_patches;

		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if ((sctx->tcs_shader.cso && sctx->tcs_shader.cso->info.uses_primid) ||
		    sctx->tes_shader.cso->info.uses_primid)
			ia_switch_on_eoi = true;

		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((sctx->b.family == CHIP_TAHITI ||
		     sctx->b.family == CHIP_PITCAIRN ||
		     sctx->b.family == CHIP_BONAIRE) &&
		    sctx->gs_shader.cso)
			partial_vs_wave = true;

		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (sctx->screen->has_distributed_tess) {
			if (sctx->gs_shader.cso) {
				partial_es_wave = true;

				/* GPU hang workaround. */
				if (sctx->b.family == CHIP_TONGA ||
				    sctx->b.family == CHIP_FIJI ||
				    sctx->b.family == CHIP_POLARIS10 ||
				    sctx->b.family == CHIP_POLARIS11)
					partial_vs_wave = true;
			} else {
				partial_vs_wave = true;
			}
		}
	}

	/* This is a hardware requirement. */
	if ((rs && rs->line_stipple_enable) ||
	    (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sctx->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements.
		 *
		 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
		 * for points, line strips, and tri strips.
		 */
		if (sctx->b.screen->info.max_se < 4 ||
		    prim == PIPE_PRIM_POLYGON ||
		    prim == PIPE_PRIM_LINE_LOOP ||
		    prim == PIPE_PRIM_TRIANGLE_FAN ||
		    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    (info->primitive_restart &&
		     (sctx->b.family < CHIP_POLARIS10 ||
		      (prim != PIPE_PRIM_POINTS &&
		       prim != PIPE_PRIM_LINE_STRIP &&
		       prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
		    info->count_from_stream_output)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sctx->b.family == CHIP_HAWAII &&
		    (info->indirect || info->instance_count > 1))
			wd_switch_on_eop = true;

		/* Performance recommendation for 4 SE Gfx7-8 parts if
		 * instances are smaller than a primgroup.
		 * Assume indirect draws always use small instances.
		 * This is needed for good VS wave utilization.
		 */
		if (sctx->b.chip_class <= VI &&
		    sctx->b.screen->info.max_se >= 4 &&
		    (info->indirect ||
		     (info->instance_count > 1 &&
		      si_num_prims_for_vertices(info) < primgroup_size)))
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (sctx->b.screen->info.max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (sctx->b.family == CHIP_HAWAII ||
		     (sctx->b.chip_class == VI &&
		      (sctx->gs_shader.cso || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (sctx->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    (info->indirect || info->instance_count > 1))
			partial_vs_wave = true;

		/* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
		 * The hw doc says all multi-SE chips are affected, but Vulkan
		 * only applies it to Hawaii. Do what Vulkan does.
		 */
		if (sctx->b.family == CHIP_HAWAII &&
		    sctx->gs_shader.cso &&
		    ia_switch_on_eoi &&
		    (info->indirect ||
		     (info->instance_count > 1 &&
		      si_num_prims_for_vertices(info) <= 1)))
			sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (ia_switch_on_eoi)
		partial_es_wave = true;

	/* GS requirement. */
	if (SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
		partial_es_wave = true;
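
	/* Everything computed above is packed into one IA_MULTI_VGT_PARAM
	 * dword. PRIMGROUP_SIZE is programmed minus one, and the WD and
	 * MAX_PRIMGRP_IN_WAVE fields only exist on CIK+ and VI+ respectively,
	 * hence the chip_class checks below. */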

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
		S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
		S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
		S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
		S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
		S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
		S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ?
					     max_primgroup_in_wave : 0);
}

static void si_emit_scratch_reloc(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	if (!sctx->emit_scratch_reloc)
		return;

	radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
			       sctx->spi_tmpring_size);

	if (sctx->scratch_buffer) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      sctx->scratch_buffer, RADEON_USAGE_READWRITE,
				      RADEON_PRIO_SCRATCH_BUFFER);
	}
	sctx->emit_scratch_reloc = false;
}

/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned rast_prim = sctx->current_rast_prim;
	struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == sctx->last_rast_prim &&
	    rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
		return;

	/* For lines, reset the stipple pattern at each primitive. Otherwise,
	 * reset the stipple pattern at each packet (line strips, line loops).
	 */
	radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
		rs->pa_sc_line_stipple |
		S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));

	sctx->last_rast_prim = rast_prim;
	sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}

static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
	unsigned ia_multi_vgt_param, num_patches = 0;

	/* Polaris needs different VTX_REUSE_DEPTH settings depending on
	 * whether the "fractional odd" tessellation spacing is used.
	 */
	if (sctx->b.family >= CHIP_POLARIS10) {
		struct si_shader_selector *tes = sctx->tes_shader.cso;
		unsigned vtx_reuse_depth = 30;

		if (tes &&
		    tes->info.properties[TGSI_PROPERTY_TES_SPACING] ==
		    PIPE_TESS_SPACING_FRACTIONAL_ODD)
			vtx_reuse_depth = 14;

		if (vtx_reuse_depth != sctx->last_vtx_reuse_depth) {
			radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
					       vtx_reuse_depth);
			sctx->last_vtx_reuse_depth = vtx_reuse_depth;
		}
	}

	if (sctx->tes_shader.cso)
		si_emit_derived_tess_state(sctx, info, &num_patches);

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);

	/* Draw state. */
	if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
		if (sctx->b.chip_class >= CIK)
			radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
		else
			radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);

		sctx->last_multi_vgt_param = ia_multi_vgt_param;
	}
	if (prim != sctx->last_prim) {
		if (sctx->b.chip_class >= CIK)
			radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
		else
			radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);

		sctx->last_prim = prim;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
		sctx->last_primitive_restart_en = info->primitive_restart;
	}
	if (info->primitive_restart &&
	    (info->restart_index != sctx->last_restart_index ||
	     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
		radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       info->restart_index);
		sctx->last_restart_index = info->restart_index;
	}
}

static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 const struct pipe_index_buffer *ib)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
	uint32_t index_max_size = 0;
	uint64_t index_va = 0;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			    COPY_DATA_DST_SEL(COPY_DATA_REG) |
			    COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va);       /* src address lo */
		radeon_emit(cs, va >> 32); /* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0); /* unused */

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      t->buf_filled_size, RADEON_USAGE_READ,
				      RADEON_PRIO_SO_FILLED_SIZE);
	}

	/* draw packet */
	if (info->indexed) {
		if (ib->index_size != sctx->last_index_size) {
			radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));

			/* index type */
			switch (ib->index_size) {
			case 1:
				radeon_emit(cs, V_028A7C_VGT_INDEX_8);
				break;
			case 2:
				radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
					    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						     V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
				break;
			case 4:
				radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
					    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
						     V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
				break;
			default:
				assert(!"unreachable");
				return;
			}

			sctx->last_index_size = ib->index_size;
		}

		index_max_size = (ib->buffer->width0 - ib->offset) /
				  ib->index_size;
		index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      (struct r600_resource *)ib->buffer,
				      RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
	} else {
		/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
		 * so the state must be re-emitted before the next indexed draw.
		 */
		if (sctx->b.chip_class >= CIK)
			sctx->last_index_size = -1;
	}

	if (!info->indirect) {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = info->indexed ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    info->drawid != sctx->last_drawid ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);
			radeon_emit(cs, info->drawid);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_drawid = info->drawid;
			sctx->last_sh_base_reg = sh_base_reg;
		}
	} else {
		uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

		assert(indirect_va % 8 == 0);

		si_invalidate_draw_sh_constants(sctx);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, 1);
		radeon_emit(cs, indirect_va);
		radeon_emit(cs, indirect_va >> 32);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				      (struct r600_resource *)info->indirect,
				      RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
	}

	if (info->indirect) {
		unsigned di_src_sel = info->indexed ? V_0287F0_DI_SRC_SEL_DMA
						    : V_0287F0_DI_SRC_SEL_AUTO_INDEX;

		assert(info->indirect_offset % 4 == 0);

		if (info->indexed) {
			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);
		}

		if (!sctx->screen->has_draw_indirect_multi) {
			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT
							   : PKT3_DRAW_INDIRECT,
					     3, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, di_src_sel);
		} else {
			uint64_t count_va = 0;

			if (info->indirect_params) {
				struct r600_resource *params_buf =
					(struct r600_resource *)info->indirect_params;

				radeon_add_to_buffer_list(
				    &sctx->b, &sctx->b.gfx, params_buf,
				    RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

				count_va = params_buf->gpu_address + info->indirect_params_offset;
			}

			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
							     PKT3_DRAW_INDIRECT_MULTI,
					     8, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
				    S_2C3_DRAW_INDEX_ENABLE(1) |
				    S_2C3_COUNT_INDIRECT_ENABLE(!!info->indirect_params));
			radeon_emit(cs, info->indirect_count);
			radeon_emit(cs, count_va);
			radeon_emit(cs, count_va >> 32);
			radeon_emit(cs, info->indirect_stride);
			radeon_emit(cs, di_src_sel);
		}
	} else {
		if (info->indexed) {
			index_va += info->start * ib->index_size;
, PKT3(PKT3_DRAW_INDEX_2
, 4, render_cond_bit
));
707 radeon_emit(cs
, index_max_size
);
708 radeon_emit(cs
, index_va
);
709 radeon_emit(cs
, (index_va
>> 32UL) & 0xFF);
710 radeon_emit(cs
, info
->count
);
711 radeon_emit(cs
, V_0287F0_DI_SRC_SEL_DMA
);
713 radeon_emit(cs
, PKT3(PKT3_DRAW_INDEX_AUTO
, 1, render_cond_bit
));
714 radeon_emit(cs
, info
->count
);
715 radeon_emit(cs
, V_0287F0_DI_SRC_SEL_AUTO_INDEX
|
716 S_0287F0_USE_OPAQUE(!!info
->count_from_stream_output
));
static void si_emit_surface_sync(struct r600_common_context *rctx,
				 unsigned cp_coher_cntl)
{
	struct radeon_winsys_cs *cs = rctx->gfx.cs;

	/* ACQUIRE_MEM is only required on a compute ring. */
	radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
	radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
	radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
	radeon_emit(cs, 0);		/* CP_COHER_BASE */
	radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
}

void si_emit_cache_flush(struct si_context *sctx)
{
	struct r600_common_context *rctx = &sctx->b;
	struct radeon_winsys_cs *cs = rctx->gfx.cs;
	uint32_t cp_coher_cntl = 0;

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER)
		sctx->b.num_fb_cache_flushes++;

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES, but that
	 * doesn't seem to work reliably. Since the bug doesn't affect
	 * correctness (it only does more work than necessary) and
	 * the performance impact is likely negligible, there is no plan
	 * to add a workaround for it.
	 */

	if (rctx->flags & SI_CONTEXT_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (rctx->flags & SI_CONTEXT_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);

		/* Necessary for DCC */
		if (rctx->chip_class == VI)
			r600_gfx_write_event_eop(rctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
						 0, 0, NULL, 0, 0, 0);
	}
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
		/* needed for wait for idle in SURFACE_SYNC */
		assert(rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB);
	}
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
		/* needed for wait for idle in SURFACE_SYNC */
		assert(rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB);
	}

	/* Wait for shader engines to go idle.
	 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
	 * for everything including CB/DB cache flushes.
	 */
	if (!(rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
			     SI_CONTEXT_FLUSH_AND_INV_DB))) {
		if (rctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			/* Only count explicit shader flushes, not implicit ones
			 * done by SURFACE_SYNC.
			 */
			rctx->num_vs_flushes++;
			rctx->num_ps_flushes++;
		} else if (rctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			rctx->num_vs_flushes++;
		}
	}

	if (rctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
	    sctx->compute_is_busy) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
		rctx->num_cs_flushes++;
		sctx->compute_is_busy = false;
	}

	/* VGT state synchronization. */
	if (rctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (rctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if (sctx->ce_ib ||
	    (rctx->flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
			    SI_CONTEXT_INV_VMEM_L1 |
			    SI_CONTEXT_INV_GLOBAL_L2 |
			    SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	/* When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
	 * waits for idle. Therefore, it should be last. SURFACE_SYNC is done
	 * in PFP.
	 *
	 * cp_coher_cntl should contain all necessary flags except TC flags
	 * at this point.
	 *
	 * SI-CIK don't support L2 write-back.
	 */
	if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2 ||
	    (rctx->chip_class <= CIK &&
	     (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		/* Invalidate L1 & L2. (L1 is always invalidated)
		 * WB must be set on VI+ when TC_ACTION is set.
		 */
		si_emit_surface_sync(rctx, cp_coher_cntl |
				     S_0085F0_TC_ACTION_ENA(1) |
				     S_0301F0_TC_WB_ACTION_ENA(rctx->chip_class >= VI));
		cp_coher_cntl = 0;
		sctx->b.num_L2_invalidates++;
	} else {
		/* L1 invalidation and L2 writeback must be done separately,
		 * because both operations can't be done together.
		 */
		if (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
			/* WB = write-back
			 * NC = apply to non-coherent MTYPEs
			 *      (i.e. MTYPE <= 1, which is what we use everywhere)
			 *
			 * WB doesn't work without NC.
			 */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0301F0_TC_WB_ACTION_ENA(1) |
					     S_0301F0_TC_NC_ACTION_ENA(1));
			cp_coher_cntl = 0;
			sctx->b.num_L2_writebacks++;
		}
		if (rctx->flags & SI_CONTEXT_INV_VMEM_L1) {
			/* Invalidate per-CU VMEM L1. */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0085F0_TCL1_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
	}

	/* If TC flushes haven't cleared this... */
	if (cp_coher_cntl)
		si_emit_surface_sync(rctx, cp_coher_cntl);

	if (rctx->flags & R600_CONTEXT_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
				EVENT_INDEX(0));
	} else if (rctx->flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
				EVENT_INDEX(0));
	}

	rctx->flags = 0;
}

static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	if (info->indirect) {
		struct r600_resource *indirect =
			(struct r600_resource*)info->indirect;
		int *data = r600_buffer_map_sync_with_rings(&sctx->b,
					indirect, PIPE_TRANSFER_READ);
		data += info->indirect_offset / sizeof(int);
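		/* The indirect arguments follow the pipe/GL draw-indirect
		 * layout, where dword 0 is the count and dword 2 the start
		 * value; hence data[0] and data[2] below. */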
		*start = data[2];
		*count = data[0];
	} else {
		*start = info->start;
		*count = info->count;
	}
}

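/* CE/DE handshake (summary of the two helpers below): the constant engine
 * runs ahead of the draw engine uploading descriptors; before a dependent
 * draw, the CE counter is bumped and the DE waits on it, and after the draw
 * the DE counter releases the CE again. */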
void si_ce_pre_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->ce_ib, PKT3(PKT3_INCREMENT_CE_COUNTER, 0, 0));
		radeon_emit(sctx->ce_ib, 1);

		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_WAIT_ON_CE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 1);
	}
}

void si_ce_post_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_INCREMENT_DE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 0);

		sctx->ce_need_synchronization = false;
	}
}

static void cik_prefetch_shader_async(struct si_context *sctx,
				      struct si_pm4_state *state)
{
	if (state) {
		struct pipe_resource *bo = &state->bo[0]->b.b;
		assert(state->nbo == 1);

		cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
	}
}

void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct pipe_index_buffer ib = {};
	unsigned mask, dirty_fb_counter, dirty_tex_counter, rast_prim;

	if (likely(!info->indirect)) {
		/* SI-CI treat instance_count==0 as instance_count==1. There is
		 * no workaround for indirect draws, but we can at least skip
		 * direct draws. */
		if (unlikely(!info->instance_count))
			return;

		/* Handle count == 0. */
		if (unlikely(!info->count &&
			     (info->indexed || !info->count_from_stream_output)))
			return;
	}

	if (unlikely(!sctx->vs_shader.cso)) {
		assert(0);
		return;
	}
	if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
		assert(0);
		return;
	}
	if (unlikely(!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES))) {
		assert(0);
		return;
	}

	/* Re-emit the framebuffer state if needed. */
	dirty_fb_counter = p_atomic_read(&sctx->b.screen->dirty_fb_counter);
	if (unlikely(dirty_fb_counter != sctx->b.last_dirty_fb_counter)) {
		sctx->b.last_dirty_fb_counter = dirty_fb_counter;
		sctx->framebuffer.dirty_cbufs |=
			((1 << sctx->framebuffer.state.nr_cbufs) - 1);
		sctx->framebuffer.dirty_zsbuf = true;
		si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
	}

	/* Invalidate & recompute texture descriptors if needed. */
	dirty_tex_counter = p_atomic_read(&sctx->b.screen->dirty_tex_descriptor_counter);
	if (unlikely(dirty_tex_counter != sctx->b.last_dirty_tex_descriptor_counter)) {
		sctx->b.last_dirty_tex_descriptor_counter = dirty_tex_counter;
		si_update_all_texture_descriptors(sctx);
	}

	si_decompress_graphics_textures(sctx);

	/* Set the rasterization primitive type.
	 *
	 * This must be done after si_decompress_textures, which can call
	 * draw_vbo recursively, and before si_update_shaders, which uses
	 * current_rast_prim for this draw_vbo call. */
	if (sctx->gs_shader.cso)
		rast_prim = sctx->gs_shader.cso->gs_output_prim;
	else if (sctx->tes_shader.cso)
		rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
	else
		rast_prim = info->mode;

	if (rast_prim != sctx->current_rast_prim) {
		sctx->current_rast_prim = rast_prim;
		sctx->do_update_shaders = true;
	}

	if (sctx->gs_shader.cso) {
		/* Determine whether the GS triangle strip adjacency fix should
		 * be applied. Rotate every other triangle if
		 * - triangle strips with adjacency are fed to the GS and
		 * - primitive restart is disabled (the rotation doesn't help
		 *   when the restart occurs after an odd number of triangles).
		 */
		bool gs_tri_strip_adj_fix =
			!sctx->tes_shader.cso &&
			info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
			!info->primitive_restart;

		if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
			sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
			sctx->do_update_shaders = true;
		}
	}

	if (sctx->do_update_shaders && !si_update_shaders(sctx))
		return;

	if (!si_upload_graphics_shader_descriptors(sctx))
		return;

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
		ib.user_buffer = sctx->index_buffer.user_buffer;
		ib.index_size = sctx->index_buffer.index_size;
		ib.offset = sctx->index_buffer.offset;

		/* Translate or upload, if needed. */
		/* 8-bit indices are supported on VI. */
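		/* SI-CIK have no native 8-bit index support; the path below
		 * widens ubyte indices to ushort, which is why the upload
		 * allocates count * 2 bytes and index_size becomes 2. */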
		if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset, start, count, start_offset;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_alloc(sctx->b.uploader, start_offset, count * 2, 256,
				       &out_offset, &out_buffer, &ptr);
			if (!out_buffer) {
				pipe_resource_reference(&ib.buffer, NULL);
				return;
			}

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
							   ib.offset + start_offset,
							   count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			/* info->start will be added by the drawing code */
			ib.offset = out_offset - start_offset;
			ib.index_size = 2;
		} else if (ib.user_buffer && !ib.buffer) {
			unsigned start, count, start_offset;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
				      256, (char*)ib.user_buffer + start_offset,
				      &ib.offset, &ib.buffer);
			if (!ib.buffer)
				return;
			/* info->start will be added by the drawing code */
			ib.offset -= start_offset;
		}
	}

	/* VI reads index buffers through TC L2. */
	if (info->indexed && sctx->b.chip_class <= CIK &&
	    r600_resource(ib.buffer)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
		r600_resource(ib.buffer)->TC_L2_dirty = false;
	}

	if (info->indirect && r600_resource(info->indirect)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
		r600_resource(info->indirect)->TC_L2_dirty = false;
	}

	if (info->indirect_params &&
	    r600_resource(info->indirect_params)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
		r600_resource(info->indirect_params)->TC_L2_dirty = false;
	}

	/* Add buffer sizes for memory checking in need_cs_space. */
	if (sctx->emit_scratch_reloc && sctx->scratch_buffer)
		r600_context_add_resource_size(ctx, &sctx->scratch_buffer->b.b);
	if (info->indirect)
		r600_context_add_resource_size(ctx, info->indirect);

	si_need_cs_space(sctx);

	/* Since we've called r600_context_add_resource_size for vertex buffers,
	 * this must be called after si_need_cs_space, because we must let
	 * need_cs_space flush before we add buffers to the buffer list.
	 */
	if (!si_upload_vertex_buffer_descriptors(sctx))
		return;

	/* Flush caches before prefetching shaders. */
	if (sctx->b.flags)
		si_emit_cache_flush(sctx);

	/* Prefetch shaders and VBO descriptors to TC L2. */
	if (sctx->b.chip_class >= CIK) {
		if (si_pm4_state_changed(sctx, ls))
			cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
		if (si_pm4_state_changed(sctx, hs))
			cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
		if (si_pm4_state_changed(sctx, es))
			cik_prefetch_shader_async(sctx, sctx->queued.named.es);
		if (si_pm4_state_changed(sctx, gs))
			cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
		if (si_pm4_state_changed(sctx, vs))
			cik_prefetch_shader_async(sctx, sctx->queued.named.vs);

		/* Vertex buffer descriptors are uploaded uncached, so prefetch
		 * them right after the VS binary. */
		if (sctx->vertex_buffers.pointer_dirty) {
			cik_prefetch_TC_L2_async(sctx, &sctx->vertex_buffers.buffer->b.b,
						 sctx->vertex_buffers.buffer_offset,
						 sctx->vertex_elements->count * 16);
		}
		if (si_pm4_state_changed(sctx, ps))
			cik_prefetch_shader_async(sctx, sctx->queued.named.ps);
	}

	/* Emit state atoms. */
	mask = sctx->dirty_atoms;
	while (mask) {
		struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];

		atom->emit(&sctx->b, atom);
	}
	sctx->dirty_atoms = 0;

	si_pm4_emit_dirty(sctx);
	si_emit_scratch_reloc(sctx);
	si_emit_rasterizer_prim_state(sctx);
	si_emit_draw_registers(sctx, info);

	si_ce_pre_draw_synchronization(sctx);

	si_emit_draw_packets(sctx, info, &ib);

	si_ce_post_draw_synchronization(sctx);

	if (sctx->trace_buf)
		si_trace_emit(sctx);

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if ((sctx->b.family == CHIP_HAWAII ||
	     sctx->b.family == CHIP_TONGA ||
	     sctx->b.family == CHIP_FIJI) &&
	    r600_get_strmout_en(&sctx->b)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		if (!rtex->tc_compatible_htile)
			rtex->dirty_level_mask |= 1 << surf->u.tex.level;

		if (rtex->surface.flags & RADEON_SURF_SBUFFER)
			rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture *)surf->texture;

			if (rtex->fmask.size)
				rtex->dirty_level_mask |= 1 << surf->u.tex.level;
			if (rtex->dcc_gather_statistics)
				rtex->separate_dcc_dirty = true;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
	if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
		sctx->b.num_spill_draw_calls++;
}

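/* Debug helper: write an incrementing trace ID to memory through the ME and
 * encode the same ID as a NOP in the IB, so a GPU hang can be narrowed down
 * to the last packet the ME finished processing. */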
void si_trace_emit(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	sctx->trace_id++;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, sctx->trace_buf->gpu_address);
	radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
	radeon_emit(cs, sctx->trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(sctx->trace_id));
}