/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */
#include "si_pipe.h"
#include "si_shader.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"
static void si_decompress_textures(struct si_context *sctx)
{
	if (!sctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		for (int i = 0; i < SI_NUM_SHADERS; i++) {
			if (sctx->samplers[i].depth_texture_mask) {
				si_flush_depth_textures(sctx, &sctx->samplers[i]);
			}
			if (sctx->samplers[i].compressed_colortex_mask) {
				si_decompress_color_textures(sctx, &sctx->samplers[i]);
			}
		}
	}
}
static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES]			= V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP]			= V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP]			= V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS]			= V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP]			= V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON]			= V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES]			= V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST]		= V_008958_DI_PT_RECTLIST
	};
	assert(mode < Elements(prim_conv));
	return prim_conv[mode];
}
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP]			= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON]			= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]	= V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES]			= V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST]		= V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}
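
/* Note on the table above: VGT_GS_OUT_PRIM_TYPE only distinguishes the
 * output topology class (point list, line strip, triangle strip), which is
 * why every triangle-like input primitive collapses to OUTPRIM_TYPE_TRISTRIP
 * and every line-like one to OUTPRIM_TYPE_LINESTRIP. */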
/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
static void si_emit_derived_tess_state(struct si_context *sctx,
				       const struct pipe_draw_info *info,
				       unsigned *num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	struct si_shader_selector *ls = sctx->vs_shader;
	/* The TES pointer will only be used for sctx->last_tcs.
	 * It would be wrong to think that TCS = TES. */
	struct si_shader_selector *tcs =
		sctx->tcs_shader ? sctx->tcs_shader : sctx->tes_shader;
	unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
	unsigned num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
	unsigned input_patch_size, output_patch_size, output_patch0_offset;
	unsigned perpatch_output_offset, lds_size, ls_rsrc2;
	unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;

	*num_patches = 1; /* TODO: calculate this */

	if (sctx->last_ls == ls->current &&
	    sctx->last_tcs == tcs &&
	    sctx->last_tes_sh_base == tes_sh_base &&
	    sctx->last_num_tcs_input_cp == num_tcs_input_cp)
		return;

	sctx->last_ls = ls->current;
	sctx->last_tcs = tcs;
	sctx->last_tes_sh_base = tes_sh_base;
	sctx->last_num_tcs_input_cp = num_tcs_input_cp;

	/* This calculates how shader inputs and outputs among VS, TCS, and TES
	 * are laid out in LDS. */
	num_tcs_inputs = util_last_bit64(ls->outputs_written);

	if (sctx->tcs_shader) {
		num_tcs_outputs = util_last_bit64(tcs->outputs_written);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
	} else {
		/* No TCS. Route varyings from LS to TES. */
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;

	output_patch0_offset = sctx->tcs_shader ? input_patch_size * *num_patches : 0;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	lds_size = output_patch0_offset + output_patch_size * *num_patches;
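
	/* A worked example of the layout above, with hypothetical counts
	 * (not taken from any real shader): num_tcs_inputs = 4 and
	 * num_tcs_outputs = 2 give input_vertex_size = 64 bytes and
	 * output_vertex_size = 32 bytes (one vec4 slot = 16 bytes).
	 * With 3 control points in and out and 2 patch outputs:
	 *   input_patch_size  = 3 * 64 = 192
	 *   output_patch_size = 3 * 32 + 2 * 16 = 128
	 * so with *num_patches = 1, patch 0's outputs start at byte 192
	 * and lds_size = 192 + 128 = 320 bytes. */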
	ls_rsrc2 = ls->current->ls_rsrc2;

	if (sctx->b.chip_class >= CIK) {
		assert(lds_size <= 65536);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 512) / 512);
	} else {
		assert(lds_size <= 32768);
		ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 256) / 256);
	}

	/* Due to a hw bug, RSRC2_LS must be written twice with another
	 * LS register written in between. */
	if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
		si_write_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
	si_write_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, ls->current->ls_rsrc1);
	radeon_emit(cs, ls_rsrc2);

	/* Compute userdata SGPRs. */
	assert(((input_vertex_size / 4) & ~0xff) == 0);
	assert(((output_vertex_size / 4) & ~0xff) == 0);
	assert(((input_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch0_offset / 16) & ~0xffff) == 0);
	assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
	assert(num_tcs_input_cp <= 32);
	assert(num_tcs_output_cp <= 32);

	tcs_in_layout = (input_patch_size / 4) |
			((input_vertex_size / 4) << 13);
	tcs_out_layout = (output_patch_size / 4) |
			 ((output_vertex_size / 4) << 13);
	tcs_out_offsets = (output_patch0_offset / 16) |
			  ((perpatch_output_offset / 16) << 16);
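
	/* Resulting userdata bit layouts (derived from the shifts above):
	 *   tcs_in_layout:   [12:0] input_patch_size/4,  [20:13] input_vertex_size/4
	 *   tcs_out_layout:  [12:0] output_patch_size/4, [20:13] output_vertex_size/4
	 *   tcs_out_offsets: [15:0] output_patch0_offset/16,
	 *                    [31:16] perpatch_output_offset/16
	 * A control-point count is OR'ed into bits [31:26] of tcs_out_layout
	 * when the words are emitted below. */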
	/* Set them for LS. */
	si_write_sh_reg(cs,
			R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
			tcs_in_layout);

	/* Set them for TCS. */
	si_write_sh_reg_seq(cs,
			    R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OUT_OFFSETS * 4, 3);
	radeon_emit(cs, tcs_out_offsets);
	radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
	radeon_emit(cs, tcs_in_layout);

	/* Set them for TES. */
	si_write_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OUT_OFFSETS * 4, 2);
	radeon_emit(cs, tcs_out_offsets);
	radeon_emit(cs, tcs_out_layout | (num_tcs_output_cp << 26));
}
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned num_patches)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	unsigned prim = info->mode;
	unsigned primgroup_size = 128; /* recommended without a GS */

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;

	if (sctx->gs_shader)
		primgroup_size = 64; /* recommended with a GS */

	if (sctx->tes_shader) {
		unsigned num_cp_out =
			sctx->tcs_shader ?
			sctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
			info->vertices_per_patch;
		unsigned max_size = 256 / MAX2(info->vertices_per_patch, num_cp_out);

		primgroup_size = MIN2(primgroup_size, max_size);

		/* primgroup_size must be set to a multiple of NUM_PATCHES */
		primgroup_size = (primgroup_size / num_patches) * num_patches;
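
		/* E.g. (hypothetical values): primgroup_size = 60 with
		 * num_patches = 8 would round down to 56, the largest
		 * multiple of 8 that fits. With num_patches = 1 (see the
		 * TODO above), the rounding is a no-op. */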

		/* SWITCH_ON_EOI must be set if PrimID is used.
		 * If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
		if ((sctx->tcs_shader && sctx->tcs_shader->info.uses_primid) ||
		    sctx->tes_shader->info.uses_primid) {
			ia_switch_on_eoi = true;
			partial_es_wave = true;
		}

		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((sctx->b.family == CHIP_TAHITI ||
		     sctx->b.family == CHIP_PITCAIRN ||
		     sctx->b.family == CHIP_BONAIRE) &&
		    sctx->gs_shader)
			partial_vs_wave = true;
	}

	/* This is a hardware requirement. */
	if ((rs && rs->line_stipple_enable) ||
	    (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sctx->b.streamout.streamout_enabled ||
	    sctx->b.streamout.prims_gen_query_enabled)
		partial_vs_wave = true;

	if (sctx->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (sctx->b.screen->info.max_se < 4 ||
		    prim == PIPE_PRIM_POLYGON ||
		    prim == PIPE_PRIM_LINE_LOOP ||
		    prim == PIPE_PRIM_TRIANGLE_FAN ||
		    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    info->primitive_restart)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sctx->b.family == CHIP_HAWAII &&
		    (info->indirect || info->instance_count > 1))
			wd_switch_on_eop = true;

		/* USE_OPAQUE doesn't work when WD_SWITCH_ON_EOP is 0. */
		if (info->count_from_stream_output)
			wd_switch_on_eop = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* Hw bug with single-primitive instances and SWITCH_ON_EOI
	 * on multi-SE chips. */
	if (sctx->b.screen->info.max_se >= 2 && ia_switch_on_eoi &&
	    (info->indirect ||
	     (info->instance_count > 1 &&
	      u_prims_for_vertices(info->mode, info->count) <= 1)))
		sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;

	/* Instancing bug on 2 SE chips. */
	if (sctx->b.screen->info.max_se == 2 && ia_switch_on_eoi &&
	    (info->indirect || info->instance_count > 1))
		partial_vs_wave = true;

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
		S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
		S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
		S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
		S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
		S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
		S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ? 2 : 0);
}
static unsigned si_get_ls_hs_config(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned num_patches)
{
	unsigned num_output_cp;

	if (!sctx->tes_shader)
		return 0;

	num_output_cp = sctx->tcs_shader ?
		sctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
		info->vertices_per_patch;

	return S_028B58_NUM_PATCHES(num_patches) |
		S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) |
		S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
}
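
/* For illustration (hypothetical numbers): a pass-through configuration with
 * num_patches = 1 and triangle patches (3 control points in, 3 out) packs
 * NUM_PATCHES = 1, HS_NUM_INPUT_CP = 3, HS_NUM_OUTPUT_CP = 3 into the
 * VGT_LS_HS_CONFIG value returned above. */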
static void si_emit_scratch_reloc(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;

	if (!sctx->emit_scratch_reloc)
		return;

	r600_write_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
			       sctx->spi_tmpring_size);

	if (sctx->scratch_buffer) {
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      sctx->scratch_buffer, RADEON_USAGE_READWRITE,
				      RADEON_PRIO_SHADER_RESOURCE_RW);
	}
	sctx->emit_scratch_reloc = false;
}
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	unsigned rast_prim = sctx->current_rast_prim;
	struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == sctx->last_rast_prim &&
	    rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
		return;

	r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
		rs->pa_sc_line_stipple |
		S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 :
					 rast_prim == PIPE_PRIM_LINE_STRIP ? 2 : 0));

	sctx->last_rast_prim = rast_prim;
	sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}
static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
	unsigned ia_multi_vgt_param, ls_hs_config, num_patches = 0;

	if (sctx->tes_shader)
		si_emit_derived_tess_state(sctx, info, &num_patches);

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
	ls_hs_config = si_get_ls_hs_config(sctx, info, num_patches);

	/* Draw state. */
	if (prim != sctx->last_prim ||
	    ia_multi_vgt_param != sctx->last_multi_vgt_param ||
	    ls_hs_config != sctx->last_ls_hs_config) {
		if (sctx->b.chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_DRAW_PREAMBLE, 2, 0));
			radeon_emit(cs, prim); /* VGT_PRIMITIVE_TYPE */
			radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
			radeon_emit(cs, ls_hs_config); /* VGT_LS_HS_CONFIG */
		} else {
			r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
			r600_write_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
			r600_write_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
		}
		sctx->last_prim = prim;
		sctx->last_multi_vgt_param = ia_multi_vgt_param;
		sctx->last_ls_hs_config = ls_hs_config;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		r600_write_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
		sctx->last_primitive_restart_en = info->primitive_restart;
	}
	if (info->primitive_restart &&
	    (info->restart_index != sctx->last_restart_index ||
	     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
		r600_write_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       info->restart_index);
		sctx->last_restart_index = info->restart_index;
	}
}
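
/* A brief map of the draw paths emitted below (a summary of the code, not
 * additional hardware documentation): indexed draws use DRAW_INDEX_2
 * (direct) or SET_BASE + INDEX_BASE + INDEX_BUFFER_SIZE + DRAW_INDEX_INDIRECT;
 * non-indexed draws use DRAW_INDEX_AUTO (direct, which also carries the
 * USE_OPAQUE streamout path) or SET_BASE + DRAW_INDIRECT. */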
static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 const struct pipe_index_buffer *ib)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			    COPY_DATA_DST_SEL(COPY_DATA_REG) |
			    COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va); /* src address lo */
		radeon_emit(cs, va >> 32); /* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0); /* unused */

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      t->buf_filled_size, RADEON_USAGE_READ,
				      RADEON_PRIO_MIN);
	}

	if (info->indexed) {
		radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));

		switch (ib->index_size) {
		case 1:
			radeon_emit(cs, V_028A7C_VGT_INDEX_8);
			break;
		case 2:
			radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
				    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
					     V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
			break;
		case 4:
			radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
				    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
					     V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
			break;
		default:
			assert(!"unreachable");
			return;
		}
	}

	if (!info->indirect) {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = info->indexed ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			si_write_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_sh_base_reg = sh_base_reg;
		}
	} else {
		si_invalidate_draw_sh_constants(sctx);

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource *)info->indirect,
				      RADEON_USAGE_READ, RADEON_PRIO_MIN);
	}

	if (info->indexed) {
		uint32_t index_max_size = (ib->buffer->width0 - ib->offset) /
					  ib->index_size;
		uint64_t index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource *)ib->buffer,
				      RADEON_USAGE_READ, RADEON_PRIO_MIN);

		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

			assert(indirect_va % 8 == 0);
			assert(index_va % 2 == 0);
			assert(info->indirect_offset % 4 == 0);

			radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
			radeon_emit(cs, 1);
			radeon_emit(cs, indirect_va);
			radeon_emit(cs, indirect_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_INDIRECT, 3, sctx->b.predicate_drawing));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		} else {
			index_va += info->start * ib->index_size;

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, sctx->b.predicate_drawing));
			radeon_emit(cs, index_max_size);
			radeon_emit(cs, index_va);
			radeon_emit(cs, (index_va >> 32UL) & 0xFF);
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		}
	} else {
		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

			assert(indirect_va % 8 == 0);
			assert(info->indirect_offset % 4 == 0);

			radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
			radeon_emit(cs, 1);
			radeon_emit(cs, indirect_va);
			radeon_emit(cs, indirect_va >> 32);

			radeon_emit(cs, PKT3(PKT3_DRAW_INDIRECT, 3, sctx->b.predicate_drawing));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX);
		} else {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, sctx->b.predicate_drawing));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
		}
	}
}
#define BOTH_ICACHE_KCACHE (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_KCACHE)

void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
{
	struct r600_common_context *sctx = &si_ctx->b;
	struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
	uint32_t cp_coher_cntl = 0;
	uint32_t compute =
		PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES, but that
	 * doesn't seem to work reliably. Since the bug doesn't affect
	 * correctness (it only does more work than necessary) and
	 * the performance impact is likely negligible, there is no plan
	 * to fix it. */

	if (sctx->flags & SI_CONTEXT_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (sctx->flags & SI_CONTEXT_INV_KCACHE)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (sctx->flags & SI_CONTEXT_INV_TC_L1)
		cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
	if (sctx->flags & SI_CONTEXT_INV_TC_L2) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);

		/* TODO: this might not be needed. */
		if (sctx->chip_class >= VI)
			cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
	}

	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_WITH_INV_L2) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
				EVENT_WRITE_INV_L2);
	}

	/* FLUSH_AND_INV events must be emitted before PS_PARTIAL_FLUSH.
	 * Otherwise, clearing CMASK (CB meta) with CP DMA isn't reliable.
	 *
	 * I think the reason is that FLUSH_AND_INV is only added to a queue
	 * and it is PS_PARTIAL_FLUSH that waits for it to complete.
	 */
	if (sctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (sctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}
	if (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}
	if (sctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* SURFACE_SYNC must be emitted after partial flushes.
	 * It looks like SURFACE_SYNC flushes caches immediately and doesn't
	 * wait for any engines. This should be last.
	 */
	if (sctx->chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
		radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
		radeon_emit(cs, 0xff);          /* CP_COHER_SIZE_HI */
		radeon_emit(cs, 0);             /* CP_COHER_BASE */
		radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
		radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
	} else {
		radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
		radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
		radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
		radeon_emit(cs, 0);             /* CP_COHER_BASE */
		radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
	}

	sctx->flags = 0;
}
static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	if (info->indirect) {
		struct r600_resource *indirect =
			(struct r600_resource*)info->indirect;
		int *data = r600_buffer_map_sync_with_rings(&sctx->b,
					indirect, PIPE_TRANSFER_READ);
		data += info->indirect_offset / sizeof(int);
		*start = data[2];
		*count = data[0];
	} else {
		*start = info->start;
		*count = info->count;
	}
}
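
/* The data[0]/data[2] reads above assume the standard memory layout of
 * indirect draw arguments: dword 0 = count, dword 1 = instance_count,
 * dword 2 = start (first vertex or first index), followed by
 * base_vertex/start_instance depending on whether the draw is indexed. */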
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_index_buffer ib = {};
	unsigned i;

	if (!info->count && !info->indirect &&
	    (info->indexed || !info->count_from_stream_output))
		return;

	if (!sctx->ps_shader || !sctx->vs_shader) {
		assert(0);
		return;
	}
	if (!!sctx->tes_shader != (info->mode == PIPE_PRIM_PATCHES)) {
		assert(0);
		return;
	}

	si_decompress_textures(sctx);

	/* Set the rasterization primitive type.
	 *
	 * This must be done after si_decompress_textures, which can call
	 * draw_vbo recursively, and before si_update_shaders, which uses
	 * current_rast_prim for this draw_vbo call. */
	if (sctx->gs_shader)
		sctx->current_rast_prim = sctx->gs_shader->gs_output_prim;
	else if (sctx->tes_shader)
		sctx->current_rast_prim =
			sctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
	else
		sctx->current_rast_prim = info->mode;

	si_update_shaders(sctx);
	if (!si_upload_shader_descriptors(sctx))
		return;

	/* Initialize the index buffer struct. */
	pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
	ib.user_buffer = sctx->index_buffer.user_buffer;
	ib.index_size = sctx->index_buffer.index_size;
	ib.offset = sctx->index_buffer.offset;

	/* Translate or upload, if needed. */
	/* 8-bit indices are supported on VI. */
	if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
		struct pipe_resource *out_buffer = NULL;
		unsigned out_offset, start, count, start_offset;
		void *ptr;

		si_get_draw_start_count(sctx, info, &start, &count);
		start_offset = start * ib.index_size;

		u_upload_alloc(sctx->b.uploader, start_offset, count * 2,
			       &out_offset, &out_buffer, &ptr);

		util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
						   ib.offset + start_offset,
						   count, ptr);

		pipe_resource_reference(&ib.buffer, NULL);
		ib.user_buffer = NULL;
		ib.buffer = out_buffer;
		/* info->start will be added by the drawing code */
		ib.offset = out_offset - start_offset;
		ib.index_size = 2;
	} else if (ib.user_buffer && !ib.buffer) {
		unsigned start, count, start_offset;

		si_get_draw_start_count(sctx, info, &start, &count);
		start_offset = start * ib.index_size;

		u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
			      (char*)ib.user_buffer + start_offset,
			      &ib.offset, &ib.buffer);
		/* info->start will be added by the drawing code */
		ib.offset -= start_offset;
	}

	/* TODO: VI should read index buffers through TC, so this shouldn't be
	 * needed there. */
	if (info->indexed && r600_resource(ib.buffer)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_INV_TC_L2;
		r600_resource(ib.buffer)->TC_L2_dirty = false;
	}

	/* Check flush flags. */
	if (sctx->b.flags)
		si_mark_atom_dirty(sctx, sctx->atoms.s.cache_flush);

	si_need_cs_space(sctx, 0, TRUE);

	/* Emit states. */
	for (i = 0; i < SI_NUM_ATOMS; i++) {
		if (sctx->atoms.array[i]->dirty) {
			sctx->atoms.array[i]->emit(&sctx->b, sctx->atoms.array[i]);
			sctx->atoms.array[i]->dirty = false;
		}
	}

	si_pm4_emit_dirty(sctx);
	si_emit_scratch_reloc(sctx);
	si_emit_rasterizer_prim_state(sctx);
	si_emit_draw_registers(sctx, info);
	si_emit_draw_packets(sctx, info, &ib);

	if (sctx->trace_buf)
		si_trace_emit(sctx);

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if ((sctx->b.family == CHIP_HAWAII || sctx->b.family == CHIP_TONGA) &&
	    (sctx->b.streamout.streamout_enabled ||
	     sctx->b.streamout.prims_gen_query_enabled)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture*)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
}
void si_trace_emit(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;

	sctx->trace_id++;
	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sctx->trace_buf,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, sctx->trace_buf->gpu_address);
	radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
	radeon_emit(cs, sctx->trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, SI_ENCODE_TRACE_POINT(sctx->trace_id));
}
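
/* The tracing sketched above: each call writes an incrementing id into
 * trace_buf via the CP and encodes the same id in a trailing NOP packet,
 * so after a GPU hang the last id that reached memory can be compared
 * against the ids embedded in the command stream to locate the packet
 * where execution stopped. */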