/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */
#include "si_pipe.h"
#include "si_shader.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
static void si_decompress_textures(struct si_context *sctx)
{
	if (!sctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		for (int i = 0; i < SI_NUM_SHADERS; i++) {
			if (sctx->samplers[i].depth_texture_mask) {
				si_flush_depth_textures(sctx, &sctx->samplers[i]);
			}
			if (sctx->samplers[i].compressed_colortex_mask) {
				si_decompress_color_textures(sctx, &sctx->samplers[i]);
			}
		}
	}
}
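
/* Background for the masks checked above: depth and compressed-color
 * textures carry HTILE/CMASK metadata that the texture units cannot
 * interpret, so any such resource bound to a sampler slot has to be
 * decompressed (via blitter draws) before this draw samples it. The
 * blitter->running check prevents recursion when the decompression
 * blits themselves trigger a draw. */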
static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
		[R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}
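
/* The table above is a designated-initializer map from Gallium primitive
 * types to the hardware VGT_PRIMITIVE_TYPE encodings. R600_PRIM_RECTANGLE_LIST
 * is a driver-internal primitive used by u_blitter for blits and clears; it
 * is not part of the Gallium enum. */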
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}
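
/* Unlike si_conv_pipe_prim above, this maps to VGT_GS_OUT_PRIM_TYPE, which
 * only distinguishes the three primitive classes a geometry stage can
 * output: pointlist, linestrip and tristrip. Everything triangle- or
 * quad-like therefore collapses to TRISTRIP. */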
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	unsigned prim = info->mode;
	unsigned primgroup_size = 128; /* recommended without a GS */

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool partial_vs_wave = false;

	if (sctx->gs_shader)
		primgroup_size = 64; /* recommended with a GS */

	/* This is a hardware requirement. */
	if ((rs && rs->line_stipple_enable) ||
	    (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sctx->b.streamout.streamout_enabled ||
	    sctx->b.streamout.prims_gen_query_enabled)
		partial_vs_wave = true;

	if (sctx->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements. */
		if (sctx->b.screen->info.max_se < 4 ||
		    prim == PIPE_PRIM_POLYGON ||
		    prim == PIPE_PRIM_LINE_LOOP ||
		    prim == PIPE_PRIM_TRIANGLE_FAN ||
		    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    info->primitive_restart)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sctx->b.family == CHIP_HAWAII &&
		    (info->indirect || info->instance_count > 1))
			wd_switch_on_eop = true;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
	       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
	       S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
	       S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0);
}
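
/* A rough field-by-field reading of the value built above, based on the
 * public SI/CIK register descriptions: SWITCH_ON_EOP makes the input
 * assembler switch packets at the end of each primitive, WD_SWITCH_ON_EOP
 * (CIK+) is the analogous switch for the work distributor,
 * PARTIAL_VS_WAVE_ON lets VS waves launch before they are full, and
 * PRIMGROUP_SIZE is the number of primitives sent to one shader engine
 * before switching, encoded minus one. */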
static void si_emit_rasterizer_prim_state(struct si_context *sctx, unsigned mode)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;

	if (sctx->gs_shader)
		mode = sctx->gs_shader->gs_output_prim;

	if (mode == sctx->last_rast_prim)
		return;

	r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
			       sctx->pa_sc_line_stipple |
			       S_028A0C_AUTO_RESET_CNTL(mode == PIPE_PRIM_LINES ? 1 :
							mode == PIPE_PRIM_LINE_STRIP ? 2 : 0));

	r600_write_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL,
			       sctx->pa_su_sc_mode_cntl |
			       S_028814_PROVOKING_VTX_LAST(mode == PIPE_PRIM_QUADS ||
							   mode == PIPE_PRIM_QUAD_STRIP ||
							   mode == PIPE_PRIM_POLYGON));

	sctx->last_rast_prim = mode;
}
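
/* AUTO_RESET_CNTL controls where the line-stipple pattern restarts: 1 resets
 * it per line, 2 per packet (strips and loops). PROVOKING_VTX_LAST is
 * presumably set for quads and polygons so flat-shaded attributes come from
 * the last vertex of the triangles the hardware decomposes them into. */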
static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info,
				   const struct pipe_index_buffer *ib)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim =
		si_conv_prim_to_gs_out(sctx->gs_shader ?
				       sctx->gs_shader->gs_output_prim :
				       info->mode);
	unsigned ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info);

	/* Draw state. */
	if (prim != sctx->last_prim ||
	    ia_multi_vgt_param != sctx->last_multi_vgt_param) {
		if (sctx->b.chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_DRAW_PREAMBLE, 2, 0));
			radeon_emit(cs, prim); /* VGT_PRIMITIVE_TYPE */
			radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
			radeon_emit(cs, 0); /* VGT_LS_HS_CONFIG */
		} else {
			r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
			r600_write_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
		}
		sctx->last_prim = prim;
		sctx->last_multi_vgt_param = ia_multi_vgt_param;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		r600_write_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
		sctx->last_primitive_restart_en = info->primitive_restart;
	}
	if (info->primitive_restart &&
	    (info->restart_index != sctx->last_restart_index ||
	     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
		r600_write_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       info->restart_index);
		sctx->last_restart_index = info->restart_index;
	}
}
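
/* The sctx->last_* comparisons above implement register shadowing: a context
 * register is rewritten only when its value actually changes, and sentinel
 * values such as SI_RESTART_INDEX_UNKNOWN invalidate the shadow (e.g. after
 * a CS flush) so the first draw afterwards always emits the register. */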
static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 const struct pipe_index_buffer *ib)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	unsigned sh_base_reg = (sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
						  R_00B130_SPI_SHADER_USER_DATA_VS_0);

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

		/* Copy the filled size from memory into the register the
		 * hardware divides by the vertex stride. */
		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			    COPY_DATA_DST_SEL(COPY_DATA_REG) |
			    COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va); /* src address lo */
		radeon_emit(cs, va >> 32); /* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0); /* unused */

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      t->buf_filled_size, RADEON_USAGE_READ,
				      RADEON_PRIO_MIN);
	}

	/* index buffer type */
	if (info->indexed) {
		radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));

		if (ib->index_size == 4) {
			radeon_emit(cs, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
					V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
		} else {
			radeon_emit(cs, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
					V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
		}
	}

	if (!info->indirect) {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = info->indexed ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			si_write_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_sh_base_reg = sh_base_reg;
		}
	} else {
		si_invalidate_draw_sh_constants(sctx);

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource *)info->indirect,
				      RADEON_USAGE_READ, RADEON_PRIO_MIN);
	}

	if (info->indexed) {
		uint32_t index_max_size = (ib->buffer->width0 - ib->offset) /
					  ib->index_size;
		uint64_t index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource *)ib->buffer,
				      RADEON_USAGE_READ, RADEON_PRIO_MIN);

		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

			assert(indirect_va % 8 == 0);
			assert(index_va % 2 == 0);
			assert(info->indirect_offset % 4 == 0);

			radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
			radeon_emit(cs, 1); /* base select: indirect draw arguments */
			radeon_emit(cs, indirect_va);
			radeon_emit(cs, indirect_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_INDIRECT, 3, sctx->b.predicate_drawing));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		} else {
			index_va += info->start * ib->index_size;

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, sctx->b.predicate_drawing));
			radeon_emit(cs, index_max_size);
			radeon_emit(cs, index_va);
			radeon_emit(cs, (index_va >> 32UL) & 0xFF);
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		}
	} else {
		if (info->indirect) {
			uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

			assert(indirect_va % 8 == 0);
			assert(info->indirect_offset % 4 == 0);

			radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
			radeon_emit(cs, 1); /* base select: indirect draw arguments */
			radeon_emit(cs, indirect_va);
			radeon_emit(cs, indirect_va >> 32);

			radeon_emit(cs, PKT3(PKT3_DRAW_INDIRECT, 3, sctx->b.predicate_drawing));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX);
		} else {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, sctx->b.predicate_drawing));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
		}
	}
}
#define BOTH_ICACHE_KCACHE (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_KCACHE)
void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
	uint32_t cp_coher_cntl = 0;
	uint32_t compute =
		PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES. */
	if (sctx->chip_class == SI &&
	    sctx->flags & BOTH_ICACHE_KCACHE &&
	    (sctx->flags & BOTH_ICACHE_KCACHE) != BOTH_ICACHE_KCACHE) {
		r600_write_config_reg(cs, R_008C08_SQC_CACHES,
				      S_008C08_INST_INVALIDATE(!!(sctx->flags & SI_CONTEXT_INV_ICACHE)) |
				      S_008C08_DATA_INVALIDATE(!!(sctx->flags & SI_CONTEXT_INV_KCACHE)));
		cs->buf[cs->cdw-3] |= compute; /* set the compute bit in the header */
	} else {
		if (sctx->flags & SI_CONTEXT_INV_ICACHE)
			cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
		if (sctx->flags & SI_CONTEXT_INV_KCACHE)
			cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
	}

	if (sctx->flags & (SI_CONTEXT_INV_TC_L1 | R600_CONTEXT_STREAMOUT_FLUSH))
		cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
	if (sctx->flags & (SI_CONTEXT_INV_TC_L2 | R600_CONTEXT_STREAMOUT_FLUSH))
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);

	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (cp_coher_cntl) {
		if (sctx->chip_class >= CIK) {
			radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
			radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
			radeon_emit(cs, 0xff);          /* CP_COHER_SIZE_HI */
			radeon_emit(cs, 0);             /* CP_COHER_BASE */
			radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
			radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
		} else {
			radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
			radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
			radeon_emit(cs, 0);             /* CP_COHER_BASE */
			radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
		}
	}

	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_FLUSH_WITH_INV_L2) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
				EVENT_WRITE_INV_L2);
	}

	if (sctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		/* Needed if streamout buffers are going to be used as a source. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (sctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (sctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	sctx->flags = 0;
}
const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 24 }; /* number of CS dwords */
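
/* The 24 dwords above are a worst-case estimate of everything one
 * si_emit_cache_flush call can emit, used when reserving CS space before
 * drawing; it has to be kept in sync whenever packets are added to the
 * function. */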
static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	if (info->indirect) {
		struct r600_resource *indirect =
			(struct r600_resource*)info->indirect;
		int *data = r600_buffer_map_sync_with_rings(&sctx->b,
					indirect, PIPE_TRANSFER_READ);

		data += info->indirect_offset / sizeof(int);
		/* indirect args: {count, instance_count, start, ...} */
		*start = data[2];
		*count = data[0];
	} else {
		*start = info->start;
		*count = info->count;
	}
}
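
/* Note that mapping the indirect buffer here synchronizes with the GPU, so
 * this helper is only reached on the slow paths in si_draw_vbo below (8-bit
 * index translation and user index buffer uploads); ordinary indirect draws
 * pass the buffer straight to the DRAW_*_INDIRECT packets without any CPU
 * read-back. */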
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_index_buffer ib = {};
	unsigned i;

	if (!info->count && !info->indirect &&
	    (info->indexed || !info->count_from_stream_output))
		return;

	if (!sctx->ps_shader || !sctx->vs_shader)
		return;

	si_decompress_textures(sctx);
	si_update_shaders(sctx);

	if (sctx->vertex_buffers_dirty) {
		si_update_vertex_buffers(sctx);
		sctx->vertex_buffers_dirty = false;
	}

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
		ib.user_buffer = sctx->index_buffer.user_buffer;
		ib.index_size = sctx->index_buffer.index_size;
		ib.offset = sctx->index_buffer.offset;

		/* Translate or upload, if needed. */
		if (ib.index_size == 1) {
			/* 8-bit indices are unsupported; widen to 16 bits. */
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset, start, count, start_offset;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_alloc(sctx->b.uploader, start_offset, count * 2,
				       &out_offset, &out_buffer, &ptr);

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
							   ib.offset + start_offset,
							   count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			/* info->start will be added by the drawing code */
			ib.offset = out_offset - start_offset;
			ib.index_size = 2;
		} else if (ib.user_buffer && !ib.buffer) {
			unsigned start, count, start_offset;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
				      (char*)ib.user_buffer + start_offset,
				      &ib.offset, &ib.buffer);
			/* info->start will be added by the drawing code */
			ib.offset -= start_offset;
		}
	}

	if (info->indexed && r600_resource(ib.buffer)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_INV_TC_L2;
		r600_resource(ib.buffer)->TC_L2_dirty = false;
	}

	/* Check flush flags. */
	if (sctx->b.flags)
		sctx->atoms.s.cache_flush->dirty = true;

	si_need_cs_space(sctx, 0, TRUE);

	/* Emit states. */
	for (i = 0; i < SI_NUM_ATOMS(sctx); i++) {
		if (sctx->atoms.array[i]->dirty) {
			sctx->atoms.array[i]->emit(&sctx->b, sctx->atoms.array[i]);
			sctx->atoms.array[i]->dirty = false;
		}
	}

	si_pm4_emit_dirty(sctx);
	si_emit_rasterizer_prim_state(sctx, info->mode);
	si_emit_draw_registers(sctx, info, &ib);
	si_emit_draw_packets(sctx, info, &ib);

	if (sctx->screen->b.trace_bo) {
		si_trace_emit(sctx);
	}

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if (sctx->b.family == CHIP_HAWAII &&
	    (sctx->b.streamout.streamout_enabled ||
	     sctx->b.streamout.prims_gen_query_enabled)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}

	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture *)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
}
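
/* When the screen allocates a trace buffer (via a debug option),
 * si_trace_emit writes the current CS offset after each draw so a GPU hang
 * can be narrowed down to the last packet the CP actually completed. */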
void si_trace_emit(struct si_context *sctx)
{
	struct si_screen *sscreen = sctx->screen;
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint64_t va;

	va = sscreen->b.trace_bo->gpu_address;
	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sscreen->b.trace_bo,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
		    PKT3_WRITE_DATA_WR_CONFIRM |
		    PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
	radeon_emit(cs, va & 0xFFFFFFFFUL);
	radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
	radeon_emit(cs, cs->cdw);
	radeon_emit(cs, sscreen->b.cs_count);
}