/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "si_pipe.h"
#include "sid.h"
#include "radeon/r600_cs.h"
/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC		(1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT		(1 << 1)
#define CP_DMA_USE_L2		(1 << 2) /* CIK+ */
#define CP_DMA_CLEAR		(1 << 3)
/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
	unsigned max = sctx->b.chip_class >= GFX9 ? S_414_BYTE_COUNT_GFX9(~0u)
						  : S_414_BYTE_COUNT_GFX6(~0u);

	/* make it aligned for optimal performance */
	return max & ~(SI_CPDMA_ALIGNMENT - 1);
}
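
/* Illustrative sketch (not part of the driver): assuming SI_CPDMA_ALIGNMENT
 * is 32 and the GFX6 byte-count field occupies bits [20:0], the mask above
 * rounds the field maximum down to a multiple of the CP DMA alignment:
 *
 *	max  = S_414_BYTE_COUNT_GFX6(~0u);	// 0x1fffff
 *	max &= ~(32 - 1);			// 0x1fffe0 bytes per packet
 *
 * Larger operations are split into chunks of this size by the loops below.
 */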
/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set,
 * src_va is a 32-bit clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
			   uint64_t src_va, unsigned size, unsigned flags,
			   enum r600_coherency coher)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint32_t header = 0, command = 0;

	assert(size <= cp_dma_max_byte_count(sctx));

	if (sctx->b.chip_class >= GFX9)
		command |= S_414_BYTE_COUNT_GFX9(size);
	else
		command |= S_414_BYTE_COUNT_GFX6(size);

	/* Sync flags. */
	if (flags & CP_DMA_SYNC) {
		header |= S_411_CP_SYNC(1);
	} else {
		if (sctx->b.chip_class >= GFX9)
			command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
		else
			command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
	}

	if (flags & CP_DMA_RAW_WAIT)
		command |= S_414_RAW_WAIT(1);

	/* Src and dst flags. */
	if (sctx->b.chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) &&
	    src_va == dst_va)
		header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);

	if (flags & CP_DMA_CLEAR)
		header |= S_411_SRC_SEL(V_411_DATA);
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, header);
		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);	/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);	/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, command);
	} else {
		header |= S_411_SRC_ADDR_HI(src_va >> 32);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, header);	/* SRC_ADDR_HI [15:0] + flags. */
		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
		radeon_emit(cs, command);
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (coher == R600_COHERENCY_SHADER && flags & CP_DMA_SYNC) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}
}
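
/* Illustrative sketch (not part of the driver): a hypothetical 64-byte fill
 * would be emitted as a single clear packet, with src_va carrying the 32-bit
 * clear value and CP_DMA_SYNC marking it as the last packet:
 *
 *	si_emit_cp_dma(sctx, dst_va, 0xdeadbeef, 64,
 *		       CP_DMA_CLEAR | CP_DMA_SYNC, R600_COHERENCY_SHADER);
 */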
static unsigned get_flush_flags(struct si_context *sctx, enum r600_coherency coher)
{
	switch (coher) {
	default:
	case R600_COHERENCY_NONE:
		return 0;
	case R600_COHERENCY_SHADER:
		return SI_CONTEXT_INV_SMEM_L1 |
		       SI_CONTEXT_INV_VMEM_L1 |
		       (sctx->b.chip_class == SI ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
	case R600_COHERENCY_CB_META:
		return SI_CONTEXT_FLUSH_AND_INV_CB;
	}
}
static unsigned get_tc_l2_flag(struct si_context *sctx, enum r600_coherency coher)
{
	return coher == R600_COHERENCY_SHADER &&
	       sctx->b.chip_class >= CIK ? CP_DMA_USE_L2 : 0;
}
static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
			      struct pipe_resource *src, unsigned byte_count,
			      uint64_t remaining_size, unsigned user_flags,
			      bool *is_first, unsigned *packet_flags)
{
	/* Fast exit for a CPDMA prefetch. */
	if ((user_flags & SI_CPDMA_SKIP_ALL) == SI_CPDMA_SKIP_ALL) {
		*is_first = false;
		return;
	}

	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		/* Count memory usage in so that need_cs_space can take it into account. */
		r600_context_add_resource_size(&sctx->b.b, dst);
		if (src)
			r600_context_add_resource_size(&sctx->b.b, src);
	}

	if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
		si_need_cs_space(sctx);

	/* This must be done after need_cs_space. */
	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource*)dst,
					  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
		if (src)
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
						  (struct r600_resource*)src,
						  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
	}

	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->b.flags)
		si_emit_cache_flush(sctx);

	if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first)
		*packet_flags |= CP_DMA_RAW_WAIT;

	*is_first = false;

	/* Do the synchronization after the last dma, so that all data
	 * is written to memory.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_SYNC_AFTER) &&
	    byte_count == remaining_size)
		*packet_flags |= CP_DMA_SYNC;
}
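
/* Illustrative sketch (not part of the driver): callers drive a chunk loop
 * in which si_cp_dma_prepare picks the per-packet flags. Only the first
 * packet gets CP_DMA_RAW_WAIT and only the last one gets CP_DMA_SYNC:
 *
 *	bool is_first = true;
 *	while (size) {
 *		unsigned count = MIN2(size, cp_dma_max_byte_count(sctx));
 *		unsigned flags = 0;
 *
 *		si_cp_dma_prepare(sctx, dst, src, count, size, 0,
 *				  &is_first, &flags);
 *		si_emit_cp_dma(sctx, dst_va, src_va, count, flags,
 *			       R600_COHERENCY_SHADER);
 *		size -= count;
 *		dst_va += count;
 *		src_va += count;
 *	}
 */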
static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
			    uint64_t offset, uint64_t size, unsigned value,
			    enum r600_coherency coher)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct radeon_winsys *ws = sctx->b.ws;
	struct r600_resource *rdst = r600_resource(dst);
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
	unsigned flush_flags = get_flush_flags(sctx, coher);
	uint64_t dma_clear_size;
	bool is_first = true;

	if (!size)
		return;

	dma_clear_size = size & ~3llu;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * it. */
	util_range_add(&rdst->valid_buffer_range, offset,
		       offset + dma_clear_size);

	/* dma_clear_buffer can use clear_buffer on failure. Make sure that
	 * doesn't happen. We don't want an infinite recursion: */
	if (sctx->b.dma.cs &&
	    !(dst->flags & PIPE_RESOURCE_FLAG_SPARSE) &&
	    /* CP DMA is very slow. Always use SDMA for big clears. This
	     * alone improves DeusEx:MD performance by 70%. */
	    (size > 128 * 1024 ||
	     /* Buffers not used by the GFX IB yet will be cleared by SDMA.
	      * This happens to move most buffer clears to SDMA, including
	      * DCC and CMASK clears, because pipe->clear clears them before
	      * si_emit_framebuffer_state (in a draw call) adds them.
	      * For example, DeusEx:MD has 21 buffer clears per frame and all
	      * of them are moved to SDMA thanks to this. */
	     !ws->cs_is_buffer_referenced(sctx->b.gfx.cs, rdst->buf,
					  RADEON_USAGE_READWRITE))) {
		sctx->b.dma_clear_buffer(ctx, dst, offset, dma_clear_size, value);

		offset += dma_clear_size;
		size -= dma_clear_size;
	} else if (dma_clear_size >= 4) {
		uint64_t va = rdst->gpu_address + offset;

		offset += dma_clear_size;
		size -= dma_clear_size;

		/* Flush the caches. */
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

		while (dma_clear_size) {
			unsigned byte_count = MIN2(dma_clear_size,
						   cp_dma_max_byte_count(sctx));
			unsigned dma_flags = tc_l2_flag | CP_DMA_CLEAR;

			si_cp_dma_prepare(sctx, dst, NULL, byte_count,
					  dma_clear_size, 0,
					  &is_first, &dma_flags);

			/* Emit the clear packet. */
			si_emit_cp_dma(sctx, va, value, byte_count, dma_flags,
				       coher);

			dma_clear_size -= byte_count;
			va += byte_count;
		}

		if (tc_l2_flag)
			rdst->TC_L2_dirty = true;

		/* If it's not a framebuffer fast clear... */
		if (coher == R600_COHERENCY_SHADER)
			sctx->b.num_cp_dma_calls++;
	}

	if (size) {
		/* Handle non-dword alignment.
		 *
		 * This function is called for embedded texture metadata clears,
		 * but those should always be properly aligned. */
		assert(dst->target == PIPE_BUFFER);
		assert(size < 4);

		pipe_buffer_write(ctx, dst, offset, size, &value);
	}
}
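
/* Illustrative sketch (not part of the driver): state trackers reach this
 * through the r600_common_context hook installed in
 * si_init_cp_dma_functions below, e.g. to zero a whole buffer with shader
 * coherency:
 *
 *	sctx->b.clear_buffer(ctx, buf, 0, buf->width0, 0,
 *			     R600_COHERENCY_SHADER);
 */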
/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the CP DMA alignment.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
				     unsigned user_flags, bool *is_first)
{
	uint64_t va;
	unsigned dma_flags = 0;
	unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;

	assert(size < SI_CPDMA_ALIGNMENT);

	/* Use the scratch buffer as the dummy buffer. The 3D engine should be
	 * idle at this point.
	 */
	if (!sctx->scratch_buffer ||
	    sctx->scratch_buffer->b.b.width0 < scratch_size) {
		r600_resource_reference(&sctx->scratch_buffer, NULL);
		sctx->scratch_buffer = (struct r600_resource*)
			r600_aligned_buffer_create(&sctx->screen->b.b,
						   R600_RESOURCE_FLAG_UNMAPPABLE,
						   PIPE_USAGE_DEFAULT,
						   scratch_size, 256);
		if (!sctx->scratch_buffer)
			return;

		si_mark_atom_dirty(sctx, &sctx->scratch_state);
	}

	si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
			  &sctx->scratch_buffer->b.b, size, size, user_flags,
			  is_first, &dma_flags);

	va = sctx->scratch_buffer->gpu_address;
	si_emit_cp_dma(sctx, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags,
		       R600_COHERENCY_SHADER);
}
/**
 * Do memcpy between buffers using CP DMA.
 *
 * \param user_flags	bitmask of SI_CPDMA_*
 */
void si_copy_buffer(struct si_context *sctx,
		    struct pipe_resource *dst, struct pipe_resource *src,
		    uint64_t dst_offset, uint64_t src_offset, unsigned size,
		    unsigned user_flags)
{
	uint64_t main_dst_offset, main_src_offset;
	unsigned skipped_size = 0;
	unsigned realign_size = 0;
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, R600_COHERENCY_SHADER);
	unsigned flush_flags = get_flush_flags(sctx, R600_COHERENCY_SHADER);
	bool is_first = true;

	if (!size)
		return;

	if (dst != src || dst_offset != src_offset) {
		/* Mark the buffer range of destination as valid (initialized),
		 * so that transfer_map knows it should wait for the GPU when mapping
		 * it. */
		util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
			       dst_offset + size);
	}

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	/* The workarounds aren't needed on Fiji and beyond. */
	if (sctx->b.family <= CHIP_CARRIZO ||
	    sctx->b.family == CHIP_STONEY) {
		/* If the size is not aligned, we must add a dummy copy at the end
		 * just to align the internal counter. Otherwise, the DMA engine
		 * would slow down by an order of magnitude for following copies.
		 */
		if (size % SI_CPDMA_ALIGNMENT)
			realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

		/* If the copy begins unaligned, we must start copying from the next
		 * aligned block and the skipped part should be copied after everything
		 * else has been copied. Only the src alignment matters, not dst.
		 */
		if (src_offset % SI_CPDMA_ALIGNMENT) {
			skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
			/* The main part will be skipped if the size is too small. */
			skipped_size = MIN2(skipped_size, size);
			size -= skipped_size;
		}
	}

	/* Flush the caches. */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC))
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

	/* This is the main part doing the copying. Src is always aligned. */
	main_dst_offset = dst_offset + skipped_size;
	main_src_offset = src_offset + skipped_size;

	while (size) {
		unsigned dma_flags = tc_l2_flag;
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));

		si_cp_dma_prepare(sctx, dst, src, byte_count,
				  size + skipped_size + realign_size,
				  user_flags, &is_first, &dma_flags);

		si_emit_cp_dma(sctx, main_dst_offset, main_src_offset,
			       byte_count, dma_flags, R600_COHERENCY_SHADER);

		size -= byte_count;
		main_src_offset += byte_count;
		main_dst_offset += byte_count;
	}

	/* Copy the part we skipped because src wasn't aligned. */
	if (skipped_size) {
		unsigned dma_flags = tc_l2_flag;

		si_cp_dma_prepare(sctx, dst, src, skipped_size,
				  skipped_size + realign_size, user_flags,
				  &is_first, &dma_flags);

		si_emit_cp_dma(sctx, dst_offset, src_offset, skipped_size,
			       dma_flags, R600_COHERENCY_SHADER);
	}

	/* Finally, realign the engine if the size wasn't aligned. */
	if (realign_size)
		si_cp_dma_realign_engine(sctx, realign_size, user_flags,
					 &is_first);

	if (tc_l2_flag)
		r600_resource(dst)->TC_L2_dirty = true;

	/* If it's not a prefetch... */
	if (dst_offset != src_offset)
		sctx->b.num_cp_dma_calls++;
}
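
/* Illustrative sketch (not part of the driver): assuming SI_CPDMA_ALIGNMENT
 * is 32, a 100-byte copy whose src_offset is 8 bytes past an aligned block
 * is partitioned on pre-Fiji parts as:
 *
 *	realign_size = 32 - (100 % 32) = 28	// dummy copy into scratch
 *	skipped_size = 32 - (8 % 32)   = 24	// copied after the main part
 *	main size    = 100 - 24        = 76
 *
 * 76 + 24 + 28 = 128 bytes total, so the engine's internal counter ends the
 * sequence 32-byte aligned again.
 */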
void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
			      uint64_t offset, unsigned size)
{
	assert(sctx->b.chip_class >= CIK);

	si_copy_buffer(sctx, buf, buf, offset, offset, size, SI_CPDMA_SKIP_ALL);
}
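
/* Illustrative note (not part of the driver): a prefetch is just a copy with
 * dst == src and all synchronization skipped. On GFX9, si_emit_cp_dma above
 * detects src_va == dst_va and sets DSL_SEL to V_411_NOWHERE, so the data is
 * pulled into TC L2 without being written back anywhere. For example:
 *
 *	cik_prefetch_TC_L2_async(sctx, buf, 0, buf->width0);
 */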
static void cik_prefetch_shader_async(struct si_context *sctx,
				      struct si_pm4_state *state)
{
	struct pipe_resource *bo = &state->bo[0]->b.b;

	assert(state->nbo == 1);

	cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
}
static void cik_emit_prefetch_L2(struct si_context *sctx, struct r600_atom *atom)
{
	/* Prefetch shaders and VBO descriptors to TC L2. */
	if (si_pm4_state_changed(sctx, ls))
		cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
	if (si_pm4_state_changed(sctx, hs))
		cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
	if (si_pm4_state_changed(sctx, es))
		cik_prefetch_shader_async(sctx, sctx->queued.named.es);
	if (si_pm4_state_changed(sctx, gs))
		cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
	if (si_pm4_state_changed(sctx, vs))
		cik_prefetch_shader_async(sctx, sctx->queued.named.vs);

	/* Vertex buffer descriptors are uploaded uncached, so prefetch
	 * them right after the VS binary. */
	if (sctx->vertex_buffer_pointer_dirty) {
		cik_prefetch_TC_L2_async(sctx, &sctx->vertex_buffers.buffer->b.b,
					 sctx->vertex_buffers.buffer_offset,
					 sctx->vertex_elements->desc_list_byte_size);
	}

	if (si_pm4_state_changed(sctx, ps))
		cik_prefetch_shader_async(sctx, sctx->queued.named.ps);
}
void si_init_cp_dma_functions(struct si_context *sctx)
{
	sctx->b.clear_buffer = si_clear_buffer;

	si_init_atom(sctx, &sctx->prefetch_L2, &sctx->atoms.s.prefetch_L2,
		     cik_emit_prefetch_L2);
}