/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "si_pipe.h"
#include "sid.h"
#include "radeon/r600_cs.h"
/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define R600_CP_DMA_SYNC	(1 << 0) /* R600+ */

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define SI_CP_DMA_RAW_WAIT	(1 << 1) /* SI+ */
#define CIK_CP_DMA_USE_L2	(1 << 2)
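/* CIK_CP_DMA_USE_L2 routes the CP DMA read/write through the TC L2 cache on
 * CIK+. It is set for shader-coherent transfers (see get_tc_l2_flag below),
 * and the destination is then marked TC_L2_dirty so that L2 can be written
 * back before clients that bypass it read the buffer. */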
/* Emit a CP DMA packet to do a copy from one buffer to another.
 * The size must fit in bits [20:0].
 */
static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
				       uint64_t dst_va, uint64_t src_va,
				       unsigned size, unsigned flags)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
	uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM(1) : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
	uint32_t sel = flags & CIK_CP_DMA_USE_L2 ?
			   S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) |
			   S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;

	assert(size);
	assert((size & ((1<<21)-1)) == size);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag | sel);	/* CP_SYNC [31] */
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);		/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | wr_confirm | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | wr_confirm | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (sync_flag) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}
}
/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
					uint64_t dst_va, unsigned size,
					uint32_t clear_value, unsigned flags,
					enum r600_coherency coher)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
	uint32_t wr_confirm = !(flags & R600_CP_DMA_SYNC) ? S_414_DISABLE_WR_CONFIRM(1) : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
	uint32_t dst_sel = flags & CIK_CP_DMA_USE_L2 ? S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;

	assert(size);
	assert((size & ((1<<21)-1)) == size);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag | dst_sel | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, 0);			/* unused (SRC_ADDR_HI slot) */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | wr_confirm | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, sync_flag | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | wr_confirm | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}

	/* See "copy_buffer" for explanation. */
	if (coher == R600_COHERENCY_SHADER && sync_flag) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}
}
static unsigned get_flush_flags(struct si_context *sctx, enum r600_coherency coher)
{
	switch (coher) {
	default:
	case R600_COHERENCY_NONE:
		return 0;
	case R600_COHERENCY_SHADER:
		return SI_CONTEXT_INV_SMEM_L1 |
		       SI_CONTEXT_INV_VMEM_L1 |
		       (sctx->b.chip_class == SI ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
	case R600_COHERENCY_CB_META:
		return SI_CONTEXT_FLUSH_AND_INV_CB |
		       SI_CONTEXT_FLUSH_AND_INV_CB_META;
	}
}
static unsigned get_tc_l2_flag(struct si_context *sctx, enum r600_coherency coher)
{
	return coher == R600_COHERENCY_SHADER &&
	       sctx->b.chip_class >= CIK ? CIK_CP_DMA_USE_L2 : 0;
}
static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
			      struct pipe_resource *src, unsigned byte_count,
			      uint64_t remaining_size, unsigned *flags)
{
	/* Count memory usage so that need_cs_space can take it into account. */
	r600_context_add_resource_size(&sctx->b.b, dst);
	if (src)
		r600_context_add_resource_size(&sctx->b.b, src);

	si_need_cs_space(sctx);

	/* This must be done after need_cs_space. */
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
				  (struct r600_resource*)dst,
				  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
	if (src)
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource*)src,
					  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);

	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (sctx->b.flags) {
		si_emit_cache_flush(sctx, NULL);
		*flags |= SI_CP_DMA_RAW_WAIT;
	}

	/* Do the synchronization after the last dma, so that all data
	 * is written to memory.
	 */
	if (byte_count == remaining_size)
		*flags |= R600_CP_DMA_SYNC;
}
/* Alignment for optimal performance. */
#define CP_DMA_ALIGNMENT	32
/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT	((1 << 21) - CP_DMA_ALIGNMENT)
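/* BYTE_COUNT occupies bits [20:0] of the packet, so at most (1 << 21) - 1
 * bytes fit in one packet; rounding that down to CP_DMA_ALIGNMENT keeps every
 * full-size chunk aligned, so the addresses of subsequent chunks don't hit
 * the misalignment slowdown worked around in si_copy_buffer(). */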
static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
			    uint64_t offset, uint64_t size, unsigned value,
			    enum r600_coherency coher)
{
	struct si_context *sctx = (struct si_context*)ctx;
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
	unsigned flush_flags = get_flush_flags(sctx, coher);

	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
		       offset + size);

	/* Fallback for unaligned clears. */
	if (offset % 4 != 0 || size % 4 != 0) {
		uint8_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->buf,
						      sctx->b.gfx.cs,
						      PIPE_TRANSFER_WRITE);
		map += offset;
		for (uint64_t i = 0; i < size; i++) {
			unsigned byte_within_dword = (offset + i) % 4;
			*map++ = (value >> (byte_within_dword * 8)) & 0xff;
		}
		return;
	}

	uint64_t va = r600_resource(dst)->gpu_address + offset;

	/* Flush the caches. */
	sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

	while (size) {
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned dma_flags = tc_l2_flag;

		si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, &dma_flags);

		/* Emit the clear packet. */
		si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value,
					    dma_flags, coher);

		size -= byte_count;
		va += byte_count;
	}

	if (tc_l2_flag)
		r600_resource(dst)->TC_L2_dirty = true;
}
/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the CP DMA alignment.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size)
{
	uint64_t va;
	unsigned dma_flags = 0;
	unsigned scratch_size = CP_DMA_ALIGNMENT * 2;

	assert(size < CP_DMA_ALIGNMENT);

	/* Use the scratch buffer as the dummy buffer. The 3D engine should be
	 * idle at this point.
	 */
	if (!sctx->scratch_buffer ||
	    sctx->scratch_buffer->b.b.width0 < scratch_size) {
		r600_resource_reference(&sctx->scratch_buffer, NULL);
		sctx->scratch_buffer =
			si_resource_create_custom(&sctx->screen->b.b,
						  PIPE_USAGE_DEFAULT,
						  scratch_size);
		if (!sctx->scratch_buffer)
			return;
		sctx->emit_scratch_reloc = true;
	}

	si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
			  &sctx->scratch_buffer->b.b, size, size, &dma_flags);

	va = sctx->scratch_buffer->gpu_address;
	si_emit_cp_dma_copy_buffer(sctx, va, va + CP_DMA_ALIGNMENT, size,
				   dma_flags);
}
void si_copy_buffer(struct si_context *sctx,
		    struct pipe_resource *dst, struct pipe_resource *src,
		    uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
	uint64_t main_dst_offset, main_src_offset;
	unsigned skipped_size = 0;
	unsigned realign_size = 0;
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, R600_COHERENCY_SHADER);
	unsigned flush_flags = get_flush_flags(sctx, R600_COHERENCY_SHADER);

	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	/* The workarounds aren't needed on Fiji and beyond. */
	if (sctx->b.family <= CHIP_CARRIZO ||
	    sctx->b.family == CHIP_STONEY) {
		/* If the size is not aligned, we must add a dummy copy at the end
		 * just to align the internal counter. Otherwise, the DMA engine
		 * would slow down by an order of magnitude for following copies.
		 */
		if (size % CP_DMA_ALIGNMENT)
			realign_size = CP_DMA_ALIGNMENT - (size % CP_DMA_ALIGNMENT);

		/* If the copy begins unaligned, we must start copying from the next
		 * aligned block and the skipped part should be copied after everything
		 * else has been copied. Only the src alignment matters, not dst.
		 */
		if (src_offset % CP_DMA_ALIGNMENT) {
			skipped_size = CP_DMA_ALIGNMENT - (src_offset % CP_DMA_ALIGNMENT);
			/* The main part will be skipped if the size is too small. */
			skipped_size = MIN2(skipped_size, size);
			size -= skipped_size;
		}
	}

	/* Flush the caches. */
	sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

	/* This is the main part doing the copying. Src is always aligned. */
	main_dst_offset = dst_offset + skipped_size;
	main_src_offset = src_offset + skipped_size;

	while (size) {
		unsigned dma_flags = tc_l2_flag;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);

		si_cp_dma_prepare(sctx, dst, src, byte_count,
				  size + skipped_size + realign_size,
				  &dma_flags);

		si_emit_cp_dma_copy_buffer(sctx, main_dst_offset, main_src_offset,
					   byte_count, dma_flags);

		size -= byte_count;
		main_src_offset += byte_count;
		main_dst_offset += byte_count;
	}

	/* Copy the part we skipped because src wasn't aligned. */
	if (skipped_size) {
		unsigned dma_flags = tc_l2_flag;

		si_cp_dma_prepare(sctx, dst, src, skipped_size,
				  skipped_size + realign_size,
				  &dma_flags);

		si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset,
					   skipped_size, dma_flags);
	}

	/* Finally, realign the engine if the size wasn't aligned. */
	if (realign_size)
		si_cp_dma_realign_engine(sctx, realign_size);

	if (tc_l2_flag)
		r600_resource(dst)->TC_L2_dirty = true;
}
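/* Only clear_buffer is hooked into the context vtable below; si_copy_buffer
 * above is non-static and is called directly elsewhere in the driver. */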
void si_init_cp_dma_functions(struct si_context *sctx)
{
	sctx->b.clear_buffer = si_clear_buffer;
}