radeonsi: add SI_CPDMA_SKIP_BO_LIST_UPDATE
[mesa.git] / src/gallium/drivers/radeonsi/si_cp_dma.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "sid.h"
#include "radeon/r600_cs.h"

/* Alignment for optimal performance. */
#define CP_DMA_ALIGNMENT	32
/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT	((1 << 21) - CP_DMA_ALIGNMENT)
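/* Note: the packet byte count field is bits [20:0], so the hard limit is
 * (1 << 21) - 1. Subtracting CP_DMA_ALIGNMENT instead presumably keeps every
 * full-size chunk a multiple of the optimal alignment, i.e.
 * 2097152 - 32 = 2097120 bytes per packet. */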

/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC		(1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT		(1 << 1)
#define CP_DMA_USE_L2		(1 << 2) /* CIK+ */
#define CP_DMA_CLEAR		(1 << 3)
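/* In a multi-packet operation, si_cp_dma_prepare below typically sets
 * CP_DMA_RAW_WAIT on the first packet and CP_DMA_SYNC on the last one. */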

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set,
 * src_va is a 32-bit clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
			   uint64_t src_va, unsigned size, unsigned flags,
			   enum r600_coherency coher)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint32_t header = 0, command = S_414_BYTE_COUNT(size);

	assert(size);
	assert(size <= CP_DMA_MAX_BYTE_COUNT);

	/* Sync flags. */
	if (flags & CP_DMA_SYNC)
		header |= S_411_CP_SYNC(1);
	else
		command |= S_414_DISABLE_WR_CONFIRM(1);

	if (flags & CP_DMA_RAW_WAIT)
		command |= S_414_RAW_WAIT(1);

	/* Src and dst flags. */
	if (flags & CP_DMA_USE_L2)
		header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);

	if (flags & CP_DMA_CLEAR)
		header |= S_411_SRC_SEL(V_411_DATA);
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, header);
		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);	/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);	/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, command);
	} else {
		header |= S_411_SRC_ADDR_HI(src_va >> 32);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, header);	/* SRC_ADDR_HI [15:0] + flags. */
		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
		radeon_emit(cs, command);
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (coher == R600_COHERENCY_SHADER && flags & CP_DMA_SYNC) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}
}

static unsigned get_flush_flags(struct si_context *sctx, enum r600_coherency coher)
{
	switch (coher) {
	default:
	case R600_COHERENCY_NONE:
		return 0;
	case R600_COHERENCY_SHADER:
		return SI_CONTEXT_INV_SMEM_L1 |
		       SI_CONTEXT_INV_VMEM_L1 |
		       (sctx->b.chip_class == SI ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
	case R600_COHERENCY_CB_META:
		return SI_CONTEXT_FLUSH_AND_INV_CB |
		       SI_CONTEXT_FLUSH_AND_INV_CB_META;
	}
}

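/* On CIK+, shader-coherent CP DMA traffic goes through TC L2 (CP_DMA_USE_L2),
 * which is presumably why get_flush_flags above only needs to invalidate
 * global L2 on SI, where that path doesn't exist. */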
static unsigned get_tc_l2_flag(struct si_context *sctx, enum r600_coherency coher)
{
	return coher == R600_COHERENCY_SHADER &&
	       sctx->b.chip_class >= CIK ? CP_DMA_USE_L2 : 0;
}

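/* A sketch of the user_flags contract, inferred from the code below:
 * SI_CPDMA_SKIP_BO_LIST_UPDATE may only be set when the caller guarantees
 * that dst (and src, if any) are already in the gfx buffer list, since it
 * skips both the resource-size accounting and radeon_add_to_buffer_list.
 * The other SI_CPDMA_SKIP_* flags likewise shift the corresponding
 * responsibility (CS space checks, cache flushes, CP DMA syncs) to the
 * caller; see cik_prefetch_TC_L2_async for a user of most of them. */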
static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
			      struct pipe_resource *src, unsigned byte_count,
			      uint64_t remaining_size, unsigned user_flags,
			      bool *is_first, unsigned *packet_flags)
{
	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		/* Count memory usage so that need_cs_space can take it
		 * into account. */
		r600_context_add_resource_size(&sctx->b.b, dst);
		if (src)
			r600_context_add_resource_size(&sctx->b.b, src);
	}

	if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
		si_need_cs_space(sctx);

	/* This must be done after need_cs_space. */
	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource*)dst,
					  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
		if (src)
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
						  (struct r600_resource*)src,
						  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
	}

	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->b.flags)
		si_emit_cache_flush(sctx);

	if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first)
		*packet_flags |= CP_DMA_RAW_WAIT;

	*is_first = false;

	/* Do the synchronization after the last dma, so that all data
	 * is written to memory.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_SYNC_AFTER) &&
	    byte_count == remaining_size)
		*packet_flags |= CP_DMA_SYNC;
}

static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
			    uint64_t offset, uint64_t size, unsigned value,
			    enum r600_coherency coher)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct radeon_winsys *ws = sctx->b.ws;
	struct r600_resource *rdst = r600_resource(dst);
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
	unsigned flush_flags = get_flush_flags(sctx, coher);
	bool is_first = true;

	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, offset,
		       offset + size);

	/* Fallback for unaligned clears. */
	if (offset % 4 != 0 || size % 4 != 0) {
		uint8_t *map = r600_buffer_map_sync_with_rings(&sctx->b, rdst,
							       PIPE_TRANSFER_WRITE);
		map += offset;
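		/* Write the 32-bit clear value byte by byte, picking for each
		 * destination byte the lane it would occupy in an aligned
		 * dword store; e.g. with offset == 2, the bytes written are
		 * value[23:16], value[31:24], value[7:0], value[15:8], ... */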
		for (uint64_t i = 0; i < size; i++) {
			unsigned byte_within_dword = (offset + i) % 4;
			*map++ = (value >> (byte_within_dword * 8)) & 0xff;
		}
		return;
	}

	/* dma_clear_buffer can use clear_buffer on failure. Make sure that
	 * doesn't happen, because we don't want an infinite recursion. */
	if (sctx->b.dma.cs &&
	    /* CP DMA is very slow. Always use SDMA for big clears. This
	     * alone improves DeusEx:MD performance by 70%. */
	    (size > 128 * 1024 ||
	     /* Buffers not used by the GFX IB yet will be cleared by SDMA.
	      * This happens to move most buffer clears to SDMA, including
	      * DCC and CMASK clears, because pipe->clear clears them before
	      * si_emit_framebuffer_state (in a draw call) adds them.
	      * For example, DeusEx:MD has 21 buffer clears per frame and all
	      * of them are moved to SDMA thanks to this. */
	     !ws->cs_is_buffer_referenced(sctx->b.gfx.cs, rdst->buf,
					  RADEON_USAGE_READWRITE))) {
		sctx->b.dma_clear_buffer(ctx, dst, offset, size, value);
		return;
	}

	uint64_t va = rdst->gpu_address + offset;

	/* Flush the caches. */
	sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

	while (size) {
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned dma_flags = tc_l2_flag | CP_DMA_CLEAR;

		si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, 0,
				  &is_first, &dma_flags);

		/* Emit the clear packet. */
		si_emit_cp_dma(sctx, va, value, byte_count, dma_flags, coher);

		size -= byte_count;
		va += byte_count;
	}

	if (tc_l2_flag)
		rdst->TC_L2_dirty = true;

	/* If it's not a framebuffer fast clear... */
	if (coher == R600_COHERENCY_SHADER)
		sctx->b.num_cp_dma_calls++;
}

/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the next multiple of CP_DMA_ALIGNMENT.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
				     unsigned user_flags, bool *is_first)
{
	uint64_t va;
	unsigned dma_flags = 0;
	unsigned scratch_size = CP_DMA_ALIGNMENT * 2;

	assert(size < CP_DMA_ALIGNMENT);

	/* Use the scratch buffer as the dummy buffer. The 3D engine should be
	 * idle at this point.
	 */
	if (!sctx->scratch_buffer ||
	    sctx->scratch_buffer->b.b.width0 < scratch_size) {
		r600_resource_reference(&sctx->scratch_buffer, NULL);
		sctx->scratch_buffer = (struct r600_resource*)
			pipe_buffer_create(&sctx->screen->b.b, 0,
					   PIPE_USAGE_DEFAULT, scratch_size);
		if (!sctx->scratch_buffer)
			return;
		sctx->emit_scratch_reloc = true;
	}

	si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
			  &sctx->scratch_buffer->b.b, size, size, user_flags,
			  is_first, &dma_flags);
	va = sctx->scratch_buffer->gpu_address;
	si_emit_cp_dma(sctx, va, va + CP_DMA_ALIGNMENT, size, dma_flags,
		       R600_COHERENCY_SHADER);
}

/**
 * Do memcpy between buffers using CP DMA.
 *
 * \param user_flags  bitmask of SI_CPDMA_*
 */
void si_copy_buffer(struct si_context *sctx,
		    struct pipe_resource *dst, struct pipe_resource *src,
		    uint64_t dst_offset, uint64_t src_offset, unsigned size,
		    unsigned user_flags)
{
	uint64_t main_dst_offset, main_src_offset;
	unsigned skipped_size = 0;
	unsigned realign_size = 0;
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, R600_COHERENCY_SHADER);
	unsigned flush_flags = get_flush_flags(sctx, R600_COHERENCY_SHADER);
	bool is_first = true;

	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	/* The workarounds aren't needed on Fiji and beyond. */
	if (sctx->b.family <= CHIP_CARRIZO ||
	    sctx->b.family == CHIP_STONEY) {
		/* If the size is not aligned, we must add a dummy copy at the end
		 * just to align the internal counter. Otherwise, the DMA engine
		 * would slow down by an order of magnitude for following copies.
		 */
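		/* For example, size == 100 would leave the counter 4 bytes
		 * past a 32-byte boundary, so realign_size would be
		 * 32 - (100 % 32) = 28 dummy bytes. */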
		if (size % CP_DMA_ALIGNMENT)
			realign_size = CP_DMA_ALIGNMENT - (size % CP_DMA_ALIGNMENT);

		/* If the copy begins unaligned, we must start copying from the next
		 * aligned block and the skipped part should be copied after everything
		 * else has been copied. Only the src alignment matters, not dst.
		 */
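		/* For example, src_offset % 32 == 8 would give
		 * skipped_size = 24: the main loop starts 24 bytes in, at the
		 * next 32-byte boundary, and those 24 bytes are copied by the
		 * trailing unaligned copy below. */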
		if (src_offset % CP_DMA_ALIGNMENT) {
			skipped_size = CP_DMA_ALIGNMENT - (src_offset % CP_DMA_ALIGNMENT);
			/* The main part will be skipped if the size is too small. */
			skipped_size = MIN2(skipped_size, size);
			size -= skipped_size;
		}
	}

	/* Flush the caches. */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC))
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

	/* This is the main part doing the copying. Src is always aligned. */
	main_dst_offset = dst_offset + skipped_size;
	main_src_offset = src_offset + skipped_size;

	while (size) {
		unsigned dma_flags = tc_l2_flag;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);

		si_cp_dma_prepare(sctx, dst, src, byte_count,
				  size + skipped_size + realign_size,
				  user_flags, &is_first, &dma_flags);

		si_emit_cp_dma(sctx, main_dst_offset, main_src_offset,
			       byte_count, dma_flags, R600_COHERENCY_SHADER);

		size -= byte_count;
		main_src_offset += byte_count;
		main_dst_offset += byte_count;
	}

	/* Copy the part we skipped because src wasn't aligned. */
	if (skipped_size) {
		unsigned dma_flags = tc_l2_flag;

		si_cp_dma_prepare(sctx, dst, src, skipped_size,
				  skipped_size + realign_size, user_flags,
				  &is_first, &dma_flags);

		si_emit_cp_dma(sctx, dst_offset, src_offset, skipped_size,
			       dma_flags, R600_COHERENCY_SHADER);
	}

	/* Finally, realign the engine if the size wasn't aligned. */
	if (realign_size)
		si_cp_dma_realign_engine(sctx, realign_size, user_flags,
					 &is_first);

	if (tc_l2_flag)
		r600_resource(dst)->TC_L2_dirty = true;

	/* If it's not a prefetch... */
	if (dst_offset != src_offset)
		sctx->b.num_cp_dma_calls++;
}

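/* Prefetch a buffer into TC L2 by "copying" it onto itself through the L2
 * cache. Because dst_offset == src_offset, si_copy_buffer above treats this
 * as a prefetch and doesn't count it in num_cp_dma_calls. */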
void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
			      uint64_t offset, unsigned size)
{
	assert(sctx->b.chip_class >= CIK);

	si_copy_buffer(sctx, buf, buf, offset, offset, size,
		       SI_CPDMA_SKIP_CHECK_CS_SPACE |
		       SI_CPDMA_SKIP_SYNC_AFTER |
		       SI_CPDMA_SKIP_SYNC_BEFORE |
		       SI_CPDMA_SKIP_GFX_SYNC);
}

void si_init_cp_dma_functions(struct si_context *sctx)
{
	sctx->b.clear_buffer = si_clear_buffer;
}