r600: fork and import gallium/radeon
src/gallium/drivers/radeonsi/si_cp_dma.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "sid.h"
#include "radeon/r600_cs.h"

/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC       (1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT   (1 << 1)
#define CP_DMA_USE_L2     (1 << 2) /* CIK+ */
#define CP_DMA_CLEAR      (1 << 3)

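/* Illustrative sketch (not called anywhere): a clear split into two packets
 * would set CP_DMA_CLEAR on both, but CP_DMA_SYNC only on the last one:
 *
 *    si_emit_cp_dma(sctx, va, value, size1, CP_DMA_CLEAR, coher);
 *    si_emit_cp_dma(sctx, va + size1, value, size2,
 *                   CP_DMA_CLEAR | CP_DMA_SYNC, coher);
 *
 * A packet that reads memory written by a previous CP DMA packet would add
 * CP_DMA_RAW_WAIT.
 */
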
/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
        unsigned max = sctx->b.chip_class >= GFX9 ?
                       S_414_BYTE_COUNT_GFX9(~0u) :
                       S_414_BYTE_COUNT_GFX6(~0u);

        /* make it aligned for optimal performance */
        return max & ~(SI_CPDMA_ALIGNMENT - 1);
}

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in the BYTE_COUNT field: bits [20:0] before
 * GFX9, bits [25:0] on GFX9. If CP_DMA_CLEAR is set, src_va is a 32-bit
 * clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
                           uint64_t src_va, unsigned size, unsigned flags,
                           enum r600_coherency coher)
{
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        uint32_t header = 0, command = 0;

        assert(size);
        assert(size <= cp_dma_max_byte_count(sctx));

        if (sctx->b.chip_class >= GFX9)
                command |= S_414_BYTE_COUNT_GFX9(size);
        else
                command |= S_414_BYTE_COUNT_GFX6(size);

        /* Sync flags. */
        if (flags & CP_DMA_SYNC)
                header |= S_411_CP_SYNC(1);
        else {
                if (sctx->b.chip_class >= GFX9)
                        command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
                else
                        command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
        }

        if (flags & CP_DMA_RAW_WAIT)
                command |= S_414_RAW_WAIT(1);

        /* Src and dst flags. */
        if (sctx->b.chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) &&
            src_va == dst_va)
                header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
        else if (flags & CP_DMA_USE_L2)
                header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);

        if (flags & CP_DMA_CLEAR)
                header |= S_411_SRC_SEL(V_411_DATA);
        else if (flags & CP_DMA_USE_L2)
                header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);

        if (sctx->b.chip_class >= CIK) {
                radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
                radeon_emit(cs, header);
                radeon_emit(cs, src_va);       /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */
                radeon_emit(cs, dst_va);       /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
                radeon_emit(cs, command);
        } else {
                header |= S_411_SRC_ADDR_HI(src_va >> 32);

                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, src_va);                  /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, header);                  /* SRC_ADDR_HI [15:0] + flags. */
                radeon_emit(cs, dst_va);                  /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
                radeon_emit(cs, command);
        }

        /* CP DMA is executed in ME, but index buffers are read by PFP.
         * This ensures that ME (CP DMA) is idle before PFP starts fetching
         * indices. If we wanted to execute CP DMA in PFP, this packet
         * should precede it.
         */
        if (coher == R600_COHERENCY_SHADER && flags & CP_DMA_SYNC) {
                radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
                radeon_emit(cs, 0);
        }
}

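/* Return the cache flush flags that callers must add to sctx->b.flags before
 * a CP DMA for the given coherency domain. Shader coherency invalidates the
 * scalar and vector L1 caches, plus L2 on SI, where CP DMA cannot go through
 * L2 (CP_DMA_USE_L2 is CIK+ only); CB metadata coherency flushes the CB
 * caches.
 */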
static unsigned get_flush_flags(struct si_context *sctx, enum r600_coherency coher)
{
        switch (coher) {
        default:
        case R600_COHERENCY_NONE:
                return 0;
        case R600_COHERENCY_SHADER:
                return SI_CONTEXT_INV_SMEM_L1 |
                       SI_CONTEXT_INV_VMEM_L1 |
                       (sctx->b.chip_class == SI ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
        case R600_COHERENCY_CB_META:
                return SI_CONTEXT_FLUSH_AND_INV_CB;
        }
}

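/* Return CP_DMA_USE_L2 if the transfer should go through the TC L2 cache:
 * shader coherency on CIK+ (where CP DMA can access L2 directly), and CB
 * metadata coherency on GFX9, where the CB is presumed to be coherent
 * through L2.
 */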
static unsigned get_tc_l2_flag(struct si_context *sctx, enum r600_coherency coher)
{
        if ((sctx->b.chip_class >= GFX9 && coher == R600_COHERENCY_CB_META) ||
            (sctx->b.chip_class >= CIK && coher == R600_COHERENCY_SHADER))
                return CP_DMA_USE_L2;

        return 0;
}

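/* Per-packet bookkeeping shared by all CP DMA users: account for memory
 * usage, reserve CS space, add the buffers to the buffer list, flush caches
 * and wait for previous CP DMA operations before the first packet, and
 * request a write confirmation (CP_DMA_SYNC) on the last packet. Individual
 * steps can be skipped with the SI_CPDMA_SKIP_* bits in user_flags.
 */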
static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
                              struct pipe_resource *src, unsigned byte_count,
                              uint64_t remaining_size, unsigned user_flags,
                              bool *is_first, unsigned *packet_flags)
{
        /* Fast exit for a CPDMA prefetch. */
        if ((user_flags & SI_CPDMA_SKIP_ALL) == SI_CPDMA_SKIP_ALL) {
                *is_first = false;
                return;
        }

        if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
                /* Count memory usage so that need_cs_space can take it into account. */
                r600_context_add_resource_size(&sctx->b.b, dst);
                if (src)
                        r600_context_add_resource_size(&sctx->b.b, src);
        }

        if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
                si_need_cs_space(sctx);

        /* This must be done after need_cs_space. */
        if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                          (struct r600_resource*)dst,
                                          RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
                if (src)
                        radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                                  (struct r600_resource*)src,
                                                  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
        }

        /* Flush the caches for the first copy only.
         * Also wait for the previous CP DMA operations.
         */
        if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->b.flags)
                si_emit_cache_flush(sctx);

        if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first)
                *packet_flags |= CP_DMA_RAW_WAIT;

        *is_first = false;

        /* Do the synchronization after the last dma, so that all data
         * is written to memory.
         */
        if (!(user_flags & SI_CPDMA_SKIP_SYNC_AFTER) &&
            byte_count == remaining_size)
                *packet_flags |= CP_DMA_SYNC;
}

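/* Fill a buffer range with a repeated 32-bit value. Large clears of buffers
 * not yet referenced by the gfx IB are routed to SDMA; the rest goes through
 * CP DMA, and any unaligned tail (fewer than 4 bytes) is written with
 * pipe_buffer_write.
 */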
static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
                            uint64_t offset, uint64_t size, unsigned value,
                            enum r600_coherency coher)
{
        struct si_context *sctx = (struct si_context*)ctx;
        struct radeon_winsys *ws = sctx->b.ws;
        struct r600_resource *rdst = r600_resource(dst);
        unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
        unsigned flush_flags = get_flush_flags(sctx, coher);
        uint64_t dma_clear_size;
        bool is_first = true;

        if (!size)
                return;

        dma_clear_size = size & ~3ull;

        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(&rdst->valid_buffer_range, offset,
                       offset + dma_clear_size);

        /* dma_clear_buffer can use clear_buffer on failure. Make sure that
         * doesn't happen. We don't want an infinite recursion: */
        if (sctx->b.dma.cs &&
            !(dst->flags & PIPE_RESOURCE_FLAG_SPARSE) &&
            (offset % 4 == 0) &&
            /* CP DMA is very slow. Always use SDMA for big clears. This
             * alone improves DeusEx:MD performance by 70%. */
            (size > 128 * 1024 ||
             /* Buffers not used by the GFX IB yet will be cleared by SDMA.
              * This happens to move most buffer clears to SDMA, including
              * DCC and CMASK clears, because pipe->clear clears them before
              * si_emit_framebuffer_state (in a draw call) adds them.
              * For example, DeusEx:MD has 21 buffer clears per frame and all
              * of them are moved to SDMA thanks to this. */
             !ws->cs_is_buffer_referenced(sctx->b.gfx.cs, rdst->buf,
                                          RADEON_USAGE_READWRITE))) {
                sctx->b.dma_clear_buffer(ctx, dst, offset, dma_clear_size, value);

                offset += dma_clear_size;
                size -= dma_clear_size;
        } else if (dma_clear_size >= 4) {
                uint64_t va = rdst->gpu_address + offset;

                offset += dma_clear_size;
                size -= dma_clear_size;

                /* Flush the caches. */
                sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                                 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

                while (dma_clear_size) {
                        unsigned byte_count = MIN2(dma_clear_size, cp_dma_max_byte_count(sctx));
                        unsigned dma_flags = tc_l2_flag | CP_DMA_CLEAR;

                        si_cp_dma_prepare(sctx, dst, NULL, byte_count, dma_clear_size, 0,
                                          &is_first, &dma_flags);

                        /* Emit the clear packet. */
                        si_emit_cp_dma(sctx, va, value, byte_count, dma_flags, coher);

                        dma_clear_size -= byte_count;
                        va += byte_count;
                }

                if (tc_l2_flag)
                        rdst->TC_L2_dirty = true;

                /* If it's not a framebuffer fast clear... */
                if (coher == R600_COHERENCY_SHADER)
                        sctx->b.num_cp_dma_calls++;
        }

        if (size) {
                /* Handle non-dword alignment.
                 *
                 * This function is called for embedded texture metadata clears,
                 * but those should always be properly aligned. */
                assert(dst->target == PIPE_BUFFER);
                assert(size < 4);

                pipe_buffer_write(ctx, dst, offset, size, &value);
        }
}

/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Number of bytes needed to reach CP DMA alignment; must be
 *              less than SI_CPDMA_ALIGNMENT.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
                                     unsigned user_flags, bool *is_first)
{
        uint64_t va;
        unsigned dma_flags = 0;
        unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;

        assert(size < SI_CPDMA_ALIGNMENT);

        /* Use the scratch buffer as the dummy buffer. The 3D engine should be
         * idle at this point.
         */
        if (!sctx->scratch_buffer ||
            sctx->scratch_buffer->b.b.width0 < scratch_size) {
                r600_resource_reference(&sctx->scratch_buffer, NULL);
                sctx->scratch_buffer = (struct r600_resource*)
                        si_aligned_buffer_create(&sctx->screen->b.b,
                                                 R600_RESOURCE_FLAG_UNMAPPABLE,
                                                 PIPE_USAGE_DEFAULT,
                                                 scratch_size, 256);
                if (!sctx->scratch_buffer)
                        return;

                si_mark_atom_dirty(sctx, &sctx->scratch_state);
        }

        si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
                          &sctx->scratch_buffer->b.b, size, size, user_flags,
                          is_first, &dma_flags);

        va = sctx->scratch_buffer->gpu_address;
        si_emit_cp_dma(sctx, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags,
                       R600_COHERENCY_SHADER);
}

/**
 * Do memcpy between buffers using CP DMA.
 *
 * \param user_flags  bitmask of SI_CPDMA_*
 */
void si_copy_buffer(struct si_context *sctx,
                    struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size,
                    unsigned user_flags)
{
        uint64_t main_dst_offset, main_src_offset;
        unsigned skipped_size = 0;
        unsigned realign_size = 0;
        unsigned tc_l2_flag = get_tc_l2_flag(sctx, R600_COHERENCY_SHADER);
        unsigned flush_flags = get_flush_flags(sctx, R600_COHERENCY_SHADER);
        bool is_first = true;

        if (!size)
                return;

        if (dst != src || dst_offset != src_offset) {
                /* Mark the buffer range of destination as valid (initialized),
                 * so that transfer_map knows it should wait for the GPU when mapping
                 * that range. */
                util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
                               dst_offset + size);
        }

        dst_offset += r600_resource(dst)->gpu_address;
        src_offset += r600_resource(src)->gpu_address;

        /* The workarounds aren't needed on Fiji and beyond. */
        if (sctx->b.family <= CHIP_CARRIZO ||
            sctx->b.family == CHIP_STONEY) {
                /* If the size is not aligned, we must add a dummy copy at the end
                 * just to align the internal counter. Otherwise, the DMA engine
                 * would slow down by an order of magnitude for following copies.
                 */
                if (size % SI_CPDMA_ALIGNMENT)
                        realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

                /* If the copy begins unaligned, we must start copying from the next
                 * aligned block and the skipped part should be copied after everything
                 * else has been copied. Only the src alignment matters, not dst.
                 */
                if (src_offset % SI_CPDMA_ALIGNMENT) {
                        skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
                        /* The main part will be skipped if the size is too small. */
                        skipped_size = MIN2(skipped_size, size);
                        size -= skipped_size;
                }
        }

        /* Flush the caches. */
        if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC))
                sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                                 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

        /* This is the main part doing the copying. Src is always aligned. */
        main_dst_offset = dst_offset + skipped_size;
        main_src_offset = src_offset + skipped_size;

        while (size) {
                unsigned dma_flags = tc_l2_flag;
                unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));

                si_cp_dma_prepare(sctx, dst, src, byte_count,
                                  size + skipped_size + realign_size,
                                  user_flags, &is_first, &dma_flags);

                si_emit_cp_dma(sctx, main_dst_offset, main_src_offset,
                               byte_count, dma_flags, R600_COHERENCY_SHADER);

                size -= byte_count;
                main_src_offset += byte_count;
                main_dst_offset += byte_count;
        }

        /* Copy the part we skipped because src wasn't aligned. */
        if (skipped_size) {
                unsigned dma_flags = tc_l2_flag;

                si_cp_dma_prepare(sctx, dst, src, skipped_size,
                                  skipped_size + realign_size, user_flags,
                                  &is_first, &dma_flags);

                si_emit_cp_dma(sctx, dst_offset, src_offset, skipped_size,
                               dma_flags, R600_COHERENCY_SHADER);
        }

        /* Finally, realign the engine if the size wasn't aligned. */
        if (realign_size)
                si_cp_dma_realign_engine(sctx, realign_size, user_flags,
                                         &is_first);

        if (tc_l2_flag)
                r600_resource(dst)->TC_L2_dirty = true;

        /* If it's not a prefetch... */
        if (dst_offset != src_offset)
                sctx->b.num_cp_dma_calls++;
}
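
/* Usage sketch (illustrative only): a plain GPU memcpy is
 *
 *    si_copy_buffer(sctx, dst, src, dst_off, src_off, size, 0);
 *
 * while a TC L2 prefetch passes the same buffer and offset as both source
 * and destination with SI_CPDMA_SKIP_ALL; see cik_prefetch_TC_L2_async
 * below.
 */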
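
/* Prefetch a buffer range into the TC L2 cache by doing a CP DMA copy with
 * src == dst. On GFX9, si_emit_cp_dma turns this into a "prefetch only"
 * packet (DST_SEL = NOWHERE); on CIK-VI it's a regular copy through L2.
 */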
void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
                              uint64_t offset, unsigned size)
{
        assert(sctx->b.chip_class >= CIK);

        si_copy_buffer(sctx, buf, buf, offset, offset, size, SI_CPDMA_SKIP_ALL);
}

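/* Prefetch the code of a shader stage. A shader PM4 state is expected to
 * reference exactly one buffer: the shader binary. */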
static void cik_prefetch_shader_async(struct si_context *sctx,
                                      struct si_pm4_state *state)
{
        struct pipe_resource *bo = &state->bo[0]->b.b;
        assert(state->nbo == 1);

        cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
}

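/* Prefetch the vertex buffer descriptor list used by the current vertex
 * elements state, if there is one. */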
static void cik_prefetch_VBO_descriptors(struct si_context *sctx)
{
        if (!sctx->vertex_elements)
                return;

        cik_prefetch_TC_L2_async(sctx, &sctx->vertex_buffers.buffer->b.b,
                                 sctx->vertex_buffers.buffer_offset,
                                 sctx->vertex_elements->desc_list_byte_size);
}

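/* Prefetch all shaders that are marked dirty in prefetch_L2_mask, plus the
 * VBO descriptors. The VBO descriptor prefetch is placed right after the
 * first enabled vertex stage of the active pipeline, since that stage is
 * the first consumer of the descriptors.
 */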
void cik_emit_prefetch_L2(struct si_context *sctx)
{
        /* Prefetch shaders and VBO descriptors to TC L2. */
        if (sctx->b.chip_class >= GFX9) {
                /* Choose the right spot for the VBO prefetch. */
                if (sctx->tes_shader.cso) {
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_HS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
                                cik_prefetch_VBO_descriptors(sctx);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
                } else if (sctx->gs_shader.cso) {
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
                                cik_prefetch_VBO_descriptors(sctx);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
                } else {
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
                                cik_prefetch_VBO_descriptors(sctx);
                }
        } else {
                /* SI-CI-VI */
                /* Choose the right spot for the VBO prefetch. */
                if (sctx->tes_shader.cso) {
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_LS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
                                cik_prefetch_VBO_descriptors(sctx);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_HS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_ES)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.es);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
                } else if (sctx->gs_shader.cso) {
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_ES)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.es);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
                                cik_prefetch_VBO_descriptors(sctx);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
                } else {
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
                                cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
                        if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
                                cik_prefetch_VBO_descriptors(sctx);
                }
        }

        if (sctx->prefetch_L2_mask & SI_PREFETCH_PS)
                cik_prefetch_shader_async(sctx, sctx->queued.named.ps);

        sctx->prefetch_L2_mask = 0;
}

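/* Entry point: register the CP-DMA-based clear_buffer with the context. */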
void si_init_cp_dma_functions(struct si_context *sctx)
{
        sctx->b.clear_buffer = si_clear_buffer;
}