/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák
 */
26
27 #include "r600_cs.h"
28 #include "util/u_memory.h"
29 #include "util/u_upload_mgr.h"
30 #include <inttypes.h>
31 #include <stdio.h>
32
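/* Return TRUE if the buffer is referenced with the given usage by the GFX
 * command stream or by a non-empty DMA command stream.
 */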
boolean r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
					struct pb_buffer *buf,
					enum radeon_bo_usage usage)
{
	if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
		return TRUE;
	}
	if (ctx->dma.cs && ctx->dma.cs->cdw &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
		return TRUE;
	}
	return FALSE;
}

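/* Map a buffer without breaking synchronization with the GPU.
 *
 * Unless PIPE_TRANSFER_UNSYNCHRONIZED is set, flush any command stream
 * that still references the buffer and wait for the GPU to finish with it.
 * With PIPE_TRANSFER_DONTBLOCK, start asynchronous flushes and return NULL
 * instead of waiting.
 */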
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				      struct r600_resource *resource,
				      unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (ctx->gfx.cs->cdw != ctx->initial_gfx_cs_size &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->gfx.flush(ctx, 0, NULL);
			busy = true;
		}
	}
	if (ctx->dma.cs &&
	    ctx->dma.cs->cdw &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->dma.flush(ctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(ctx->gfx.cs);
			if (ctx->dma.cs)
				ctx->ws->cs_sync_flush(ctx->dma.cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return ctx->ws->buffer_map(resource->buf, NULL, usage);
}

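/* (Re)allocate the winsys buffer backing a resource: choose the placement
 * (VRAM/GTT) and flags based on pipe_resource::usage, create the buffer,
 * and replace the old storage, if any.
 */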
bool r600_init_resource(struct r600_common_screen *rscreen,
			struct r600_resource *res,
			unsigned size, unsigned alignment,
			bool use_reusable_pool)
{
	struct r600_texture *rtex = (struct r600_texture*)res;
	struct pb_buffer *old_buf, *new_buf;
	enum radeon_bo_flag flags = 0;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40) {
			res->domains = RADEON_DOMAIN_GTT;
			flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		flags |= RADEON_FLAG_CPU_ACCESS;
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		/* Use GTT for all persistent mappings with older kernels,
		 * because they didn't always flush the HDP cache before CS
		 * execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel ensures all CPU
		 * writes finish before the GPU executes a command stream.
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40)
			res->domains = RADEON_DOMAIN_GTT;
		else if (res->domains & RADEON_DOMAIN_VRAM)
			flags |= RADEON_FLAG_CPU_ACCESS;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if (res->b.b.target != PIPE_BUFFER &&
	    rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		res->domains = RADEON_DOMAIN_VRAM;
		flags &= ~RADEON_FLAG_CPU_ACCESS;
		flags |= RADEON_FLAG_NO_CPU_ACCESS;
	}

	if (rscreen->debug_flags & DBG_NO_WC)
		flags &= ~RADEON_FLAG_GTT_WC;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
					     use_reusable_pool,
					     res->domains, flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %u bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}

static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	util_range_destroy(&rbuffer->valid_buffer_range);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}

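/* Reallocate the storage of a buffer so that the next map doesn't have to
 * wait for the GPU. Returns false if the storage cannot be replaced
 * (shared buffers and user-pointer buffers); if the buffer is already
 * idle, only the valid range is reset instead.
 */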
static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
		       struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->is_shared)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rctx->ws->buffer_is_user_ptr(rbuffer->buf))
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}

void r600_invalidate_resource(struct pipe_context *ctx,
			      struct pipe_resource *resource)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);
	/* We currently only do anything here for buffers */
	if (resource->target == PIPE_BUFFER)
		(void)r600_invalidate_buffer(rctx, rbuffer);
}

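/* Allocate a transfer object from the per-context slab, describe the
 * mapped range, and return the CPU pointer. */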
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);

	transfer->transfer.resource = resource;
	transfer->transfer.level = level;
	transfer->transfer.usage = usage;
	transfer->transfer.box = *box;
	transfer->transfer.stride = 0;
	transfer->transfer.layer_stride = 0;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->transfer;
	return data;
}

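/* Whether a buffer-to-buffer copy can be done without involving the CPU:
 * CP DMA handles any alignment; the DMA ring and the streamout fallback
 * require dword-aligned offsets and size.
 */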
static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
				     unsigned dstx, unsigned srcx, unsigned size)
{
	bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);

	return rctx->screen->has_cp_dma ||
	       (dword_aligned && (rctx->dma.cs ||
				  rctx->screen->has_streamout));
}

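/* Map a buffer range for CPU access. Several paths avoid stalling on the
 * GPU: never-initialized ranges are mapped unsynchronized, discarding the
 * whole resource reallocates its storage, discarding a range of a busy
 * buffer redirects the write to a temporary upload buffer, and reads from
 * VRAM go through a GTT staging buffer filled by DMA.
 */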
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !rbuffer->is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		if (r600_invalidate_buffer(rctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_TRANSFER_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
	    !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       PIPE_TRANSFER_PERSISTENT)) &&
	    !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
	    r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(rctx->uploader, 0, box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       256, &offset, (struct pipe_resource**)&staging, (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, level, usage, box,
								ptransfer, data, staging, offset);
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		}
	}
	/* Using a staging buffer in GTT for larger reads is much faster. */
	else if ((usage & PIPE_TRANSFER_READ) &&
		 !(usage & (PIPE_TRANSFER_WRITE |
			    PIPE_TRANSFER_PERSISTENT)) &&
		 rbuffer->domains == RADEON_DOMAIN_VRAM &&
		 r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) {
		struct r600_resource *staging;

		staging = (struct r600_resource*) pipe_buffer_create(
				ctx->screen, PIPE_BIND_TRANSFER_READ, PIPE_USAGE_STAGING,
				box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			rctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % R600_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, level, box);

			data = r600_buffer_map_sync_with_rings(rctx, staging, PIPE_TRANSFER_READ);
			data += box->x % R600_MAP_BUFFER_ALIGNMENT;

			return r600_buffer_get_transfer(ctx, resource, level, usage, box,
							ptransfer, data, staging, 0);
		}
	}

	data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, level, usage, box,
					ptransfer, data, NULL, 0);
}

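/* If the transfer used a staging buffer, copy its contents back into the
 * real resource with DMA, then mark the flushed range as initialized. */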
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
					struct pipe_transfer *transfer,
					const struct pipe_box *box)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		rctx->dma_copy(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}

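/* Flush a subrange of a mapped transfer; "rel_box" is relative to the
 * transfer box. */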
static void r600_buffer_flush_region(struct pipe_context *ctx,
				     struct pipe_transfer *transfer,
				     const struct pipe_box *rel_box)
{
	if (transfer->usage & (PIPE_TRANSFER_WRITE |
			       PIPE_TRANSFER_FLUSH_EXPLICIT)) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		r600_buffer_do_flush_region(ctx, transfer, &box);
	}
}

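/* Unmap a buffer: flush any pending writes (unless the transfer used
 * explicit flushes), release the staging buffer, and free the transfer
 * object.
 */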
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	util_slab_free(&rctx->pool_transfers, transfer);
}

static const struct u_resource_vtbl r600_buffer_vtbl =
{
	NULL,				/* get_handle */
	r600_buffer_destroy,		/* resource_destroy */
	r600_buffer_transfer_map,	/* transfer_map */
	r600_buffer_flush_region,	/* transfer_flush_region */
	r600_buffer_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};

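/* Allocate and initialize the CPU-side r600_resource structure. The winsys
 * buffer (rbuffer->buf) is left NULL and must be created by the caller. */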
static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
			 const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;
	rbuffer->b.vtbl = &r600_buffer_vtbl;
	rbuffer->buf = NULL;
	rbuffer->TC_L2_dirty = false;
	rbuffer->is_shared = false;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}

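/* Create a buffer resource: allocate the structure and its backing storage
 * from the reusable buffer pool. */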
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, TRUE)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}

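/* Convenience helper for driver-internal buffers: build a pipe_resource
 * template for a linear buffer of "size" bytes and create it with the
 * requested alignment. */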
struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned bind,
						 unsigned usage,
						 unsigned size,
						 unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = bind;
	buffer.usage = usage;
	buffer.flags = 0;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return r600_buffer_create(screen, &buffer, alignment);
}

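/* Create a buffer around an existing user allocation (AMD_pinned_memory):
 * the user pointer is converted to a GTT buffer without copying, and the
 * whole range is considered valid from the start.
 */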
struct pipe_resource *
r600_buffer_from_user_memory(struct pipe_screen *screen,
			     const struct pipe_resource *templ,
			     void *user_memory)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	rbuffer->domains = RADEON_DOMAIN_GTT;
	util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	if (rscreen->info.has_virtual_memory)
		rbuffer->gpu_address =
			ws->buffer_get_virtual_address(rbuffer->buf);
	else
		rbuffer->gpu_address = 0;

	return &rbuffer->b.b;
}