radeonsi: simplify DCC format categories
[mesa.git] src/gallium/drivers/radeon/r600_buffer_common.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "radeonsi/si_pipe.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
#include <stdio.h>

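/* Return true if either the GFX or the SDMA command stream references
 * the buffer with the given usage. */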
bool si_rings_is_buffer_referenced(struct r600_common_context *ctx,
                                   struct pb_buffer *buf,
                                   enum radeon_bo_usage usage)
{
        if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
                return true;
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
                return true;
        }
        return false;
}

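/* Map a buffer for CPU access. Flush any command stream that references
 * the buffer and, unless the caller asked for an unsynchronized mapping,
 * wait until the GPU is done with it. Returns NULL if
 * PIPE_TRANSFER_DONTBLOCK is set and the mapping would have to wait. */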
void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
                                    struct r600_resource *resource,
                                    unsigned usage)
{
        enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
        bool busy = false;

        assert(!(resource->flags & RADEON_FLAG_SPARSE));

        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
                return ctx->ws->buffer_map(resource->buf, NULL, usage);
        }

        if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* have to wait for the last write */
                rusage = RADEON_USAGE_WRITE;
        }

        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
                        ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->gfx.flush(ctx, 0, NULL);
                        busy = true;
                }
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
                        ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->dma.flush(ctx, 0, NULL);
                        busy = true;
                }
        }

        if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
                        return NULL;
                } else {
                        /* We will be waiting for the GPU. Wait for any offloaded
                         * CS flush to complete to avoid busy-waiting in the winsys. */
                        ctx->ws->cs_sync_flush(ctx->gfx.cs);
                        if (ctx->dma.cs)
                                ctx->ws->cs_sync_flush(ctx->dma.cs);
                }
        }

        /* Setting the CS to NULL will prevent doing checks we have done already. */
        return ctx->ws->buffer_map(resource->buf, NULL, usage);
}

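/* Initialize the allocation parameters of a resource (size, alignment,
 * heap domains, winsys flags) from its pipe_resource fields. This runs
 * before the backing buffer is allocated. */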
void si_init_resource_fields(struct si_screen *sscreen,
                             struct r600_resource *res,
                             uint64_t size, unsigned alignment)
{
        struct r600_texture *rtex = (struct r600_texture*)res;

        res->bo_size = size;
        res->bo_alignment = alignment;
        res->flags = 0;
        res->texture_handle_allocated = false;
        res->image_handle_allocated = false;

        switch (res->b.b.usage) {
        case PIPE_USAGE_STREAM:
                res->flags = RADEON_FLAG_GTT_WC;
                /* fall through */
        case PIPE_USAGE_STAGING:
                /* Transfers are likely to occur more often with these
                 * resources. */
                res->domains = RADEON_DOMAIN_GTT;
                break;
        case PIPE_USAGE_DYNAMIC:
                /* Older kernels didn't always flush the HDP cache before
                 * CS execution.
                 */
                if (sscreen->info.drm_major == 2 &&
                    sscreen->info.drm_minor < 40) {
                        res->domains = RADEON_DOMAIN_GTT;
                        res->flags |= RADEON_FLAG_GTT_WC;
                        break;
                }
                /* fall through */
        case PIPE_USAGE_DEFAULT:
        case PIPE_USAGE_IMMUTABLE:
        default:
                /* Not listing GTT here improves performance in some
                 * apps. */
                res->domains = RADEON_DOMAIN_VRAM;
                res->flags |= RADEON_FLAG_GTT_WC;
                break;
        }

        if (res->b.b.target == PIPE_BUFFER &&
            res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                              PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
                /* Use GTT for all persistent mappings with older
                 * kernels, because they didn't always flush the HDP
                 * cache before CS execution.
                 *
                 * Write-combined CPU mappings are fine, the kernel
                 * ensures all CPU writes finish before the GPU
                 * executes a command stream.
                 */
                if (sscreen->info.drm_major == 2 &&
                    sscreen->info.drm_minor < 40)
                        res->domains = RADEON_DOMAIN_GTT;
        }

        /* Tiled textures are unmappable. Always put them in VRAM. */
        if ((res->b.b.target != PIPE_BUFFER && !rtex->surface.is_linear) ||
            res->b.b.flags & R600_RESOURCE_FLAG_UNMAPPABLE) {
                res->domains = RADEON_DOMAIN_VRAM;
                res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
                              RADEON_FLAG_GTT_WC;
        }

        /* Displayable and shareable surfaces are not suballocated. */
        if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
                res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
        else
                res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

        if (sscreen->debug_flags & DBG(NO_WC))
                res->flags &= ~RADEON_FLAG_GTT_WC;

        if (res->b.b.flags & R600_RESOURCE_FLAG_READ_ONLY)
                res->flags |= RADEON_FLAG_READ_ONLY;

        if (res->b.b.flags & R600_RESOURCE_FLAG_32BIT)
                res->flags |= RADEON_FLAG_32BIT;

        /* Set expected VRAM and GART usage for the buffer. */
        res->vram_usage = 0;
        res->gart_usage = 0;
        res->max_forced_staging_uploads = 0;
        res->b.max_forced_staging_uploads = 0;

        if (res->domains & RADEON_DOMAIN_VRAM) {
                res->vram_usage = size;

                res->max_forced_staging_uploads =
                        res->b.max_forced_staging_uploads =
                        sscreen->info.has_dedicated_vram &&
                        size >= sscreen->info.vram_vis_size / 4 ? 1 : 0;
        } else if (res->domains & RADEON_DOMAIN_GTT) {
                res->gart_usage = size;
        }
}

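/* (Re)allocate the backing buffer of a resource using the parameters
 * set up by si_init_resource_fields. */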
bool si_alloc_resource(struct si_screen *sscreen,
                       struct r600_resource *res)
{
        struct pb_buffer *old_buf, *new_buf;

        /* Allocate a new resource. */
        new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size,
                                             res->bo_alignment,
                                             res->domains, res->flags);
        if (!new_buf) {
                return false;
        }

        /* Replace the pointer such that if res->buf wasn't NULL, it won't be
         * NULL. This should prevent crashes with multiple contexts using
         * the same buffer where one of the contexts invalidates it while
         * the others are using it. */
        old_buf = res->buf;
        res->buf = new_buf; /* should be atomic */
        res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);

        if (res->flags & RADEON_FLAG_32BIT) {
                uint64_t start = res->gpu_address;
                uint64_t last = start + res->bo_size - 1;
                (void)start;
                (void)last;

                assert((start >> 32) == sscreen->info.address32_hi);
                assert((last >> 32) == sscreen->info.address32_hi);
        }

        pb_reference(&old_buf, NULL);

        util_range_set_empty(&res->valid_buffer_range);
        res->TC_L2_dirty = false;

        /* Print debug information. */
        if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
                fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
                        res->gpu_address, res->gpu_address + res->buf->size,
                        res->buf->size);
        }
        return true;
}

static void r600_buffer_destroy(struct pipe_screen *screen,
                                struct pipe_resource *buf)
{
        struct r600_resource *rbuffer = r600_resource(buf);

        threaded_resource_deinit(buf);
        util_range_destroy(&rbuffer->valid_buffer_range);
        pb_reference(&rbuffer->buf, NULL);
        FREE(rbuffer);
}

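/* Replace the backing storage of a buffer so that the new contents can
 * be written without waiting for the GPU. Returns false if the storage
 * can't be replaced (shared, sparse, and user-pointer buffers). */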
static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
                       struct r600_resource *rbuffer)
{
        /* Shared buffers can't be reallocated. */
        if (rbuffer->b.is_shared)
                return false;

        /* Sparse buffers can't be reallocated. */
        if (rbuffer->flags & RADEON_FLAG_SPARSE)
                return false;

        /* In AMD_pinned_memory, the user pointer association only gets
         * broken when the buffer is explicitly re-allocated.
         */
        if (rbuffer->b.is_user_ptr)
                return false;

        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
            !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
                rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
        } else {
                util_range_set_empty(&rbuffer->valid_buffer_range);
        }

        return true;
}

/* Replace the storage of dst with src. */
void si_replace_buffer_storage(struct pipe_context *ctx,
                               struct pipe_resource *dst,
                               struct pipe_resource *src)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_resource *rdst = r600_resource(dst);
        struct r600_resource *rsrc = r600_resource(src);
        uint64_t old_gpu_address = rdst->gpu_address;

        pb_reference(&rdst->buf, rsrc->buf);
        rdst->gpu_address = rsrc->gpu_address;
        rdst->b.b.bind = rsrc->b.b.bind;
        rdst->b.max_forced_staging_uploads = rsrc->b.max_forced_staging_uploads;
        rdst->max_forced_staging_uploads = rsrc->max_forced_staging_uploads;
        rdst->flags = rsrc->flags;

        assert(rdst->vram_usage == rsrc->vram_usage);
        assert(rdst->gart_usage == rsrc->gart_usage);
        assert(rdst->bo_size == rsrc->bo_size);
        assert(rdst->bo_alignment == rsrc->bo_alignment);
        assert(rdst->domains == rsrc->domains);

        rctx->rebind_buffer(ctx, dst, old_gpu_address);
}

static void si_invalidate_resource(struct pipe_context *ctx,
                                   struct pipe_resource *resource)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_resource *rbuffer = r600_resource(resource);

        /* We currently only do anything here for buffers. */
        if (resource->target == PIPE_BUFFER)
                (void)r600_invalidate_buffer(rctx, rbuffer);
}

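/* Allocate a transfer object, fill in the transfer parameters, and
 * return the already-computed mapped pointer. */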
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
                                      struct pipe_resource *resource,
                                      unsigned usage,
                                      const struct pipe_box *box,
                                      struct pipe_transfer **ptransfer,
                                      void *data, struct r600_resource *staging,
                                      unsigned offset)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_transfer *transfer;

        if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
                transfer = slab_alloc(&rctx->pool_transfers_unsync);
        else
                transfer = slab_alloc(&rctx->pool_transfers);

        transfer->b.b.resource = NULL;
        pipe_resource_reference(&transfer->b.b.resource, resource);
        transfer->b.b.level = 0;
        transfer->b.b.usage = usage;
        transfer->b.b.box = *box;
        transfer->b.b.stride = 0;
        transfer->b.b.layer_stride = 0;
        transfer->b.staging = NULL;
        transfer->offset = offset;
        transfer->staging = staging;
        *ptransfer = &transfer->b.b;
        return data;
}

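/* Map a buffer range for CPU access. Depending on the usage flags, this
 * either maps the buffer directly, upgrades the mapping to unsynchronized
 * or whole-resource-discard where that is safe, or redirects the access
 * through a temporary staging buffer (for busy discarded writes and for
 * reads from VRAM or write-combined memory). */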
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
                                      struct pipe_resource *resource,
                                      unsigned level,
                                      unsigned usage,
                                      const struct pipe_box *box,
                                      struct pipe_transfer **ptransfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_resource *rbuffer = r600_resource(resource);
        uint8_t *data;

        assert(box->x + box->width <= resource->width0);

        /* From GL_AMD_pinned_memory issues:
         *
         *     4) Is glMapBuffer on a shared buffer guaranteed to return the
         *        same system address which was specified at creation time?
         *
         *        RESOLVED: NO. The GL implementation might return a different
         *        virtual mapping of that memory, although the same physical
         *        page will be used.
         *
         * So don't ever use staging buffers.
         */
        if (rbuffer->b.is_user_ptr)
                usage |= PIPE_TRANSFER_PERSISTENT;

        /* See if the buffer range being mapped has never been initialized,
         * in which case it can be mapped unsynchronized. */
        if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
            usage & PIPE_TRANSFER_WRITE &&
            !rbuffer->b.is_shared &&
            !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
                usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
        }

        /* If discarding the entire range, discard the whole resource instead. */
        if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
            box->x == 0 && box->width == resource->width0) {
                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
        }

        /* If a buffer in VRAM is too large and the range is discarded, don't
         * map it directly. This makes sure that the buffer stays in VRAM.
         */
        bool force_discard_range = false;
        if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
                     PIPE_TRANSFER_DISCARD_RANGE) &&
            !(usage & PIPE_TRANSFER_PERSISTENT) &&
            /* Try not to decrement the counter if it's not positive. Still racy,
             * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
            rbuffer->max_forced_staging_uploads > 0 &&
            p_atomic_dec_return(&rbuffer->max_forced_staging_uploads) >= 0) {
                usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
                           PIPE_TRANSFER_UNSYNCHRONIZED);
                usage |= PIPE_TRANSFER_DISCARD_RANGE;
                force_discard_range = true;
        }

        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
            !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                       TC_TRANSFER_MAP_NO_INVALIDATE))) {
                assert(usage & PIPE_TRANSFER_WRITE);

                if (r600_invalidate_buffer(rctx, rbuffer)) {
                        /* At this point, the buffer is always idle. */
                        usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
                } else {
                        /* Fall back to a temporary buffer. */
                        usage |= PIPE_TRANSFER_DISCARD_RANGE;
                }
        }

        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
            ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                         PIPE_TRANSFER_PERSISTENT))) ||
             (rbuffer->flags & RADEON_FLAG_SPARSE))) {
                assert(usage & PIPE_TRANSFER_WRITE);

                /* Check if mapping this buffer would cause waiting for the GPU. */
                if (rbuffer->flags & RADEON_FLAG_SPARSE ||
                    force_discard_range ||
                    si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
                    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
                        /* Do a wait-free write-only transfer using a temporary buffer. */
                        unsigned offset;
                        struct r600_resource *staging = NULL;

                        u_upload_alloc(ctx->stream_uploader, 0,
                                       box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
                                       rctx->screen->info.tcc_cache_line_size,
                                       &offset, (struct pipe_resource**)&staging,
                                       (void**)&data);

                        if (staging) {
                                data += box->x % R600_MAP_BUFFER_ALIGNMENT;
                                return r600_buffer_get_transfer(ctx, resource, usage, box,
                                                                ptransfer, data, staging, offset);
                        } else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
                                return NULL;
                        }
                } else {
                        /* At this point, the buffer is always idle (we checked it above). */
                        usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
                }
        }
        /* Use a staging buffer in cached GTT for reads. */
        else if (((usage & PIPE_TRANSFER_READ) &&
                  !(usage & PIPE_TRANSFER_PERSISTENT) &&
                  (rbuffer->domains & RADEON_DOMAIN_VRAM ||
                   rbuffer->flags & RADEON_FLAG_GTT_WC)) ||
                 (rbuffer->flags & RADEON_FLAG_SPARSE)) {
                struct r600_resource *staging;

                assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
                staging = (struct r600_resource*) pipe_buffer_create(
                                ctx->screen, 0, PIPE_USAGE_STAGING,
                                box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
                if (staging) {
                        /* Copy the VRAM buffer to the staging buffer. */
                        rctx->dma_copy(ctx, &staging->b.b, 0,
                                       box->x % R600_MAP_BUFFER_ALIGNMENT,
                                       0, 0, resource, 0, box);

                        data = si_buffer_map_sync_with_rings(rctx, staging,
                                                             usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
                        if (!data) {
                                r600_resource_reference(&staging, NULL);
                                return NULL;
                        }
                        data += box->x % R600_MAP_BUFFER_ALIGNMENT;

                        return r600_buffer_get_transfer(ctx, resource, usage, box,
                                                        ptransfer, data, staging, 0);
                } else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
                        return NULL;
                }
        }

        data = si_buffer_map_sync_with_rings(rctx, rbuffer, usage);
        if (!data) {
                return NULL;
        }
        data += box->x;

        return r600_buffer_get_transfer(ctx, resource, usage, box,
                                        ptransfer, data, NULL, 0);
}

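/* If the transfer used a staging buffer, copy the given region back into
 * the real buffer, then mark the region as initialized. */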
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
                                        struct pipe_transfer *transfer,
                                        const struct pipe_box *box)
{
        struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
        struct r600_resource *rbuffer = r600_resource(transfer->resource);

        if (rtransfer->staging) {
                struct pipe_resource *dst, *src;
                unsigned soffset;
                struct pipe_box dma_box;

                dst = transfer->resource;
                src = &rtransfer->staging->b.b;
                soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

                u_box_1d(soffset, box->width, &dma_box);

                /* Copy the staging buffer into the original one. */
                ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
        }

        util_range_add(&rbuffer->valid_buffer_range, box->x,
                       box->x + box->width);
}

static void r600_buffer_flush_region(struct pipe_context *ctx,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *rel_box)
{
        unsigned required_usage = PIPE_TRANSFER_WRITE |
                                  PIPE_TRANSFER_FLUSH_EXPLICIT;

        if ((transfer->usage & required_usage) == required_usage) {
                struct pipe_box box;

                u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
                r600_buffer_do_flush_region(ctx, transfer, &box);
        }
}

static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
                                       struct pipe_transfer *transfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

        if (transfer->usage & PIPE_TRANSFER_WRITE &&
            !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
                r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

        r600_resource_reference(&rtransfer->staging, NULL);
        assert(rtransfer->b.staging == NULL); /* for threaded context only */
        pipe_resource_reference(&transfer->resource, NULL);

        /* Don't use pool_transfers_unsync. We are always in the driver
         * thread. */
        slab_free(&rctx->pool_transfers, transfer);
}

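/* Implementation of pipe_context::buffer_subdata: upload "size" bytes of
 * "data" at "offset" through a write transfer that discards the range. */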
static void si_buffer_subdata(struct pipe_context *ctx,
                              struct pipe_resource *buffer,
                              unsigned usage, unsigned offset,
                              unsigned size, const void *data)
{
        struct pipe_transfer *transfer = NULL;
        struct pipe_box box;
        uint8_t *map = NULL;

        u_box_1d(offset, size, &box);
        map = r600_buffer_transfer_map(ctx, buffer, 0,
                                       PIPE_TRANSFER_WRITE |
                                       PIPE_TRANSFER_DISCARD_RANGE |
                                       usage,
                                       &box, &transfer);
        if (!map)
                return;

        memcpy(map, data, size);
        r600_buffer_transfer_unmap(ctx, transfer);
}

static const struct u_resource_vtbl r600_buffer_vtbl =
{
        NULL,                           /* get_handle */
        r600_buffer_destroy,            /* resource_destroy */
        r600_buffer_transfer_map,       /* transfer_map */
        r600_buffer_flush_region,       /* transfer_flush_region */
        r600_buffer_transfer_unmap,     /* transfer_unmap */
};

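/* Allocate and initialize the CPU-side r600_resource structure. The GPU
 * buffer itself is allocated separately by si_alloc_resource. */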
static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
                         const struct pipe_resource *templ)
{
        struct r600_resource *rbuffer;

        rbuffer = MALLOC_STRUCT(r600_resource);

        rbuffer->b.b = *templ;
        rbuffer->b.b.next = NULL;
        pipe_reference_init(&rbuffer->b.b.reference, 1);
        rbuffer->b.b.screen = screen;

        rbuffer->b.vtbl = &r600_buffer_vtbl;
        threaded_resource_init(&rbuffer->b.b);

        rbuffer->buf = NULL;
        rbuffer->bind_history = 0;
        rbuffer->TC_L2_dirty = false;
        util_range_init(&rbuffer->valid_buffer_range);
        return rbuffer;
}

static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
                                              const struct pipe_resource *templ,
                                              unsigned alignment)
{
        struct si_screen *sscreen = (struct si_screen*)screen;
        struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

        if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
                rbuffer->b.b.flags |= R600_RESOURCE_FLAG_UNMAPPABLE;

        si_init_resource_fields(sscreen, rbuffer, templ->width0, alignment);

        if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
                rbuffer->flags |= RADEON_FLAG_SPARSE;

        if (!si_alloc_resource(sscreen, rbuffer)) {
                FREE(rbuffer);
                return NULL;
        }
        return &rbuffer->b.b;
}

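/* Convenience wrapper for creating a PIPE_BUFFER with an explicit
 * alignment instead of the default one. */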
struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
                                               unsigned flags,
                                               unsigned usage,
                                               unsigned size,
                                               unsigned alignment)
{
        struct pipe_resource buffer;

        memset(&buffer, 0, sizeof buffer);
        buffer.target = PIPE_BUFFER;
        buffer.format = PIPE_FORMAT_R8_UNORM;
        buffer.bind = 0;
        buffer.usage = usage;
        buffer.flags = flags;
        buffer.width0 = size;
        buffer.height0 = 1;
        buffer.depth0 = 1;
        buffer.array_size = 1;
        return si_buffer_create(screen, &buffer, alignment);
}

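/* Wrap an application-provided pointer (GL_AMD_pinned_memory) in a buffer
 * resource. The memory is used in place; nothing is copied, so the whole
 * buffer is considered valid from the start. */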
static struct pipe_resource *
si_buffer_from_user_memory(struct pipe_screen *screen,
                           const struct pipe_resource *templ,
                           void *user_memory)
{
        struct si_screen *sscreen = (struct si_screen*)screen;
        struct radeon_winsys *ws = sscreen->ws;
        struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

        rbuffer->domains = RADEON_DOMAIN_GTT;
        rbuffer->flags = 0;
        rbuffer->b.is_user_ptr = true;
        util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
        util_range_add(&rbuffer->b.valid_buffer_range, 0, templ->width0);

        /* Convert a user pointer to a buffer. */
        rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
        if (!rbuffer->buf) {
                FREE(rbuffer);
                return NULL;
        }

        rbuffer->gpu_address = ws->buffer_get_virtual_address(rbuffer->buf);
        rbuffer->vram_usage = 0;
        rbuffer->gart_usage = templ->width0;

        return &rbuffer->b.b;
}

static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
                                                const struct pipe_resource *templ)
{
        if (templ->target == PIPE_BUFFER) {
                return si_buffer_create(screen, templ, 256);
        } else {
                return si_texture_create(screen, templ);
        }
}

void si_init_screen_buffer_functions(struct si_screen *sscreen)
{
        sscreen->b.resource_create = si_resource_create;
        sscreen->b.resource_destroy = u_resource_destroy_vtbl;
        sscreen->b.resource_from_user_memory = si_buffer_from_user_memory;
}

void si_init_buffer_functions(struct si_context *sctx)
{
        sctx->b.b.invalidate_resource = si_invalidate_resource;
        sctx->b.b.transfer_map = u_transfer_map_vtbl;
        sctx->b.b.transfer_flush_region = u_transfer_flush_region_vtbl;
        sctx->b.b.transfer_unmap = u_transfer_unmap_vtbl;
        sctx->b.b.texture_subdata = u_default_texture_subdata;
        sctx->b.b.buffer_subdata = si_buffer_subdata;
}