/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "radeonsi/si_pipe.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_transfer.h"
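/* Return true if the buffer is referenced by a queued command stream
 * (GFX or SDMA) with the given usage. */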
bool si_rings_is_buffer_referenced(struct si_context *sctx,
				   struct pb_buffer *buf,
				   enum radeon_bo_usage usage)
{
	if (sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(sctx->dma_cs, 0) &&
	    sctx->ws->cs_is_buffer_referenced(sctx->dma_cs, buf, usage)) {
		return true;
	}
	return false;
}
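/* Map a buffer for CPU access, first flushing and waiting for any command
 * stream that still references it (unless the map is unsynchronized).
 * Returns NULL if PIPE_TRANSFER_DONTBLOCK is set and waiting would be
 * required. */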
void *si_buffer_map_sync_with_rings(struct si_context *sctx,
				    struct r600_resource *resource,
				    unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	assert(!(resource->flags & RADEON_FLAG_SPARSE));

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return sctx->ws->buffer_map(resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* Have to wait for the last write only. */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size) &&
	    sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs,
					      resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
			return NULL;
		} else {
			si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(sctx->dma_cs, 0) &&
	    sctx->ws->cs_is_buffer_referenced(sctx->dma_cs,
					      resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			si_flush_dma_cs(sctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !sctx->ws->buffer_wait(resource->buf, 0, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			sctx->ws->cs_sync_flush(sctx->gfx_cs);
			if (sctx->dma_cs)
				sctx->ws->cs_sync_flush(sctx->dma_cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return sctx->ws->buffer_map(resource->buf, NULL, usage);
}
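/* Choose the memory domains, allocation flags, and expected memory usage
 * of a resource based on its pipe_resource template. */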
void si_init_resource_fields(struct si_screen *sscreen,
			     struct r600_resource *res,
			     uint64_t size, unsigned alignment)
{
	struct si_texture *tex = (struct si_texture*)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;
	res->texture_handle_allocated = false;
	res->image_handle_allocated = false;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution. */
		if (!sscreen->info.kernel_flushes_hdp_before_ib) {
			res->domains = RADEON_DOMAIN_GTT;
			res->flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) {
		/* Use GTT for all persistent mappings with older
		 * kernels, because they didn't always flush the HDP
		 * cache before CS execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel
		 * ensures all CPU writes finish before the GPU
		 * executes a command stream.
		 *
		 * radeon doesn't have good BO move throttling, so put all
		 * persistent buffers into GTT to prevent VRAM CPU page faults.
		 */
		if (!sscreen->info.kernel_flushes_hdp_before_ib ||
		    sscreen->info.drm_major == 2)
			res->domains = RADEON_DOMAIN_GTT;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if ((res->b.b.target != PIPE_BUFFER && !tex->surface.is_linear) ||
	    res->b.b.flags & SI_RESOURCE_FLAG_UNMAPPABLE) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			      RADEON_FLAG_GTT_WC;
	}

	/* Displayable and shareable surfaces are not suballocated. */
	if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
		res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
	else
		res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

	if (sscreen->debug_flags & DBG(NO_WC))
		res->flags &= ~RADEON_FLAG_GTT_WC;

	if (res->b.b.flags & SI_RESOURCE_FLAG_READ_ONLY)
		res->flags |= RADEON_FLAG_READ_ONLY;

	if (res->b.b.flags & SI_RESOURCE_FLAG_32BIT)
		res->flags |= RADEON_FLAG_32BIT;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;
	res->max_forced_staging_uploads = 0;
	res->b.max_forced_staging_uploads = 0;

	if (res->domains & RADEON_DOMAIN_VRAM) {
		res->vram_usage = size;

		res->max_forced_staging_uploads =
			res->b.max_forced_staging_uploads =
			sscreen->info.has_dedicated_vram &&
			size >= sscreen->info.vram_vis_size / 4 ? 1 : 0;
	} else if (res->domains & RADEON_DOMAIN_GTT) {
		res->gart_usage = size;
	}
}
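/* (Re)allocate the backing buffer object described by the resource fields. */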
bool si_alloc_resource(struct si_screen *sscreen,
		       struct r600_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */
	res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);

	if (res->flags & RADEON_FLAG_32BIT) {
		uint64_t start = res->gpu_address;
		uint64_t last = start + res->bo_size - 1;

		assert((start >> 32) == sscreen->info.address32_hi);
		assert((last >> 32) == sscreen->info.address32_hi);
	}

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	/* Print debug information. */
	if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}
static void si_buffer_destroy(struct pipe_screen *screen,
			      struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	threaded_resource_deinit(buf);
	util_range_destroy(&rbuffer->valid_buffer_range);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}
/* Reallocate the buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents.
 */
static bool
si_invalidate_buffer(struct si_context *sctx,
		     struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->b.is_shared)
		return false;

	/* Sparse buffers can't be reallocated. */
	if (rbuffer->flags & RADEON_FLAG_SPARSE)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rbuffer->b.is_user_ptr)
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (si_rings_is_buffer_referenced(sctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !sctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		uint64_t old_va = rbuffer->gpu_address;

		/* Reallocate the buffer in the same pipe_resource. */
		si_alloc_resource(sctx->screen, rbuffer);
		si_rebind_buffer(sctx, &rbuffer->b.b, old_va);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}
/* Replace the storage of dst with src. */
void si_replace_buffer_storage(struct pipe_context *ctx,
			       struct pipe_resource *dst,
			       struct pipe_resource *src)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_resource *rdst = r600_resource(dst);
	struct r600_resource *rsrc = r600_resource(src);
	uint64_t old_gpu_address = rdst->gpu_address;

	pb_reference(&rdst->buf, rsrc->buf);
	rdst->gpu_address = rsrc->gpu_address;
	rdst->b.b.bind = rsrc->b.b.bind;
	rdst->b.max_forced_staging_uploads = rsrc->b.max_forced_staging_uploads;
	rdst->max_forced_staging_uploads = rsrc->max_forced_staging_uploads;
	rdst->flags = rsrc->flags;

	assert(rdst->vram_usage == rsrc->vram_usage);
	assert(rdst->gart_usage == rsrc->gart_usage);
	assert(rdst->bo_size == rsrc->bo_size);
	assert(rdst->bo_alignment == rsrc->bo_alignment);
	assert(rdst->domains == rsrc->domains);

	si_rebind_buffer(sctx, dst, old_gpu_address);
}
static void si_invalidate_resource(struct pipe_context *ctx,
				   struct pipe_resource *resource)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);

	/* We currently only do anything here for buffers. */
	if (resource->target == PIPE_BUFFER)
		(void)si_invalidate_buffer(sctx, rbuffer);
}
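/* Allocate and fill in a pipe_transfer object for a buffer mapping. */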
static void *si_buffer_get_transfer(struct pipe_context *ctx,
				    struct pipe_resource *resource,
				    unsigned usage,
				    const struct pipe_box *box,
				    struct pipe_transfer **ptransfer,
				    void *data, struct r600_resource *staging,
				    unsigned offset)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_transfer *transfer;

	if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
		transfer = slab_alloc(&sctx->pool_transfers_unsync);
	else
		transfer = slab_alloc(&sctx->pool_transfers);

	transfer->b.b.resource = NULL;
	pipe_resource_reference(&transfer->b.b.resource, resource);
	transfer->b.b.level = 0;
	transfer->b.b.usage = usage;
	transfer->b.b.box = *box;
	transfer->b.b.stride = 0;
	transfer->b.b.layer_stride = 0;
	transfer->b.staging = NULL;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->b.b;
	return data;
}
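/* transfer_map implementation for buffers. It tries hard to avoid stalling:
 * unsynchronized maps of uninitialized ranges, reallocation on whole-resource
 * discards, and staging buffers for busy or unmappable memory. */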
static void *si_buffer_transfer_map(struct pipe_context *ctx,
				    struct pipe_resource *resource,
				    unsigned level,
				    unsigned usage,
				    const struct pipe_box *box,
				    struct pipe_transfer **ptransfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* From GL_AMD_pinned_memory issues:
	 *
	 *     4) Is glMapBuffer on a shared buffer guaranteed to return the
	 *        same system address which was specified at creation time?
	 *
	 *        RESOLVED: NO. The GL implementation might return a different
	 *        virtual mapping of that memory, although the same physical
	 *        page will be used.
	 *
	 * So don't ever use staging buffers.
	 */
	if (rbuffer->b.is_user_ptr)
		usage |= PIPE_TRANSFER_PERSISTENT;

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !rbuffer->b.is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	/* If a buffer in VRAM is too large and the range is discarded, don't
	 * map it directly. This makes sure that the buffer stays in VRAM.
	 */
	bool force_discard_range = false;
	if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
		     PIPE_TRANSFER_DISCARD_RANGE) &&
	    !(usage & PIPE_TRANSFER_PERSISTENT) &&
	    /* Try not to decrement the counter if it's not positive. Still racy,
	     * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
	    rbuffer->max_forced_staging_uploads > 0 &&
	    p_atomic_dec_return(&rbuffer->max_forced_staging_uploads) >= 0) {
		usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
			   PIPE_TRANSFER_UNSYNCHRONIZED);
		usage |= PIPE_TRANSFER_DISCARD_RANGE;
		force_discard_range = true;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INVALIDATE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		if (si_invalidate_buffer(sctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_TRANSFER_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
	    ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
			 PIPE_TRANSFER_PERSISTENT))) ||
	     (rbuffer->flags & RADEON_FLAG_SPARSE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU.
		 */
		if (rbuffer->flags & RADEON_FLAG_SPARSE ||
		    force_discard_range ||
		    si_rings_is_buffer_referenced(sctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !sctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(ctx->stream_uploader, 0,
				       box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT),
				       sctx->screen->info.tcc_cache_line_size,
				       &offset, (struct pipe_resource**)&staging,
				       (void**)&data);

			if (staging) {
				data += box->x % SI_MAP_BUFFER_ALIGNMENT;
				return si_buffer_get_transfer(ctx, resource, usage, box,
							      ptransfer, data, staging, offset);
			} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
				return NULL;
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if (((usage & PIPE_TRANSFER_READ) &&
		  !(usage & PIPE_TRANSFER_PERSISTENT) &&
		  (rbuffer->domains & RADEON_DOMAIN_VRAM ||
		   rbuffer->flags & RADEON_FLAG_GTT_WC)) ||
		 (rbuffer->flags & RADEON_FLAG_SPARSE)) {
		struct r600_resource *staging;

		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
		staging = r600_resource(pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT)));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			sctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % SI_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = si_buffer_map_sync_with_rings(sctx, staging,
							     usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
			if (!data) {
				r600_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % SI_MAP_BUFFER_ALIGNMENT;

			return si_buffer_get_transfer(ctx, resource, usage, box,
						      ptransfer, data, staging, 0);
		} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
			return NULL;
		}
	}

	data = si_buffer_map_sync_with_rings(sctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return si_buffer_get_transfer(ctx, resource, usage, box,
				      ptransfer, data, NULL, 0);
}
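/* Copy the mapped range from the staging buffer (if any) back into the
 * real buffer and mark the range as valid. */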
static void si_buffer_do_flush_region(struct pipe_context *ctx,
				      struct pipe_transfer *transfer,
				      const struct pipe_box *box)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->offset + box->x % SI_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}
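/* transfer_flush_region implementation for buffers. */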
static void si_buffer_flush_region(struct pipe_context *ctx,
				   struct pipe_transfer *transfer,
				   const struct pipe_box *rel_box)
{
	unsigned required_usage = PIPE_TRANSFER_WRITE |
				  PIPE_TRANSFER_FLUSH_EXPLICIT;

	if ((transfer->usage & required_usage) == required_usage) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		si_buffer_do_flush_region(ctx, transfer, &box);
	}
}
static void si_buffer_transfer_unmap(struct pipe_context *ctx,
				     struct pipe_transfer *transfer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		si_buffer_do_flush_region(ctx, transfer, &transfer->box);

	r600_resource_reference(&rtransfer->staging, NULL);
	assert(rtransfer->b.staging == NULL); /* for threaded context only */
	pipe_resource_reference(&transfer->resource, NULL);

	/* Don't use pool_transfers_unsync. We are always in the driver
	 * thread. */
	slab_free(&sctx->pool_transfers, transfer);
}
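/* buffer_subdata implementation: a simple map/memcpy/unmap of the range,
 * discarding the range so the write doesn't stall. */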
static void si_buffer_subdata(struct pipe_context *ctx,
			      struct pipe_resource *buffer,
			      unsigned usage, unsigned offset,
			      unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	u_box_1d(offset, size, &box);
	map = si_buffer_transfer_map(ctx, buffer, 0,
				     PIPE_TRANSFER_WRITE |
				     PIPE_TRANSFER_DISCARD_RANGE |
				     usage,
				     &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	si_buffer_transfer_unmap(ctx, transfer);
}
static const struct u_resource_vtbl si_buffer_vtbl =
{
	NULL,				/* get_handle */
	si_buffer_destroy,		/* resource_destroy */
	si_buffer_transfer_map,		/* transfer_map */
	si_buffer_flush_region,		/* transfer_flush_region */
	si_buffer_transfer_unmap,	/* transfer_unmap */
};
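/* Allocate and initialize the common r600_resource fields of a buffer. */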
static struct r600_resource *
si_alloc_buffer_struct(struct pipe_screen *screen,
		       const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	rbuffer->b.b.next = NULL;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;

	rbuffer->b.vtbl = &si_buffer_vtbl;
	threaded_resource_init(&rbuffer->b.b);

	rbuffer->buf = NULL;
	rbuffer->bind_history = 0;
	rbuffer->TC_L2_dirty = false;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}
static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
					      const struct pipe_resource *templ,
					      unsigned alignment)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct r600_resource *rbuffer = si_alloc_buffer_struct(screen, templ);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		rbuffer->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;

	si_init_resource_fields(sscreen, rbuffer, templ->width0, alignment);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		rbuffer->flags |= RADEON_FLAG_SPARSE;

	if (!si_alloc_resource(sscreen, rbuffer)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}
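/* Create a plain buffer with an explicit alignment. */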
struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned flags, unsigned usage,
						 unsigned size, unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return si_buffer_create(screen, &buffer, alignment);
}
struct r600_resource *si_aligned_buffer_create(struct pipe_screen *screen,
					       unsigned flags, unsigned usage,
					       unsigned size, unsigned alignment)
{
	return r600_resource(pipe_aligned_buffer_create(screen, flags, usage,
							size, alignment));
}
static struct pipe_resource *
si_buffer_from_user_memory(struct pipe_screen *screen,
			   const struct pipe_resource *templ,
			   void *user_memory)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct r600_resource *rbuffer = si_alloc_buffer_struct(screen, templ);

	rbuffer->domains = RADEON_DOMAIN_GTT;
	rbuffer->flags = 0;
	rbuffer->b.is_user_ptr = true;
	util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
	util_range_add(&rbuffer->b.valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	rbuffer->gpu_address = ws->buffer_get_virtual_address(rbuffer->buf);
	rbuffer->vram_usage = 0;
	rbuffer->gart_usage = templ->width0;

	return &rbuffer->b.b;
}
static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
						const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER) {
		return si_buffer_create(screen, templ, 256);
	} else {
		return si_texture_create(screen, templ);
	}
}
static bool si_resource_commit(struct pipe_context *pctx,
			       struct pipe_resource *resource,
			       unsigned level, struct pipe_box *box,
			       bool commit)
{
	struct si_context *ctx = (struct si_context *)pctx;
	struct r600_resource *res = r600_resource(resource);

	/*
	 * Since buffer commitment changes cannot be pipelined, we need to
	 * (a) flush any pending commands that refer to the buffer we're about
	 *     to change, and
	 * (b) wait for threaded submit to finish, including those that were
	 *     triggered by some other, earlier operation.
	 */
	if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
	}
	if (radeon_emitted(ctx->dma_cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
	}

	ctx->ws->cs_sync_flush(ctx->dma_cs);
	ctx->ws->cs_sync_flush(ctx->gfx_cs);

	assert(resource->target == PIPE_BUFFER);

	return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
}
void si_init_screen_buffer_functions(struct si_screen *sscreen)
{
	sscreen->b.resource_create = si_resource_create;
	sscreen->b.resource_destroy = u_resource_destroy_vtbl;
	sscreen->b.resource_from_user_memory = si_buffer_from_user_memory;
}
void si_init_buffer_functions(struct si_context *sctx)
{
	sctx->b.invalidate_resource = si_invalidate_resource;
	sctx->b.transfer_map = u_transfer_map_vtbl;
	sctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
	sctx->b.transfer_unmap = u_transfer_unmap_vtbl;
	sctx->b.texture_subdata = u_default_texture_subdata;
	sctx->b.buffer_subdata = si_buffer_subdata;
	sctx->b.resource_commit = si_resource_commit;
}