/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "radeonsi/si_pipe.h"
#include "util/u_memory.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"

#include <inttypes.h>
#include <stdio.h>

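/* Check whether the buffer is referenced by any command stream that has been
 * recorded but not yet submitted (the GFX IB, and the SDMA IB if one exists). */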
bool si_rings_is_buffer_referenced(struct si_context *sctx, struct pb_buffer *buf,
                                   enum radeon_bo_usage usage)
{
   if (sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, buf, usage)) {
      return true;
   }
   if (radeon_emitted(sctx->sdma_cs, 0) &&
       sctx->ws->cs_is_buffer_referenced(sctx->sdma_cs, buf, usage)) {
      return true;
   }
   return false;
}

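/* Map a buffer for CPU access, synchronizing against both rings first.
 *
 * Unless PIPE_TRANSFER_UNSYNCHRONIZED is set, any unsubmitted IB that
 * references the buffer is flushed and the call waits until the GPU stops
 * using the buffer. With PIPE_TRANSFER_DONTBLOCK, NULL is returned instead
 * of waiting.
 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *    void *ptr = si_buffer_map_sync_with_rings(sctx, buf, PIPE_TRANSFER_READ);
 *    if (ptr)
 *       memcpy(out, ptr, size); // safe: all GPU writes have completed
 */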
void *si_buffer_map_sync_with_rings(struct si_context *sctx, struct si_resource *resource,
                                    unsigned usage)
{
   enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
   bool busy = false;

   assert(!(resource->flags & RADEON_FLAG_SPARSE));

   if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
      return sctx->ws->buffer_map(resource->buf, NULL, usage);
   }

   if (!(usage & PIPE_TRANSFER_WRITE)) {
      /* have to wait for the last write */
      rusage = RADEON_USAGE_WRITE;
   }

   if (radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size) &&
       sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, resource->buf, rusage)) {
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
         return NULL;
      } else {
         si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
         busy = true;
      }
   }
   if (radeon_emitted(sctx->sdma_cs, 0) &&
       sctx->ws->cs_is_buffer_referenced(sctx->sdma_cs, resource->buf, rusage)) {
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
         return NULL;
      } else {
         si_flush_dma_cs(sctx, 0, NULL);
         busy = true;
      }
   }

   if (busy || !sctx->ws->buffer_wait(resource->buf, 0, rusage)) {
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         return NULL;
      } else {
         /* We will be waiting for the GPU. Wait for any offloaded
          * CS flush to complete to avoid busy-waiting in the winsys. */
         sctx->ws->cs_sync_flush(sctx->gfx_cs);
         if (sctx->sdma_cs)
            sctx->ws->cs_sync_flush(sctx->sdma_cs);
      }
   }

   /* Setting the CS to NULL will prevent doing checks we have done already. */
   return sctx->ws->buffer_map(resource->buf, NULL, usage);
}

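/* Fill in the size, alignment, domains (VRAM/GTT), and winsys flags of a
 * resource before its backing buffer is allocated. The heuristics below place
 * CPU-visible usages (STREAM, STAGING) in GTT and everything else in VRAM
 * with write-combined CPU access. */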
void si_init_resource_fields(struct si_screen *sscreen, struct si_resource *res, uint64_t size,
                             unsigned alignment)
{
   struct si_texture *tex = (struct si_texture *)res;

   res->bo_size = size;
   res->bo_alignment = alignment;
   res->flags = 0;
   res->texture_handle_allocated = false;
   res->image_handle_allocated = false;

   switch (res->b.b.usage) {
   case PIPE_USAGE_STREAM:
      res->flags = RADEON_FLAG_GTT_WC;
      /* fall through */
   case PIPE_USAGE_STAGING:
      /* Transfers are likely to occur more often with these
       * resources. */
      res->domains = RADEON_DOMAIN_GTT;
      break;
   case PIPE_USAGE_DYNAMIC:
      /* Older kernels didn't always flush the HDP cache before
       * CS execution. */
      if (!sscreen->info.kernel_flushes_hdp_before_ib) {
         res->domains = RADEON_DOMAIN_GTT;
         res->flags |= RADEON_FLAG_GTT_WC;
         break;
      }
      /* fall through */
   case PIPE_USAGE_DEFAULT:
   case PIPE_USAGE_IMMUTABLE:
   default:
      /* Not listing GTT here improves performance in some
       * apps. */
      res->domains = RADEON_DOMAIN_VRAM;
      res->flags |= RADEON_FLAG_GTT_WC;
      break;
   }

   if (res->b.b.target == PIPE_BUFFER && res->b.b.flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) {
      /* Use GTT for all persistent mappings with older
       * kernels, because they didn't always flush the HDP
       * cache before CS execution.
       *
       * Write-combined CPU mappings are fine, the kernel
       * ensures all CPU writes finish before the GPU
       * executes a command stream.
       *
       * radeon doesn't have good BO move throttling, so put all
       * persistent buffers into GTT to prevent VRAM CPU page faults.
       */
      if (!sscreen->info.kernel_flushes_hdp_before_ib || !sscreen->info.is_amdgpu)
         res->domains = RADEON_DOMAIN_GTT;
   }

   /* Tiled textures are unmappable. Always put them in VRAM. */
   if ((res->b.b.target != PIPE_BUFFER && !tex->surface.is_linear) ||
       res->b.b.flags & SI_RESOURCE_FLAG_UNMAPPABLE) {
      res->domains = RADEON_DOMAIN_VRAM;
      res->flags |= RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_GTT_WC;
   }

   /* Displayable and shareable surfaces are not suballocated. */
   if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
      res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
   else
      res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

   if (sscreen->ws->ws_is_secure(sscreen->ws)) {
      if (res->b.b.bind & PIPE_BIND_SCANOUT)
         res->flags |= RADEON_FLAG_ENCRYPTED;
      if (res->b.b.flags & PIPE_RESOURCE_FLAG_ENCRYPTED)
         res->flags |= RADEON_FLAG_ENCRYPTED;
   }

   if (sscreen->debug_flags & DBG(NO_WC))
      res->flags &= ~RADEON_FLAG_GTT_WC;

   if (res->b.b.flags & SI_RESOURCE_FLAG_READ_ONLY)
      res->flags |= RADEON_FLAG_READ_ONLY;

   if (res->b.b.flags & SI_RESOURCE_FLAG_32BIT)
      res->flags |= RADEON_FLAG_32BIT;

   /* For higher throughput and lower latency over PCIe assuming sequential access.
    * Only CP DMA, SDMA, and optimized compute benefit from this.
    * GFX8 and older don't support RADEON_FLAG_UNCACHED. */
   if (sscreen->info.chip_class >= GFX9 &&
       res->b.b.flags & SI_RESOURCE_FLAG_UNCACHED)
      res->flags |= RADEON_FLAG_UNCACHED;

   /* Set expected VRAM and GART usage for the buffer. */
   res->vram_usage = 0;
   res->gart_usage = 0;
   res->max_forced_staging_uploads = 0;
   res->b.max_forced_staging_uploads = 0;

   if (res->domains & RADEON_DOMAIN_VRAM) {
      res->vram_usage = size;

      res->max_forced_staging_uploads = res->b.max_forced_staging_uploads =
         sscreen->info.has_dedicated_vram && size >= sscreen->info.vram_vis_size / 4 ? 1 : 0;
   } else if (res->domains & RADEON_DOMAIN_GTT) {
      res->gart_usage = size;
   }
}

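/* (Re)allocate the backing buffer described by the fields that
 * si_init_resource_fields set up. This may be called on a live resource to
 * discard its contents: the old buffer is released only after the new one is
 * in place, so other contexts never observe a NULL res->buf. */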
bool si_alloc_resource(struct si_screen *sscreen, struct si_resource *res)
{
   struct pb_buffer *old_buf, *new_buf;

   /* Allocate a new resource. */
   new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size, res->bo_alignment, res->domains,
                                        res->flags);
   if (!new_buf) {
      return false;
   }

   /* Replace the pointer such that if res->buf wasn't NULL, it won't be
    * NULL. This should prevent crashes with multiple contexts using
    * the same buffer where one of the contexts invalidates it while
    * the others are using it. */
   old_buf = res->buf;
   res->buf = new_buf; /* should be atomic */
   res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);

   if (res->flags & RADEON_FLAG_32BIT) {
      uint64_t start = res->gpu_address;
      uint64_t last = start + res->bo_size - 1;
      /* Silence unused-variable warnings in release builds. */
      (void)start;
      (void)last;

      assert((start >> 32) == sscreen->info.address32_hi);
      assert((last >> 32) == sscreen->info.address32_hi);
   }

   pb_reference(&old_buf, NULL);

   util_range_set_empty(&res->valid_buffer_range);
   res->TC_L2_dirty = false;

   /* Print debug information. */
   if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
      fprintf(stderr, "VM start=0x%" PRIX64 " end=0x%" PRIX64 " | Buffer %" PRIu64 " bytes\n",
              res->gpu_address, res->gpu_address + res->buf->size, res->buf->size);
   }

   if (res->b.b.flags & SI_RESOURCE_FLAG_CLEAR)
      si_screen_clear_buffer(sscreen, &res->b.b, 0, res->bo_size, 0);

   return true;
}

static void si_buffer_destroy(struct pipe_screen *screen, struct pipe_resource *buf)
{
   struct si_resource *buffer = si_resource(buf);

   threaded_resource_deinit(buf);
   util_range_destroy(&buffer->valid_buffer_range);
   pb_reference(&buffer->buf, NULL);
   FREE(buffer);
}

/* Reallocate the buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents.
 */
static bool si_invalidate_buffer(struct si_context *sctx, struct si_resource *buf)
{
   /* Shared buffers can't be reallocated. */
   if (buf->b.is_shared)
      return false;

   /* Sparse buffers can't be reallocated. */
   if (buf->flags & RADEON_FLAG_SPARSE)
      return false;

   /* In AMD_pinned_memory, the user pointer association only gets
    * broken when the buffer is explicitly re-allocated.
    */
   if (buf->b.is_user_ptr)
      return false;

   /* Check if mapping this buffer would cause waiting for the GPU. */
   if (si_rings_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
       !sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
      /* Reallocate the buffer in the same pipe_resource. */
      si_alloc_resource(sctx->screen, buf);
      si_rebind_buffer(sctx, &buf->b.b);
   } else {
      util_range_set_empty(&buf->valid_buffer_range);
   }

   return true;
}

/* Replace the storage of dst with src. */
void si_replace_buffer_storage(struct pipe_context *ctx, struct pipe_resource *dst,
                               struct pipe_resource *src)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_resource *sdst = si_resource(dst);
   struct si_resource *ssrc = si_resource(src);

   pb_reference(&sdst->buf, ssrc->buf);
   sdst->gpu_address = ssrc->gpu_address;
   sdst->b.b.bind = ssrc->b.b.bind;
   sdst->b.max_forced_staging_uploads = ssrc->b.max_forced_staging_uploads;
   sdst->max_forced_staging_uploads = ssrc->max_forced_staging_uploads;
   sdst->flags = ssrc->flags;

   assert(sdst->vram_usage == ssrc->vram_usage);
   assert(sdst->gart_usage == ssrc->gart_usage);
   assert(sdst->bo_size == ssrc->bo_size);
   assert(sdst->bo_alignment == ssrc->bo_alignment);
   assert(sdst->domains == ssrc->domains);

   si_rebind_buffer(sctx, dst);
}

static void si_invalidate_resource(struct pipe_context *ctx, struct pipe_resource *resource)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_resource *buf = si_resource(resource);

   /* We currently only do anything here for buffers. */
   if (resource->target == PIPE_BUFFER)
      (void)si_invalidate_buffer(sctx, buf);
}

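/* Create and fill in a pipe_transfer object for a buffer mapping. The
 * transfer comes from a slab pool owned by the driver thread or, for
 * threaded-context unsynchronized maps, by the calling thread; plain malloc
 * is used for PIPE_TRANSFER_THREAD_SAFE so the transfer can be freed from
 * any thread. */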
static void *si_buffer_get_transfer(struct pipe_context *ctx, struct pipe_resource *resource,
                                    unsigned usage, const struct pipe_box *box,
                                    struct pipe_transfer **ptransfer, void *data,
                                    struct si_resource *staging, unsigned offset)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_transfer *transfer;

   if (usage & PIPE_TRANSFER_THREAD_SAFE)
      transfer = malloc(sizeof(*transfer));
   else if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
      transfer = slab_alloc(&sctx->pool_transfers_unsync);
   else
      transfer = slab_alloc(&sctx->pool_transfers);

   transfer->b.b.resource = NULL;
   pipe_resource_reference(&transfer->b.b.resource, resource);
   transfer->b.b.level = 0;
   transfer->b.b.usage = usage;
   transfer->b.b.box = *box;
   transfer->b.b.stride = 0;
   transfer->b.b.layer_stride = 0;
   transfer->b.staging = NULL;
   transfer->offset = offset;
   transfer->staging = staging;
   *ptransfer = &transfer->b.b;

   return data;
}

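/* pipe_context::transfer_map for buffers. The mapping strategy, in order:
 *
 * 1. Map unsynchronized if the caller asks for it, or if the target range
 *    has never been written (so the GPU can't be using it).
 * 2. On a whole-resource discard, reallocate the storage via
 *    si_invalidate_buffer and map the idle replacement.
 * 3. On a range discard of a busy or unmappable buffer, write through a
 *    temporary upload buffer that is copied back at flush/unmap time.
 * 4. For reads from VRAM or write-combined memory, copy to a staging buffer
 *    with SDMA and map that instead.
 * 5. Otherwise, map the buffer directly, synchronizing with both rings.
 */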
static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resource *resource,
                                    unsigned level, unsigned usage, const struct pipe_box *box,
                                    struct pipe_transfer **ptransfer)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_resource *buf = si_resource(resource);
   uint8_t *data;

   assert(box->x + box->width <= resource->width0);

   /* From GL_AMD_pinned_memory issues:
    *
    *    4) Is glMapBuffer on a shared buffer guaranteed to return the
    *       same system address which was specified at creation time?
    *
    *       RESOLVED: NO. The GL implementation might return a different
    *       virtual mapping of that memory, although the same physical
    *       page will be used.
    *
    * So don't ever use staging buffers.
    */
   if (buf->b.is_user_ptr)
      usage |= PIPE_TRANSFER_PERSISTENT;

   /* See if the buffer range being mapped has never been initialized,
    * in which case it can be mapped unsynchronized. */
   if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
       usage & PIPE_TRANSFER_WRITE && !buf->b.is_shared &&
       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) {
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
   }

   /* If discarding the entire range, discard the whole resource instead. */
   if (usage & PIPE_TRANSFER_DISCARD_RANGE && box->x == 0 && box->width == resource->width0) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   /* If a buffer in VRAM is too large and the range is discarded, don't
    * map it directly. This makes sure that the buffer stays in VRAM.
    */
   bool force_discard_range = false;
   if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE | PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_PERSISTENT) &&
       /* Try not to decrement the counter if it's not positive. Still racy,
        * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
       buf->max_forced_staging_uploads > 0 &&
       p_atomic_dec_return(&buf->max_forced_staging_uploads) >= 0) {
      usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE | PIPE_TRANSFER_UNSYNCHRONIZED);
      usage |= PIPE_TRANSFER_DISCARD_RANGE;
      force_discard_range = true;
   }

   if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
       !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))) {
      assert(usage & PIPE_TRANSFER_WRITE);

      if (si_invalidate_buffer(sctx, buf)) {
         /* At this point, the buffer is always idle. */
         usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
      } else {
         /* Fall back to a temporary buffer. */
         usage |= PIPE_TRANSFER_DISCARD_RANGE;
      }
   }

   if (usage & PIPE_TRANSFER_FLUSH_EXPLICIT &&
       buf->b.b.flags & SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA) {
      usage &= ~(PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_PERSISTENT);
      usage |= PIPE_TRANSFER_DISCARD_RANGE;
      force_discard_range = true;
   }

   if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
       ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_PERSISTENT))) ||
        (buf->flags & RADEON_FLAG_SPARSE))) {
      assert(usage & PIPE_TRANSFER_WRITE);

      /* Check if mapping this buffer would cause waiting for the GPU.
       */
      if (buf->flags & RADEON_FLAG_SPARSE || force_discard_range ||
          si_rings_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
          !sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
         /* Do a wait-free write-only transfer using a temporary buffer. */
         struct u_upload_mgr *uploader;
         struct si_resource *staging = NULL;
         unsigned offset;

         /* If we are not called from the driver thread, we have
          * to use the uploader from u_threaded_context, which is
          * local to the calling thread.
          */
         if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
            uploader = sctx->tc->base.stream_uploader;
         else
            uploader = sctx->b.stream_uploader;

         u_upload_alloc(uploader, 0, box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT),
                        sctx->screen->info.tcc_cache_line_size, &offset,
                        (struct pipe_resource **)&staging, (void **)&data);

         if (staging) {
            data += box->x % SI_MAP_BUFFER_ALIGNMENT;
            return si_buffer_get_transfer(ctx, resource, usage, box, ptransfer, data, staging,
                                          offset);
         } else if (buf->flags & RADEON_FLAG_SPARSE) {
            return NULL;
         }
      } else {
         /* At this point, the buffer is always idle (we checked it above). */
         usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
      }
   }
   /* Use a staging buffer in cached GTT for reads. */
   else if (((usage & PIPE_TRANSFER_READ) && !(usage & PIPE_TRANSFER_PERSISTENT) &&
             (buf->domains & RADEON_DOMAIN_VRAM || buf->flags & RADEON_FLAG_GTT_WC)) ||
            (buf->flags & RADEON_FLAG_SPARSE)) {
      struct si_resource *staging;

      assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_TRANSFER_THREAD_SAFE)));
      staging = si_aligned_buffer_create(ctx->screen, SI_RESOURCE_FLAG_UNCACHED,
                                         PIPE_USAGE_STAGING,
                                         box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT), 256);
      if (staging) {
         /* Copy the VRAM buffer to the staging buffer. */
         si_sdma_copy_buffer(sctx, &staging->b.b, resource, box->x % SI_MAP_BUFFER_ALIGNMENT,
                             box->x, box->width);

         data = si_buffer_map_sync_with_rings(sctx, staging, usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
         if (!data) {
            si_resource_reference(&staging, NULL);
            return NULL;
         }
         data += box->x % SI_MAP_BUFFER_ALIGNMENT;

         return si_buffer_get_transfer(ctx, resource, usage, box, ptransfer, data, staging, 0);
      } else if (buf->flags & RADEON_FLAG_SPARSE) {
         return NULL;
      }
   }

   data = si_buffer_map_sync_with_rings(sctx, buf, usage);
   if (!data) {
      return NULL;
   }
   data += box->x;

   return si_buffer_get_transfer(ctx, resource, usage, box, ptransfer, data, NULL, 0);
}

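/* Write back the contents of a staging transfer to the real buffer: either
 * record an SDMA upload (for uploaders flagged to flush explicitly via SDMA)
 * or do a regular GPU copy, then mark the flushed range as initialized. */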
static void si_buffer_do_flush_region(struct pipe_context *ctx, struct pipe_transfer *transfer,
                                      const struct pipe_box *box)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_transfer *stransfer = (struct si_transfer *)transfer;
   struct si_resource *buf = si_resource(transfer->resource);

   if (stransfer->staging) {
      unsigned src_offset =
         stransfer->offset + transfer->box.x % SI_MAP_BUFFER_ALIGNMENT + (box->x - transfer->box.x);

      if (buf->b.b.flags & SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA) {
         /* This should be true for all uploaders. */
         assert(transfer->box.x == 0);

         /* Find a previous upload and extend its range. The last
          * upload is likely to be at the end of the list.
          */
         for (int i = sctx->num_sdma_uploads - 1; i >= 0; i--) {
            struct si_sdma_upload *up = &sctx->sdma_uploads[i];

            if (up->dst != buf)
               continue;

            assert(up->src == stransfer->staging);
            assert(box->x > up->dst_offset);
            up->size = box->x + box->width - up->dst_offset;
            return;
         }

         /* Enlarge the array if it's full. */
         if (sctx->num_sdma_uploads == sctx->max_sdma_uploads) {
            unsigned size;

            sctx->max_sdma_uploads += 4;
            size = sctx->max_sdma_uploads * sizeof(sctx->sdma_uploads[0]);
            sctx->sdma_uploads = realloc(sctx->sdma_uploads, size);
         }

         /* Add a new upload. */
         struct si_sdma_upload *up = &sctx->sdma_uploads[sctx->num_sdma_uploads++];
         up->dst = up->src = NULL;
         si_resource_reference(&up->dst, buf);
         si_resource_reference(&up->src, stransfer->staging);
         up->dst_offset = box->x;
         up->src_offset = src_offset;
         up->size = box->width;
         return;
      }

      /* Copy the staging buffer into the original one. */
      si_copy_buffer(sctx, transfer->resource, &stransfer->staging->b.b, box->x, src_offset,
                     box->width);
   }

   util_range_add(&buf->b.b, &buf->valid_buffer_range, box->x, box->x + box->width);
}

static void si_buffer_flush_region(struct pipe_context *ctx, struct pipe_transfer *transfer,
                                   const struct pipe_box *rel_box)
{
   unsigned required_usage = PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT;

   if ((transfer->usage & required_usage) == required_usage) {
      struct pipe_box box;

      u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
      si_buffer_do_flush_region(ctx, transfer, &box);
   }
}

static void si_buffer_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *transfer)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_transfer *stransfer = (struct si_transfer *)transfer;

   if (transfer->usage & PIPE_TRANSFER_WRITE && !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
      si_buffer_do_flush_region(ctx, transfer, &transfer->box);

   si_resource_reference(&stransfer->staging, NULL);
   assert(stransfer->b.staging == NULL); /* for threaded context only */
   pipe_resource_reference(&transfer->resource, NULL);

   if (transfer->usage & PIPE_TRANSFER_THREAD_SAFE) {
      free(transfer);
   } else {
      /* Don't use pool_transfers_unsync. We are always in the driver
       * thread. Freeing an object into a different pool is allowed.
       */
      slab_free(&sctx->pool_transfers, transfer);
   }
}

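/* pipe_context::buffer_subdata: convenience wrapper that writes a block of
 * CPU memory into a buffer through a discarding transfer_map/memcpy/unmap
 * cycle.
 *
 * A hypothetical caller (not part of this file) would reach it through the
 * pipe context, e.g.:
 *
 *    uint32_t zero = 0;
 *    ctx->buffer_subdata(ctx, buffer, PIPE_TRANSFER_WRITE, 0, 4, &zero);
 */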
static void si_buffer_subdata(struct pipe_context *ctx, struct pipe_resource *buffer,
                              unsigned usage, unsigned offset, unsigned size, const void *data)
{
   struct pipe_transfer *transfer = NULL;
   struct pipe_box box;
   uint8_t *map = NULL;

   usage |= PIPE_TRANSFER_WRITE;

   if (!(usage & PIPE_TRANSFER_MAP_DIRECTLY))
      usage |= PIPE_TRANSFER_DISCARD_RANGE;

   u_box_1d(offset, size, &box);
   map = si_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);
   if (!map)
      return;

   memcpy(map, data, size);
   si_buffer_transfer_unmap(ctx, transfer);
}

static const struct u_resource_vtbl si_buffer_vtbl = {
   NULL,                     /* get_handle */
   si_buffer_destroy,        /* resource_destroy */
   si_buffer_transfer_map,   /* transfer_map */
   si_buffer_flush_region,   /* transfer_flush_region */
   si_buffer_transfer_unmap, /* transfer_unmap */
};

static struct si_resource *si_alloc_buffer_struct(struct pipe_screen *screen,
                                                  const struct pipe_resource *templ)
{
   struct si_resource *buf;

   buf = MALLOC_STRUCT(si_resource);

   buf->b.b = *templ;
   buf->b.b.next = NULL;
   pipe_reference_init(&buf->b.b.reference, 1);
   buf->b.b.screen = screen;

   buf->b.vtbl = &si_buffer_vtbl;
   threaded_resource_init(&buf->b.b);

   buf->buf = NULL;
   buf->bind_history = 0;
   buf->TC_L2_dirty = false;
   util_range_init(&buf->valid_buffer_range);
   return buf;
}

static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
                                              const struct pipe_resource *templ, unsigned alignment)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct si_resource *buf = si_alloc_buffer_struct(screen, templ);

   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
      buf->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;

   si_init_resource_fields(sscreen, buf, templ->width0, alignment);

   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
      buf->flags |= RADEON_FLAG_SPARSE;

   if (!si_alloc_resource(sscreen, buf)) {
      FREE(buf);
      return NULL;
   }
   return &buf->b.b;
}

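/* Create a buffer with a caller-chosen alignment directly from a
 * pipe_screen, bypassing pipe_screen::resource_create (which, via
 * si_resource_create below, always uses the default 256-byte alignment). */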
struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen, unsigned flags,
                                                 unsigned usage, unsigned size, unsigned alignment)
{
   struct pipe_resource buffer;

   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = 0;
   buffer.usage = usage;
   buffer.flags = flags;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return si_buffer_create(screen, &buffer, alignment);
}

struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen, unsigned flags,
                                             unsigned usage, unsigned size, unsigned alignment)
{
   return si_resource(pipe_aligned_buffer_create(screen, flags, usage, size, alignment));
}

static struct pipe_resource *si_buffer_from_user_memory(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        void *user_memory)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct radeon_winsys *ws = sscreen->ws;
   struct si_resource *buf = si_alloc_buffer_struct(screen, templ);

   buf->domains = RADEON_DOMAIN_GTT;
   buf->flags = 0;
   buf->b.is_user_ptr = true;
   util_range_add(&buf->b.b, &buf->valid_buffer_range, 0, templ->width0);
   util_range_add(&buf->b.b, &buf->b.valid_buffer_range, 0, templ->width0);

   /* Convert a user pointer to a buffer. */
   buf->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
   if (!buf->buf) {
      FREE(buf);
      return NULL;
   }

   buf->gpu_address = ws->buffer_get_virtual_address(buf->buf);

   buf->gart_usage = templ->width0;

   return &buf->b.b;
}

static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
                                                const struct pipe_resource *templ)
{
   if (templ->target == PIPE_BUFFER) {
      return si_buffer_create(screen, templ, 256);
   } else {
      return si_texture_create(screen, templ);
   }
}

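/* pipe_context::resource_commit for sparse buffers: commit or de-commit the
 * physical memory backing the given range of a sparse buffer. */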
static bool si_resource_commit(struct pipe_context *pctx, struct pipe_resource *resource,
                               unsigned level, struct pipe_box *box, bool commit)
{
   struct si_context *ctx = (struct si_context *)pctx;
   struct si_resource *res = si_resource(resource);

   /*
    * Since buffer commitment changes cannot be pipelined, we need to
    * (a) flush any pending commands that refer to the buffer we're about
    *     to change,
    * (b) wait for threaded submit to finish, including those that were
    *     triggered by some other, earlier operation.
    */
   if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
       ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, res->buf, RADEON_USAGE_READWRITE)) {
      si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
   }
   if (radeon_emitted(ctx->sdma_cs, 0) &&
       ctx->ws->cs_is_buffer_referenced(ctx->sdma_cs, res->buf, RADEON_USAGE_READWRITE)) {
      si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
   }

   if (ctx->sdma_cs)
      ctx->ws->cs_sync_flush(ctx->sdma_cs);
   ctx->ws->cs_sync_flush(ctx->gfx_cs);

   assert(resource->target == PIPE_BUFFER);

   return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
}

void si_init_screen_buffer_functions(struct si_screen *sscreen)
{
   sscreen->b.resource_create = si_resource_create;
   sscreen->b.resource_destroy = u_resource_destroy_vtbl;
   sscreen->b.resource_from_user_memory = si_buffer_from_user_memory;
}

void si_init_buffer_functions(struct si_context *sctx)
{
   sctx->b.invalidate_resource = si_invalidate_resource;
   sctx->b.transfer_map = u_transfer_map_vtbl;
   sctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
   sctx->b.transfer_unmap = u_transfer_unmap_vtbl;
   sctx->b.texture_subdata = u_default_texture_subdata;
   sctx->b.buffer_subdata = si_buffer_subdata;
   sctx->b.resource_commit = si_resource_commit;
}