/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
#include <stdio.h>

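/* Return true if the buffer is referenced by either the GFX or the DMA ring
 * for the given usage. */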
bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage)
{
        if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
                return true;
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
                return true;
        }
        return false;
}

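/* Map a buffer for CPU access, flushing and waiting on any ring that still
 * references it first. Returns NULL instead of blocking when
 * PIPE_TRANSFER_DONTBLOCK is set. */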
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
                                      struct r600_resource *resource,
                                      unsigned usage)
{
        enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
        bool busy = false;

        if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
                return ctx->ws->buffer_map(resource->buf, NULL, usage);
        }

        if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* have to wait for the last write */
                rusage = RADEON_USAGE_WRITE;
        }

        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
                        ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->gfx.flush(ctx, 0, NULL);
                        busy = true;
                }
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
                        ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->dma.flush(ctx, 0, NULL);
                        busy = true;
                }
        }

        if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
                        return NULL;
                } else {
                        /* We will be waiting for the GPU. Wait for any offloaded
                         * CS flush to complete to avoid busy-waiting in the winsys. */
                        ctx->ws->cs_sync_flush(ctx->gfx.cs);
                        if (ctx->dma.cs)
                                ctx->ws->cs_sync_flush(ctx->dma.cs);
                }
        }

        /* Setting the CS to NULL will prevent doing checks we have already done. */
        return ctx->ws->buffer_map(resource->buf, NULL, usage);
}

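/* Allocate or reallocate the winsys buffer backing a resource. The memory
 * domain and flags are derived from the pipe usage, the kernel (DRM)
 * version, and persistent/coherent mapping requirements. */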
bool r600_init_resource(struct r600_common_screen *rscreen,
                        struct r600_resource *res,
                        uint64_t size, unsigned alignment)
{
        struct r600_texture *rtex = (struct r600_texture*)res;
        struct pb_buffer *old_buf, *new_buf;
        enum radeon_bo_flag flags = 0;

        switch (res->b.b.usage) {
        case PIPE_USAGE_STREAM:
                flags = RADEON_FLAG_GTT_WC;
                /* fall through */
        case PIPE_USAGE_STAGING:
                /* Transfers are likely to occur more often with these resources. */
                res->domains = RADEON_DOMAIN_GTT;
                break;
        case PIPE_USAGE_DYNAMIC:
                /* Older kernels didn't always flush the HDP cache before
                 * CS execution.
                 */
                if (rscreen->info.drm_major == 2 &&
                    rscreen->info.drm_minor < 40) {
                        res->domains = RADEON_DOMAIN_GTT;
                        flags |= RADEON_FLAG_GTT_WC;
                        break;
                }
                flags |= RADEON_FLAG_CPU_ACCESS;
                /* fall through */
        case PIPE_USAGE_DEFAULT:
        case PIPE_USAGE_IMMUTABLE:
        default:
                /* Not listing GTT here improves performance in some apps. */
                res->domains = RADEON_DOMAIN_VRAM;
                flags |= RADEON_FLAG_GTT_WC;
                break;
        }

        if (res->b.b.target == PIPE_BUFFER &&
            res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                              PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
                /* Use GTT for all persistent mappings with older kernels,
                 * because they didn't always flush the HDP cache before CS
                 * execution.
                 *
                 * Write-combined CPU mappings are fine, the kernel ensures all CPU
                 * writes finish before the GPU executes a command stream.
                 */
                if (rscreen->info.drm_major == 2 &&
                    rscreen->info.drm_minor < 40)
                        res->domains = RADEON_DOMAIN_GTT;
                else if (res->domains & RADEON_DOMAIN_VRAM)
                        flags |= RADEON_FLAG_CPU_ACCESS;
        }

        /* Tiled textures are unmappable. Always put them in VRAM. */
        if (res->b.b.target != PIPE_BUFFER &&
            rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
                res->domains = RADEON_DOMAIN_VRAM;
                flags &= ~RADEON_FLAG_CPU_ACCESS;
                flags |= RADEON_FLAG_NO_CPU_ACCESS |
                         RADEON_FLAG_GTT_WC;
        }

        /* If VRAM is just stolen system memory, allow both VRAM and GTT,
         * whichever has free space. If a buffer is evicted from VRAM to GTT,
         * it will stay there.
         */
        if (!rscreen->info.has_dedicated_vram &&
            res->domains == RADEON_DOMAIN_VRAM)
                res->domains = RADEON_DOMAIN_VRAM_GTT;

        if (rscreen->debug_flags & DBG_NO_WC)
                flags &= ~RADEON_FLAG_GTT_WC;

        /* Allocate a new resource. */
        new_buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
                                             res->domains, flags);
        if (!new_buf) {
                return false;
        }

        /* Replace the pointer such that if res->buf wasn't NULL, it won't be
         * NULL. This should prevent crashes with multiple contexts using
         * the same buffer where one of the contexts invalidates it while
         * the others are using it. */
        old_buf = res->buf;
        res->buf = new_buf; /* should be atomic */

        if (rscreen->info.has_virtual_memory)
                res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
        else
                res->gpu_address = 0;

        pb_reference(&old_buf, NULL);

        util_range_set_empty(&res->valid_buffer_range);
        res->TC_L2_dirty = false;

        /* Set expected VRAM and GART usage for the buffer. */
        res->vram_usage = 0;
        res->gart_usage = 0;
        if (res->domains & RADEON_DOMAIN_VRAM)
                res->vram_usage = size;
        else if (res->domains & RADEON_DOMAIN_GTT)
                res->gart_usage = size;

        /* Print debug information. */
        if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
                fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
                        res->gpu_address, res->gpu_address + res->buf->size,
                        res->buf->size);
        }
        return true;
}

static void r600_buffer_destroy(struct pipe_screen *screen,
                                struct pipe_resource *buf)
{
        struct r600_resource *rbuffer = r600_resource(buf);

        util_range_destroy(&rbuffer->valid_buffer_range);
        pb_reference(&rbuffer->buf, NULL);
        FREE(rbuffer);
}

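/* Replace the storage of a buffer that may be busy so that the next mapping
 * does not have to wait for the GPU. Returns false if the storage cannot be
 * replaced (shared and user-pointer buffers). */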
static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
                       struct r600_resource *rbuffer)
{
        /* Shared buffers can't be reallocated. */
        if (rbuffer->is_shared)
                return false;

        /* In AMD_pinned_memory, the user pointer association only gets
         * broken when the buffer is explicitly re-allocated.
         */
        if (rctx->ws->buffer_is_user_ptr(rbuffer->buf))
                return false;

        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
            !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
                rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
        } else {
                util_range_set_empty(&rbuffer->valid_buffer_range);
        }

        return true;
}

void r600_invalidate_resource(struct pipe_context *ctx,
                              struct pipe_resource *resource)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_resource *rbuffer = r600_resource(resource);

        /* We currently only do anything here for buffers. */
        if (resource->target == PIPE_BUFFER)
                (void)r600_invalidate_buffer(rctx, rbuffer);
}

static void *r600_buffer_get_transfer(struct pipe_context *ctx,
                                      struct pipe_resource *resource,
                                      unsigned level,
                                      unsigned usage,
                                      const struct pipe_box *box,
                                      struct pipe_transfer **ptransfer,
                                      void *data, struct r600_resource *staging,
                                      unsigned offset)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);

        transfer->transfer.resource = resource;
        transfer->transfer.level = level;
        transfer->transfer.usage = usage;
        transfer->transfer.box = *box;
        transfer->transfer.stride = 0;
        transfer->transfer.layer_stride = 0;
        transfer->offset = offset;
        transfer->staging = staging;
        *ptransfer = &transfer->transfer;
        return data;
}

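/* Return true if the given buffer range can be copied on the GPU: CP DMA
 * handles any alignment, while the DMA ring and streamout require
 * dword-aligned offsets and size. */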
static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
                                     unsigned dstx, unsigned srcx, unsigned size)
{
        bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);

        return rctx->screen->has_cp_dma ||
               (dword_aligned && (rctx->dma.cs ||
                                  rctx->screen->has_streamout));
}

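/* Map a buffer range, avoiding GPU stalls where possible: uninitialized
 * ranges are mapped unsynchronized, whole-resource discards reallocate the
 * storage, discards of a busy range go through an upload staging buffer,
 * and VRAM reads are copied through a GTT staging buffer. */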
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
                                      struct pipe_resource *resource,
                                      unsigned level,
                                      unsigned usage,
                                      const struct pipe_box *box,
                                      struct pipe_transfer **ptransfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
        struct r600_resource *rbuffer = r600_resource(resource);
        uint8_t *data;

        assert(box->x + box->width <= resource->width0);

        /* See if the buffer range being mapped has never been initialized,
         * in which case it can be mapped unsynchronized. */
        if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
            usage & PIPE_TRANSFER_WRITE &&
            !rbuffer->is_shared &&
            !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
                usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
        }

        /* If discarding the entire range, discard the whole resource instead. */
        if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
            box->x == 0 && box->width == resource->width0) {
                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
        }

        if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                assert(usage & PIPE_TRANSFER_WRITE);

                if (r600_invalidate_buffer(rctx, rbuffer)) {
                        /* At this point, the buffer is always idle. */
                        usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
                } else {
                        /* Fall back to a temporary buffer. */
                        usage |= PIPE_TRANSFER_DISCARD_RANGE;
                }
        }

        if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
            !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
                       PIPE_TRANSFER_PERSISTENT)) &&
            !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
            r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) {
                assert(usage & PIPE_TRANSFER_WRITE);

                /* Check if mapping this buffer would cause waiting for the GPU. */
                if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
                    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
                        /* Do a wait-free write-only transfer using a temporary buffer. */
                        unsigned offset;
                        struct r600_resource *staging = NULL;

                        u_upload_alloc(rctx->uploader, 0, box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
                                       256, &offset, (struct pipe_resource**)&staging, (void**)&data);

                        if (staging) {
                                data += box->x % R600_MAP_BUFFER_ALIGNMENT;
                                return r600_buffer_get_transfer(ctx, resource, level, usage, box,
                                                                ptransfer, data, staging, offset);
                        }
                        return NULL;
                }

                /* At this point, the buffer is always idle (we checked it above). */
                usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
        }
        /* Using a staging buffer in GTT for larger reads is much faster. */
        else if ((usage & PIPE_TRANSFER_READ) &&
                 !(usage & (PIPE_TRANSFER_WRITE |
                            PIPE_TRANSFER_PERSISTENT)) &&
                 rbuffer->domains & RADEON_DOMAIN_VRAM &&
                 r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) {
                struct r600_resource *staging;

                staging = (struct r600_resource*) pipe_buffer_create(
                                ctx->screen, PIPE_BIND_TRANSFER_READ, PIPE_USAGE_STAGING,
                                box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
                if (staging) {
                        /* Copy the VRAM buffer to the staging buffer. */
                        ctx->resource_copy_region(ctx, &staging->b.b, 0,
                                                  box->x % R600_MAP_BUFFER_ALIGNMENT,
                                                  0, 0, resource, level, box);

                        data = r600_buffer_map_sync_with_rings(rctx, staging, PIPE_TRANSFER_READ);
                        if (!data) {
                                r600_resource_reference(&staging, NULL);
                                return NULL;
                        }
                        data += box->x % R600_MAP_BUFFER_ALIGNMENT;

                        return r600_buffer_get_transfer(ctx, resource, level, usage, box,
                                                        ptransfer, data, staging, 0);
                }
        }

        data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
        if (!data) {
                return NULL;
        }
        data += box->x;

        return r600_buffer_get_transfer(ctx, resource, level, usage, box,
                                        ptransfer, data, NULL, 0);
}

static void r600_buffer_do_flush_region(struct pipe_context *ctx,
                                        struct pipe_transfer *transfer,
                                        const struct pipe_box *box)
{
        struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
        struct r600_resource *rbuffer = r600_resource(transfer->resource);

        if (rtransfer->staging) {
                struct pipe_resource *dst, *src;
                unsigned soffset;
                struct pipe_box dma_box;

                dst = transfer->resource;
                src = &rtransfer->staging->b.b;
                soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

                u_box_1d(soffset, box->width, &dma_box);

                /* Copy the staging buffer into the original one. */
                ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
        }

        util_range_add(&rbuffer->valid_buffer_range, box->x,
                       box->x + box->width);
}

static void r600_buffer_flush_region(struct pipe_context *ctx,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *rel_box)
{
        if (transfer->usage & (PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_FLUSH_EXPLICIT)) {
                struct pipe_box box;

                u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
                r600_buffer_do_flush_region(ctx, transfer, &box);
        }
}

static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
                                       struct pipe_transfer *transfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

        if (transfer->usage & PIPE_TRANSFER_WRITE &&
            !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
                r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

        if (rtransfer->staging)
                r600_resource_reference(&rtransfer->staging, NULL);

        util_slab_free(&rctx->pool_transfers, transfer);
}

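/* Write CPU data into a buffer range: a transfer_map/memcpy/transfer_unmap
 * wrapper that uses PIPE_TRANSFER_DISCARD_RANGE to avoid stalls. */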
void r600_buffer_subdata(struct pipe_context *ctx,
                         struct pipe_resource *buffer,
                         unsigned usage, unsigned offset,
                         unsigned size, const void *data)
{
        struct pipe_transfer *transfer = NULL;
        struct pipe_box box;
        uint8_t *map = NULL;

        u_box_1d(offset, size, &box);
        map = r600_buffer_transfer_map(ctx, buffer, 0,
                                       PIPE_TRANSFER_WRITE |
                                       PIPE_TRANSFER_DISCARD_RANGE |
                                       usage,
                                       &box, &transfer);
        if (!map)
                return;

        memcpy(map, data, size);
        r600_buffer_transfer_unmap(ctx, transfer);
}

static const struct u_resource_vtbl r600_buffer_vtbl =
{
        NULL,                           /* get_handle */
        r600_buffer_destroy,            /* resource_destroy */
        r600_buffer_transfer_map,       /* transfer_map */
        r600_buffer_flush_region,       /* transfer_flush_region */
        r600_buffer_transfer_unmap,     /* transfer_unmap */
        NULL                            /* transfer_inline_write */
};

static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
                         const struct pipe_resource *templ)
{
        struct r600_resource *rbuffer;

        rbuffer = MALLOC_STRUCT(r600_resource);

        rbuffer->b.b = *templ;
        pipe_reference_init(&rbuffer->b.b.reference, 1);
        rbuffer->b.b.screen = screen;
        rbuffer->b.vtbl = &r600_buffer_vtbl;
        rbuffer->buf = NULL;
        rbuffer->TC_L2_dirty = false;
        rbuffer->is_shared = false;
        util_range_init(&rbuffer->valid_buffer_range);
        return rbuffer;
}

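/* Create a buffer resource and allocate its backing storage. */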
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ,
                                         unsigned alignment)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

        if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment)) {
                FREE(rbuffer);
                return NULL;
        }
        return &rbuffer->b.b;
}

struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
                                                 unsigned bind,
                                                 unsigned usage,
                                                 unsigned size,
                                                 unsigned alignment)
{
        struct pipe_resource buffer;

        memset(&buffer, 0, sizeof buffer);
        buffer.target = PIPE_BUFFER;
        buffer.format = PIPE_FORMAT_R8_UNORM;
        buffer.bind = bind;
        buffer.usage = usage;
        buffer.flags = 0;
        buffer.width0 = size;
        buffer.height0 = 1;
        buffer.depth0 = 1;
        buffer.array_size = 1;
        return r600_buffer_create(screen, &buffer, alignment);
}

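/* Create a buffer backed by application-owned memory (AMD_pinned_memory).
 * The whole buffer is considered valid from the start. */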
struct pipe_resource *
r600_buffer_from_user_memory(struct pipe_screen *screen,
                             const struct pipe_resource *templ,
                             void *user_memory)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct radeon_winsys *ws = rscreen->ws;
        struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

        rbuffer->domains = RADEON_DOMAIN_GTT;
        util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);

        /* Convert a user pointer to a buffer. */
        rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
        if (!rbuffer->buf) {
                FREE(rbuffer);
                return NULL;
        }

        if (rscreen->info.has_virtual_memory)
                rbuffer->gpu_address =
                        ws->buffer_get_virtual_address(rbuffer->buf);
        else
                rbuffer->gpu_address = 0;

        return &rbuffer->b.b;
}