/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include <inttypes.h>
#include <stdio.h>
bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
				     struct pb_buffer *buf,
				     enum radeon_bo_usage usage)
{
	if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
		return true;
	}
	return false;
}
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				      struct r600_resource *resource,
				      unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->gfx.flush(ctx, 0, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->dma.flush(ctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We are going to wait for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(ctx->gfx.cs);
			if (ctx->dma.cs)
				ctx->ws->cs_sync_flush(ctx->dma.cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return ctx->ws->buffer_map(resource->buf, NULL, usage);
}
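
/*
 * Illustrative sketch (editor's example; the helper name is hypothetical):
 * how a caller can use r600_buffer_map_sync_with_rings() without risking a
 * CPU stall. The flags follow the logic above.
 */
static void *example_try_map_without_stall(struct r600_common_context *rctx,
					   struct r600_resource *res)
{
	/* With DONTBLOCK, a buffer still referenced by the gfx or dma ring is
	 * flushed asynchronously and NULL is returned instead of waiting. */
	return r600_buffer_map_sync_with_rings(rctx, res,
					       PIPE_TRANSFER_WRITE |
					       PIPE_TRANSFER_DONTBLOCK);
}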
void r600_init_resource_fields(struct r600_common_screen *rscreen,
			       struct r600_resource *res,
			       uint64_t size, unsigned alignment)
{
	struct r600_texture *rtex = (struct r600_texture*)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution. */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40) {
			res->domains = RADEON_DOMAIN_GTT;
			res->flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		res->flags |= RADEON_FLAG_CPU_ACCESS;
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		/* Use GTT for all persistent mappings with older
		 * kernels, because they didn't always flush the HDP
		 * cache before CS execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel
		 * ensures all CPU writes finish before the GPU
		 * executes a command stream.
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40)
			res->domains = RADEON_DOMAIN_GTT;
		else if (res->domains & RADEON_DOMAIN_VRAM)
			res->flags |= RADEON_FLAG_CPU_ACCESS;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if (res->b.b.target != PIPE_BUFFER &&
	    rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags &= ~RADEON_FLAG_CPU_ACCESS;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			      RADEON_FLAG_GTT_WC;
	}

	/* If VRAM is just stolen system memory, allow both VRAM and
	 * GTT, whichever has free space. If a buffer is evicted from
	 * VRAM to GTT, it will stay there.
	 */
	if (!rscreen->info.has_dedicated_vram &&
	    res->domains == RADEON_DOMAIN_VRAM)
		res->domains = RADEON_DOMAIN_VRAM_GTT;

	if (rscreen->debug_flags & DBG_NO_WC)
		res->flags &= ~RADEON_FLAG_GTT_WC;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;

	if (res->domains & RADEON_DOMAIN_VRAM)
		res->vram_usage = size;
	else if (res->domains & RADEON_DOMAIN_GTT)
		res->gart_usage = size;
}
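
/*
 * Illustrative sketch (editor's example; the helper name is hypothetical):
 * what the placement heuristic above produces for a streaming buffer. It
 * assumes "res" is a buffer resource whose pipe_resource template has
 * already been filled in. PIPE_USAGE_STREAM falls through to the staging
 * case, so the buffer lands in GTT and, unless DBG_NO_WC is set, gets a
 * write-combined CPU mapping.
 */
static void example_init_stream_buffer_fields(struct r600_common_screen *rscreen,
					      struct r600_resource *res,
					      uint64_t size)
{
	res->b.b.usage = PIPE_USAGE_STREAM;
	r600_init_resource_fields(rscreen, res, size, 256);
	assert(res->domains == RADEON_DOMAIN_GTT);
}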
bool r600_alloc_resource(struct r600_common_screen *rscreen,
			 struct r600_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	/* Print debug information. */
	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}
static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	util_range_destroy(&rbuffer->valid_buffer_range);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}
static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
		       struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->is_shared)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rctx->ws->buffer_is_user_ptr(rbuffer->buf))
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}
void r600_invalidate_resource(struct pipe_context *ctx,
			      struct pipe_resource *resource)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);

	/* We currently only do anything here for buffers. */
	if (resource->target == PIPE_BUFFER)
		(void)r600_invalidate_buffer(rctx, rbuffer);
}
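
/*
 * Illustrative sketch (editor's example; the helper name is hypothetical):
 * the classic buffer-orphaning pattern that reaches the code above. When an
 * application discards an entire buffer that the GPU may still be using, the
 * backing storage is replaced instead of stalling the CPU.
 */
static void example_orphan_buffer(struct pipe_context *pipe,
				  struct pipe_resource *buf)
{
	/* For PIPE_BUFFER resources this reallocates the storage only if a
	 * map would otherwise have to wait for the GPU. */
	r600_invalidate_resource(pipe, buf);
}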
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer = slab_alloc_st(&rctx->pool_transfers);

	transfer->transfer.resource = resource;
	transfer->transfer.level = level;
	transfer->transfer.usage = usage;
	transfer->transfer.box = *box;
	transfer->transfer.stride = 0;
	transfer->transfer.layer_stride = 0;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->transfer;
	return data;
}
static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
				     unsigned dstx, unsigned srcx, unsigned size)
{
	bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);

	return rctx->screen->has_cp_dma ||
	       (dword_aligned && (rctx->dma.cs ||
				  rctx->screen->has_streamout));
}
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !rbuffer->is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		if (r600_invalidate_buffer(rctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_TRANSFER_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
	    !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       PIPE_TRANSFER_PERSISTENT)) &&
	    !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
	    r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(rctx->uploader, 0, box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       256, &offset, (struct pipe_resource**)&staging, (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, level, usage, box,
								ptransfer, data, staging, offset);
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		}
	}
	/* Using a staging buffer in GTT for larger reads is much faster. */
	else if ((usage & PIPE_TRANSFER_READ) &&
		 !(usage & (PIPE_TRANSFER_WRITE |
			    PIPE_TRANSFER_PERSISTENT)) &&
		 rbuffer->domains & RADEON_DOMAIN_VRAM &&
		 r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) {
		struct r600_resource *staging;

		staging = (struct r600_resource*) pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			ctx->resource_copy_region(ctx, &staging->b.b, 0,
						  box->x % R600_MAP_BUFFER_ALIGNMENT,
						  0, 0, resource, level, box);

			data = r600_buffer_map_sync_with_rings(rctx, staging, PIPE_TRANSFER_READ);
			if (!data) {
				r600_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % R600_MAP_BUFFER_ALIGNMENT;

			return r600_buffer_get_transfer(ctx, resource, level, usage, box,
							ptransfer, data, staging, 0);
		}
	}

	data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, level, usage, box,
					ptransfer, data, NULL, 0);
}
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
					struct pipe_transfer *transfer,
					const struct pipe_box *box)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}
static void r600_buffer_flush_region(struct pipe_context *ctx,
				     struct pipe_transfer *transfer,
				     const struct pipe_box *rel_box)
{
	if (transfer->usage & (PIPE_TRANSFER_WRITE |
			       PIPE_TRANSFER_FLUSH_EXPLICIT)) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		r600_buffer_do_flush_region(ctx, transfer, &box);
	}
}
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	if (rtransfer->staging)
		r600_resource_reference(&rtransfer->staging, NULL);

	slab_free_st(&rctx->pool_transfers, transfer);
}
void r600_buffer_subdata(struct pipe_context *ctx,
			 struct pipe_resource *buffer,
			 unsigned usage, unsigned offset,
			 unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	u_box_1d(offset, size, &box);
	map = r600_buffer_transfer_map(ctx, buffer, 0,
				       PIPE_TRANSFER_WRITE |
				       PIPE_TRANSFER_DISCARD_RANGE |
				       usage,
				       &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	r600_buffer_transfer_unmap(ctx, transfer);
}
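
/*
 * Illustrative sketch (editor's example; the helper name and sizes are
 * hypothetical): a typical small in-place update routed through
 * r600_buffer_subdata(). The DISCARD_RANGE handling in
 * r600_buffer_transfer_map() decides whether a staging copy is needed.
 */
static void example_update_constants(struct pipe_context *pipe,
				     struct pipe_resource *const_buf,
				     const float data[4])
{
	/* Overwrite 16 bytes at offset 0 without reading back the old contents. */
	r600_buffer_subdata(pipe, const_buf, PIPE_TRANSFER_WRITE, 0,
			    4 * sizeof(float), data);
}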
static const struct u_resource_vtbl r600_buffer_vtbl =
{
	NULL,				/* get_handle */
	r600_buffer_destroy,		/* resource_destroy */
	r600_buffer_transfer_map,	/* transfer_map */
	r600_buffer_flush_region,	/* transfer_flush_region */
	r600_buffer_transfer_unmap,	/* transfer_unmap */
};
static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
			 const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;
	rbuffer->b.vtbl = &r600_buffer_vtbl;
	rbuffer->buf = NULL;
	rbuffer->TC_L2_dirty = false;
	rbuffer->is_shared = false;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	r600_init_resource_fields(rscreen, rbuffer, templ->width0, alignment);

	if (!r600_alloc_resource(rscreen, rbuffer)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}
struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned bind,
						 unsigned usage,
						 unsigned size,
						 unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = bind;
	buffer.usage = usage;
	buffer.flags = 0;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return r600_buffer_create(screen, &buffer, alignment);
}
struct pipe_resource *
r600_buffer_from_user_memory(struct pipe_screen *screen,
			     const struct pipe_resource *templ,
			     void *user_memory)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	rbuffer->domains = RADEON_DOMAIN_GTT;
	util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	if (rscreen->info.has_virtual_memory)
		rbuffer->gpu_address =
			ws->buffer_get_virtual_address(rbuffer->buf);
	else
		rbuffer->gpu_address = 0;

	return &rbuffer->b.b;
}