/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <stdio.h>
#include <inttypes.h>

#ifndef AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
#endif

/* Set to 1 for verbose output showing committed sparse buffer ranges. */
#define DEBUG_SPARSE_COMMITS 0

struct amdgpu_sparse_backing_chunk {
   uint32_t begin, end;
};

static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags);

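/* Wait for the buffer to become idle, or report whether it is idle when
 * timeout == 0. Submission ioctls in flight are drained first; shared
 * buffers go through the kernel wait, everything else is checked against
 * the buffer's fence list. */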
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int64_t abs_timeout;

   if (timeout == 0) {
      if (p_atomic_read(&bo->num_active_ioctls))
         return false;

   } else {
      abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Wait if any ioctl is being submitted with this buffer. */
      if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
         return false;
   }

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      unsigned idle_fences;
      bool buffer_idle;

      mtx_lock(&ws->bo_fence_lock);

      for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
         if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
            break;
      }

      /* Release the idle fences to avoid checking them again later. */
      for (unsigned i = 0; i < idle_fences; ++i)
         amdgpu_fence_reference(&bo->fences[i], NULL);

      memmove(&bo->fences[0], &bo->fences[idle_fences],
              (bo->num_fences - idle_fences) * sizeof(*bo->fences));
      bo->num_fences -= idle_fences;

      buffer_idle = !bo->num_fences;
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   } else {
      bool buffer_idle = true;

      mtx_lock(&ws->bo_fence_lock);
      while (bo->num_fences && buffer_idle) {
         struct pipe_fence_handle *fence = NULL;
         bool fence_idle = false;

         amdgpu_fence_reference(&fence, bo->fences[0]);

         /* Wait for the fence. */
         mtx_unlock(&ws->bo_fence_lock);
         if (amdgpu_fence_wait(fence, abs_timeout, true))
            fence_idle = true;
         else
            buffer_idle = false;
         mtx_lock(&ws->bo_fence_lock);

         /* Release an idle fence to avoid checking it again later, keeping in
          * mind that the fence array may have been modified by other threads.
          */
         if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
            amdgpu_fence_reference(&bo->fences[0], NULL);
            memmove(&bo->fences[0], &bo->fences[1],
                    (bo->num_fences - 1) * sizeof(*bo->fences));
            bo->num_fences--;
         }

         amdgpu_fence_reference(&fence, NULL);
      }
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   }
}

static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}

static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
{
   for (unsigned i = 0; i < bo->num_fences; ++i)
      amdgpu_fence_reference(&bo->fences[i], NULL);

   FREE(bo->fences);
   bo->num_fences = 0;
   bo->max_fences = 0;
}

void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo && "must not be called for slab entries");

   if (bo->ws->debug_all_bos) {
      mtx_lock(&bo->ws->global_bo_list_lock);
      LIST_DEL(&bo->u.real.global_list_item);
      bo->ws->num_buffers--;
      mtx_unlock(&bo->ws->global_bo_list_lock);
   }

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->u.real.va_handle);
   amdgpu_bo_free(bo->bo);

   amdgpu_bo_remove_fences(bo);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);

   if (bo->u.real.map_count >= 1) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         bo->ws->mapped_vram -= bo->base.size;
      else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         bo->ws->mapped_gtt -= bo->base.size;
      bo->ws->num_mapped_buffers--;
   }

   FREE(bo);
}

static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo); /* slab buffers have a separate vtbl */

   if (bo->u.real.use_reusable_pool)
      pb_cache_add_buffer(&bo->u.real.cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}

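/* CPU mapping. Unless the caller asks for an unsynchronized mapping, any
 * command stream referencing the buffer is flushed and the buffer is waited
 * on (only for write access when mapping for read). Slab entries are mapped
 * through their parent buffer with the proper offset applied. */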
static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;
   uint64_t offset = 0;

   assert(!bo->sparse);

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                            RADEON_USAGE_WRITE)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   if (bo->bo) {
      real = bo;
   } else {
      real = bo->u.slab.real;
      offset = bo->va - real->va;
   }

   r = amdgpu_bo_cpu_map(real->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&real->ws->bo_cache);
      r = amdgpu_bo_cpu_map(real->bo, &cpu);
      if (r)
         return NULL;
   }

   if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram += real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt += real->base.size;
      real->ws->num_mapped_buffers++;
   }
   return (uint8_t*)cpu + offset;
}

static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;

   assert(!bo->sparse);

   if (bo->user_ptr)
      return;

   real = bo->bo ? bo : bo->u.slab.real;

   if (p_atomic_dec_zero(&real->u.real.map_count)) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram -= real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt -= real->base.size;
      real->ws->num_mapped_buffers--;
   }

   amdgpu_bo_cpu_unmap(real->bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   assert(bo->bo);

   if (ws->debug_all_bos) {
      mtx_lock(&ws->global_bo_list_lock);
      LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
      ws->num_buffers++;
      mtx_unlock(&ws->global_bo_list_lock);
   }
}

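/* Allocate a real GEM buffer: fill the amdgpu_bo_alloc_request from the
 * requested domains/flags, allocate a GPU VA range and map the buffer into
 * it, then account the size against the VRAM/GTT totals. */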
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags,
                                                 unsigned pb_cache_bucket)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   unsigned va_gap_size;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                       pb_cache_bucket);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
   if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
       ws->info.drm_minor >= 20)
      request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
   if (size > ws->info.pte_fragment_size)
      alignment = MAX2(alignment, ws->info.pte_fragment_size);
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size + va_gap_size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_local = !!(request.flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(size, ws->info.gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}

bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
   struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
   bo = container_of(entry, bo, u.slab.entry);

   return amdgpu_bo_can_reclaim(&bo->base);
}

static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(!bo->bo);

   pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
}

static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
   amdgpu_bo_slab_destroy
   /* other functions are never called */
};

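/* pb_slabs callback: create one slab buffer and carve it into equally sized
 * entries that can be handed out as suballocated BOs. */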
struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size,
                                     unsigned group_index)
{
   struct amdgpu_winsys *ws = priv;
   struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
   enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
   enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
   uint32_t base_id;

   if (!slab)
      return NULL;

   unsigned slab_size = 1 << AMDGPU_SLAB_BO_SIZE_LOG2;
   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
                                                    slab_size, slab_size,
                                                    domains, flags));
   if (!slab->buffer)
      goto fail;

   assert(slab->buffer->bo);

   slab->base.num_entries = slab->buffer->base.size / entry_size;
   slab->base.num_free = slab->base.num_entries;
   slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
   if (!slab->entries)
      goto fail_buffer;

   LIST_INITHEAD(&slab->base.free);

   base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);

   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      struct amdgpu_winsys_bo *bo = &slab->entries[i];

      bo->base.alignment = entry_size;
      bo->base.usage = slab->buffer->base.usage;
      bo->base.size = entry_size;
      bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
      bo->ws = ws;
      bo->va = slab->buffer->va + i * entry_size;
      bo->initial_domain = domains;
      bo->unique_id = base_id + i;
      bo->u.slab.entry.slab = &slab->base;
      bo->u.slab.entry.group_index = group_index;
      bo->u.slab.real = slab->buffer;

      LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
   }

   return &slab->base;

fail_buffer:
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
fail:
   FREE(slab);
   return NULL;
}

void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
{
   struct amdgpu_slab *slab = amdgpu_slab(pslab);

   for (unsigned i = 0; i < slab->base.num_entries; ++i)
      amdgpu_bo_remove_fences(&slab->entries[i]);

   FREE(slab->entries);
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
   FREE(slab);
}

#if DEBUG_SPARSE_COMMITS
static void
sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
{
   fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
                   "Commitments:\n",
           __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);

   struct amdgpu_sparse_backing *span_backing = NULL;
   uint32_t span_first_backing_page = 0;
   uint32_t span_first_va_page = 0;
   uint32_t va_page = 0;

   for (;;) {
      struct amdgpu_sparse_backing *backing = 0;
      uint32_t backing_page = 0;

      if (va_page < bo->u.sparse.num_va_pages) {
         backing = bo->u.sparse.commitments[va_page].backing;
         backing_page = bo->u.sparse.commitments[va_page].page;
      }

      if (span_backing &&
          (backing != span_backing ||
           backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
         fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
                 span_first_va_page, va_page - 1, span_backing,
                 span_first_backing_page,
                 span_first_backing_page + (va_page - span_first_va_page) - 1);

         span_backing = NULL;
      }

      if (va_page >= bo->u.sparse.num_va_pages)
         break;

      if (backing && !span_backing) {
         span_backing = backing;
         span_first_backing_page = backing_page;
         span_first_va_page = va_page;
      }

      va_page++;
   }

   fprintf(stderr, "Backing:\n");

   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
      for (unsigned i = 0; i < backing->num_chunks; ++i)
         fprintf(stderr, "   %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
   }
}
#endif

/*
 * Attempt to allocate the given number of backing pages. Fewer pages may be
 * allocated (depending on the fragmentation of existing backing buffers),
 * which will be reflected by a change to *pnum_pages.
 */
static struct amdgpu_sparse_backing *
sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
{
   struct amdgpu_sparse_backing *best_backing;
   unsigned best_idx;
   uint32_t best_num_pages;

   best_backing = NULL;
   best_idx = 0;
   best_num_pages = 0;

   /* This is a very simple and inefficient best-fit algorithm. */
   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
         uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
         if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
             (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
            best_backing = backing;
            best_idx = idx;
            best_num_pages = cur_num_pages;
         }
      }
   }

   /* Allocate a new backing buffer if necessary. */
   if (!best_backing) {
      struct pb_buffer *buf;
      uint64_t size;
      uint32_t pages;

      best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
      if (!best_backing)
         return NULL;

      best_backing->max_chunks = 4;
      best_backing->chunks = CALLOC(best_backing->max_chunks,
                                    sizeof(*best_backing->chunks));
      if (!best_backing->chunks) {
         FREE(best_backing);
         return NULL;
      }

      assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));

      size = MIN3(bo->base.size / 16,
                  8 * 1024 * 1024,
                  bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
      size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);

      buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
                             bo->initial_domain,
                             bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
      if (!buf) {
         FREE(best_backing->chunks);
         FREE(best_backing);
         return NULL;
      }

      /* We might have gotten a bigger buffer than requested via caching. */
      pages = buf->size / RADEON_SPARSE_PAGE_SIZE;

      best_backing->bo = amdgpu_winsys_bo(buf);
      best_backing->num_chunks = 1;
      best_backing->chunks[0].begin = 0;
      best_backing->chunks[0].end = pages;

      list_add(&best_backing->list, &bo->u.sparse.backing);
      bo->u.sparse.num_backing_pages += pages;

      best_idx = 0;
      best_num_pages = pages;
   }

   *pnum_pages = MIN2(*pnum_pages, best_num_pages);
   *pstart_page = best_backing->chunks[best_idx].begin;
   best_backing->chunks[best_idx].begin += *pnum_pages;

   if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
      memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
              sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
      best_backing->num_chunks--;
   }

   return best_backing;
}

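/* Release a backing buffer of a sparse BO once none of its pages are
 * committed anymore, transferring the BO's fences to it first. */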
static void
sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
                           struct amdgpu_sparse_backing *backing)
{
   struct amdgpu_winsys *ws = backing->bo->ws;

   bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;

   mtx_lock(&ws->bo_fence_lock);
   amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
   mtx_unlock(&ws->bo_fence_lock);

   list_del(&backing->list);
   amdgpu_winsys_bo_reference(&backing->bo, NULL);
   FREE(backing->chunks);
   FREE(backing);
}

/*
 * Return a range of pages from the given backing buffer back into the
 * free structure.
 */
static bool
sparse_backing_free(struct amdgpu_winsys_bo *bo,
                    struct amdgpu_sparse_backing *backing,
                    uint32_t start_page, uint32_t num_pages)
{
   uint32_t end_page = start_page + num_pages;
   unsigned low = 0;
   unsigned high = backing->num_chunks;

   /* Find the first chunk with begin >= start_page. */
   while (low < high) {
      unsigned mid = low + (high - low) / 2;

      if (backing->chunks[mid].begin >= start_page)
         high = mid;
      else
         low = mid + 1;
   }

   assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
   assert(low == 0 || backing->chunks[low - 1].end <= start_page);

   if (low > 0 && backing->chunks[low - 1].end == start_page) {
      backing->chunks[low - 1].end = end_page;

      if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
         backing->chunks[low - 1].end = backing->chunks[low].end;
         memmove(&backing->chunks[low], &backing->chunks[low + 1],
                 sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
         backing->num_chunks--;
      }
   } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
      backing->chunks[low].begin = start_page;
   } else {
      if (backing->num_chunks >= backing->max_chunks) {
         unsigned new_max_chunks = 2 * backing->max_chunks;
         struct amdgpu_sparse_backing_chunk *new_chunks =
            REALLOC(backing->chunks,
                    sizeof(*backing->chunks) * backing->max_chunks,
                    sizeof(*backing->chunks) * new_max_chunks);
         if (!new_chunks)
            return false;

         backing->max_chunks = new_max_chunks;
         backing->chunks = new_chunks;
      }

      memmove(&backing->chunks[low + 1], &backing->chunks[low],
              sizeof(*backing->chunks) * (backing->num_chunks - low));
      backing->chunks[low].begin = start_page;
      backing->chunks[low].end = end_page;
      backing->num_chunks++;
   }

   if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
       backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
      sparse_free_backing_buffer(bo, backing);

   return true;
}

static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int r;

   assert(!bo->bo && bo->sparse);

   r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
                           (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
                           bo->va, 0, AMDGPU_VA_OP_CLEAR);
   if (r) {
      fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
   }

   while (!list_empty(&bo->u.sparse.backing)) {
      struct amdgpu_sparse_backing *dummy = NULL;
      sparse_free_backing_buffer(bo,
                                 container_of(bo->u.sparse.backing.next,
                                              dummy, list));
   }

   amdgpu_va_range_free(bo->u.sparse.va_handle);
   mtx_destroy(&bo->u.sparse.commit_lock);
   FREE(bo->u.sparse.commitments);
   FREE(bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
   amdgpu_bo_sparse_destroy
   /* other functions are never called */
};

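/* Create a sparse (PRT) buffer: no memory is committed up front; the whole
 * VA range is initially mapped as PRT and backing pages are attached later
 * via amdgpu_bo_sparse_commit. */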
static struct pb_buffer *
amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
   struct amdgpu_winsys_bo *bo;
   uint64_t map_size;
   uint64_t va_gap_size;
   int r;

   /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
    * that exceed this limit. This is not really a restriction: we don't have
    * that much virtual address space anyway.
    */
   if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
      return NULL;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
   bo->ws = ws;
   bo->initial_domain = domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->sparse = true;
   bo->u.sparse.flags = flags & ~RADEON_FLAG_SPARSE;

   bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
   bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
                                     sizeof(*bo->u.sparse.commitments));
   if (!bo->u.sparse.commitments)
      goto error_alloc_commitments;

   mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
   LIST_INITHEAD(&bo->u.sparse.backing);

   /* For simplicity, we always map a multiple of the page size. */
   map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
   va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
                             0, &bo->va, &bo->u.sparse.va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0, size, bo->va,
                           AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(bo->u.sparse.va_handle);
error_va_alloc:
   mtx_destroy(&bo->u.sparse.commit_lock);
   FREE(bo->u.sparse.commitments);
error_alloc_commitments:
   FREE(bo);
   return NULL;
}

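/* Commit or de-commit a page-aligned range of a sparse buffer. Committing
 * fills uncommitted spans with chunks of backing memory and maps them with
 * AMDGPU_VA_OP_REPLACE; de-committing remaps the range as PRT and returns
 * the backing pages to their free lists. */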
static bool
amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
                        bool commit)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
   struct amdgpu_sparse_commitment *comm;
   uint32_t va_page, end_va_page;
   bool ok = true;
   int r;

   assert(bo->sparse);
   assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
   assert(offset <= bo->base.size);
   assert(size <= bo->base.size - offset);
   assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);

   comm = bo->u.sparse.commitments;
   va_page = offset / RADEON_SPARSE_PAGE_SIZE;
   end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);

   mtx_lock(&bo->u.sparse.commit_lock);

#if DEBUG_SPARSE_COMMITS
   sparse_dump(bo, __func__);
#endif

   if (commit) {
      while (va_page < end_va_page) {
         uint32_t span_va_page;

         /* Skip pages that are already committed. */
         if (comm[va_page].backing) {
            va_page++;
            continue;
         }

         /* Determine length of uncommitted span. */
         span_va_page = va_page;
         while (va_page < end_va_page && !comm[va_page].backing)
            va_page++;

         /* Fill the uncommitted span with chunks of backing memory. */
         while (span_va_page < va_page) {
            struct amdgpu_sparse_backing *backing;
            uint32_t backing_start, backing_size;

            backing_size = va_page - span_va_page;
            backing = sparse_backing_alloc(bo, &backing_start, &backing_size);
            if (!backing) {
               ok = false;
               goto out;
            }

            r = amdgpu_bo_va_op_raw(bo->ws->dev, backing->bo->bo,
                                    (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
                                    (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
                                    bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
                                    AMDGPU_VM_PAGE_READABLE |
                                    AMDGPU_VM_PAGE_WRITEABLE |
                                    AMDGPU_VM_PAGE_EXECUTABLE,
                                    AMDGPU_VA_OP_REPLACE);
            if (r) {
               ok = sparse_backing_free(bo, backing, backing_start, backing_size);
               assert(ok && "sufficient memory should already be allocated");

               ok = false;
               goto out;
            }

            while (backing_size) {
               comm[span_va_page].backing = backing;
               comm[span_va_page].page = backing_start;
               span_va_page++;
               backing_start++;
               backing_size--;
            }
         }
      }
   } else {
      r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
                              (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
                              bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
                              AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
      if (r) {
         ok = false;
         goto out;
      }

      while (va_page < end_va_page) {
         struct amdgpu_sparse_backing *backing;
         uint32_t backing_start;
         uint32_t span_pages;

         /* Skip pages that are already uncommitted. */
         if (!comm[va_page].backing) {
            va_page++;
            continue;
         }

         /* Group contiguous spans of pages. */
         backing = comm[va_page].backing;
         backing_start = comm[va_page].page;
         comm[va_page].backing = NULL;

         span_pages = 1;
         va_page++;

         while (va_page < end_va_page &&
                comm[va_page].backing == backing &&
                comm[va_page].page == backing_start + span_pages) {
            comm[va_page].backing = NULL;
            va_page++;
            span_pages++;
         }

         if (!sparse_backing_free(bo, backing, backing_start, span_pages)) {
            /* Couldn't allocate tracking data structures, so we have to leak */
            fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
            ok = false;
         }
      }
   }
out:
   mtx_unlock(&bo->u.sparse.commit_lock);

   return ok;
}

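/* Translate the hardware TILE_SPLIT field encoding (0..6) to a tile split in
 * bytes (64..4096) and back (see eg_tile_split_rev below). */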
static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:     tile_split = 64;    break;
   case 1:     tile_split = 128;   break;
   case 2:     tile_split = 256;   break;
   case 3:     tile_split = 512;   break;
   default:
   case 4:     tile_split = 1024;  break;
   case 5:     tile_split = 2048;  break;
   case 6:     tile_split = 4096;  break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:    return 0;
   case 128:   return 1;
   case 256:   return 2;
   case 512:   return 3;
   default:
   case 1024:  return 4;
   case 2048:  return 5;
   case 4096:  return 6;
   }
}

static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint64_t tiling_flags;
   int r;

   assert(bo->bo && "must not be called for slab entries");

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   if (bo->ws->info.chip_class >= GFX9) {
      md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
   } else {
      md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
      md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;

      if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
         md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
      else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
         md->u.legacy.microtile = RADEON_LAYOUT_TILED;

      md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
      md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
      md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
      md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
      md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
      md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
      md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
   }

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint64_t tiling_flags = 0;

   assert(bo->bo && "must not be called for slab entries");

   if (bo->ws->info.chip_class >= GFX9) {
      tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
   } else {
      if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
      else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
      else
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

      tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
      tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
      tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
      if (md->u.legacy.tile_split)
         tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
      tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
      tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);

      if (md->u.legacy.scanout)
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
      else
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
   }

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}

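/* Main buffer allocation entry point: small buffers are suballocated from
 * slabs, sparse buffers take the PRT path, and everything else goes through
 * the reusable cache (when allowed) or a fresh GEM allocation. */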
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0, pb_cache_bucket = 0;

   /* VRAM implies WC. This is not optional. */
   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);

   /* NO_CPU_ACCESS is valid with VRAM only. */
   assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));

   /* Sub-allocate small buffers from slabs. */
   if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
       size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
       alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
      struct pb_slab_entry *entry;
      int heap = radeon_get_heap_index(domain, flags);

      if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
         goto no_slab;

      entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      if (!entry) {
         /* Clear the cache and try again. */
         pb_cache_release_all_buffers(&ws->bo_cache);

         entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      }
      if (!entry)
         return NULL;

      bo = container_of(entry, bo, u.slab.entry);

      pipe_reference_init(&bo->base.reference, 1);

      return &bo->base;
   }
no_slab:

   if (flags & RADEON_FLAG_SPARSE) {
      assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);

      flags |= RADEON_FLAG_NO_CPU_ACCESS;

      return amdgpu_bo_sparse_create(ws, size, domain, flags);
   }

   /* This flag is irrelevant for the cache. */
   flags &= ~RADEON_FLAG_NO_SUBALLOC;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align64(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);

   bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;

   if (use_reusable_pool) {
      int heap = radeon_get_heap_index(domain, flags);
      assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
      usage = 1 << heap; /* Only set one usage bit for each heap. */

      pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
      assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));

      /* Get a buffer from the cache. */
      bo = (struct amdgpu_winsys_bo*)
           pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
                                   pb_cache_bucket);
      if (bo)
         return &bo->base;
   }

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                         pb_cache_bucket);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_slabs_reclaim(&ws->bo_slabs);
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                            pb_cache_bucket);
      if (!bo)
         return NULL;
   }

   bo->u.real.use_reusable_pool = use_reusable_pool;
   return &bo->base;
}

static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride,
                                               unsigned *offset)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      return NULL;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;
   if (offset)
      *offset = whandle->offset;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                 unsigned stride, unsigned offset,
                                 unsigned slice_size,
                                 struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   /* Don't allow exports of slab entries and sparse buffers. */
   if (!bo->bo)
      return false;

   bo->u.real.use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return false;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return false;

   whandle->stride = stride;
   whandle->offset = offset;
   whandle->offset += slice_size * whandle->layer;
   bo->is_shared = true;
   return true;
}

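/* Wrap a user memory pointer in a GTT buffer object
 * (amdgpu_create_bo_from_user_mem) so it can be used like any other BO. */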
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, uint64_t size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
}

static bool amdgpu_bo_is_suballocated(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;

   return !bo->bo && !bo->sparse;
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}

void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_commit = amdgpu_bo_sparse_commit;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}