/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "util/os_time.h"

#include "frontend/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <inttypes.h>

static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        uint64_t size,
                        unsigned alignment,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags);

static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    return (struct radeon_bo *)bo;
}

struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

static bool radeon_real_bo_is_busy(struct radeon_bo *bo)
{
    struct drm_radeon_gem_busy args = {0};

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}
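
/* A real BO answers busy-ness with a single GEM_BUSY query. Slab entries have
 * no kernel handle of their own, so their state is tracked through the fences
 * of the submissions that used them; idle fences are pruned as a side effect. */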
static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
    unsigned num_idle;
    bool busy = false;

    if (bo->handle)
        return radeon_real_bo_is_busy(bo);

    mtx_lock(&bo->rws->bo_fence_lock);
    for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {
        if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
            busy = true;
            break;
        }
        radeon_bo_reference(&bo->u.slab.fences[num_idle], NULL);
    }
    memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
            (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
    bo->u.slab.num_fences -= num_idle;
    mtx_unlock(&bo->rws->bo_fence_lock);

    return busy;
}

static void radeon_real_bo_wait_idle(struct radeon_bo *bo)
{
    struct drm_radeon_gem_wait_idle args = {0};

    args.handle = bo->handle;
    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                           &args, sizeof(args)) == -EBUSY);
}

static void radeon_bo_wait_idle(struct radeon_bo *bo)
{
    if (bo->handle) {
        radeon_real_bo_wait_idle(bo);
        return;
    }

    mtx_lock(&bo->rws->bo_fence_lock);
    while (bo->u.slab.num_fences) {
        struct radeon_bo *fence = NULL;
        radeon_bo_reference(&fence, bo->u.slab.fences[0]);
        mtx_unlock(&bo->rws->bo_fence_lock);

        /* Wait without holding the fence lock. */
        radeon_real_bo_wait_idle(fence);

        mtx_lock(&bo->rws->bo_fence_lock);
        if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
            radeon_bo_reference(&bo->u.slab.fences[0], NULL);
            memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
                    (bo->u.slab.num_fences - 1) * sizeof(bo->u.slab.fences[0]));
            bo->u.slab.num_fences--;
        }
        radeon_bo_reference(&fence, NULL);
    }
    mtx_unlock(&bo->rws->bo_fence_lock);
}
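
/* Timeout semantics: 0 only queries the current state, PIPE_TIMEOUT_INFINITE
 * blocks until idle, and any other timeout is emulated by polling, since the
 * kernel interface used here only offers a busy query and a blocking wait. */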
static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    int64_t abs_timeout;

    /* No timeout. Just query. */
    if (timeout == 0)
        return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);

    abs_timeout = os_time_get_absolute_timeout(timeout);

    /* Wait if any ioctl is being submitted with this buffer. */
    if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
        return false;

    /* Infinite timeout. */
    if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
        radeon_bo_wait_idle(bo);
        return true;
    }

    /* Other timeouts need to be emulated with a loop. */
    while (radeon_bo_is_busy(bo)) {
        if (os_time_get_nano() >= abs_timeout)
            return false;
        os_time_sleep(10);
    }

    return true;
}

static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Zero domains the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}

static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct pb_buffer *buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    if (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: failed to get initial domain: %p 0x%08X\n",
                bo, bo->handle);
        /* Default domain as returned by get_valid_domain. */
        return RADEON_DOMAIN_VRAM_GTT;
    }

    /* GEM domains and winsys domains are defined the same. */
    return get_valid_domain(args.value);
}
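
/* First-fit allocator for GPU virtual address space. Free ranges are kept as
 * a list of holes; an allocation either consumes a hole exactly, splits one,
 * or bumps heap->start past the current top. Returns 0 when the heap is full. */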
static uint64_t radeon_bomgr_find_va(const struct radeon_info *info,
                                     struct radeon_vm_heap *heap,
                                     uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    /* All VM address space holes will implicitly start aligned to the
     * size alignment, so we don't need to sanitize the alignment here
     */
    size = align(size, info->gart_page_size);

    mtx_lock(&heap->mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &heap->holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            mtx_unlock(&heap->mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            mtx_unlock(&heap->mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            mtx_unlock(&heap->mutex);
            return offset;
        }
    }

    offset = heap->start;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;

    if (offset + waste + size > heap->end) {
        mtx_unlock(&heap->mutex);
        return 0;
    }

    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &heap->holes);
    }
    offset += waste;
    heap->start += size + waste;
    mtx_unlock(&heap->mutex);
    return offset;
}

static uint64_t radeon_bomgr_find_va64(struct radeon_drm_winsys *ws,
                                       uint64_t size, uint64_t alignment)
{
    uint64_t va = 0;

    /* Try to allocate from the 64-bit address space first.
     * If it doesn't exist (start = 0) or if it doesn't have enough space,
     * fall back to the 32-bit address space.
     */
    if (ws->vm64.start)
        va = radeon_bomgr_find_va(&ws->info, &ws->vm64, size, alignment);
    if (!va)
        va = radeon_bomgr_find_va(&ws->info, &ws->vm32, size, alignment);
    return va;
}
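
/* Return a VA range to the allocator: either lower heap->start if the range
 * sits at the top of the heap, or insert a hole and merge it with adjacent
 * holes so the free list stays coalesced. */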
static void radeon_bomgr_free_va(const struct radeon_info *info,
                                 struct radeon_vm_heap *heap,
                                 uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole = NULL;

    size = align(size, info->gart_page_size);

    mtx_lock(&heap->mutex);
    if ((va + size) == heap->start) {
        heap->start = va;
        /* Delete uppermost hole if it reaches the new top */
        if (!list_is_empty(&heap->holes)) {
            hole = container_of(heap->holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                heap->start = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&heap->holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &heap->holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &heap->holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &heap->holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &heap->holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME on allocation failure we just lose virtual address space
         * maybe print a warning
         */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    mtx_unlock(&heap->mutex);
}

void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_winsys *rws = bo->rws;
    struct drm_gem_close args;

    assert(bo->handle && "must not be called for slab entries");

    memset(&args, 0, sizeof(args));

    mtx_lock(&rws->bo_handles_mutex);
    _mesa_hash_table_remove_key(rws->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->flink_name) {
        _mesa_hash_table_remove_key(rws->bo_names,
                                    (void*)(uintptr_t)bo->flink_name);
    }
    mtx_unlock(&rws->bo_handles_mutex);

    if (bo->u.real.ptr)
        os_munmap(bo->u.real.ptr, bo->base.size);

    if (rws->info.r600_has_virtual_memory) {
        if (rws->va_unmap_working) {
            struct drm_radeon_gem_va va;

            va.handle = bo->handle;
            va.vm_id = 0;
            va.operation = RADEON_VA_UNMAP;
            va.flags = RADEON_VM_PAGE_READABLE |
                       RADEON_VM_PAGE_WRITEABLE |
                       RADEON_VM_PAGE_SNOOPED;
            va.offset = bo->va;

            if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
                                    sizeof(va)) != 0 &&
                va.operation == RADEON_VA_RESULT_ERROR) {
                fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
                fprintf(stderr, "radeon:    size      : %"PRIu64" bytes\n", bo->base.size);
                fprintf(stderr, "radeon:    va        : 0x%"PRIx64"\n", bo->va);
            }
        }

        radeon_bomgr_free_va(&rws->info,
                             bo->va < rws->vm32.end ? &rws->vm32 : &rws->vm64,
                             bo->va, bo->base.size);
    }

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    mtx_destroy(&bo->u.real.map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        rws->allocated_gtt -= align(bo->base.size, rws->info.gart_page_size);

    if (bo->u.real.map_count >= 1) {
        if (bo->initial_domain & RADEON_DOMAIN_VRAM)
            bo->rws->mapped_vram -= bo->base.size;
        else
            bo->rws->mapped_gtt -= bo->base.size;
        bo->rws->num_mapped_buffers--;
    }

    FREE(bo);
}

static void radeon_bo_destroy_or_cache(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    assert(bo->handle && "must not be called for slab entries");

    if (bo->u.real.use_reusable_pool)
        pb_cache_add_buffer(&bo->u.real.cache_entry);
    else
        radeon_bo_destroy(_buf);
}
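
/* CPU mapping. Slab entries are redirected to their backing buffer, with the
 * entry's offset within the slab added to the returned pointer. Mappings are
 * reference-counted through map_count under map_mutex. */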
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;
    unsigned offset;

    /* If the buffer is created from user memory, return the user pointer. */
    if (bo->user_ptr)
        return bo->user_ptr;

    if (bo->handle) {
        offset = 0;
    } else {
        offset = bo->va - bo->u.slab.real->va;
        bo = bo->u.slab.real;
    }

    /* Map the buffer. */
    mtx_lock(&bo->u.real.map_mutex);
    /* Return the pointer if it's already mapped. */
    if (bo->u.real.ptr) {
        bo->u.real.map_count++;
        mtx_unlock(&bo->u.real.map_mutex);
        return (uint8_t*)bo->u.real.ptr + offset;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        mtx_unlock(&bo->u.real.map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        /* Clear the cache and try again. */
        pb_cache_release_all_buffers(&bo->rws->bo_cache);

        ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                      bo->rws->fd, args.addr_ptr);
        if (ptr == MAP_FAILED) {
            mtx_unlock(&bo->u.real.map_mutex);
            fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
            return NULL;
        }
    }
    bo->u.real.ptr = ptr;
    bo->u.real.map_count = 1;

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->mapped_vram += bo->base.size;
    else
        bo->rws->mapped_gtt += bo->base.size;
    bo->rws->num_mapped_buffers++;

    mtx_unlock(&bo->u.real.map_mutex);
    return (uint8_t*)bo->u.real.ptr + offset;
}

static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_cmdbuf *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data,
                                 RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data,
                                 RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data,
                                 RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
                }
                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data,
                                     RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_READWRITE);
            }

            bo->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}

static void radeon_bo_unmap(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)_buf;

    if (bo->user_ptr)
        return;

    if (!bo->handle)
        bo = bo->u.slab.real;

    mtx_lock(&bo->u.real.map_mutex);
    if (!bo->u.real.ptr) {
        mtx_unlock(&bo->u.real.map_mutex);
        return; /* it's not been mapped */
    }

    assert(bo->u.real.map_count);
    if (--bo->u.real.map_count) {
        mtx_unlock(&bo->u.real.map_mutex);
        return; /* it's been mapped multiple times */
    }

    os_munmap(bo->u.real.ptr, bo->base.size);
    bo->u.real.ptr = NULL;

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->mapped_vram -= bo->base.size;
    else
        bo->rws->mapped_gtt -= bo->base.size;
    bo->rws->num_mapped_buffers--;

    mtx_unlock(&bo->u.real.map_mutex);
}

static const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy_or_cache
    /* other functions are never called */
};

static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
                                          unsigned size, unsigned alignment,
                                          unsigned initial_domains,
                                          unsigned flags,
                                          int heap)
{
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    int r;

    memset(&args, 0, sizeof(args));

    assert(initial_domains);
    assert((initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = alignment;
    args.initial_domain = initial_domains;
    args.flags = 0;

    /* If VRAM is just stolen system memory, allow both VRAM and
     * GTT, whichever has free space. If a buffer is evicted from
     * VRAM to GTT, it will stay there.
     */
    if (!rws->info.has_dedicated_vram)
        args.initial_domain |= RADEON_DOMAIN_GTT;

    if (flags & RADEON_FLAG_GTT_WC)
        args.flags |= RADEON_GEM_GTT_WC;
    if (flags & RADEON_FLAG_NO_CPU_ACCESS)
        args.flags |= RADEON_GEM_NO_CPU_ACCESS;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %u bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %u bytes\n", alignment);
        fprintf(stderr, "radeon:    domains   : %u\n", args.initial_domain);
        fprintf(stderr, "radeon:    flags     : %u\n", args.flags);
        return NULL;
    }

    assert(args.handle != 0);

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = alignment;
    bo->base.usage = 0;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = rws;
    bo->handle = args.handle;
    bo->va = 0;
    bo->initial_domain = initial_domains;
    bo->hash = __sync_fetch_and_add(&rws->next_bo_hash, 1);
    (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);

    if (heap >= 0) {
        pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                            heap);
    }

    if (rws->info.r600_has_virtual_memory) {
        struct drm_radeon_gem_va va;
        unsigned va_gap_size;

        va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;

        if (flags & RADEON_FLAG_32BIT) {
            bo->va = radeon_bomgr_find_va(&rws->info, &rws->vm32,
                                          size + va_gap_size, alignment);
            assert(bo->va + size < rws->vm32.end);
        } else {
            bo->va = radeon_bomgr_find_va64(rws, size + va_gap_size, alignment);
        }

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon:    size      : %d bytes\n", size);
            fprintf(stderr, "radeon:    alignment : %d bytes\n", alignment);
            fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon:    va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        mtx_lock(&rws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);

            mtx_unlock(&rws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return radeon_bo(b);
        }

        _mesa_hash_table_insert(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        mtx_unlock(&rws->bo_handles_mutex);
    }

    if (initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, rws->info.gart_page_size);
    else if (initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, rws->info.gart_page_size);

    return bo;
}

bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo))
        return false;

    return radeon_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

bool radeon_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
    struct radeon_bo *bo = NULL; /* fix container_of */
    bo = container_of(entry, bo, u.slab.entry);

    return radeon_bo_can_reclaim(&bo->base);
}

static void radeon_bo_slab_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    assert(!bo->handle);

    pb_slab_free(&bo->rws->bo_slabs, &bo->u.slab.entry);
}

static const struct pb_vtbl radeon_winsys_bo_slab_vtbl = {
    radeon_bo_slab_destroy
    /* other functions are never called */
};
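
/* Slab suballocation: a 64 KB buffer is created through the normal BO path
 * and carved into equally sized entries that share the parent's storage and
 * virtual address range; the entries are handed out by the pb_slab framework. */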
struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size,
                                     unsigned group_index)
{
    struct radeon_drm_winsys *ws = priv;
    struct radeon_slab *slab = CALLOC_STRUCT(radeon_slab);
    enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
    enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
    unsigned base_hash;

    if (!slab)
        return NULL;

    slab->buffer = radeon_bo(radeon_winsys_bo_create(&ws->base,
                                                     64 * 1024, 64 * 1024,
                                                     domains, flags));
    if (!slab->buffer)
        goto fail;

    assert(slab->buffer->handle);

    slab->base.num_entries = slab->buffer->base.size / entry_size;
    slab->base.num_free = slab->base.num_entries;
    slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
    if (!slab->entries)
        goto fail_buffer;

    list_inithead(&slab->base.free);

    base_hash = __sync_fetch_and_add(&ws->next_bo_hash, slab->base.num_entries);

    for (unsigned i = 0; i < slab->base.num_entries; ++i) {
        struct radeon_bo *bo = &slab->entries[i];

        bo->base.alignment = entry_size;
        bo->base.usage = slab->buffer->base.usage;
        bo->base.size = entry_size;
        bo->base.vtbl = &radeon_winsys_bo_slab_vtbl;
        bo->rws = ws;
        bo->va = slab->buffer->va + i * entry_size;
        bo->initial_domain = domains;
        bo->hash = base_hash + i;
        bo->u.slab.entry.slab = &slab->base;
        bo->u.slab.entry.group_index = group_index;
        bo->u.slab.real = slab->buffer;

        list_addtail(&bo->u.slab.entry.head, &slab->base.free);
    }

    return &slab->base;

fail_buffer:
    radeon_bo_reference(&slab->buffer, NULL);
fail:
    FREE(slab);
    return NULL;
}

void radeon_bo_slab_free(void *priv, struct pb_slab *pslab)
{
    struct radeon_slab *slab = (struct radeon_slab *)pslab;

    for (unsigned i = 0; i < slab->base.num_entries; ++i) {
        struct radeon_bo *bo = &slab->entries[i];
        for (unsigned j = 0; j < bo->u.slab.num_fences; ++j)
            radeon_bo_reference(&bo->u.slab.fences[j], NULL);
        FREE(bo->u.slab.fences);
    }

    FREE(slab->entries);
    radeon_bo_reference(&slab->buffer, NULL);
    FREE(slab);
}
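
/* Translate the Evergreen tile-split field between its hardware encoding
 * (0-6) and the split size in bytes (64-4096). */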
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}

static void radeon_bo_get_metadata(struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md,
                                   struct radeon_surf *surf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    assert(bo->handle && "must not be called for slab entries");

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    if (surf) {
        if (args.tiling_flags & RADEON_TILING_MACRO)
            md->mode = RADEON_SURF_MODE_2D;
        else if (args.tiling_flags & RADEON_TILING_MICRO)
            md->mode = RADEON_SURF_MODE_1D;
        else
            md->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

        surf->u.legacy.bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        surf->u.legacy.bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        surf->u.legacy.tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        surf->u.legacy.tile_split = eg_tile_split(surf->u.legacy.tile_split);
        surf->u.legacy.mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;

        if (bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT))
            surf->flags |= RADEON_SURF_SCANOUT;
        else
            surf->flags &= ~RADEON_SURF_SCANOUT;
        return;
    }

    md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
    md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_TILING_MICRO)
        md->u.legacy.microtile = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        md->u.legacy.microtile = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_TILING_MACRO)
        md->u.legacy.macrotile = RADEON_LAYOUT_TILED;

    md->u.legacy.bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
    md->u.legacy.bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
    md->u.legacy.tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
    md->u.legacy.mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
    md->u.legacy.tile_split = eg_tile_split(md->u.legacy.tile_split);
    md->u.legacy.scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}

static void radeon_bo_set_metadata(struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md,
                                   struct radeon_surf *surf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    assert(bo->handle && "must not be called for slab entries");

    memset(&args, 0, sizeof(args));

    os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);

    if (surf) {
        if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
            args.tiling_flags |= RADEON_TILING_MICRO;
        if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
            args.tiling_flags |= RADEON_TILING_MACRO;

        args.tiling_flags |= (surf->u.legacy.bankw & RADEON_TILING_EG_BANKW_MASK) <<
            RADEON_TILING_EG_BANKW_SHIFT;
        args.tiling_flags |= (surf->u.legacy.bankh & RADEON_TILING_EG_BANKH_MASK) <<
            RADEON_TILING_EG_BANKH_SHIFT;
        if (surf->u.legacy.tile_split) {
            args.tiling_flags |= (eg_tile_split_rev(surf->u.legacy.tile_split) &
                                  RADEON_TILING_EG_TILE_SPLIT_MASK) <<
                RADEON_TILING_EG_TILE_SPLIT_SHIFT;
        }
        args.tiling_flags |= (surf->u.legacy.mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
            RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

        if (bo->rws->gen >= DRV_SI && !(surf->flags & RADEON_SURF_SCANOUT))
            args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

        args.pitch = surf->u.legacy.level[0].nblk_x * surf->bpe;
    } else {
        if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
            args.tiling_flags |= RADEON_TILING_MICRO;
        else if (md->u.legacy.microtile == RADEON_LAYOUT_SQUARETILED)
            args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;

        if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
            args.tiling_flags |= RADEON_TILING_MACRO;

        args.tiling_flags |= (md->u.legacy.bankw & RADEON_TILING_EG_BANKW_MASK) <<
            RADEON_TILING_EG_BANKW_SHIFT;
        args.tiling_flags |= (md->u.legacy.bankh & RADEON_TILING_EG_BANKH_MASK) <<
            RADEON_TILING_EG_BANKH_SHIFT;
        if (md->u.legacy.tile_split) {
            args.tiling_flags |= (eg_tile_split_rev(md->u.legacy.tile_split) &
                                  RADEON_TILING_EG_TILE_SPLIT_MASK) <<
                RADEON_TILING_EG_TILE_SPLIT_SHIFT;
        }
        args.tiling_flags |= (md->u.legacy.mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
            RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

        if (bo->rws->gen >= DRV_SI && !md->u.legacy.scanout)
            args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

        args.pitch = md->u.legacy.stride;
    }

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}
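
/* Allocation policy: small buffers are sub-allocated from slabs, shared
 * buffers bypass the reusable cache, and everything else tries the cache
 * before asking the kernel for a fresh BO. */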
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        uint64_t size,
                        unsigned alignment,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    int heap = -1;

    assert(!(flags & RADEON_FLAG_SPARSE)); /* not supported */

    /* Only 32-bit sizes are supported. */
    if (size > UINT_MAX)
        return NULL;

    /* VRAM implies WC. This is not optional. */
    if (domain & RADEON_DOMAIN_VRAM)
        flags |= RADEON_FLAG_GTT_WC;
    /* NO_CPU_ACCESS is valid with VRAM only. */
    if (domain != RADEON_DOMAIN_VRAM)
        flags &= ~RADEON_FLAG_NO_CPU_ACCESS;

    /* Sub-allocate small buffers from slabs. */
    if (!(flags & RADEON_FLAG_NO_SUBALLOC) &&
        size <= (1 << RADEON_SLAB_MAX_SIZE_LOG2) &&
        ws->info.r600_has_virtual_memory &&
        alignment <= MAX2(1 << RADEON_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
        struct pb_slab_entry *entry;
        int heap = radeon_get_heap_index(domain, flags);

        if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
            goto no_slab;

        entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
        if (!entry) {
            /* Clear the cache and try again. */
            pb_cache_release_all_buffers(&ws->bo_cache);

            entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
        }
        if (!entry)
            return NULL;

        bo = container_of(entry, bo, u.slab.entry);

        pipe_reference_init(&bo->base.reference, 1);

        return &bo->base;
    }
no_slab:

    /* This flag is irrelevant for the cache. */
    flags &= ~RADEON_FLAG_NO_SUBALLOC;

    /* Align size to page size. This is the minimum alignment for normal
     * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
     * like constant/uniform buffers, can benefit from better and more reuse.
     */
    size = align(size, ws->info.gart_page_size);
    alignment = align(alignment, ws->info.gart_page_size);

    bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;

    /* Shared resources don't use cached heaps. */
    if (use_reusable_pool) {
        heap = radeon_get_heap_index(domain, flags);
        assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);

        bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
                                               0, heap));
        if (bo)
            return &bo->base;
    }

    bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
    if (!bo) {
        /* Clear the cache and try again. */
        if (ws->info.r600_has_virtual_memory)
            pb_slabs_reclaim(&ws->bo_slabs);
        pb_cache_release_all_buffers(&ws->bo_cache);
        bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
        if (!bo)
            return NULL;
    }

    bo->u.real.use_reusable_pool = use_reusable_pool;

    mtx_lock(&ws->bo_handles_mutex);
    _mesa_hash_table_insert(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
    mtx_unlock(&ws->bo_handles_mutex);

    return &bo->base;
}

static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
                                                   void *pointer, uint64_t size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct drm_radeon_gem_userptr args;
    struct radeon_bo *bo;
    int r;

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    memset(&args, 0, sizeof(args));
    args.addr = (uintptr_t)pointer;
    args.size = align(size, ws->info.gart_page_size);
    args.flags = RADEON_GEM_USERPTR_ANONONLY |
                 RADEON_GEM_USERPTR_VALIDATE |
                 RADEON_GEM_USERPTR_REGISTER;
    if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
                            &args, sizeof(args))) {
        FREE(bo);
        return NULL;
    }

    assert(args.handle != 0);

    mtx_lock(&ws->bo_handles_mutex);

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->handle = args.handle;
    bo->base.alignment = 0;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    bo->user_ptr = pointer;
    bo->va = 0;
    bo->initial_domain = RADEON_DOMAIN_GTT;
    bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
    (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);

    _mesa_hash_table_insert(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

    mtx_unlock(&ws->bo_handles_mutex);

    if (ws->info.r600_has_virtual_memory) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va64(ws, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        mtx_lock(&ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            mtx_unlock(&ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        _mesa_hash_table_insert(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        mtx_unlock(&ws->bo_handles_mutex);
    }

    ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

    return (struct pb_buffer*)bo;
}

static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned vm_alignment)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    int r;
    unsigned handle;
    uint64_t size = 0;

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    mtx_lock(&ws->bo_handles_mutex);

    if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg = {};
        memset(&open_arg, 0, sizeof(open_arg));

        /* Open the BO. */
        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->flink_name = whandle->handle;
    } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough, but
         * it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    assert(handle != 0);

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    bo->va = 0;
    bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
    (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);

    if (bo->flink_name)
        _mesa_hash_table_insert(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);

    _mesa_hash_table_insert(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    mtx_unlock(&ws->bo_handles_mutex);

    if (ws->info.r600_has_virtual_memory && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va64(ws, bo->base.size, vm_alignment);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        mtx_lock(&ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            mtx_unlock(&ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        _mesa_hash_table_insert(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        mtx_unlock(&ws->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

    return (struct pb_buffer*)bo;

fail:
    mtx_unlock(&ws->bo_handles_mutex);
    return NULL;
}

static bool radeon_winsys_bo_get_handle(struct radeon_winsys *rws,
                                        struct pb_buffer *buffer,
                                        struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = radeon_bo(buffer);
    struct radeon_drm_winsys *ws = bo->rws;

    /* Don't allow exports of slab entries. */
    if (!bo->handle)
        return false;

    memset(&flink, 0, sizeof(flink));

    bo->u.real.use_reusable_pool = false;

    if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
        if (!bo->flink_name) {
            flink.handle = bo->handle;

            if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return false;
            }

            bo->flink_name = flink.name;

            mtx_lock(&ws->bo_handles_mutex);
            _mesa_hash_table_insert(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
            mtx_unlock(&ws->bo_handles_mutex);
        }
        whandle->handle = bo->flink_name;
    } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return false;
    }

    return true;
}

static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->user_ptr != NULL;
}

static bool radeon_winsys_bo_is_suballocated(struct pb_buffer *buf)
{
    return !((struct radeon_bo*)buf)->handle;
}

static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

static unsigned radeon_winsys_bo_get_reloc_offset(struct pb_buffer *buf)
{
    struct radeon_bo *bo = radeon_bo(buf);

    if (bo->handle)
        return 0;

    return bo->va - bo->u.slab.real->va;
}

void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_set_metadata = radeon_bo_set_metadata;
    ws->base.buffer_get_metadata = radeon_bo_get_metadata;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
    ws->base.buffer_is_user_ptr = radeon_winsys_bo_is_user_ptr;
    ws->base.buffer_is_suballocated = radeon_winsys_bo_is_suballocated;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_reloc_offset = radeon_winsys_bo_get_reloc_offset;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}