/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
27 #include "radeon_drm_cs.h"
29 #include "util/u_hash_table.h"
30 #include "util/u_memory.h"
31 #include "util/simple_list.h"
32 #include "util/list.h"
33 #include "os/os_thread.h"
34 #include "os/os_mman.h"
35 #include "os/os_time.h"
37 #include "state_tracker/drm_driver.h"
39 #include <sys/ioctl.h>
static const struct pb_vtbl radeon_bo_vtbl;
static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo*)bo;
}
struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};
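/*
 * Note: each hole records one free range [offset, offset + size) of GPU
 * virtual address space. radeon_bomgr_find_va() carves allocations out of
 * these holes, and radeon_bomgr_free_va() hands ranges back, merging with
 * adjacent holes where it can, so the list stays a compact free list.
 */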
struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer GEM names. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_names;
    /* List of buffer handles. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_handles;
    /* List of buffer virtual memory ranges. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_vas;
    pipe_mutex bo_handles_mutex;
    pipe_mutex bo_va_mutex;

    /* Is virtual address supported? */
    bool va;
    uint64_t va_offset;
    struct list_head va_holes;

    /* BO size alignment */
    unsigned size_align;
};
static inline struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}
static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
    struct drm_radeon_gem_busy args = {0};

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}
static void radeon_bo_wait_idle(struct radeon_bo *bo)
{
    struct drm_radeon_gem_wait_idle args = {0};

    args.handle = bo->handle;
    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                           &args, sizeof(args)) == -EBUSY);
}
static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    int64_t abs_timeout;

    /* No timeout. Just query. */
    if (timeout == 0)
        return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);

    abs_timeout = os_time_get_absolute_timeout(timeout);

    /* Wait if any ioctl is being submitted with this buffer. */
    if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
        return false;

    /* Infinite timeout. */
    if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
        radeon_bo_wait_idle(bo);
        return true;
    }

    /* Other timeouts need to be emulated with a loop. */
    while (radeon_bo_is_busy(bo)) {
        if (os_time_get_nano() >= abs_timeout)
            return false;
        os_time_sleep(10);
    }

    return true;
}
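/*
 * Illustrative only: the three timeout modes of radeon_bo_wait() as a
 * caller might exercise them (buf being a hypothetical pb_buffer*):
 *
 *   radeon_bo_wait(buf, 0, RADEON_USAGE_READWRITE);
 *       pure query; returns immediately, true if the buffer is idle
 *   radeon_bo_wait(buf, PIPE_TIMEOUT_INFINITE, RADEON_USAGE_READWRITE);
 *       blocks in the kernel via DRM_RADEON_GEM_WAIT_IDLE
 *   radeon_bo_wait(buf, 1000000, RADEON_USAGE_READWRITE);
 *       bounded wait, emulated by polling DRM_RADEON_GEM_BUSY
 */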
static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Zero domains the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}
static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct radeon_winsys_cs_handle *buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                        &args, sizeof(args));

    /* GEM domains and winsys domains are defined the same. */
    return get_valid_domain(args.value);
}
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    /* All VM address space holes will implicitly start aligned to the
     * size alignment, so we don't need to sanitize the alignment here
     */
    size = align(size, mgr->size_align);

    pipe_mutex_lock(mgr->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
    }

    offset = mgr->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &mgr->va_holes);
    }
    offset += waste;
    mgr->va_offset += size + waste;
    pipe_mutex_unlock(mgr->bo_va_mutex);
    return offset;
}
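/*
 * Worked example (illustrative): with a hole starting at offset 0x1030 and
 * a requested alignment of 0x100, waste = 0x1030 % 0x100 = 0x30, so
 * alignment - waste = 0xd0 bytes are skipped and the allocation starts at
 * 0x1100; the skipped 0xd0 bytes become a small hole of their own.
 */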
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    size = align(size, mgr->size_align);

    pipe_mutex_lock(mgr->bo_va_mutex);
    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;
        /* Delete uppermost hole if it reaches the new top */
        if (!LIST_IS_EMPTY(&mgr->va_holes)) {
            hole = container_of(mgr->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                mgr->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&mgr->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &mgr->va_holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &mgr->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME on allocation failure we just lose virtual address space
         * maybe print a warning
         */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(mgr->bo_va_mutex);
}
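/*
 * The cases above, in order: the freed range touches the top of the
 * allocated area (just lower va_offset, possibly swallowing the uppermost
 * hole), it touches the hole above it (grow that hole downwards, then fuse
 * with the hole below if they now meet), or it touches only the hole below
 * it (grow that hole upwards). Only when none apply is a fresh hole made.
 */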
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    pipe_mutex_lock(bo->mgr->bo_handles_mutex);
    util_hash_table_remove(bo->mgr->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->flink_name) {
        util_hash_table_remove(bo->mgr->bo_names,
                               (void*)(uintptr_t)bo->flink_name);
    }
    pipe_mutex_unlock(bo->mgr->bo_handles_mutex);

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    if (mgr->va) {
        if (bo->rws->va_unmap_working) {
            struct drm_radeon_gem_va va;

            va.handle = bo->handle;
            va.vm_id = 0;
            va.operation = RADEON_VA_UNMAP;
            va.flags = RADEON_VM_PAGE_READABLE |
                       RADEON_VM_PAGE_WRITEABLE |
                       RADEON_VM_PAGE_SNOOPED;
            va.offset = bo->va;

            if (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_VA, &va,
                                    sizeof(va)) != 0 &&
                va.operation == RADEON_VA_RESULT_ERROR) {
                fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
                fprintf(stderr, "radeon: size      : %d bytes\n", bo->base.size);
                fprintf(stderr, "radeon: va        : 0x%016llx\n", (unsigned long long)bo->va);
            }
        }

        radeon_bomgr_free_va(mgr, bo->va, bo->base.size);
    }

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    pipe_mutex_destroy(bo->map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->allocated_vram -= align(bo->base.size, mgr->size_align);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        bo->rws->allocated_gtt -= align(bo->base.size, mgr->size_align);
    FREE(bo);
}
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* If the buffer is created from user memory, return the user pointer. */
    if (bo->user_ptr)
        return bo->user_ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped. */
    if (bo->ptr) {
        bo->map_count++;
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    bo->map_count = 1;
    pipe_mutex_unlock(bo->map_mutex);
    return bo->ptr;
}
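/*
 * A minimal sketch of the two-step mapping handshake used above, assuming
 * fd is an open DRM file descriptor and handle a valid GEM handle:
 *
 *   struct drm_radeon_gem_mmap args = {0};
 *   args.handle = handle;
 *   args.size = size;
 *   drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP, &args, sizeof(args));
 *   void *ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
 *                       fd, args.addr_ptr);
 *
 * The kernel hands back a fake offset (addr_ptr) that the subsequent mmap
 * of the DRM fd translates into the buffer's pages.
 */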
static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0, NULL);
                }
                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data, 0, NULL);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_READWRITE);
            }

            bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}
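/*
 * Summary of the synchronization policy above: an UNSYNCHRONIZED map skips
 * all waiting; a read-only map waits only for pending GPU *writes*, since
 * two readers cannot conflict; a write map waits for any GPU access. With
 * DONTBLOCK, a would-be wait turns into an async flush plus a NULL return,
 * so the caller can retry later instead of stalling.
 */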
static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)_buf;

    if (bo->user_ptr)
        return;

    pipe_mutex_lock(bo->map_mutex);
    if (!bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return; /* it's not been mapped */
    }

    assert(bo->map_count);
    if (--bo->map_count) {
        pipe_mutex_unlock(bo->map_mutex);
        return; /* it's been mapped multiple times */
    }

    os_munmap(bo->ptr, bo->base.size);
    bo->ptr = NULL;
    pipe_mutex_unlock(bo->map_mutex);
}
static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      pb_size *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}
static const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    NULL, /* never called */
    NULL, /* never called */
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};
#ifndef RADEON_GEM_GTT_WC
#define RADEON_GEM_GTT_WC        (1 << 2)
#endif
#ifndef RADEON_GEM_CPU_ACCESS
/* BO is expected to be accessed by the CPU */
#define RADEON_GEM_CPU_ACCESS    (1 << 3)
#endif
#ifndef RADEON_GEM_NO_CPU_ACCESS
/* CPU access is not expected to work for this BO */
#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
#endif
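/*
 * These fallback definitions only keep the file building against older
 * libdrm headers; the values must match the kernel's radeon_drm.h, since
 * they are passed straight into DRM_RADEON_GEM_CREATE below.
 */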
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;
    args.flags = 0;

    if (rdesc->flags & RADEON_FLAG_GTT_WC)
        args.flags |= RADEON_GEM_GTT_WC;
    if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
        args.flags |= RADEON_GEM_CPU_ACCESS;
    if (rdesc->flags & RADEON_FLAG_NO_CPU_ACCESS)
        args.flags |= RADEON_GEM_NO_CPU_ACCESS;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon: size      : %d bytes\n", size);
        fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon: domains   : %d\n", args.initial_domain);
        fprintf(stderr, "radeon: flags     : %d\n", args.flags);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
    bo->initial_domain = rdesc->initial_domains;
    pipe_mutex_init(bo->map_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon: size      : %d bytes\n", size);
            fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon: domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon: va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(mgr->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(mgr->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(mgr->bo_handles_mutex);
    }

    if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, mgr->size_align);
    else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, mgr->size_align);

    return &bo->base;
}
static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}
/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (!radeon_bo_wait((struct pb_buffer*)bo, 0, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}
static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_names);
    util_hash_table_destroy(mgr->bo_handles);
    util_hash_table_destroy(mgr->bo_vas);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    pipe_mutex_destroy(mgr->bo_va_mutex);
    FREE(mgr);
}
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
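/*
 * Note: util_hash_table_create() expects a comparison callback that returns
 * 0 for equal keys, so the "!=" above doubles as an equality test for the
 * integer handles stuffed into the pointer keys.
 */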
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_names = util_hash_table_create(handle_hash, handle_compare);
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    mgr->bo_vas = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    pipe_mutex_init(mgr->bo_va_mutex);

    mgr->va = rws->info.r600_virtual_address;
    mgr->va_offset = rws->va_start;
    list_inithead(&mgr->va_holes);

    /* TTM aligns the BO size to the CPU page size */
    mgr->size_align = sysconf(_SC_PAGESIZE);

    return &mgr->base;
}
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}
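/*
 * Worked example: the kernel encodes the tile split as a 3-bit index, so
 * eg_tile_split(2) == 256 bytes, and eg_tile_split_rev(256) == 2 recovers
 * the index when packing flags in radeon_bo_set_tiling() below.
 */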
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea,
                                 bool *scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_TILING_MICRO)
        *microtiled = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        *microtiled = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_TILING_MACRO)
        *macrotiled = RADEON_LAYOUT_TILED;
    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
    if (scanout)
        *scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned pipe_config,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea, unsigned num_banks,
                                 uint32_t pitch,
                                 bool scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0, NULL);
    }

    os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MICRO;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MACRO;

    args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (stencil_tile_split &
                          RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
        RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
    args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    if (bo->rws->gen >= DRV_SI && !scanout)
        args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}
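/*
 * Rough layout of args.tiling_flags as packed above (the exact mask and
 * shift values come from the kernel's radeon_drm.h): the micro/macro tiling
 * bits sit in the low positions, followed by the Evergreen fields bankw,
 * bankh, tile-split index, stencil tile-split and macro-tile aspect, plus
 * the R600_NO_SCANOUT bit set on SI and newer chips.
 */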
static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        boolean use_reusable_pool,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Align size to page size. This is the minimum alignment for normal
     * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
     * like constant/uniform buffers, can benefit from better and more reuse.
     */
    size = align(size, mgr->size_align);

    /* Only set one usage bit each for domains and flags, or the cache manager
     * might consider different sets of domains / flags compatible
     */
    if (domain == RADEON_DOMAIN_VRAM_GTT)
        desc.base.usage = 1 << 2;
    else
        desc.base.usage = domain >> 1;
    assert(flags < sizeof(desc.base.usage) * 8 - 3);
    desc.base.usage |= 1 << (flags + 3);

    desc.initial_domains = domain;
    desc.flags = flags;

    /* Assign a buffer manager. */
    if (use_reusable_pool)
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    pipe_mutex_lock(mgr->bo_handles_mutex);
    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)get_radeon_bo(buffer)->handle, buffer);
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    return (struct pb_buffer*)buffer;
}
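/*
 * Worked example of the cache-key packing above, assuming the kernel's
 * usual domain values (RADEON_DOMAIN_GTT = 0x2, RADEON_DOMAIN_VRAM = 0x4):
 * "domain >> 1" maps GTT to usage 1 and VRAM to usage 2, while the combined
 * VRAM_GTT case gets the distinct value 1 << 2, so each domain choice
 * hashes differently in the cache manager; flag bits are then shifted past
 * those three positions with 1 << (flags + 3).
 */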
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
                                                   void *pointer, unsigned size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_radeon_gem_userptr args;
    struct radeon_bo *bo;
    int r;

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    memset(&args, 0, sizeof(args));
    args.addr = (uintptr_t)pointer;
    args.size = align(size, sysconf(_SC_PAGE_SIZE));
    args.flags = RADEON_GEM_USERPTR_ANONONLY |
        RADEON_GEM_USERPTR_VALIDATE |
        RADEON_GEM_USERPTR_REGISTER;
    if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
                            &args, sizeof(args))) {
        FREE(bo);
        return NULL;
    }

    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->handle = args.handle;
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->user_ptr = pointer;
    bo->va = 0;
    bo->initial_domain = RADEON_DOMAIN_GTT;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);

    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(mgr->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(mgr->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(mgr->bo_handles_mutex);
    }

    ws->allocated_gtt += align(bo->base.size, mgr->size_align);

    return (struct pb_buffer*)bo;
}
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    int r;
    unsigned handle;
    uint64_t size = 0;

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(mgr->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg = {};
        memset(&open_arg, 0, sizeof(open_arg));

        /* Open the BO. */
        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->flink_name = whandle->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough, but
         * it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (bo->flink_name)
        util_hash_table_set(mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (mgr->va && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(mgr->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(mgr->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(mgr->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, mgr->size_align);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, mgr->size_align);

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}
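/*
 * Note on the two lookup tables used above: flink names and GEM handles are
 * separate namespaces, so a buffer imported by name is additionally indexed
 * by the GEM handle DRM_IOCTL_GEM_OPEN returned, while dma-buf fds are
 * first converted to a GEM handle because an fd number is not a stable key.
 */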
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if ((void*)bo != (void*)buffer)
        pb_cache_manager_remove_buffer(buffer);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flink_name) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flink_name = flink.name;

            pipe_mutex_lock(bo->mgr->bo_handles_mutex);
            util_hash_table_set(bo->mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
            pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
        }
        whandle->handle = bo->flink_name;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(bo->rws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return FALSE;
    }

    whandle->stride = stride;
    return TRUE;
}
static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo*)buf)->va;
}
void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}