/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
/*
 * These are copied from radeon_drm.h; once an updated libdrm is released,
 * we should bump the configure.ac requirement for it and remove the
 * following fallback definitions.
 */
#define RADEON_BO_FLAGS_MACRO_TILE        1
#define RADEON_BO_FLAGS_MICRO_TILE        2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT     0x2b

#define RADEON_GEM_NO_WAIT      0x1
#define RADEON_GEM_USAGE_READ   0x2
#define RADEON_GEM_USAGE_WRITE  0x4

struct drm_radeon_gem_wait {
    uint32_t    handle;
    uint32_t    flags;      /* one of RADEON_GEM_* */
};
#endif
#ifndef RADEON_VA_MAP
#define RADEON_VA_MAP               1
#define RADEON_VA_UNMAP             2

#define RADEON_VA_RESULT_OK         0
#define RADEON_VA_RESULT_ERROR      1
#define RADEON_VA_RESULT_VA_EXIST   2

#define RADEON_VM_PAGE_VALID        (1 << 0)
#define RADEON_VM_PAGE_READABLE     (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE    (1 << 2)
#define RADEON_VM_PAGE_SYSTEM       (1 << 3)
#define RADEON_VM_PAGE_SNOOPED      (1 << 4)

struct drm_radeon_gem_va {
    uint32_t    handle;
    uint32_t    operation;
    uint32_t    vm_id;
    uint32_t    flags;
    uint64_t    offset;
};

#define DRM_RADEON_GEM_VA   0x2b
#endif
#ifndef DRM_RADEON_GEM_OP
#define DRM_RADEON_GEM_OP           0x2c

/* Sets or returns a value associated with a buffer. */
struct drm_radeon_gem_op {
    uint32_t    handle;     /* buffer */
    uint32_t    op;         /* RADEON_GEM_OP_* */
    uint64_t    value;      /* input or return value */
};

#define RADEON_GEM_OP_GET_INITIAL_DOMAIN    0
#define RADEON_GEM_OP_SET_INITIAL_DOMAIN    1
#endif
extern const struct pb_vtbl radeon_bo_vtbl;

static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}
struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer GEM names. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_names;
    /* List of buffer handles. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_handles;
    /* List of buffer virtual memory ranges. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_vas;
    pipe_mutex bo_handles_mutex;
    pipe_mutex bo_va_mutex;

    /* is virtual address supported */
    boolean va;
    uint64_t va_offset;
    struct list_head va_holes;
};
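/* Note (added summary, not an original comment): va_offset is the top of
 * the virtual-address space handed out so far and only grows; va_holes
 * records freed ranges, kept sorted from higher to lower offsets, so that
 * radeon_bomgr_find_va() can recycle them before growing va_offset. */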
static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}
static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) == -EBUSY);
    } else*/ {
        struct drm_radeon_gem_wait_idle args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                               &args, sizeof(args)) == -EBUSY);
    }
}
static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage | RADEON_GEM_NO_WAIT;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) != 0;
    } else*/ {
        struct drm_radeon_gem_busy args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
}
static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Zero domains the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}
static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct radeon_winsys_cs_handle *buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                        &args, sizeof(args));

    /* GEM domains and winsys domains are defined the same. */
    return get_valid_domain(args.value);
}
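/* Added summary of the allocator below: this is a first-fit scan over the
 * hole list. "waste" is the padding needed to align a hole's offset; a hole
 * is reused only if size + waste fits, and any padding is split off as a
 * smaller hole. If nothing fits, the allocation comes from the top of the
 * VA space by bumping va_offset. */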
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    alignment = MAX2(alignment, 4096);
    size = align(size, 4096);

    pipe_mutex_lock(mgr->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
    }

    offset = mgr->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &mgr->va_holes);
    }
    offset += waste;
    mgr->va_offset += size + waste;
    pipe_mutex_unlock(mgr->bo_va_mutex);
    return offset;
}
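/* Added summary of the release path below: if the freed range ends at the
 * current top of the VA space, va_offset is simply lowered (also swallowing
 * an adjacent hole that reaches the new top). Otherwise the range is merged
 * into an adjacent hole where possible, or inserted as a new hole at the
 * right position in the sorted list. */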
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    size = align(size, 4096);

    pipe_mutex_lock(mgr->bo_va_mutex);
    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;
        /* Delete uppermost hole if it reaches the new top */
        if (!LIST_IS_EMPTY(&mgr->va_holes)) {
            hole = container_of(mgr->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                mgr->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&mgr->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &mgr->va_holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &mgr->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
        } else {
            /* FIXME on allocation failure we just lose virtual address space
             * maybe print a warning
             */
            next = CALLOC_STRUCT(radeon_bo_va_hole);
            if (next) {
                next->size = size;
                next->offset = va;
                list_add(&next->list, &hole->list);
            }
        }
    }
out:
    pipe_mutex_unlock(mgr->bo_va_mutex);
}
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    pipe_mutex_lock(bo->mgr->bo_handles_mutex);
    util_hash_table_remove(bo->mgr->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->name)
        util_hash_table_remove(bo->mgr->bo_names,
                               (void*)(uintptr_t)bo->name);
    pipe_mutex_unlock(bo->mgr->bo_handles_mutex);

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    if (mgr->va) {
        radeon_bomgr_free_va(mgr, bo->va, bo->base.size);
    }

    pipe_mutex_destroy(bo->map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->allocated_vram -= align(bo->base.size, 4096);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        bo->rws->allocated_gtt -= align(bo->base.size, 4096);
    FREE(bo);
}
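/* Note (added): the mapping below is created once and cached in bo->ptr for
 * the buffer's lifetime; it is only unmapped in radeon_bo_destroy(). The
 * unlocked bo->ptr check is a fast path and is repeated under map_mutex so
 * that two racing callers cannot mmap the same buffer twice. */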
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}
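/* Synchronization policy of radeon_bo_map() below (added summary):
 *  - PIPE_TRANSFER_UNSYNCHRONIZED: map immediately without waiting.
 *  - PIPE_TRANSFER_DONTBLOCK: never wait; flush the CS asynchronously if it
 *    references the buffer and return NULL while the buffer is busy.
 *  - otherwise: flush the CS if needed, then block until the GPU is done.
 * Read-only maps only have to wait for GPU writers, since concurrent GPU
 * reads are harmless. */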
static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data, 0);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }

            bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}
static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    NULL, /* never called */
    NULL, /* never called */
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
    bo->initial_domain = rdesc->initial_domains;
    pipe_mutex_init(bo->map_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon:    size      : %d bytes\n", size);
            fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon:    va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(mgr->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(mgr->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(mgr->bo_handles_mutex);
    }

    if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, 4096);
    else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, 4096);

    return &bo->base;
}
static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}
static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_names);
    util_hash_table_destroy(mgr->bo_handles);
    util_hash_table_destroy(mgr->bo_vas);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    pipe_mutex_destroy(mgr->bo_va_mutex);
    FREE(mgr);
}
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_names = util_hash_table_create(handle_hash, handle_compare);
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    mgr->bo_vas = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    pipe_mutex_init(mgr->bo_va_mutex);

    mgr->va = rws->info.r600_virtual_address;
    mgr->va_offset = rws->info.r600_va_start;
    list_inithead(&mgr->va_holes);

    return &mgr->base;
}
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea,
                                 boolean *scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        *microtiled = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;
    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
    if (scanout)
        *scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea,
                                 uint32_t pitch,
                                 boolean scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (stencil_tile_split &
                          RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
        RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
    args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    if (bo->rws->gen >= DRV_SI && !scanout)
        args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}
static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        boolean use_reusable_pool,
                        enum radeon_bo_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Additional criteria for the cache manager. */
    desc.base.usage = domain;
    desc.initial_domains = domain;

    /* Assign a buffer manager. */
    if (use_reusable_pool)
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    pipe_mutex_lock(mgr->bo_handles_mutex);
    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)get_radeon_bo(buffer)->handle, buffer);
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    return (struct pb_buffer*)buffer;
}
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    int r;
    unsigned handle;
    uint64_t size = 0;

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(mgr->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg = {};
        memset(&open_arg, 0, sizeof(open_arg));

        /* Open the BO. */
        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->name = whandle->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough, but
         * it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (bo->name)
        util_hash_table_set(mgr->bo_names, (void*)(uintptr_t)bo->name, bo);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (mgr->va && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(mgr->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(mgr->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(mgr->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, 4096);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, 4096);

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flink) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flink = flink.name;

            pipe_mutex_lock(bo->mgr->bo_handles_mutex);
            util_hash_table_set(bo->mgr->bo_names, (void*)(uintptr_t)bo->flink, bo);
            pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(bo->rws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return FALSE;
    }

    whandle->stride = stride;
    return TRUE;
}
static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}
;