/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
/*
 * These are copied from radeon_drm.h; once an updated libdrm is released,
 * we should bump the configure.ac requirement for it and remove the
 * definitions below.
 */
#define RADEON_BO_FLAGS_MACRO_TILE  1
#define RADEON_BO_FLAGS_MICRO_TILE  2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT     0x2b

#define RADEON_GEM_NO_WAIT      0x1
#define RADEON_GEM_USAGE_READ   0x2
#define RADEON_GEM_USAGE_WRITE  0x4

struct drm_radeon_gem_wait {
    uint32_t    handle;
    uint32_t    flags;  /* one of RADEON_GEM_* */
};

#endif
#ifndef RADEON_VA_MAP

#define RADEON_VA_MAP               1
#define RADEON_VA_UNMAP             2

#define RADEON_VA_RESULT_OK         0
#define RADEON_VA_RESULT_ERROR      1
#define RADEON_VA_RESULT_VA_EXIST   2

#define RADEON_VM_PAGE_VALID        (1 << 0)
#define RADEON_VM_PAGE_READABLE     (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE    (1 << 2)
#define RADEON_VM_PAGE_SYSTEM       (1 << 3)
#define RADEON_VM_PAGE_SNOOPED      (1 << 4)

struct drm_radeon_gem_va {
    uint32_t    handle;
    uint32_t    operation;
    uint32_t    vm_id;
    uint32_t    flags;
    uint64_t    offset;
};

#define DRM_RADEON_GEM_VA   0x2b
#endif
extern const struct pb_vtbl radeon_bo_vtbl;

static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}
struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
    pipe_mutex bo_va_mutex;

    /* Is virtual address supported? */
    boolean va;
    uint64_t va_offset;
    struct list_head va_holes;
};
static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        /* Possibly a pb_buffer wrapper; unwrap it to the underlying BO. */
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}
static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    /* Wait for any pending ioctls on this BO to complete first. */
    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) == -EBUSY);
    } else*/ {
        struct drm_radeon_gem_wait_idle args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                                   &args, sizeof(args)) == -EBUSY);
    }
}
static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage | RADEON_GEM_NO_WAIT;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) != 0;
    } else*/ {
        struct drm_radeon_gem_busy args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
}
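/*
 * Virtual address (VA) management.
 *
 * The three functions below implement a simple first-fit allocator over
 * the GPU virtual address space: radeon_bomgr_find_va() hands out a range,
 * radeon_bomgr_free_va() returns one, and radeon_bomgr_force_va() reserves
 * a specific range (used when the kernel reports that a BO already has a
 * VA). Free space is tracked as a list of holes (mgr->va_holes) below the
 * high-water mark mgr->va_offset.
 */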
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    pipe_mutex_lock(mgr->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
        offset = hole->offset;

        /* Align the hole's start offset; "waste" is what alignment skips. */
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;

        /* Too small after alignment, try the next hole. */
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }

        /* Exact fit with no waste: consume the whole hole. */
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }

        /* Bigger than needed: shrink the hole, keeping any alignment
         * waste in front of it as a new hole. */
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }

        /* Exact fit after alignment: the waste becomes the whole hole. */
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
    }

    /* No hole fits; allocate from the top of the address space. */
    offset = mgr->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &mgr->va_holes);
    }
    offset += waste;
    mgr->va_offset += size + waste;
    pipe_mutex_unlock(mgr->bo_va_mutex);
    return offset;
}
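/*
 * Reserve a specific VA range, pruning or splitting any holes that overlap
 * it. This is used when the kernel tells us a shared BO already has a VA
 * (RADEON_VA_RESULT_VA_EXIST) and we must adopt that address.
 */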
static void radeon_bomgr_force_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    pipe_mutex_lock(mgr->bo_va_mutex);
    if (va >= mgr->va_offset) {
        /* The range lies above the high-water mark: everything between the
         * current top and "va" becomes a hole. */
        if (va > mgr->va_offset) {
            struct radeon_bo_va_hole *hole;
            hole = CALLOC_STRUCT(radeon_bo_va_hole);
            if (hole) {
                hole->size = va - mgr->va_offset;
                hole->offset = mgr->va_offset;
                list_add(&hole->list, &mgr->va_holes);
            }
        }
        mgr->va_offset = va + size;
    } else {
        struct radeon_bo_va_hole *hole, *n;
        uint64_t hole_end, va_end;

        /* Prune/free all holes that fall into the range. */
        LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
            hole_end = hole->offset + hole->size;
            va_end = va + size;

            /* No overlap with this hole. */
            if (hole->offset >= va_end || hole_end <= va)
                continue;

            /* The hole lies entirely inside the range: remove it. */
            if (hole->offset >= va && hole_end <= va_end) {
                list_del(&hole->list);
                FREE(hole);
                continue;
            }

            /* Partial overlap: clip the hole to the side that survives. */
            if (hole->offset >= va)
                hole->offset = va_end;
            else
                hole_end = va;

            hole->size = hole_end - hole->offset;
        }
    }
    pipe_mutex_unlock(mgr->bo_va_mutex);
}
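/*
 * Return a VA range to the allocator. Adjacent holes are coalesced so the
 * hole list stays minimal; a range freed from the very top simply lowers
 * the high-water mark instead of creating a hole.
 */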
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    pipe_mutex_lock(mgr->bo_va_mutex);
    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;
        /* Delete the uppermost hole if it reaches the new top. */
        if (!LIST_IS_EMPTY(&mgr->va_holes)) {
            hole = container_of(mgr->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                mgr->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        /* Find the hole immediately above the freed range, if any
         * (the list is ordered by decreasing offset). */
        hole = container_of(&mgr->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &mgr->va_holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &mgr->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME: on allocation failure we just lose virtual address space;
         * maybe print a warning. */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(mgr->bo_va_mutex);
}
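/*
 * Example of the hole lifecycle (illustrative values, assuming va_offset
 * starts at 0 with no holes): find_va(mgr, 0x2000, 0x1000) returns 0 and
 * raises va_offset to 0x2000; a second find_va(mgr, 0x1000, 0x1000)
 * returns 0x2000. Freeing the first range with free_va(mgr, 0, 0x2000)
 * creates a hole [0, 0x2000), which the next suitably sized find_va()
 * reuses; freeing the second range then merges everything back and drops
 * va_offset to 0.
 */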
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    /* Close the GEM object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    if (mgr->va)
        radeon_bomgr_free_va(mgr, bo->va, bo->va_size);

    pipe_mutex_destroy(bo->map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->allocated_vram -= align(bo->base.size, 4096);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        bo->rws->allocated_gtt -= align(bo->base.size, 4096);
    FREE(bo);
}
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}
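/*
 * Synchronizing map. Unless PIPE_TRANSFER_UNSYNCHRONIZED is set, we must
 * make sure the GPU is done with the buffer before handing a pointer to
 * the caller:
 *
 *  - DONTBLOCK: flush the CS asynchronously if it references the buffer
 *    and fail (return NULL) if the buffer is still busy.
 *  - otherwise: flush the CS synchronously if needed and block in
 *    radeon_bo_wait().
 *
 * Read-only maps only need to wait for GPU writes; concurrent GPU reads
 * are harmless since neither side modifies the data.
 */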
static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                /* Try to avoid busy-waiting in radeon_bo_wait. */
                if (p_atomic_read(&bo->num_active_ioctls))
                    radeon_drm_cs_sync_flush(rcs);

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }

            bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}
static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      pb_size *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    NULL, /* never called */
    NULL, /* never called */
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};
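/*
 * Buffer creation through the pb_manager interface. This is the kernel
 * manager (kman) entry point; the cache manager wraps it for reusable
 * allocations. A GPU virtual address is assigned here as well when the
 * kernel supports virtual memory.
 */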
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
    bo->initial_domain = rdesc->initial_domains;
    pipe_mutex_init(bo->map_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = align(size, 4096);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon:    size      : %d bytes\n", size);
            fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon:    va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            /* The kernel already knows this BO's VA; adopt it. */
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, 4096);
    else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, 4096);

    return &bo->base;
}
static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}
static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    pipe_mutex_destroy(mgr->bo_va_mutex);
    FREE(mgr);
}
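/*
 * GEM flink names are small unique integers handed out by the kernel, so
 * the identity function is a perfectly good hash and inequality is a
 * sufficient comparison for the handle table below.
 */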
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    pipe_mutex_init(mgr->bo_va_mutex);

    mgr->va = rws->info.r600_virtual_address;
    mgr->va_offset = rws->info.r600_va_start;
    list_inithead(&mgr->va_holes);

    return &mgr->base;
}
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}
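/*
 * In other words, the hardware encodes the tile split in bytes as a 3-bit
 * exponent: tile_split_bytes = 64 << index, with 1024 (index 4) as the
 * fallback for out-of-range values. eg_tile_split_rev() above is simply
 * the inverse mapping.
 */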
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;

    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
}
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (stencil_tile_split &
                          RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
        RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
    args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}
static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
{
    /* Return the radeon_bo behind the (possibly wrapped) pb_buffer. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        boolean use_reusable_pool,
                        enum radeon_bo_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Additional criteria for the cache manager. */
    desc.base.usage = domain;
    desc.initial_domains = domain;

    /* Assign a buffer manager. */
    if (use_reusable_pool)
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    return (struct pb_buffer*)buffer;
}
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};
    int r;

    memset(&open_arg, 0, sizeof(open_arg));

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = open_arg.size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (mgr->va && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = ((bo->base.size + 4095) & ~4095);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            /* The kernel already knows this BO's VA; adopt it. */
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    ws->allocated_vram += align(open_arg.size, 4096);
    bo->initial_domain = RADEON_DOMAIN_VRAM;

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;

            pipe_mutex_lock(bo->mgr->bo_handles_mutex);
            util_hash_table_set(bo->mgr->bo_handles, (void*)(uintptr_t)bo->flink, bo);
            pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}
static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
}