/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>

/*
 * These are copied from radeon_drm.h. Once an updated libdrm is released,
 * we should bump the configure.ac requirement for it and remove the
 * following definitions.
 */
#define RADEON_BO_FLAGS_MACRO_TILE        1
#define RADEON_BO_FLAGS_MICRO_TILE        2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20

#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT     0x2b

#define RADEON_GEM_NO_WAIT      0x1
#define RADEON_GEM_USAGE_READ   0x2
#define RADEON_GEM_USAGE_WRITE  0x4

struct drm_radeon_gem_wait {
    uint32_t    handle;
    uint32_t    flags;      /* one of RADEON_GEM_* */
};
#endif

#ifndef RADEON_VA_MAP

#define RADEON_VA_MAP               1
#define RADEON_VA_UNMAP             2

#define RADEON_VA_RESULT_OK         0
#define RADEON_VA_RESULT_ERROR      1
#define RADEON_VA_RESULT_VA_EXIST   2

#define RADEON_VM_PAGE_VALID        (1 << 0)
#define RADEON_VM_PAGE_READABLE     (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE    (1 << 2)
#define RADEON_VM_PAGE_SYSTEM       (1 << 3)
#define RADEON_VM_PAGE_SNOOPED      (1 << 4)

struct drm_radeon_gem_va {
    uint32_t    handle;
    uint32_t    operation;
    uint32_t    vm_id;
    uint32_t    flags;
    uint64_t    offset;
};

#define DRM_RADEON_GEM_VA   0x2b
#endif

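/* How the DRM_RADEON_GEM_VA ioctl is used in this file, in rough outline
 * (based on the calls below, not on kernel documentation): pick a virtual
 * address with the allocator further down, fill in handle, operation and
 * flags, and submit the request. If the kernel answers with
 * RADEON_VA_RESULT_VA_EXIST, the buffer already has a virtual address and
 * va.offset carries that existing address, which we then adopt. */
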
extern const struct pb_vtbl radeon_bo_vtbl;

static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
    pipe_mutex bo_va_mutex;

    /* Is virtual address space supported? */
    boolean va;
    uint64_t va_offset;
    struct list_head va_holes;
};

static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}

static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    /* Wait for any pending ioctls on this BO to finish first. */
    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) == -EBUSY);
    } else*/ {
        struct drm_radeon_gem_wait_idle args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                                   &args, sizeof(args)) == -EBUSY);
    }
}

static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage | RADEON_GEM_NO_WAIT;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) != 0;
    } else*/ {
        struct drm_radeon_gem_busy args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
}

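/* The three functions below implement a simple first-fit allocator for GPU
 * virtual address space. Free ranges are kept in mgr->va_holes; anything at
 * or above mgr->va_offset has never been handed out. For an alignment of
 * 4096 and a hole starting at offset 10240, for example, the waste is
 * 10240 % 4096 = 2048, so 4096 - 2048 = 2048 bytes are skipped and the
 * allocation is placed at 12288. */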
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    pipe_mutex_lock(mgr->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
        offset = hole->offset;
        waste = 0;
        if (alignment) {
            waste = offset % alignment;
            waste = waste ? alignment - waste : 0;
        }
        offset += waste;
        /* The aligned offset may fall past the end of the hole. */
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        /* An exact fit consumes the whole hole. */
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        /* Otherwise carve the allocation out of the front of the hole,
         * leaving the alignment waste behind as a smaller hole. */
        if ((hole->size - waste) >= size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &mgr->va_holes);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
    }

    /* No hole fits; allocate from the end of the address space. */
    offset = mgr->va_offset;
    waste = 0;
    if (alignment) {
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
    }
    offset += waste;
    mgr->va_offset += size + waste;
    pipe_mutex_unlock(mgr->bo_va_mutex);
    return offset;
}

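/* Mark a specific virtual address range as allocated. This is needed when
 * the kernel reports that a shared buffer already has a virtual address
 * (RADEON_VA_RESULT_VA_EXIST), which may differ from the one we picked. */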
static void radeon_bomgr_force_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    pipe_mutex_lock(mgr->bo_va_mutex);
    if (va >= mgr->va_offset) {
        if (va > mgr->va_offset) {
            struct radeon_bo_va_hole *hole;
            hole = CALLOC_STRUCT(radeon_bo_va_hole);
            if (hole) {
                hole->size = va - mgr->va_offset;
                hole->offset = mgr->va_offset;
                list_add(&hole->list, &mgr->va_holes);
            }
        }
        mgr->va_offset = va + size;
    } else {
        struct radeon_bo_va_hole *hole, *n;
        uint64_t stmp, etmp;

        /* free all holes that fall into the range
         * NOTE that we might lose virtual address space
         */
        LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
            stmp = hole->offset;
            etmp = stmp + hole->size;
            if (va >= stmp && va < etmp) {
                list_del(&hole->list);
                FREE(hole);
            }
        }
    }
    pipe_mutex_unlock(mgr->bo_va_mutex);
}

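/* Return a virtual address range to the allocator: either lower the
 * high-water mark when the range sits at the very top, or record it as a
 * hole for radeon_bomgr_find_va() to reuse. */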
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    pipe_mutex_lock(mgr->bo_va_mutex);
    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;
    } else {
        struct radeon_bo_va_hole *hole;

        /* FIXME on allocation failure we just lose virtual address space,
         * maybe print a warning
         */
        hole = CALLOC_STRUCT(radeon_bo_va_hole);
        if (hole) {
            hole->size = size;
            hole->offset = va;
            list_add(&hole->list, &mgr->va_holes);
        }
    }
    pipe_mutex_unlock(mgr->bo_va_mutex);
}

static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    if (mgr->va) {
        radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
    }

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
    pipe_mutex_destroy(bo->map_mutex);
    FREE(bo);
}

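/* Translate pipe_transfer_usage flags into pipebuffer (PB) usage flags.
 * For example, PIPE_TRANSFER_WRITE becomes PB_USAGE_CPU_WRITE; a plain read
 * maps to no flag, because waiting for pending GPU writes is the default
 * behavior in radeon_bo_map_internal() below. */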
static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
    unsigned res = 0;

    if (usage & PIPE_TRANSFER_WRITE)
        res |= PB_USAGE_CPU_WRITE;

    if (usage & PIPE_TRANSFER_DONTBLOCK)
        res |= PB_USAGE_DONTBLOCK;

    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
        res |= PB_USAGE_UNSYNCHRONIZED;

    return res;
}

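/* Synchronization strategy for mapping, as implemented below: unless the
 * caller asked for an unsynchronized map, flush the command stream if it
 * references this buffer and then wait for the GPU. Read-only maps only
 * need to wait for GPU writes; concurrent GPU reads are harmless. With
 * PB_USAGE_DONTBLOCK, the flush is asynchronous and NULL is returned
 * instead of waiting if the buffer is still busy. */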
static void *radeon_bo_map_internal(struct pb_buffer *_buf,
                                    unsigned flags, void *flush_ctx)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    struct drm_radeon_gem_mmap args;
    void *ptr;

    memset(&args, 0, sizeof(args));

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (flags & PB_USAGE_DONTBLOCK) {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                } else {
                    /* Try to avoid busy-waiting in radeon_bo_wait. */
                    if (p_atomic_read(&bo->num_active_ioctls))
                        radeon_drm_cs_sync_flush(cs);
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }
        }
    }

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}

static void radeon_bo_unmap_internal(struct pb_buffer *_buf)
{
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      pb_size *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    radeon_bo_map_internal,
    radeon_bo_unmap_internal,
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

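/* Create a new buffer object: allocate the GEM object and then, if the
 * kernel supports virtual memory, pick a virtual address and map the buffer
 * there. If the kernel says the address is already taken (VA_EXIST), adopt
 * the kernel-provided address instead. */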
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = align(size, 4096);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
            fprintf(stderr, "radeon:    size      : %d bytes\n", size);
            fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    return &bo->base;
}

static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    pipe_mutex_destroy(mgr->bo_va_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

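/* GEM handles and flink names are small kernel-assigned integers, so the
 * value itself serves as the hash, and the compare callback only has to
 * return zero for equal keys (memcmp-style), which is what the expression
 * above does. */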
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    pipe_mutex_init(mgr->bo_va_mutex);

    mgr->va = rws->info.r600_virtual_address;
    mgr->va_offset = rws->info.r600_va_start;
    list_inithead(&mgr->va_holes);

    return &mgr->base;
}

static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *cs,
                           enum pipe_transfer_usage usage)
{
    return pb_map(buf, get_pb_usage_from_transfer_flags(usage), cs);
}

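/* Decode the Evergreen tile-split field from the tiling flags: the encoded
 * value n stands for 64 << n bytes, e.g. 3 decodes to 512 bytes. Values
 * above 6 fall back to 1024 bytes via the default case. */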
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

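/* Query the kernel for the buffer's tiling state and translate the flag
 * bits into the winsys layout enums. The Evergreen-specific fields are only
 * written when the caller passed pointers for all of them. */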
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;

    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) &
                 RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) &
                 RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) &
                      RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split =
            (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) &
            RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) &
                  RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
}

static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(
        struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}

static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        unsigned bind,
                        enum radeon_bo_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Additional criteria for the cache manager. */
    desc.base.usage = domain;
    desc.initial_domains = domain;

    /* Assign a buffer manager. */
    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
                PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_CUSTOM))
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    return (struct pb_buffer*)buffer;
}

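/* Import a buffer shared by another process or driver. The winsys handle
 * carries a GEM flink name; DRM_IOCTL_GEM_OPEN turns it into a local GEM
 * handle, and the <name, bo> hash table guarantees that importing the same
 * name twice yields the same radeon_bo. */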
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};
    int r;

    memset(&open_arg, 0, sizeof(open_arg));

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = open_arg.size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = ((bo->base.size + 4095) & ~4095);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}

static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = pb_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
}
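
/* Typical call sequence from a driver, as a rough sketch (the 'ws' pointer,
 * 'cs' and the sizes are hypothetical; error handling omitted):
 *
 *   struct pb_buffer *buf =
 *       ws->base.buffer_create(&ws->base, 64 * 1024, 4096,
 *                              PIPE_BIND_VERTEX_BUFFER,
 *                              RADEON_DOMAIN_GTT);
 *   void *ptr = ws->base.buffer_map(buf, cs, PIPE_TRANSFER_WRITE);
 *   ...write vertex data...
 *   ws->base.buffer_unmap(buf);
 *
 * buffer_create routes vertex/index/constant buffers through the caching
 * manager (ws->cman) and everything else through the kernel manager
 * (ws->kman), as set up in radeon_winsys_bo_create() above. */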