/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
extern const struct pb_vtbl radeon_bo_vtbl;
static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}
struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};
struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer GEM names. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_names;
    /* List of buffer handles. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_handles;
    /* List of buffer virtual memory ranges. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_vas;
    pipe_mutex bo_handles_mutex;
    pipe_mutex bo_va_mutex;

    /* is virtual address supported */
    boolean va;
    uint64_t va_offset;
    struct list_head va_holes;
};
static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}
static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_wait_idle args = {0};

    /* Wait until any pending ioctls referencing this buffer have been
     * submitted. */
    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    args.handle = bo->handle;
    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                           &args, sizeof(args)) == -EBUSY);
}
static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_busy args = {0};

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}
static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Zero domains the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}
static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct radeon_winsys_cs_handle *buf)
{
    struct radeon_bo *bo = (struct radeon_bo *)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                        &args, sizeof(args));

    /* GEM domains and winsys domains are defined the same. */
    return get_valid_domain(args.value);
}
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr,
                                     uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    alignment = MAX2(alignment, 4096);
    size = align(size, 4096);

    pipe_mutex_lock(mgr->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
    }

    offset = mgr->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &mgr->va_holes);
    }
    offset += waste;
    mgr->va_offset += size + waste;
    pipe_mutex_unlock(mgr->bo_va_mutex);
    return offset;
}
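/* Illustrative sketch (not compiled into the driver): how the first-fit
 * allocator above behaves, assuming an empty hole list and va_offset
 * starting at 0:
 *
 *    uint64_t a = radeon_bomgr_find_va(mgr, 4096, 4096);   a == 0
 *    uint64_t b = radeon_bomgr_find_va(mgr, 8192, 4096);   b == 4096
 *    radeon_bomgr_free_va(mgr, a, 4096);                   hole at [0,4096)
 *    uint64_t c = radeon_bomgr_find_va(mgr, 4096, 4096);   c == 0 again
 *
 * The exact-fit branch removes the hole entirely, the ">" branch shrinks
 * it from the bottom, and any alignment waste in front of the allocation
 * is kept as a new smaller hole so that address space is not leaked.
 */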
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr,
                                 uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    size = align(size, 4096);

    pipe_mutex_lock(mgr->bo_va_mutex);
    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;
        /* Delete uppermost hole if it reaches the new top */
        if (!LIST_IS_EMPTY(&mgr->va_holes)) {
            hole = container_of(mgr->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                mgr->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&mgr->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &mgr->va_holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &mgr->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME on allocation failure we just lose virtual address space
         * maybe print a warning
         */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(mgr->bo_va_mutex);
}
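/* Illustrative sketch of the coalescing above: given holes [0,4096) and
 * [8192,12288), freeing [4096,8192) first grows the upper hole downwards
 * to [4096,12288) and then merges it into the adjacent lower hole,
 * leaving the single hole [0,12288). Freeing the topmost range instead
 * takes the fast path and simply lowers mgr->va_offset.
 */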
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    pipe_mutex_lock(bo->mgr->bo_handles_mutex);
    util_hash_table_remove(bo->mgr->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->flink_name) {
        util_hash_table_remove(bo->mgr->bo_names,
                               (void*)(uintptr_t)bo->flink_name);
    }
    pipe_mutex_unlock(bo->mgr->bo_handles_mutex);

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    /* Close the buffer object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    if (mgr->va) {
        radeon_bomgr_free_va(mgr, bo->va, bo->base.size);
    }

    pipe_mutex_destroy(bo->map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->allocated_vram -= align(bo->base.size, 4096);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        bo->rws->allocated_gtt -= align(bo->base.size, 4096);
    FREE(bo);
}
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}
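/* Usage sketch (illustrative only): radeon_bo_do_map uses double-checked
 * locking, so concurrent callers contend on map_mutex at most once and
 * all of them end up sharing one persistent CPU mapping:
 *
 *    void *ptr = radeon_bo_do_map(bo);
 *    if (ptr)
 *        memcpy(ptr, data, bo->base.size);   CPU access through the map
 *
 * The mapping is cached in bo->ptr and only torn down by os_munmap in
 * radeon_bo_destroy.
 */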
static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo *)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs *)rcs;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer *)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer *)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0, NULL);
                }
                radeon_bo_wait((struct pb_buffer *)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data, 0, NULL);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer *)bo, RADEON_USAGE_READWRITE);
            }

            bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}
static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
    /* NOP */
}
static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}
static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}
static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}
const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    NULL, /* never called */
    NULL, /* never called */
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};
#ifndef RADEON_GEM_GTT_WC
#define RADEON_GEM_GTT_WC       (1 << 2)
#endif
#ifndef RADEON_GEM_CPU_ACCESS
/* BO is expected to be accessed by the CPU */
#define RADEON_GEM_CPU_ACCESS   (1 << 3)
#endif
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc *)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;
    args.flags = 0;

    if (rdesc->flags & RADEON_FLAG_GTT_WC)
        args.flags |= RADEON_GEM_GTT_WC;
    if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
        args.flags |= RADEON_GEM_CPU_ACCESS;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon: size      : %d bytes\n", size);
        fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon: domains   : %d\n", args.initial_domain);
        fprintf(stderr, "radeon: flags     : %d\n", args.flags);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
    bo->initial_domain = rdesc->initial_domains;
    pipe_mutex_init(bo->map_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon: size      : %d bytes\n", size);
            fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon: domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon: va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(mgr->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(mgr->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(mgr->bo_handles_mutex);
    }

    if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, 4096);
    else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, 4096);

    return &bo->base;
}
static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}
/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer *)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}
static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_names);
    util_hash_table_destroy(mgr->bo_handles);
    util_hash_table_destroy(mgr->bo_vas);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    pipe_mutex_destroy(mgr->bo_va_mutex);
    FREE(mgr);
}
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
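/* The keys stored in bo_names, bo_handles and bo_vas are integers (GEM
 * names, GEM handles, virtual addresses) cast to pointers, so an identity
 * hash and a plain inequality compare are sufficient. Illustrative use:
 *
 *    util_hash_table_set(ht, (void*)(uintptr_t)handle, bo);
 *    bo = util_hash_table_get(ht, (void*)(uintptr_t)handle);
 */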
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_names = util_hash_table_create(handle_hash, handle_compare);
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    mgr->bo_vas = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    pipe_mutex_init(mgr->bo_va_mutex);

    mgr->va = rws->info.r600_virtual_address;
    mgr->va_offset = rws->va_start;
    list_inithead(&mgr->va_holes);

    return &mgr->base;
}
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}
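/* The two helpers above are inverses over the encodable values, e.g.
 * eg_tile_split_rev(eg_tile_split(2)) == 2, where hardware encoding 2
 * corresponds to a 256-byte tile split. Out-of-range byte values fall
 * back to the 1024-byte encoding via the "default" case.
 */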
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea,
                                 boolean *scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_TILING_MICRO)
        *microtiled = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        *microtiled = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_TILING_MACRO)
        *macrotiled = RADEON_LAYOUT_TILED;
    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
    if (scanout)
        *scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea,
                                 uint32_t pitch,
                                 boolean scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0, NULL);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MICRO;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MACRO;

    args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (stencil_tile_split &
                          RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
        RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
    args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    if (bo->rws->gen >= DRV_SI && !scanout)
        args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}
static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle *)get_radeon_bo(_buf);
}
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        boolean use_reusable_pool,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Additional criteria for the cache manager. */
    desc.base.usage = domain;
    desc.initial_domains = domain;
    desc.flags = flags;

    /* Assign a buffer manager. */
    if (use_reusable_pool) {
        if (domain == RADEON_DOMAIN_VRAM) {
            if (flags & RADEON_FLAG_GTT_WC)
                provider = ws->cman_vram_gtt_wc;
            else
                provider = ws->cman_vram;
        } else if (flags & RADEON_FLAG_GTT_WC) {
            provider = ws->cman_gtt_wc;
        } else {
            provider = ws->cman_gtt;
        }
    } else {
        provider = ws->kman;
    }

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    pipe_mutex_lock(mgr->bo_handles_mutex);
    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)get_radeon_bo(buffer)->handle, buffer);
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    return (struct pb_buffer *)buffer;
}
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    int r;
    unsigned handle;
    uint64_t size = 0;

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(mgr->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg = {};
        memset(&open_arg, 0, sizeof(open_arg));
        /* Open the BO. */
        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->flink_name = whandle->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough, but
         * it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (bo->flink_name)
        util_hash_table_set(mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (mgr->va && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(mgr->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(mgr->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(mgr->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, 4096);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, 4096);

    return (struct pb_buffer *)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}
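/* Usage sketch (illustrative only): importing the same flink name twice
 * must yield the same struct radeon_bo, which is exactly what the
 * bo_names lookup above guarantees:
 *
 *    struct pb_buffer *a = rws->buffer_from_handle(rws, &wh, &stride);
 *    struct pb_buffer *b = rws->buffer_from_handle(rws, &wh, &stride);
 *    assert(get_radeon_bo(a) == get_radeon_bo(b));   refcount is now 2
 *
 * Returning two distinct BOs for one handle could deadlock the kernel
 * when both are relocated in a single CS.
 */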
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flink_name) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flink_name = flink.name;

            pipe_mutex_lock(bo->mgr->bo_handles_mutex);
            util_hash_table_set(bo->mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
            pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
        }
        whandle->handle = bo->flink_name;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(bo->rws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return FALSE;
    }

    whandle->stride = stride;
    return TRUE;
}
static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo *)buf)->va;
}
void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}