/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
static const struct pb_vtbl radeon_bo_vtbl;

static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}
struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;
};

static inline struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;

        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}
static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
    struct drm_radeon_gem_busy args = {0};

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}
static void radeon_bo_wait_idle(struct radeon_bo *bo)
{
    struct drm_radeon_gem_wait_idle args = {0};

    args.handle = bo->handle;
    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                           &args, sizeof(args)) == -EBUSY);
}
static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    int64_t abs_timeout;

    /* No timeout. Just query. */
    if (timeout == 0)
        return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);

    abs_timeout = os_time_get_absolute_timeout(timeout);

    /* Wait if any ioctl is being submitted with this buffer. */
    if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
        return false;

    /* Infinite timeout. */
    if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
        radeon_bo_wait_idle(bo);
        return true;
    }

    /* Other timeouts need to be emulated with a loop. */
    while (radeon_bo_is_busy(bo)) {
        if (os_time_get_nano() >= abs_timeout)
            return false;
        os_time_sleep(10);
    }

    return true;
}
static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Zero domains the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}
static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct radeon_winsys_cs_handle *buf)
{
    struct radeon_bo *bo = (struct radeon_bo *)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                        &args, sizeof(args));

    /* GEM domains and winsys domains are defined the same. */
    return get_valid_domain(args.value);
}
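/* Virtual address range allocator: scan the list of free holes first-fit,
 * accounting for alignment waste, and fall back to growing the top of the
 * address space (rws->va_offset) when no hole is large enough. */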
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    /* All VM address space holes will implicitly start aligned to the
     * size alignment, so we don't need to sanitize the alignment here
     */
    size = align(size, rws->size_align);

    pipe_mutex_lock(rws->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
    }

    offset = rws->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &rws->va_holes);
    }
    offset += waste;
    rws->va_offset += size + waste;
    pipe_mutex_unlock(rws->bo_va_mutex);
    return offset;
}
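/* Return a virtual address range to the allocator: shrink the top of the
 * address space if the range ends there, otherwise merge it into adjacent
 * holes or create a new hole covering it. */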
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo_va_hole *hole;

    size = align(size, rws->size_align);

    pipe_mutex_lock(rws->bo_va_mutex);
    if ((va + size) == rws->va_offset) {
        rws->va_offset = va;
        /* Delete uppermost hole if it reaches the new top */
        if (!LIST_IS_EMPTY(&rws->va_holes)) {
            hole = container_of(rws->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                rws->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&rws->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &rws->va_holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &rws->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &rws->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME on allocation failure we just lose virtual address space
         * maybe print a warning
         */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(rws->bo_va_mutex);
}
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_winsys *rws = bo->rws;
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    pipe_mutex_lock(rws->bo_handles_mutex);
    util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->flink_name) {
        util_hash_table_remove(rws->bo_names,
                               (void*)(uintptr_t)bo->flink_name);
    }
    pipe_mutex_unlock(rws->bo_handles_mutex);

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    if (rws->info.r600_virtual_address) {
        if (rws->va_unmap_working) {
            struct drm_radeon_gem_va va;

            va.handle = bo->handle;
            va.vm_id = 0;
            va.operation = RADEON_VA_UNMAP;
            va.flags = RADEON_VM_PAGE_READABLE |
                       RADEON_VM_PAGE_WRITEABLE |
                       RADEON_VM_PAGE_SNOOPED;
            va.offset = bo->va;

            if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
                                    sizeof(va)) != 0 &&
                va.operation == RADEON_VA_RESULT_ERROR) {
                fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
                fprintf(stderr, "radeon:    size      : %d bytes\n", bo->base.size);
                fprintf(stderr, "radeon:    va        : 0x%016llx\n", (unsigned long long)bo->va);
            }
        }

        radeon_bomgr_free_va(mgr, bo->va, bo->base.size);
    }

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    pipe_mutex_destroy(bo->map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        rws->allocated_vram -= align(bo->base.size, rws->size_align);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        rws->allocated_gtt -= align(bo->base.size, rws->size_align);
    FREE(bo);
}
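/* CPU-map a buffer. User-pointer BOs already have a CPU address; otherwise
 * ask the kernel for an mmap offset via DRM_RADEON_GEM_MMAP and mmap it on
 * the DRM fd. Mappings are reference-counted under map_mutex. */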
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* If the buffer is created from user memory, return the user pointer. */
    if (bo->user_ptr)
        return bo->user_ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped. */
    if (bo->ptr) {
        bo->map_count++;
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    bo->map_count = 1;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}
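/* Map with synchronization. Unless PIPE_TRANSFER_UNSYNCHRONIZED is set, any
 * command stream referencing the buffer is flushed and the GPU is waited on.
 * With PIPE_TRANSFER_DONTBLOCK the flush is asynchronous and NULL is returned
 * if the buffer is still busy. */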
static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo *)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs *)rcs;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0, NULL);
                }
                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data, 0, NULL);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_READWRITE);
            }

            bo->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}
static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
    struct radeon_bo *bo = (struct radeon_bo *)_buf;

    if (bo->user_ptr)
        return;

    pipe_mutex_lock(bo->map_mutex);
    if (!bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return; /* it's not been mapped */
    }

    assert(bo->map_count);
    if (--bo->map_count) {
        pipe_mutex_unlock(bo->map_mutex);
        return; /* it's been mapped multiple times */
    }

    os_munmap(bo->ptr, bo->base.size);
    bo->ptr = NULL;
    pipe_mutex_unlock(bo->map_mutex);
}
static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

static const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    NULL, /* never called */
    NULL, /* never called */
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};
#ifndef RADEON_GEM_GTT_WC
#define RADEON_GEM_GTT_WC        (1 << 2)
#endif
#ifndef RADEON_GEM_CPU_ACCESS
/* BO is expected to be accessed by the CPU */
#define RADEON_GEM_CPU_ACCESS    (1 << 3)
#endif
#ifndef RADEON_GEM_NO_CPU_ACCESS
/* CPU access is not expected to work for this BO */
#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
#endif
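/* Allocate a new BO with DRM_RADEON_GEM_CREATE and, if the kernel exposes
 * virtual memory, assign a VA range and map it with DRM_RADEON_GEM_VA.
 * If the kernel reports the VA as already mapped, the existing BO is reused. */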
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc *)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (rdesc->flags & RADEON_FLAG_GTT_WC)
        args.flags |= RADEON_GEM_GTT_WC;
    if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
        args.flags |= RADEON_GEM_CPU_ACCESS;
    if (rdesc->flags & RADEON_FLAG_NO_CPU_ACCESS)
        args.flags |= RADEON_GEM_NO_CPU_ACCESS;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        fprintf(stderr, "radeon:    flags     : %d\n", args.flags);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->initial_domain = rdesc->initial_domains;
    pipe_mutex_init(bo->map_mutex);

    if (rws->info.r600_virtual_address) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon:    size      : %d bytes\n", size);
            fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon:    va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(rws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(rws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(rws->bo_handles_mutex);
    }

    if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, rws->size_align);
    else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, rws->size_align);

    return &bo->base;
}
static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}
/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (!radeon_bo_wait((struct pb_buffer*)bo, 0, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}
static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    FREE(_mgr);
}
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    return &mgr->base;
}
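/* Convert between the 0-6 tile-split encoding stored in the tiling flags and
 * the tile split size in bytes (64..4096). */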
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea,
                                 bool *scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_TILING_MICRO)
        *microtiled = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        *microtiled = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_TILING_MACRO)
        *macrotiled = RADEON_LAYOUT_TILED;
    if (bankw && tile_split && stencil_tile_split && mtilea && tile_split) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
    if (scanout)
        *scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned pipe_config,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea, unsigned num_banks,
                                 uint32_t pitch,
                                 bool scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0, NULL);
    }

    os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MICRO;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MACRO;

    args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (stencil_tile_split &
                          RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
        RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
    args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    if (bo->rws->gen >= DRV_SI && !scanout)
        args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}
static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle *)get_radeon_bo(_buf);
}
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        boolean use_reusable_pool,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Align size to page size. This is the minimum alignment for normal
     * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
     * like constant/uniform buffers, can benefit from better and more reuse.
     */
    size = align(size, ws->size_align);

    /* Only set one usage bit each for domains and flags, or the cache manager
     * might consider different sets of domains / flags compatible
     */
    if (domain == RADEON_DOMAIN_VRAM_GTT)
        desc.base.usage = 1 << 2;
    else
        desc.base.usage = domain >> 1;
    assert(flags < sizeof(desc.base.usage) * 8 - 3);
    desc.base.usage |= 1 << (flags + 3);

    desc.initial_domains = domain;
    desc.flags = flags;

    /* Assign a buffer manager. */
    if (use_reusable_pool)
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    pipe_mutex_lock(ws->bo_handles_mutex);
    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)get_radeon_bo(buffer)->handle, buffer);
    pipe_mutex_unlock(ws->bo_handles_mutex);

    return (struct pb_buffer*)buffer;
}
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
                                                   void *pointer, unsigned size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_radeon_gem_userptr args;
    struct radeon_bo *bo;
    int r;

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    memset(&args, 0, sizeof(args));
    args.addr = (uintptr_t)pointer;
    args.size = align(size, sysconf(_SC_PAGE_SIZE));
    args.flags = RADEON_GEM_USERPTR_ANONONLY |
        RADEON_GEM_USERPTR_VALIDATE |
        RADEON_GEM_USERPTR_REGISTER;
    if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
                            &args, sizeof(args))) {
        FREE(bo);
        return NULL;
    }

    pipe_mutex_lock(ws->bo_handles_mutex);

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->handle = args.handle;
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->user_ptr = pointer;
    bo->initial_domain = RADEON_DOMAIN_GTT;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

    pipe_mutex_unlock(ws->bo_handles_mutex);

    if (ws->info.r600_virtual_address) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(ws->bo_handles_mutex);
    }

    ws->allocated_gtt += align(bo->base.size, ws->size_align);

    return (struct pb_buffer*)bo;
}
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    int r;
    unsigned handle;
    uint64_t size = 0;

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(ws->bo_handles_mutex);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg = {};
        memset(&open_arg, 0, sizeof(open_arg));

        /* Open the BO. */
        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->flink_name = whandle->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough, but
         * it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    pipe_mutex_init(bo->map_mutex);

    if (bo->flink_name)
        util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    pipe_mutex_unlock(ws->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (ws->info.r600_virtual_address && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(ws->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, ws->size_align);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, ws->size_align);

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(ws->bo_handles_mutex);
    return NULL;
}
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);
    struct radeon_drm_winsys *ws = bo->rws;

    memset(&flink, 0, sizeof(flink));

    if ((void*)bo != (void*)buffer)
        pb_cache_manager_remove_buffer(buffer);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flink_name) {
            flink.handle = bo->handle;

            if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flink_name = flink.name;

            pipe_mutex_lock(ws->bo_handles_mutex);
            util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
            pipe_mutex_unlock(ws->bo_handles_mutex);
        }
        whandle->handle = bo->flink_name;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return FALSE;
    }

    whandle->stride = stride;
    return TRUE;
}
static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo*)buf)->va;
}
void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}