/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <inttypes.h>
static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    return (struct radeon_bo*)bo;
}
struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};
static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
    struct drm_radeon_gem_busy args = {0};

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}
static void radeon_bo_wait_idle(struct radeon_bo *bo)
{
    struct drm_radeon_gem_wait_idle args = {0};

    args.handle = bo->handle;
    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                           &args, sizeof(args)) == -EBUSY);
}
static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    int64_t abs_timeout;

    /* No timeout. Just query. */
    if (!timeout)
        return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);

    abs_timeout = os_time_get_absolute_timeout(timeout);

    /* Wait if any ioctl is being submitted with this buffer. */
    if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
        return false;

    /* Infinite timeout. */
    if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
        radeon_bo_wait_idle(bo);
        return true;
    }

    /* Other timeouts need to be emulated with a loop. */
    while (radeon_bo_is_busy(bo)) {
        if (os_time_get_nano() >= abs_timeout)
            return false;
        os_time_sleep(10);
    }

    return true;
}
static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Zero domains the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}
static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct pb_buffer *buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                        &args, sizeof(args));

    /* GEM domains and winsys domains are defined the same. */
    return get_valid_domain(args.value);
}
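
/* Virtual address space management: new allocations are carved out of a
 * growing top offset (va_offset), and freed ranges are kept in a list of
 * holes (va_holes) that radeon_bomgr_find_va() reuses first-fit before
 * growing the top. */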
static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
                                     uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    /* All VM address space holes will implicitly start aligned to the
     * size alignment, so we don't need to sanitize the alignment here
     */
    size = align(size, rws->info.gart_page_size);

    pipe_mutex_lock(rws->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
    }

    offset = rws->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &rws->va_holes);
    }
    offset += waste;
    rws->va_offset += size + waste;
    pipe_mutex_unlock(rws->bo_va_mutex);
    return offset;
}
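
/* Return a VA range to the allocator: either shrink the top offset when the
 * range ends at it (also swallowing an adjacent uppermost hole), or insert
 * the range into the hole list, merging with adjacent holes where possible. */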
static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
                                 uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    size = align(size, rws->info.gart_page_size);

    pipe_mutex_lock(rws->bo_va_mutex);
    if ((va + size) == rws->va_offset) {
        rws->va_offset = va;
        /* Delete uppermost hole if it reaches the new top */
        if (!LIST_IS_EMPTY(&rws->va_holes)) {
            hole = container_of(rws->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                rws->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&rws->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &rws->va_holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &rws->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &rws->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME on allocation failure we just lose virtual address space
         * maybe print a warning
         */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(rws->bo_va_mutex);
}
void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_winsys *rws = bo->rws;
    struct drm_gem_close args;

    assert(bo->handle && "must not be called for slab entries");

    memset(&args, 0, sizeof(args));

    pipe_mutex_lock(rws->bo_handles_mutex);
    util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->flink_name) {
        util_hash_table_remove(rws->bo_names,
                               (void*)(uintptr_t)bo->flink_name);
    }
    pipe_mutex_unlock(rws->bo_handles_mutex);

    if (bo->u.real.ptr)
        os_munmap(bo->u.real.ptr, bo->base.size);

    if (rws->info.has_virtual_memory) {
        if (rws->va_unmap_working) {
            struct drm_radeon_gem_va va;

            va.handle = bo->handle;
            va.vm_id = 0;
            va.operation = RADEON_VA_UNMAP;
            va.flags = RADEON_VM_PAGE_READABLE |
                       RADEON_VM_PAGE_WRITEABLE |
                       RADEON_VM_PAGE_SNOOPED;
            va.offset = bo->va;

            if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
                                    sizeof(va)) != 0 &&
                va.operation == RADEON_VA_RESULT_ERROR) {
                fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
                fprintf(stderr, "radeon: size      : %"PRIu64" bytes\n", bo->base.size);
                fprintf(stderr, "radeon: va        : 0x%"PRIx64"\n", bo->va);
            }
        }

        radeon_bomgr_free_va(rws, bo->va, bo->base.size);
    }

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    pipe_mutex_destroy(bo->u.real.map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        rws->allocated_gtt -= align(bo->base.size, rws->info.gart_page_size);

    if (bo->u.real.map_count >= 1) {
        if (bo->initial_domain & RADEON_DOMAIN_VRAM)
            bo->rws->mapped_vram -= bo->base.size;
        else
            bo->rws->mapped_gtt -= bo->base.size;
    }

    FREE(bo);
}
static void radeon_bo_destroy_or_cache(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    assert(bo->handle && "must not be called for slab entries");

    if (bo->u.real.use_reusable_pool)
        pb_cache_add_buffer(&bo->u.real.cache_entry);
    else
        radeon_bo_destroy(_buf);
}
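
/* Map a BO for CPU access. Handles three cases: user-pointer BOs (return the
 * user pointer directly), slab entries (map the backing real BO at an
 * offset), and real BOs (reference-counted GEM mmap guarded by map_mutex). */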
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;
    unsigned offset;

    /* If the buffer is created from user memory, return the user pointer. */
    if (bo->user_ptr)
        return bo->user_ptr;

    if (bo->handle) {
        offset = 0;
    } else {
        offset = bo->va - bo->u.slab.real->va;
        bo = bo->u.slab.real;
    }

    /* Map the buffer. */
    pipe_mutex_lock(bo->u.real.map_mutex);
    /* Return the pointer if it's already mapped. */
    if (bo->u.real.ptr) {
        bo->u.real.map_count++;
        pipe_mutex_unlock(bo->u.real.map_mutex);
        return (uint8_t*)bo->u.real.ptr + offset;
    }
    args.handle = bo->handle;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->u.real.map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        /* Clear the cache and try again. */
        pb_cache_release_all_buffers(&bo->rws->bo_cache);

        ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                      bo->rws->fd, args.addr_ptr);
        if (ptr == MAP_FAILED) {
            pipe_mutex_unlock(bo->u.real.map_mutex);
            fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
            return NULL;
        }
    }
    bo->u.real.ptr = ptr;
    bo->u.real.map_count = 1;

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->mapped_vram += bo->base.size;
    else
        bo->rws->mapped_gtt += bo->base.size;

    pipe_mutex_unlock(bo->u.real.map_mutex);
    return (uint8_t*)bo->u.real.ptr + offset;
}
static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0, NULL);
                }
                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data, 0, NULL);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_READWRITE);
            }

            bo->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}
static void radeon_bo_unmap(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)_buf;

    if (bo->user_ptr)
        return;    /* it's not been mapped */

    if (!bo->handle)
        bo = bo->u.slab.real;

    pipe_mutex_lock(bo->u.real.map_mutex);
    if (!bo->u.real.ptr) {
        pipe_mutex_unlock(bo->u.real.map_mutex);
        return; /* it's not been mapped */
    }

    assert(bo->u.real.map_count);
    if (--bo->u.real.map_count) {
        pipe_mutex_unlock(bo->u.real.map_mutex);
        return; /* it's been mapped multiple times */
    }

    os_munmap(bo->u.real.ptr, bo->base.size);
    bo->u.real.ptr = NULL;

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->mapped_vram -= bo->base.size;
    else
        bo->rws->mapped_gtt -= bo->base.size;

    pipe_mutex_unlock(bo->u.real.map_mutex);
}
static const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy_or_cache
    /* other functions are never called */
};
#ifndef RADEON_GEM_GTT_WC
#define RADEON_GEM_GTT_WC           (1 << 2)
#endif
#ifndef RADEON_GEM_CPU_ACCESS
/* BO is expected to be accessed by the CPU */
#define RADEON_GEM_CPU_ACCESS       (1 << 3)
#endif
#ifndef RADEON_GEM_NO_CPU_ACCESS
/* CPU access is not expected to work for this BO */
#define RADEON_GEM_NO_CPU_ACCESS    (1 << 4)
#endif
static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
                                          unsigned size, unsigned alignment,
                                          unsigned usage,
                                          unsigned initial_domains,
                                          unsigned flags,
                                          unsigned pb_cache_bucket)
{
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    int r;

    memset(&args, 0, sizeof(args));

    assert(initial_domains);
    assert((initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = alignment;
    args.initial_domain = initial_domains;

    if (flags & RADEON_FLAG_GTT_WC)
        args.flags |= RADEON_GEM_GTT_WC;
    if (flags & RADEON_FLAG_CPU_ACCESS)
        args.flags |= RADEON_GEM_CPU_ACCESS;
    if (flags & RADEON_FLAG_NO_CPU_ACCESS)
        args.flags |= RADEON_GEM_NO_CPU_ACCESS;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon: size      : %u bytes\n", size);
        fprintf(stderr, "radeon: alignment : %u bytes\n", alignment);
        fprintf(stderr, "radeon: domains   : %u\n", args.initial_domain);
        fprintf(stderr, "radeon: flags     : %u\n", args.flags);
        return NULL;
    }

    assert(args.handle != 0);

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = alignment;
    bo->base.usage = usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = rws;
    bo->handle = args.handle;
    bo->initial_domain = initial_domains;
    pipe_mutex_init(bo->u.real.map_mutex);
    pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                        pb_cache_bucket);

    if (rws->info.has_virtual_memory) {
        struct drm_radeon_gem_va va;
        unsigned va_gap_size;

        va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
        bo->va = radeon_bomgr_find_va(rws, size + va_gap_size, alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon: size      : %d bytes\n", size);
            fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
            fprintf(stderr, "radeon: domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon: va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(rws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(rws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return radeon_bo(b);
        }

        util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(rws->bo_handles_mutex);
    }

    if (initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, rws->info.gart_page_size);
    else if (initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, rws->info.gart_page_size);

    return bo;
}
bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo))
        return false;

    return radeon_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}
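
/* The kernel stores the Evergreen tile-split setting as a 0-6 code in the
 * tiling flags; the next two helpers convert between that code and the
 * split size in bytes (64..4096). */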
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}
static void radeon_bo_get_metadata(struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    assert(bo->handle && "must not be called for slab entries");

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    md->microtile = RADEON_LAYOUT_LINEAR;
    md->macrotile = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_TILING_MICRO)
        md->microtile = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        md->microtile = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_TILING_MACRO)
        md->macrotile = RADEON_LAYOUT_TILED;

    md->bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
    md->bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
    md->tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
    md->mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
    md->tile_split = eg_tile_split(md->tile_split);
    md->scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}
static void radeon_bo_set_metadata(struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    assert(bo->handle && "must not be called for slab entries");

    memset(&args, 0, sizeof(args));

    os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);

    if (md->microtile == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MICRO;
    else if (md->microtile == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;

    if (md->macrotile == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MACRO;

    args.tiling_flags |= (md->bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (md->bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (md->tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(md->tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (md->mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    if (bo->rws->gen >= DRV_SI && !md->scanout)
        args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

    args.handle = bo->handle;
    args.pitch = md->stride;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}
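
/* buffer_create entry point. The requested domain and flags are folded into
 * a "usage" bit mask and a pb_cache bucket index so that only compatible
 * buffers are reused from the cache; allocation falls back to emptying the
 * cache once before failing. */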
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        uint64_t size,
                        unsigned alignment,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    unsigned usage = 0, pb_cache_bucket;

    /* Only 32-bit sizes are supported. */
    if (size > UINT_MAX)
        return NULL;

    /* This flag is irrelevant for the cache. */
    flags &= ~RADEON_FLAG_HANDLE;

    /* Align size to page size. This is the minimum alignment for normal
     * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
     * like constant/uniform buffers, can benefit from better and more reuse.
     */
    size = align(size, ws->info.gart_page_size);
    alignment = align(alignment, ws->info.gart_page_size);

    /* Only set one usage bit each for domains and flags, or the cache manager
     * might consider different sets of domains / flags compatible
     */
    if (domain == RADEON_DOMAIN_VRAM_GTT)
        usage = 1 << 2;
    else
        usage = (unsigned)domain >> 1;
    assert(flags < sizeof(usage) * 8 - 3);
    usage |= 1 << (flags + 3);

    /* Determine the pb_cache bucket for minimizing pb_cache misses. */
    pb_cache_bucket = 0;
    if (size <= 4096) /* small buffers */
        pb_cache_bucket += 1;
    if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
        pb_cache_bucket += 2;
    if (flags == RADEON_FLAG_GTT_WC) /* WC */
        pb_cache_bucket += 4;
    assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));

    bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
                                           usage, pb_cache_bucket));
    if (bo)
        return &bo->base;

    bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
                          pb_cache_bucket);
    if (!bo) {
        /* Clear the cache and try again. */
        pb_cache_release_all_buffers(&ws->bo_cache);
        bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
                              pb_cache_bucket);
        if (!bo)
            return NULL;
    }

    bo->u.real.use_reusable_pool = true;

    pipe_mutex_lock(ws->bo_handles_mutex);
    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
    pipe_mutex_unlock(ws->bo_handles_mutex);

    return &bo->base;
}
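
/* buffer_from_ptr entry point: wrap anonymous user memory in a GEM userptr
 * BO (GTT domain) and, when virtual memory is enabled, assign it a GPU VA. */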
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
                                                   void *pointer, uint64_t size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct drm_radeon_gem_userptr args;
    struct radeon_bo *bo;
    int r;

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    memset(&args, 0, sizeof(args));
    args.addr = (uintptr_t)pointer;
    args.size = align(size, ws->info.gart_page_size);
    args.flags = RADEON_GEM_USERPTR_ANONONLY |
        RADEON_GEM_USERPTR_VALIDATE |
        RADEON_GEM_USERPTR_REGISTER;
    if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
                            &args, sizeof(args))) {
        FREE(bo);
        return NULL;
    }

    assert(args.handle != 0);

    pipe_mutex_lock(ws->bo_handles_mutex);

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->handle = args.handle;
    bo->base.alignment = 0;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    bo->user_ptr = pointer;
    bo->initial_domain = RADEON_DOMAIN_GTT;
    pipe_mutex_init(bo->u.real.map_mutex);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

    pipe_mutex_unlock(ws->bo_handles_mutex);

    if (ws->info.has_virtual_memory) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(ws->bo_handles_mutex);
    }

    ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

    return (struct pb_buffer*)bo;
}
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride,
                                                      unsigned *offset)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    int r;
    unsigned handle;
    uint64_t size = 0;

    if (!offset && whandle->offset != 0) {
        fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
                whandle->offset);
        return NULL;
    }

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(ws->bo_handles_mutex);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg = {};
        memset(&open_arg, 0, sizeof(open_arg));

        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->flink_name = whandle->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough, but
         * it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    pipe_mutex_init(bo->u.real.map_mutex);

    if (bo->flink_name)
        util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    pipe_mutex_unlock(ws->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (offset)
        *offset = whandle->offset;

    if (ws->info.has_virtual_memory && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(ws->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(ws->bo_handles_mutex);
    return NULL;
}
static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                        unsigned stride, unsigned offset,
                                        unsigned slice_size,
                                        struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = radeon_bo(buffer);
    struct radeon_drm_winsys *ws = bo->rws;

    if (!bo->handle) {
        offset += bo->va - bo->u.slab.real->va;
        bo = bo->u.slab.real;
    }

    memset(&flink, 0, sizeof(flink));

    bo->u.real.use_reusable_pool = false;

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flink_name) {
            flink.handle = bo->handle;

            if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return false;
            }

            bo->flink_name = flink.name;

            pipe_mutex_lock(ws->bo_handles_mutex);
            util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
            pipe_mutex_unlock(ws->bo_handles_mutex);
        }
        whandle->handle = bo->flink_name;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return false;
    }

    whandle->stride = stride;
    whandle->offset = offset;
    whandle->offset += slice_size * whandle->layer;

    return true;
}
static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->user_ptr != NULL;
}
static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->va;
}
void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_set_metadata = radeon_bo_set_metadata;
    ws->base.buffer_get_metadata = radeon_bo_get_metadata;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
    ws->base.buffer_is_user_ptr = radeon_winsys_bo_is_user_ptr;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}