/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    return (struct radeon_bo*)bo;
}

struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
    struct drm_radeon_gem_busy args = {0};

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}

static void radeon_bo_wait_idle(struct radeon_bo *bo)
{
    struct drm_radeon_gem_wait_idle args = {0};

    args.handle = bo->handle;
    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                           &args, sizeof(args)) == -EBUSY);
}

static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    int64_t abs_timeout;

    /* No timeout. Just query. */
    if (timeout == 0)
        return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);

    abs_timeout = os_time_get_absolute_timeout(timeout);

    /* Wait if any ioctl is being submitted with this buffer. */
    if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
        return false;

    /* Infinite timeout. */
    if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
        radeon_bo_wait_idle(bo);
        return true;
    }

    /* Other timeouts need to be emulated with a loop. */
    while (radeon_bo_is_busy(bo)) {
        if (os_time_get_nano() >= abs_timeout)
            return false;
        os_time_sleep(10);
    }

    return true;
}

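/* Illustrative usage (hypothetical caller, not part of this file): a zero
 * timeout turns radeon_bo_wait() into a pure busy query, e.g.
 *
 *    if (radeon_bo_wait(buf, 0, RADEON_USAGE_READWRITE)) {
 *       // the BO is idle; the CPU may access it immediately
 *    }
 *
 * while PIPE_TIMEOUT_INFINITE defers to the kernel's blocking
 * DRM_RADEON_GEM_WAIT_IDLE path above.
 */
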
static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Zero domains the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}

static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct pb_buffer *buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                        &args, sizeof(args));

    /* GEM domains and winsys domains are defined the same. */
    return get_valid_domain(args.value);
}

static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
                                     uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    /* All VM address space holes will implicitly start aligned to the
     * size alignment, so we don't need to sanitize the alignment here
     */
    size = align(size, rws->size_align);

    pipe_mutex_lock(rws->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
    }

    offset = rws->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &rws->va_holes);
    }
    offset += waste;
    rws->va_offset += size + waste;
    pipe_mutex_unlock(rws->bo_va_mutex);
    return offset;
}

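/* Worked example for the first-fit logic above (illustrative numbers):
 * with a hole at offset 0x4000 of size 0x20000, a request of size 0x3000
 * and alignment 0x10000 computes waste = 0x10000 - (0x4000 % 0x10000)
 * = 0xC000 and returns offset 0x10000, leaving two holes behind: the
 * 0xC000 bytes of alignment padding in front of the allocation and the
 * remainder of the original hole above it.
 */
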
static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
                                 uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    size = align(size, rws->size_align);

    pipe_mutex_lock(rws->bo_va_mutex);
    if ((va + size) == rws->va_offset) {
        rws->va_offset = va;
        /* Delete uppermost hole if it reaches the new top */
        if (!LIST_IS_EMPTY(&rws->va_holes)) {
            hole = container_of(rws->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                rws->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&rws->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &rws->va_holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &rws->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &rws->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME on allocation failure we just lose virtual address space
         * maybe print a warning
         */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(rws->bo_va_mutex);
}

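/* Example of the merge cases above (illustrative addresses): freeing
 * [0x20000, 0x30000) while holes [0x10000, 0x20000) and [0x30000, 0x40000)
 * exist first grows the upper hole downward to 0x20000, then absorbs it
 * into the lower one, leaving a single hole [0x10000, 0x40000).
 */
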
void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_winsys *rws = bo->rws;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    pipe_mutex_lock(rws->bo_handles_mutex);
    util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->flink_name) {
        util_hash_table_remove(rws->bo_names,
                               (void*)(uintptr_t)bo->flink_name);
    }
    pipe_mutex_unlock(rws->bo_handles_mutex);

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    if (rws->info.r600_virtual_address) {
        if (rws->va_unmap_working) {
            struct drm_radeon_gem_va va;

            va.handle = bo->handle;
            va.vm_id = 0;
            va.operation = RADEON_VA_UNMAP;
            va.flags = RADEON_VM_PAGE_READABLE |
                       RADEON_VM_PAGE_WRITEABLE |
                       RADEON_VM_PAGE_SNOOPED;
            va.offset = bo->va;

            if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
                                    sizeof(va)) != 0 &&
                va.operation == RADEON_VA_RESULT_ERROR) {
                fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
                fprintf(stderr, "radeon: size      : %d bytes\n", bo->base.size);
                fprintf(stderr, "radeon: va        : 0x%016llx\n", (unsigned long long)bo->va);
            }
        }

        radeon_bomgr_free_va(rws, bo->va, bo->base.size);
    }

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    pipe_mutex_destroy(bo->map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        rws->allocated_vram -= align(bo->base.size, rws->size_align);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        rws->allocated_gtt -= align(bo->base.size, rws->size_align);

    FREE(bo);
}

static void radeon_bo_destroy_or_cache(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (bo->use_reusable_pool)
        pb_cache_add_buffer(&bo->cache_entry);
    else
        radeon_bo_destroy(_buf);
}

void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* If the buffer is created from user memory, return the user pointer. */
    if (bo->user_ptr)
        return bo->user_ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped. */
    if (bo->ptr) {
        bo->map_count++;
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        /* Clear the cache and try again. */
        pb_cache_release_all_buffers(&bo->rws->bo_cache);

        ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                      bo->rws->fd, args.addr_ptr);
        if (ptr == MAP_FAILED) {
            pipe_mutex_unlock(bo->map_mutex);
            fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
            return NULL;
        }
    }
    bo->ptr = ptr;
    bo->map_count = 1;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}

static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0, NULL);
                }
                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data, 0, NULL);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_READWRITE);
            }

            bo->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}

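/* Illustrative caller sketch (hypothetical names, not from this file): a
 * driver uploading data through the winsys hook typically does
 *
 *    void *p = ws->buffer_map(buf, cs, PIPE_TRANSFER_WRITE);
 *    if (p) {
 *       memcpy(p, data, size);
 *       ws->buffer_unmap(buf);
 *    }
 *
 * Passing PIPE_TRANSFER_UNSYNCHRONIZED instead skips the flush-and-wait
 * logic above entirely.
 */
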
static void radeon_bo_unmap(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)_buf;

    if (bo->user_ptr)
        return; /* it's not been mapped */

    pipe_mutex_lock(bo->map_mutex);
    if (!bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return; /* it's not been mapped */
    }

    assert(bo->map_count);
    if (--bo->map_count) {
        pipe_mutex_unlock(bo->map_mutex);
        return; /* it's been mapped multiple times */
    }

    os_munmap(bo->ptr, bo->base.size);
    bo->ptr = NULL;
    pipe_mutex_unlock(bo->map_mutex);
}

static const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy_or_cache
    /* other functions are never called */
};

#ifndef RADEON_GEM_GTT_WC
#define RADEON_GEM_GTT_WC           (1 << 2)
#endif
#ifndef RADEON_GEM_CPU_ACCESS
/* BO is expected to be accessed by the CPU */
#define RADEON_GEM_CPU_ACCESS       (1 << 3)
#endif
#ifndef RADEON_GEM_NO_CPU_ACCESS
/* CPU access is not expected to work for this BO */
#define RADEON_GEM_NO_CPU_ACCESS    (1 << 4)
#endif

static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
                                          unsigned size, unsigned alignment,
                                          unsigned usage,
                                          unsigned initial_domains,
                                          unsigned flags)
{
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    int r;

    memset(&args, 0, sizeof(args));

    assert(initial_domains);
    assert((initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = alignment;
    args.initial_domain = initial_domains;
    args.flags = 0;

    if (flags & RADEON_FLAG_GTT_WC)
        args.flags |= RADEON_GEM_GTT_WC;
    if (flags & RADEON_FLAG_CPU_ACCESS)
        args.flags |= RADEON_GEM_CPU_ACCESS;
    if (flags & RADEON_FLAG_NO_CPU_ACCESS)
        args.flags |= RADEON_GEM_NO_CPU_ACCESS;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon: size      : %d bytes\n", size);
        fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
        fprintf(stderr, "radeon: domains   : %d\n", args.initial_domain);
        fprintf(stderr, "radeon: flags     : %d\n", args.flags);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = alignment;
    bo->base.usage = usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = rws;
    bo->handle = args.handle;
    bo->va = 0;
    bo->initial_domain = initial_domains;
    pipe_mutex_init(bo->map_mutex);
    pb_cache_init_entry(&rws->bo_cache, &bo->cache_entry, &bo->base);

    if (rws->info.r600_virtual_address) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(rws, size, alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;

        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon: size      : %d bytes\n", size);
            fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
            fprintf(stderr, "radeon: domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon: va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(rws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(rws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return radeon_bo(b);
        }

        util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(rws->bo_handles_mutex);
    }

    if (initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, rws->size_align);
    else if (initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, rws->size_align);

    return bo;
}

bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo))
        return false;

    return radeon_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}

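/* The two helpers above translate between the tile-split field of the
 * tiling flags and a byte count, e.g. field value 3 <-> 512 bytes, so
 * eg_tile_split_rev(eg_tile_split(x)) == x for the in-range values 0..6.
 */
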
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea,
                                 bool *scanout)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_TILING_MICRO)
        *microtiled = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        *microtiled = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_TILING_MACRO)
        *macrotiled = RADEON_LAYOUT_TILED;
    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) &
                 RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) &
                 RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) &
                      RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split =
            (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) &
            RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) &
                  RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
    if (scanout)
        *scanout = bo->rws->gen >= DRV_SI &&
                   !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}

static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned pipe_config,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea, unsigned num_banks,
                                 unsigned pitch,
                                 bool scanout)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0, NULL);
    }

    os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MICRO;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MACRO;

    args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (stencil_tile_split &
                          RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
        RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
    args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    if (bo->rws->gen >= DRV_SI && !scanout)
        args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        boolean use_reusable_pool,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    unsigned usage = 0;

    /* Align size to page size. This is the minimum alignment for normal
     * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
     * like constant/uniform buffers, can benefit from better and more reuse.
     */
    size = align(size, ws->size_align);

    /* Only set one usage bit each for domains and flags, or the cache manager
     * might consider different sets of domains / flags compatible
     */
    if (domain == RADEON_DOMAIN_VRAM_GTT)
        usage = 1 << 2;
    else
        usage = (unsigned)domain >> 1;

    assert(flags < sizeof(usage) * 8 - 3);
    usage |= 1 << (flags + 3);

    if (use_reusable_pool) {
        bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size,
                                               alignment, usage));
        if (bo)
            return &bo->base;
    }

    bo = radeon_create_bo(ws, size, alignment, usage, domain, flags);
    if (!bo) {
        /* Clear the cache and try again. */
        pb_cache_release_all_buffers(&ws->bo_cache);
        bo = radeon_create_bo(ws, size, alignment, usage, domain, flags);
        if (!bo)
            return NULL;
    }

    bo->use_reusable_pool = use_reusable_pool;

    pipe_mutex_lock(ws->bo_handles_mutex);
    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
    pipe_mutex_unlock(ws->bo_handles_mutex);

    return &bo->base;
}

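/* Worked example for the usage key above, assuming the enum values from
 * radeon_winsys.h (RADEON_DOMAIN_VRAM = 4, RADEON_FLAG_GTT_WC = 1 << 0):
 * a VRAM + GTT_WC request yields usage = (4 >> 1) | (1 << (1 + 3)) = 0x12,
 * so the pb_cache only ever matches it against other VRAM + GTT_WC BOs.
 */
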
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
                                                   void *pointer, unsigned size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct drm_radeon_gem_userptr args;
    struct radeon_bo *bo;
    int r;

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    memset(&args, 0, sizeof(args));
    args.addr = (uintptr_t)pointer;
    args.size = align(size, sysconf(_SC_PAGE_SIZE));
    args.flags = RADEON_GEM_USERPTR_ANONONLY |
        RADEON_GEM_USERPTR_VALIDATE |
        RADEON_GEM_USERPTR_REGISTER;
    if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
                            &args, sizeof(args))) {
        FREE(bo);
        return NULL;
    }

    pipe_mutex_lock(ws->bo_handles_mutex);

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->handle = args.handle;
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    bo->user_ptr = pointer;
    bo->va = 0;
    bo->initial_domain = RADEON_DOMAIN_GTT;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

    pipe_mutex_unlock(ws->bo_handles_mutex);

    if (ws->info.r600_virtual_address) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(ws->bo_handles_mutex);
    }

    ws->allocated_gtt += align(bo->base.size, ws->size_align);

    return (struct pb_buffer*)bo;
}

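/* Note on the userptr path above: RADEON_GEM_USERPTR_ANONONLY restricts the
 * import to anonymous (non file-backed) memory, and the requested size is
 * rounded up to a whole number of pages before the ioctl, so the caller's
 * pointer is expected to stay valid and unmoved for the BO's lifetime.
 */
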
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    int r;
    unsigned handle;
    uint64_t size = 0;

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(ws->bo_handles_mutex);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg = {};
        memset(&open_arg, 0, sizeof(open_arg));

        /* Open the BO. */
        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->flink_name = whandle->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough, but
         * it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (bo->flink_name)
        util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    pipe_mutex_unlock(ws->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (ws->info.r600_virtual_address && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;

        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(ws->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, ws->size_align);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, ws->size_align);

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(ws->bo_handles_mutex);
    return NULL;
}

static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = radeon_bo(buffer);
    struct radeon_drm_winsys *ws = bo->rws;

    memset(&flink, 0, sizeof(flink));

    bo->use_reusable_pool = false;

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flink_name) {
            flink.handle = bo->handle;

            if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flink_name = flink.name;

            pipe_mutex_lock(ws->bo_handles_mutex);
            util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
            pipe_mutex_unlock(ws->bo_handles_mutex);
        }
        whandle->handle = bo->flink_name;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC,
                               (int*)&whandle->handle))
            return FALSE;
    }

    whandle->stride = stride;
    return TRUE;
}

static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->user_ptr != NULL;
}

static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
    ws->base.buffer_is_user_ptr = radeon_winsys_bo_is_user_ptr;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}