/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * The Iris buffer manager.
 *
 * XXX: write better comments
 * - main interface to GEM in the kernel
 */
#include <util/u_atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>

#include "common/gen_aux_map.h"
#include "common/gen_clflush.h"
#include "dev/gen_debug.h"
#include "common/gen_gem.h"
#include "dev/gen_device_info.h"
#include "main/macros.h"
#include "os/os_mman.h"
#include "util/debug.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/os_file.h"
#include "util/u_dynarray.h"

#include "iris_bufmgr.h"
#include "iris_context.h"

#include "drm-uapi/i915_drm.h"
/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
 * leaked.  All because it does not call VG(cli_free) from its
 * VG_USERREQ__FREELIKE_BLOCK handler.  Instead of treating the memory like
 * an allocation, we mark it available for use upon mmapping and remove
 * it upon munmapping.
 */
#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
#define PAGE_SIZE 4096

#define WARN_ONCE(cond, fmt...) do {                            \
   if (unlikely(cond)) {                                        \
      static bool _warned = false;                              \
      if (!_warned) {                                           \
         fprintf(stderr, "WARNING: ");                          \
         fprintf(stderr, fmt);                                  \
         _warned = true;                                        \
      }                                                         \
   }                                                            \
} while (0)

#define FILE_DEBUG_FLAG DEBUG_BUFMGR
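/* WARN_ONCE() prints a given warning at most once per process.  For example,
 * iris_bo_export_gem_handle_for_device() below uses it so that a kernel
 * lacking file descriptor comparison support is only complained about the
 * first time it is noticed.
 */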
static int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}
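/* atomic_add_unless(v, add, unless) atomically adds `add` to *v unless *v
 * already equals `unless`, and returns whether the add was skipped.  For
 * example, iris_bo_unreference() below calls
 * atomic_add_unless(&bo->refcount, -1, 1) so the final drop from 1 to 0 is
 * not taken on the fast path and instead happens under the bufmgr lock.
 */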
static const char *
memzone_name(enum iris_memory_zone memzone)
{
   const char *names[] = {
      [IRIS_MEMZONE_SHADER]  = "shader",
      [IRIS_MEMZONE_BINDER]  = "binder",
      [IRIS_MEMZONE_SURFACE] = "surface",
      [IRIS_MEMZONE_DYNAMIC] = "dynamic",
      [IRIS_MEMZONE_OTHER]   = "other",
      [IRIS_MEMZONE_BORDER_COLOR_POOL] = "bordercolor",
   };
   assert(memzone < ARRAY_SIZE(names));
   return names[memzone];
}
struct bo_cache_bucket {
   /** List of cached BOs. */
   struct list_head head;

   /** Size of this bucket, in bytes. */
   uint64_t size;
};

struct bo_export {
   /** File descriptor associated with a handle export. */
   int drm_fd;

   /** GEM handle in drm_fd */
   uint32_t gem_handle;

   struct list_head link;
};

struct iris_bufmgr {
   /**
    * List into the list of bufmgr.
    */
   struct list_head link;

   uint32_t refcount;

   int fd;

   mtx_t lock;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct bo_cache_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;

   struct hash_table *name_table;
   struct hash_table *handle_table;

   /**
    * List of BOs which we've effectively freed, but are hanging on to
    * until they're idle before closing and returning the VMA.
    */
   struct list_head zombie_list;

   struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];

   bool has_llc:1;
   bool has_mmap_offset:1;
   bool has_tiling_uapi:1;
   bool bo_reuse:1;

   struct gen_aux_map_context *aux_map_ctx;
};
static mtx_t global_bufmgr_list_mutex = _MTX_INITIALIZER_NP;
static struct list_head global_bufmgr_list = {
   .next = &global_bufmgr_list,
   .prev = &global_bufmgr_list,
};
static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
                                  uint32_t stride);

static void bo_free(struct iris_bo *bo);
static struct iris_bo *
find_and_ref_external_bo(struct hash_table *ht, unsigned int key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
   struct iris_bo *bo = entry ? entry->data : NULL;

   if (bo) {
      assert(bo->external);
      assert(!bo->reusable);

      /* Being non-reusable, the BO cannot be in the cache lists, but it
       * may be in the zombie list if it had reached zero references, but
       * we hadn't yet closed it...and then reimported the same BO.  If it
       * is, then remove it since it's now been resurrected.
       */
      if (bo->head.prev || bo->head.next)
         list_del(&bo->head);

      iris_bo_reference(bo);
   }

   return bo;
}
/**
 * This function finds the correct bucket fit for the input size.
 * It works with O(1) complexity: the bucket index is computed directly
 * from the requested size instead of iterating over all the buckets.
 */
static struct bo_cache_bucket *
bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size)
{
   /* Calculating the pages and rounding up to the page size. */
   const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

   /* Row  Bucket sizes    clz((x-1) | 3)   Row    Column
    *      in pages                      stride   size
    *   0:   1  2  3  4 -> 30 30 30 30      4       1
    *   1:   5  6  7  8 -> 29 29 29 29      4       1
    *   2:  10 12 14 16 -> 28 28 28 28      8       2
    *   3:  20 24 28 32 -> 27 27 27 27     16       4
    */
   const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
   const unsigned row_max_pages = 4 << row;

   /* The '& ~2' is the special case for row 1.  In row 1, max pages /
    * 2 is 2, but the previous row maximum is zero (because there is
    * no previous row).  All row maximum sizes are power of 2, so that
    * is the only case where that bit will be set.
    */
   const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
   int col_size_log2 = row - 1;
   col_size_log2 += (col_size_log2 < 0);

   const unsigned col = (pages - prev_row_max_pages +
                        ((1 << col_size_log2) - 1)) >> col_size_log2;

   /* Calculating the index based on the row and column. */
   const unsigned index = (row * 4) + (col - 1);

   return (index < bufmgr->num_buckets) ?
          &bufmgr->cache_bucket[index] : NULL;
}
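/* Worked example of the index math above (illustrative only): a 90,000-byte
 * request rounds up to pages = 22.  (22 - 1) | 3 = 23, so
 * row = 30 - clz(23) = 3 and row_max_pages = 32.  prev_row_max_pages = 16,
 * col_size_log2 = 2, col = (22 - 16 + 3) >> 2 = 2, giving
 * index = 3 * 4 + (2 - 1) = 13, which is the 24-page (98,304-byte) bucket
 * created by init_cache_buckets() below.
 */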
enum iris_memory_zone
iris_memzone_for_address(uint64_t address)
{
   STATIC_ASSERT(IRIS_MEMZONE_OTHER_START   > IRIS_MEMZONE_DYNAMIC_START);
   STATIC_ASSERT(IRIS_MEMZONE_DYNAMIC_START > IRIS_MEMZONE_SURFACE_START);
   STATIC_ASSERT(IRIS_MEMZONE_SURFACE_START > IRIS_MEMZONE_BINDER_START);
   STATIC_ASSERT(IRIS_MEMZONE_BINDER_START  > IRIS_MEMZONE_SHADER_START);
   STATIC_ASSERT(IRIS_BORDER_COLOR_POOL_ADDRESS == IRIS_MEMZONE_DYNAMIC_START);

   if (address >= IRIS_MEMZONE_OTHER_START)
      return IRIS_MEMZONE_OTHER;

   if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
      return IRIS_MEMZONE_BORDER_COLOR_POOL;

   if (address > IRIS_MEMZONE_DYNAMIC_START)
      return IRIS_MEMZONE_DYNAMIC;

   if (address >= IRIS_MEMZONE_SURFACE_START)
      return IRIS_MEMZONE_SURFACE;

   if (address >= IRIS_MEMZONE_BINDER_START)
      return IRIS_MEMZONE_BINDER;

   return IRIS_MEMZONE_SHADER;
}
/**
 * Allocate a section of virtual memory for a buffer, assigning an address.
 *
 * This uses either the bucket allocator for the given size, or the large
 * object allocator (util_vma).
 */
static uint64_t
vma_alloc(struct iris_bufmgr *bufmgr,
          enum iris_memory_zone memzone,
          uint64_t size,
          uint64_t alignment)
{
   /* Force alignment to be some number of pages */
   alignment = ALIGN(alignment, PAGE_SIZE);

   if (memzone == IRIS_MEMZONE_BORDER_COLOR_POOL)
      return IRIS_BORDER_COLOR_POOL_ADDRESS;

   /* The binder handles its own allocations.  Return non-zero here. */
   if (memzone == IRIS_MEMZONE_BINDER)
      return IRIS_MEMZONE_BINDER_START;

   uint64_t addr =
      util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size, alignment);

   assert((addr >> 48ull) == 0);
   assert((addr % alignment) == 0);

   return gen_canonical_address(addr);
}
static void
vma_free(struct iris_bufmgr *bufmgr,
         uint64_t address,
         uint64_t size)
{
   if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
      return;

   /* Un-canonicalize the address. */
   address = gen_48b_address(address);

   enum iris_memory_zone memzone = iris_memzone_for_address(address);

   /* The binder handles its own allocations. */
   if (memzone == IRIS_MEMZONE_BINDER)
      return;

   assert(memzone < ARRAY_SIZE(bufmgr->vma_allocator));

   util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
}
bool
iris_bo_busy(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };

   int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret != 0)
      return false;

   bo->idle = !busy.busy;
   return busy.busy;
}
int
iris_bo_madvise(struct iris_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv = {
      .handle = bo->gem_handle,
      .madv = state,
      .retained = 1,
   };

   gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}
static struct iris_bo *
bo_calloc(void)
{
   struct iris_bo *bo = calloc(1, sizeof(*bo));
   if (!bo)
      return NULL;

   list_inithead(&bo->exports);

   bo->hash = _mesa_hash_pointer(bo);

   return bo;
}
static struct iris_bo *
alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
                    struct bo_cache_bucket *bucket,
                    uint32_t alignment,
                    enum iris_memory_zone memzone,
                    unsigned flags,
                    bool match_zone)
{
   if (!bucket)
      return NULL;

   struct iris_bo *bo = NULL;

   list_for_each_entry_safe(struct iris_bo, cur, &bucket->head, head) {
      /* Try a little harder to find one that's already in the right memzone */
      if (match_zone && memzone != iris_memzone_for_address(cur->gtt_offset))
         continue;

      /* If the last BO in the cache is busy, there are no idle BOs.  Bail,
       * either falling back to a non-matching memzone, or if that fails,
       * allocating a fresh buffer.
       */
      if (iris_bo_busy(cur))
         return NULL;

      list_del(&cur->head);

      /* Tell the kernel we need this BO.  If it still exists, we're done! */
      if (iris_bo_madvise(cur, I915_MADV_WILLNEED)) {
         bo = cur;
         break;
      }

      /* This BO was purged, throw it out and keep looking. */
      bo_free(cur);
   }

   if (!bo)
      return NULL;

   if (bo->aux_map_address) {
      /* This buffer was associated with an aux-buffer range.  We make sure
       * that buffers are not reused from the cache while the buffer is (busy)
       * being used by an executing batch.  Since we are here, the buffer is no
       * longer being used by a batch and the buffer was deleted (in order to
       * end up in the cache).  Therefore its old aux-buffer range can be
       * removed from the aux-map.
       */
      if (bo->bufmgr->aux_map_ctx)
         gen_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
                                 bo->size);
      bo->aux_map_address = 0;
   }

   /* If the cached BO isn't in the right memory zone, or the alignment
    * isn't sufficient, free the old memory and assign it a new address.
    */
   if (memzone != iris_memzone_for_address(bo->gtt_offset) ||
       bo->gtt_offset % alignment != 0) {
      vma_free(bufmgr, bo->gtt_offset, bo->size);
      bo->gtt_offset = 0ull;
   }

   /* Zero the contents if necessary.  If this fails, fall back to
    * allocating a fresh BO, which will always be zeroed by the kernel.
    */
   if (flags & BO_ALLOC_ZEROED) {
      void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
      if (map) {
         memset(map, 0, bo->size);
      } else {
         bo_free(bo);
         return NULL;
      }
   }

   return bo;
}
static struct iris_bo *
alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size)
{
   struct iris_bo *bo = bo_calloc();
   if (!bo)
      return NULL;

   struct drm_i915_gem_create create = { .size = bo_size };

   /* All new BOs we get from the kernel are zeroed, so we don't need to
    * worry about that here.
    */
   if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0) {
      free(bo);
      return NULL;
   }

   bo->gem_handle = create.handle;
   bo->bufmgr = bufmgr;
   bo->size = bo_size;
   bo->tiling_mode = I915_TILING_NONE;

   /* Calling set_domain() will allocate pages for the BO outside of the
    * struct mutex lock in the kernel, which is more efficient than waiting
    * to create them during the first execbuf that uses the BO.
    */
   struct drm_i915_gem_set_domain sd = {
      .handle = bo->gem_handle,
      .read_domains = I915_GEM_DOMAIN_CPU,
      .write_domain = 0,
   };

   if (gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
      bo_free(bo);
      return NULL;
   }

   return bo;
}
static struct iris_bo *
bo_alloc_internal(struct iris_bufmgr *bufmgr,
                  const char *name,
                  uint64_t size,
                  uint32_t alignment,
                  enum iris_memory_zone memzone,
                  unsigned flags,
                  uint32_t tiling_mode,
                  uint32_t stride)
{
   struct iris_bo *bo;
   unsigned int page_size = getpagesize();
   struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);

   /* Round the size up to the bucket size, or if we don't have caching
    * at this size, a multiple of the page size.
    */
   uint64_t bo_size =
      bucket ? bucket->size : MAX2(ALIGN(size, page_size), page_size);

   mtx_lock(&bufmgr->lock);

   /* Get a buffer out of the cache if available.  First, we try to find
    * one with a matching memory zone so we can avoid reallocating VMA.
    */
   bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags, true);

   /* If that fails, we try for any cached BO, without matching memzone. */
   if (!bo)
      bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags,
                               false);

   mtx_unlock(&bufmgr->lock);

   if (!bo) {
      bo = alloc_fresh_bo(bufmgr, bo_size);
      if (!bo)
         return NULL;
   }

   if (bo->gtt_offset == 0ull) {
      mtx_lock(&bufmgr->lock);
      bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, alignment);
      mtx_unlock(&bufmgr->lock);

      if (bo->gtt_offset == 0ull)
         goto err_free;
   }

   if (bo_set_tiling_internal(bo, tiling_mode, stride))
      goto err_free;

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = bucket && bufmgr->bo_reuse;
   bo->cache_coherent = bufmgr->has_llc;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;

   /* By default, capture all driver-internal buffers like shader kernels,
    * surface states, dynamic states, border colors, and so on.
    */
   if (memzone < IRIS_MEMZONE_OTHER)
      bo->kflags |= EXEC_OBJECT_CAPTURE;

   if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
      struct drm_i915_gem_caching arg = {
         .handle = bo->gem_handle,
         .caching = 1,
      };
      if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
         bo->cache_coherent = true;
         bo->reusable = false;
      }
   }

   DBG("bo_create: buf %d (%s) (%s memzone) %llub\n", bo->gem_handle,
       bo->name, memzone_name(memzone), (unsigned long long) size);

   return bo;

err_free:
   bo_free(bo);
   return NULL;
}
struct iris_bo *
iris_bo_alloc(struct iris_bufmgr *bufmgr,
              const char *name,
              uint64_t size,
              enum iris_memory_zone memzone)
{
   return bo_alloc_internal(bufmgr, name, size, 1, memzone,
                            0, I915_TILING_NONE, 0);
}

struct iris_bo *
iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
                    uint64_t size, uint32_t alignment,
                    enum iris_memory_zone memzone,
                    uint32_t tiling_mode, uint32_t pitch, unsigned flags)
{
   return bo_alloc_internal(bufmgr, name, size, alignment, memzone,
                            flags, tiling_mode, pitch);
}
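/* Typical callers only need the simple entry point.  For instance, a 4 KiB
 * linear buffer in the general-purpose zone could be allocated with
 * (illustrative name and size only):
 *
 *    struct iris_bo *bo =
 *       iris_bo_alloc(bufmgr, "scratch", 4096, IRIS_MEMZONE_OTHER);
 *
 * while tiled or coherent allocations go through iris_bo_alloc_tiled() with
 * an explicit tiling mode, pitch, and BO_ALLOC_* flags.
 */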
struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
                       void *ptr, size_t size,
                       enum iris_memory_zone memzone)
{
   struct drm_gem_close close = { 0, };
   struct iris_bo *bo;

   bo = bo_calloc();
   if (!bo)
      return NULL;

   struct drm_i915_gem_userptr arg = {
      .user_ptr = (uintptr_t)ptr,
      .user_size = size,
   };
   if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
      goto err_free;
   bo->gem_handle = arg.handle;

   /* Check the buffer for validity before we try and use it in a batch */
   struct drm_i915_gem_set_domain sd = {
      .handle = bo->gem_handle,
      .read_domains = I915_GEM_DOMAIN_CPU,
   };
   if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
      goto err_close;

   bo->name = name;
   bo->size = size;
   bo->map_cpu = ptr;

   bo->bufmgr = bufmgr;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;

   mtx_lock(&bufmgr->lock);
   bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
   mtx_unlock(&bufmgr->lock);

   if (bo->gtt_offset == 0ull)
      goto err_close;

   p_atomic_set(&bo->refcount, 1);
   bo->userptr = true;
   bo->cache_coherent = true;

   return bo;

err_close:
   close.handle = bo->gem_handle;
   gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
err_free:
   free(bo);
   return NULL;
}
/**
 * Returns an iris_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct iris_bo *
iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                             const char *name, unsigned int handle)
{
   struct iris_bo *bo;

   /* At the moment most applications only have a few named bo.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named.  And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   mtx_lock(&bufmgr->lock);
   bo = find_and_ref_external_bo(bufmgr->name_table, handle);
   if (bo)
      goto out;

   struct drm_gem_open open_arg = { .name = handle };
   int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }

   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = find_and_ref_external_bo(bufmgr->handle_table, open_arg.handle);
   if (bo)
      goto out;

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   /* XXX stride is unknown */

   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}
static void
bo_close(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->external) {
      struct hash_entry *entry;

      if (bo->global_name) {
         entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
         _mesa_hash_table_remove(bufmgr->name_table, entry);
      }

      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
      _mesa_hash_table_remove(bufmgr->handle_table, entry);

      list_for_each_entry_safe(struct bo_export, export, &bo->exports, link) {
         struct drm_gem_close close = { .handle = export->gem_handle };
         gen_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);

         list_del(&export->link);
         free(export);
      }
   } else {
      assert(list_is_empty(&bo->exports));
   }

   /* Close this object */
   struct drm_gem_close close = { .handle = bo->gem_handle };
   int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
      gen_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
                              bo->size);
   }

   /* Return the VMA for reuse */
   vma_free(bo->bufmgr, bo->gtt_offset, bo->size);

   free(bo);
}
static void
bo_free(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->map_cpu && !bo->userptr) {
      VG_NOACCESS(bo->map_cpu, bo->size);
      os_munmap(bo->map_cpu, bo->size);
   }
   if (bo->map_wc) {
      VG_NOACCESS(bo->map_wc, bo->size);
      os_munmap(bo->map_wc, bo->size);
   }
   if (bo->map_gtt) {
      VG_NOACCESS(bo->map_gtt, bo->size);
      os_munmap(bo->map_gtt, bo->size);
   }

   if (bo->idle) {
      bo_close(bo);
   } else {
      /* Defer closing the GEM BO and returning the VMA for reuse until the
       * BO is idle.  Just move it to the dead list for now.
       */
      list_addtail(&bo->head, &bufmgr->zombie_list);
   }
}
/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);
         bo_free(bo);
      }
   }

   list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
      /* Stop once we reach a busy BO - all others past this point were
       * freed more recently so are likely also busy.
       */
      if (!bo->idle && iris_bo_busy(bo))
         break;

      list_del(&bo->head);
      bo_close(bo);
   }

   bufmgr->time = time;
}
static void
bo_unreference_final(struct iris_bo *bo, time_t time)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bucket && iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}
void
iris_bo_unreference(struct iris_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct iris_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      mtx_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      mtx_unlock(&bufmgr->lock);
   }
}
static void
bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
                           struct iris_bo *bo,
                           const char *action)
{
   bool busy = dbg && !bo->idle;
   double elapsed = unlikely(busy) ? -get_time() : 0.0;

   iris_bo_wait_rendering(bo);

   if (unlikely(busy)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */
         perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
   }
}
static void
print_flags(unsigned flags)
{
   if (flags & MAP_READ)
      DBG("READ ");
   if (flags & MAP_WRITE)
      DBG("WRITE ");
   if (flags & MAP_ASYNC)
      DBG("ASYNC ");
   if (flags & MAP_PERSISTENT)
      DBG("PERSISTENT ");
   if (flags & MAP_COHERENT)
      DBG("COHERENT ");
   DBG("\n");
}
static void *
iris_bo_gem_mmap_legacy(struct pipe_debug_callback *dbg,
                        struct iris_bo *bo, bool wc)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   struct drm_i915_gem_mmap mmap_arg = {
      .handle = bo->gem_handle,
      .size = bo->size,
      .flags = wc ? I915_MMAP_WC : 0,
   };

   int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0) {
      DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
          __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
      return NULL;
   }
   void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;

   return map;
}
static void *
iris_bo_gem_mmap_offset(struct pipe_debug_callback *dbg, struct iris_bo *bo,
                        bool wc)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   struct drm_i915_gem_mmap_offset mmap_arg = {
      .handle = bo->gem_handle,
      .flags = wc ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };

   /* Get the fake offset back */
   int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg);
   if (ret != 0) {
      DBG("%s:%d: Error preparing buffer %d (%s): %s .\n",
          __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
      return NULL;
   }

   /* And map it. */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    bufmgr->fd, mmap_arg.offset);
   if (map == MAP_FAILED) {
      DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
          __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
      return NULL;
   }

   return map;
}
static void *
iris_bo_gem_mmap(struct pipe_debug_callback *dbg, struct iris_bo *bo, bool wc)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bufmgr->has_mmap_offset)
      return iris_bo_gem_mmap_offset(dbg, bo, wc);
   else
      return iris_bo_gem_mmap_legacy(dbg, bo, wc);
}
static void *
iris_bo_map_cpu(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   /* We disallow CPU maps for writing to non-coherent buffers, as the
    * CPU map can become invalidated when a batch is flushed out, which
    * can happen at unpredictable times.  You should use WC maps instead.
    */
   assert(bo->cache_coherent || !(flags & MAP_WRITE));

   if (!bo->map_cpu) {
      DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);
      void *map = iris_bo_gem_mmap(dbg, bo, false);
      if (!map)
         return NULL;

      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         os_munmap(map, bo->size);
      }
   }
   assert(bo->map_cpu);

   DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
       bo->map_cpu);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
   }

   if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
      /* If we're reusing an existing CPU mapping, the CPU caches may
       * contain stale data from the last time we read from that mapping.
       * (With the BO cache, it might even be data from a previous buffer!)
       * Even if it's a brand new mapping, the kernel may have zeroed the
       * buffer via CPU writes.
       *
       * We need to invalidate those cachelines so that we see the latest
       * contents, and so long as we only read from the CPU mmap we do not
       * need to write those cachelines back afterwards.
       *
       * On LLC, the empirical evidence suggests that writes from the GPU
       * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
       * cachelines.  (Other reads, such as the display engine, bypass the
       * LLC entirely requiring us to keep dirty pixels for the scanout
       * out of any cache.)
       */
      gen_invalidate_range(bo->map_cpu, bo->size);
   }

   return bo->map_cpu;
}
static void *
iris_bo_map_wc(struct pipe_debug_callback *dbg,
               struct iris_bo *bo, unsigned flags)
{
   if (!bo->map_wc) {
      DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
      void *map = iris_bo_gem_mmap(dbg, bo, true);
      if (!map)
         return NULL;

      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         os_munmap(map, bo->size);
      }
   }
   assert(bo->map_wc);

   DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "WC mapping");
   }

   return bo->map_wc;
}
/**
 * Perform an uncached mapping via the GTT.
 *
 * Write access through the GTT is not quite fully coherent.  On low power
 * systems especially, like modern Atoms, we can observe reads from RAM before
 * the write via GTT has landed.  A write memory barrier that flushes the Write
 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
 * read after the write as the GTT write suffers a small delay through the GTT
 * indirection.  The kernel uses an uncached mmio read to ensure the GTT write
 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
 * flushes prior to execbuf submission.  However, if we are not informing the
 * kernel about our GTT writes, it will not flush before earlier access, such
 * as when using the cmdparser.  Similarly, we need to be careful if we should
 * ever issue a CPU read immediately following a GTT write.
 *
 * Telling the kernel about write access also has one more important
 * side-effect.  Upon receiving notification about the write, it cancels any
 * scanout buffering for FBC/PSR and friends.  Later FBC/PSR is then flushed by
 * either SW_FINISH or DIRTYFB.  The presumption is that we never write to the
 * actual scanout via a mapping, only to a backbuffer, and so all the FBC/PSR
 * tracking is handled on the buffer exchange instead.
 */
static void *
iris_bo_map_gtt(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* If we don't support get/set_tiling, there's no support for GTT mapping
    * either (it won't do any de-tiling for us).
    */
   assert(bufmgr->has_tiling_uapi);

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->map_gtt == NULL) {
      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };

      /* Get the fake offset back... */
      int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* ...and map it. */
      void *map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                          MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (map == MAP_FAILED) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
       * already intercept this mmap call.  However, for consistency between
       * all the mmap paths, we mark the pointer as defined now and mark it
       * as inaccessible afterwards.
       */
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         os_munmap(map, bo->size);
      }
   }
   assert(bo->map_gtt);

   DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
   }

   return bo->map_gtt;
}
static bool
can_map_cpu(struct iris_bo *bo, unsigned flags)
{
   if (bo->cache_coherent)
      return true;

   /* Even if the buffer itself is not cache-coherent (such as a scanout), on
    * an LLC platform reads always are coherent (as they are performed via the
    * central system agent).  It is just writes that we need to take special
    * care with, to ensure they land in main memory and don't stick in the
    * CPU cache.
    */
   if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
      return true;

   /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
    * across batch flushes where the kernel will change cache domains of the
    * bo, invalidating continued access to the CPU mmap on non-LLC device.
    *
    * Similarly, ASYNC typically means that the buffer will be accessed via
    * both the CPU and the GPU simultaneously.  Batches may be executed that
    * use the BO even while it is mapped.  While OpenGL technically disallows
    * most drawing while non-persistent mappings are active, we may still use
    * the GPU for blits or other operations, causing batches to happen at
    * inconvenient times.
    *
    * If RAW is set, we expect the caller to be able to handle a WC buffer
    * more efficiently than the involuntary clflushes.
    */
   if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC | MAP_RAW))
      return false;

   return !(flags & MAP_WRITE);
}
void *
iris_bo_map(struct pipe_debug_callback *dbg,
            struct iris_bo *bo, unsigned flags)
{
   void *map;

   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
      return iris_bo_map_gtt(dbg, bo, flags);

   if (can_map_cpu(bo, flags))
      map = iris_bo_map_cpu(dbg, bo, flags);
   else
      map = iris_bo_map_wc(dbg, bo, flags);

   /* Allow the attempt to fail by falling back to the GTT where necessary.
    *
    * Not every buffer can be mmaped directly using the CPU (or WC), for
    * example buffers that wrap stolen memory or are imported from other
    * devices.  For those, we have little choice but to use a GTT mmapping.
    * However, if we use a slow GTT mmapping for reads where we expected fast
    * access, that order of magnitude difference in throughput will be clearly
    * expressed by angry users.
    *
    * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
    */
   if (!map && !(flags & MAP_RAW)) {
      perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
                 bo->name, flags);
      map = iris_bo_map_gtt(dbg, bo, flags);
   }

   return map;
}
1243 iris_bo_wait_rendering(struct iris_bo
*bo
)
1245 /* We require a kernel recent enough for WAIT_IOCTL support.
1246 * See intel_init_bufmgr()
1248 iris_bo_wait(bo
, -1);
/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time.  Otherwise some negative
 * return value describes the error.  Of particular interest is -ETIME when
 * the wait has failed to yield the desired result.
 *
 * Similar to iris_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time.  Another subtle
 * difference is that the internal locking semantics are different (this
 * variant does not hold the lock for the duration of the wait).  This makes
 * the wait subject to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call.  The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle.  Userspace must make sure this race does not occur if such
 * precision is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise; upgrade to the latest stable kernels if this is the case.
 */
int
iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* If we know it's idle, don't bother with the kernel round trip */
   if (bo->idle && !bo->external)
      return 0;

   struct drm_i915_gem_wait wait = {
      .bo_handle = bo->gem_handle,
      .timeout_ns = timeout_ns,
   };
   int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return -errno;

   bo->idle = true;

   return ret;
}
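/* For example (illustrative only), a bounded wait of one millisecond before
 * falling back to other work could look like:
 *
 *    if (iris_bo_wait(bo, 1000000) == -ETIME) {
 *       ... the BO is still busy; do something else and retry later ...
 *    }
 *
 * while iris_bo_wait(bo, -1) waits indefinitely, as iris_bo_wait_rendering()
 * above does.
 */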
static void
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
{
   /* Free aux-map buffers */
   gen_aux_map_finish(bufmgr->aux_map_ctx);

   /* bufmgr will no longer try to free VMA entries in the aux-map */
   bufmgr->aux_map_ctx = NULL;

   mtx_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }
   }

   /* Close any buffer objects on the dead list. */
   list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
      list_del(&bo->head);
      bo_close(bo);
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
      if (z != IRIS_MEMZONE_BINDER)
         util_vma_heap_finish(&bufmgr->vma_allocator[z]);
   }

   close(bufmgr->fd);

   free(bufmgr);
}
static int
bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   /* If we can't do map_gtt, the set/get_tiling API isn't useful.  And it's
    * actually not supported by the kernel in those cases.
    */
   if (!bufmgr->has_tiling_uapi) {
      bo->tiling_mode = tiling_mode;
      bo->stride = stride;
      return 0;
   }

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drm_ioctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret != 0)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->stride = set_tiling.stride;
   return 0;
}
struct iris_bo *
iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd,
                      int tiling, uint32_t stride)
{
   uint32_t handle;
   struct iris_bo *bo;

   mtx_lock(&bufmgr->lock);
   int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
          strerror(errno));
      mtx_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us.  Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object
    */
   bo = find_and_ref_external_bo(bufmgr->handle_table, handle);
   if (bo)
      goto out;

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guess size). */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;
   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;

   /* From the Bspec, Memory Compression - Gen12:
    *
    *    The base address for the surface has to be 64K page aligned and the
    *    surface is expected to be padded in the virtual domain to be 4 4K
    *    pages.
    *
    * The dmabuf may contain a compressed surface.  Align the BO to 64KB just
    * in case.  We always align to 64KB even on platforms where we don't need
    * to, because it's a fairly reasonable thing to do anyway.
    */
   bo->gtt_offset =
      vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 64 * 1024);

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   if (!bufmgr->has_tiling_uapi)
      get_tiling.tiling_mode = I915_TILING_NONE;
   else if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   /* XXX stride is unknown */

   /* Modifiers path */
   if (get_tiling.tiling_mode == tiling || !bufmgr->has_tiling_uapi) {
      bo->tiling_mode = tiling;
      bo->stride = stride;
   } else if (bo_set_tiling_internal(bo, tiling, stride)) {
      goto err;
   }

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}
static void
iris_bo_make_external_locked(struct iris_bo *bo)
{
   if (!bo->external) {
      _mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
      /* If a BO is going to be used externally, it could be sent to the
       * display HW.  So make sure our CPU mappings don't assume cache
       * coherency since display is outside that cache.
       */
      bo->cache_coherent = false;
      bo->external = true;
      bo->reusable = false;
   }
}

void
iris_bo_make_external(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->external) {
      assert(!bo->reusable);
      return;
   }

   mtx_lock(&bufmgr->lock);
   iris_bo_make_external_locked(bo);
   mtx_unlock(&bufmgr->lock);
}
int
iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   iris_bo_make_external(bo);

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   return 0;
}

uint32_t
iris_bo_export_gem_handle(struct iris_bo *bo)
{
   iris_bo_make_external(bo);

   return bo->gem_handle;
}
int
iris_bo_flink(struct iris_bo *bo, uint32_t *name)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink = { .handle = bo->gem_handle };

      if (gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      mtx_lock(&bufmgr->lock);
      if (!bo->global_name) {
         iris_bo_make_external_locked(bo);
         bo->global_name = flink.name;
         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      mtx_unlock(&bufmgr->lock);
   }

   *name = bo->global_name;
   return 0;
}
int
iris_bo_export_gem_handle_for_device(struct iris_bo *bo, int drm_fd,
                                     uint32_t *out_handle)
{
   /* Only add the new GEM handle to the list of exports if it belongs to a
    * different GEM device.  Otherwise we might close the same buffer multiple
    * times.
    */
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   int ret = os_same_file_description(drm_fd, bufmgr->fd);
   WARN_ONCE(ret < 0,
             "Kernel has no file descriptor comparison support: %s\n",
             strerror(errno));
   if (ret == 0) {
      *out_handle = iris_bo_export_gem_handle(bo);
      return 0;
   }

   struct bo_export *export = calloc(1, sizeof(*export));
   if (!export)
      return -ENOMEM;

   export->drm_fd = drm_fd;

   int dmabuf_fd = -1;
   int err = iris_bo_export_dmabuf(bo, &dmabuf_fd);
   if (err) {
      free(export);
      return err;
   }

   mtx_lock(&bufmgr->lock);
   err = drmPrimeFDToHandle(drm_fd, dmabuf_fd, &export->gem_handle);
   close(dmabuf_fd);
   if (err) {
      mtx_unlock(&bufmgr->lock);
      free(export);
      return err;
   }

   bool found = false;
   list_for_each_entry(struct bo_export, iter, &bo->exports, link) {
      if (iter->drm_fd != drm_fd)
         continue;
      /* Here we assume that for a given DRM fd, we'll always get back the
       * same GEM handle for a given buffer.
       */
      assert(iter->gem_handle == export->gem_handle);
      free(export);
      export = iter;
      found = true;
      break;
   }
   if (!found)
      list_addtail(&export->link, &bo->exports);

   mtx_unlock(&bufmgr->lock);

   *out_handle = export->gem_handle;

   return 0;
}
static void
add_bucket(struct iris_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;

   assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
}
static void
init_cache_buckets(struct iris_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, PAGE_SIZE);
   add_bucket(bufmgr, PAGE_SIZE * 2);
   add_bucket(bufmgr, PAGE_SIZE * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}
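/* The resulting bucket sizes are 4 KiB, 8 KiB, and 12 KiB, then each power of
 * two with three intermediate quarter steps: 16/20/24/28 KiB, 32/40/48/56 KiB,
 * 64/80/96/112 KiB, and so on up through the 64 MiB iteration (which also
 * adds 80, 96, and 112 MiB buckets) -- 55 buckets in all, which fits within
 * the cache_bucket[14 * 4] array above.
 */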
uint32_t
iris_create_hw_context(struct iris_bufmgr *bufmgr)
{
   struct drm_i915_gem_context_create create = { };
   int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret != 0) {
      DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
      return 0;
   }

   /* Upon declaring a GPU hang, the kernel will zap the guilty context
    * back to the default logical HW state and attempt to continue on to
    * our next submitted batchbuffer.  However, our render batches assume
    * the previous GPU state is preserved, and only emit commands needed
    * to incrementally change that state.  In particular, we inherit the
    * STATE_BASE_ADDRESS and PIPELINE_SELECT settings, which are critical.
    * With default base addresses, our next batches will almost certainly
    * cause more GPU hangs, leading to repeated hangs until we're banned
    * or the machine is dead.
    *
    * Here we tell the kernel not to attempt to recover our context but
    * immediately (on the next batchbuffer submission) report that the
    * context is lost, and we will do the recovery ourselves.  Ideally,
    * we'll have two lost batches instead of a continual stream of hangs.
    */
   struct drm_i915_gem_context_param p = {
      .ctx_id = create.ctx_id,
      .param = I915_CONTEXT_PARAM_RECOVERABLE,
      .value = false,
   };
   drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);

   return create.ctx_id;
}
static int
iris_hw_context_get_priority(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = ctx_id,
      .param = I915_CONTEXT_PARAM_PRIORITY,
   };
   drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
   return p.value; /* on error, return 0 i.e. default priority */
}

int
iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
                             uint32_t ctx_id,
                             int priority)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = ctx_id,
      .param = I915_CONTEXT_PARAM_PRIORITY,
      .value = priority,
   };
   int err = 0;

   if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;

   return err;
}
uint32_t
iris_clone_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
{
   uint32_t new_ctx = iris_create_hw_context(bufmgr);

   if (new_ctx) {
      int priority = iris_hw_context_get_priority(bufmgr, ctx_id);
      iris_hw_context_set_priority(bufmgr, new_ctx, priority);
   }

   return new_ctx;
}

void
iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
{
   struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };

   if (ctx_id != 0 &&
       gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
      fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
              strerror(errno));
   }
}
int
iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read reg_read = { .offset = offset };
   int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

   *result = reg_read.val;
   return ret;
}
static uint64_t
iris_gtt_size(int fd)
{
   /* We use the default (already allocated) context to determine
    * the default configuration of the virtual address space.
    */
   struct drm_i915_gem_context_param p = {
      .param = I915_CONTEXT_PARAM_GTT_SIZE,
   };
   if (!gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
      return p.value;

   return 0;
}
static struct gen_buffer *
gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
{
   struct gen_buffer *buf = malloc(sizeof(struct gen_buffer));
   if (!buf)
      return NULL;

   struct iris_bufmgr *bufmgr = (struct iris_bufmgr *)driver_ctx;

   struct iris_bo *bo =
      iris_bo_alloc_tiled(bufmgr, "aux-map", size, 64 * 1024,
                          IRIS_MEMZONE_OTHER, I915_TILING_NONE, 0, 0);

   buf->driver_bo = bo;
   buf->gpu = bo->gtt_offset;
   buf->gpu_end = buf->gpu + bo->size;
   buf->map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
   return buf;
}

static void
gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
{
   iris_bo_unreference((struct iris_bo *)buffer->driver_bo);
   free(buffer);
}

static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
   .alloc = gen_aux_map_buffer_alloc,
   .free = gen_aux_map_buffer_free,
};
static int
gem_param(int fd, int name)
{
   int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */

   struct drm_i915_getparam gp = { .param = name, .value = &v };
   if (gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
      return -1;

   return v;
}
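/* gem_param() is a small convenience wrapper around the GETPARAM ioctl; for
 * example, iris_bufmgr_create() below checks
 * gem_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4 to decide whether the
 * newer mmap-offset uAPI is available.
 */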
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
static struct iris_bufmgr *
iris_bufmgr_create(struct gen_device_info *devinfo, int fd, bool bo_reuse)
{
   uint64_t gtt_size = iris_gtt_size(fd);
   if (gtt_size <= IRIS_MEMZONE_OTHER_START)
      return NULL;

   struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel.  If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this!  Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = os_dupfd_cloexec(fd);

   p_atomic_set(&bufmgr->refcount, 1);

   if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
      close(bufmgr->fd);
      free(bufmgr);
      return NULL;
   }

   list_inithead(&bufmgr->zombie_list);

   bufmgr->has_llc = devinfo->has_llc;
   bufmgr->has_tiling_uapi = devinfo->has_tiling_uapi;
   bufmgr->bo_reuse = bo_reuse;
   bufmgr->has_mmap_offset = gem_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;

   STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
   const uint64_t _4GB = 1ull << 32;
   const uint64_t _2GB = 1ul << 31;

   /* The STATE_BASE_ADDRESS size field can only hold 1 page shy of 4GB */
   const uint64_t _4GB_minus_1 = _4GB - PAGE_SIZE;

   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
                      PAGE_SIZE, _4GB_minus_1 - PAGE_SIZE);
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
                      IRIS_MEMZONE_SURFACE_START,
                      _4GB_minus_1 - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
   /* TODO: Why does limiting to 2GB help some state items on gen12?
    *  - CC Viewport Pointer
    *  - Blend State Pointer
    *  - Color Calc State Pointer
    */
   const uint64_t dynamic_pool_size =
      (devinfo->gen >= 12 ? _2GB : _4GB_minus_1) - IRIS_BORDER_COLOR_POOL_SIZE;
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
                      IRIS_MEMZONE_DYNAMIC_START + IRIS_BORDER_COLOR_POOL_SIZE,
                      dynamic_pool_size);

   /* Leave the last 4GB out of the high vma range, so that no state
    * base address + size can overflow 48 bits.
    */
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
                      IRIS_MEMZONE_OTHER_START,
                      (gtt_size - _4GB) - IRIS_MEMZONE_OTHER_START);

   init_cache_buckets(bufmgr);

   bufmgr->name_table =
      _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);

   if (devinfo->has_aux_map) {
      bufmgr->aux_map_ctx = gen_aux_map_init(bufmgr, &aux_map_allocator,
                                             devinfo);
      assert(bufmgr->aux_map_ctx);
   }

   return bufmgr;
}
static struct iris_bufmgr *
iris_bufmgr_ref(struct iris_bufmgr *bufmgr)
{
   p_atomic_inc(&bufmgr->refcount);
   return bufmgr;
}

void
iris_bufmgr_unref(struct iris_bufmgr *bufmgr)
{
   mtx_lock(&global_bufmgr_list_mutex);
   if (p_atomic_dec_zero(&bufmgr->refcount)) {
      list_del(&bufmgr->link);
      iris_bufmgr_destroy(bufmgr);
   }
   mtx_unlock(&global_bufmgr_list_mutex);
}
/**
 * Gets an already existing GEM buffer manager or creates a new one.
 *
 * \param fd File descriptor of the opened DRM device.
 */
struct iris_bufmgr *
iris_bufmgr_get_for_fd(struct gen_device_info *devinfo, int fd, bool bo_reuse)
{
   struct stat st;

   if (fstat(fd, &st))
      return NULL;

   struct iris_bufmgr *bufmgr = NULL;

   mtx_lock(&global_bufmgr_list_mutex);
   list_for_each_entry(struct iris_bufmgr, iter_bufmgr, &global_bufmgr_list, link) {
      struct stat iter_st;
      if (fstat(iter_bufmgr->fd, &iter_st))
         continue;

      if (st.st_rdev == iter_st.st_rdev) {
         assert(iter_bufmgr->bo_reuse == bo_reuse);
         bufmgr = iris_bufmgr_ref(iter_bufmgr);
         goto unlock;
      }
   }

   bufmgr = iris_bufmgr_create(devinfo, fd, bo_reuse);
   if (bufmgr)
      list_addtail(&bufmgr->link, &global_bufmgr_list);

 unlock:
   mtx_unlock(&global_bufmgr_list_mutex);

   return bufmgr;
}
int
iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr)
{
   return bufmgr->fd;
}

void *
iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr)
{
   return bufmgr->aux_map_ctx;
}