/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * The Iris buffer manager.
 *
 * XXX: write better comments
 * - main interface to GEM in the kernel
 */

#include <xf86drm.h>
#include <util/u_atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <stdbool.h>
#include <time.h>

#include "errno.h"
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

57 #include "common/gen_clflush.h"
58 #include "common/gen_debug.h"
59 #include "common/gen_gem.h"
60 #include "dev/gen_device_info.h"
61 #include "main/macros.h"
62 #include "util/debug.h"
63 #include "util/macros.h"
64 #include "util/hash_table.h"
65 #include "util/list.h"
66 #include "util/u_dynarray.h"
68 #include "iris_bufmgr.h"
69 #include "iris_context.h"
72 #include "drm-uapi/i915_drm.h"
#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
 * leaked.  All because it does not call VG(cli_free) from its
 * VG_USERREQ__FREELIKE_BLOCK handler.  Instead of treating the memory like
 * an allocation, we mark it available for use upon mmapping and remove
 * it upon munmapping.
 */
#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))

#define PAGE_SIZE 4096

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

/**
 * Call ioctl, restarting if it is interrupted
 */
static int
drm_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   return ret;
}

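/**
 * Atomically add \p add to \p *v, unless \p *v already equals \p unless.
 * Returns true if the addition was skipped (i.e. *v == unless).
 */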
static inline int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}

static const char *
memzone_name(enum iris_memory_zone memzone)
{
   const char *names[] = {
      [IRIS_MEMZONE_SHADER]  = "shader",
      [IRIS_MEMZONE_BINDER]  = "binder",
      [IRIS_MEMZONE_SURFACE] = "surface",
      [IRIS_MEMZONE_DYNAMIC] = "dynamic",
      [IRIS_MEMZONE_OTHER]   = "other",
      [IRIS_MEMZONE_BORDER_COLOR_POOL] = "bordercolor",
   };
   assert(memzone < ARRAY_SIZE(names));
   return names[memzone];
}

/**
 * Iris fixed-size bucketing VMA allocator.
 *
 * The BO cache maintains "cache buckets" for buffers of various sizes.
 * All buffers in a given bucket are identically sized - when allocating,
 * we always round up to the bucket size.  This means that virtually all
 * allocations are fixed-size; only buffers which are too large to fit in
 * a bucket can be variably-sized.
 *
 * We create an allocator for each bucket.  Each contains a free-list, where
 * each node contains a <starting address, 64-bit bitmap> pair.  Each bit
 * represents a bucket-sized block of memory.  (At the first level, each
 * bit corresponds to a page.  For the second bucket, bits correspond to
 * two pages, and so on.)  1 means a block is free, and 0 means it's in-use.
 * The lowest bit in the bitmap is for the first block.
 *
 * This makes allocations cheap - any bit of any node will do.  We can pick
 * the head of the list and use ffs() to find a free block.  If there are
 * none, we allocate 64 blocks from a larger allocator - either a bigger
 * bucketing allocator, or a fallback top-level allocator for large objects.
 */

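/* Worked example (illustrative): for the 4096-byte bucket, each node spans
 * 64 * 4096 bytes.  A node at start_address A with bitmap ~0x7ull has blocks
 * 0-2 in use; ffsll() finds bit 3, so the next allocation returns
 * A + 3 * 4096 and clears that bit.
 */
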
struct vma_bucket_node {
   uint64_t start_address;
   uint64_t bitmap;
};

struct bo_cache_bucket {
   /** List of cached BOs. */
   struct list_head head;

   /** Size of this bucket, in bytes. */
   uint64_t size;

   /** List of vma_bucket_nodes. */
   struct util_dynarray vma_list[IRIS_MEMZONE_COUNT];
};

struct iris_bufmgr {
   int fd;

   mtx_t lock;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct bo_cache_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;

   struct hash_table *name_table;
   struct hash_table *handle_table;

   struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];

   bool has_llc:1;
   bool bo_reuse:1;
};

static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
                                  uint32_t stride);

static void bo_free(struct iris_bo *bo);

static uint64_t vma_alloc(struct iris_bufmgr *bufmgr,
                          enum iris_memory_zone memzone,
                          uint64_t size, uint64_t alignment);

static uint32_t
key_hash_uint(const void *key)
{
   return _mesa_hash_data(key, 4);
}

static bool
key_uint_equal(const void *a, const void *b)
{
   return *((unsigned *) a) == *((unsigned *) b);
}

static struct iris_bo *
hash_find_bo(struct hash_table *ht, unsigned int key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
   return entry ? (struct iris_bo *) entry->data : NULL;
}

/**
 * This function finds the correct bucket fit for the input size.
 * The function works in O(1) - it computes the bucket index directly
 * from the size rather than iterating through all the buckets.
 */
static struct bo_cache_bucket *
bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size)
{
   /* Calculating the pages and rounding up to the page size. */
   const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

   /* Row  Bucket sizes    clz((x-1) | 3)   Row    Column
    *      in pages                        stride   size
    *   0:   1  2  3  4 -> 30 30 30 30      4       1
    *   1:   5  6  7  8 -> 29 29 29 29      4       1
    *   2:  10 12 14 16 -> 28 28 28 28      8       2
    *   3:  20 24 28 32 -> 27 27 27 27     16       4
    */
   const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
   const unsigned row_max_pages = 4 << row;

   /* The '& ~2' is the special case for row 1. In row 1, max pages /
    * 2 is 2, but the previous row maximum is zero (because there is
    * no previous row). All row maximum sizes are power of 2, so that
    * is the only case where that bit will be set.
    */
   const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
   int col_size_log2 = row - 1;
   col_size_log2 += (col_size_log2 < 0);

   const unsigned col = (pages - prev_row_max_pages +
                        ((1 << col_size_log2) - 1)) >> col_size_log2;

   /* Calculating the index based on the row and column. */
   const unsigned index = (row * 4) + (col - 1);

   return (index < bufmgr->num_buckets) ?
          &bufmgr->cache_bucket[index] : NULL;
}

static enum iris_memory_zone
memzone_for_address(uint64_t address)
{
   STATIC_ASSERT(IRIS_MEMZONE_OTHER_START   > IRIS_MEMZONE_DYNAMIC_START);
   STATIC_ASSERT(IRIS_MEMZONE_DYNAMIC_START > IRIS_MEMZONE_SURFACE_START);
   STATIC_ASSERT(IRIS_MEMZONE_SURFACE_START > IRIS_MEMZONE_BINDER_START);
   STATIC_ASSERT(IRIS_MEMZONE_BINDER_START  > IRIS_MEMZONE_SHADER_START);
   STATIC_ASSERT(IRIS_BORDER_COLOR_POOL_ADDRESS == IRIS_MEMZONE_DYNAMIC_START);

   if (address >= IRIS_MEMZONE_OTHER_START)
      return IRIS_MEMZONE_OTHER;

   if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
      return IRIS_MEMZONE_BORDER_COLOR_POOL;

   if (address > IRIS_MEMZONE_DYNAMIC_START)
      return IRIS_MEMZONE_DYNAMIC;

   if (address >= IRIS_MEMZONE_SURFACE_START)
      return IRIS_MEMZONE_SURFACE;

   if (address >= IRIS_MEMZONE_BINDER_START)
      return IRIS_MEMZONE_BINDER;

   return IRIS_MEMZONE_SHADER;
}

static uint64_t
bucket_vma_alloc(struct iris_bufmgr *bufmgr,
                 struct bo_cache_bucket *bucket,
                 enum iris_memory_zone memzone)
{
   struct util_dynarray *vma_list = &bucket->vma_list[memzone];
   struct vma_bucket_node *node;

   if (vma_list->size == 0) {
      /* This bucket allocator is out of space - allocate a new block of
       * memory for 64 blocks from a larger allocator (either a larger
       * bucket or util_vma).
       *
       * We align the address to the node size (64 blocks) so that
       * bucket_vma_free can easily compute the starting address of this
       * block by rounding any address we return down to the node size.
       *
       * Set the first bit used, and return the start address.
       */
      const uint64_t node_size = 64ull * bucket->size;
      node = util_dynarray_grow(vma_list, sizeof(struct vma_bucket_node));

      if (unlikely(!node))
         return 0ull;

      uint64_t addr = vma_alloc(bufmgr, memzone, node_size, node_size);
      node->start_address = gen_48b_address(addr);
      node->bitmap = ~1ull;
      return node->start_address;
   }

   /* Pick any bit from any node - they're all the right size and free. */
   node = util_dynarray_top_ptr(vma_list, struct vma_bucket_node);
   int bit = ffsll(node->bitmap) - 1;
   assert(bit >= 0 && bit <= 63);

   /* Reserve the memory by clearing the bit. */
   assert((node->bitmap & (1ull << bit)) != 0ull);
   node->bitmap &= ~(1ull << bit);

   uint64_t addr = node->start_address + bit * bucket->size;

   /* If this node is now completely full, remove it from the free list. */
   if (node->bitmap == 0ull) {
      (void) util_dynarray_pop(vma_list, struct vma_bucket_node);
   }

   return addr;
}

static void
bucket_vma_free(struct bo_cache_bucket *bucket, uint64_t address)
{
   enum iris_memory_zone memzone = memzone_for_address(address);
   struct util_dynarray *vma_list = &bucket->vma_list[memzone];
   const uint64_t node_bytes = 64ull * bucket->size;
   struct vma_bucket_node *node = NULL;

   /* bucket_vma_alloc allocates 64 blocks at a time, and aligns it to
    * that 64 block size.  So, we can round down to get the starting address.
    */
   uint64_t start = (address / node_bytes) * node_bytes;

   /* Dividing the offset from start by bucket size gives us the bit index. */
   int bit = (address - start) / bucket->size;

   assert(start + bit * bucket->size == address);

   util_dynarray_foreach(vma_list, struct vma_bucket_node, cur) {
      if (cur->start_address == start) {
         node = cur;
         break;
      }
   }

   if (!node) {
      /* No node - the whole group of 64 blocks must have been in-use. */
      node = util_dynarray_grow(vma_list, sizeof(struct vma_bucket_node));

      if (unlikely(!node))
         return; /* bogus, leaks some GPU VMA, but nothing we can do... */

      node->start_address = start;
      node->bitmap = 0ull;
   }

   /* Set the bit to return the memory. */
   assert((node->bitmap & (1ull << bit)) == 0ull);
   node->bitmap |= 1ull << bit;

   /* The block might be entirely free now, and if so, we could return it
    * to the larger allocator.  But we may as well hang on to it, in case
    * we get more allocations at this block size.
    */
}

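/* Worked example for bucket_vma_free (illustrative): with a 4096-byte
 * bucket, node_bytes is 64 * 4096 == 0x40000.  Freeing address 0x50003000
 * rounds down to start == 0x50000000, and bit == 0x3000 / 4096 == 3, so
 * bit 3 of that node's bitmap is marked free again.
 */
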
static struct bo_cache_bucket *
get_bucket_allocator(struct iris_bufmgr *bufmgr,
                     enum iris_memory_zone memzone,
                     uint64_t size)
{
   /* Skip using the bucket allocator for very large sizes, as it allocates
    * 64 of them and this can balloon rather quickly.
    */
   if (size > 1024 * PAGE_SIZE)
      return NULL;

   struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);

   if (bucket && bucket->size == size)
      return bucket;

   return NULL;
}

/**
 * Allocate a section of virtual memory for a buffer, assigning an address.
 *
 * This uses either the bucket allocator for the given size, or the large
 * object allocator (util_vma).
 */
static uint64_t
vma_alloc(struct iris_bufmgr *bufmgr,
          enum iris_memory_zone memzone,
          uint64_t size,
          uint64_t alignment)
{
   if (memzone == IRIS_MEMZONE_BORDER_COLOR_POOL)
      return IRIS_BORDER_COLOR_POOL_ADDRESS;

   /* The binder handles its own allocations.  Return non-zero here. */
   if (memzone == IRIS_MEMZONE_BINDER)
      return IRIS_MEMZONE_BINDER_START;

   struct bo_cache_bucket *bucket =
      get_bucket_allocator(bufmgr, memzone, size);
   uint64_t addr;

   if (bucket) {
      addr = bucket_vma_alloc(bufmgr, bucket, memzone);
   } else {
      addr = util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size,
                                 alignment);
   }

   assert((addr >> 48ull) == 0);
   assert((addr % alignment) == 0);

   return gen_canonical_address(addr);
}

static void
vma_free(struct iris_bufmgr *bufmgr,
         uint64_t address,
         uint64_t size)
{
   if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
      return;

   /* Un-canonicalize the address. */
   address = gen_48b_address(address);

   if (address == 0ull)
      return;

   enum iris_memory_zone memzone = memzone_for_address(address);

   /* The binder handles its own allocations. */
   if (memzone == IRIS_MEMZONE_BINDER)
      return;

   struct bo_cache_bucket *bucket =
      get_bucket_allocator(bufmgr, memzone, size);

   if (bucket) {
      bucket_vma_free(bucket, address);
   } else {
      util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
   }
}

int
iris_bo_busy(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };

   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0) {
      bo->idle = !busy.busy;
      return busy.busy;
   }
   return false;
}

int
iris_bo_madvise(struct iris_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv = {
      .handle = bo->gem_handle,
      .madv = state,
      .retained = 1,
   };

   drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}

/* drop the oldest entries that have been purged by the kernel */
static void
iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
                           struct bo_cache_bucket *bucket)
{
   list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
      if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      bo_free(bo);
   }
}

static struct iris_bo *
bo_calloc(void)
{
   struct iris_bo *bo = calloc(1, sizeof(*bo));
   if (bo) {
      bo->hash = _mesa_hash_pointer(bo);
   }
   return bo;
}

static struct iris_bo *
bo_alloc_internal(struct iris_bufmgr *bufmgr,
                  const char *name,
                  uint64_t size,
                  enum iris_memory_zone memzone,
                  unsigned flags,
                  uint32_t tiling_mode,
                  uint32_t stride)
{
   struct iris_bo *bo;
   unsigned int page_size = getpagesize();
   int ret;
   struct bo_cache_bucket *bucket;
   bool alloc_from_cache;
   uint64_t bo_size;
   bool zeroed = false;

   if (flags & BO_ALLOC_ZEROED)
      zeroed = true;

   if ((flags & BO_ALLOC_COHERENT) && !bufmgr->has_llc) {
      bo_size = MAX2(ALIGN(size, page_size), page_size);
      bucket = NULL;
      mtx_lock(&bufmgr->lock);
      goto skip_cache;
   }

   /* Round the allocated size up to a power of two number of pages. */
   bucket = bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      bo_size = MAX2(ALIGN(size, page_size), page_size);
   } else {
      bo_size = bucket->size;
   }

   mtx_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      /* If the last BO in the cache is idle, then reuse it.  Otherwise,
       * allocate a fresh buffer to avoid stalling.
       */
      bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
      if (!iris_bo_busy(bo)) {
         alloc_from_cache = true;
         list_del(&bo->head);
      }

      if (alloc_from_cache) {
         if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
            bo_free(bo);
            iris_bo_cache_purge_bucket(bufmgr, bucket);
            goto retry;
         }

         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
            bo_free(bo);
            goto retry;
         }

         if (zeroed) {
            void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
            if (!map) {
               bo_free(bo);
               goto retry;
            }
            memset(map, 0, bo_size);
         }
      }
   }

   if (alloc_from_cache) {
      /* If the cached BO isn't in the right memory zone, free the old
       * memory and assign it a new address.
       */
      if (memzone != memzone_for_address(bo->gtt_offset)) {
         vma_free(bufmgr, bo->gtt_offset, bo->size);
         bo->gtt_offset = 0ull;
      }
   } else {
skip_cache:
      bo = bo_calloc();
      if (!bo)
         goto err;

      bo->size = bo_size;
      bo->idle = true;

      struct drm_i915_gem_create create = { .size = bo_size };

      /* All new BOs we get from the kernel are zeroed, so we don't need to
       * worry about that here.
       */
      ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;

      bo->bufmgr = bufmgr;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;

      if (bo_set_tiling_internal(bo, tiling_mode, stride))
         goto err_free;

      /* Calling set_domain() will allocate pages for the BO outside of the
       * struct mutex lock in the kernel, which is more efficient than waiting
       * to create them during the first execbuf that uses the BO.
       */
      struct drm_i915_gem_set_domain sd = {
         .handle = bo->gem_handle,
         .read_domains = I915_GEM_DOMAIN_CPU,
         .write_domain = 0,
      };

      if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
         goto err_free;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = bucket && bufmgr->bo_reuse;
   bo->cache_coherent = bufmgr->has_llc;

   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;

   /* By default, capture all driver-internal buffers like shader kernels,
    * surface states, dynamic states, border colors, and so on.
    */
   if (memzone < IRIS_MEMZONE_OTHER)
      bo->kflags |= EXEC_OBJECT_CAPTURE;

   if (bo->gtt_offset == 0ull) {
      bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);

      if (bo->gtt_offset == 0ull)
         goto err_free;
   }

   mtx_unlock(&bufmgr->lock);

   if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
      struct drm_i915_gem_caching arg = {
         .handle = bo->gem_handle,
         .caching = 1,
      };
      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
         bo->cache_coherent = true;
         bo->reusable = false;
      }
   }

   DBG("bo_create: buf %d (%s) (%s memzone) %llub\n", bo->gem_handle,
       bo->name, memzone_name(memzone), (unsigned long long) size);

   return bo;

err_free:
   bo_free(bo);
err:
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

struct iris_bo *
iris_bo_alloc(struct iris_bufmgr *bufmgr,
              const char *name,
              uint64_t size,
              enum iris_memory_zone memzone)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            0, I915_TILING_NONE, 0);
}

struct iris_bo *
iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
                    uint64_t size, enum iris_memory_zone memzone,
                    uint32_t tiling_mode, uint32_t pitch, unsigned flags)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            flags, tiling_mode, pitch);
}

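/**
 * Wraps existing user memory in a BO via I915_GEM_USERPTR, so the GPU can
 * access an application-owned allocation directly.
 */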
struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
                       void *ptr, size_t size,
                       enum iris_memory_zone memzone)
{
   struct iris_bo *bo;

   bo = bo_calloc();
   if (!bo)
      return NULL;

   struct drm_i915_gem_userptr arg = {
      .user_ptr = (uintptr_t)ptr,
      .user_size = size,
   };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
      goto err_free;
   bo->gem_handle = arg.handle;

   /* Check the buffer for validity before we try and use it in a batch */
   struct drm_i915_gem_set_domain sd = {
      .handle = bo->gem_handle,
      .read_domains = I915_GEM_DOMAIN_CPU,
   };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
      goto err_close;

   bo->name = name;
   bo->size = size;
   bo->map_cpu = ptr;

   bo->bufmgr = bufmgr;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
   if (bo->gtt_offset == 0ull)
      goto err_close;

   p_atomic_set(&bo->refcount, 1);
   bo->userptr = true;
   bo->cache_coherent = true;
   bo->idle = true;

   return bo;

err_close:
   drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &bo->gem_handle);
err_free:
   free(bo);
   return NULL;
}

/**
 * Returns an iris_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct iris_bo *
iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                             const char *name, unsigned int handle)
{
   struct iris_bo *bo;

   /* At the moment most applications only have a few named BOs.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named.  And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   mtx_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   struct drm_gem_open open_arg = { .name = handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
bo_free(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->map_cpu && !bo->userptr) {
      VG_NOACCESS(bo->map_cpu, bo->size);
      munmap(bo->map_cpu, bo->size);
   }
   if (bo->map_wc) {
      VG_NOACCESS(bo->map_wc, bo->size);
      munmap(bo->map_wc, bo->size);
   }
   if (bo->map_gtt) {
      VG_NOACCESS(bo->map_gtt, bo->size);
      munmap(bo->map_gtt, bo->size);
   }

   if (bo->external) {
      struct hash_entry *entry;

      if (bo->global_name) {
         entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
         _mesa_hash_table_remove(bufmgr->name_table, entry);
      }

      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
      _mesa_hash_table_remove(bufmgr->handle_table, entry);
   }

   /* Close this object */
   struct drm_gem_close close = { .handle = bo->gem_handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   vma_free(bo->bufmgr, bo->gtt_offset, bo->size);

   free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);

         bo_free(bo);
      }
   }

   bufmgr->time = time;
}

static void
bo_unreference_final(struct iris_bo *bo, time_t time)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   bucket = NULL;
   if (bo->reusable)
      bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bucket && iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;
      bo->name = NULL;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}

void
iris_bo_unreference(struct iris_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct iris_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      mtx_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      mtx_unlock(&bufmgr->lock);
   }
}

static void
bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
                           struct iris_bo *bo,
                           const char *action)
{
   bool busy = dbg && !bo->idle;
   double elapsed = unlikely(busy) ? -get_time() : 0.0;

   iris_bo_wait_rendering(bo);

   if (unlikely(busy)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */ {
         perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
      }
   }
}

static void
print_flags(unsigned flags)
{
   if (flags & MAP_READ)
      DBG("READ ");
   if (flags & MAP_WRITE)
      DBG("WRITE ");
   if (flags & MAP_ASYNC)
      DBG("ASYNC ");
   if (flags & MAP_PERSISTENT)
      DBG("PERSISTENT ");
   if (flags & MAP_COHERENT)
      DBG("COHERENT ");
   if (flags & MAP_RAW)
      DBG("RAW ");
   DBG("\n");
}

static void *
iris_bo_map_cpu(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* We disallow CPU maps for writing to non-coherent buffers, as the
    * CPU map can become invalidated when a batch is flushed out, which
    * can happen at unpredictable times.  You should use WC maps instead.
    */
   assert(bo->cache_coherent || !(flags & MAP_WRITE));

   if (!bo->map_cpu) {
      DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }
      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_cpu);

   DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
       bo->map_cpu);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
   }

   if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
      /* If we're reusing an existing CPU mapping, the CPU caches may
       * contain stale data from the last time we read from that mapping.
       * (With the BO cache, it might even be data from a previous buffer!)
       * Even if it's a brand new mapping, the kernel may have zeroed the
       * buffer via CPU writes.
       *
       * We need to invalidate those cachelines so that we see the latest
       * contents, and so long as we only read from the CPU mmap we do not
       * need to write those cachelines back afterwards.
       *
       * On LLC, the empirical evidence suggests that writes from the GPU
       * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
       * cachelines.  (Other reads, such as the display engine, bypass the
       * LLC entirely requiring us to keep dirty pixels for the scanout
       * out of any cache.)
       */
      gen_invalidate_range(bo->map_cpu, bo->size);
   }

   return bo->map_cpu;
}

static void *
iris_bo_map_wc(struct pipe_debug_callback *dbg,
               struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->map_wc) {
      DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
         .flags = I915_MMAP_WC,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_wc);

   DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "WC mapping");
   }

   return bo->map_wc;
}

/**
 * Perform an uncached mapping via the GTT.
 *
 * Write access through the GTT is not quite fully coherent.  On low power
 * systems especially, like modern Atoms, we can observe reads from RAM before
 * the write via GTT has landed.  A write memory barrier that flushes the Write
 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
 * read after the write as the GTT write suffers a small delay through the GTT
 * indirection.  The kernel uses an uncached mmio read to ensure the GTT write
 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
 * flushes prior to execbuf submission.  However, if we are not informing the
 * kernel about our GTT writes, it will not flush before earlier access, such
 * as when using the cmdparser.  Similarly, we need to be careful if we should
 * ever issue a CPU read immediately following a GTT write.
 *
 * Telling the kernel about write access also has one more important
 * side-effect.  Upon receiving notification about the write, it cancels any
 * scanout buffering for FBC/PSR and friends.  Later FBC/PSR is then flushed by
 * either SW_FINISH or DIRTYFB.  The presumption is that we never write to the
 * actual scanout via an mmapping, only to a backbuffer and so all the FBC/PSR
 * tracking is handled on the buffer exchange instead.
 */
static void *
iris_bo_map_gtt(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->map_gtt == NULL) {
      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };

      /* Get the fake offset back... */
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* and mmap it. */
      void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (map == MAP_FAILED) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
       * already intercept this mmap call.  However, for consistency between
       * all the mmap paths, we mark the pointer as defined now and mark it
       * as inaccessible afterwards.
       */
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_gtt);

   DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
   }

   return bo->map_gtt;
}

static bool
can_map_cpu(struct iris_bo *bo, unsigned flags)
{
   if (bo->cache_coherent)
      return true;

   /* Even if the buffer itself is not cache-coherent (such as a scanout), on
    * an LLC platform reads always are coherent (as they are performed via the
    * central system agent).  It is just the writes that we need to take special
    * care to ensure that land in main memory and not stick in the CPU cache.
    */
   if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
      return true;

   /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
    * across batch flushes where the kernel will change cache domains of the
    * bo, invalidating continued access to the CPU mmap on non-LLC device.
    *
    * Similarly, ASYNC typically means that the buffer will be accessed via
    * both the CPU and the GPU simultaneously.  Batches may be executed that
    * use the BO even while it is mapped.  While OpenGL technically disallows
    * most drawing while non-persistent mappings are active, we may still use
    * the GPU for blits or other operations, causing batches to happen at
    * inconvenient times.
    *
    * If RAW is set, we expect the caller to be able to handle a WC buffer
    * more efficiently than the involuntary clflushes.
    */
   if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC | MAP_RAW))
      return false;

   return !(flags & MAP_WRITE);
}

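/* Summary of how iris_bo_map() below picks a mapping path:
 *
 *   tiled and not MAP_RAW              -> GTT mapping (does detiling)
 *   can_map_cpu()                      -> CPU (WB) mapping
 *   otherwise                          -> WC mapping
 *   CPU/WC attempt failed, not MAP_RAW -> GTT mapping as a fallback
 */
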
void *
iris_bo_map(struct pipe_debug_callback *dbg,
            struct iris_bo *bo, unsigned flags)
{
   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
      return iris_bo_map_gtt(dbg, bo, flags);

   void *map;

   if (can_map_cpu(bo, flags))
      map = iris_bo_map_cpu(dbg, bo, flags);
   else
      map = iris_bo_map_wc(dbg, bo, flags);

   /* Allow the attempt to fail by falling back to the GTT where necessary.
    *
    * Not every buffer can be mmaped directly using the CPU (or WC), for
    * example buffers that wrap stolen memory or are imported from other
    * devices.  For those, we have little choice but to use a GTT mmapping.
    * However, if we use a slow GTT mmapping for reads where we expected fast
    * access, that order of magnitude difference in throughput will be clearly
    * expressed by angry users.
    *
    * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
    */
   if (!map && !(flags & MAP_RAW)) {
      perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
                 bo->name, flags);
      map = iris_bo_map_gtt(dbg, bo, flags);
   }

   return map;
}

1250 iris_bo_wait_rendering(struct iris_bo
*bo
)
1252 /* We require a kernel recent enough for WAIT_IOCTL support.
1253 * See intel_init_bufmgr()
1255 iris_bo_wait(bo
, -1);
/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful ie. the last batch referencing the
 * object has completed within the allotted time.  Otherwise some negative return
 * value describes the error.  Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to iris_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time.  Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait).  This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call.  The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle.  Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise, upgrade to latest stable kernels if this is the case.
 */
int
iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* If we know it's idle, don't bother with the kernel round trip */
   if (bo->idle && !bo->external)
      return 0;

   struct drm_i915_gem_wait wait = {
      .bo_handle = bo->gem_handle,
      .timeout_ns = timeout_ns,
   };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return -errno;

   bo->idle = true;

   return ret;
}

void
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
{
   mtx_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }

      for (int z = 0; z < IRIS_MEMZONE_COUNT; z++)
         util_dynarray_fini(&bucket->vma_list[z]);
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
      if (z != IRIS_MEMZONE_BINDER)
         util_vma_heap_finish(&bufmgr->vma_allocator[z]);
   }

   free(bufmgr);
}

static int
bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drm_ioctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}

void
iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
                   uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
}

struct iris_bo *
iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
{
   uint32_t handle;
   struct iris_bo *bo;

   mtx_lock(&bufmgr->lock);
   int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
          strerror(errno));
      mtx_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guess size). */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;
   bo->name = "prime";
   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
iris_bo_make_external_locked(struct iris_bo *bo)
{
   if (!bo->external) {
      _mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
      bo->external = true;
   }
}

static void
iris_bo_make_external(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->external)
      return;

   mtx_lock(&bufmgr->lock);
   iris_bo_make_external_locked(bo);
   mtx_unlock(&bufmgr->lock);
}

int
iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   iris_bo_make_external(bo);

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}

uint32_t
iris_bo_export_gem_handle(struct iris_bo *bo)
{
   iris_bo_make_external(bo);

   return bo->gem_handle;
}

int
iris_bo_flink(struct iris_bo *bo, uint32_t *name)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink = { .handle = bo->gem_handle };

      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      mtx_lock(&bufmgr->lock);
      if (!bo->global_name) {
         iris_bo_make_external_locked(bo);
         bo->global_name = flink.name;
         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      mtx_unlock(&bufmgr->lock);

      bo->reusable = false;
   }

   *name = bo->global_name;
   return 0;
}

static void
add_bucket(struct iris_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   for (int z = 0; z < IRIS_MEMZONE_COUNT; z++)
      util_dynarray_init(&bufmgr->cache_bucket[i].vma_list[z], NULL);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;

   assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
}

static void
init_cache_buckets(struct iris_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, PAGE_SIZE);
   add_bucket(bufmgr, PAGE_SIZE * 2);
   add_bucket(bufmgr, PAGE_SIZE * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}

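/* The resulting ladder (illustrative): 1, 2, and 3 pages, then for each
 * power of two from 16 KiB through 64 MiB, the power of two itself plus
 * three steps of 1/4 above it - e.g. 16, 20, 24, 28 KiB, then 32, 40, 48,
 * 56 KiB, and so on.
 */
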
uint32_t
iris_create_hw_context(struct iris_bufmgr *bufmgr)
{
   struct drm_i915_gem_context_create create = { };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret != 0) {
      DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
      return 0;
   }

   return create.ctx_id;
}

int
iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
                             uint32_t ctx_id,
                             int priority)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = ctx_id,
      .param = I915_CONTEXT_PARAM_PRIORITY,
      .value = priority,
   };
   int err;

   err = 0;
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;

   return err;
}

void
iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
{
   struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };

   if (ctx_id != 0 &&
       drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
      fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
              strerror(errno));
   }
}

int
iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read reg_read = { .offset = offset };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

   *result = reg_read.val;
   return ret;
}

static uint64_t
iris_gtt_size(int fd)
{
   /* We use the default (already allocated) context to determine
    * the default configuration of the virtual address space.
    */
   struct drm_i915_gem_context_param p = {
      .param = I915_CONTEXT_PARAM_GTT_SIZE,
   };
   if (!drm_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
      return p.value;

   return 0;
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
struct iris_bufmgr *
iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
{
   uint64_t gtt_size = iris_gtt_size(fd);
   if (gtt_size <= IRIS_MEMZONE_OTHER_START)
      return NULL;

   struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel.  If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this! Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = fd;

   if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
      free(bufmgr);
      return NULL;
   }

   bufmgr->has_llc = devinfo->has_llc;

   STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
   const uint64_t _4GB = 1ull << 32;

   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
                      PAGE_SIZE, _4GB - PAGE_SIZE);
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
                      IRIS_MEMZONE_SURFACE_START,
                      _4GB - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
                      IRIS_MEMZONE_DYNAMIC_START + IRIS_BORDER_COLOR_POOL_SIZE,
                      _4GB - IRIS_BORDER_COLOR_POOL_SIZE);
   util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
                      IRIS_MEMZONE_OTHER_START,
                      gtt_size - IRIS_MEMZONE_OTHER_START);

   bufmgr->bo_reuse = env_var_as_boolean("bo_reuse", true);

   init_cache_buckets(bufmgr);

   bufmgr->name_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);

   return bufmgr;
}