/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <unistd.h>
#include <limits.h>
#include <linux/futex.h>
#include <linux/memfd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#include "anv_private.h"

#include "util/hash_table.h"
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({                        \
   VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr)));  \
   __typeof(*(__ptr)) __val = *(__ptr);                   \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr))); \
   __val;                                                 \
})
#define VG_NOACCESS_WRITE(__ptr, __val) ({                 \
   VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr))); \
   *(__ptr) = (__val);                                     \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));  \
})
#else
#define VG_NOACCESS_READ(__ptr) (*(__ptr))
#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
#endif
/* Design goals:
 *
 *  - Lock free (except when resizing underlying bos)
 *
 *  - Constant time allocation with typically only one atomic
 *
 *  - Multiple allocation sizes without fragmentation
 *
 *  - Can grow while keeping addresses and offset of contents stable
 *
 *  - All allocations within one bo so we can point one of the
 *    STATE_BASE_ADDRESS pointers at it.
 *
 * The overall design is a two-level allocator: top level is a fixed size, big
 * block (8k) allocator, which operates out of a bo.  Allocation is done by
 * either pulling a block from the free list or growing the used range of the
 * bo.  Growing the range may run out of space in the bo which we then need to
 * grow.  Growing the bo is tricky in a multi-threaded, lockless environment:
 * we need to keep all pointers and contents in the old map valid.  GEM bos in
 * general can't grow, but we use a trick: we create a memfd and use ftruncate
 * to grow it as necessary.  We mmap the new size and then create a gem bo for
 * it using the new gem userptr ioctl.  Without heavy-handed locking around
 * our allocation fast-path, there isn't really a way to munmap the old mmap,
 * so we just keep it around until garbage collection time.  While the block
 * allocator is lockless for normal operations, we block other threads trying
 * to allocate while we're growing the map.  It shouldn't happen often, and
 * growing is fast anyway.
 *
 * At the next level we can use various sub-allocators.  The state pool is a
 * pool of smaller, fixed size objects, which operates much like the block
 * pool.  It uses a free list for freeing objects, but when it runs out of
 * space it just allocates a new block from the block pool.  This allocator is
 * intended for longer lived state objects such as SURFACE_STATE and most
 * other persistent state objects in the API.  We may need to track more info
 * with these objects and a pointer back to the CPU object (e.g. VkImage).  In
 * those cases we just allocate a slightly bigger object and put the extra
 * state after the GPU state object.
 *
 * The state stream allocator works similarly to how the i965 DRI driver
 * streams all its state.  Even with Vulkan, we need to emit transient state
 * (whether surface state base or dynamic state base), and for that we can
 * just get a block and fill it up.  These cases are local to a command buffer
 * and the sub-allocator need not be thread safe.  The streaming allocator
 * gets a new block when it runs out of space and chains them together so they
 * can be easily freed.
 */
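
/* Illustrative usage sketch (not a quote from any caller; names other than
 * the allocator entry points defined in this file are placeholders):
 *
 *    struct anv_state state = anv_state_pool_alloc(pool, size, align);
 *    memcpy(state.map, data, size);      // CPU write through the pool's map
 *    ... state.offset is the GPU-visible offset relative to the pool's bo ...
 *    anv_state_pool_free(pool, state);
 *
 * Command-buffer-local transient state goes through anv_state_stream_alloc()
 * instead and is released all at once by anv_state_stream_finish().
 */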
/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
 * We use it to indicate the free list is empty.
 */
#define EMPTY 1
struct anv_mmap_cleanup {
   void *map;
   size_t size;
   uint32_t gem_handle;
};

#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})
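
/* Thin syscall wrappers.  The futexes are what the pools below use to park
 * threads that race into a resize (see anv_block_pool_alloc_new and
 * anv_fixed_size_state_pool_alloc_new), and the memfd provides the resizable
 * backing store for the block pool.
 */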
static inline long
sys_futex(void *addr1, int op, int val1,
          struct timespec *timeout, void *addr2, int val3)
{
   return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
}

static inline int
futex_wake(uint32_t *addr, int count)
{
   return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0);
}

static inline int
futex_wait(uint32_t *addr, int32_t value)
{
   return sys_futex(addr, FUTEX_WAIT, value, NULL, NULL, 0);
}

static inline int
memfd_create(const char *name, unsigned int flags)
{
   return syscall(SYS_memfd_create, name, flags);
}
static inline uint32_t
ilog2_round_up(uint32_t value)
{
   assert(value != 0);  /* __builtin_clz(0) is undefined */
   return 32 - __builtin_clz(value - 1);
}

static inline uint32_t
round_to_power_of_two(uint32_t value)
{
   return 1 << ilog2_round_up(value);
}
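
/* Each free list head packs a 32-bit offset into the pool map together with a
 * generation count in one 64-bit word.  The count is bumped on every pop and
 * push so that the compare-and-swap below can detect the ABA case where the
 * same offset reappears at the head of the list with different contents.
 */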
static bool
anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
{
   union anv_free_list current, new, old;

   current.u64 = list->u64;
   while (current.offset != EMPTY) {
      /* We have to add a memory barrier here so that the list head (and
       * offset) gets read before we read the map pointer.  This way we
       * know that the map pointer is valid for the given offset at the
       * point where we read it.
       */
      __sync_synchronize();

      int32_t *next_ptr = *map + current.offset;
      new.offset = VG_NOACCESS_READ(next_ptr);
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
      if (old.u64 == current.u64) {
         *offset = current.offset;
         return true;
      }
      current = old;
   }

   return false;
}
static void
anv_free_list_push(union anv_free_list *list, void *map, int32_t offset,
                   uint32_t size, uint32_t count)
{
   union anv_free_list current, old, new;
   int32_t *next_ptr = map + offset;

   /* If we're returning more than one chunk, we need to build a chain to add
    * to the list.  Fortunately, we can do this without any atomics since we
    * own everything in the chain right now.  `offset` is left pointing to the
    * head of our chain list while `next_ptr` points to the tail.
    */
   for (uint32_t i = 1; i < count; i++) {
      VG_NOACCESS_WRITE(next_ptr, offset + i * size);
      next_ptr = map + offset + i * size;
   }

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, current.offset);
      new.offset = offset;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
   } while (old.u64 != current.u64);
}
/* All pointers in the ptr_free_list are assumed to be page-aligned.  This
 * means that the bottom 12 bits should all be zero.
 */
#define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
#define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff))
#define PFL_PACK(ptr, count) ({                                           \
   (void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff));  \
})
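
/* Example: with ptr == 0x7f2a00042000 and count == 5, PFL_PACK() yields
 * 0x7f2a00042005; PFL_PTR() recovers the page-aligned pointer and PFL_COUNT()
 * the 5.  The counter only needs to make it unlikely that a recycled pointer
 * produces the same packed value, which is all the ABA defense requires.
 */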
static bool
anv_ptr_free_list_pop(void **list, void **elem)
{
   void *current = *list;
   while (PFL_PTR(current) != NULL) {
      void **next_ptr = PFL_PTR(current);
      void *new_ptr = VG_NOACCESS_READ(next_ptr);
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(new_ptr, new_count);
      void *old = __sync_val_compare_and_swap(list, current, new);
      if (old == current) {
         *elem = PFL_PTR(current);
         return true;
      }
      current = old;
   }

   return false;
}
static void
anv_ptr_free_list_push(void **list, void *elem)
{
   void *old, *current;
   void **next_ptr = elem;

   /* The pointer-based free list requires that the pointer be
    * page-aligned.  This is because we use the bottom 12 bits of the
    * pointer to store a counter to solve the ABA concurrency problem.
    */
   assert(((uintptr_t)elem & 0xfff) == 0);

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current));
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(elem, new_count);
      old = __sync_val_compare_and_swap(list, current, new);
   } while (old != current);
}
static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
                            uint32_t center_bo_offset, uint32_t size);
VkResult
anv_block_pool_init(struct anv_block_pool *pool,
                    struct anv_device *device,
                    uint32_t initial_size)
{
   VkResult result;

   pool->device = device;
   anv_bo_init(&pool->bo, 0, 0);

   pool->fd = memfd_create("block pool", MFD_CLOEXEC);
   if (pool->fd == -1)
      return vk_error(VK_ERROR_INITIALIZATION_FAILED);

   /* Just make it 2GB up-front.  The Linux kernel won't actually back it
    * with pages until we either map and fault on one of them or we use
    * userptr and send a chunk of it off to the GPU.
    */
   if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   if (!u_vector_init(&pool->mmap_cleanups,
                      round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
                      128)) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   pool->state.next = 0;
   pool->state.end = 0;
   pool->back_state.next = 0;
   pool->back_state.end = 0;

   result = anv_block_pool_expand_range(pool, 0, initial_size);
   if (result != VK_SUCCESS)
      goto fail_mmap_cleanups;

   return VK_SUCCESS;

 fail_mmap_cleanups:
   u_vector_finish(&pool->mmap_cleanups);
 fail_fd:
   close(pool->fd);

   return result;
}
void
anv_block_pool_finish(struct anv_block_pool *pool)
{
   struct anv_mmap_cleanup *cleanup;

   u_vector_foreach(cleanup, &pool->mmap_cleanups) {
      if (cleanup->map)
         munmap(cleanup->map, cleanup->size);
      if (cleanup->gem_handle)
         anv_gem_close(pool->device, cleanup->gem_handle);
   }

   u_vector_finish(&pool->mmap_cleanups);

   close(pool->fd);
}
#define PAGE_SIZE 4096
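
/* The memfd is mapped so that BLOCK_POOL_MEMFD_CENTER lands at pool->map + 0:
 * the front of the pool grows upward from offset 0 while the back grows
 * downward, so offsets handed out by the back allocator are negative but
 * still valid relative to pool->map (see anv_block_pool_alloc_back below).
 */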
static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
                            uint32_t center_bo_offset, uint32_t size)
{
   void *map;
   uint32_t gem_handle;
   struct anv_mmap_cleanup *cleanup;

   /* Assert that we only ever grow the pool */
   assert(center_bo_offset >= pool->back_state.end);
   assert(size - center_bo_offset >= pool->state.end);

   /* Assert that we don't go outside the bounds of the memfd */
   assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
   assert(size - center_bo_offset <=
          BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);

   cleanup = u_vector_add(&pool->mmap_cleanups);
   if (!cleanup)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *cleanup = ANV_MMAP_CLEANUP_INIT;

   /* Just leak the old map until we destroy the pool.  We can't munmap it
    * without races or imposing locking on the block allocate fast path.  On
    * the whole the leaked maps add up to less than the size of the current
    * map.  MAP_POPULATE seems like the right thing to do, but we should try
    * to get some numbers.
    */
   map = mmap(NULL, size, PROT_READ | PROT_WRITE,
              MAP_SHARED | MAP_POPULATE, pool->fd,
              BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
   if (map == MAP_FAILED)
      return vk_errorf(VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");

   gem_handle = anv_gem_userptr(pool->device, map, size);
   if (gem_handle == 0) {
      munmap(map, size);
      return vk_errorf(VK_ERROR_TOO_MANY_OBJECTS, "userptr failed: %m");
   }

   cleanup->map = map;
   cleanup->size = size;
   cleanup->gem_handle = gem_handle;

   /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
    * I915_CACHING_NONE on non-LLC platforms.  However, userptr objects are
    * always created as I915_CACHING_CACHED, which on non-LLC means
    * snooped.  That can be useful but comes with a bit of overhead.  Since
    * we're explicitly clflushing and don't want the overhead we need to turn
    * it off.
    */
   if (!pool->device->info.has_llc) {
      anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
      anv_gem_set_domain(pool->device, gem_handle,
                         I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
   }

   /* Now that we successfully allocated everything, we can write the new
    * values back into pool.
    */
   pool->map = map + center_bo_offset;
   pool->center_bo_offset = center_bo_offset;

   /* For block pool BOs we have to be a bit careful about where we place them
    * in the GTT.  There are two documented workarounds for state base address
    * placement : Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
    * which state that those two base addresses do not support 48-bit
    * addresses and need to be placed in the bottom 32-bit range.
    * Unfortunately, this is not quite accurate.
    *
    * The real problem is that we always set the size of our state pools in
    * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
    * likely significantly smaller.  We do this because we do not know at the
    * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
    * the pool during command buffer building so we don't actually have a
    * valid final size.  If the address + size, as seen by STATE_BASE_ADDRESS,
    * overflows 48 bits, the GPU appears to treat all accesses to the buffer
    * as being out of bounds and returns zero.  For dynamic state, this
    * usually just leads to rendering corruptions, but shaders that are all
    * zero hang the GPU immediately.
    *
    * The easiest solution is to do exactly what the bogus workarounds say to
    * do: restrict these buffers to 32-bit addresses.  We could also pin the
    * BO to some particular location of our choosing, but that's significantly
    * more work than just not setting a flag.  So, we explicitly DO NOT set
    * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
    * hard work for us.
    */
   anv_bo_init(&pool->bo, gem_handle, size);

   return VK_SUCCESS;
}
/** Grows and re-centers the block pool.
 *
 * We grow the block pool in one or both directions in such a way that the
 * following conditions are met:
 *
 *  1) The size of the entire pool is always a power of two.
 *
 *  2) The pool can only grow, at either end.  Neither end can ever get
 *     shortened.
 *
 *  3) At the end of the allocation, we have about twice as much space
 *     allocated for each end as we have used.  This way the pool doesn't
 *     grow too far in one direction or the other.
 *
 *  4) If _alloc_back() has never been called, then the back portion of
 *     the pool retains a size of zero.  (This makes it easier for users of
 *     the block pool that only want a one-sided pool.)
 *
 *  5) We have enough space allocated for at least one more block in
 *     whichever side `state` points to.
 *
 *  6) The center of the pool is always aligned to both the block_size of
 *     the pool and a 4K CPU page.
 */
static uint32_t
anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
{
   VkResult result = VK_SUCCESS;

   pthread_mutex_lock(&pool->device->mutex);

   assert(state == &pool->state || state == &pool->back_state);

   /* Gather a little usage information on the pool.  Since we may have
    * threads waiting in queue to get some storage while we resize, it's
    * actually possible that total_used will be larger than old_size.  In
    * particular, block_pool_alloc() increments state->next prior to
    * calling block_pool_grow, so this ensures that we get enough space for
    * whichever side tries to grow the pool.
    *
    * We align to a page size because it makes it easier to do our
    * calculations later in such a way that we stay page-aligned.
    */
   uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
   uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
   uint32_t total_used = front_used + back_used;

   assert(state == &pool->state || back_used > 0);

   uint32_t old_size = pool->bo.size;

   /* The block pool is always initialized to a nonzero size and this function
    * is always called after initialization.
    */
   assert(old_size > 0);

   /* The back_used and front_used may actually be smaller than the actual
    * requirement because they are based on the next pointers which are
    * updated prior to calling this function.
    */
   uint32_t back_required = MAX2(back_used, pool->center_bo_offset);
   uint32_t front_required = MAX2(front_used, old_size - pool->center_bo_offset);

   if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
      /* If we're in this case then this isn't the first allocation and we
       * already have enough space on both sides to hold double what we
       * have allocated.  There's nothing for us to do.
       */
      goto done;
   }

   uint32_t size = old_size * 2;
   while (size < back_required + front_required)
      size *= 2;

   assert(size > pool->bo.size);

   /* We compute a new center_bo_offset such that, when we double the size
    * of the pool, we maintain the ratio of how much is used by each side.
    * This way things should remain more-or-less balanced.
    */
   uint32_t center_bo_offset;
   if (back_used == 0) {
      /* If we're in this case then we have never called alloc_back().  In
       * this case, we want to keep the offset at 0 to make things as simple
       * as possible for users that don't care about back allocations.
       */
      center_bo_offset = 0;
   } else {
      /* Try to "center" the allocation based on how much is currently in
       * use on each side of the center line.
       */
      center_bo_offset = ((uint64_t)size * back_used) / total_used;

      /* Align down to a multiple of the page size */
      center_bo_offset &= ~(PAGE_SIZE - 1);

      assert(center_bo_offset >= back_used);

      /* Make sure we don't shrink the back end of the pool */
      if (center_bo_offset < pool->back_state.end)
         center_bo_offset = pool->back_state.end;

      /* Make sure that we don't shrink the front end of the pool */
      if (size - center_bo_offset < pool->state.end)
         center_bo_offset = size - pool->state.end;
   }

   assert(center_bo_offset % PAGE_SIZE == 0);
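
   /* Worked example (hypothetical numbers): with old_size = 32 KB,
    * back_used = 4 KB and front_used = 28 KB, neither side has twice its
    * usage free, so size doubles to 64 KB and center_bo_offset becomes
    * (64 KB * 4 KB) / 32 KB = 8 KB, which is already page-aligned and leaves
    * each side roughly twice what it currently has in use.
    */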

   result = anv_block_pool_expand_range(pool, center_bo_offset, size);

   if (pool->device->instance->physicalDevice.has_exec_async)
      pool->bo.flags |= EXEC_OBJECT_ASYNC;

done:
   pthread_mutex_unlock(&pool->device->mutex);

   if (result == VK_SUCCESS) {
      /* Return the appropriate new size.  This function never actually
       * updates state->next.  Instead, we let the caller do that because it
       * needs to do so in order to maintain its concurrency model.
       */
      if (state == &pool->state) {
         return pool->bo.size - pool->center_bo_offset;
      } else {
         assert(pool->center_bo_offset > 0);
         return pool->center_bo_offset;
      }
   } else {
      return 0;
   }
}
static uint32_t
anv_block_pool_alloc_new(struct anv_block_pool *pool,
                         struct anv_block_state *pool_state,
                         uint32_t block_size)
{
   struct anv_block_state state, old, new;

   while (1) {
      state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
      if (state.next + block_size <= state.end) {
         return state.next;
      } else if (state.next <= state.end) {
         /* We allocated the first block outside the pool so we have to grow
          * the pool.  pool_state->next acts as a mutex: threads who try to
          * allocate now will get block indexes above the current limit and
          * hit futex_wait below.
          */
         new.next = state.next + block_size;
         do {
            new.end = anv_block_pool_grow(pool, pool_state);
         } while (new.end < new.next);

         old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
         if (old.next != state.next)
            futex_wake(&pool_state->end, INT_MAX);
         return state.next;
      } else {
         futex_wait(&pool_state->end, state.end);
         continue;
      }
   }
}
int32_t
anv_block_pool_alloc(struct anv_block_pool *pool,
                     uint32_t block_size)
{
   return anv_block_pool_alloc_new(pool, &pool->state, block_size);
}
/* Allocates a block out of the back of the block pool.
 *
 * This will allocate a block earlier than the "start" of the block pool.
 * The offsets returned from this function will be negative but will still
 * be correct relative to the block pool's map pointer.
 *
 * If you ever use anv_block_pool_alloc_back, then you will have to do
 * gymnastics with the block pool's BO when doing relocations.
 */
int32_t
anv_block_pool_alloc_back(struct anv_block_pool *pool,
                          uint32_t block_size)
{
   int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
                                             block_size);

   /* The offset we get out of anv_block_pool_alloc_new() is actually the
    * number of bytes downwards from the middle to the end of the block.
    * We need to turn it into a (negative) offset from the middle to the
    * start of the block.
    */
   assert(offset >= 0);
   return -(offset + block_size);
}
VkResult
anv_state_pool_init(struct anv_state_pool *pool,
                    struct anv_device *device,
                    uint32_t block_size)
{
   VkResult result = anv_block_pool_init(&pool->block_pool, device,
                                         block_size * 16);
   if (result != VK_SUCCESS)
      return result;

   assert(util_is_power_of_two(block_size));
   pool->block_size = block_size;
   pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
   for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
      pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
      pool->buckets[i].block.next = 0;
      pool->buckets[i].block.end = 0;
   }

   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));

   return VK_SUCCESS;
}
void
anv_state_pool_finish(struct anv_state_pool *pool)
{
   VG(VALGRIND_DESTROY_MEMPOOL(pool));
   anv_block_pool_finish(&pool->block_pool);
}
static uint32_t
anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
                                    struct anv_block_pool *block_pool,
                                    uint32_t state_size,
                                    uint32_t block_size)
{
   struct anv_block_state block, old, new;
   uint32_t offset;

   /* If our state is large, we don't need any sub-allocation from a block.
    * Instead, we just grab whole (potentially large) blocks.
    */
   if (state_size >= block_size)
      return anv_block_pool_alloc(block_pool, state_size);

 restart:
   block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);

   if (block.next < block.end) {
      return block.next;
   } else if (block.next == block.end) {
      offset = anv_block_pool_alloc(block_pool, block_size);
      new.next = offset + state_size;
      new.end = offset + block_size;
      old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
      if (old.next != block.next)
         futex_wake(&pool->block.end, INT_MAX);
      return offset;
   } else {
      futex_wait(&pool->block.end, block.end);
      goto restart;
   }
}
static uint32_t
anv_state_pool_get_bucket(uint32_t size)
{
   unsigned size_log2 = ilog2_round_up(size);
   assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
   if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
      size_log2 = ANV_MIN_STATE_SIZE_LOG2;
   return size_log2 - ANV_MIN_STATE_SIZE_LOG2;
}
static uint32_t
anv_state_pool_get_bucket_size(uint32_t bucket)
{
   uint32_t size_log2 = bucket + ANV_MIN_STATE_SIZE_LOG2;
   return 1 << size_log2;
}
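
/* Bucket 0 therefore holds states of (1 << ANV_MIN_STATE_SIZE_LOG2) bytes,
 * bucket 1 twice that, and so on; a request is rounded up to the next bucket
 * size (e.g. a 48-byte request in a pool whose minimum state size is 64 bytes
 * lands in the 64-byte bucket).
 */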
static struct anv_state
anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
                           uint32_t size, uint32_t align)
{
   uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));

   struct anv_state state;
   state.alloc_size = anv_state_pool_get_bucket_size(bucket);

   /* Try free list first. */
   if (anv_free_list_pop(&pool->buckets[bucket].free_list,
                         &pool->block_pool.map, &state.offset)) {
      assert(state.offset >= 0);
      goto done;
   }

   /* Try to grab a chunk from some larger bucket and split it up */
   for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
      int32_t chunk_offset;
      if (anv_free_list_pop(&pool->buckets[b].free_list,
                            &pool->block_pool.map, &chunk_offset)) {
         unsigned chunk_size = anv_state_pool_get_bucket_size(b);

         /* We've found a chunk that's larger than the requested state size.
          * There are a couple of options as to what we do with it:
          *
          *    1) We could fully split the chunk into state.alloc_size sized
          *       pieces.  However, this would mean that allocating a 16B
          *       state could potentially split a 2MB chunk into 512K smaller
          *       chunks.  This would lead to unnecessary fragmentation.
          *
          *    2) The classic "buddy allocator" method would have us split the
          *       chunk in half and return one half.  Then we would split the
          *       remaining half in half and return one half, and repeat as
          *       needed until we get down to the size we want.  However, if
          *       you are allocating a bunch of the same size state (which is
          *       the common case), this means that every other allocation has
          *       to go up a level and every fourth goes up two levels, etc.
          *       This is not nearly as efficient as it could be if we did a
          *       little more work up-front.
          *
          *    3) Split the difference between (1) and (2) by doing a
          *       two-level split.  If it's bigger than some fixed block_size,
          *       we split it into block_size sized chunks and return all but
          *       one of them.  Then we split what remains into
          *       state.alloc_size sized chunks and return all but one.
          *
          * We choose option (3).
          */
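         /* For instance (hypothetical sizes): splitting a 2 MB chunk for 64 B
          * states with a 4 KB block_size pushes 511 four-KB blocks onto the
          * block-sized bucket, then splits the remaining 4 KB into 64 B
          * states, pushing 63 of them onto the requested bucket and returning
          * the first.
          */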
         if (chunk_size > pool->block_size &&
             state.alloc_size < pool->block_size) {
            assert(chunk_size % pool->block_size == 0);
            /* We don't want to split giant chunks into tiny chunks.  Instead,
             * break anything bigger than a block into block-sized chunks and
             * then break it down into bucket-sized chunks from there.  Return
             * all but the first block of the chunk to the block bucket.
             */
            const uint32_t block_bucket =
               anv_state_pool_get_bucket(pool->block_size);
            anv_free_list_push(&pool->buckets[block_bucket].free_list,
                               pool->block_pool.map,
                               chunk_offset + pool->block_size,
                               pool->block_size,
                               (chunk_size / pool->block_size) - 1);
            chunk_size = pool->block_size;
         }

         assert(chunk_size % state.alloc_size == 0);
         anv_free_list_push(&pool->buckets[bucket].free_list,
                            pool->block_pool.map,
                            chunk_offset + state.alloc_size,
                            state.alloc_size,
                            (chunk_size / state.alloc_size) - 1);

         state.offset = chunk_offset;
         goto done;
      }
   }

   state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
                                                      &pool->block_pool,
                                                      state.alloc_size,
                                                      pool->block_size);

done:
   state.map = pool->block_pool.map + state.offset;
   return state;
}
struct anv_state
anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
{
   if (size == 0)
      return ANV_STATE_NULL;

   struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
   VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
   return state;
}
struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool *pool)
{
   struct anv_state state;
   state.alloc_size = pool->block_size;

   if (anv_free_list_pop(&pool->back_alloc_free_list,
                         &pool->block_pool.map, &state.offset)) {
      assert(state.offset < 0);
      goto done;
   }

   state.offset = anv_block_pool_alloc_back(&pool->block_pool,
                                            pool->block_size);

done:
   state.map = pool->block_pool.map + state.offset;
   VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
   return state;
}
static void
anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
{
   assert(util_is_power_of_two(state.alloc_size));
   unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);

   if (state.offset < 0) {
      assert(state.alloc_size == pool->block_size);
      anv_free_list_push(&pool->back_alloc_free_list,
                         pool->block_pool.map, state.offset,
                         state.alloc_size, 1);
   } else {
      anv_free_list_push(&pool->buckets[bucket].free_list,
                         pool->block_pool.map, state.offset,
                         state.alloc_size, 1);
   }
}
void
anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
{
   if (state.alloc_size == 0)
      return;

   VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
   anv_state_pool_free_no_vg(pool, state);
}
struct anv_state_stream_block {
   struct anv_state block;

   /* The next block in the chain */
   struct anv_state_stream_block *next;

#ifdef HAVE_VALGRIND
   /* A pointer to the first user-allocated thing in this block.  This is
    * what valgrind sees as the start of the block.
    */
   void *_vg_ptr;
#endif
};
/* The state stream allocator is a one-shot, single threaded allocator for
 * variable sized blocks.  We use it for allocating dynamic state.
 */
void
anv_state_stream_init(struct anv_state_stream *stream,
                      struct anv_state_pool *state_pool,
                      uint32_t block_size)
{
   stream->state_pool = state_pool;
   stream->block_size = block_size;

   stream->block = ANV_STATE_NULL;

   stream->block_list = NULL;

   /* Ensure that next + whatever > block_size.  This way the first call to
    * state_stream_alloc fetches a new block.
    */
   stream->next = block_size;

   VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
}
void
anv_state_stream_finish(struct anv_state_stream *stream)
{
   struct anv_state_stream_block *next = stream->block_list;
   while (next != NULL) {
      struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
      VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
      VG(VALGRIND_MAKE_MEM_UNDEFINED(next, stream->block_size));
      anv_state_pool_free_no_vg(stream->state_pool, sb.block);
      next = sb.next;
   }

   VG(VALGRIND_DESTROY_MEMPOOL(stream));
}
struct anv_state
anv_state_stream_alloc(struct anv_state_stream *stream,
                       uint32_t size, uint32_t alignment)
{
   if (size == 0)
      return ANV_STATE_NULL;

   assert(alignment <= PAGE_SIZE);

   uint32_t offset = align_u32(stream->next, alignment);
   if (offset + size > stream->block.alloc_size) {
      uint32_t block_size = stream->block_size;
      if (block_size < size)
         block_size = round_to_power_of_two(size);

      stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
                                                 block_size, PAGE_SIZE);

      struct anv_state_stream_block *sb = stream->block.map;
      VG_NOACCESS_WRITE(&sb->block, stream->block);
      VG_NOACCESS_WRITE(&sb->next, stream->block_list);
      stream->block_list = sb;
      VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, NULL));

      VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, stream->block_size));

      /* Reset back to the start plus space for the header */
      stream->next = sizeof(*sb);

      offset = align_u32(stream->next, alignment);
      assert(offset + size <= stream->block.alloc_size);
   }

   struct anv_state state = stream->block;
   state.offset += offset;
   state.alloc_size = size;
   state.map += offset;

   stream->next = offset + size;

#ifdef HAVE_VALGRIND
   struct anv_state_stream_block *sb = stream->block_list;
   void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
   if (vg_ptr == NULL) {
      vg_ptr = state.map;
      VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
      VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
   } else {
      void *state_end = state.map + state.alloc_size;
      /* This only updates the mempool.  The newly allocated chunk is still
       * marked as NOACCESS. */
      VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
      /* Mark the newly allocated chunk as undefined */
      VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
   }
#endif

   return state;
}
struct bo_pool_bo_link {
   struct bo_pool_bo_link *next;
   struct anv_bo bo;
};
void
anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device)
{
   pool->device = device;
   memset(pool->free_list, 0, sizeof(pool->free_list));

   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
}
void
anv_bo_pool_finish(struct anv_bo_pool *pool)
{
   for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
      struct bo_pool_bo_link *link = PFL_PTR(pool->free_list[i]);
      while (link != NULL) {
         struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);

         anv_gem_munmap(link_copy.bo.map, link_copy.bo.size);
         anv_gem_close(pool->device, link_copy.bo.gem_handle);
         link = link_copy.next;
      }
   }

   VG(VALGRIND_DESTROY_MEMPOOL(pool));
}
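
/* BOs handed out by the pool are rounded up to a power of two and bucketed by
 * size_log2 - 12: bucket 0 holds 4 KB BOs, bucket 1 8 KB BOs, and so on.  A
 * 14000-byte request, for example, gets a 16 KB BO from bucket 2.
 */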
VkResult
anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo, uint32_t size)
{
   VkResult result;

   const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size);
   const unsigned pow2_size = 1 << size_log2;
   const unsigned bucket = size_log2 - 12;
   assert(bucket < ARRAY_SIZE(pool->free_list));

   void *next_free_void;
   if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) {
      struct bo_pool_bo_link *next_free = next_free_void;
      *bo = VG_NOACCESS_READ(&next_free->bo);
      assert(bo->gem_handle);
      assert(bo->map == next_free);
      assert(size <= bo->size);

      VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));

      return VK_SUCCESS;
   }

   struct anv_bo new_bo;

   result = anv_bo_init_new(&new_bo, pool->device, pow2_size);
   if (result != VK_SUCCESS)
      return result;

   assert(new_bo.size == pow2_size);

   new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
   if (new_bo.map == MAP_FAILED) {
      anv_gem_close(pool->device, new_bo.gem_handle);
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
   }

   *bo = new_bo;

   VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));

   return VK_SUCCESS;
}
void
anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in)
{
   /* Make a copy in case the anv_bo happens to be stored in the BO */
   struct anv_bo bo = *bo_in;

   VG(VALGRIND_MEMPOOL_FREE(pool, bo.map));

   struct bo_pool_bo_link *link = bo.map;
   VG_NOACCESS_WRITE(&link->bo, bo);

   assert(util_is_power_of_two(bo.size));
   const unsigned size_log2 = ilog2_round_up(bo.size);
   const unsigned bucket = size_log2 - 12;
   assert(bucket < ARRAY_SIZE(pool->free_list));

   anv_ptr_free_list_push(&pool->free_list[bucket], link);
}
void
anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
{
   memset(pool, 0, sizeof(*pool));
}
void
anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
{
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      for (unsigned i = 0; i < 16; i++) {
         struct anv_scratch_bo *bo = &pool->bos[i][s];
         if (bo->exists > 0)
            anv_gem_close(device, bo->bo.gem_handle);
      }
   }
}
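
/* One scratch BO is cached per (per-thread size, shader stage) pair:
 * ffs(per_thread_scratch / 2048) maps 2 KB -> 1, 4 KB -> 2, and so on
 * (assuming power-of-two per-thread scratch sizes), which indexes pool->bos
 * in the function below.
 */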
struct anv_bo *
anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
                       gl_shader_stage stage, unsigned per_thread_scratch)
{
   if (per_thread_scratch == 0)
      return NULL;

   unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
   assert(scratch_size_log2 < 16);

   struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage];

   /* We can use "exists" to shortcut and ignore the critical section */
   if (bo->exists)
      return &bo->bo;

   pthread_mutex_lock(&device->mutex);

   __sync_synchronize();
   if (bo->exists) {
      pthread_mutex_unlock(&device->mutex);
      return &bo->bo;
   }

   const struct anv_physical_device *physical_device =
      &device->instance->physicalDevice;
   const struct gen_device_info *devinfo = &physical_device->info;

   /* WaCSScratchSize:hsw
    *
    * Haswell's scratch space address calculation appears to be sparse
    * rather than tightly packed.  The Thread ID has bits indicating which
    * subslice, EU within a subslice, and thread within an EU it is.
    * There's a maximum of two slices and two subslices, so these can be
    * stored with a single bit.  Even though there are only 10 EUs per
    * subslice, this is stored in 4 bits, so there's an effective maximum
    * value of 16 EUs.  Similarly, although there are only 7 threads per EU,
    * this is stored in a 3 bit number, giving an effective maximum value
    * of 8 threads per EU.
    *
    * This means that we need to use 16 * 8 instead of 10 * 7 for the
    * number of threads per subslice.
    */
   const unsigned subslices = MAX2(physical_device->subslice_total, 1);
   const unsigned scratch_ids_per_subslice =
      device->info.is_haswell ? 16 * 8 : devinfo->max_cs_threads;

   uint32_t max_threads[] = {
      [MESA_SHADER_VERTEX]    = devinfo->max_vs_threads,
      [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
      [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
      [MESA_SHADER_GEOMETRY]  = devinfo->max_gs_threads,
      [MESA_SHADER_FRAGMENT]  = devinfo->max_wm_threads,
      [MESA_SHADER_COMPUTE]   = scratch_ids_per_subslice * subslices,
   };

   uint32_t size = per_thread_scratch * max_threads[stage];

   anv_bo_init_new(&bo->bo, device, size);

   /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
    * are still relative to the general state base address.  When we emit
    * STATE_BASE_ADDRESS, we set general state base address to 0 and the size
    * to the maximum (1 page under 4GB).  This allows us to just place the
    * scratch buffers anywhere we wish in the bottom 32 bits of address space
    * and just set the scratch base pointer in 3DSTATE_*S using a relocation.
    * However, in order to do so, we need to ensure that the kernel does not
    * place the scratch BO above the 32-bit boundary.
    *
    * NOTE: Technically, it can't go "anywhere" because the top page is off
    * limits.  However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
    * kernel allocates space using
    *
    *    end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
    *
    * so nothing will ever touch the top page.
    */
   bo->bo.flags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;

   /* Set the exists last because it may be read by other threads */
   __sync_synchronize();
   bo->exists = true;

   pthread_mutex_unlock(&device->mutex);

   return &bo->bo;
}
struct anv_cached_bo {
   struct anv_bo bo;

   uint32_t refcount;
};
VkResult
anv_bo_cache_init(struct anv_bo_cache *cache)
{
   cache->bo_map = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                           _mesa_key_pointer_equal);
   if (!cache->bo_map)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pthread_mutex_init(&cache->mutex, NULL)) {
      _mesa_hash_table_destroy(cache->bo_map, NULL);
      return vk_errorf(VK_ERROR_OUT_OF_HOST_MEMORY,
                       "pthread_mutex_init failed: %m");
   }

   return VK_SUCCESS;
}
void
anv_bo_cache_finish(struct anv_bo_cache *cache)
{
   _mesa_hash_table_destroy(cache->bo_map, NULL);
   pthread_mutex_destroy(&cache->mutex);
}
static struct anv_cached_bo *
anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(cache->bo_map,
                              (const void *)(uintptr_t)gem_handle);
   if (!entry)
      return NULL;

   struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
   assert(bo->bo.gem_handle == gem_handle);

   return bo;
}
static struct anv_bo *
anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
{
   pthread_mutex_lock(&cache->mutex);

   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);

   pthread_mutex_unlock(&cache->mutex);

   return bo ? &bo->bo : NULL;
}
VkResult
anv_bo_cache_alloc(struct anv_device *device,
                   struct anv_bo_cache *cache,
                   uint64_t size, struct anv_bo **bo_out)
{
   struct anv_cached_bo *bo =
      vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!bo)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* The kernel is going to give us whole pages anyway */
   size = align_u64(size, 4096);

   VkResult result = anv_bo_init_new(&bo->bo, device, size);
   if (result != VK_SUCCESS) {
      vk_free(&device->alloc, bo);
      return result;
   }

   bo->refcount = 1;

   assert(bo->bo.gem_handle);

   pthread_mutex_lock(&cache->mutex);

   _mesa_hash_table_insert(cache->bo_map,
                           (void *)(uintptr_t)bo->bo.gem_handle, bo);

   pthread_mutex_unlock(&cache->mutex);

   *bo_out = &bo->bo;

   return VK_SUCCESS;
}
VkResult
anv_bo_cache_import(struct anv_device *device,
                    struct anv_bo_cache *cache,
                    int fd, uint64_t size, struct anv_bo **bo_out)
{
   pthread_mutex_lock(&cache->mutex);

   /* The kernel is going to give us whole pages anyway */
   size = align_u64(size, 4096);

   uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
   if (!gem_handle) {
      pthread_mutex_unlock(&cache->mutex);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
   }

   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
   if (bo) {
      if (bo->bo.size != size) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
      }
      __sync_fetch_and_add(&bo->refcount, 1);
   } else {
      /* For security purposes, we reject BO imports where the size does not
       * match exactly.  This prevents a malicious client from passing a
       * buffer to a trusted client, lying about the size, and telling the
       * trusted client to try and texture from an image that goes
       * out-of-bounds.  This sort of thing could lead to GPU hangs or worse
       * in the trusted client.  The trusted client can protect itself against
       * this sort of attack but only if it can trust the buffer size.
       */
      off_t import_size = lseek(fd, 0, SEEK_END);
      if (import_size == (off_t)-1 || import_size != size) {
         anv_gem_close(device, gem_handle);
         pthread_mutex_unlock(&cache->mutex);
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
      }

      bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!bo) {
         anv_gem_close(device, gem_handle);
         pthread_mutex_unlock(&cache->mutex);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bo->refcount = 1;

      anv_bo_init(&bo->bo, gem_handle, size);

      if (device->instance->physicalDevice.supports_48bit_addresses)
         bo->bo.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;

      if (device->instance->physicalDevice.has_exec_async)
         bo->bo.flags |= EXEC_OBJECT_ASYNC;

      _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
   }

   pthread_mutex_unlock(&cache->mutex);

   /* From the Vulkan spec:
    *
    *    "Importing memory from a file descriptor transfers ownership of
    *    the file descriptor from the application to the Vulkan
    *    implementation. The application must not perform any operations on
    *    the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   *bo_out = &bo->bo;

   return VK_SUCCESS;
}
VkResult
anv_bo_cache_export(struct anv_device *device,
                    struct anv_bo_cache *cache,
                    struct anv_bo *bo_in, int *fd_out)
{
   assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
   struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;

   int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
   if (fd < 0)
      return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

   *fd_out = fd;

   return VK_SUCCESS;
}
static bool
atomic_dec_not_one(uint32_t *counter)
{
   uint32_t old, val;

   val = *counter;
   while (1) {
      if (val == 1)
         return false;

      old = __sync_val_compare_and_swap(counter, val, val - 1);
      if (old == val)
         return true;

      val = old;
   }
}
void
anv_bo_cache_release(struct anv_device *device,
                     struct anv_bo_cache *cache,
                     struct anv_bo *bo_in)
{
   assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
   struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;

   /* Try to decrement the counter but don't go below one.  If this succeeds
    * then the refcount has been decremented and we are not the last
    * reference.
    */
   if (atomic_dec_not_one(&bo->refcount))
      return;

   pthread_mutex_lock(&cache->mutex);

   /* We are probably the last reference since our attempt to decrement above
    * failed.  However, we can't actually know until we are inside the mutex.
    * Otherwise, someone could import the BO between the decrement and our
    * taking the lock.
    */
   if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
      /* Turns out we're not the last reference.  Unlock and bail. */
      pthread_mutex_unlock(&cache->mutex);
      return;
   }

   struct hash_entry *entry =
      _mesa_hash_table_search(cache->bo_map,
                              (const void *)(uintptr_t)bo->bo.gem_handle);
   assert(entry);
   _mesa_hash_table_remove(cache->bo_map, entry);

   if (bo->bo.map)
      anv_gem_munmap(bo->bo.map, bo->bo.size);

   anv_gem_close(device, bo->bo.gem_handle);

   /* Don't unlock until we've actually closed the BO.  The whole point of
    * the BO cache is to ensure that we correctly handle races with creating
    * and releasing GEM handles and we don't want to let someone import the BO
    * again between mutex unlock and closing the GEM handle.
    */
   pthread_mutex_unlock(&cache->mutex);

   vk_free(&device->alloc, bo);
}