/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/memfd.h>

#include "anv_private.h"

#include "util/hash_table.h"
#include "util/simple_mtx.h"
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({                         \
   VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr)));   \
   __typeof(*(__ptr)) __val = *(__ptr);                    \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));  \
   __val;                                                  \
})
#define VG_NOACCESS_WRITE(__ptr, __val) ({                  \
   VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr)));  \
   *(__ptr) = (__val);                                      \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));   \
})
#else
#define VG_NOACCESS_READ(__ptr) (*(__ptr))
#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
#endif
/* Design goals:
 *
 *  - Lock free (except when resizing underlying bos)
 *
 *  - Constant time allocation with typically only one atomic
 *
 *  - Multiple allocation sizes without fragmentation
 *
 *  - Can grow while keeping addresses and offset of contents stable
 *
 *  - All allocations within one bo so we can point one of the
 *    STATE_BASE_ADDRESS pointers at it.
 *
 * The overall design is a two-level allocator: top level is a fixed size, big
 * block (8k) allocator, which operates out of a bo.  Allocation is done by
 * either pulling a block from the free list or growing the used range of the
 * bo.  Growing the range may run out of space in the bo which we then need to
 * grow.  Growing the bo is tricky in a multi-threaded, lockless environment:
 * we need to keep all pointers and contents in the old map valid.  GEM bos in
 * general can't grow, but we use a trick: we create a memfd and use ftruncate
 * to grow it as necessary.  We mmap the new size and then create a gem bo for
 * it using the new gem userptr ioctl.  Without heavy-handed locking around
 * our allocation fast-path, there isn't really a way to munmap the old mmap,
 * so we just keep it around until garbage collection time.  While the block
 * allocator is lockless for normal operations, we block other threads trying
 * to allocate while we're growing the map.  It shouldn't happen often, and
 * growing is fast anyway.
 *
 * At the next level we can use various sub-allocators.  The state pool is a
 * pool of smaller, fixed size objects, which operates much like the block
 * pool.  It uses a free list for freeing objects, but when it runs out of
 * space it just allocates a new block from the block pool.  This allocator is
 * intended for longer lived state objects such as SURFACE_STATE and most
 * other persistent state objects in the API.  We may need to track more info
 * with these objects and a pointer back to the CPU object (eg VkImage).  In
 * those cases we just allocate a slightly bigger object and put the extra
 * state after the GPU state object.
 *
 * The state stream allocator works similarly to how the i965 DRI driver
 * streams all its state.  Even with Vulkan, we need to emit transient state
 * (whether surface state base or dynamic state base), and for that we can
 * just get a block and fill it up.  These cases are local to a command buffer
 * and the sub-allocator need not be thread safe.  The streaming allocator
 * gets a new block when it runs out of space and chains them together so
 * they can be easily freed.
 */
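
/* A rough usage sketch of the pieces implemented below (illustrative only:
 * the sizes are made up and some arguments are elided, but the entry points
 * are the ones defined further down in this file):
 *
 *    anv_state_pool_init(&pool, device, ...);
 *
 *    // Long-lived state (e.g. a surface state), freed explicitly:
 *    struct anv_state surf = anv_state_pool_alloc(&pool, 64, 64);
 *    ...
 *    anv_state_pool_free(&pool, surf);
 *
 *    // Transient, per-command-buffer state goes through a stream:
 *    anv_state_stream_init(&stream, &pool, 16384);
 *    struct anv_state dyn = anv_state_stream_alloc(&stream, 32, 32);
 *    ...
 *    anv_state_stream_finish(&stream);  // frees every stream allocation
 */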
/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
 * We use it to indicate the free list is empty. */
#define EMPTY 1

#define BLOCK_POOL_MEMFD_SIZE (2ull * 1024 * 1024 * 1024)
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

struct anv_mmap_cleanup {
   void *map;
   size_t size;
   uint32_t gem_handle;
};

#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})
#ifndef HAVE_MEMFD_CREATE
static inline int
memfd_create(const char *name, unsigned int flags)
{
   return syscall(SYS_memfd_create, name, flags);
}
#endif
static inline uint32_t
ilog2_round_up(uint32_t value)
{
   return 32 - __builtin_clz(value - 1);
}

static inline uint32_t
round_to_power_of_two(uint32_t value)
{
   return 1 << ilog2_round_up(value);
}
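
/* For example (values worked out by hand, purely illustrative):
 * ilog2_round_up(24) == 5, ilog2_round_up(32) == 5, ilog2_round_up(33) == 6,
 * so round_to_power_of_two(24) == 32, round_to_power_of_two(32) == 32 and
 * round_to_power_of_two(33) == 64.
 */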
static bool
anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
{
   union anv_free_list current, new, old;

   current.u64 = list->u64;
   while (current.offset != EMPTY) {
      /* We have to add a memory barrier here so that the list head (and
       * offset) gets read before we read the map pointer.  This way we
       * know that the map pointer is valid for the given offset at the
       * point where we read it.
       */
      __sync_synchronize();

      int32_t *next_ptr = *map + current.offset;
      new.offset = VG_NOACCESS_READ(next_ptr);
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
      if (old.u64 == current.u64) {
         *offset = current.offset;
         return true;
      }
      current = old;
   }

   return false;
}
static void
anv_free_list_push(union anv_free_list *list, void *map, int32_t offset,
                   uint32_t size, uint32_t count)
{
   union anv_free_list current, old, new;
   int32_t *next_ptr = map + offset;

   /* If we're returning more than one chunk, we need to build a chain to add
    * to the list.  Fortunately, we can do this without any atomics since we
    * own everything in the chain right now.  `offset` is left pointing to the
    * head of our chain list while `next_ptr` points to the tail.
    */
   for (uint32_t i = 1; i < count; i++) {
      VG_NOACCESS_WRITE(next_ptr, offset + i * size);
      next_ptr = map + offset + i * size;
   }

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, current.offset);
      new.offset = offset;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
   } while (old.u64 != current.u64);
}
/* All pointers in the ptr_free_list are assumed to be page-aligned.  This
 * means that the bottom 12 bits should all be zero.
 */
#define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
#define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff))
#define PFL_PACK(ptr, count) ({           \
   (void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff)); \
})
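
/* For example (hypothetical values): PFL_PACK(0x7f0000042000, 5) yields
 * 0x7f0000042005; PFL_PTR() recovers the page-aligned pointer and
 * PFL_COUNT() recovers the 5.  The low 12 bits hold a counter used to defeat
 * the ABA problem on the compare-and-swap in the push/pop functions below
 * (see the comment in anv_ptr_free_list_push).
 */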
static bool
anv_ptr_free_list_pop(void **list, void **elem)
{
   void *current = *list;
   while (PFL_PTR(current) != NULL) {
      void **next_ptr = PFL_PTR(current);
      void *new_ptr = VG_NOACCESS_READ(next_ptr);
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(new_ptr, new_count);
      void *old = __sync_val_compare_and_swap(list, current, new);
      if (old == current) {
         *elem = PFL_PTR(current);
         return true;
      }
      current = old;
   }

   return false;
}
static void
anv_ptr_free_list_push(void **list, void *elem)
{
   void *old, *current;
   void **next_ptr = elem;

   /* The pointer-based free list requires that the pointer be
    * page-aligned.  This is because we use the bottom 12 bits of the
    * pointer to store a counter to solve the ABA concurrency problem.
    */
   assert(((uintptr_t)elem & 0xfff) == 0);

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current));
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(elem, new_count);
      old = __sync_val_compare_and_swap(list, current, new);
   } while (old != current);
}
static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
                            uint32_t center_bo_offset, uint32_t size);
VkResult
anv_block_pool_init(struct anv_block_pool *pool,
                    struct anv_device *device,
                    uint64_t start_address,
                    uint32_t initial_size,
                    uint64_t bo_flags)
{
   VkResult result;

   pool->device = device;
   pool->bo_flags = bo_flags;
   pool->start_address = gen_canonical_address(start_address);

   anv_bo_init(&pool->bo, 0, 0);

   pool->fd = memfd_create("block pool", MFD_CLOEXEC);
   if (pool->fd == -1)
      return vk_error(VK_ERROR_INITIALIZATION_FAILED);

   /* Just make it 2GB up-front.  The Linux kernel won't actually back it
    * with pages until we either map and fault on one of them or we use
    * userptr and send a chunk of it off to the GPU.
    */
   if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   if (!u_vector_init(&pool->mmap_cleanups,
                      round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
                      128)) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   pool->state.next = 0;
   pool->state.end = 0;
   pool->back_state.next = 0;
   pool->back_state.end = 0;

   result = anv_block_pool_expand_range(pool, 0, initial_size);
   if (result != VK_SUCCESS)
      goto fail_mmap_cleanups;

   return VK_SUCCESS;

 fail_mmap_cleanups:
   u_vector_finish(&pool->mmap_cleanups);
 fail_fd:
   close(pool->fd);

   return result;
}
void
anv_block_pool_finish(struct anv_block_pool *pool)
{
   struct anv_mmap_cleanup *cleanup;

   u_vector_foreach(cleanup, &pool->mmap_cleanups) {
      if (cleanup->map)
         munmap(cleanup->map, cleanup->size);
      if (cleanup->gem_handle)
         anv_gem_close(pool->device, cleanup->gem_handle);
   }

   u_vector_finish(&pool->mmap_cleanups);

   close(pool->fd);
}
#define PAGE_SIZE 4096
static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
                            uint32_t center_bo_offset, uint32_t size)
{
   void *map;
   uint32_t gem_handle;
   struct anv_mmap_cleanup *cleanup;

   /* Assert that we only ever grow the pool */
   assert(center_bo_offset >= pool->back_state.end);
   assert(size - center_bo_offset >= pool->state.end);

   /* Assert that we don't go outside the bounds of the memfd */
   assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
   assert(size - center_bo_offset <=
          BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);

   cleanup = u_vector_add(&pool->mmap_cleanups);
   if (!cleanup)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *cleanup = ANV_MMAP_CLEANUP_INIT;

   /* Just leak the old map until we destroy the pool.  We can't munmap it
    * without races or imposing locking on the block allocate fast path.  On
    * the whole, the leaked maps add up to less than the size of the
    * current map.  MAP_POPULATE seems like the right thing to do, but we
    * should try to get some numbers.
    */
   map = mmap(NULL, size, PROT_READ | PROT_WRITE,
              MAP_SHARED | MAP_POPULATE, pool->fd,
              BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
   if (map == MAP_FAILED)
      return vk_errorf(pool->device->instance, pool->device,
                       VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");

   gem_handle = anv_gem_userptr(pool->device, map, size);
   if (gem_handle == 0) {
      munmap(map, size);
      return vk_errorf(pool->device->instance, pool->device,
                       VK_ERROR_TOO_MANY_OBJECTS, "userptr failed: %m");
   }

   cleanup->map = map;
   cleanup->size = size;
   cleanup->gem_handle = gem_handle;

   /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
    * I915_CACHING_NONE on non-LLC platforms.  However, userptr objects are
    * always created as I915_CACHING_CACHED, which on non-LLC means
    * snooped.  That can be useful but comes with a bit of overhead.  Since
    * we're explicitly clflushing and don't want the overhead we need to turn
    * it off.
    */
   if (!pool->device->info.has_llc) {
      anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
      anv_gem_set_domain(pool->device, gem_handle,
                         I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
   }

   /* Now that we successfully allocated everything, we can write the new
    * values back into pool. */
   pool->map = map + center_bo_offset;
   pool->center_bo_offset = center_bo_offset;

   /* For block pool BOs we have to be a bit careful about where we place them
    * in the GTT.  There are two documented workarounds for state base address
    * placement : Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
    * which state that those two base addresses do not support 48-bit
    * addresses and need to be placed in the bottom 32-bit range.
    * Unfortunately, this is not quite accurate.
    *
    * The real problem is that we always set the size of our state pools in
    * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
    * likely significantly smaller.  We do this because we do not know at the
    * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
    * the pool during command buffer building so we don't actually have a
    * valid final size.  If the address + size, as seen by STATE_BASE_ADDRESS
    * overflows 48 bits, the GPU appears to treat all accesses to the buffer
    * as being out of bounds and returns zero.  For dynamic state, this
    * usually just leads to rendering corruptions, but shaders that are all
    * zero hang the GPU immediately.
    *
    * The easiest solution is to do exactly what the bogus workarounds say to
    * do: restrict these buffers to 32-bit addresses.  We could also pin the
    * BO to some particular location of our choosing, but that's significantly
    * more work than just not setting a flag.  So, we explicitly DO NOT set
    * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
    * hard work for us.
    */
   anv_bo_init(&pool->bo, gem_handle, size);
   if (pool->bo_flags & EXEC_OBJECT_PINNED) {
      pool->bo.offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
                        center_bo_offset;
   }
   pool->bo.flags = pool->bo_flags;
   pool->bo.map = map;

   return VK_SUCCESS;
}
/** Grows and re-centers the block pool.
 *
 * We grow the block pool in one or both directions in such a way that the
 * following conditions are met:
 *
 *  1) The size of the entire pool is always a power of two.
 *
 *  2) The pool only grows on both ends.  Neither end can get
 *     shrunk.
 *
 *  3) At the end of the allocation, we have about twice as much space
 *     allocated for each end as we have used.  This way the pool doesn't
 *     grow too far in one direction or the other.
 *
 *  4) If the _alloc_back() has never been called, then the back portion of
 *     the pool retains a size of zero.  (This makes it easier for users of
 *     the block pool that only want a one-sided pool.)
 *
 *  5) We have enough space allocated for at least one more block in
 *     whichever side `state` points to.
 *
 *  6) The center of the pool is always aligned to both the block_size of
 *     the pool and a 4K CPU page.
 */
static uint32_t
anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
{
   VkResult result = VK_SUCCESS;

   pthread_mutex_lock(&pool->device->mutex);

   assert(state == &pool->state || state == &pool->back_state);

   /* Gather a little usage information on the pool.  Since we may have
    * threads waiting in queue to get some storage while we resize, it's
    * actually possible that total_used will be larger than old_size.  In
    * particular, block_pool_alloc() increments state->next prior to
    * calling block_pool_grow, so this ensures that we get enough space for
    * whichever side tries to grow the pool.
    *
    * We align to a page size because it makes it easier to do our
    * calculations later in such a way that we stay page-aligned.
    */
   uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
   uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
   uint32_t total_used = front_used + back_used;

   assert(state == &pool->state || back_used > 0);

   uint32_t old_size = pool->bo.size;

   /* The block pool is always initialized to a nonzero size and this function
    * is always called after initialization.
    */
   assert(old_size > 0);

   /* The back_used and front_used may actually be smaller than the actual
    * requirement because they are based on the next pointers which are
    * updated prior to calling this function.
    */
   uint32_t back_required = MAX2(back_used, pool->center_bo_offset);
   uint32_t front_required = MAX2(front_used, old_size - pool->center_bo_offset);

   if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
      /* If we're in this case then this isn't the first allocation and we
       * already have enough space on both sides to hold double what we
       * have allocated.  There's nothing for us to do.
       */
      goto done;
   }

   uint32_t size = old_size * 2;
   while (size < back_required + front_required)
      size *= 2;

   assert(size > pool->bo.size);

   /* We compute a new center_bo_offset such that, when we double the size
    * of the pool, we maintain the ratio of how much is used by each side.
    * This way things should remain more-or-less balanced.
    */
   uint32_t center_bo_offset;
   if (back_used == 0) {
      /* If we're in this case then we have never called alloc_back().  In
       * this case, we want to keep the offset at 0 to make things as simple
       * as possible for users that don't care about back allocations.
       */
      center_bo_offset = 0;
   } else {
      /* Try to "center" the allocation based on how much is currently in
       * use on each side of the center line.
       */
      center_bo_offset = ((uint64_t)size * back_used) / total_used;

      /* Align down to a multiple of the page size */
      center_bo_offset &= ~(PAGE_SIZE - 1);

      assert(center_bo_offset >= back_used);

      /* Make sure we don't shrink the back end of the pool */
      if (center_bo_offset < back_required)
         center_bo_offset = back_required;

      /* Make sure that we don't shrink the front end of the pool */
      if (size - center_bo_offset < front_required)
         center_bo_offset = size - front_required;
   }

   assert(center_bo_offset % PAGE_SIZE == 0);

   result = anv_block_pool_expand_range(pool, center_bo_offset, size);

   pool->bo.flags = pool->bo_flags;

done:
   pthread_mutex_unlock(&pool->device->mutex);

   if (result == VK_SUCCESS) {
      /* Return the appropriate new size.  This function never actually
       * updates state->next.  Instead, we let the caller do that because it
       * needs to do so in order to maintain its concurrency model.
       */
      if (state == &pool->state) {
         return pool->bo.size - pool->center_bo_offset;
      } else {
         assert(pool->center_bo_offset > 0);
         return pool->center_bo_offset;
      }
   } else {
      return 0;
   }
}
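
/* A worked example of the re-centering above (made-up numbers): suppose the
 * pool is 8192 bytes with back_used == front_used == 4096 after page
 * alignment.  The size doubles to 16384 and the new center_bo_offset becomes
 * 16384 * 4096 / 8192 = 8192, which is already page-aligned and not smaller
 * than either required amount, so each side ends up with 8192 bytes: twice
 * what it had in use.
 */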
static int32_t
anv_block_pool_alloc_new(struct anv_block_pool *pool,
                         struct anv_block_state *pool_state,
                         uint32_t block_size)
{
   struct anv_block_state state, old, new;

   while (1) {
      state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
      if (state.next + block_size <= state.end) {
         assert(pool->map);
         return state.next;
      } else if (state.next <= state.end) {
         /* We allocated the first block outside the pool so we have to grow
          * the pool.  pool_state->next acts as a mutex: threads who try to
          * allocate now will get block indexes above the current limit and
          * hit futex_wait below.
          */
         new.next = state.next + block_size;
         do {
            new.end = anv_block_pool_grow(pool, pool_state);
         } while (new.end < new.next);

         old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
         if (old.next != state.next)
            futex_wake(&pool_state->end, INT_MAX);
         return state.next;
      } else {
         futex_wait(&pool_state->end, state.end, NULL);
         continue;
      }
   }
}
int32_t
anv_block_pool_alloc(struct anv_block_pool *pool,
                     uint32_t block_size)
{
   return anv_block_pool_alloc_new(pool, &pool->state, block_size);
}
/* Allocates a block out of the back of the block pool.
 *
 * This will allocate a block earlier than the "start" of the block pool.
 * The offsets returned from this function will be negative but will still
 * be correct relative to the block pool's map pointer.
 *
 * If you ever use anv_block_pool_alloc_back, then you will have to do
 * gymnastics with the block pool's BO when doing relocations.
 */
int32_t
anv_block_pool_alloc_back(struct anv_block_pool *pool,
                          uint32_t block_size)
{
   int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
                                             block_size);

   /* The offset we get out of anv_block_pool_alloc_new() is actually the
    * number of bytes downwards from the middle to the end of the block.
    * We need to turn it into a (negative) offset from the middle to the
    * start of the block.
    */
   assert(offset >= 0);
   return -(offset + block_size);
}
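
/* For example (made-up numbers): with a block_size of 4096, the first back
 * allocation covers bytes [-4096, 0) relative to pool->map and returns -4096;
 * the second covers [-8192, -4096) and returns -8192.
 */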
VkResult
anv_state_pool_init(struct anv_state_pool *pool,
                    struct anv_device *device,
                    uint64_t start_address,
                    uint32_t block_size,
                    uint64_t bo_flags)
{
   VkResult result = anv_block_pool_init(&pool->block_pool, device,
                                         start_address,
                                         block_size * 16,
                                         bo_flags);
   if (result != VK_SUCCESS)
      return result;

   assert(util_is_power_of_two_or_zero(block_size));
   pool->block_size = block_size;
   pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
   for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
      pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
      pool->buckets[i].block.next = 0;
      pool->buckets[i].block.end = 0;
   }
   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));

   return VK_SUCCESS;
}
void
anv_state_pool_finish(struct anv_state_pool *pool)
{
   VG(VALGRIND_DESTROY_MEMPOOL(pool));
   anv_block_pool_finish(&pool->block_pool);
}
static int32_t
anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
                                    struct anv_block_pool *block_pool,
                                    uint32_t state_size,
                                    uint32_t block_size)
{
   struct anv_block_state block, old, new;
   int32_t offset;

   /* If our state is large, we don't need any sub-allocation from a block.
    * Instead, we just grab whole (potentially large) blocks.
    */
   if (state_size >= block_size)
      return anv_block_pool_alloc(block_pool, state_size);

 restart:
   block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);

   if (block.next < block.end) {
      return block.next;
   } else if (block.next == block.end) {
      offset = anv_block_pool_alloc(block_pool, block_size);
      new.next = offset + state_size;
      new.end = offset + block_size;
      old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
      if (old.next != block.next)
         futex_wake(&pool->block.end, INT_MAX);
      return offset;
   } else {
      futex_wait(&pool->block.end, block.end, NULL);
      goto restart;
   }
}
static uint32_t
anv_state_pool_get_bucket(uint32_t size)
{
   unsigned size_log2 = ilog2_round_up(size);
   assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
   if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
      size_log2 = ANV_MIN_STATE_SIZE_LOG2;
   return size_log2 - ANV_MIN_STATE_SIZE_LOG2;
}
static uint32_t
anv_state_pool_get_bucket_size(uint32_t bucket)
{
   uint32_t size_log2 = bucket + ANV_MIN_STATE_SIZE_LOG2;
   return 1 << size_log2;
}
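
/* For example, assuming ANV_MIN_STATE_SIZE_LOG2 == 6 (64 bytes; the actual
 * values live in anv_private.h): a 48-byte request lands in bucket 0 and is
 * rounded up to a 64-byte allocation, while a 3000-byte request lands in
 * bucket 6 and gets 4096 bytes.
 */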
static struct anv_state
anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
                           uint32_t size, uint32_t align)
{
   uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));

   struct anv_state state;
   state.alloc_size = anv_state_pool_get_bucket_size(bucket);

   /* Try free list first. */
   if (anv_free_list_pop(&pool->buckets[bucket].free_list,
                         &pool->block_pool.map, &state.offset)) {
      assert(state.offset >= 0);
      goto done;
   }

   /* Try to grab a chunk from some larger bucket and split it up */
   for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
      int32_t chunk_offset;
      if (anv_free_list_pop(&pool->buckets[b].free_list,
                            &pool->block_pool.map, &chunk_offset)) {
         unsigned chunk_size = anv_state_pool_get_bucket_size(b);

         /* We've found a chunk that's larger than the requested state size.
          * There are a couple of options as to what we do with it:
          *
          *    1) We could fully split the chunk into state.alloc_size sized
          *       pieces.  However, this would mean that allocating a 16B
          *       state could potentially split a 2MB chunk into 512K smaller
          *       chunks.  This would lead to unnecessary fragmentation.
          *
          *    2) The classic "buddy allocator" method would have us split the
          *       chunk in half and return one half.  Then we would split the
          *       remaining half in half and return one half, and repeat as
          *       needed until we get down to the size we want.  However, if
          *       you are allocating a bunch of the same size state (which is
          *       the common case), this means that every other allocation has
          *       to go up a level and every fourth goes up two levels, etc.
          *       This is not nearly as efficient as it could be if we did a
          *       little more work up-front.
          *
          *    3) Split the difference between (1) and (2) by doing a
          *       two-level split.  If it's bigger than some fixed block_size,
          *       we split it into block_size sized chunks and return all but
          *       one of them.  Then we split what remains into
          *       state.alloc_size sized chunks and return all but one.
          *
          * We choose option (3).
          */
         if (chunk_size > pool->block_size &&
             state.alloc_size < pool->block_size) {
            assert(chunk_size % pool->block_size == 0);
            /* We don't want to split giant chunks into tiny chunks.  Instead,
             * break anything bigger than a block into block-sized chunks and
             * then break it down into bucket-sized chunks from there.  Return
             * all but the first block of the chunk to the block bucket.
             */
            const uint32_t block_bucket =
               anv_state_pool_get_bucket(pool->block_size);
            anv_free_list_push(&pool->buckets[block_bucket].free_list,
                               pool->block_pool.map,
                               chunk_offset + pool->block_size,
                               pool->block_size,
                               (chunk_size / pool->block_size) - 1);
            chunk_size = pool->block_size;
         }

         assert(chunk_size % state.alloc_size == 0);
         anv_free_list_push(&pool->buckets[bucket].free_list,
                            pool->block_pool.map,
                            chunk_offset + state.alloc_size,
                            state.alloc_size,
                            (chunk_size / state.alloc_size) - 1);

         state.offset = chunk_offset;
         goto done;
      }
   }

   state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
                                                      &pool->block_pool,
                                                      state.alloc_size,
                                                      pool->block_size);

done:
   state.map = pool->block_pool.map + state.offset;
   return state;
}
struct anv_state
anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
{
   if (size == 0)
      return ANV_STATE_NULL;

   struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
   VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
   return state;
}
struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool *pool)
{
   struct anv_state state;
   state.alloc_size = pool->block_size;

   if (anv_free_list_pop(&pool->back_alloc_free_list,
                         &pool->block_pool.map, &state.offset)) {
      assert(state.offset < 0);
      goto done;
   }

   state.offset = anv_block_pool_alloc_back(&pool->block_pool,
                                            pool->block_size);

done:
   state.map = pool->block_pool.map + state.offset;
   VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
   return state;
}
static void
anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
{
   assert(util_is_power_of_two_or_zero(state.alloc_size));
   unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);

   if (state.offset < 0) {
      assert(state.alloc_size == pool->block_size);
      anv_free_list_push(&pool->back_alloc_free_list,
                         pool->block_pool.map, state.offset,
                         state.alloc_size, 1);
   } else {
      anv_free_list_push(&pool->buckets[bucket].free_list,
                         pool->block_pool.map, state.offset,
                         state.alloc_size, 1);
   }
}
void
anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
{
   if (state.alloc_size == 0)
      return;

   VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
   anv_state_pool_free_no_vg(pool, state);
}
struct anv_state_stream_block {
   struct anv_state block;

   /* The next block */
   struct anv_state_stream_block *next;

#ifdef HAVE_VALGRIND
   /* A pointer to the first user-allocated thing in this block.  This is
    * what valgrind sees as the start of the block.
    */
   void *_vg_ptr;
#endif
};
/* The state stream allocator is a one-shot, single threaded allocator for
 * variable sized blocks.  We use it for allocating dynamic state.
 */
void
anv_state_stream_init(struct anv_state_stream *stream,
                      struct anv_state_pool *state_pool,
                      uint32_t block_size)
{
   stream->state_pool = state_pool;
   stream->block_size = block_size;

   stream->block = ANV_STATE_NULL;

   stream->block_list = NULL;

   /* Ensure that next + whatever > block_size.  This way the first call to
    * state_stream_alloc fetches a new block.
    */
   stream->next = block_size;

   VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
}
void
anv_state_stream_finish(struct anv_state_stream *stream)
{
   struct anv_state_stream_block *next = stream->block_list;
   while (next != NULL) {
      struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
      VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
      VG(VALGRIND_MAKE_MEM_UNDEFINED(next, stream->block_size));
      anv_state_pool_free_no_vg(stream->state_pool, sb.block);
      next = sb.next;
   }

   VG(VALGRIND_DESTROY_MEMPOOL(stream));
}
struct anv_state
anv_state_stream_alloc(struct anv_state_stream *stream,
                       uint32_t size, uint32_t alignment)
{
   if (size == 0)
      return ANV_STATE_NULL;

   assert(alignment <= PAGE_SIZE);

   uint32_t offset = align_u32(stream->next, alignment);
   if (offset + size > stream->block.alloc_size) {
      uint32_t block_size = stream->block_size;
      if (block_size < size)
         block_size = round_to_power_of_two(size);

      stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
                                                 block_size, PAGE_SIZE);

      struct anv_state_stream_block *sb = stream->block.map;
      VG_NOACCESS_WRITE(&sb->block, stream->block);
      VG_NOACCESS_WRITE(&sb->next, stream->block_list);
      stream->block_list = sb;
      VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, NULL));

      VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, stream->block_size));

      /* Reset back to the start plus space for the header */
      stream->next = sizeof(*sb);

      offset = align_u32(stream->next, alignment);
      assert(offset + size <= stream->block.alloc_size);
   }

   struct anv_state state = stream->block;
   state.offset += offset;
   state.alloc_size = size;
   state.map += offset;

   stream->next = offset + size;

#ifdef HAVE_VALGRIND
   struct anv_state_stream_block *sb = stream->block_list;
   void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
   if (vg_ptr == NULL) {
      vg_ptr = state.map;
      VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
      VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
   } else {
      void *state_end = state.map + state.alloc_size;
      /* This only updates the mempool.  The newly allocated chunk is still
       * marked as NOACCESS. */
      VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
      /* Mark the newly allocated chunk as undefined */
      VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
   }
#endif

   return state;
}
struct bo_pool_bo_link {
   struct bo_pool_bo_link *next;

   struct anv_bo bo;
};
void
anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
                 uint64_t bo_flags)
{
   pool->device = device;
   pool->bo_flags = bo_flags;
   memset(pool->free_list, 0, sizeof(pool->free_list));

   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
}
void
anv_bo_pool_finish(struct anv_bo_pool *pool)
{
   for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
      struct bo_pool_bo_link *link = PFL_PTR(pool->free_list[i]);
      while (link != NULL) {
         struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);

         anv_gem_munmap(link_copy.bo.map, link_copy.bo.size);
         anv_vma_free(pool->device, &link_copy.bo);
         anv_gem_close(pool->device, link_copy.bo.gem_handle);
         link = link_copy.next;
      }
   }

   VG(VALGRIND_DESTROY_MEMPOOL(pool));
}
VkResult
anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo, uint32_t size)
{
   VkResult result;

   const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size);
   const unsigned pow2_size = 1 << size_log2;
   const unsigned bucket = size_log2 - 12;
   assert(bucket < ARRAY_SIZE(pool->free_list));

   void *next_free_void;
   if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) {
      struct bo_pool_bo_link *next_free = next_free_void;
      *bo = VG_NOACCESS_READ(&next_free->bo);
      assert(bo->gem_handle);
      assert(bo->map == next_free);
      assert(size <= bo->size);

      VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));

      return VK_SUCCESS;
   }

   struct anv_bo new_bo;

   result = anv_bo_init_new(&new_bo, pool->device, pow2_size);
   if (result != VK_SUCCESS)
      return result;

   new_bo.flags = pool->bo_flags;

   if (!anv_vma_alloc(pool->device, &new_bo))
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   assert(new_bo.size == pow2_size);

   new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
   if (new_bo.map == MAP_FAILED) {
      anv_gem_close(pool->device, new_bo.gem_handle);
      anv_vma_free(pool->device, &new_bo);
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
   }

   *bo = new_bo;

   VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));

   return VK_SUCCESS;
}
void
anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in)
{
   /* Make a copy in case the anv_bo happens to be stored in the BO */
   struct anv_bo bo = *bo_in;

   VG(VALGRIND_MEMPOOL_FREE(pool, bo.map));

   struct bo_pool_bo_link *link = bo.map;
   VG_NOACCESS_WRITE(&link->bo, bo);

   assert(util_is_power_of_two_or_zero(bo.size));
   const unsigned size_log2 = ilog2_round_up(bo.size);
   const unsigned bucket = size_log2 - 12;
   assert(bucket < ARRAY_SIZE(pool->free_list));

   anv_ptr_free_list_push(&pool->free_list[bucket], link);
}
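
/* For example (made-up size): a 5000-byte request in anv_bo_pool_alloc()
 * rounds up to a pow2_size of 8192 (size_log2 == 13) and therefore uses
 * bucket 1; freeing that BO pushes it back onto free_list[1].
 */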
void
anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
{
   memset(pool, 0, sizeof(*pool));
}
void
anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
{
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      for (unsigned i = 0; i < 16; i++) {
         struct anv_scratch_bo *bo = &pool->bos[i][s];
         if (bo->exists > 0) {
            anv_vma_free(device, &bo->bo);
            anv_gem_close(device, bo->bo.gem_handle);
         }
      }
   }
}
struct anv_bo *
anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
                       gl_shader_stage stage, unsigned per_thread_scratch)
{
   if (per_thread_scratch == 0)
      return NULL;

   unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
   assert(scratch_size_log2 < 16);

   struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage];

   /* We can use "exists" to shortcut and ignore the critical section */
   if (bo->exists)
      return &bo->bo;

   pthread_mutex_lock(&device->mutex);

   __sync_synchronize();
   if (bo->exists) {
      pthread_mutex_unlock(&device->mutex);
      return &bo->bo;
   }

   const struct anv_physical_device *physical_device =
      &device->instance->physicalDevice;
   const struct gen_device_info *devinfo = &physical_device->info;

   const unsigned subslices = MAX2(physical_device->subslice_total, 1);

   unsigned scratch_ids_per_subslice;
   if (devinfo->is_haswell) {
      /* WaCSScratchSize:hsw
       *
       * Haswell's scratch space address calculation appears to be sparse
       * rather than tightly packed.  The Thread ID has bits indicating
       * which subslice, EU within a subslice, and thread within an EU it
       * is.  There's a maximum of two slices and two subslices, so these
       * can be stored with a single bit.  Even though there are only 10 EUs
       * per subslice, this is stored in 4 bits, so there's an effective
       * maximum value of 16 EUs.  Similarly, although there are only 7
       * threads per EU, this is stored in a 3 bit number, giving an
       * effective maximum value of 8 threads per EU.
       *
       * This means that we need to use 16 * 8 instead of 10 * 7 for the
       * number of threads per subslice.
       */
      scratch_ids_per_subslice = 16 * 8;
   } else if (devinfo->is_cherryview) {
      /* Cherryview devices have either 6 or 8 EUs per subslice, and each EU
       * has 7 threads.  The 6 EU devices appear to calculate thread IDs as
       * if it had 8 EUs.
       */
      scratch_ids_per_subslice = 8 * 7;
   } else {
      scratch_ids_per_subslice = devinfo->max_cs_threads;
   }

   uint32_t max_threads[] = {
      [MESA_SHADER_VERTEX]    = devinfo->max_vs_threads,
      [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
      [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
      [MESA_SHADER_GEOMETRY]  = devinfo->max_gs_threads,
      [MESA_SHADER_FRAGMENT]  = devinfo->max_wm_threads,
      [MESA_SHADER_COMPUTE]   = scratch_ids_per_subslice * subslices,
   };

   uint32_t size = per_thread_scratch * max_threads[stage];

   anv_bo_init_new(&bo->bo, device, size);

   /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
    * are still relative to the general state base address.  When we emit
    * STATE_BASE_ADDRESS, we set general state base address to 0 and the size
    * to the maximum (1 page under 4GB).  This allows us to just place the
    * scratch buffers anywhere we wish in the bottom 32 bits of address space
    * and just set the scratch base pointer in 3DSTATE_*S using a relocation.
    * However, in order to do so, we need to ensure that the kernel does not
    * place the scratch BO above the 32-bit boundary.
    *
    * NOTE: Technically, it can't go "anywhere" because the top page is off
    * limits.  However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
    * kernel allocates space using
    *
    *   end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
    *
    * so nothing will ever touch the top page.
    */
   assert(!(bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS));

   if (device->instance->physicalDevice.has_exec_async)
      bo->bo.flags |= EXEC_OBJECT_ASYNC;

   if (device->instance->physicalDevice.use_softpin)
      bo->bo.flags |= EXEC_OBJECT_PINNED;

   anv_vma_alloc(device, &bo->bo);

   /* Set the exists last because it may be read by other threads */
   __sync_synchronize();
   bo->exists = true;

   pthread_mutex_unlock(&device->mutex);

   return &bo->bo;
}
struct anv_cached_bo {
   struct anv_bo bo;

   uint32_t refcount;
};
VkResult
anv_bo_cache_init(struct anv_bo_cache *cache)
{
   cache->bo_map = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                           _mesa_key_pointer_equal);
   if (!cache->bo_map)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pthread_mutex_init(&cache->mutex, NULL)) {
      _mesa_hash_table_destroy(cache->bo_map, NULL);
      return vk_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
                       "pthread_mutex_init failed: %m");
   }

   return VK_SUCCESS;
}
void
anv_bo_cache_finish(struct anv_bo_cache *cache)
{
   _mesa_hash_table_destroy(cache->bo_map, NULL);
   pthread_mutex_destroy(&cache->mutex);
}
static struct anv_cached_bo *
anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(cache->bo_map,
                              (const void *)(uintptr_t)gem_handle);
   if (!entry)
      return NULL;

   struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
   assert(bo->bo.gem_handle == gem_handle);

   return bo;
}
UNUSED static struct anv_bo *
anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
{
   pthread_mutex_lock(&cache->mutex);

   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);

   pthread_mutex_unlock(&cache->mutex);

   return bo ? &bo->bo : NULL;
}
#define ANV_BO_CACHE_SUPPORTED_FLAGS \
   (EXEC_OBJECT_WRITE | \
    EXEC_OBJECT_ASYNC | \
    EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
    EXEC_OBJECT_PINNED | \
    ANV_BO_EXTERNAL)
VkResult
anv_bo_cache_alloc(struct anv_device *device,
                   struct anv_bo_cache *cache,
                   uint64_t size, uint64_t bo_flags,
                   struct anv_bo **bo_out)
{
   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));

   struct anv_cached_bo *bo =
      vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!bo)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* The kernel is going to give us whole pages anyway */
   size = align_u64(size, 4096);

   VkResult result = anv_bo_init_new(&bo->bo, device, size);
   if (result != VK_SUCCESS) {
      vk_free(&device->alloc, bo);
      return result;
   }

   bo->bo.flags = bo_flags;

   if (!anv_vma_alloc(device, &bo->bo)) {
      anv_gem_close(device, bo->bo.gem_handle);
      vk_free(&device->alloc, bo);
      return vk_errorf(device->instance, NULL,
                       VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "failed to allocate virtual address for BO");
   }

   assert(bo->bo.gem_handle);

   bo->refcount = 1;

   pthread_mutex_lock(&cache->mutex);

   _mesa_hash_table_insert(cache->bo_map,
                           (void *)(uintptr_t)bo->bo.gem_handle, bo);

   pthread_mutex_unlock(&cache->mutex);

   *bo_out = &bo->bo;

   return VK_SUCCESS;
}
VkResult
anv_bo_cache_import(struct anv_device *device,
                    struct anv_bo_cache *cache,
                    int fd, uint64_t bo_flags,
                    struct anv_bo **bo_out)
{
   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
   assert(bo_flags & ANV_BO_EXTERNAL);

   pthread_mutex_lock(&cache->mutex);

   uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
   if (!gem_handle) {
      pthread_mutex_unlock(&cache->mutex);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }

   struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
   if (bo) {
      /* We have to be careful how we combine flags so that it makes sense.
       * Really, though, if we get to this case and it actually matters, the
       * client has imported a BO twice in different ways and they get what
       * they get.
       */
      uint64_t new_flags = ANV_BO_EXTERNAL;
      new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_WRITE;
      new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_ASYNC;
      new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
      new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_PINNED;

      /* It's theoretically possible for a BO to get imported such that it's
       * both pinned and not pinned.  The only way this can happen is if it
       * gets imported as both a semaphore and a memory object and that would
       * be an application error.  Just fail out in that case.
       */
      if ((bo->bo.flags & EXEC_OBJECT_PINNED) !=
          (bo_flags & EXEC_OBJECT_PINNED)) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "The same BO was imported two different ways");
      }

      /* It's also theoretically possible that someone could export a BO from
       * one heap and import it into another or to import the same BO into two
       * different heaps.  If this happens, we could potentially end up both
       * allowing and disallowing 48-bit addresses.  There's not much we can
       * do about it if we're pinning so we just throw an error and hope no
       * app is actually that stupid.
       */
      if ((new_flags & EXEC_OBJECT_PINNED) &&
          (bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
          (bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "The same BO was imported on two different heaps");
      }

      bo->bo.flags = new_flags;

      __sync_fetch_and_add(&bo->refcount, 1);
   } else {
      off_t size = lseek(fd, 0, SEEK_END);
      if (size == (off_t)-1) {
         anv_gem_close(device, gem_handle);
         pthread_mutex_unlock(&cache->mutex);
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
      }

      bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!bo) {
         anv_gem_close(device, gem_handle);
         pthread_mutex_unlock(&cache->mutex);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bo->refcount = 1;

      anv_bo_init(&bo->bo, gem_handle, size);
      bo->bo.flags = bo_flags;

      if (!anv_vma_alloc(device, &bo->bo)) {
         anv_gem_close(device, bo->bo.gem_handle);
         pthread_mutex_unlock(&cache->mutex);
         vk_free(&device->alloc, bo);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "failed to allocate virtual address for BO");
      }

      _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
   }

   pthread_mutex_unlock(&cache->mutex);

   *bo_out = &bo->bo;

   return VK_SUCCESS;
}
VkResult
anv_bo_cache_export(struct anv_device *device,
                    struct anv_bo_cache *cache,
                    struct anv_bo *bo_in, int *fd_out)
{
   assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
   struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;

   /* This BO must have been flagged external in order for us to be able
    * to export it.  This is done based on external options passed into
    * anv_AllocateMemory.
    */
   assert(bo->bo.flags & ANV_BO_EXTERNAL);

   int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
   if (fd < 0)
      return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

   *fd_out = fd;

   return VK_SUCCESS;
}
static bool
atomic_dec_not_one(uint32_t *counter)
{
   uint32_t old, val;

   val = *counter;
   while (true) {
      if (val == 1)
         return false;

      old = __sync_val_compare_and_swap(counter, val, val - 1);
      if (old == val)
         return true;

      val = old;
   }
}
void
anv_bo_cache_release(struct anv_device *device,
                     struct anv_bo_cache *cache,
                     struct anv_bo *bo_in)
{
   assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
   struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;

   /* Try to decrement the counter but don't go below one.  If this succeeds
    * then the refcount has been decremented and we are not the last
    * reference.
    */
   if (atomic_dec_not_one(&bo->refcount))
      return;

   pthread_mutex_lock(&cache->mutex);

   /* We are probably the last reference since our attempt to decrement above
    * failed.  However, we can't actually know until we are inside the mutex.
    * Otherwise, someone could import the BO between the decrement and our
    * taking of the mutex.
    */
   if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
      /* Turns out we're not the last reference.  Unlock and bail. */
      pthread_mutex_unlock(&cache->mutex);
      return;
   }

   struct hash_entry *entry =
      _mesa_hash_table_search(cache->bo_map,
                              (const void *)(uintptr_t)bo->bo.gem_handle);
   assert(entry);
   _mesa_hash_table_remove(cache->bo_map, entry);

   if (bo->bo.map)
      anv_gem_munmap(bo->bo.map, bo->bo.size);

   anv_vma_free(device, &bo->bo);

   anv_gem_close(device, bo->bo.gem_handle);

   /* Don't unlock until we've actually closed the BO.  The whole point of
    * the BO cache is to ensure that we correctly handle races with creating
    * and releasing GEM handles and we don't want to let someone import the BO
    * again between mutex unlock and closing the GEM handle.
    */
   pthread_mutex_unlock(&cache->mutex);

   vk_free(&device->alloc, bo);
}