/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "anv_private.h"

#include "util/anon_file.h"
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({                          \
   VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr)));    \
   __typeof(*(__ptr)) __val = *(__ptr);                     \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));   \
   __val;                                                   \
})
#define VG_NOACCESS_WRITE(__ptr, __val) ({                  \
   VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr)));  \
   *(__ptr) = (__val);                                      \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));   \
})
#else
#define VG_NOACCESS_READ(__ptr) (*(__ptr))
#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
#endif
#ifndef MAP_POPULATE
#define MAP_POPULATE 0
#endif
/* Design goals:
 *
 *  - Lock free (except when resizing underlying bos)
 *
 *  - Constant time allocation with typically only one atomic
 *
 *  - Multiple allocation sizes without fragmentation
 *
 *  - Can grow while keeping addresses and offset of contents stable
 *
 *  - All allocations within one bo so we can point one of the
 *    STATE_BASE_ADDRESS pointers at it.
 *
 * The overall design is a two-level allocator: top level is a fixed size, big
 * block (8k) allocator, which operates out of a bo.  Allocation is done by
 * either pulling a block from the free list or growing the used range of the
 * bo.  Growing the range may run out of space in the bo which we then need to
 * grow.  Growing the bo is tricky in a multi-threaded, lockless environment:
 * we need to keep all pointers and contents in the old map valid.  GEM bos in
 * general can't grow, but we use a trick: we create a memfd and use ftruncate
 * to grow it as necessary.  We mmap the new size and then create a gem bo for
 * it using the new gem userptr ioctl.  Without heavy-handed locking around
 * our allocation fast-path, there isn't really a way to munmap the old mmap,
 * so we just keep it around until garbage collection time.  While the block
 * allocator is lockless for normal operations, we block other threads trying
 * to allocate while we're growing the map.  It shouldn't happen often, and
 * growing is fast anyway.
 *
 * At the next level we can use various sub-allocators.  The state pool is a
 * pool of smaller, fixed size objects, which operates much like the block
 * pool.  It uses a free list for freeing objects, but when it runs out of
 * space it just allocates a new block from the block pool.  This allocator is
 * intended for longer lived state objects such as SURFACE_STATE and most
 * other persistent state objects in the API.  We may need to track more info
 * with these objects and a pointer back to the CPU object (eg VkImage).  In
 * those cases we just allocate a slightly bigger object and put the extra
 * state after the GPU state object.
 *
 * The state stream allocator works similarly to how the i965 DRI driver
 * streams all its state.  Even with Vulkan, we need to emit transient state
 * (whether surface state base or dynamic state base), and for that we can
 * just get a block and fill it up.  These cases are local to a command buffer
 * and the sub-allocator need not be thread safe.  The streaming allocator
 * gets a new block when it runs out of space and chains them together so
 * they can be easily freed.
 */
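
/* Illustrative sketch (editor's note, not part of the driver): the lock-free
 * fast path described above relies on packing an allocator's "next" and
 * "end" offsets into one 64-bit word so that claiming a range is a single
 * atomic add.  Names here are hypothetical; the real structures live in
 * anv_private.h.
 *
 *    union example_block_state {
 *       struct {
 *          uint32_t next;   // first unallocated byte
 *          uint32_t end;    // one past the last usable byte
 *       };
 *       uint64_t u64;       // both fields, updated atomically
 *    };
 *
 *    // Claim block_size bytes: one atomic on the fast path.
 *    union example_block_state st;
 *    st.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
 *    if (st.next + block_size <= st.end)
 *       return st.next;                  // it fits, we're done
 *    // otherwise fall into the (mutex-protected) grow path
 */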
/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
 * We use it to indicate the free list is empty.
 */
#define EMPTY UINT32_MAX

#define PAGE_SIZE 4096
struct anv_mmap_cleanup {
   void *map;
   size_t size;
};
static inline uint32_t
ilog2_round_up(uint32_t value)
{
   assert(value != 0);
   return 32 - __builtin_clz(value - 1);
}

static inline uint32_t
round_to_power_of_two(uint32_t value)
{
   return 1 << ilog2_round_up(value);
}
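
/* Rough worked examples (editor's note, not in the original source):
 * ilog2_round_up(24) = 32 - __builtin_clz(23) = 32 - 27 = 5, so
 * round_to_power_of_two(24) = 1 << 5 = 32.  Exact powers of two are
 * returned unchanged, e.g. round_to_power_of_two(64) = 64.
 */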
struct anv_state_table_cleanup {
   void *map;
   size_t size;
};

#define ANV_STATE_TABLE_CLEANUP_INIT ((struct anv_state_table_cleanup){0})
#define ANV_STATE_ENTRY_SIZE (sizeof(struct anv_free_entry))
static VkResult
anv_state_table_expand_range(struct anv_state_table *table, uint32_t size);
VkResult
anv_state_table_init(struct anv_state_table *table,
                     struct anv_device *device,
                     uint32_t initial_entries)
{
   VkResult result;

   table->device = device;

   /* Just make it 2GB up-front.  The Linux kernel won't actually back it
    * with pages until we either map and fault on one of them or we use
    * userptr and send a chunk of it off to the GPU.
    */
   table->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "state table");
   if (table->fd == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      return result;
   }

   if (!u_vector_init(&table->cleanups,
                      round_to_power_of_two(sizeof(struct anv_state_table_cleanup)),
                      128)) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   table->state.next = 0;
   table->state.end = 0;
   table->size = 0;

   uint32_t initial_size = initial_entries * ANV_STATE_ENTRY_SIZE;
   result = anv_state_table_expand_range(table, initial_size);
   if (result != VK_SUCCESS)
      goto fail_cleanups;

   return VK_SUCCESS;

 fail_cleanups:
   u_vector_finish(&table->cleanups);
 fail_fd:
   close(table->fd);

   return result;
}
static VkResult
anv_state_table_expand_range(struct anv_state_table *table, uint32_t size)
{
   void *map;
   struct anv_state_table_cleanup *cleanup;

   /* Assert that we only ever grow the table */
   assert(size >= table->state.end);

   /* Make sure that we don't go outside the bounds of the memfd */
   if (size > BLOCK_POOL_MEMFD_SIZE)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cleanup = u_vector_add(&table->cleanups);
   if (!cleanup)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *cleanup = ANV_STATE_TABLE_CLEANUP_INIT;

   /* Just leak the old map until we destroy the pool.  We can't munmap it
    * without races or imposing locking on the block allocate fast path.  On
    * the whole, the leaked maps add up to less than the size of the
    * current map.  MAP_POPULATE seems like the right thing to do, but we
    * should try to get some numbers.
    */
   map = mmap(NULL, size, PROT_READ | PROT_WRITE,
              MAP_SHARED | MAP_POPULATE, table->fd, 0);
   if (map == MAP_FAILED) {
      return vk_errorf(table->device, table->device,
                       VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");
   }

   cleanup->map = map;
   cleanup->size = size;

   table->map = map;
   table->size = size;

   return VK_SUCCESS;
}
static VkResult
anv_state_table_grow(struct anv_state_table *table)
{
   VkResult result = VK_SUCCESS;

   uint32_t used = align_u32(table->state.next * ANV_STATE_ENTRY_SIZE,
                             PAGE_SIZE);
   uint32_t old_size = table->size;

   /* The state table is always initialized to a nonzero size and this
    * function is always called after initialization.
    */
   assert(old_size > 0);

   uint32_t required = MAX2(used, old_size);
   if (used * 2 <= required) {
      /* If we're in this case then this isn't the first allocation and we
       * already have enough space to hold double what we have allocated.
       * There's nothing for us to do.
       */
      return result;
   }

   uint32_t size = old_size * 2;
   while (size < required)
      size *= 2;

   assert(size > table->size);

   result = anv_state_table_expand_range(table, size);

   return result;
}
void
anv_state_table_finish(struct anv_state_table *table)
{
   struct anv_state_table_cleanup *cleanup;

   u_vector_foreach(cleanup, &table->cleanups) {
      if (cleanup->map)
         munmap(cleanup->map, cleanup->size);
   }

   u_vector_finish(&table->cleanups);
}
VkResult
anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
                    uint32_t count)
{
   struct anv_block_state state, old, new;
   VkResult result;

   assert(idx);

   while (1) {
      state.u64 = __sync_fetch_and_add(&table->state.u64, count);
      if (state.next + count <= state.end) {
         assert(table->map);
         struct anv_free_entry *entry = &table->map[state.next];
         for (int i = 0; i < count; i++) {
            entry[i].state.idx = state.next + i;
         }
         *idx = state.next;
         return VK_SUCCESS;
      } else if (state.next <= state.end) {
         /* We allocated the first block outside the pool so we have to grow
          * the pool.  pool_state->next acts as a mutex: threads who try to
          * allocate now will get block indexes above the current limit and
          * hit futex_wait below.
          */
         new.next = state.next + count;
         do {
            result = anv_state_table_grow(table);
            if (result != VK_SUCCESS)
               return result;
            new.end = table->size / ANV_STATE_ENTRY_SIZE;
         } while (new.end < new.next);

         old.u64 = __sync_lock_test_and_set(&table->state.u64, new.u64);
         if (old.next != state.next)
            futex_wake(&table->state.end, INT_MAX);
      } else {
         futex_wait(&table->state.end, state.end, NULL);
         continue;
      }
   }
}
void
anv_free_list_push(union anv_free_list *list,
                   struct anv_state_table *table,
                   uint32_t first, uint32_t count)
{
   union anv_free_list current, old, new;
   uint32_t last = first;

   for (uint32_t i = 1; i < count; i++, last++)
      table->map[last].next = last + 1;

   old = *list;
   do {
      current = old;
      table->map[last].next = current.offset;
      new.offset = first;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
   } while (old.u64 != current.u64);
}
struct anv_state *
anv_free_list_pop(union anv_free_list *list,
                  struct anv_state_table *table)
{
   union anv_free_list current, new, old;

   current.u64 = list->u64;
   while (current.offset != EMPTY) {
      __sync_synchronize();
      new.offset = table->map[current.offset].next;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
      if (old.u64 == current.u64) {
         struct anv_free_entry *entry = &table->map[current.offset];
         return &entry->state;
      }
      current = old;
   }

   return NULL;
}
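
/* Editor's note: the free list head packs a 32-bit "offset" (index of the
 * first free entry, or EMPTY) together with a 32-bit "count" that is bumped
 * on every update.  The count is what makes the compare-and-swap above safe
 * against the classic ABA problem: even if another thread pops and re-pushes
 * the same entry between our read and our CAS, the count will differ and the
 * CAS will fail.  A minimal sketch of the idea (hypothetical names):
 *
 *    union example_free_list {
 *       struct {
 *          uint32_t offset;   // index of first free entry, or EMPTY
 *          uint32_t count;    // version tag, incremented on every change
 *       };
 *       uint64_t u64;         // compared-and-swapped as a single value
 *    };
 */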
static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
                            uint32_t center_bo_offset, uint32_t size);
VkResult
anv_block_pool_init(struct anv_block_pool *pool,
                    struct anv_device *device,
                    uint64_t start_address,
                    uint32_t initial_size)
{
   VkResult result;

   pool->device = device;
   pool->use_softpin = device->physical->use_softpin;
   pool->nbos = 0;
   pool->size = 0;
   pool->center_bo_offset = 0;
   pool->start_address = gen_canonical_address(start_address);
   pool->map = NULL;

   if (pool->use_softpin) {
      pool->bo = NULL;
      pool->fd = -1;
   } else {
      /* Just make it 2GB up-front.  The Linux kernel won't actually back it
       * with pages until we either map and fault on one of them or we use
       * userptr and send a chunk of it off to the GPU.
       */
      pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
      if (pool->fd == -1)
         return vk_error(VK_ERROR_INITIALIZATION_FAILED);

      pool->wrapper_bo = (struct anv_bo) {
         .refcount = 1,
         .offset = -1,
         .is_wrapper = true,
      };
      pool->bo = &pool->wrapper_bo;
   }

   if (!u_vector_init(&pool->mmap_cleanups,
                      round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
                      128)) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   pool->state.next = 0;
   pool->state.end = 0;
   pool->back_state.next = 0;
   pool->back_state.end = 0;

   result = anv_block_pool_expand_range(pool, 0, initial_size);
   if (result != VK_SUCCESS)
      goto fail_mmap_cleanups;

   /* Make the entire pool available in the front of the pool.  If back
    * allocation needs to use this space, the "ends" will be re-arranged.
    */
   pool->state.end = pool->size;

   return VK_SUCCESS;

 fail_mmap_cleanups:
   u_vector_finish(&pool->mmap_cleanups);
 fail_fd:
   if (pool->fd >= 0)
      close(pool->fd);

   return result;
}
void
anv_block_pool_finish(struct anv_block_pool *pool)
{
   anv_block_pool_foreach_bo(bo, pool) {
      if (bo->map)
         anv_gem_munmap(bo->map, bo->size);
      anv_gem_close(pool->device, bo->gem_handle);
   }

   struct anv_mmap_cleanup *cleanup;
   u_vector_foreach(cleanup, &pool->mmap_cleanups)
      munmap(cleanup->map, cleanup->size);
   u_vector_finish(&pool->mmap_cleanups);

   if (pool->fd >= 0)
      close(pool->fd);
}
static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
                            uint32_t center_bo_offset, uint32_t size)
{
   /* Assert that we only ever grow the pool */
   assert(center_bo_offset >= pool->back_state.end);
   assert(size - center_bo_offset >= pool->state.end);

   /* Assert that we don't go outside the bounds of the memfd */
   assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
   assert(pool->use_softpin ||
          size - center_bo_offset <=
          BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);

   /* For state pool BOs we have to be a bit careful about where we place them
    * in the GTT.  There are two documented workarounds for state base address
    * placement: Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
    * which state that those two base addresses do not support 48-bit
    * addresses and need to be placed in the bottom 32-bit range.
    * Unfortunately, this is not quite accurate.
    *
    * The real problem is that we always set the size of our state pools in
    * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
    * likely significantly smaller.  We do this because we do not know at the
    * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
    * the pool during command buffer building so we don't actually have a
    * valid final size.  If the address + size, as seen by STATE_BASE_ADDRESS,
    * overflows 48 bits, the GPU appears to treat all accesses to the buffer
    * as being out of bounds and returns zero.  For dynamic state, this
    * usually just leads to rendering corruptions, but shaders that are all
    * zero hang the GPU immediately.
    *
    * The easiest solution is to do exactly what the bogus workarounds say:
    * restrict these buffers to 32-bit addresses.  We could also pin the BO
    * to some particular location of our choosing, but that's significantly
    * more work than just not setting a flag.  So, we explicitly DO NOT set
    * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
    * hard work for us.  When using softpin, we're in control and the fixed
    * addresses we choose are fine for base addresses.
    */
   enum anv_bo_alloc_flags bo_alloc_flags = ANV_BO_ALLOC_CAPTURE;
   if (!pool->use_softpin)
      bo_alloc_flags |= ANV_BO_ALLOC_32BIT_ADDRESS;

   if (pool->use_softpin) {
      uint32_t new_bo_size = size - pool->size;
      struct anv_bo *new_bo;
      assert(center_bo_offset == 0);
      VkResult result = anv_device_alloc_bo(pool->device, new_bo_size,
                                            bo_alloc_flags |
                                            ANV_BO_ALLOC_FIXED_ADDRESS |
                                            ANV_BO_ALLOC_MAPPED |
                                            ANV_BO_ALLOC_SNOOPED,
                                            pool->start_address + pool->size,
                                            &new_bo);
      if (result != VK_SUCCESS)
         return result;

      pool->bos[pool->nbos++] = new_bo;

      /* This pointer will always point to the first BO in the list */
      pool->bo = pool->bos[0];
   } else {
      /* Just leak the old map until we destroy the pool.  We can't munmap it
       * without races or imposing locking on the block allocate fast path.
       * On the whole, the leaked maps add up to less than the size of the
       * current map.  MAP_POPULATE seems like the right thing to do, but we
       * should try to get some numbers.
       */
      void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_POPULATE, pool->fd,
                       BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
      if (map == MAP_FAILED)
         return vk_errorf(pool->device, pool->device,
                          VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");

      struct anv_bo *new_bo;
      VkResult result = anv_device_import_bo_from_host_ptr(pool->device,
                                                            map, size,
                                                            bo_alloc_flags,
                                                            0 /* client_address */,
                                                            &new_bo);
      if (result != VK_SUCCESS) {
         munmap(map, size);
         return result;
      }

      struct anv_mmap_cleanup *cleanup = u_vector_add(&pool->mmap_cleanups);
      if (!cleanup) {
         munmap(map, size);
         anv_device_release_bo(pool->device, new_bo);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      cleanup->map = map;
      cleanup->size = size;

      /* Now that we mapped the new memory, we can write the new
       * center_bo_offset back into pool and update pool->map.
       */
      pool->center_bo_offset = center_bo_offset;
      pool->map = map + center_bo_offset;

      pool->bos[pool->nbos++] = new_bo;
      pool->wrapper_bo.map = new_bo;
   }

   assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS);
   pool->size = size;

   return VK_SUCCESS;
}
/** Returns the current memory map of the block pool.
 *
 * The returned pointer points to the map for the memory at the specified
 * offset.  The offset parameter is relative to the "center" of the block
 * pool rather than the start of the block pool BO map.
 */
void *
anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
{
   if (pool->use_softpin) {
      struct anv_bo *bo = NULL;
      int32_t bo_offset = 0;
      anv_block_pool_foreach_bo(iter_bo, pool) {
         if (offset < bo_offset + iter_bo->size) {
            bo = iter_bo;
            break;
         }
         bo_offset += iter_bo->size;
      }
      assert(bo != NULL);
      assert(offset >= bo_offset);

      return bo->map + (offset - bo_offset);
   } else {
      return pool->map + offset;
   }
}
/** Grows and re-centers the block pool.
 *
 * We grow the block pool in one or both directions in such a way that the
 * following conditions are met:
 *
 *  1) The size of the entire pool is always a power of two.
 *
 *  2) The pool only grows on both ends.  Neither end can get shrunk.
 *
 *  3) At the end of the allocation, we have about twice as much space
 *     allocated for each end as we have used.  This way the pool doesn't
 *     grow too far in one direction or the other.
 *
 *  4) If the _alloc_back() has never been called, then the back portion of
 *     the pool retains a size of zero.  (This makes it easier for users of
 *     the block pool that only want a one-sided pool.)
 *
 *  5) We have enough space allocated for at least one more block in
 *     whichever side `state` points to.
 *
 *  6) The center of the pool is always aligned to both the block_size of
 *     the pool and a 4K CPU page.
 */
static uint32_t
anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
{
   VkResult result = VK_SUCCESS;

   pthread_mutex_lock(&pool->device->mutex);

   assert(state == &pool->state || state == &pool->back_state);

   /* Gather a little usage information on the pool.  Since we may have
    * threads waiting in queue to get some storage while we resize, it's
    * actually possible that total_used will be larger than old_size.  In
    * particular, block_pool_alloc() increments state->next prior to
    * calling block_pool_grow, so this ensures that we get enough space for
    * whichever side tries to grow the pool.
    *
    * We align to a page size because it makes it easier to do our
    * calculations later in such a way that we stay page-aligned.
    */
   uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
   uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
   uint32_t total_used = front_used + back_used;

   assert(state == &pool->state || back_used > 0);

   uint32_t old_size = pool->size;

   /* The block pool is always initialized to a nonzero size and this function
    * is always called after initialization.
    */
   assert(old_size > 0);

   /* The back_used and front_used may actually be smaller than the actual
    * requirement because they are based on the next pointers which are
    * updated prior to calling this function.
    */
   uint32_t back_required = MAX2(back_used, pool->center_bo_offset);
   uint32_t front_required = MAX2(front_used, old_size - pool->center_bo_offset);

   if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
      /* If we're in this case then this isn't the first allocation and we
       * already have enough space on both sides to hold double what we
       * have allocated.  There's nothing for us to do.
       */
      goto done;
   }

   uint32_t size = old_size * 2;
   while (size < back_required + front_required)
      size *= 2;

   assert(size > pool->size);

   /* We compute a new center_bo_offset such that, when we double the size
    * of the pool, we maintain the ratio of how much is used by each side.
    * This way things should remain more-or-less balanced.
    */
   uint32_t center_bo_offset;
   if (back_used == 0) {
      /* If we're in this case then we have never called alloc_back().  In
       * this case, we want to keep the offset at 0 to make things as simple
       * as possible for users that don't care about back allocations.
       */
      center_bo_offset = 0;
   } else {
      /* Try to "center" the allocation based on how much is currently in
       * use on each side of the center line.
       */
      center_bo_offset = ((uint64_t)size * back_used) / total_used;

      /* Align down to a multiple of the page size */
      center_bo_offset &= ~(PAGE_SIZE - 1);

      assert(center_bo_offset >= back_used);

      /* Make sure we don't shrink the back end of the pool */
      if (center_bo_offset < back_required)
         center_bo_offset = back_required;

      /* Make sure that we don't shrink the front end of the pool */
      if (size - center_bo_offset < front_required)
         center_bo_offset = size - front_required;
   }

   assert(center_bo_offset % PAGE_SIZE == 0);

   result = anv_block_pool_expand_range(pool, center_bo_offset, size);

 done:
   pthread_mutex_unlock(&pool->device->mutex);

   if (result == VK_SUCCESS) {
      /* Return the appropriate new size.  This function never actually
       * updates state->next.  Instead, we let the caller do that because it
       * needs to do so in order to maintain its concurrency model.
       */
      if (state == &pool->state) {
         return pool->size - pool->center_bo_offset;
      } else {
         assert(pool->center_bo_offset > 0);
         return pool->center_bo_offset;
      }
   } else {
      return 0;
   }
}
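
/* Editor's note: a rough worked example of the re-centering math above
 * (numbers invented for illustration).  Suppose old_size = 64 KiB,
 * back_used = 8 KiB and front_used = 40 KiB (both already page aligned), so
 * total_used = 48 KiB.  Since back_used * 2 > back_required we must grow:
 * size becomes 128 KiB.  The new center preserves the usage ratio,
 * center_bo_offset = 128 KiB * 8 / 48 ≈ 21.3 KiB, aligned down to 20 KiB,
 * then clamped so that neither the back (>= back_required) nor the front
 * (size - center >= front_required) shrinks.  The caller's side then gets
 * its new "end": size - center for the front, or center for the back.
 */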
static uint32_t
anv_block_pool_alloc_new(struct anv_block_pool *pool,
                         struct anv_block_state *pool_state,
                         uint32_t block_size, uint32_t *padding)
{
   struct anv_block_state state, old, new;

   /* Most allocations won't generate any padding */
   if (padding)
      *padding = 0;

   while (1) {
      state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
      if (state.next + block_size <= state.end) {
         return state.next;
      } else if (state.next <= state.end) {
         if (pool->use_softpin && state.next < state.end) {
            /* We need to grow the block pool, but still have some leftover
             * space that can't be used by that particular allocation.  So we
             * add that as a "padding", and return it.
             */
            uint32_t leftover = state.end - state.next;

            /* If there is some leftover space in the pool, the caller must
             * deal with it.
             */
            assert(leftover == 0 || padding);
            if (padding)
               *padding = leftover;
            state.next += leftover;
         }

         /* We allocated the first block outside the pool so we have to grow
          * the pool.  pool_state->next acts as a mutex: threads who try to
          * allocate now will get block indexes above the current limit and
          * hit futex_wait below.
          */
         new.next = state.next + block_size;
         do {
            new.end = anv_block_pool_grow(pool, pool_state);
         } while (new.end < new.next);

         old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
         if (old.next != state.next)
            futex_wake(&pool_state->end, INT_MAX);
         return state.next;
      } else {
         futex_wait(&pool_state->end, state.end, NULL);
         continue;
      }
   }
}
int32_t
anv_block_pool_alloc(struct anv_block_pool *pool,
                     uint32_t block_size, uint32_t *padding)
{
   uint32_t offset;

   offset = anv_block_pool_alloc_new(pool, &pool->state, block_size, padding);

   return offset;
}
/* Allocates a block out of the back of the block pool.
 *
 * This will allocate a block earlier than the "start" of the block pool.
 * The offsets returned from this function will be negative but will still
 * be correct relative to the block pool's map pointer.
 *
 * If you ever use anv_block_pool_alloc_back, then you will have to do
 * gymnastics with the block pool's BO when doing relocations.
 */
int32_t
anv_block_pool_alloc_back(struct anv_block_pool *pool,
                          uint32_t block_size)
{
   int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
                                             block_size, NULL);

   /* The offset we get out of anv_block_pool_alloc_new() is actually the
    * number of bytes downwards from the middle to the end of the block.
    * We need to turn it into a (negative) offset from the middle to the
    * start of the block.
    */
   assert(offset >= 0);
   return -(offset + block_size);
}
VkResult
anv_state_pool_init(struct anv_state_pool *pool,
                    struct anv_device *device,
                    uint64_t start_address,
                    uint32_t block_size)
{
   VkResult result = anv_block_pool_init(&pool->block_pool, device,
                                         start_address,
                                         block_size * 16);
   if (result != VK_SUCCESS)
      return result;

   result = anv_state_table_init(&pool->table, device, 64);
   if (result != VK_SUCCESS) {
      anv_block_pool_finish(&pool->block_pool);
      return result;
   }

   assert(util_is_power_of_two_or_zero(block_size));
   pool->block_size = block_size;
   pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
   for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
      pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
      pool->buckets[i].block.next = 0;
      pool->buckets[i].block.end = 0;
   }
   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));

   return VK_SUCCESS;
}
void
anv_state_pool_finish(struct anv_state_pool *pool)
{
   VG(VALGRIND_DESTROY_MEMPOOL(pool));
   anv_state_table_finish(&pool->table);
   anv_block_pool_finish(&pool->block_pool);
}
static uint32_t
anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
                                    struct anv_block_pool *block_pool,
                                    uint32_t state_size,
                                    uint32_t block_size,
                                    uint32_t *padding)
{
   struct anv_block_state block, old, new;
   uint32_t offset;

   /* We don't always use anv_block_pool_alloc(), which would set *padding to
    * zero for us.  So if we have a pointer to padding, we must zero it out
    * ourselves here, to make sure we always return some sensible value.
    */
   if (padding)
      *padding = 0;

   /* If our state is large, we don't need any sub-allocation from a block.
    * Instead, we just grab whole (potentially large) blocks.
    */
   if (state_size >= block_size)
      return anv_block_pool_alloc(block_pool, state_size, padding);

 restart:
   block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);

   if (block.next < block.end) {
      return block.next;
   } else if (block.next == block.end) {
      offset = anv_block_pool_alloc(block_pool, block_size, padding);
      new.next = offset + state_size;
      new.end = offset + block_size;
      old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
      if (old.next != block.next)
         futex_wake(&pool->block.end, INT_MAX);
      return offset;
   } else {
      futex_wait(&pool->block.end, block.end, NULL);
      goto restart;
   }
}
static uint32_t
anv_state_pool_get_bucket(uint32_t size)
{
   unsigned size_log2 = ilog2_round_up(size);
   assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
   if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
      size_log2 = ANV_MIN_STATE_SIZE_LOG2;
   return size_log2 - ANV_MIN_STATE_SIZE_LOG2;
}

static uint32_t
anv_state_pool_get_bucket_size(uint32_t bucket)
{
   uint32_t size_log2 = bucket + ANV_MIN_STATE_SIZE_LOG2;
   return 1 << size_log2;
}
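
/* Editor's note: assuming the usual values ANV_MIN_STATE_SIZE_LOG2 = 6 and
 * ANV_MAX_STATE_SIZE_LOG2 = 21 (see anv_private.h), the mapping works out
 * to roughly:
 *
 *    anv_state_pool_get_bucket(64)     = 0   (sizes 1..64 share bucket 0)
 *    anv_state_pool_get_bucket(65)     = 1
 *    anv_state_pool_get_bucket(4096)   = 6
 *    anv_state_pool_get_bucket_size(6) = 1 << (6 + 6) = 4096
 */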
/** Helper to push a chunk into the state table.
 *
 * It creates 'count' entries in the state table and updates their sizes,
 * offsets and maps, also pushing them as "free" states.
 */
static void
anv_state_pool_return_blocks(struct anv_state_pool *pool,
                             uint32_t chunk_offset, uint32_t count,
                             uint32_t block_size)
{
   /* Disallow returning 0 chunks */
   assert(count != 0);

   /* Make sure we always return chunks aligned to the block_size */
   assert(chunk_offset % block_size == 0);

   uint32_t st_idx;
   UNUSED VkResult result = anv_state_table_add(&pool->table, &st_idx, count);
   assert(result == VK_SUCCESS);
   for (int i = 0; i < count; i++) {
      /* update states that were added back to the state table */
      struct anv_state *state_i = anv_state_table_get(&pool->table,
                                                      st_idx + i);
      state_i->alloc_size = block_size;
      state_i->offset = chunk_offset + block_size * i;
      state_i->map = anv_block_pool_map(&pool->block_pool, state_i->offset);
   }

   uint32_t block_bucket = anv_state_pool_get_bucket(block_size);
   anv_free_list_push(&pool->buckets[block_bucket].free_list,
                      &pool->table, st_idx, count);
}
/** Returns a chunk of memory back to the state pool.
 *
 * Do a two-level split.  If chunk_size is bigger than divisor
 * (pool->block_size), we return as many divisor sized blocks as we can, from
 * the end of the chunk.
 *
 * The remainder is then split into smaller blocks (starting at small_size if
 * it is non-zero), with larger blocks always being taken from the end of the
 * chunk.
 */
static void
anv_state_pool_return_chunk(struct anv_state_pool *pool,
                            uint32_t chunk_offset, uint32_t chunk_size,
                            uint32_t small_size)
{
   uint32_t divisor = pool->block_size;
   uint32_t nblocks = chunk_size / divisor;
   uint32_t rest = chunk_size - nblocks * divisor;

   if (nblocks > 0) {
      /* First return divisor aligned and sized chunks.  We start returning
       * larger blocks from the end of the chunk, since they should already be
       * aligned to divisor.  Also anv_state_pool_return_blocks() only accepts
       * aligned chunks.
       */
      uint32_t offset = chunk_offset + rest;
      anv_state_pool_return_blocks(pool, offset, nblocks, divisor);
   }

   chunk_size = rest;
   divisor /= 2;

   if (small_size > 0 && small_size < divisor)
      divisor = small_size;

   uint32_t min_size = 1 << ANV_MIN_STATE_SIZE_LOG2;

   /* Just as before, return larger divisor aligned blocks from the end of
    * the chunk first.
    */
   while (chunk_size > 0 && divisor >= min_size) {
      nblocks = chunk_size / divisor;
      rest = chunk_size - nblocks * divisor;
      if (nblocks > 0) {
         anv_state_pool_return_blocks(pool, chunk_offset + rest,
                                      nblocks, divisor);
         chunk_size = rest;
      }
      divisor /= 2;
   }
}
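
/* Editor's note: a rough worked example of the two-level split above
 * (values invented for illustration).  With pool->block_size = 8192,
 * returning a 20480-byte chunk at offset 0 with small_size = 64:
 *
 *   - divisor = 8192, nblocks = 2, rest = 4096: the two 8192-byte blocks at
 *     offsets 4096 and 12288 go back to the 8192-byte bucket first;
 *   - the remaining 4096 bytes are then returned with divisor forced down
 *     to small_size = 64, i.e. as 64 blocks of 64 bytes starting at offset 0.
 *
 * small_size is the caller's hint (the allocation size being carved out of
 * the chunk) so the leftover is pre-split into pieces of that size.
 */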
static struct anv_state
anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
                           uint32_t size, uint32_t align)
{
   uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));
   int32_t offset;

   struct anv_state *state;
   uint32_t alloc_size = anv_state_pool_get_bucket_size(bucket);

   /* Try free list first. */
   state = anv_free_list_pop(&pool->buckets[bucket].free_list,
                             &pool->table);
   if (state) {
      assert(state->offset >= 0);
      goto done;
   }

   /* Try to grab a chunk from some larger bucket and split it up */
   for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
      state = anv_free_list_pop(&pool->buckets[b].free_list, &pool->table);
      if (state) {
         unsigned chunk_size = anv_state_pool_get_bucket_size(b);
         int32_t chunk_offset = state->offset;

         /* First let's update the state we got to its new size.  offset and
          * map remain the same.
          */
         state->alloc_size = alloc_size;

         /* Now return the unused part of the chunk back to the pool as free
          * blocks.
          *
          * There are a couple of options as to what we do with it:
          *
          *    1) We could fully split the chunk into state.alloc_size sized
          *       pieces.  However, this would mean that allocating a 16B
          *       state could potentially split a 2MB chunk into 512K smaller
          *       chunks.  This would lead to unnecessary fragmentation.
          *
          *    2) The classic "buddy allocator" method would have us split the
          *       chunk in half and return one half.  Then we would split the
          *       remaining half in half and return one half, and repeat as
          *       needed until we get down to the size we want.  However, if
          *       you are allocating a bunch of the same size state (which is
          *       the common case), this means that every other allocation has
          *       to go up a level and every fourth goes up two levels, etc.
          *       This is not nearly as efficient as it could be if we did a
          *       little more work up-front.
          *
          *    3) Split the difference between (1) and (2) by doing a
          *       two-level split.  If it's bigger than some fixed block_size,
          *       we split it into block_size sized chunks and return all but
          *       one of them.  Then we split what remains into
          *       state.alloc_size sized chunks and return them.
          *
          * We choose something close to option (3), which is implemented with
          * anv_state_pool_return_chunk().  That is done by returning the
          * remainder of the chunk, with alloc_size as a hint of the size that
          * we want the smaller chunk split into.
          */
         anv_state_pool_return_chunk(pool, chunk_offset + alloc_size,
                                     chunk_size - alloc_size, alloc_size);
         goto done;
      }
   }

   uint32_t padding;
   offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
                                                &pool->block_pool,
                                                alloc_size,
                                                pool->block_size,
                                                &padding);
   /* Every time we allocate a new state, add it to the state pool */
   uint32_t idx;
   UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
   assert(result == VK_SUCCESS);

   state = anv_state_table_get(&pool->table, idx);
   state->offset = offset;
   state->alloc_size = alloc_size;
   state->map = anv_block_pool_map(&pool->block_pool, offset);

   if (padding > 0) {
      uint32_t return_offset = offset - padding;
      anv_state_pool_return_chunk(pool, return_offset, padding, 0);
   }

 done:
   return *state;
}
struct anv_state
anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
{
   if (size == 0)
      return ANV_STATE_NULL;

   struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
   VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
   return state;
}
struct anv_state
anv_state_pool_alloc_back(struct anv_state_pool *pool)
{
   struct anv_state *state;
   uint32_t alloc_size = pool->block_size;

   state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
   if (state) {
      assert(state->offset < 0);
      goto done;
   }

   int32_t offset;
   offset = anv_block_pool_alloc_back(&pool->block_pool,
                                      pool->block_size);
   uint32_t idx;
   UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
   assert(result == VK_SUCCESS);

   state = anv_state_table_get(&pool->table, idx);
   state->offset = offset;
   state->alloc_size = alloc_size;
   state->map = anv_block_pool_map(&pool->block_pool, state->offset);

 done:
   VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
   return *state;
}
static void
anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
{
   assert(util_is_power_of_two_or_zero(state.alloc_size));
   unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);

   if (state.offset < 0) {
      assert(state.alloc_size == pool->block_size);
      anv_free_list_push(&pool->back_alloc_free_list,
                         &pool->table, state.idx, 1);
   } else {
      anv_free_list_push(&pool->buckets[bucket].free_list,
                         &pool->table, state.idx, 1);
   }
}
void
anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
{
   if (state.alloc_size == 0)
      return;

   VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
   anv_state_pool_free_no_vg(pool, state);
}
struct anv_state_stream_block {
   struct anv_state block;

   /* The next block */
   struct anv_state_stream_block *next;

#ifdef HAVE_VALGRIND
   /* A pointer to the first user-allocated thing in this block.  This is
    * what valgrind sees as the start of the block.
    */
   void *_vg_ptr;
#endif
};

/* The state stream allocator is a one-shot, single threaded allocator for
 * variable sized blocks.  We use it for allocating dynamic state.
 */
void
anv_state_stream_init(struct anv_state_stream *stream,
                      struct anv_state_pool *state_pool,
                      uint32_t block_size)
{
   stream->state_pool = state_pool;
   stream->block_size = block_size;

   stream->block = ANV_STATE_NULL;

   stream->block_list = NULL;

   /* Ensure that next + whatever > block_size.  This way the first call to
    * state_stream_alloc fetches a new block.
    */
   stream->next = block_size;

   VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
}
void
anv_state_stream_finish(struct anv_state_stream *stream)
{
   struct anv_state_stream_block *next = stream->block_list;
   while (next != NULL) {
      struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
      VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
      VG(VALGRIND_MAKE_MEM_UNDEFINED(next, stream->block_size));
      anv_state_pool_free_no_vg(stream->state_pool, sb.block);
      next = sb.next;
   }

   VG(VALGRIND_DESTROY_MEMPOOL(stream));
}
struct anv_state
anv_state_stream_alloc(struct anv_state_stream *stream,
                       uint32_t size, uint32_t alignment)
{
   if (size == 0)
      return ANV_STATE_NULL;

   assert(alignment <= PAGE_SIZE);

   uint32_t offset = align_u32(stream->next, alignment);
   if (offset + size > stream->block.alloc_size) {
      uint32_t block_size = stream->block_size;
      if (block_size < size)
         block_size = round_to_power_of_two(size);

      stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
                                                 block_size, PAGE_SIZE);

      struct anv_state_stream_block *sb = stream->block.map;
      VG_NOACCESS_WRITE(&sb->block, stream->block);
      VG_NOACCESS_WRITE(&sb->next, stream->block_list);
      stream->block_list = sb;
      VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, NULL));

      VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, stream->block_size));

      /* Reset back to the start plus space for the header */
      stream->next = sizeof(*sb);

      offset = align_u32(stream->next, alignment);
      assert(offset + size <= stream->block.alloc_size);
   }

   struct anv_state state = stream->block;
   state.offset += offset;
   state.alloc_size = size;
   state.map += offset;

   stream->next = offset + size;

#ifdef HAVE_VALGRIND
   struct anv_state_stream_block *sb = stream->block_list;
   void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
   if (vg_ptr == NULL) {
      vg_ptr = state.map;
      VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
      VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
   } else {
      void *state_end = state.map + state.alloc_size;
      /* This only updates the mempool.  The newly allocated chunk is still
       * marked as NOACCESS.
       */
      VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
      /* Mark the newly allocated chunk as undefined */
      VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
   }
#endif

   return state;
}
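
/* Editor's note: typical usage of the stream allocator, sketched under the
 * assumption of a device with an initialized dynamic state pool.  The
 * identifiers below mirror how command buffers use it, but this exact
 * snippet does not appear in the driver:
 *
 *    struct anv_state_stream stream;
 *    anv_state_stream_init(&stream, &device->dynamic_state_pool, 16384);
 *
 *    struct anv_state vp = anv_state_stream_alloc(&stream, 64, 32);
 *    memcpy(vp.map, viewport_data, 64);   // CPU-visible map
 *    // vp.offset is what gets baked into the command stream
 *
 *    anv_state_stream_finish(&stream);    // frees every block at once
 */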
void
anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device)
{
   pool->device = device;
   for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
      util_sparse_array_free_list_init(&pool->free_list[i],
                                       &device->bo_cache.bo_map, 0,
                                       offsetof(struct anv_bo, free_index));
   }

   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
}
void
anv_bo_pool_finish(struct anv_bo_pool *pool)
{
   for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
      while (1) {
         struct anv_bo *bo =
            util_sparse_array_free_list_pop_elem(&pool->free_list[i]);
         if (bo == NULL)
            break;

         /* anv_device_release_bo is going to "free" it */
         VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1));
         anv_device_release_bo(pool->device, bo);
      }
   }

   VG(VALGRIND_DESTROY_MEMPOOL(pool));
}
VkResult
anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size,
                  struct anv_bo **bo_out)
{
   const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size);
   const unsigned pow2_size = 1 << size_log2;
   const unsigned bucket = size_log2 - 12;
   assert(bucket < ARRAY_SIZE(pool->free_list));

   struct anv_bo *bo =
      util_sparse_array_free_list_pop_elem(&pool->free_list[bucket]);
   if (bo != NULL) {
      VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
      *bo_out = bo;
      return VK_SUCCESS;
   }

   VkResult result = anv_device_alloc_bo(pool->device,
                                         pow2_size,
                                         ANV_BO_ALLOC_MAPPED |
                                         ANV_BO_ALLOC_SNOOPED |
                                         ANV_BO_ALLOC_CAPTURE,
                                         0 /* explicit_address */,
                                         &bo);
   if (result != VK_SUCCESS)
      return result;

   /* We want it to look like it came from this pool */
   VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));
   VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));

   *bo_out = bo;

   return VK_SUCCESS;
}
void
anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo)
{
   VG(VALGRIND_MEMPOOL_FREE(pool, bo->map));

   assert(util_is_power_of_two_or_zero(bo->size));
   const unsigned size_log2 = ilog2_round_up(bo->size);
   const unsigned bucket = size_log2 - 12;
   assert(bucket < ARRAY_SIZE(pool->free_list));

   assert(util_sparse_array_get(&pool->device->bo_cache.bo_map,
                                bo->gem_handle) == bo);
   util_sparse_array_free_list_push(&pool->free_list[bucket],
                                    &bo->gem_handle, 1);
}
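
/* Editor's note: the BO pool rounds every request up to a power of two and
 * keeps one free list per size class (bucket 0 holds 4 KiB BOs, bucket 1
 * 8 KiB, and so on), so a free/alloc pair of the same size is just a
 * lock-free list push/pop.  For example, anv_bo_pool_alloc(pool, 6000, &bo)
 * hands back an 8192-byte BO from bucket 1, and anv_bo_pool_free() later
 * pushes it onto that same bucket.
 */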
void
anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
{
   memset(pool, 0, sizeof(*pool));
}

void
anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
{
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      for (unsigned i = 0; i < 16; i++) {
         if (pool->bos[i][s] != NULL)
            anv_device_release_bo(device, pool->bos[i][s]);
      }
   }
}

struct anv_bo *
anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
                       gl_shader_stage stage, unsigned per_thread_scratch)
{
   if (per_thread_scratch == 0)
      return NULL;

   unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
   assert(scratch_size_log2 < 16);

   struct anv_bo *bo = p_atomic_read(&pool->bos[scratch_size_log2][stage]);

   if (bo != NULL)
      return bo;

   const struct gen_device_info *devinfo = &device->info;

   const unsigned subslices = MAX2(device->physical->subslice_total, 1);

   unsigned scratch_ids_per_subslice;
   if (devinfo->gen >= 11) {
      /* The MEDIA_VFE_STATE docs say:
       *
       *    "Starting with this configuration, the Maximum Number of
       *     Threads must be set to (#EU * 8) for GPGPU dispatches.
       *
       *     Although there are only 7 threads per EU in the configuration,
       *     the FFTID is calculated as if there are 8 threads per EU,
       *     which in turn requires a larger amount of Scratch Space to be
       *     allocated by the driver."
       */
      scratch_ids_per_subslice = 8 * 8;
   } else if (devinfo->is_haswell) {
      /* WaCSScratchSize:hsw
       *
       * Haswell's scratch space address calculation appears to be sparse
       * rather than tightly packed.  The Thread ID has bits indicating
       * which subslice, EU within a subslice, and thread within an EU it
       * is.  There's a maximum of two slices and two subslices, so these
       * can be stored with a single bit.  Even though there are only 10 EUs
       * per subslice, this is stored in 4 bits, so there's an effective
       * maximum value of 16 EUs.  Similarly, although there are only 7
       * threads per EU, this is stored in a 3 bit number, giving an
       * effective maximum value of 8 threads per EU.
       *
       * This means that we need to use 16 * 8 instead of 10 * 7 for the
       * number of threads per subslice.
       */
      scratch_ids_per_subslice = 16 * 8;
   } else if (devinfo->is_cherryview) {
      /* Cherryview devices have either 6 or 8 EUs per subslice, and each EU
       * has 7 threads.  The 6 EU devices appear to calculate thread IDs as
       * if the subslice had 8 EUs.
       */
      scratch_ids_per_subslice = 8 * 7;
   } else {
      scratch_ids_per_subslice = devinfo->max_cs_threads;
   }

   uint32_t max_threads[] = {
      [MESA_SHADER_VERTEX]    = devinfo->max_vs_threads,
      [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
      [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
      [MESA_SHADER_GEOMETRY]  = devinfo->max_gs_threads,
      [MESA_SHADER_FRAGMENT]  = devinfo->max_wm_threads,
      [MESA_SHADER_COMPUTE]   = scratch_ids_per_subslice * subslices,
   };

   uint32_t size = per_thread_scratch * max_threads[stage];

   /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
    * are still relative to the general state base address.  When we emit
    * STATE_BASE_ADDRESS, we set general state base address to 0 and the size
    * to the maximum (1 page under 4GB).  This allows us to just place the
    * scratch buffers anywhere we wish in the bottom 32 bits of address space
    * and just set the scratch base pointer in 3DSTATE_*S using a relocation.
    * However, in order to do so, we need to ensure that the kernel does not
    * place the scratch BO above the 32-bit boundary.
    *
    * NOTE: Technically, it can't go "anywhere" because the top page is off
    * limits.  However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
    * kernel allocates space using
    *
    *    end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
    *
    * so nothing will ever touch the top page.
    */
   VkResult result = anv_device_alloc_bo(device, size,
                                         ANV_BO_ALLOC_32BIT_ADDRESS,
                                         0 /* explicit_address */,
                                         &bo);
   if (result != VK_SUCCESS)
      return NULL; /* TODO */

   struct anv_bo *current_bo =
      p_atomic_cmpxchg(&pool->bos[scratch_size_log2][stage], NULL, bo);
   if (current_bo) {
      anv_device_release_bo(device, bo);
      return current_bo;
   } else {
      return bo;
   }
}
VkResult
anv_bo_cache_init(struct anv_bo_cache *cache)
{
   util_sparse_array_init(&cache->bo_map, sizeof(struct anv_bo), 1024);

   if (pthread_mutex_init(&cache->mutex, NULL)) {
      util_sparse_array_finish(&cache->bo_map);
      return vk_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
                       "pthread_mutex_init failed: %m");
   }

   return VK_SUCCESS;
}
void
anv_bo_cache_finish(struct anv_bo_cache *cache)
{
   util_sparse_array_finish(&cache->bo_map);
   pthread_mutex_destroy(&cache->mutex);
}
#define ANV_BO_CACHE_SUPPORTED_FLAGS \
   (EXEC_OBJECT_WRITE | \
    EXEC_OBJECT_ASYNC | \
    EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
    EXEC_OBJECT_PINNED | \
    EXEC_OBJECT_CAPTURE)
static uint64_t
anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
                               enum anv_bo_alloc_flags alloc_flags)
{
   struct anv_physical_device *pdevice = device->physical;

   uint64_t bo_flags = 0;
   if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS) &&
       pdevice->supports_48bit_addresses)
      bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;

   if ((alloc_flags & ANV_BO_ALLOC_CAPTURE) && pdevice->has_exec_capture)
      bo_flags |= EXEC_OBJECT_CAPTURE;

   if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
      assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
      bo_flags |= EXEC_OBJECT_WRITE;
   }

   if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
      bo_flags |= EXEC_OBJECT_ASYNC;

   if (pdevice->use_softpin)
      bo_flags |= EXEC_OBJECT_PINNED;

   return bo_flags;
}
static uint32_t
anv_device_get_bo_align(struct anv_device *device)
{
   /* Gen12 CCS surface addresses need to be 64K aligned.  We have no way of
    * telling what this allocation is for so pick the largest alignment.
    */
   if (device->info.gen >= 12)
      return 64 * 1024;

   return 4096;
}
VkResult
anv_device_alloc_bo(struct anv_device *device,
                    uint64_t size,
                    enum anv_bo_alloc_flags alloc_flags,
                    uint64_t explicit_address,
                    struct anv_bo **bo_out)
{
   const uint32_t bo_flags =
      anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));

   /* The kernel is going to give us whole pages anyway */
   size = align_u64(size, 4096);

   const uint32_t align = anv_device_get_bo_align(device);

   uint32_t gem_handle = anv_gem_create(device, size);
   if (gem_handle == 0)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   struct anv_bo new_bo = {
      .gem_handle = gem_handle,
      .refcount = 1,
      .offset = -1,
      .size = size,
      .flags = bo_flags,
      .is_external = (alloc_flags & ANV_BO_ALLOC_EXTERNAL),
      .has_client_visible_address =
         (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
   };

   if (alloc_flags & ANV_BO_ALLOC_MAPPED) {
      new_bo.map = anv_gem_mmap(device, new_bo.gem_handle, 0, size, 0);
      if (new_bo.map == MAP_FAILED) {
         anv_gem_close(device, new_bo.gem_handle);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
   }

   if (alloc_flags & ANV_BO_ALLOC_SNOOPED) {
      assert(alloc_flags & ANV_BO_ALLOC_MAPPED);
      /* We don't want to change these defaults if it's going to be shared
       * with another process.
       */
      assert(!(alloc_flags & ANV_BO_ALLOC_EXTERNAL));

      /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
       * I915_CACHING_NONE on non-LLC platforms.  For many internal state
       * objects, we'd rather take the snooping overhead than risk forgetting
       * a CLFLUSH somewhere.  Userptr objects are always created as
       * I915_CACHING_CACHED, which on non-LLC means snooped so there's no
       * need to do this there.
       */
      if (!device->info.has_llc) {
         anv_gem_set_caching(device, new_bo.gem_handle,
                             I915_CACHING_CACHED);
      }
   }

   if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) {
      new_bo.has_fixed_address = true;
      new_bo.offset = explicit_address;
   } else if (new_bo.flags & EXEC_OBJECT_PINNED) {
      new_bo.offset = anv_vma_alloc(device, new_bo.size, align,
                                    alloc_flags, explicit_address);
      if (new_bo.offset == 0) {
         if (new_bo.map)
            anv_gem_munmap(new_bo.map, size);
         anv_gem_close(device, new_bo.gem_handle);
         return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "failed to allocate virtual address for BO");
      }
   } else {
      assert(!new_bo.has_client_visible_address);
   }

   assert(new_bo.gem_handle);

   /* If we just got this gem_handle from anv_bo_init_new then we know no one
    * else is touching this BO at the moment so we don't need to lock here.
    */
   struct anv_bo *bo = anv_device_lookup_bo(device, new_bo.gem_handle);
   *bo = new_bo;

   *bo_out = bo;

   return VK_SUCCESS;
}
VkResult
anv_device_import_bo_from_host_ptr(struct anv_device *device,
                                   void *host_ptr, uint32_t size,
                                   enum anv_bo_alloc_flags alloc_flags,
                                   uint64_t client_address,
                                   struct anv_bo **bo_out)
{
   assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
                           ANV_BO_ALLOC_SNOOPED |
                           ANV_BO_ALLOC_FIXED_ADDRESS)));

   struct anv_bo_cache *cache = &device->bo_cache;
   const uint32_t bo_flags =
      anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));

   uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
   if (!gem_handle)
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

   pthread_mutex_lock(&cache->mutex);

   struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
   if (bo->refcount > 0) {
      /* VK_EXT_external_memory_host doesn't require handling importing the
       * same pointer twice at the same time, but we don't get in the way.
       * If the kernel gives us the same gem_handle, only succeed if the
       * flags match.
       */
      assert(bo->gem_handle == gem_handle);
      if (bo_flags != bo->flags) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "same host pointer imported two different ways");
      }

      if (bo->has_client_visible_address !=
          ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "The same BO was imported with and without buffer "
                          "device address");
      }

      if (client_address && client_address != gen_48b_address(bo->offset)) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "The same BO was imported at two different "
                          "addresses");
      }

      __sync_fetch_and_add(&bo->refcount, 1);
   } else {
      struct anv_bo new_bo = {
         .gem_handle = gem_handle,
         .refcount = 1,
         .offset = -1,
         .size = size,
         .map = host_ptr,
         .flags = bo_flags,
         .is_external = true,
         .from_host_ptr = true,
         .has_client_visible_address =
            (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
      };

      assert(client_address == gen_48b_address(client_address));
      if (new_bo.flags & EXEC_OBJECT_PINNED) {
         /* Gen12 CCS surface addresses need to be 64K aligned.  We have no
          * way of telling what this allocation is for so pick the largest
          * alignment.
          */
         new_bo.offset = anv_vma_alloc(device, new_bo.size,
                                       anv_device_get_bo_align(device),
                                       alloc_flags, client_address);
         if (new_bo.offset == 0) {
            anv_gem_close(device, new_bo.gem_handle);
            pthread_mutex_unlock(&cache->mutex);
            return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                             "failed to allocate virtual address for BO");
         }
      } else {
         assert(!new_bo.has_client_visible_address);
      }

      *bo = new_bo;
   }

   pthread_mutex_unlock(&cache->mutex);
   *bo_out = bo;

   return VK_SUCCESS;
}
VkResult
anv_device_import_bo(struct anv_device *device,
                     int fd,
                     enum anv_bo_alloc_flags alloc_flags,
                     uint64_t client_address,
                     struct anv_bo **bo_out)
{
   assert(!(alloc_flags & (ANV_BO_ALLOC_MAPPED |
                           ANV_BO_ALLOC_SNOOPED |
                           ANV_BO_ALLOC_FIXED_ADDRESS)));

   struct anv_bo_cache *cache = &device->bo_cache;
   const uint32_t bo_flags =
      anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));

   pthread_mutex_lock(&cache->mutex);

   uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
   if (!gem_handle) {
      pthread_mutex_unlock(&cache->mutex);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
   if (bo->refcount > 0) {
      /* We have to be careful how we combine flags so that it makes sense.
       * Really, though, if we get to this case and it actually matters, the
       * client has imported a BO twice in different ways and they get what
       * they get.
       */
      uint64_t new_flags = 0;
      new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_WRITE;
      new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_ASYNC;
      new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
      new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_PINNED;
      new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_CAPTURE;

      /* It's theoretically possible for a BO to get imported such that it's
       * both pinned and not pinned.  The only way this can happen is if it
       * gets imported as both a semaphore and a memory object and that would
       * be an application error.  Just fail out in that case.
       */
      if ((bo->flags & EXEC_OBJECT_PINNED) !=
          (bo_flags & EXEC_OBJECT_PINNED)) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "The same BO was imported two different ways");
      }

      /* It's also theoretically possible that someone could export a BO from
       * one heap and import it into another or to import the same BO into two
       * different heaps.  If this happens, we could potentially end up both
       * allowing and disallowing 48-bit addresses.  There's not much we can
       * do about it if we're pinning so we just throw an error and hope no
       * app is actually that stupid.
       */
      if ((new_flags & EXEC_OBJECT_PINNED) &&
          (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
          (bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "The same BO was imported on two different heaps");
      }

      if (bo->has_client_visible_address !=
          ((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "The same BO was imported with and without buffer "
                          "device address");
      }

      if (client_address && client_address != gen_48b_address(bo->offset)) {
         pthread_mutex_unlock(&cache->mutex);
         return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "The same BO was imported at two different "
                          "addresses");
      }

      bo->flags = new_flags;

      __sync_fetch_and_add(&bo->refcount, 1);
   } else {
      off_t size = lseek(fd, 0, SEEK_END);
      if (size == (off_t)-1) {
         anv_gem_close(device, gem_handle);
         pthread_mutex_unlock(&cache->mutex);
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      }

      struct anv_bo new_bo = {
         .gem_handle = gem_handle,
         .refcount = 1,
         .offset = -1,
         .size = size,
         .flags = bo_flags,
         .is_external = true,
         .has_client_visible_address =
            (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
      };

      assert(client_address == gen_48b_address(client_address));
      if (new_bo.flags & EXEC_OBJECT_PINNED) {
         new_bo.offset = anv_vma_alloc(device, new_bo.size,
                                       anv_device_get_bo_align(device),
                                       alloc_flags, client_address);
         if (new_bo.offset == 0) {
            anv_gem_close(device, new_bo.gem_handle);
            pthread_mutex_unlock(&cache->mutex);
            return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                             "failed to allocate virtual address for BO");
         }
      } else {
         assert(!new_bo.has_client_visible_address);
      }

      *bo = new_bo;
   }

   pthread_mutex_unlock(&cache->mutex);
   *bo_out = bo;

   return VK_SUCCESS;
}
VkResult
anv_device_export_bo(struct anv_device *device,
                     struct anv_bo *bo, int *fd_out)
{
   assert(anv_device_lookup_bo(device, bo->gem_handle) == bo);

   /* This BO must have been flagged external in order for us to be able
    * to export it.  This is done based on external options passed into
    * anv_AllocateMemory.
    */
   assert(bo->is_external);

   int fd = anv_gem_handle_to_fd(device, bo->gem_handle);
   if (fd < 0)
      return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

   *fd_out = fd;

   return VK_SUCCESS;
}
static bool
atomic_dec_not_one(uint32_t *counter)
{
   uint32_t old, val;

   val = *counter;
   while (1) {
      if (val == 1)
         return false;

      old = __sync_val_compare_and_swap(counter, val, val - 1);
      if (old == val)
         return true;

      val = old;
   }
}
void
anv_device_release_bo(struct anv_device *device,
                      struct anv_bo *bo)
{
   struct anv_bo_cache *cache = &device->bo_cache;
   assert(anv_device_lookup_bo(device, bo->gem_handle) == bo);

   /* Try to decrement the counter but don't go below one.  If this succeeds
    * then the refcount has been decremented and we are not the last
    * reference.
    */
   if (atomic_dec_not_one(&bo->refcount))
      return;

   pthread_mutex_lock(&cache->mutex);

   /* We are probably the last reference since our attempt to decrement above
    * failed.  However, we can't actually know until we are inside the mutex.
    * Otherwise, someone could import the BO between the decrement and our
    * taking the mutex.
    */
   if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
      /* Turns out we're not the last reference.  Unlock and bail. */
      pthread_mutex_unlock(&cache->mutex);
      return;
   }
   assert(bo->refcount == 0);

   if (bo->map && !bo->from_host_ptr)
      anv_gem_munmap(bo->map, bo->size);

   if ((bo->flags & EXEC_OBJECT_PINNED) && !bo->has_fixed_address)
      anv_vma_free(device, bo->offset, bo->size);

   uint32_t gem_handle = bo->gem_handle;

   /* Memset the BO just in case.  The refcount being zero should be enough
    * to prevent someone from assuming the data is valid, but it's safer to
    * just stomp to zero just in case.  We explicitly do this *before* we
    * close the GEM handle to ensure that if anyone allocates something and
    * gets the same GEM handle, the memset has already happened and won't
    * stomp all over any data they may write in this BO.
    */
   memset(bo, 0, sizeof(*bo));

   anv_gem_close(device, gem_handle);

   /* Don't unlock until we've actually closed the BO.  The whole point of
    * the BO cache is to ensure that we correctly handle races with creating
    * and releasing GEM handles and we don't want to let someone import the
    * BO again between mutex unlock and closing the GEM handle.
    */
   pthread_mutex_unlock(&cache->mutex);
}