2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 #include "anv_private.h"
32 #include "util/hash_table.h"
33 #include "util/simple_mtx.h"
34 #include "util/anon_file.h"
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({                         \
   VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr)));   \
   __typeof(*(__ptr)) __val = *(__ptr);                    \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));  \
   __val;                                                  \
})
#define VG_NOACCESS_WRITE(__ptr, __val) ({                  \
   VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr)));  \
   *(__ptr) = (__val);                                      \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));   \
})
#else
#define VG_NOACCESS_READ(__ptr) (*(__ptr))
#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
#endif
#ifndef MAP_POPULATE
#define MAP_POPULATE 0
#endif
/* Design goals:
 *
 *  - Lock free (except when resizing underlying bos)
 *
 *  - Constant time allocation with typically only one atomic
 *
 *  - Multiple allocation sizes without fragmentation
 *
 *  - Can grow while keeping addresses and offset of contents stable
 *
 *  - All allocations within one bo so we can point one of the
 *    STATE_BASE_ADDRESS pointers at it.
 *
 * The overall design is a two-level allocator: top level is a fixed size, big
 * block (8k) allocator, which operates out of a bo.  Allocation is done by
 * either pulling a block from the free list or growing the used range of the
 * bo.  Growing the range may run out of space in the bo, which we then need
 * to grow.  Growing the bo is tricky in a multi-threaded, lockless
 * environment: we need to keep all pointers and contents in the old map
 * valid.  GEM bos in general can't grow, but we use a trick: we create a
 * memfd and use ftruncate to grow it as necessary.  We mmap the new size and
 * then create a gem bo for it using the new gem userptr ioctl.  Without
 * heavy-handed locking around our allocation fast-path, there isn't really a
 * way to munmap the old mmap, so we just keep it around until garbage
 * collection time.  While the block allocator is lockless for normal
 * operations, we block other threads trying to allocate while we're growing
 * the map.  It shouldn't happen often, and growing is fast anyway.
 */
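/* As a rough sketch of that growing trick (illustrative only, not code used
 * by the driver; grow_pool() and its arguments are hypothetical), the key
 * point is that ftruncate() on the memfd extends the file without disturbing
 * any existing mapping of it:
 *
 *    static void *grow_pool(int memfd, size_t new_size)
 *    {
 *       // Extend the anonymous file; old mmap()s of it stay valid.
 *       if (ftruncate(memfd, new_size) == -1)
 *          return NULL;
 *
 *       // Map the new, larger range.  The previous mapping is deliberately
 *       // leaked until the pool is destroyed, so outstanding pointers into
 *       // it keep working.
 *       return mmap(NULL, new_size, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED, memfd, 0);
 *    }
 */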
/* At the next level we can use various sub-allocators.  The state pool is a
 * pool of smaller, fixed size objects, which operates much like the block
 * pool.  It uses a free list for freeing objects, but when it runs out of
 * space it just allocates a new block from the block pool.  This allocator
 * is intended for longer lived state objects such as SURFACE_STATE and most
 * other persistent state objects in the API.  We may need to track more info
 * with these objects and a pointer back to the CPU object (eg VkImage).  In
 * those cases we just allocate a slightly bigger object and put the extra
 * state after the GPU state object.
 *
 * The state stream allocator works similarly to how the i965 DRI driver
 * streams all its state.  Even with Vulkan, we need to emit transient state
 * (whether surface state base or dynamic state base), and for that we can
 * just get a block and fill it up.  These cases are local to a command
 * buffer and the sub-allocator need not be thread safe.  The streaming
 * allocator gets a new block when it runs out of space and chains them
 * together so they can be freed.
 */
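/* As a rough usage sketch of the state pool (illustrative only; the pool
 * name below is an assumption about how a device sets one up, and error
 * handling is omitted):
 *
 *    struct anv_state surf =
 *       anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
 *    // surf.offset is GPU-visible relative to the pool's base address,
 *    // surf.map is the CPU pointer to the same 64 bytes.
 *    anv_state_pool_free(&device->surface_state_pool, surf);
 */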
/* Allocations are always at least 64 byte aligned.  UINT32_MAX can never be
 * a valid state table index, so we use it to indicate the free list is
 * empty.
 */
#define EMPTY UINT32_MAX

#define PAGE_SIZE 4096
struct anv_mmap_cleanup {
   void *map;
   size_t size;
   uint32_t gem_handle;
};

#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})
static inline uint32_t
ilog2_round_up(uint32_t value)
{
   assert(value != 0);
   return 32 - __builtin_clz(value - 1);
}

static inline uint32_t
round_to_power_of_two(uint32_t value)
{
   return 1 << ilog2_round_up(value);
}
struct anv_state_table_cleanup {
   void *map;
   size_t size;
};

#define ANV_STATE_TABLE_CLEANUP_INIT ((struct anv_state_table_cleanup){0})
#define ANV_STATE_ENTRY_SIZE (sizeof(struct anv_free_entry))

static VkResult
anv_state_table_expand_range(struct anv_state_table *table, uint32_t size);
143 anv_state_table_init(struct anv_state_table
*table
,
144 struct anv_device
*device
,
145 uint32_t initial_entries
)
149 table
->device
= device
;
151 /* Just make it 2GB up-front. The Linux kernel won't actually back it
152 * with pages until we either map and fault on one of them or we use
153 * userptr and send a chunk of it off to the GPU.
155 table
->fd
= os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE
, "state table");
156 if (table
->fd
== -1) {
157 result
= vk_error(VK_ERROR_INITIALIZATION_FAILED
);
161 if (!u_vector_init(&table
->cleanups
,
162 round_to_power_of_two(sizeof(struct anv_state_table_cleanup
)),
164 result
= vk_error(VK_ERROR_INITIALIZATION_FAILED
);
168 table
->state
.next
= 0;
169 table
->state
.end
= 0;
172 uint32_t initial_size
= initial_entries
* ANV_STATE_ENTRY_SIZE
;
173 result
= anv_state_table_expand_range(table
, initial_size
);
174 if (result
!= VK_SUCCESS
)
180 u_vector_finish(&table
->cleanups
);
188 anv_state_table_expand_range(struct anv_state_table
*table
, uint32_t size
)
191 struct anv_state_table_cleanup
*cleanup
;
193 /* Assert that we only ever grow the pool */
194 assert(size
>= table
->state
.end
);
196 /* Make sure that we don't go outside the bounds of the memfd */
197 if (size
> BLOCK_POOL_MEMFD_SIZE
)
198 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
200 cleanup
= u_vector_add(&table
->cleanups
);
202 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
204 *cleanup
= ANV_STATE_TABLE_CLEANUP_INIT
;
   /* Just leak the old map until we destroy the pool.  We can't munmap it
    * without races or imposing locking on the block allocate fast path.  On
    * the whole the leaked maps add up to less than the size of the current
    * map.  MAP_POPULATE seems like the right thing to do, but we should try
    * to get some numbers.
    */
212 map
= mmap(NULL
, size
, PROT_READ
| PROT_WRITE
,
213 MAP_SHARED
| MAP_POPULATE
, table
->fd
, 0);
214 if (map
== MAP_FAILED
) {
215 return vk_errorf(table
->device
->instance
, table
->device
,
216 VK_ERROR_OUT_OF_HOST_MEMORY
, "mmap failed: %m");
220 cleanup
->size
= size
;
229 anv_state_table_grow(struct anv_state_table
*table
)
231 VkResult result
= VK_SUCCESS
;
233 uint32_t used
= align_u32(table
->state
.next
* ANV_STATE_ENTRY_SIZE
,
235 uint32_t old_size
= table
->size
;
237 /* The block pool is always initialized to a nonzero size and this function
238 * is always called after initialization.
240 assert(old_size
> 0);
242 uint32_t required
= MAX2(used
, old_size
);
243 if (used
* 2 <= required
) {
      /* If we're in this case then this isn't the first allocation and we
       * already have enough space on both sides to hold double what we
       * have allocated.  There's nothing for us to do.
       */
251 uint32_t size
= old_size
* 2;
252 while (size
< required
)
255 assert(size
> table
->size
);
257 result
= anv_state_table_expand_range(table
, size
);
264 anv_state_table_finish(struct anv_state_table
*table
)
266 struct anv_state_table_cleanup
*cleanup
;
268 u_vector_foreach(cleanup
, &table
->cleanups
) {
270 munmap(cleanup
->map
, cleanup
->size
);
273 u_vector_finish(&table
->cleanups
);
279 anv_state_table_add(struct anv_state_table
*table
, uint32_t *idx
,
282 struct anv_block_state state
, old
, new;
288 state
.u64
= __sync_fetch_and_add(&table
->state
.u64
, count
);
289 if (state
.next
+ count
<= state
.end
) {
291 struct anv_free_entry
*entry
= &table
->map
[state
.next
];
292 for (int i
= 0; i
< count
; i
++) {
293 entry
[i
].state
.idx
= state
.next
+ i
;
297 } else if (state
.next
<= state
.end
) {
      /* We allocated the first block outside the pool, so we have to grow
       * the pool.  pool_state->next acts as a mutex: threads who try to
       * allocate now will get block indexes above the current limit and
       * hit futex_wait below.
       */
303 new.next
= state
.next
+ count
;
305 result
= anv_state_table_grow(table
);
306 if (result
!= VK_SUCCESS
)
308 new.end
= table
->size
/ ANV_STATE_ENTRY_SIZE
;
309 } while (new.end
< new.next
);
311 old
.u64
= __sync_lock_test_and_set(&table
->state
.u64
, new.u64
);
312 if (old
.next
!= state
.next
)
313 futex_wake(&table
->state
.end
, INT_MAX
);
315 futex_wait(&table
->state
.end
, state
.end
, NULL
);
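/* The allocation protocol used above (and again in anv_block_pool_alloc_new()
 * below) can be summarized with this sketch (illustrative only): "next" and
 * "end" live in one 64-bit word, so a single atomic add both claims a range
 * and tells the caller whether that range is still inside the pool.
 *
 *    for (;;) {
 *       state.u64 = __sync_fetch_and_add(&shared->u64, count);
 *       if (state.next + count <= state.end)
 *          return state.next;                 // fast path: fully in bounds
 *       else if (state.next <= state.end)
 *          break;                             // we crossed the end: we grow
 *       else
 *          futex_wait(&shared->end, state.end, NULL);  // someone else grows
 *    }
 *    // Grow, publish the new end with __sync_lock_test_and_set(), and
 *    // futex_wake() the waiters.
 */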
static void
anv_free_list_push(union anv_free_list *list,
                   struct anv_state_table *table,
                   uint32_t first, uint32_t count)
{
   union anv_free_list current, old, new;
   uint32_t last = first;

   for (uint32_t i = 1; i < count; i++, last++)
      table->map[last].next = last + 1;

   old = *list;
   do {
      current = old;
      table->map[last].next = current.offset;
      new.offset = first;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
   } while (old.u64 != current.u64);
}
static struct anv_state *
anv_free_list_pop(union anv_free_list *list,
                  struct anv_state_table *table)
{
   union anv_free_list current, new, old;

   current.u64 = list->u64;
   while (current.offset != EMPTY) {
      __sync_synchronize();
      new.offset = table->map[current.offset].next;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
      if (old.u64 == current.u64) {
         struct anv_free_entry *entry = &table->map[current.offset];
         return &entry->state;
      }
      current = old;
   }

   return NULL;
}
/* All pointers in the ptr_free_list are assumed to be page-aligned.  This
 * means that the bottom 12 bits should all be zero.
 */
#define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
#define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff))
#define PFL_PACK(ptr, count) ({                                          \
   (void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff)); \
})
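/* For illustration (not part of the driver), packing and unpacking works
 * like this for a page-aligned pointer p:
 *
 *    void *packed = PFL_PACK(p, 41);      // low 12 bits hold the counter
 *    assert(PFL_PTR(packed) == p);        // pointer recovered by masking
 *    assert(PFL_COUNT(packed) == 41);     // counter recovered from low bits
 *
 * Bumping the counter on every push/pop is what defeats the ABA problem:
 * even if the same pointer reappears at the head of the list, the packed
 * word differs, so a stale compare-and-swap fails.
 */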
static bool
anv_ptr_free_list_pop(void **list, void **elem)
{
   void *current = *list;
   while (PFL_PTR(current) != NULL) {
      void **next_ptr = PFL_PTR(current);
      void *new_ptr = VG_NOACCESS_READ(next_ptr);
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(new_ptr, new_count);
      void *old = __sync_val_compare_and_swap(list, current, new);
      if (old == current) {
         *elem = PFL_PTR(current);
         return true;
      }
      current = old;
   }

   return false;
}
static void
anv_ptr_free_list_push(void **list, void *elem)
{
   void *old, *current;
   void **next_ptr = elem;

   /* The pointer-based free list requires that the pointer be
    * page-aligned.  This is because we use the bottom 12 bits of the
    * pointer to store a counter to solve the ABA concurrency problem.
    */
   assert(((uintptr_t)elem & 0xfff) == 0);

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current));
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(elem, new_count);
      old = __sync_val_compare_and_swap(list, current, new);
   } while (old != current);
}
416 anv_block_pool_expand_range(struct anv_block_pool
*pool
,
417 uint32_t center_bo_offset
, uint32_t size
);
420 anv_block_pool_init(struct anv_block_pool
*pool
,
421 struct anv_device
*device
,
422 uint64_t start_address
,
423 uint32_t initial_size
,
428 pool
->device
= device
;
429 pool
->bo_flags
= bo_flags
;
432 pool
->center_bo_offset
= 0;
433 pool
->start_address
= gen_canonical_address(start_address
);
436 /* This pointer will always point to the first BO in the list */
437 pool
->bo
= &pool
->bos
[0];
439 anv_bo_init(pool
->bo
, 0, 0);
441 if (!(pool
->bo_flags
& EXEC_OBJECT_PINNED
)) {
442 /* Just make it 2GB up-front. The Linux kernel won't actually back it
443 * with pages until we either map and fault on one of them or we use
444 * userptr and send a chunk of it off to the GPU.
446 pool
->fd
= os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE
, "block pool");
448 return vk_error(VK_ERROR_INITIALIZATION_FAILED
);
453 if (!u_vector_init(&pool
->mmap_cleanups
,
454 round_to_power_of_two(sizeof(struct anv_mmap_cleanup
)),
456 result
= vk_error(VK_ERROR_INITIALIZATION_FAILED
);
460 pool
->state
.next
= 0;
462 pool
->back_state
.next
= 0;
463 pool
->back_state
.end
= 0;
465 result
= anv_block_pool_expand_range(pool
, 0, initial_size
);
466 if (result
!= VK_SUCCESS
)
467 goto fail_mmap_cleanups
;
469 /* Make the entire pool available in the front of the pool. If back
470 * allocation needs to use this space, the "ends" will be re-arranged.
472 pool
->state
.end
= pool
->size
;
477 u_vector_finish(&pool
->mmap_cleanups
);
479 if (!(pool
->bo_flags
& EXEC_OBJECT_PINNED
))
486 anv_block_pool_finish(struct anv_block_pool
*pool
)
488 struct anv_mmap_cleanup
*cleanup
;
489 const bool use_softpin
= !!(pool
->bo_flags
& EXEC_OBJECT_PINNED
);
491 u_vector_foreach(cleanup
, &pool
->mmap_cleanups
) {
493 anv_gem_munmap(cleanup
->map
, cleanup
->size
);
495 munmap(cleanup
->map
, cleanup
->size
);
497 if (cleanup
->gem_handle
)
498 anv_gem_close(pool
->device
, cleanup
->gem_handle
);
501 u_vector_finish(&pool
->mmap_cleanups
);
502 if (!(pool
->bo_flags
& EXEC_OBJECT_PINNED
))
507 anv_block_pool_expand_range(struct anv_block_pool
*pool
,
508 uint32_t center_bo_offset
, uint32_t size
)
512 struct anv_mmap_cleanup
*cleanup
;
513 const bool use_softpin
= !!(pool
->bo_flags
& EXEC_OBJECT_PINNED
);
515 /* Assert that we only ever grow the pool */
516 assert(center_bo_offset
>= pool
->back_state
.end
);
517 assert(size
- center_bo_offset
>= pool
->state
.end
);
519 /* Assert that we don't go outside the bounds of the memfd */
520 assert(center_bo_offset
<= BLOCK_POOL_MEMFD_CENTER
);
521 assert(use_softpin
||
522 size
- center_bo_offset
<=
523 BLOCK_POOL_MEMFD_SIZE
- BLOCK_POOL_MEMFD_CENTER
);
525 cleanup
= u_vector_add(&pool
->mmap_cleanups
);
527 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
529 *cleanup
= ANV_MMAP_CLEANUP_INIT
;
531 uint32_t newbo_size
= size
- pool
->size
;
533 gem_handle
= anv_gem_create(pool
->device
, newbo_size
);
534 map
= anv_gem_mmap(pool
->device
, gem_handle
, 0, newbo_size
, 0);
535 if (map
== MAP_FAILED
)
536 return vk_errorf(pool
->device
->instance
, pool
->device
,
537 VK_ERROR_MEMORY_MAP_FAILED
, "gem mmap failed: %m");
538 assert(center_bo_offset
== 0);
   /* Just leak the old map until we destroy the pool.  We can't munmap it
    * without races or imposing locking on the block allocate fast path.  On
    * the whole the leaked maps add up to less than the size of the current
    * map.  MAP_POPULATE seems like the right thing to do, but we should try
    * to get some numbers.
    */
546 map
= mmap(NULL
, size
, PROT_READ
| PROT_WRITE
,
547 MAP_SHARED
| MAP_POPULATE
, pool
->fd
,
548 BLOCK_POOL_MEMFD_CENTER
- center_bo_offset
);
549 if (map
== MAP_FAILED
)
550 return vk_errorf(pool
->device
->instance
, pool
->device
,
551 VK_ERROR_MEMORY_MAP_FAILED
, "mmap failed: %m");
553 /* Now that we mapped the new memory, we can write the new
554 * center_bo_offset back into pool and update pool->map. */
555 pool
->center_bo_offset
= center_bo_offset
;
556 pool
->map
= map
+ center_bo_offset
;
557 gem_handle
= anv_gem_userptr(pool
->device
, map
, size
);
558 if (gem_handle
== 0) {
560 return vk_errorf(pool
->device
->instance
, pool
->device
,
561 VK_ERROR_TOO_MANY_OBJECTS
, "userptr failed: %m");
566 cleanup
->size
= use_softpin
? newbo_size
: size
;
567 cleanup
->gem_handle
= gem_handle
;
   /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
    * I915_CACHING_NONE on non-LLC platforms.  However, userptr objects are
    * always created as I915_CACHING_CACHED, which on non-LLC means snooped.
    *
    * On platforms that support softpin, we are not going to use userptr
    * anymore, but we still want to rely on the snooped states.  So make sure
    * everything is set to I915_CACHING_CACHED.
    */
578 if (!pool
->device
->info
.has_llc
)
579 anv_gem_set_caching(pool
->device
, gem_handle
, I915_CACHING_CACHED
);
   /* For block pool BOs we have to be a bit careful about where we place them
    * in the GTT.  There are two documented workarounds for state base address
    * placement: Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
    * which state that those two base addresses do not support 48-bit
    * addresses and need to be placed in the bottom 32-bit range.
    * Unfortunately, this is not quite accurate.
    *
    * The real problem is that we always set the size of our state pools in
    * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
    * likely significantly smaller.  We do this because we do not know at the
    * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
    * the pool during command buffer building so we don't actually have a
    * valid final size.  If the address + size, as seen by STATE_BASE_ADDRESS,
    * overflows 48 bits, the GPU appears to treat all accesses to the buffer
    * as being out of bounds and returns zero.  For dynamic state, this
    * usually just leads to rendering corruptions, but shaders that are all
    * zero hang the GPU immediately.
    *
    * The easiest solution is exactly what the bogus workarounds say to
    * do: restrict these buffers to 32-bit addresses.  We could also pin the
    * BO to some particular location of our choosing, but that's significantly
    * more work than just not setting a flag.  So, we explicitly DO NOT set
    * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
    * hard work for us.
    */
610 assert(pool
->nbos
< ANV_MAX_BLOCK_POOL_BOS
);
613 /* With softpin, we add a new BO to the pool, and set its offset to right
614 * where the previous BO ends (the end of the pool).
616 bo
= &pool
->bos
[pool
->nbos
++];
617 bo_size
= newbo_size
;
618 bo_offset
= pool
->start_address
+ pool
->size
;
620 /* Without softpin, we just need one BO, and we already have a pointer to
621 * it. Simply "allocate" it from our array if we didn't do it before.
622 * The offset doesn't matter since we are not pinning the BO anyway.
631 anv_bo_init(bo
, gem_handle
, bo_size
);
632 bo
->offset
= bo_offset
;
633 bo
->flags
= pool
->bo_flags
;
640 static struct anv_bo
*
641 anv_block_pool_get_bo(struct anv_block_pool
*pool
, int32_t *offset
)
643 struct anv_bo
*bo
, *bo_found
= NULL
;
644 int32_t cur_offset
= 0;
648 if (!(pool
->bo_flags
& EXEC_OBJECT_PINNED
))
651 anv_block_pool_foreach_bo(bo
, pool
) {
652 if (*offset
< cur_offset
+ bo
->size
) {
656 cur_offset
+= bo
->size
;
659 assert(bo_found
!= NULL
);
660 *offset
-= cur_offset
;
/** Returns current memory map of the block pool.
 *
 * The returned pointer points to the map for the memory at the specified
 * offset.  The offset parameter is relative to the "center" of the block
 * pool rather than the start of the block pool BO map.
 */
void *
anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
{
   if (pool->bo_flags & EXEC_OBJECT_PINNED) {
      struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);
      return bo->map + offset;
   }

   return pool->map + offset;
}
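/* Offsets handed to anv_block_pool_map() are center-relative, so both front
 * (non-negative offsets) and back (negative offsets) allocations map
 * naturally.  For illustration only:
 *
 *    int32_t front = anv_block_pool_alloc(pool, block_size, NULL);  // >= 0
 *    int32_t back  = anv_block_pool_alloc_back(pool, block_size);   // < 0
 *    void *front_cpu = anv_block_pool_map(pool, front);
 *    void *back_cpu  = anv_block_pool_map(pool, back);
 */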
/** Grows and re-centers the block pool.
 *
 * We grow the block pool in one or both directions in such a way that the
 * following conditions are met:
 *
 *  1) The size of the entire pool is always a power of two.
 *
 *  2) The pool only grows on both ends.  Neither end can get smaller.
 *
 *  3) At the end of the allocation, we have about twice as much space
 *     allocated for each end as we have used.  This way the pool doesn't
 *     grow too far in one direction or the other.
 *
 *  4) If the _alloc_back() has never been called, then the back portion of
 *     the pool retains a size of zero.  (This makes it easier for users of
 *     the block pool that only want a one-sided pool.)
 *
 *  5) We have enough space allocated for at least one more block in
 *     whichever side `state` points to.
 *
 *  6) The center of the pool is always aligned to both the block_size of
 *     the pool and a 4K CPU page.
 */
707 anv_block_pool_grow(struct anv_block_pool
*pool
, struct anv_block_state
*state
)
709 VkResult result
= VK_SUCCESS
;
711 pthread_mutex_lock(&pool
->device
->mutex
);
713 assert(state
== &pool
->state
|| state
== &pool
->back_state
);
   /* Gather a little usage information on the pool.  Since we may have
    * threads waiting in queue to get some storage while we resize, it's
    * actually possible that total_used will be larger than old_size.  In
    * particular, block_pool_alloc() increments state->next prior to
    * calling block_pool_grow, so this ensures that we get enough space for
    * whichever side tries to grow the pool.
    *
    * We align to a page size because it makes it easier to do our
    * calculations later in such a way that we stay page-aligned.
    */
725 uint32_t back_used
= align_u32(pool
->back_state
.next
, PAGE_SIZE
);
726 uint32_t front_used
= align_u32(pool
->state
.next
, PAGE_SIZE
);
727 uint32_t total_used
= front_used
+ back_used
;
729 assert(state
== &pool
->state
|| back_used
> 0);
731 uint32_t old_size
= pool
->size
;
733 /* The block pool is always initialized to a nonzero size and this function
734 * is always called after initialization.
736 assert(old_size
> 0);
738 /* The back_used and front_used may actually be smaller than the actual
739 * requirement because they are based on the next pointers which are
740 * updated prior to calling this function.
742 uint32_t back_required
= MAX2(back_used
, pool
->center_bo_offset
);
743 uint32_t front_required
= MAX2(front_used
, old_size
- pool
->center_bo_offset
);
745 if (back_used
* 2 <= back_required
&& front_used
* 2 <= front_required
) {
      /* If we're in this case then this isn't the first allocation and we
       * already have enough space on both sides to hold double what we
       * have allocated.  There's nothing for us to do.
       */
753 uint32_t size
= old_size
* 2;
754 while (size
< back_required
+ front_required
)
757 assert(size
> pool
->size
);
759 /* We compute a new center_bo_offset such that, when we double the size
760 * of the pool, we maintain the ratio of how much is used by each side.
761 * This way things should remain more-or-less balanced.
763 uint32_t center_bo_offset
;
764 if (back_used
== 0) {
      /* If we're in this case then we have never called alloc_back().  In
       * this case, we want to keep the offset at 0 to make things as simple
       * as possible for users that don't care about back allocations.
       */
769 center_bo_offset
= 0;
771 /* Try to "center" the allocation based on how much is currently in
772 * use on each side of the center line.
774 center_bo_offset
= ((uint64_t)size
* back_used
) / total_used
;
776 /* Align down to a multiple of the page size */
777 center_bo_offset
&= ~(PAGE_SIZE
- 1);
779 assert(center_bo_offset
>= back_used
);
781 /* Make sure we don't shrink the back end of the pool */
782 if (center_bo_offset
< back_required
)
783 center_bo_offset
= back_required
;
785 /* Make sure that we don't shrink the front end of the pool */
786 if (size
- center_bo_offset
< front_required
)
787 center_bo_offset
= size
- front_required
;
790 assert(center_bo_offset
% PAGE_SIZE
== 0);
792 result
= anv_block_pool_expand_range(pool
, center_bo_offset
, size
);
794 pool
->bo
->flags
= pool
->bo_flags
;
797 pthread_mutex_unlock(&pool
->device
->mutex
);
799 if (result
== VK_SUCCESS
) {
800 /* Return the appropriate new size. This function never actually
801 * updates state->next. Instead, we let the caller do that because it
802 * needs to do so in order to maintain its concurrency model.
804 if (state
== &pool
->state
) {
805 return pool
->size
- pool
->center_bo_offset
;
807 assert(pool
->center_bo_offset
> 0);
808 return pool
->center_bo_offset
;
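/* Worked example of the re-centering above (numbers are illustrative): with
 * old_size = 64 KiB, an old center_bo_offset of 16 KiB, back_used = 8 KiB
 * and front_used = 40 KiB, we get total_used = 48 KiB and size = 128 KiB.
 * The proportional center is (128 KiB * 8 KiB) / 48 KiB ~= 21.3 KiB, which
 * aligns down to 20 KiB; that is >= back_required (16 KiB) and leaves
 * 108 KiB >= front_required (48 KiB) in front, so neither end shrinks and
 * both keep at least twice what they currently use.
 */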
816 anv_block_pool_alloc_new(struct anv_block_pool
*pool
,
817 struct anv_block_state
*pool_state
,
818 uint32_t block_size
, uint32_t *padding
)
820 struct anv_block_state state
, old
, new;
822 /* Most allocations won't generate any padding */
827 state
.u64
= __sync_fetch_and_add(&pool_state
->u64
, block_size
);
828 if (state
.next
+ block_size
<= state
.end
) {
830 } else if (state
.next
<= state
.end
) {
831 if (pool
->bo_flags
& EXEC_OBJECT_PINNED
&& state
.next
< state
.end
) {
832 /* We need to grow the block pool, but still have some leftover
833 * space that can't be used by that particular allocation. So we
834 * add that as a "padding", and return it.
836 uint32_t leftover
= state
.end
- state
.next
;
838 /* If there is some leftover space in the pool, the caller must
841 assert(leftover
== 0 || padding
);
844 state
.next
+= leftover
;
      /* We allocated the first block outside the pool, so we have to grow
       * the pool.  pool_state->next acts as a mutex: threads who try to
       * allocate now will get block indexes above the current limit and
       * hit futex_wait below.
       */
852 new.next
= state
.next
+ block_size
;
854 new.end
= anv_block_pool_grow(pool
, pool_state
);
855 } while (new.end
< new.next
);
857 old
.u64
= __sync_lock_test_and_set(&pool_state
->u64
, new.u64
);
858 if (old
.next
!= state
.next
)
859 futex_wake(&pool_state
->end
, INT_MAX
);
862 futex_wait(&pool_state
->end
, state
.end
, NULL
);
869 anv_block_pool_alloc(struct anv_block_pool
*pool
,
870 uint32_t block_size
, uint32_t *padding
)
874 offset
= anv_block_pool_alloc_new(pool
, &pool
->state
, block_size
, padding
);
/* Allocates a block out of the back of the block pool.
 *
 * This will allocate a block earlier than the "start" of the block pool.
 * The offsets returned from this function will be negative but will still
 * be correct relative to the block pool's map pointer.
 *
 * If you ever use anv_block_pool_alloc_back, then you will have to do
 * gymnastics with the block pool's BO when doing relocations.
 */
889 anv_block_pool_alloc_back(struct anv_block_pool
*pool
,
892 int32_t offset
= anv_block_pool_alloc_new(pool
, &pool
->back_state
,
895 /* The offset we get out of anv_block_pool_alloc_new() is actually the
896 * number of bytes downwards from the middle to the end of the block.
897 * We need to turn it into a (negative) offset from the middle to the
898 * start of the block.
901 return -(offset
+ block_size
);
905 anv_state_pool_init(struct anv_state_pool
*pool
,
906 struct anv_device
*device
,
907 uint64_t start_address
,
911 VkResult result
= anv_block_pool_init(&pool
->block_pool
, device
,
915 if (result
!= VK_SUCCESS
)
918 result
= anv_state_table_init(&pool
->table
, device
, 64);
919 if (result
!= VK_SUCCESS
) {
920 anv_block_pool_finish(&pool
->block_pool
);
924 assert(util_is_power_of_two_or_zero(block_size
));
925 pool
->block_size
= block_size
;
926 pool
->back_alloc_free_list
= ANV_FREE_LIST_EMPTY
;
927 for (unsigned i
= 0; i
< ANV_STATE_BUCKETS
; i
++) {
928 pool
->buckets
[i
].free_list
= ANV_FREE_LIST_EMPTY
;
929 pool
->buckets
[i
].block
.next
= 0;
930 pool
->buckets
[i
].block
.end
= 0;
932 VG(VALGRIND_CREATE_MEMPOOL(pool
, 0, false));
938 anv_state_pool_finish(struct anv_state_pool
*pool
)
940 VG(VALGRIND_DESTROY_MEMPOOL(pool
));
941 anv_state_table_finish(&pool
->table
);
942 anv_block_pool_finish(&pool
->block_pool
);
946 anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool
*pool
,
947 struct anv_block_pool
*block_pool
,
952 struct anv_block_state block
, old
, new;
955 /* We don't always use anv_block_pool_alloc(), which would set *padding to
956 * zero for us. So if we have a pointer to padding, we must zero it out
957 * ourselves here, to make sure we always return some sensible value.
962 /* If our state is large, we don't need any sub-allocation from a block.
963 * Instead, we just grab whole (potentially large) blocks.
965 if (state_size
>= block_size
)
966 return anv_block_pool_alloc(block_pool
, state_size
, padding
);
969 block
.u64
= __sync_fetch_and_add(&pool
->block
.u64
, state_size
);
971 if (block
.next
< block
.end
) {
973 } else if (block
.next
== block
.end
) {
974 offset
= anv_block_pool_alloc(block_pool
, block_size
, padding
);
975 new.next
= offset
+ state_size
;
976 new.end
= offset
+ block_size
;
977 old
.u64
= __sync_lock_test_and_set(&pool
->block
.u64
, new.u64
);
978 if (old
.next
!= block
.next
)
979 futex_wake(&pool
->block
.end
, INT_MAX
);
982 futex_wait(&pool
->block
.end
, block
.end
, NULL
);
static uint32_t
anv_state_pool_get_bucket(uint32_t size)
{
   unsigned size_log2 = ilog2_round_up(size);
   assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
   if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
      size_log2 = ANV_MIN_STATE_SIZE_LOG2;
   return size_log2 - ANV_MIN_STATE_SIZE_LOG2;
}

static uint32_t
anv_state_pool_get_bucket_size(uint32_t bucket)
{
   uint32_t size_log2 = bucket + ANV_MIN_STATE_SIZE_LOG2;
   return 1 << size_log2;
}
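/* For illustration, assuming ANV_MIN_STATE_SIZE_LOG2 is 6 (matching the
 * 64-byte minimum alignment noted above): a 64-byte request lands in
 * bucket 0, a 100-byte request rounds up to 128 bytes and lands in bucket 1,
 * and anv_state_pool_get_bucket_size(1) gives 128 back, so sizes and buckets
 * round-trip.
 */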
/** Helper to push a chunk into the state table.
 *
 * It creates 'count' entries in the state table and updates their sizes,
 * offsets and maps, also pushing them as "free" states.
 */
1010 anv_state_pool_return_blocks(struct anv_state_pool
*pool
,
1011 uint32_t chunk_offset
, uint32_t count
,
1012 uint32_t block_size
)
1014 /* Disallow returning 0 chunks */
1017 /* Make sure we always return chunks aligned to the block_size */
1018 assert(chunk_offset
% block_size
== 0);
1021 UNUSED VkResult result
= anv_state_table_add(&pool
->table
, &st_idx
, count
);
1022 assert(result
== VK_SUCCESS
);
1023 for (int i
= 0; i
< count
; i
++) {
1024 /* update states that were added back to the state table */
1025 struct anv_state
*state_i
= anv_state_table_get(&pool
->table
,
1027 state_i
->alloc_size
= block_size
;
1028 state_i
->offset
= chunk_offset
+ block_size
* i
;
1029 state_i
->map
= anv_block_pool_map(&pool
->block_pool
, state_i
->offset
);
1032 uint32_t block_bucket
= anv_state_pool_get_bucket(block_size
);
1033 anv_free_list_push(&pool
->buckets
[block_bucket
].free_list
,
1034 &pool
->table
, st_idx
, count
);
/** Returns a chunk of memory back to the state pool.
 *
 * Do a two-level split.  If chunk_size is bigger than divisor
 * (pool->block_size), we return as many divisor sized blocks as we can, from
 * the end of the chunk.
 *
 * The remainder is then split into smaller blocks (starting at small_size if
 * it is non-zero), with larger blocks always being taken from the end of the
 * chunk.
 */
1048 anv_state_pool_return_chunk(struct anv_state_pool
*pool
,
1049 uint32_t chunk_offset
, uint32_t chunk_size
,
1050 uint32_t small_size
)
1052 uint32_t divisor
= pool
->block_size
;
1053 uint32_t nblocks
= chunk_size
/ divisor
;
1054 uint32_t rest
= chunk_size
- nblocks
* divisor
;
   /* First return divisor aligned and sized chunks.  We start returning
    * larger blocks from the end of the chunk, since they should already be
    * aligned to divisor.  Also anv_state_pool_return_blocks() only accepts
    * aligned chunks.
    */
1062 uint32_t offset
= chunk_offset
+ rest
;
1063 anv_state_pool_return_blocks(pool
, offset
, nblocks
, divisor
);
1069 if (small_size
> 0 && small_size
< divisor
)
1070 divisor
= small_size
;
1072 uint32_t min_size
= 1 << ANV_MIN_STATE_SIZE_LOG2
;
1074 /* Just as before, return larger divisor aligned blocks from the end of the
1077 while (chunk_size
> 0 && divisor
>= min_size
) {
1078 nblocks
= chunk_size
/ divisor
;
1079 rest
= chunk_size
- nblocks
* divisor
;
1081 anv_state_pool_return_blocks(pool
, chunk_offset
+ rest
,
1089 static struct anv_state
1090 anv_state_pool_alloc_no_vg(struct anv_state_pool
*pool
,
1091 uint32_t size
, uint32_t align
)
1093 uint32_t bucket
= anv_state_pool_get_bucket(MAX2(size
, align
));
1095 struct anv_state
*state
;
1096 uint32_t alloc_size
= anv_state_pool_get_bucket_size(bucket
);
1099 /* Try free list first. */
1100 state
= anv_free_list_pop(&pool
->buckets
[bucket
].free_list
,
1103 assert(state
->offset
>= 0);
1107 /* Try to grab a chunk from some larger bucket and split it up */
1108 for (unsigned b
= bucket
+ 1; b
< ANV_STATE_BUCKETS
; b
++) {
1109 state
= anv_free_list_pop(&pool
->buckets
[b
].free_list
, &pool
->table
);
1111 unsigned chunk_size
= anv_state_pool_get_bucket_size(b
);
1112 int32_t chunk_offset
= state
->offset
;
         /* First let's update the state we got to its new size.  offset and
          * map remain the same.
          */
1117 state
->alloc_size
= alloc_size
;
1119 /* Now return the unused part of the chunk back to the pool as free
1122 * There are a couple of options as to what we do with it:
1124 * 1) We could fully split the chunk into state.alloc_size sized
1125 * pieces. However, this would mean that allocating a 16B
1126 * state could potentially split a 2MB chunk into 512K smaller
1127 * chunks. This would lead to unnecessary fragmentation.
1129 * 2) The classic "buddy allocator" method would have us split the
1130 * chunk in half and return one half. Then we would split the
1131 * remaining half in half and return one half, and repeat as
1132 * needed until we get down to the size we want. However, if
1133 * you are allocating a bunch of the same size state (which is
1134 * the common case), this means that every other allocation has
1135 * to go up a level and every fourth goes up two levels, etc.
1136 * This is not nearly as efficient as it could be if we did a
1137 * little more work up-front.
1139 * 3) Split the difference between (1) and (2) by doing a
1140 * two-level split. If it's bigger than some fixed block_size,
1141 * we split it into block_size sized chunks and return all but
1142 * one of them. Then we split what remains into
1143 * state.alloc_size sized chunks and return them.
1145 * We choose something close to option (3), which is implemented with
1146 * anv_state_pool_return_chunk(). That is done by returning the
1147 * remaining of the chunk, with alloc_size as a hint of the size that
1148 * we want the smaller chunk split into.
1150 anv_state_pool_return_chunk(pool
, chunk_offset
+ alloc_size
,
1151 chunk_size
- alloc_size
, alloc_size
);
1157 offset
= anv_fixed_size_state_pool_alloc_new(&pool
->buckets
[bucket
],
   /* Every time we allocate a new state, add it to the state pool */
1164 UNUSED VkResult result
= anv_state_table_add(&pool
->table
, &idx
, 1);
1165 assert(result
== VK_SUCCESS
);
1167 state
= anv_state_table_get(&pool
->table
, idx
);
1168 state
->offset
= offset
;
1169 state
->alloc_size
= alloc_size
;
1170 state
->map
= anv_block_pool_map(&pool
->block_pool
, offset
);
1173 uint32_t return_offset
= offset
- padding
;
1174 anv_state_pool_return_chunk(pool
, return_offset
, padding
, 0);
1182 anv_state_pool_alloc(struct anv_state_pool
*pool
, uint32_t size
, uint32_t align
)
1185 return ANV_STATE_NULL
;
1187 struct anv_state state
= anv_state_pool_alloc_no_vg(pool
, size
, align
);
1188 VG(VALGRIND_MEMPOOL_ALLOC(pool
, state
.map
, size
));
1193 anv_state_pool_alloc_back(struct anv_state_pool
*pool
)
1195 struct anv_state
*state
;
1196 uint32_t alloc_size
= pool
->block_size
;
1198 state
= anv_free_list_pop(&pool
->back_alloc_free_list
, &pool
->table
);
1200 assert(state
->offset
< 0);
1205 offset
= anv_block_pool_alloc_back(&pool
->block_pool
,
1208 UNUSED VkResult result
= anv_state_table_add(&pool
->table
, &idx
, 1);
1209 assert(result
== VK_SUCCESS
);
1211 state
= anv_state_table_get(&pool
->table
, idx
);
1212 state
->offset
= offset
;
1213 state
->alloc_size
= alloc_size
;
1214 state
->map
= anv_block_pool_map(&pool
->block_pool
, state
->offset
);
1217 VG(VALGRIND_MEMPOOL_ALLOC(pool
, state
->map
, state
->alloc_size
));
1222 anv_state_pool_free_no_vg(struct anv_state_pool
*pool
, struct anv_state state
)
1224 assert(util_is_power_of_two_or_zero(state
.alloc_size
));
1225 unsigned bucket
= anv_state_pool_get_bucket(state
.alloc_size
);
1227 if (state
.offset
< 0) {
1228 assert(state
.alloc_size
== pool
->block_size
);
1229 anv_free_list_push(&pool
->back_alloc_free_list
,
1230 &pool
->table
, state
.idx
, 1);
1232 anv_free_list_push(&pool
->buckets
[bucket
].free_list
,
1233 &pool
->table
, state
.idx
, 1);
1238 anv_state_pool_free(struct anv_state_pool
*pool
, struct anv_state state
)
1240 if (state
.alloc_size
== 0)
1243 VG(VALGRIND_MEMPOOL_FREE(pool
, state
.map
));
1244 anv_state_pool_free_no_vg(pool
, state
);
1247 struct anv_state_stream_block
{
1248 struct anv_state block
;
1250 /* The next block */
1251 struct anv_state_stream_block
*next
;
1253 #ifdef HAVE_VALGRIND
1254 /* A pointer to the first user-allocated thing in this block. This is
1255 * what valgrind sees as the start of the block.
/* The state stream allocator is a one-shot, single threaded allocator for
 * variable sized blocks.  We use it for allocating dynamic state.
 */
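/* Typical lifetime, for illustration only (the pool name is an assumption
 * and error handling is omitted):
 *
 *    struct anv_state_stream stream;
 *    anv_state_stream_init(&stream, &device->dynamic_state_pool, 16384);
 *    // ... once per transient object:
 *    struct anv_state s = anv_state_stream_alloc(&stream, 64, 32);
 *    // fill s.map; s stays valid until the stream is finished
 *    anv_state_stream_finish(&stream);   // returns every block to the pool
 */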
1265 anv_state_stream_init(struct anv_state_stream
*stream
,
1266 struct anv_state_pool
*state_pool
,
1267 uint32_t block_size
)
1269 stream
->state_pool
= state_pool
;
1270 stream
->block_size
= block_size
;
1272 stream
->block
= ANV_STATE_NULL
;
1274 stream
->block_list
= NULL
;
1276 /* Ensure that next + whatever > block_size. This way the first call to
1277 * state_stream_alloc fetches a new block.
1279 stream
->next
= block_size
;
1281 VG(VALGRIND_CREATE_MEMPOOL(stream
, 0, false));
1285 anv_state_stream_finish(struct anv_state_stream
*stream
)
1287 struct anv_state_stream_block
*next
= stream
->block_list
;
1288 while (next
!= NULL
) {
1289 struct anv_state_stream_block sb
= VG_NOACCESS_READ(next
);
1290 VG(VALGRIND_MEMPOOL_FREE(stream
, sb
._vg_ptr
));
1291 VG(VALGRIND_MAKE_MEM_UNDEFINED(next
, stream
->block_size
));
1292 anv_state_pool_free_no_vg(stream
->state_pool
, sb
.block
);
1296 VG(VALGRIND_DESTROY_MEMPOOL(stream
));
1300 anv_state_stream_alloc(struct anv_state_stream
*stream
,
1301 uint32_t size
, uint32_t alignment
)
1304 return ANV_STATE_NULL
;
1306 assert(alignment
<= PAGE_SIZE
);
1308 uint32_t offset
= align_u32(stream
->next
, alignment
);
1309 if (offset
+ size
> stream
->block
.alloc_size
) {
1310 uint32_t block_size
= stream
->block_size
;
1311 if (block_size
< size
)
1312 block_size
= round_to_power_of_two(size
);
1314 stream
->block
= anv_state_pool_alloc_no_vg(stream
->state_pool
,
1315 block_size
, PAGE_SIZE
);
1317 struct anv_state_stream_block
*sb
= stream
->block
.map
;
1318 VG_NOACCESS_WRITE(&sb
->block
, stream
->block
);
1319 VG_NOACCESS_WRITE(&sb
->next
, stream
->block_list
);
1320 stream
->block_list
= sb
;
1321 VG(VG_NOACCESS_WRITE(&sb
->_vg_ptr
, NULL
));
1323 VG(VALGRIND_MAKE_MEM_NOACCESS(stream
->block
.map
, stream
->block_size
));
1325 /* Reset back to the start plus space for the header */
1326 stream
->next
= sizeof(*sb
);
1328 offset
= align_u32(stream
->next
, alignment
);
1329 assert(offset
+ size
<= stream
->block
.alloc_size
);
1332 struct anv_state state
= stream
->block
;
1333 state
.offset
+= offset
;
1334 state
.alloc_size
= size
;
1335 state
.map
+= offset
;
1337 stream
->next
= offset
+ size
;
1339 #ifdef HAVE_VALGRIND
1340 struct anv_state_stream_block
*sb
= stream
->block_list
;
1341 void *vg_ptr
= VG_NOACCESS_READ(&sb
->_vg_ptr
);
1342 if (vg_ptr
== NULL
) {
1344 VG_NOACCESS_WRITE(&sb
->_vg_ptr
, vg_ptr
);
1345 VALGRIND_MEMPOOL_ALLOC(stream
, vg_ptr
, size
);
1347 void *state_end
= state
.map
+ state
.alloc_size
;
1348 /* This only updates the mempool. The newly allocated chunk is still
1349 * marked as NOACCESS. */
1350 VALGRIND_MEMPOOL_CHANGE(stream
, vg_ptr
, vg_ptr
, state_end
- vg_ptr
);
1351 /* Mark the newly allocated chunk as undefined */
1352 VALGRIND_MAKE_MEM_UNDEFINED(state
.map
, state
.alloc_size
);
1359 struct bo_pool_bo_link
{
1360 struct bo_pool_bo_link
*next
;
1365 anv_bo_pool_init(struct anv_bo_pool
*pool
, struct anv_device
*device
,
1368 pool
->device
= device
;
1369 pool
->bo_flags
= bo_flags
;
1370 memset(pool
->free_list
, 0, sizeof(pool
->free_list
));
1372 VG(VALGRIND_CREATE_MEMPOOL(pool
, 0, false));
1376 anv_bo_pool_finish(struct anv_bo_pool
*pool
)
1378 for (unsigned i
= 0; i
< ARRAY_SIZE(pool
->free_list
); i
++) {
1379 struct bo_pool_bo_link
*link
= PFL_PTR(pool
->free_list
[i
]);
1380 while (link
!= NULL
) {
1381 struct bo_pool_bo_link link_copy
= VG_NOACCESS_READ(link
);
1383 anv_gem_munmap(link_copy
.bo
.map
, link_copy
.bo
.size
);
1384 anv_vma_free(pool
->device
, &link_copy
.bo
);
1385 anv_gem_close(pool
->device
, link_copy
.bo
.gem_handle
);
1386 link
= link_copy
.next
;
1390 VG(VALGRIND_DESTROY_MEMPOOL(pool
));
1394 anv_bo_pool_alloc(struct anv_bo_pool
*pool
, struct anv_bo
*bo
, uint32_t size
)
1398 const unsigned size_log2
= size
< 4096 ? 12 : ilog2_round_up(size
);
1399 const unsigned pow2_size
= 1 << size_log2
;
1400 const unsigned bucket
= size_log2
- 12;
1401 assert(bucket
< ARRAY_SIZE(pool
->free_list
));
1403 void *next_free_void
;
1404 if (anv_ptr_free_list_pop(&pool
->free_list
[bucket
], &next_free_void
)) {
1405 struct bo_pool_bo_link
*next_free
= next_free_void
;
1406 *bo
= VG_NOACCESS_READ(&next_free
->bo
);
1407 assert(bo
->gem_handle
);
1408 assert(bo
->map
== next_free
);
1409 assert(size
<= bo
->size
);
1411 VG(VALGRIND_MEMPOOL_ALLOC(pool
, bo
->map
, size
));
1416 struct anv_bo new_bo
;
1418 result
= anv_bo_init_new(&new_bo
, pool
->device
, pow2_size
);
1419 if (result
!= VK_SUCCESS
)
1422 new_bo
.flags
= pool
->bo_flags
;
1424 if (!anv_vma_alloc(pool
->device
, &new_bo
))
1425 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
1427 assert(new_bo
.size
== pow2_size
);
1429 new_bo
.map
= anv_gem_mmap(pool
->device
, new_bo
.gem_handle
, 0, pow2_size
, 0);
1430 if (new_bo
.map
== MAP_FAILED
) {
1431 anv_gem_close(pool
->device
, new_bo
.gem_handle
);
1432 anv_vma_free(pool
->device
, &new_bo
);
1433 return vk_error(VK_ERROR_MEMORY_MAP_FAILED
);
   /* We are removing the state flushes, so let's make sure that these
    * buffers are cached/snooped.
    */
1439 if (!pool
->device
->info
.has_llc
) {
1440 anv_gem_set_caching(pool
->device
, new_bo
.gem_handle
,
1441 I915_CACHING_CACHED
);
1446 VG(VALGRIND_MEMPOOL_ALLOC(pool
, bo
->map
, size
));
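/* For illustration: a 5000-byte request has ilog2_round_up(5000) == 13, so
 * it is served from bucket 1 as a single 8192-byte BO; a later 6000-byte
 * request rounds up to the same power of two and can reuse that BO straight
 * from free_list[1].
 */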
1452 anv_bo_pool_free(struct anv_bo_pool
*pool
, const struct anv_bo
*bo_in
)
   /* Make a copy in case the anv_bo happens to be stored in the BO */
1455 struct anv_bo bo
= *bo_in
;
1457 VG(VALGRIND_MEMPOOL_FREE(pool
, bo
.map
));
1459 struct bo_pool_bo_link
*link
= bo
.map
;
1460 VG_NOACCESS_WRITE(&link
->bo
, bo
);
1462 assert(util_is_power_of_two_or_zero(bo
.size
));
1463 const unsigned size_log2
= ilog2_round_up(bo
.size
);
1464 const unsigned bucket
= size_log2
- 12;
1465 assert(bucket
< ARRAY_SIZE(pool
->free_list
));
1467 anv_ptr_free_list_push(&pool
->free_list
[bucket
], link
);
1473 anv_scratch_pool_init(struct anv_device
*device
, struct anv_scratch_pool
*pool
)
1475 memset(pool
, 0, sizeof(*pool
));
1479 anv_scratch_pool_finish(struct anv_device
*device
, struct anv_scratch_pool
*pool
)
1481 for (unsigned s
= 0; s
< MESA_SHADER_STAGES
; s
++) {
1482 for (unsigned i
= 0; i
< 16; i
++) {
1483 struct anv_scratch_bo
*bo
= &pool
->bos
[i
][s
];
1484 if (bo
->exists
> 0) {
1485 anv_vma_free(device
, &bo
->bo
);
1486 anv_gem_close(device
, bo
->bo
.gem_handle
);
1493 anv_scratch_pool_alloc(struct anv_device
*device
, struct anv_scratch_pool
*pool
,
1494 gl_shader_stage stage
, unsigned per_thread_scratch
)
1496 if (per_thread_scratch
== 0)
1499 unsigned scratch_size_log2
= ffs(per_thread_scratch
/ 2048);
1500 assert(scratch_size_log2
< 16);
1502 struct anv_scratch_bo
*bo
= &pool
->bos
[scratch_size_log2
][stage
];
1504 /* We can use "exists" to shortcut and ignore the critical section */
1508 pthread_mutex_lock(&device
->mutex
);
1510 __sync_synchronize();
1512 pthread_mutex_unlock(&device
->mutex
);
1516 const struct anv_physical_device
*physical_device
=
1517 &device
->instance
->physicalDevice
;
1518 const struct gen_device_info
*devinfo
= &physical_device
->info
;
1520 const unsigned subslices
= MAX2(physical_device
->subslice_total
, 1);
1522 unsigned scratch_ids_per_subslice
;
1523 if (devinfo
->gen
>= 11) {
1524 /* The MEDIA_VFE_STATE docs say:
1526 * "Starting with this configuration, the Maximum Number of
1527 * Threads must be set to (#EU * 8) for GPGPU dispatches.
1529 * Although there are only 7 threads per EU in the configuration,
1530 * the FFTID is calculated as if there are 8 threads per EU,
1531 * which in turn requires a larger amount of Scratch Space to be
1532 * allocated by the driver."
1534 scratch_ids_per_subslice
= 8 * 8;
1535 } else if (devinfo
->is_haswell
) {
1536 /* WaCSScratchSize:hsw
1538 * Haswell's scratch space address calculation appears to be sparse
1539 * rather than tightly packed. The Thread ID has bits indicating
1540 * which subslice, EU within a subslice, and thread within an EU it
1541 * is. There's a maximum of two slices and two subslices, so these
1542 * can be stored with a single bit. Even though there are only 10 EUs
1543 * per subslice, this is stored in 4 bits, so there's an effective
1544 * maximum value of 16 EUs. Similarly, although there are only 7
1545 * threads per EU, this is stored in a 3 bit number, giving an
1546 * effective maximum value of 8 threads per EU.
1548 * This means that we need to use 16 * 8 instead of 10 * 7 for the
1549 * number of threads per subslice.
1551 scratch_ids_per_subslice
= 16 * 8;
1552 } else if (devinfo
->is_cherryview
) {
1553 /* Cherryview devices have either 6 or 8 EUs per subslice, and each EU
1554 * has 7 threads. The 6 EU devices appear to calculate thread IDs as if
1557 scratch_ids_per_subslice
= 8 * 7;
1559 scratch_ids_per_subslice
= devinfo
->max_cs_threads
;
1562 uint32_t max_threads
[] = {
1563 [MESA_SHADER_VERTEX
] = devinfo
->max_vs_threads
,
1564 [MESA_SHADER_TESS_CTRL
] = devinfo
->max_tcs_threads
,
1565 [MESA_SHADER_TESS_EVAL
] = devinfo
->max_tes_threads
,
1566 [MESA_SHADER_GEOMETRY
] = devinfo
->max_gs_threads
,
1567 [MESA_SHADER_FRAGMENT
] = devinfo
->max_wm_threads
,
1568 [MESA_SHADER_COMPUTE
] = scratch_ids_per_subslice
* subslices
,
1571 uint32_t size
= per_thread_scratch
* max_threads
[stage
];
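   /* Worked example (illustrative, hypothetical part): on a Gen9 GT with
    * subslice_total = 9 and max_cs_threads = 56, a compute shader requesting
    * per_thread_scratch = 2048 gets scratch_ids_per_subslice = 56, so
    * max_threads[MESA_SHADER_COMPUTE] = 56 * 9 = 504 and the scratch BO is
    * 2048 * 504 = 1032192 bytes.
    */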
1573 anv_bo_init_new(&bo
->bo
, device
, size
);
1575 /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
1576 * are still relative to the general state base address. When we emit
1577 * STATE_BASE_ADDRESS, we set general state base address to 0 and the size
1578 * to the maximum (1 page under 4GB). This allows us to just place the
1579 * scratch buffers anywhere we wish in the bottom 32 bits of address space
1580 * and just set the scratch base pointer in 3DSTATE_*S using a relocation.
1581 * However, in order to do so, we need to ensure that the kernel does not
1582 * place the scratch BO above the 32-bit boundary.
1584 * NOTE: Technically, it can't go "anywhere" because the top page is off
1585 * limits. However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
1586 * kernel allocates space using
1588 * end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
1590 * so nothing will ever touch the top page.
1592 assert(!(bo
->bo
.flags
& EXEC_OBJECT_SUPPORTS_48B_ADDRESS
));
1594 if (device
->instance
->physicalDevice
.has_exec_async
)
1595 bo
->bo
.flags
|= EXEC_OBJECT_ASYNC
;
1597 if (device
->instance
->physicalDevice
.use_softpin
)
1598 bo
->bo
.flags
|= EXEC_OBJECT_PINNED
;
1600 anv_vma_alloc(device
, &bo
->bo
);
1602 /* Set the exists last because it may be read by other threads */
1603 __sync_synchronize();
1606 pthread_mutex_unlock(&device
->mutex
);
1611 struct anv_cached_bo
{
1618 anv_bo_cache_init(struct anv_bo_cache
*cache
)
1620 cache
->bo_map
= _mesa_pointer_hash_table_create(NULL
);
1622 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1624 if (pthread_mutex_init(&cache
->mutex
, NULL
)) {
1625 _mesa_hash_table_destroy(cache
->bo_map
, NULL
);
1626 return vk_errorf(NULL
, NULL
, VK_ERROR_OUT_OF_HOST_MEMORY
,
1627 "pthread_mutex_init failed: %m");
1634 anv_bo_cache_finish(struct anv_bo_cache
*cache
)
1636 _mesa_hash_table_destroy(cache
->bo_map
, NULL
);
1637 pthread_mutex_destroy(&cache
->mutex
);
1640 static struct anv_cached_bo
*
1641 anv_bo_cache_lookup_locked(struct anv_bo_cache
*cache
, uint32_t gem_handle
)
1643 struct hash_entry
*entry
=
1644 _mesa_hash_table_search(cache
->bo_map
,
1645 (const void *)(uintptr_t)gem_handle
);
1649 struct anv_cached_bo
*bo
= (struct anv_cached_bo
*)entry
->data
;
1650 assert(bo
->bo
.gem_handle
== gem_handle
);
1655 UNUSED
static struct anv_bo
*
1656 anv_bo_cache_lookup(struct anv_bo_cache
*cache
, uint32_t gem_handle
)
1658 pthread_mutex_lock(&cache
->mutex
);
1660 struct anv_cached_bo
*bo
= anv_bo_cache_lookup_locked(cache
, gem_handle
);
1662 pthread_mutex_unlock(&cache
->mutex
);
1664 return bo
? &bo
->bo
: NULL
;
1667 #define ANV_BO_CACHE_SUPPORTED_FLAGS \
1668 (EXEC_OBJECT_WRITE | \
1669 EXEC_OBJECT_ASYNC | \
1670 EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
1671 EXEC_OBJECT_PINNED | \
1675 anv_bo_cache_alloc(struct anv_device
*device
,
1676 struct anv_bo_cache
*cache
,
1677 uint64_t size
, uint64_t bo_flags
,
1678 struct anv_bo
**bo_out
)
1680 assert(bo_flags
== (bo_flags
& ANV_BO_CACHE_SUPPORTED_FLAGS
));
1682 struct anv_cached_bo
*bo
=
1683 vk_alloc(&device
->alloc
, sizeof(struct anv_cached_bo
), 8,
1684 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1686 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1690 /* The kernel is going to give us whole pages anyway */
1691 size
= align_u64(size
, 4096);
1693 VkResult result
= anv_bo_init_new(&bo
->bo
, device
, size
);
1694 if (result
!= VK_SUCCESS
) {
1695 vk_free(&device
->alloc
, bo
);
1699 bo
->bo
.flags
= bo_flags
;
1701 if (!anv_vma_alloc(device
, &bo
->bo
)) {
1702 anv_gem_close(device
, bo
->bo
.gem_handle
);
1703 vk_free(&device
->alloc
, bo
);
1704 return vk_errorf(device
->instance
, NULL
,
1705 VK_ERROR_OUT_OF_DEVICE_MEMORY
,
1706 "failed to allocate virtual address for BO");
1709 assert(bo
->bo
.gem_handle
);
1711 pthread_mutex_lock(&cache
->mutex
);
1713 _mesa_hash_table_insert(cache
->bo_map
,
1714 (void *)(uintptr_t)bo
->bo
.gem_handle
, bo
);
1716 pthread_mutex_unlock(&cache
->mutex
);
1724 anv_bo_cache_import_host_ptr(struct anv_device
*device
,
1725 struct anv_bo_cache
*cache
,
1726 void *host_ptr
, uint32_t size
,
1727 uint64_t bo_flags
, struct anv_bo
**bo_out
)
1729 assert(bo_flags
== (bo_flags
& ANV_BO_CACHE_SUPPORTED_FLAGS
));
1730 assert((bo_flags
& ANV_BO_EXTERNAL
) == 0);
1732 uint32_t gem_handle
= anv_gem_userptr(device
, host_ptr
, size
);
1734 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1736 pthread_mutex_lock(&cache
->mutex
);
1738 struct anv_cached_bo
*bo
= anv_bo_cache_lookup_locked(cache
, gem_handle
);
      /* VK_EXT_external_memory_host doesn't require handling importing the
       * same pointer twice at the same time, but we don't get in the way.
       * If the kernel gives us the same gem_handle, only succeed if the
       * flags match.
       */
1744 if (bo_flags
!= bo
->bo
.flags
) {
1745 pthread_mutex_unlock(&cache
->mutex
);
1746 return vk_errorf(device
->instance
, NULL
,
1747 VK_ERROR_INVALID_EXTERNAL_HANDLE
,
1748 "same host pointer imported two different ways");
1750 __sync_fetch_and_add(&bo
->refcount
, 1);
1752 bo
= vk_alloc(&device
->alloc
, sizeof(struct anv_cached_bo
), 8,
1753 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1755 anv_gem_close(device
, gem_handle
);
1756 pthread_mutex_unlock(&cache
->mutex
);
1757 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1762 anv_bo_init(&bo
->bo
, gem_handle
, size
);
1763 bo
->bo
.flags
= bo_flags
;
1765 if (!anv_vma_alloc(device
, &bo
->bo
)) {
1766 anv_gem_close(device
, bo
->bo
.gem_handle
);
1767 pthread_mutex_unlock(&cache
->mutex
);
1768 vk_free(&device
->alloc
, bo
);
1769 return vk_errorf(device
->instance
, NULL
,
1770 VK_ERROR_OUT_OF_DEVICE_MEMORY
,
1771 "failed to allocate virtual address for BO");
1774 _mesa_hash_table_insert(cache
->bo_map
, (void *)(uintptr_t)gem_handle
, bo
);
1777 pthread_mutex_unlock(&cache
->mutex
);
1784 anv_bo_cache_import(struct anv_device
*device
,
1785 struct anv_bo_cache
*cache
,
1786 int fd
, uint64_t bo_flags
,
1787 struct anv_bo
**bo_out
)
1789 assert(bo_flags
== (bo_flags
& ANV_BO_CACHE_SUPPORTED_FLAGS
));
1790 assert(bo_flags
& ANV_BO_EXTERNAL
);
1792 pthread_mutex_lock(&cache
->mutex
);
1794 uint32_t gem_handle
= anv_gem_fd_to_handle(device
, fd
);
1796 pthread_mutex_unlock(&cache
->mutex
);
1797 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1800 struct anv_cached_bo
*bo
= anv_bo_cache_lookup_locked(cache
, gem_handle
);
      /* We have to be careful how we combine flags so that it makes sense.
       * Really, though, if we get to this case and it actually matters, the
       * client has imported a BO twice in different ways and they get what
       * they get.
       */
1807 uint64_t new_flags
= ANV_BO_EXTERNAL
;
1808 new_flags
|= (bo
->bo
.flags
| bo_flags
) & EXEC_OBJECT_WRITE
;
1809 new_flags
|= (bo
->bo
.flags
& bo_flags
) & EXEC_OBJECT_ASYNC
;
1810 new_flags
|= (bo
->bo
.flags
& bo_flags
) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS
;
1811 new_flags
|= (bo
->bo
.flags
| bo_flags
) & EXEC_OBJECT_PINNED
;
1813 /* It's theoretically possible for a BO to get imported such that it's
1814 * both pinned and not pinned. The only way this can happen is if it
1815 * gets imported as both a semaphore and a memory object and that would
1816 * be an application error. Just fail out in that case.
1818 if ((bo
->bo
.flags
& EXEC_OBJECT_PINNED
) !=
1819 (bo_flags
& EXEC_OBJECT_PINNED
)) {
1820 pthread_mutex_unlock(&cache
->mutex
);
1821 return vk_errorf(device
->instance
, NULL
,
1822 VK_ERROR_INVALID_EXTERNAL_HANDLE
,
1823 "The same BO was imported two different ways");
1826 /* It's also theoretically possible that someone could export a BO from
1827 * one heap and import it into another or to import the same BO into two
1828 * different heaps. If this happens, we could potentially end up both
1829 * allowing and disallowing 48-bit addresses. There's not much we can
1830 * do about it if we're pinning so we just throw an error and hope no
1831 * app is actually that stupid.
1833 if ((new_flags
& EXEC_OBJECT_PINNED
) &&
1834 (bo
->bo
.flags
& EXEC_OBJECT_SUPPORTS_48B_ADDRESS
) !=
1835 (bo_flags
& EXEC_OBJECT_SUPPORTS_48B_ADDRESS
)) {
1836 pthread_mutex_unlock(&cache
->mutex
);
1837 return vk_errorf(device
->instance
, NULL
,
1838 VK_ERROR_INVALID_EXTERNAL_HANDLE
,
1839 "The same BO was imported on two different heaps");
1842 bo
->bo
.flags
= new_flags
;
1844 __sync_fetch_and_add(&bo
->refcount
, 1);
1846 off_t size
= lseek(fd
, 0, SEEK_END
);
1847 if (size
== (off_t
)-1) {
1848 anv_gem_close(device
, gem_handle
);
1849 pthread_mutex_unlock(&cache
->mutex
);
1850 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1853 bo
= vk_alloc(&device
->alloc
, sizeof(struct anv_cached_bo
), 8,
1854 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1856 anv_gem_close(device
, gem_handle
);
1857 pthread_mutex_unlock(&cache
->mutex
);
1858 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1863 anv_bo_init(&bo
->bo
, gem_handle
, size
);
1864 bo
->bo
.flags
= bo_flags
;
1866 if (!anv_vma_alloc(device
, &bo
->bo
)) {
1867 anv_gem_close(device
, bo
->bo
.gem_handle
);
1868 pthread_mutex_unlock(&cache
->mutex
);
1869 vk_free(&device
->alloc
, bo
);
1870 return vk_errorf(device
->instance
, NULL
,
1871 VK_ERROR_OUT_OF_DEVICE_MEMORY
,
1872 "failed to allocate virtual address for BO");
1875 _mesa_hash_table_insert(cache
->bo_map
, (void *)(uintptr_t)gem_handle
, bo
);
1878 pthread_mutex_unlock(&cache
->mutex
);
1885 anv_bo_cache_export(struct anv_device
*device
,
1886 struct anv_bo_cache
*cache
,
1887 struct anv_bo
*bo_in
, int *fd_out
)
1889 assert(anv_bo_cache_lookup(cache
, bo_in
->gem_handle
) == bo_in
);
1890 struct anv_cached_bo
*bo
= (struct anv_cached_bo
*)bo_in
;
1892 /* This BO must have been flagged external in order for us to be able
1893 * to export it. This is done based on external options passed into
1894 * anv_AllocateMemory.
1896 assert(bo
->bo
.flags
& ANV_BO_EXTERNAL
);
1898 int fd
= anv_gem_handle_to_fd(device
, bo
->bo
.gem_handle
);
1900 return vk_error(VK_ERROR_TOO_MANY_OBJECTS
);
static bool
atomic_dec_not_one(uint32_t *counter)
{
   uint32_t old, val;

   val = *counter;
   while (1) {
      /* Never drop the count below one; the caller takes the mutex instead. */
      if (val == 1)
         return false;

      old = __sync_val_compare_and_swap(counter, val, val - 1);
      if (old == val)
         return true;

      val = old;
   }
}
1926 anv_bo_cache_release(struct anv_device
*device
,
1927 struct anv_bo_cache
*cache
,
1928 struct anv_bo
*bo_in
)
1930 assert(anv_bo_cache_lookup(cache
, bo_in
->gem_handle
) == bo_in
);
1931 struct anv_cached_bo
*bo
= (struct anv_cached_bo
*)bo_in
;
   /* Try to decrement the counter but don't go below one.  If this succeeds
    * then the refcount has been decremented and we are not the last
    * reference.
    */
1937 if (atomic_dec_not_one(&bo
->refcount
))
1940 pthread_mutex_lock(&cache
->mutex
);
   /* We are probably the last reference since our attempt to decrement above
    * failed.  However, we can't actually know until we are inside the mutex.
    * Otherwise, someone could import the BO between the decrement and our
    * taking the mutex.
    */
1947 if (unlikely(__sync_sub_and_fetch(&bo
->refcount
, 1) > 0)) {
1948 /* Turns out we're not the last reference. Unlock and bail. */
1949 pthread_mutex_unlock(&cache
->mutex
);
1953 struct hash_entry
*entry
=
1954 _mesa_hash_table_search(cache
->bo_map
,
1955 (const void *)(uintptr_t)bo
->bo
.gem_handle
);
1957 _mesa_hash_table_remove(cache
->bo_map
, entry
);
1960 anv_gem_munmap(bo
->bo
.map
, bo
->bo
.size
);
1962 anv_vma_free(device
, &bo
->bo
);
1964 anv_gem_close(device
, bo
->bo
.gem_handle
);
1966 /* Don't unlock until we've actually closed the BO. The whole point of
1967 * the BO cache is to ensure that we correctly handle races with creating
1968 * and releasing GEM handles and we don't want to let someone import the BO
1969 * again between mutex unlock and closing the GEM handle.
1971 pthread_mutex_unlock(&cache
->mutex
);
1973 vk_free(&device
->alloc
, bo
);