/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define _DEFAULT_SOURCE

#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <limits.h>
#include <linux/futex.h>
#include <linux/memfd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#include "anv_private.h"
#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({                          \
   VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr)));    \
   __typeof(*(__ptr)) __val = *(__ptr);                     \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));   \
   __val;                                                   \
})
#define VG_NOACCESS_WRITE(__ptr, __val) ({                  \
   VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr)));  \
   *(__ptr) = (__val);                                      \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));   \
})
#else
#define VG_NOACCESS_READ(__ptr) (*(__ptr))
#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
#endif
/* Design goals:
 *
 *  - Lock free (except when resizing underlying bos)
 *
 *  - Constant time allocation with typically only one atomic
 *
 *  - Multiple allocation sizes without fragmentation
 *
 *  - Can grow while keeping addresses and offset of contents stable
 *
 *  - All allocations within one bo so we can point one of the
 *    STATE_BASE_ADDRESS pointers at it.
 *
 * The overall design is a two-level allocator: top level is a fixed size, big
 * block (8k) allocator, which operates out of a bo.  Allocation is done by
 * either pulling a block from the free list or growing the used range of the
 * bo.  Growing the range may run out of space in the bo, which we then need
 * to grow.  Growing the bo is tricky in a multi-threaded, lockless
 * environment: we need to keep all pointers and contents in the old map
 * valid.  GEM bos in general can't grow, but we use a trick: we create a
 * memfd and use ftruncate to grow it as necessary.  We mmap the new size and
 * then create a gem bo for it using the new gem userptr ioctl.  Without
 * heavy-handed locking around our allocation fast-path, there isn't really a
 * way to munmap the old mmap, so we just keep it around until garbage
 * collection time.  While the block allocator is lockless for normal
 * operations, we block other threads trying to allocate while we're growing
 * the map.  It shouldn't happen often, and growing is fast anyway.
 * At the next level we can use various sub-allocators.  The state pool is a
 * pool of smaller, fixed size objects, which operates much like the block
 * pool.  It uses a free list for freeing objects, but when it runs out of
 * space it just allocates a new block from the block pool.  This allocator is
 * intended for longer lived state objects such as SURFACE_STATE and most
 * other persistent state objects in the API.  We may need to track more info
 * with these objects and a pointer back to the CPU object (e.g. VkImage).  In
 * those cases we just allocate a slightly bigger object and put the extra
 * state after the GPU state object.
 *
 * The state stream allocator works similarly to how the i965 DRI driver
 * streams all its state.  Even with Vulkan, we need to emit transient state
 * (whether surface state base or dynamic state base), and for that we can
 * just get a block and fill it up.  These cases are local to a command buffer
 * and the sub-allocator need not be thread safe.  The streaming allocator
 * gets a new block when it runs out of space and chains them together so they
 * can be easily freed.
 */
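/* A minimal usage sketch (illustrative only: the entry points are the ones
 * defined in this file, but the device pointer, sizes and alignments below
 * are assumptions, not values taken from the driver):
 *
 *    struct anv_block_pool block_pool;
 *    struct anv_state_pool state_pool;
 *    struct anv_state_stream stream;
 *
 *    anv_block_pool_init(&block_pool, device, 8192);
 *    anv_state_pool_init(&state_pool, &block_pool);
 *    anv_state_stream_init(&stream, &block_pool);
 *
 *    struct anv_state surface_state =
 *       anv_state_pool_alloc(&state_pool, 64, 64);
 *    struct anv_state transient =
 *       anv_state_stream_alloc(&stream, 256, 32);
 *
 *    anv_state_pool_free(&state_pool, surface_state);
 *    anv_state_stream_finish(&stream);
 *    anv_block_pool_finish(&block_pool);
 */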
/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
 * We use it to indicate the free list is empty.
 */
#define EMPTY 1
struct anv_mmap_cleanup {
   void *map;
   size_t size;
   uint32_t gem_handle;
};

#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})
static inline int
sys_futex(void *addr1, int op, int val1,
          struct timespec *timeout, void *addr2, int val3)
{
   return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
}
static inline int
futex_wake(uint32_t *addr, int count)
{
   return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0);
}
static inline int
futex_wait(uint32_t *addr, int32_t value)
{
   return sys_futex(addr, FUTEX_WAIT, value, NULL, NULL, 0);
}
static inline int
memfd_create(const char *name, unsigned int flags)
{
   return syscall(SYS_memfd_create, name, flags);
}
static inline uint32_t
ilog2_round_up(uint32_t value)
{
   assert(value != 0);
   return 32 - __builtin_clz(value - 1);
}
static inline uint32_t
round_to_power_of_two(uint32_t value)
{
   return 1 << ilog2_round_up(value);
}
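/* For example, ilog2_round_up(24) == 5, so round_to_power_of_two(24) == 32.
 * Both helpers above assume value > 1: for value == 1 the argument to
 * __builtin_clz would be 0, whose result is undefined.
 */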
static bool
anv_free_list_pop(union anv_free_list *list, void **map, uint32_t *offset)
{
   union anv_free_list current, next, old;

   current.u64 = list->u64;
   while (current.offset != EMPTY) {
      /* We have to add a memory barrier here so that the list head (and
       * offset) gets read before we read the map pointer.  This way we
       * know that the map pointer is valid for the given offset at the
       * point where we read it.
       */
      __sync_synchronize();

      uint32_t *next_ptr = *map + current.offset;
      next.offset = VG_NOACCESS_READ(next_ptr);
      next.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, next.u64);
      if (old.u64 == current.u64) {
         *offset = current.offset;
         return true;
      }
      current = old;
   }

   return false;
}
static void
anv_free_list_push(union anv_free_list *list, void *map, uint32_t offset)
{
   union anv_free_list current, old, new;
   uint32_t *next_ptr = map + offset;

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, current.offset);
      new.offset = offset;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
   } while (old.u64 != current.u64);
}
/* All pointers in the ptr_free_list are assumed to be page-aligned.  This
 * means that the bottom 12 bits should all be zero.
 */
#define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
#define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~0xfff))
#define PFL_PACK(ptr, count) ({                                   \
   assert(((uintptr_t)(ptr) & 0xfff) == 0);                       \
   (void *)((uintptr_t)(ptr) | (uintptr_t)((count) & 0xfff));     \
})
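/* As with the count field in union anv_free_list, the packed count is an ABA
 * guard: re-pushing the same pointer yields a different packed word, so a
 * stale compare-and-swap can't mistake a recycled list head for an unchanged
 * one.  A sketch of the round-trip (pointer value illustrative, and it must
 * be page-aligned for PFL_PACK's assert to hold):
 *
 *    void *packed = PFL_PACK((void *)0x7f0000201000, 5);
 *    assert(PFL_PTR(packed) == (void *)0x7f0000201000);
 *    assert(PFL_COUNT(packed) == 5);
 */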
static bool
anv_ptr_free_list_pop(void **list, void **elem)
{
   void *current = *list;
   while (PFL_PTR(current) != NULL) {
      void **next_ptr = PFL_PTR(current);
      void *new_ptr = VG_NOACCESS_READ(next_ptr);
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(new_ptr, new_count);
      void *old = __sync_val_compare_and_swap(list, current, new);
      if (old == current) {
         *elem = PFL_PTR(current);
         return true;
      }
      current = old;
   }

   return false;
}
static void
anv_ptr_free_list_push(void **list, void *elem)
{
   void *old, *current;
   void **next_ptr = elem;

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current));
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(elem, new_count);
      old = __sync_val_compare_and_swap(list, current, new);
   } while (old != current);
}
static int
anv_block_pool_grow(struct anv_block_pool *pool);

void
anv_block_pool_init(struct anv_block_pool *pool,
                    struct anv_device *device, uint32_t block_size)
{
   assert(is_power_of_two(block_size));

   pool->device = device;
   pool->bo.gem_handle = 0;
   pool->bo.map = NULL;
   pool->size = 0;
   pool->block_size = block_size;
   pool->next_block = 0;
   pool->free_list = ANV_FREE_LIST_EMPTY;
   anv_vector_init(&pool->mmap_cleanups,
                   round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);

   /* Immediately grow the pool so we'll have a backing bo. */
   anv_block_pool_grow(pool);
}
void
anv_block_pool_finish(struct anv_block_pool *pool)
{
   struct anv_mmap_cleanup *cleanup;

   anv_vector_foreach(cleanup, &pool->mmap_cleanups) {
      if (cleanup->map)
         munmap(cleanup->map, cleanup->size);
      if (cleanup->gem_handle)
         anv_gem_close(pool->device, cleanup->gem_handle);
   }

   anv_vector_finish(&pool->mmap_cleanups);

   close(pool->fd);
}
static int
anv_block_pool_grow(struct anv_block_pool *pool)
{
   size_t size;
   void *map;
   int gem_handle;
   struct anv_mmap_cleanup *cleanup;

   if (pool->size == 0) {
      size = 32 * pool->block_size;
   } else {
      size = pool->size * 2;
   }

   cleanup = anv_vector_add(&pool->mmap_cleanups);
   if (!cleanup)
      return -1;
   *cleanup = ANV_MMAP_CLEANUP_INIT;

   if (pool->size == 0)
      pool->fd = memfd_create("block pool", MFD_CLOEXEC);

   if (pool->fd == -1)
      return -1;

   if (ftruncate(pool->fd, size) == -1)
      return -1;

   /* First try to see if mremap can grow the map in place. */
   map = MAP_FAILED;
   if (pool->size > 0)
      map = mremap(pool->map, pool->size, size, 0);
   if (map == MAP_FAILED) {
      /* Just leak the old map until we destroy the pool.  We can't munmap it
       * without races or imposing locking on the block allocate fast path. On
       * the whole the leaked maps add up to less than the size of the
       * current map.  MAP_POPULATE seems like the right thing to do, but we
       * should try to get some numbers.
       */
      map = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_POPULATE, pool->fd, 0);
      cleanup->map = map;
      cleanup->size = size;
   }
   if (map == MAP_FAILED)
      return -1;

   gem_handle = anv_gem_userptr(pool->device, map, size);
   if (gem_handle == 0)
      return -1;
   cleanup->gem_handle = gem_handle;

   /* Now that we successfully allocated everything, we can write the new
    * values back into pool. */
   pool->map = map;
   pool->bo.gem_handle = gem_handle;
   pool->bo.size = size;
   pool->bo.map = map;

   /* Write size last and after the memory barrier here.  We need the memory
    * barrier to make sure map and gem_handle are written before other threads
    * see the new size.  A thread could allocate a block and then go try using
    * the old pool->map and access out of bounds. */

   __sync_synchronize();
   pool->size = size;

   return 0;
}
uint32_t
anv_block_pool_alloc(struct anv_block_pool *pool)
{
   uint32_t offset, block, size;

   /* Try free list first. */
   if (anv_free_list_pop(&pool->free_list, &pool->map, &offset)) {
      return offset;
   }

 restart:
   size = pool->size;
   block = __sync_fetch_and_add(&pool->next_block, pool->block_size);
   if (block < size) {
      return block;
   } else if (block == size) {
      /* We allocated the first block outside the pool, we have to grow it.
       * pool->next_block acts as a mutex: threads who try to allocate now
       * will get block indexes above the current limit and hit futex_wait
       * below. */
      int err = anv_block_pool_grow(pool);
      assert(err == 0);
      (void) err;
      futex_wake(&pool->size, INT_MAX);
      return block;
   } else {
      futex_wait(&pool->size, size);
      __sync_fetch_and_add(&pool->next_block, -pool->block_size);
      goto restart;
   }
}
void
anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset)
{
   anv_free_list_push(&pool->free_list, pool->map, offset);
}
static void
anv_fixed_size_state_pool_init(struct anv_fixed_size_state_pool *pool,
                               size_t state_size)
{
   /* At least a cache line and must divide the block size. */
   assert(state_size >= 64 && is_power_of_two(state_size));

   pool->state_size = state_size;
   pool->free_list = ANV_FREE_LIST_EMPTY;
   pool->block.next = 0;
   pool->block.end = 0;
}
static uint32_t
anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool,
                                struct anv_block_pool *block_pool)
{
   uint32_t offset;
   struct anv_block_state block, old, new;

   /* Try free list first. */
   if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset))
      return offset;

   /* If free list was empty (or somebody raced us and took the items) we
    * allocate a new item from the end of the block */
 restart:
   block.u64 = __sync_fetch_and_add(&pool->block.u64, pool->state_size);

   if (block.next < block.end) {
      return block.next;
   } else if (block.next == block.end) {
      new.next = anv_block_pool_alloc(block_pool);
      new.end = new.next + block_pool->block_size;
      old.u64 = __sync_fetch_and_add(&pool->block.u64, new.u64 - block.u64);
      if (old.next != block.next)
         futex_wake(&pool->block.end, INT_MAX);
      return new.next;
   } else {
      futex_wait(&pool->block.end, block.end);
      __sync_fetch_and_add(&pool->block.u64, -pool->state_size);
      goto restart;
   }
}
static void
anv_fixed_size_state_pool_free(struct anv_fixed_size_state_pool *pool,
                               struct anv_block_pool *block_pool,
                               uint32_t offset)
{
   anv_free_list_push(&pool->free_list, block_pool->map, offset);
}
void
anv_state_pool_init(struct anv_state_pool *pool,
                    struct anv_block_pool *block_pool)
{
   pool->block_pool = block_pool;
   for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
      size_t size = 1 << (ANV_MIN_STATE_SIZE_LOG2 + i);
      anv_fixed_size_state_pool_init(&pool->buckets[i], size);
   }
}
struct anv_state
anv_state_pool_alloc(struct anv_state_pool *pool, size_t size, size_t align)
{
   unsigned size_log2 = ilog2_round_up(size < align ? align : size);
   assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
   if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
      size_log2 = ANV_MIN_STATE_SIZE_LOG2;
   unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;

   struct anv_state state;
   state.alloc_size = 1 << size_log2;
   state.offset = anv_fixed_size_state_pool_alloc(&pool->buckets[bucket],
                                                  pool->block_pool);
   state.map = pool->block_pool->map + state.offset;
   VG(VALGRIND_MALLOCLIKE_BLOCK(state.map, size, 0, false));
   return state;
}
void
anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
{
   assert(is_power_of_two(state.alloc_size));
   unsigned size_log2 = ilog2_round_up(state.alloc_size);
   assert(size_log2 >= ANV_MIN_STATE_SIZE_LOG2 &&
          size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
   unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;

   VG(VALGRIND_FREELIKE_BLOCK(state.map, 0));
   anv_fixed_size_state_pool_free(&pool->buckets[bucket],
                                  pool->block_pool, state.offset);
}
#define NULL_BLOCK 1
struct stream_block {
   uint32_t next;

   /* The map for the BO at the time the block was given to us */
   void *current_map;

#ifdef HAVE_VALGRIND
   void *_vg_ptr;
#endif
};
/* The state stream allocator is a one-shot, single threaded allocator for
 * variable sized blocks.  We use it for allocating dynamic state.
 */
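/* Layout of one stream block within the block pool bo (offsets derived from
 * the code below, not separately documented):
 *
 *    block ..................... struct stream_block header
 *                                (next link, map snapshot, valgrind ptr)
 *    block + sizeof(header) .... first possible state offset
 *    block + block_size ........ stream->end; a new block is chained in
 *                                once an aligned allocation would cross it
 */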
void
anv_state_stream_init(struct anv_state_stream *stream,
                      struct anv_block_pool *block_pool)
{
   stream->block_pool = block_pool;
   stream->next = 0;
   stream->end = 0;
   stream->current_block = NULL_BLOCK;
}
void
anv_state_stream_finish(struct anv_state_stream *stream)
{
   struct stream_block *sb;
   uint32_t block, next_block;

   block = stream->current_block;
   while (block != NULL_BLOCK) {
      sb = stream->block_pool->map + block;
      next_block = VG_NOACCESS_READ(&sb->next);
      VG(VALGRIND_FREELIKE_BLOCK(VG_NOACCESS_READ(&sb->_vg_ptr), 0));
      anv_block_pool_free(stream->block_pool, block);
      block = next_block;
   }
}
struct anv_state
anv_state_stream_alloc(struct anv_state_stream *stream,
                       uint32_t size, uint32_t alignment)
{
   struct stream_block *sb;
   struct anv_state state;
   uint32_t block;

   state.offset = align_u32(stream->next, alignment);
   if (state.offset + size > stream->end) {
      block = anv_block_pool_alloc(stream->block_pool);
      void *current_map = stream->block_pool->map;
      sb = current_map + block;
      VG_NOACCESS_WRITE(&sb->current_map, current_map);
      VG_NOACCESS_WRITE(&sb->next, stream->current_block);
      VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, 0));
      stream->current_block = block;
      stream->next = block + sizeof(*sb);
      stream->end = block + stream->block_pool->block_size;
      state.offset = align_u32(stream->next, alignment);
      assert(state.offset + size <= stream->end);
   }

   sb = stream->block_pool->map + stream->current_block;
   void *current_map = VG_NOACCESS_READ(&sb->current_map);

   state.map = current_map + state.offset;
   state.alloc_size = size;

#ifdef HAVE_VALGRIND
   void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
   if (vg_ptr == NULL) {
      vg_ptr = state.map;
      VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
      VALGRIND_MALLOCLIKE_BLOCK(vg_ptr, size, 0, false);
   } else {
      ptrdiff_t vg_offset = vg_ptr - current_map;
      assert(vg_offset >= stream->current_block &&
             vg_offset < stream->end);
      VALGRIND_RESIZEINPLACE_BLOCK(vg_ptr,
                                   stream->next - vg_offset,
                                   (state.offset + size) - vg_offset,
                                   0);
   }
#endif

   stream->next = state.offset + size;

   return state;
}
struct bo_pool_bo_link {
   struct bo_pool_bo_link *next;
   struct anv_bo bo;
};
void
anv_bo_pool_init(struct anv_bo_pool *pool,
                 struct anv_device *device, uint32_t bo_size)
{
   pool->device = device;
   pool->bo_size = bo_size;
   pool->free_list = NULL;
}
void
anv_bo_pool_finish(struct anv_bo_pool *pool)
{
   struct bo_pool_bo_link *link = PFL_PTR(pool->free_list);
   while (link != NULL) {
      struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);

      /* The anv_gem_m[un]map() functions are also valgrind-safe so they
       * act as an alloc/free.  In order to avoid a double-free warning, we
       * need to mark this as malloc'd before we unmap it.
       */
      VG(VALGRIND_MALLOCLIKE_BLOCK(link_copy.bo.map, pool->bo_size, 0, false));

      anv_gem_munmap(link_copy.bo.map, pool->bo_size);
      anv_gem_close(pool->device, link_copy.bo.gem_handle);
      link = link_copy.next;
   }
}
VkResult
anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo)
{
   VkResult result;

   void *next_free_void;
   if (anv_ptr_free_list_pop(&pool->free_list, &next_free_void)) {
      struct bo_pool_bo_link *next_free = next_free_void;
      *bo = VG_NOACCESS_READ(&next_free->bo);
      assert(bo->map == next_free);
      assert(bo->size == pool->bo_size);

      VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, pool->bo_size, 0, false));

      return VK_SUCCESS;
   }

   struct anv_bo new_bo;

   result = anv_bo_init_new(&new_bo, pool->device, pool->bo_size);
   if (result != VK_SUCCESS)
      return result;

   assert(new_bo.size == pool->bo_size);

   new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pool->bo_size);
   if (new_bo.map == NULL) {
      anv_gem_close(pool->device, new_bo.gem_handle);
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
   }

   /* We don't need to call VALGRIND_MALLOCLIKE_BLOCK here because gem_mmap
    * calls it for us.  If we really want to be pedantic we could do a
    * VALGRIND_FREELIKE_BLOCK right after the mmap, but there's no good
    * reason.
    */

   *bo = new_bo;
   return VK_SUCCESS;
}
void
anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo)
{
   struct bo_pool_bo_link *link = bo->map;
   link->bo = *bo;

   VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));
   anv_ptr_free_list_push(&pool->free_list, link);
}
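/* A minimal usage sketch (illustrative; batch_pool and batch_bo are
 * hypothetical locals, 16384 an assumed size):
 *
 *    struct anv_bo_pool batch_pool;
 *    anv_bo_pool_init(&batch_pool, device, 16384);
 *
 *    struct anv_bo batch_bo;
 *    if (anv_bo_pool_alloc(&batch_pool, &batch_bo) == VK_SUCCESS) {
 *       // ... fill batch_bo.map ...
 *       anv_bo_pool_free(&batch_pool, &batch_bo);
 *    }
 *
 *    anv_bo_pool_finish(&batch_pool);
 *
 * Freed bos keep their mapping: the pool reuses the start of the map as the
 * free list link, which is why anv_bo_pool_alloc() asserts bo->map ==
 * next_free on reuse.
 */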