/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>
#include <assert.h>
#include <linux/futex.h>
#include <linux/memfd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#include "anv_private.h"

#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({                       \
   VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
   __typeof(*(__ptr)) __val = *(__ptr);                  \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));\
   __val;                                                \
})
#define VG_NOACCESS_WRITE(__ptr, __val) ({                 \
   VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr))); \
   *(__ptr) = (__val);                                     \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));  \
})
#else
#define VG_NOACCESS_READ(__ptr) (*(__ptr))
#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
#endif

/* Design goals:
 *
 *  - Lock free (except when resizing underlying bos)
 *
 *  - Constant time allocation with typically only one atomic
 *
 *  - Multiple allocation sizes without fragmentation
 *
 *  - Can grow while keeping addresses and offset of contents stable
 *
 *  - All allocations within one bo so we can point one of the
 *    STATE_BASE_ADDRESS pointers at it.
 *
 * The overall design is a two-level allocator: the top level is a fixed-size,
 * big block (8k) allocator, which operates out of a bo.  Allocation is done
 * by either pulling a block from the free list or growing the used range of
 * the bo.  Growing the range may run out of space in the bo, which we then
 * need to grow.  Growing the bo is tricky in a multi-threaded, lockless
 * environment: we need to keep all pointers and contents in the old map
 * valid.  GEM bos in general can't grow, but we use a trick: we create a
 * memfd and use ftruncate to grow it as necessary.  We mmap the new size and
 * then create a gem bo for it using the new gem userptr ioctl.  Without
 * heavy-handed locking around our allocation fast-path, there isn't really a
 * way to munmap the old mmap, so we just keep it around until garbage
 * collection time.  While the block allocator is lockless for normal
 * operations, we block other threads trying to allocate while we're growing
 * the map.  It shouldn't happen often, and growing is fast anyway.
 *
 * At the next level we can use various sub-allocators.  The state pool is a
 * pool of smaller, fixed-size objects, which operates much like the block
 * pool.  It uses a free list for freeing objects, but when it runs out of
 * space it just allocates a new block from the block pool.  This allocator is
 * intended for longer-lived state objects such as SURFACE_STATE and most
 * other persistent state objects in the API.  We may need to track more info
 * with these objects and a pointer back to the CPU object (e.g. VkImage).  In
 * those cases we just allocate a slightly bigger object and put the extra
 * state after the GPU state object.
 *
 * The state stream allocator works similarly to how the i965 DRI driver
 * streams all its state.  Even with Vulkan, we need to emit transient state
 * (whether surface state base or dynamic state base), and for that we can
 * just get a block and fill it up.  These cases are local to a command buffer
 * and the sub-allocator need not be thread safe.  The streaming allocator
 * gets a new block when it runs out of space and chains them together so
 * they can be easily freed.
 */

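/* A minimal usage sketch of the two-level design described above.  This is
 * purely illustrative ("device" is a placeholder and error handling is
 * omitted); the real pools are set up during device creation:
 *
 *    struct anv_block_pool block_pool;
 *    struct anv_state_pool state_pool;
 *
 *    anv_block_pool_init(&block_pool, device, 8192);
 *    anv_state_pool_init(&state_pool, &block_pool);
 *
 *    struct anv_state state = anv_state_pool_alloc(&state_pool, 64, 64);
 *    ... fill state.map on the CPU; state.offset is what the GPU sees,
 *        relative to the STATE_BASE_ADDRESS that points at the pool's bo ...
 *    anv_state_pool_free(&state_pool, state);
 *
 *    anv_state_pool_finish(&state_pool);
 *    anv_block_pool_finish(&block_pool);
 */
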
/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
 * We use it to indicate the free list is empty.
 */
#define EMPTY 1

struct anv_mmap_cleanup {
   void *map;
   size_t size;
   uint32_t gem_handle;
};

#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})

static inline long
sys_futex(void *addr1, int op, int val1,
          struct timespec *timeout,
          void *addr2, int val3)
{
   return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
}

static inline int
futex_wake(uint32_t *addr, int count)
{
   return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0);
}

static inline int
futex_wait(uint32_t *addr, int32_t value)
{
   return sys_futex(addr, FUTEX_WAIT, value, NULL, NULL, 0);
}

static inline int
memfd_create(const char *name, unsigned int flags)
{
   return syscall(SYS_memfd_create, name, flags);
}

static inline uint32_t
ilog2_round_up(uint32_t value)
{
   assert(value != 0);
   return 32 - __builtin_clz(value - 1);
}

static inline uint32_t
round_to_power_of_two(uint32_t value)
{
   return 1 << ilog2_round_up(value);
}

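/* Worked examples for the two helpers above:
 *
 *    ilog2_round_up(64) == 6,  round_to_power_of_two(64) == 64
 *    ilog2_round_up(65) == 7,  round_to_power_of_two(65) == 128
 *
 * i.e. values round up to the next power of two, and exact powers of two
 * stay put.
 */
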
static bool
anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
{
   union anv_free_list current, new, old;

   current.u64 = list->u64;
   while (current.offset != EMPTY) {
      /* We have to add a memory barrier here so that the list head (and
       * offset) gets read before we read the map pointer.  This way we
       * know that the map pointer is valid for the given offset at the
       * point where we read it.
       */
      __sync_synchronize();

      int32_t *next_ptr = *map + current.offset;
      new.offset = VG_NOACCESS_READ(next_ptr);
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
      if (old.u64 == current.u64) {
         *offset = current.offset;
         return true;
      }
      current = old;
   }

   return false;
}

static void
anv_free_list_push(union anv_free_list *list, void *map, int32_t offset)
{
   union anv_free_list current, old, new;
   int32_t *next_ptr = map + offset;

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, current.offset);
      new.offset = offset;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
   } while (old.u64 != current.u64);
}

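/* Why the free list carries a "count" field: the 64-bit CAS above compares
 * the (offset, count) pair as a whole, which defeats the classic ABA problem.
 * Assuming the union declared in anv_private.h looks roughly like this
 * sketch:
 *
 *    union anv_free_list {
 *       struct {
 *          int32_t offset;    // head of the list, EMPTY (1) when empty
 *          uint32_t count;    // bumped on every push/pop
 *       };
 *       uint64_t u64;         // what __sync_val_compare_and_swap operates on
 *    };
 *
 * then a head that was popped and later pushed back between our read and our
 * CAS still carries a different count, so the stale CAS fails and we retry.
 */
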
/* All pointers in the ptr_free_list are assumed to be page-aligned.  This
 * means that the bottom 12 bits should all be zero.
 */
#define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
#define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff))
#define PFL_PACK(ptr, count) ({                                            \
   (void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff));   \
})

static bool
anv_ptr_free_list_pop(void **list, void **elem)
{
   void *current = *list;
   while (PFL_PTR(current) != NULL) {
      void **next_ptr = PFL_PTR(current);
      void *new_ptr = VG_NOACCESS_READ(next_ptr);
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(new_ptr, new_count);
      void *old = __sync_val_compare_and_swap(list, current, new);
      if (old == current) {
         *elem = PFL_PTR(current);
         return true;
      }
      current = old;
   }

   return false;
}

static void
anv_ptr_free_list_push(void **list, void *elem)
{
   void *old, *current;
   void **next_ptr = elem;

   /* The pointer-based free list requires that the pointer be
    * page-aligned.  This is because we use the bottom 12 bits of the
    * pointer to store a counter to solve the ABA concurrency problem.
    */
   assert(((uintptr_t)elem & 0xfff) == 0);

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current));
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(elem, new_count);
      old = __sync_val_compare_and_swap(list, current, new);
   } while (old != current);
}

static uint32_t
anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state);

void
anv_block_pool_init(struct anv_block_pool *pool,
                    struct anv_device *device, uint32_t block_size)
{
   assert(util_is_power_of_two(block_size));

   pool->device = device;
   anv_bo_init(&pool->bo, 0, 0);
   pool->block_size = block_size;
   pool->free_list = ANV_FREE_LIST_EMPTY;
   pool->back_free_list = ANV_FREE_LIST_EMPTY;

   pool->fd = memfd_create("block pool", MFD_CLOEXEC);
   if (pool->fd == -1)
      return;

   /* Just make it 2GB up-front.  The Linux kernel won't actually back it
    * with pages until we either map and fault on one of them or we use
    * userptr and send a chunk of it off to the GPU.
    */
   if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1)
      return;

   u_vector_init(&pool->mmap_cleanups,
                 round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);

   pool->state.next = 0;
   pool->state.end = 0;
   pool->back_state.next = 0;
   pool->back_state.end = 0;

   /* Immediately grow the pool so we'll have a backing bo. */
   pool->state.end = anv_block_pool_grow(pool, &pool->state);
}

void
anv_block_pool_finish(struct anv_block_pool *pool)
{
   struct anv_mmap_cleanup *cleanup;

   u_vector_foreach(cleanup, &pool->mmap_cleanups) {
      if (cleanup->map)
         munmap(cleanup->map, cleanup->size);
      if (cleanup->gem_handle)
         anv_gem_close(pool->device, cleanup->gem_handle);
   }

   u_vector_finish(&pool->mmap_cleanups);

   close(pool->fd);
}

#define PAGE_SIZE 4096

/** Grows and re-centers the block pool.
 *
 * We grow the block pool in one or both directions in such a way that the
 * following conditions are met:
 *
 *  1) The size of the entire pool is always a power of two.
 *
 *  2) The pool only grows on both ends.  Neither end can get
 *     shortened.
 *
 *  3) At the end of the allocation, we have about twice as much space
 *     allocated for each end as we have used.  This way the pool doesn't
 *     grow too far in one direction or the other.
 *
 *  4) If the _alloc_back() has never been called, then the back portion of
 *     the pool retains a size of zero.  (This makes it easier for users of
 *     the block pool that only want a one-sided pool.)
 *
 *  5) We have enough space allocated for at least one more block in
 *     whichever side `state` points to.
 *
 *  6) The center of the pool is always aligned to both the block_size of
 *     the pool and a 4K CPU page.
 */

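/* A concrete, purely illustrative run of the re-centering math below,
 * assuming an 8k block size: say back_used = 4096 and front_used = 12288
 * after page alignment, so total_used = 16384, and suppose the new size
 * comes out to 65536.  Then center_bo_offset = (65536 * 4096) / 16384 =
 * 16384, which is already a multiple of the 8192-byte granularity, so the
 * back end gets 16k and the front end gets 48k -- each side ends up with at
 * least twice the space it currently uses.
 */
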
static uint32_t
anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
{
   size_t size;
   void *map;
   uint32_t gem_handle;
   struct anv_mmap_cleanup *cleanup;

   pthread_mutex_lock(&pool->device->mutex);

   assert(state == &pool->state || state == &pool->back_state);

   /* Gather a little usage information on the pool.  Since we may have
    * threads waiting in queue to get some storage while we resize, it's
    * actually possible that total_used will be larger than old_size.  In
    * particular, block_pool_alloc() increments state->next prior to
    * calling block_pool_grow, so this ensures that we get enough space for
    * whichever side tries to grow the pool.
    *
    * We align to a page size because it makes it easier to do our
    * calculations later in such a way that we stay page-aligned.
    */
   uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
   uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
   uint32_t total_used = front_used + back_used;

   assert(state == &pool->state || back_used > 0);

   size_t old_size = pool->bo.size;

   if (old_size != 0 &&
       back_used * 2 <= pool->center_bo_offset &&
       front_used * 2 <= (old_size - pool->center_bo_offset)) {
      /* If we're in this case then this isn't the first allocation and we
       * already have enough space on both sides to hold double what we
       * have allocated.  There's nothing for us to do.
       */
      goto done;
   }

   if (old_size == 0) {
      /* This is the first allocation */
      size = MAX2(32 * pool->block_size, PAGE_SIZE);
   } else {
      size = old_size * 2;
   }

   /* We can't have a block pool bigger than 1GB because we use signed
    * 32-bit offsets in the free list and we don't want overflow.  We
    * should never need a block pool bigger than 1GB anyway.
    */
   assert(size <= (1u << 31));

   /* We compute a new center_bo_offset such that, when we double the size
    * of the pool, we maintain the ratio of how much is used by each side.
    * This way things should remain more-or-less balanced.
    */
   uint32_t center_bo_offset;
   if (back_used == 0) {
      /* If we're in this case then we have never called alloc_back().  In
       * this case, we want to keep the offset at 0 to make things as simple
       * as possible for users that don't care about back allocations.
       */
      center_bo_offset = 0;
   } else {
      /* Try to "center" the allocation based on how much is currently in
       * use on each side of the center line.
       */
      center_bo_offset = ((uint64_t)size * back_used) / total_used;

      /* Align down to a multiple of both the block size and page size */
      uint32_t granularity = MAX2(pool->block_size, PAGE_SIZE);
      assert(util_is_power_of_two(granularity));
      center_bo_offset &= ~(granularity - 1);

      assert(center_bo_offset >= back_used);

      /* Make sure we don't shrink the back end of the pool */
      if (center_bo_offset < pool->back_state.end)
         center_bo_offset = pool->back_state.end;

      /* Make sure that we don't shrink the front end of the pool */
      if (size - center_bo_offset < pool->state.end)
         center_bo_offset = size - pool->state.end;
   }

   assert(center_bo_offset % pool->block_size == 0);
   assert(center_bo_offset % PAGE_SIZE == 0);

   /* Assert that we only ever grow the pool */
   assert(center_bo_offset >= pool->back_state.end);
   assert(size - center_bo_offset >= pool->state.end);

   cleanup = u_vector_add(&pool->mmap_cleanups);
   if (!cleanup)
      goto fail;
   *cleanup = ANV_MMAP_CLEANUP_INIT;

   /* Just leak the old map until we destroy the pool.  We can't munmap it
    * without races or imposing locking on the block allocate fast path.  On
    * the whole the leaked maps add up to less than the size of the
    * current map.  MAP_POPULATE seems like the right thing to do, but we
    * should try to get some numbers.
    */
   map = mmap(NULL, size, PROT_READ | PROT_WRITE,
              MAP_SHARED | MAP_POPULATE, pool->fd,
              BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
   cleanup->map = map;
   cleanup->size = size;

   if (map == MAP_FAILED)
      goto fail;

   gem_handle = anv_gem_userptr(pool->device, map, size);
   if (gem_handle == 0)
      goto fail;
   cleanup->gem_handle = gem_handle;

   /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
    * I915_CACHING_NONE on non-LLC platforms.  However, userptr objects are
    * always created as I915_CACHING_CACHED, which on non-LLC means
    * snooped.  That can be useful but comes with a bit of overhead.  Since
    * we're explicitly clflushing and don't want the overhead, we need to
    * turn it off.
    */
   if (!pool->device->info.has_llc) {
      anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
      anv_gem_set_domain(pool->device, gem_handle,
                         I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
   }

   /* Now that we successfully allocated everything, we can write the new
    * values back into pool. */
   pool->map = map + center_bo_offset;
   pool->center_bo_offset = center_bo_offset;
   anv_bo_init(&pool->bo, gem_handle, size);

done:
   pthread_mutex_unlock(&pool->device->mutex);

   /* Return the appropriate new size.  This function never actually
    * updates state->next.  Instead, we let the caller do that because it
    * needs to do so in order to maintain its concurrency model.
    */
   if (state == &pool->state) {
      return pool->bo.size - pool->center_bo_offset;
   } else {
      assert(pool->center_bo_offset > 0);
      return pool->center_bo_offset;
   }

fail:
   pthread_mutex_unlock(&pool->device->mutex);

   return 0;
}

static uint32_t
anv_block_pool_alloc_new(struct anv_block_pool *pool,
                         struct anv_block_state *pool_state)
{
   struct anv_block_state state, old, new;

   while (1) {
      state.u64 = __sync_fetch_and_add(&pool_state->u64, pool->block_size);
      if (state.next < state.end) {
         assert(pool->map);
         return state.next;
      } else if (state.next == state.end) {
         /* We allocated the first block outside the pool, so we have to grow
          * it.  pool_state->next acts as a mutex: threads who try to
          * allocate now will get block indexes above the current limit and
          * hit futex_wait below.
          */
         new.next = state.next + pool->block_size;
         new.end = anv_block_pool_grow(pool, pool_state);
         assert(new.end >= new.next && new.end % pool->block_size == 0);
         old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
         if (old.next != state.next)
            futex_wake(&pool_state->end, INT_MAX);
         return state.next;
      } else {
         futex_wait(&pool_state->end, state.end);
         continue;
      }
   }
}

int32_t
anv_block_pool_alloc(struct anv_block_pool *pool)
{
   int32_t offset;

   /* Try free list first. */
   if (anv_free_list_pop(&pool->free_list, &pool->map, &offset)) {
      assert(offset >= 0);
      assert(pool->map);
      return offset;
   }

   return anv_block_pool_alloc_new(pool, &pool->state);
}

/* Allocates a block out of the back of the block pool.
 *
 * This will allocate a block earlier than the "start" of the block pool.
 * The offsets returned from this function will be negative but will still
 * be correct relative to the block pool's map pointer.
 *
 * If you ever use anv_block_pool_alloc_back, then you will have to do
 * gymnastics with the block pool's BO when doing relocations.
 */
int32_t
anv_block_pool_alloc_back(struct anv_block_pool *pool)
{
   int32_t offset;

   /* Try free list first. */
   if (anv_free_list_pop(&pool->back_free_list, &pool->map, &offset)) {
      assert(offset < 0);
      assert(pool->map);
      return offset;
   }

   offset = anv_block_pool_alloc_new(pool, &pool->back_state);

   /* The offset we get out of anv_block_pool_alloc_new() is actually the
    * number of bytes downwards from the middle to the end of the block.
    * We need to turn it into a (negative) offset from the middle to the
    * start of the block.
    */
   assert(offset >= 0);
   return -(offset + pool->block_size);
}

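/* Illustrative example: with an 8k block size, the first back allocation gets
 * offset 0 from anv_block_pool_alloc_new(), which this function returns as
 * -(0 + 8192) = -8192.  The block therefore occupies map[-8192] through
 * map[-1], i.e. it sits immediately before the pool's center, and
 * anv_block_pool_free() below can tell front from back blocks by the sign.
 */
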
void
anv_block_pool_free(struct anv_block_pool *pool, int32_t offset)
{
   if (offset < 0) {
      anv_free_list_push(&pool->back_free_list, pool->map, offset);
   } else {
      anv_free_list_push(&pool->free_list, pool->map, offset);
   }
}

static void
anv_fixed_size_state_pool_init(struct anv_fixed_size_state_pool *pool,
                               size_t state_size)
{
   /* At least a cache line and must divide the block size. */
   assert(state_size >= 64 && util_is_power_of_two(state_size));

   pool->state_size = state_size;
   pool->free_list = ANV_FREE_LIST_EMPTY;
   pool->block.next = 0;
   pool->block.end = 0;
}

static uint32_t
anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool,
                                struct anv_block_pool *block_pool)
{
   int32_t offset;
   struct anv_block_state block, old, new;

   /* Try free list first. */
   if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset)) {
      assert(offset >= 0);
      return offset;
   }

   /* If free list was empty (or somebody raced us and took the items) we
    * allocate a new item from the end of the block */
 restart:
   block.u64 = __sync_fetch_and_add(&pool->block.u64, pool->state_size);

   if (block.next < block.end) {
      return block.next;
   } else if (block.next == block.end) {
      offset = anv_block_pool_alloc(block_pool);
      new.next = offset + pool->state_size;
      new.end = offset + block_pool->block_size;
      old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
      if (old.next != block.next)
         futex_wake(&pool->block.end, INT_MAX);
      return offset;
   } else {
      futex_wait(&pool->block.end, block.end);
      goto restart;
   }
}

static void
anv_fixed_size_state_pool_free(struct anv_fixed_size_state_pool *pool,
                               struct anv_block_pool *block_pool,
                               uint32_t offset)
{
   anv_free_list_push(&pool->free_list, block_pool->map, offset);
}

void
anv_state_pool_init(struct anv_state_pool *pool,
                    struct anv_block_pool *block_pool)
{
   pool->block_pool = block_pool;
   for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
      size_t size = 1 << (ANV_MIN_STATE_SIZE_LOG2 + i);
      anv_fixed_size_state_pool_init(&pool->buckets[i], size);
   }
   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
}

void
anv_state_pool_finish(struct anv_state_pool *pool)
{
   VG(VALGRIND_DESTROY_MEMPOOL(pool));
}

struct anv_state
anv_state_pool_alloc(struct anv_state_pool *pool, size_t size, size_t align)
{
   unsigned size_log2 = ilog2_round_up(size < align ? align : size);
   assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
   if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
      size_log2 = ANV_MIN_STATE_SIZE_LOG2;
   unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;

   struct anv_state state;
   state.alloc_size = 1 << size_log2;
   state.offset = anv_fixed_size_state_pool_alloc(&pool->buckets[bucket],
                                                  pool->block_pool);
   state.map = pool->block_pool->map + state.offset;
   VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
   return state;
}

void
anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
{
   assert(util_is_power_of_two(state.alloc_size));
   unsigned size_log2 = ilog2_round_up(state.alloc_size);
   assert(size_log2 >= ANV_MIN_STATE_SIZE_LOG2 &&
          size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
   unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;

   VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
   anv_fixed_size_state_pool_free(&pool->buckets[bucket],
                                  pool->block_pool, state.offset);
}

struct anv_state_stream_block {
   /* The next block in the chain */
   struct anv_state_stream_block *next;

   /* The offset into the block pool at which this block starts */
   uint32_t offset;

#ifdef HAVE_VALGRIND
   /* A pointer to the first user-allocated thing in this block.  This is
    * what valgrind sees as the start of the block.
    */
   void *_vg_ptr;
#endif
};

/* The state stream allocator is a one-shot, single threaded allocator for
 * variable sized blocks.  We use it for allocating dynamic state.
 */
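/* A minimal usage sketch (illustrative only; the block pool name is a
 * placeholder for one of the device's pools, e.g. the dynamic state block
 * pool):
 *
 *    struct anv_state_stream stream;
 *    anv_state_stream_init(&stream, &device->dynamic_state_block_pool);
 *
 *    struct anv_state s = anv_state_stream_alloc(&stream, 256, 32);
 *    ... emit up to 256 bytes of state into s.map; s.offset is GPU-visible ...
 *
 *    anv_state_stream_finish(&stream);   // returns all blocks to the pool
 */
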
void
anv_state_stream_init(struct anv_state_stream *stream,
                      struct anv_block_pool *block_pool)
{
   stream->block_pool = block_pool;
   stream->block = NULL;

   /* Ensure that next + whatever > end.  This way the first call to
    * state_stream_alloc fetches a new block.
    */
   stream->next = 1;
   stream->end = 0;

   VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
}

void
anv_state_stream_finish(struct anv_state_stream *stream)
{
   VG(const uint32_t block_size = stream->block_pool->block_size);

   struct anv_state_stream_block *next = stream->block;
   while (next != NULL) {
      VG(VALGRIND_MAKE_MEM_DEFINED(next, sizeof(*next)));
      struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
      VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
      VG(VALGRIND_MAKE_MEM_UNDEFINED(next, block_size));
      anv_block_pool_free(stream->block_pool, sb.offset);
      next = sb.next;
   }

   VG(VALGRIND_DESTROY_MEMPOOL(stream));
}

struct anv_state
anv_state_stream_alloc(struct anv_state_stream *stream,
                       uint32_t size, uint32_t alignment)
{
   struct anv_state_stream_block *sb = stream->block;

   struct anv_state state;

   state.offset = align_u32(stream->next, alignment);
   if (state.offset + size > stream->end) {
      uint32_t block = anv_block_pool_alloc(stream->block_pool);
      sb = stream->block_pool->map + block;

      VG(VALGRIND_MAKE_MEM_UNDEFINED(sb, sizeof(*sb)));
      sb->next = stream->block;
      sb->offset = block;
      VG(sb->_vg_ptr = NULL);
      VG(VALGRIND_MAKE_MEM_NOACCESS(sb, stream->block_pool->block_size));

      stream->block = sb;
      stream->start = block;
      stream->next = block + sizeof(*sb);
      stream->end = block + stream->block_pool->block_size;

      state.offset = align_u32(stream->next, alignment);
      assert(state.offset + size <= stream->end);
   }

   assert(state.offset > stream->start);
   state.map = (void *)sb + (state.offset - stream->start);
   state.alloc_size = size;

#ifdef HAVE_VALGRIND
   void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
   if (vg_ptr == NULL) {
      vg_ptr = state.map;
      VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
      VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
   } else {
      void *state_end = state.map + state.alloc_size;
      /* This only updates the mempool.  The newly allocated chunk is still
       * marked as NOACCESS. */
      VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
      /* Mark the newly allocated chunk as undefined */
      VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
   }
#endif

   stream->next = state.offset + size;

   return state;
}

struct bo_pool_bo_link {
   struct bo_pool_bo_link *next;
   struct anv_bo bo;
};

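/* Bucket sizing example for the BO pool below: a request of 5000 bytes gives
 * size_log2 = ilog2_round_up(5000) = 13, so pow2_size = 8192 and bucket = 1
 * (bucket 0 holds 4096-byte BOs, bucket 1 holds 8192-byte BOs, and so on).
 * Freed BOs of a given power-of-two size are linked through the
 * bo_pool_bo_link stored at the start of their CPU mapping.
 */
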
void
anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device)
{
   pool->device = device;
   memset(pool->free_list, 0, sizeof(pool->free_list));

   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
}

void
anv_bo_pool_finish(struct anv_bo_pool *pool)
{
   for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
      struct bo_pool_bo_link *link = PFL_PTR(pool->free_list[i]);
      while (link != NULL) {
         struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);

         anv_gem_munmap(link_copy.bo.map, link_copy.bo.size);
         anv_gem_close(pool->device, link_copy.bo.gem_handle);
         link = link_copy.next;
      }
   }

   VG(VALGRIND_DESTROY_MEMPOOL(pool));
}

VkResult
anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo, uint32_t size)
{
   VkResult result;

   const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size);
   const unsigned pow2_size = 1 << size_log2;
   const unsigned bucket = size_log2 - 12;
   assert(bucket < ARRAY_SIZE(pool->free_list));

   void *next_free_void;
   if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) {
      struct bo_pool_bo_link *next_free = next_free_void;
      *bo = VG_NOACCESS_READ(&next_free->bo);
      assert(bo->gem_handle);
      assert(bo->map == next_free);
      assert(size <= bo->size);

      VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));

      return VK_SUCCESS;
   }

   struct anv_bo new_bo;

   result = anv_bo_init_new(&new_bo, pool->device, pow2_size);
   if (result != VK_SUCCESS)
      return result;

   assert(new_bo.size == pow2_size);

   new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
   if (new_bo.map == NULL) {
      anv_gem_close(pool->device, new_bo.gem_handle);
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
   }

   *bo = new_bo;

   VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));

   return VK_SUCCESS;
}

void
anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in)
{
   /* Make a copy in case the anv_bo happens to be stored in the BO itself */
   struct anv_bo bo = *bo_in;

   VG(VALGRIND_MEMPOOL_FREE(pool, bo.map));

   struct bo_pool_bo_link *link = bo.map;
   VG_NOACCESS_WRITE(&link->bo, bo);

   assert(util_is_power_of_two(bo.size));
   const unsigned size_log2 = ilog2_round_up(bo.size);
   const unsigned bucket = size_log2 - 12;
   assert(bucket < ARRAY_SIZE(pool->free_list));

   anv_ptr_free_list_push(&pool->free_list[bucket], link);
}

void
anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
{
   memset(pool, 0, sizeof(*pool));
}

void
anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
{
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      for (unsigned i = 0; i < 16; i++) {
         struct anv_scratch_bo *bo = &pool->bos[i][s];
         if (bo->exists)
            anv_gem_close(device, bo->bo.gem_handle);
      }
   }
}

struct anv_bo *
anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
                       gl_shader_stage stage, unsigned per_thread_scratch)
{
   if (per_thread_scratch == 0)
      return NULL;

   unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
   assert(scratch_size_log2 < 16);

   struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage];

   /* We can use "exists" to shortcut and ignore the critical section */
   if (bo->exists)
      return &bo->bo;

   pthread_mutex_lock(&device->mutex);

   __sync_synchronize();
   if (bo->exists) {
      pthread_mutex_unlock(&device->mutex);
      return &bo->bo;
   }

   const struct anv_physical_device *physical_device =
      &device->instance->physicalDevice;
   const struct gen_device_info *devinfo = &physical_device->info;

   /* WaCSScratchSize:hsw
    *
    * Haswell's scratch space address calculation appears to be sparse
    * rather than tightly packed.  The Thread ID has bits indicating which
    * subslice, EU within a subslice, and thread within an EU it is.
    * There's a maximum of two slices and two subslices, so these can be
    * stored with a single bit.  Even though there are only 10 EUs per
    * subslice, this is stored in 4 bits, so there's an effective maximum
    * value of 16 EUs.  Similarly, although there are only 7 threads per EU,
    * this is stored in a 3 bit number, giving an effective maximum value
    * of 8 threads per EU.
    *
    * This means that we need to use 16 * 8 instead of 10 * 7 for the
    * number of threads per subslice.
    */
   const unsigned subslices = MAX2(physical_device->subslice_total, 1);
   const unsigned scratch_ids_per_subslice =
      device->info.is_haswell ? 16 * 8 : devinfo->max_cs_threads;

   uint32_t max_threads[] = {
      [MESA_SHADER_VERTEX]    = devinfo->max_vs_threads,
      [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
      [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
      [MESA_SHADER_GEOMETRY]  = devinfo->max_gs_threads,
      [MESA_SHADER_FRAGMENT]  = devinfo->max_wm_threads,
      [MESA_SHADER_COMPUTE]   = scratch_ids_per_subslice * subslices,
   };

   uint32_t size = per_thread_scratch * max_threads[stage];

   anv_bo_init_new(&bo->bo, device, size);

   /* Set the exists last because it may be read by other threads */
   __sync_synchronize();
   bo->exists = true;

   pthread_mutex_unlock(&device->mutex);

   return &bo->bo;
}