util: Move util_is_power_of_two to bitscan.h and rename to util_is_power_of_two_or_zero
[mesa.git] / src / intel / vulkan / anv_allocator.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25 #include <unistd.h>
26 #include <limits.h>
27 #include <assert.h>
28 #include <linux/memfd.h>
29 #include <sys/mman.h>
30
31 #include "anv_private.h"
32
33 #include "util/hash_table.h"
34 #include "util/simple_mtx.h"
35
36 #ifdef HAVE_VALGRIND
37 #define VG_NOACCESS_READ(__ptr) ({ \
38 VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
39 __typeof(*(__ptr)) __val = *(__ptr); \
40 VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));\
41 __val; \
42 })
43 #define VG_NOACCESS_WRITE(__ptr, __val) ({ \
44 VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr))); \
45 *(__ptr) = (__val); \
46 VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr))); \
47 })
48 #else
49 #define VG_NOACCESS_READ(__ptr) (*(__ptr))
50 #define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
51 #endif
52
53 /* Design goals:
54 *
55 * - Lock free (except when resizing underlying bos)
56 *
57 * - Constant time allocation with typically only one atomic
58 *
59 * - Multiple allocation sizes without fragmentation
60 *
61 * - Can grow while keeping addresses and offset of contents stable
62 *
63 * - All allocations within one bo so we can point one of the
64 * STATE_BASE_ADDRESS pointers at it.
65 *
66 * The overall design is a two-level allocator: top level is a fixed size, big
67 * block (8k) allocator, which operates out of a bo. Allocation is done by
68 * either pulling a block from the free list or growing the used range of the
69 * bo. Growing the range may run out of space in the bo which we then need to
70 * grow. Growing the bo is tricky in a multi-threaded, lockless environment:
71 * we need to keep all pointers and contents in the old map valid. GEM bos in
72 * general can't grow, but we use a trick: we create a memfd and use ftruncate
73 * to grow it as necessary. We mmap the new size and then create a gem bo for
74 * it using the new gem userptr ioctl. Without heavy-handed locking around
75 * our allocation fast-path, there isn't really a way to munmap the old mmap,
76 * so we just keep it around until garbage collection time. While the block
77 * allocator is lockless for normal operations, we block other threads trying
78 * to allocate while we're growing the map. It shouldn't happen often, and
79 * growing is fast anyway.
80 *
81 * At the next level we can use various sub-allocators. The state pool is a
82 * pool of smaller, fixed size objects, which operates much like the block
83 * pool. It uses a free list for freeing objects, but when it runs out of
84 * space it just allocates a new block from the block pool. This allocator is
85 * intended for longer lived state objects such as SURFACE_STATE and most
86 * other persistent state objects in the API. We may need to track more info
87 * with these objects and a pointer back to the CPU object (e.g. VkImage). In
88 * those cases we just allocate a slightly bigger object and put the extra
89 * state after the GPU state object.
90 *
91 * The state stream allocator works similarly to how the i965 DRI driver streams
92 * all its state. Even with Vulkan, we need to emit transient state (whether
93 * surface state base or dynamic state base), and for that we can just get a
94 * block and fill it up. These cases are local to a command buffer and the
95 * sub-allocator need not be thread safe. The streaming allocator gets a new
96 * block when it runs out of space and chains them together so they can be
97 * easily freed.
98 */
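
/* A rough usage sketch of the layering described above. The calls are the
 * real entry points defined later in this file; the sizes are arbitrary and
 * the snippet is illustrative only:
 *
 *    struct anv_state_pool pool;
 *    anv_state_pool_init(&pool, device, 4096, bo_flags);
 *
 *    // Long-lived state (e.g. a surface state) sub-allocated from the pool.
 *    struct anv_state surf = anv_state_pool_alloc(&pool, 64, 64);
 *
 *    // Transient state streamed out of blocks taken from the same pool.
 *    struct anv_state_stream stream;
 *    anv_state_stream_init(&stream, &pool, 16 * 1024);
 *    struct anv_state dyn = anv_state_stream_alloc(&stream, 256, 32);
 *
 *    anv_state_stream_finish(&stream);
 *    anv_state_pool_free(&pool, surf);
 *    anv_state_pool_finish(&pool);
 */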
99
100 /* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
101 * We use it to indicate the free list is empty. */
102 #define EMPTY 1
103
104 struct anv_mmap_cleanup {
105 void *map;
106 size_t size;
107 uint32_t gem_handle;
108 };
109
110 #define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})
111
112 #ifndef HAVE_MEMFD_CREATE
113 static inline int
114 memfd_create(const char *name, unsigned int flags)
115 {
116 return syscall(SYS_memfd_create, name, flags);
117 }
118 #endif
119
120 static inline uint32_t
121 ilog2_round_up(uint32_t value)
122 {
123 assert(value != 0);
124 return 32 - __builtin_clz(value - 1);
125 }
126
127 static inline uint32_t
128 round_to_power_of_two(uint32_t value)
129 {
130 return 1 << ilog2_round_up(value);
131 }
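
/* Example values for the two helpers above (illustrative; derived directly
 * from the code):
 *
 *    ilog2_round_up(2)  == 1    round_to_power_of_two(2)  == 2
 *    ilog2_round_up(17) == 5    round_to_power_of_two(17) == 32
 *    ilog2_round_up(64) == 6    round_to_power_of_two(64) == 64
 *
 * (value == 1 is not shown because __builtin_clz(0) is undefined.)
 */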
132
133 static bool
134 anv_free_list_pop(union anv_free_list *list, void **map, int32_t *offset)
135 {
136 union anv_free_list current, new, old;
137
138 current.u64 = list->u64;
139 while (current.offset != EMPTY) {
140 /* We have to add a memory barrier here so that the list head (and
141 * offset) gets read before we read the map pointer. This way we
142 * know that the map pointer is valid for the given offset at the
143 * point where we read it.
144 */
145 __sync_synchronize();
146
147 int32_t *next_ptr = *map + current.offset;
148 new.offset = VG_NOACCESS_READ(next_ptr);
149 new.count = current.count + 1;
150 old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
151 if (old.u64 == current.u64) {
152 *offset = current.offset;
153 return true;
154 }
155 current = old;
156 }
157
158 return false;
159 }
160
161 static void
162 anv_free_list_push(union anv_free_list *list, void *map, int32_t offset,
163 uint32_t size, uint32_t count)
164 {
165 union anv_free_list current, old, new;
166 int32_t *next_ptr = map + offset;
167
168 /* If we're returning more than one chunk, we need to build a chain to add
169 * to the list. Fortunately, we can do this without any atomics since we
170 * own everything in the chain right now. `offset` is left pointing to the
171 * head of our chain list while `next_ptr` points to the tail.
172 */
173 for (uint32_t i = 1; i < count; i++) {
174 VG_NOACCESS_WRITE(next_ptr, offset + i * size);
175 next_ptr = map + offset + i * size;
176 }
177
178 old = *list;
179 do {
180 current = old;
181 VG_NOACCESS_WRITE(next_ptr, current.offset);
182 new.offset = offset;
183 new.count = current.count + 1;
184 old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
185 } while (old.u64 != current.u64);
186 }
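
/* Usage sketch for the two helpers above (offsets are hypothetical): return
 * four 64-byte states starting at offset 8192 to a list, then pop one back:
 *
 *    union anv_free_list list = ANV_FREE_LIST_EMPTY;
 *    anv_free_list_push(&list, pool->map, 8192, 64, 4);
 *
 *    int32_t offset;
 *    if (anv_free_list_pop(&list, &pool->map, &offset))
 *       assert(offset == 8192);   // the head of the chain we just pushed
 *
 * The `count` half of the packed 64-bit list head changes on every push and
 * pop, which is what protects the compare-and-swap loops against the ABA
 * problem.
 */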
187
188 /* All pointers in the ptr_free_list are assumed to be page-aligned. This
189 * means that the bottom 12 bits should all be zero.
190 */
191 #define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
192 #define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff))
193 #define PFL_PACK(ptr, count) ({ \
194 (void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff)); \
195 })
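
/* Worked example for the macros above (the address is hypothetical): packing
 * a page-aligned pointer together with a serial count of 5:
 *
 *    void *elem   = (void *)0x7f1200042000;   // page-aligned
 *    void *packed = PFL_PACK(elem, 5);        // 0x7f1200042005
 *    assert(PFL_PTR(packed) == elem);
 *    assert(PFL_COUNT(packed) == 5);
 *
 * The 12-bit count wraps at 4096, which is enough to make an ABA collision
 * (same pointer *and* same count) extremely unlikely in practice.
 */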
196
197 static bool
198 anv_ptr_free_list_pop(void **list, void **elem)
199 {
200 void *current = *list;
201 while (PFL_PTR(current) != NULL) {
202 void **next_ptr = PFL_PTR(current);
203 void *new_ptr = VG_NOACCESS_READ(next_ptr);
204 unsigned new_count = PFL_COUNT(current) + 1;
205 void *new = PFL_PACK(new_ptr, new_count);
206 void *old = __sync_val_compare_and_swap(list, current, new);
207 if (old == current) {
208 *elem = PFL_PTR(current);
209 return true;
210 }
211 current = old;
212 }
213
214 return false;
215 }
216
217 static void
218 anv_ptr_free_list_push(void **list, void *elem)
219 {
220 void *old, *current;
221 void **next_ptr = elem;
222
223 /* The pointer-based free list requires that the pointer be
224 * page-aligned. This is because we use the bottom 12 bits of the
225 * pointer to store a counter to solve the ABA concurrency problem.
226 */
227 assert(((uintptr_t)elem & 0xfff) == 0);
228
229 old = *list;
230 do {
231 current = old;
232 VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current));
233 unsigned new_count = PFL_COUNT(current) + 1;
234 void *new = PFL_PACK(elem, new_count);
235 old = __sync_val_compare_and_swap(list, current, new);
236 } while (old != current);
237 }
238
239 static VkResult
240 anv_block_pool_expand_range(struct anv_block_pool *pool,
241 uint32_t center_bo_offset, uint32_t size);
242
243 VkResult
244 anv_block_pool_init(struct anv_block_pool *pool,
245 struct anv_device *device,
246 uint32_t initial_size,
247 uint64_t bo_flags)
248 {
249 VkResult result;
250
251 pool->device = device;
252 pool->bo_flags = bo_flags;
253 anv_bo_init(&pool->bo, 0, 0);
254
255 pool->fd = memfd_create("block pool", MFD_CLOEXEC);
256 if (pool->fd == -1)
257 return vk_error(VK_ERROR_INITIALIZATION_FAILED);
258
259 /* Just make it 2GB up-front. The Linux kernel won't actually back it
260 * with pages until we either map and fault on one of them or we use
261 * userptr and send a chunk of it off to the GPU.
262 */
263 if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
264 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
265 goto fail_fd;
266 }
267
268 if (!u_vector_init(&pool->mmap_cleanups,
269 round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
270 128)) {
271 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
272 goto fail_fd;
273 }
274
275 pool->state.next = 0;
276 pool->state.end = 0;
277 pool->back_state.next = 0;
278 pool->back_state.end = 0;
279
280 result = anv_block_pool_expand_range(pool, 0, initial_size);
281 if (result != VK_SUCCESS)
282 goto fail_mmap_cleanups;
283
284 return VK_SUCCESS;
285
286 fail_mmap_cleanups:
287 u_vector_finish(&pool->mmap_cleanups);
288 fail_fd:
289 close(pool->fd);
290
291 return result;
292 }
293
294 void
295 anv_block_pool_finish(struct anv_block_pool *pool)
296 {
297 struct anv_mmap_cleanup *cleanup;
298
299 u_vector_foreach(cleanup, &pool->mmap_cleanups) {
300 if (cleanup->map)
301 munmap(cleanup->map, cleanup->size);
302 if (cleanup->gem_handle)
303 anv_gem_close(pool->device, cleanup->gem_handle);
304 }
305
306 u_vector_finish(&pool->mmap_cleanups);
307
308 close(pool->fd);
309 }
310
311 #define PAGE_SIZE 4096
312
313 static VkResult
314 anv_block_pool_expand_range(struct anv_block_pool *pool,
315 uint32_t center_bo_offset, uint32_t size)
316 {
317 void *map;
318 uint32_t gem_handle;
319 struct anv_mmap_cleanup *cleanup;
320
321 /* Assert that we only ever grow the pool */
322 assert(center_bo_offset >= pool->back_state.end);
323 assert(size - center_bo_offset >= pool->state.end);
324
325 /* Assert that we don't go outside the bounds of the memfd */
326 assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
327 assert(size - center_bo_offset <=
328 BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);
329
330 cleanup = u_vector_add(&pool->mmap_cleanups);
331 if (!cleanup)
332 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
333
334 *cleanup = ANV_MMAP_CLEANUP_INIT;
335
336 /* Just leak the old map until we destroy the pool. We can't munmap it
337 * without races or imposing locking on the block allocate fast path. On
338 * the whole the leaked maps add up to less than the size of the
339 * current map. MAP_POPULATE seems like the right thing to do, but we
340 * should try to get some numbers.
341 */
342 map = mmap(NULL, size, PROT_READ | PROT_WRITE,
343 MAP_SHARED | MAP_POPULATE, pool->fd,
344 BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
345 if (map == MAP_FAILED)
346 return vk_errorf(pool->device->instance, pool->device,
347 VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
348
349 gem_handle = anv_gem_userptr(pool->device, map, size);
350 if (gem_handle == 0) {
351 munmap(map, size);
352 return vk_errorf(pool->device->instance, pool->device,
353 VK_ERROR_TOO_MANY_OBJECTS, "userptr failed: %m");
354 }
355
356 cleanup->map = map;
357 cleanup->size = size;
358 cleanup->gem_handle = gem_handle;
359
360 #if 0
361 /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
362 * I915_CACHING_NONE on non-LLC platforms. However, userptr objects are
363 * always created as I915_CACHING_CACHED, which on non-LLC means
364 * snooped. That can be useful but comes with a bit of overhead. Since
365 * we're explicitly clflushing and don't want the overhead, we need to turn
366 * it off. */
367 if (!pool->device->info.has_llc) {
368 anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_NONE);
369 anv_gem_set_domain(pool->device, gem_handle,
370 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
371 }
372 #endif
373
374 /* Now that we successfully allocated everything, we can write the new
375 * values back into pool. */
376 pool->map = map + center_bo_offset;
377 pool->center_bo_offset = center_bo_offset;
378
379 /* For block pool BOs we have to be a bit careful about where we place them
380 * in the GTT. There are two documented workarounds for state base address
381 * placement: Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset,
382 * which state that those two base addresses do not support 48-bit
383 * addresses and need to be placed in the bottom 32-bit range.
384 * Unfortunately, this is not quite accurate.
385 *
386 * The real problem is that we always set the size of our state pools in
387 * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
388 * likely significantly smaller. We do this because we do not know at the
389 * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
390 * the pool during command buffer building so we don't actually have a
391 * valid final size. If the address + size, as seen by STATE_BASE_ADDRESS
392 * overflows 48 bits, the GPU appears to treat all accesses to the buffer
393 * as being out of bounds and returns zero. For dynamic state, this
394 * usually just leads to rendering corruptions, but shaders that are all
395 * zero hang the GPU immediately.
396 *
397 * The easiest solution is to do exactly what the bogus workarounds say to
398 * do: restrict these buffers to 32-bit addresses. We could also pin the
399 * BO to some particular location of our choosing, but that's significantly
400 * more work than just not setting a flag. So, we explicitly DO NOT set
401 * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
402 * hard work for us.
403 */
404 anv_bo_init(&pool->bo, gem_handle, size);
405 pool->bo.flags = pool->bo_flags;
406 pool->bo.map = map;
407
408 return VK_SUCCESS;
409 }
410
411 /** Grows and re-centers the block pool.
412 *
413 * We grow the block pool in one or both directions in such a way that the
414 * following conditions are met:
415 *
416 * 1) The size of the entire pool is always a power of two.
417 *
418 * 2) The pool can grow at either or both ends, but neither end can
419 * ever get shortened.
420 *
421 * 3) At the end of the allocation, we have about twice as much space
422 * allocated for each end as we have used. This way the pool doesn't
423 * grow too far in one direction or the other.
424 *
425 * 4) If the _alloc_back() has never been called, then the back portion of
426 * the pool retains a size of zero. (This makes it easier for users of
427 * the block pool that only want a one-sided pool.)
428 *
429 * 5) We have enough space allocated for at least one more block in
430 * whichever side `state` points to.
431 *
432 * 6) The center of the pool is always aligned to both the block_size of
433 * the pool and a 4K CPU page.
434 */
435 static uint32_t
436 anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
437 {
438 VkResult result = VK_SUCCESS;
439
440 pthread_mutex_lock(&pool->device->mutex);
441
442 assert(state == &pool->state || state == &pool->back_state);
443
444 /* Gather a little usage information on the pool. Since we may have
445 * threads waiting in queue to get some storage while we resize, it's
446 * actually possible that total_used will be larger than old_size. In
447 * particular, block_pool_alloc() increments state->next prior to
448 * calling block_pool_grow, so this ensures that we get enough space for
449 * whichever side tries to grow the pool.
450 *
451 * We align to a page size because it makes it easier to do our
452 * calculations later in such a way that we stay page-aligned.
453 */
454 uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
455 uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
456 uint32_t total_used = front_used + back_used;
457
458 assert(state == &pool->state || back_used > 0);
459
460 uint32_t old_size = pool->bo.size;
461
462 /* The block pool is always initialized to a nonzero size and this function
463 * is always called after initialization.
464 */
465 assert(old_size > 0);
466
467 /* The back_used and front_used may actually be smaller than the actual
468 * requirement because they are based on the next pointers which are
469 * updated prior to calling this function.
470 */
471 uint32_t back_required = MAX2(back_used, pool->center_bo_offset);
472 uint32_t front_required = MAX2(front_used, old_size - pool->center_bo_offset);
473
474 if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
475 /* If we're in this case then this isn't the first allocation and we
476 * already have enough space on both sides to hold double what we
477 * have allocated. There's nothing for us to do.
478 */
479 goto done;
480 }
481
482 uint32_t size = old_size * 2;
483 while (size < back_required + front_required)
484 size *= 2;
485
486 assert(size > pool->bo.size);
487
488 /* We compute a new center_bo_offset such that, when we double the size
489 * of the pool, we maintain the ratio of how much is used by each side.
490 * This way things should remain more-or-less balanced.
491 */
492 uint32_t center_bo_offset;
493 if (back_used == 0) {
494 /* If we're in this case then we have never called alloc_back(). In
495 * this case, we want to keep the offset at 0 to make things as simple
496 * as possible for users that don't care about back allocations.
497 */
498 center_bo_offset = 0;
499 } else {
500 /* Try to "center" the allocation based on how much is currently in
501 * use on each side of the center line.
502 */
503 center_bo_offset = ((uint64_t)size * back_used) / total_used;
504
505 /* Align down to a multiple of the page size */
506 center_bo_offset &= ~(PAGE_SIZE - 1);
507
508 assert(center_bo_offset >= back_used);
509
510 /* Make sure we don't shrink the back end of the pool */
511 if (center_bo_offset < pool->back_state.end)
512 center_bo_offset = pool->back_state.end;
513
514 /* Make sure that we don't shrink the front end of the pool */
515 if (size - center_bo_offset < pool->state.end)
516 center_bo_offset = size - pool->state.end;
517 }
518
519 assert(center_bo_offset % PAGE_SIZE == 0);
520
521 result = anv_block_pool_expand_range(pool, center_bo_offset, size);
522
523 pool->bo.flags = pool->bo_flags;
524
525 done:
526 pthread_mutex_unlock(&pool->device->mutex);
527
528 if (result == VK_SUCCESS) {
529 /* Return the appropriate new size. This function never actually
530 * updates state->next. Instead, we let the caller do that because it
531 * needs to do so in order to maintain its concurrency model.
532 */
533 if (state == &pool->state) {
534 return pool->bo.size - pool->center_bo_offset;
535 } else {
536 assert(pool->center_bo_offset > 0);
537 return pool->center_bo_offset;
538 }
539 } else {
540 return 0;
541 }
542 }
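
/* Worked example of the sizing logic above (the numbers are made up): take a
 * 64 KiB pool with center_bo_offset == 16 KiB, back_used == 8 KiB and
 * front_used == 40 KiB.  front_used * 2 exceeds the 48 KiB available in
 * front, so we grow.  Doubling 64 KiB to 128 KiB already covers
 * back_required + front_required (16 KiB + 48 KiB), so size == 128 KiB, and
 * the new center lands at size * back_used / total_used = 128 KiB * 8 / 48,
 * roughly 21.3 KiB, aligned down to the 20 KiB page boundary.  Both sides
 * end up with at least twice their current usage, per condition (3) above.
 */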
543
544 static uint32_t
545 anv_block_pool_alloc_new(struct anv_block_pool *pool,
546 struct anv_block_state *pool_state,
547 uint32_t block_size)
548 {
549 struct anv_block_state state, old, new;
550
551 while (1) {
552 state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
553 if (state.next + block_size <= state.end) {
554 assert(pool->map);
555 return state.next;
556 } else if (state.next <= state.end) {
557 /* We allocated the first block outside the pool so we have to grow
558 * the pool. pool_state->next acts as a mutex: threads that try to
559 * allocate now will get block indexes above the current limit and
560 * hit futex_wait below.
561 */
562 new.next = state.next + block_size;
563 do {
564 new.end = anv_block_pool_grow(pool, pool_state);
565 } while (new.end < new.next);
566
567 old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
568 if (old.next != state.next)
569 futex_wake(&pool_state->end, INT_MAX);
570 return state.next;
571 } else {
572 futex_wait(&pool_state->end, state.end, NULL);
573 continue;
574 }
575 }
576 }
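
/* Illustrative walk-through of the three cases above, with end == 32768 and
 * block_size == 8192 (the numbers are hypothetical):
 *
 *  - A thread whose fetch-and-add returns next == 16384 satisfies
 *    next + block_size <= end and simply returns 16384.
 *
 *  - A thread that gets next == 32768 is the one that grows the pool; it
 *    loops on anv_block_pool_grow() until the new end covers its block,
 *    publishes the new state and wakes any waiters.
 *
 *  - A thread that gets next == 40960 has overshot the current end and
 *    futex-waits until the growing thread wakes it, then retries.
 */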
577
578 int32_t
579 anv_block_pool_alloc(struct anv_block_pool *pool,
580 uint32_t block_size)
581 {
582 return anv_block_pool_alloc_new(pool, &pool->state, block_size);
583 }
584
585 /* Allocates a block out of the back of the block pool.
586 *
587 * This will allocate a block earlier than the "start" of the block pool.
588 * The offsets returned from this function will be negative but will still
589 * be correct relative to the block pool's map pointer.
590 *
591 * If you ever use anv_block_pool_alloc_back, then you will have to do
592 * gymnastics with the block pool's BO when doing relocations.
593 */
594 int32_t
595 anv_block_pool_alloc_back(struct anv_block_pool *pool,
596 uint32_t block_size)
597 {
598 int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
599 block_size);
600
601 /* The offset we get out of anv_block_pool_alloc_new() is actually the
602 * number of bytes downwards from the middle to the end of the block.
603 * We need to turn it into a (negative) offset from the middle to the
604 * start of the block.
605 */
606 assert(offset >= 0);
607 return -(offset + block_size);
608 }
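
/* Example (illustrative): the very first back allocation of an 8192-byte
 * block gets offset 0 from anv_block_pool_alloc_new(), so this function
 * returns -8192, and pool->map + (-8192) is the start of that block, sitting
 * just below the pool's center.
 */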
609
610 VkResult
611 anv_state_pool_init(struct anv_state_pool *pool,
612 struct anv_device *device,
613 uint32_t block_size,
614 uint64_t bo_flags)
615 {
616 VkResult result = anv_block_pool_init(&pool->block_pool, device,
617 block_size * 16,
618 bo_flags);
619 if (result != VK_SUCCESS)
620 return result;
621
622 assert(util_is_power_of_two_or_zero(block_size));
623 pool->block_size = block_size;
624 pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
625 for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
626 pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
627 pool->buckets[i].block.next = 0;
628 pool->buckets[i].block.end = 0;
629 }
630 VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
631
632 return VK_SUCCESS;
633 }
634
635 void
636 anv_state_pool_finish(struct anv_state_pool *pool)
637 {
638 VG(VALGRIND_DESTROY_MEMPOOL(pool));
639 anv_block_pool_finish(&pool->block_pool);
640 }
641
642 static uint32_t
643 anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
644 struct anv_block_pool *block_pool,
645 uint32_t state_size,
646 uint32_t block_size)
647 {
648 struct anv_block_state block, old, new;
649 uint32_t offset;
650
651 /* If our state is large, we don't need any sub-allocation from a block.
652 * Instead, we just grab whole (potentially large) blocks.
653 */
654 if (state_size >= block_size)
655 return anv_block_pool_alloc(block_pool, state_size);
656
657 restart:
658 block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
659
660 if (block.next < block.end) {
661 return block.next;
662 } else if (block.next == block.end) {
663 offset = anv_block_pool_alloc(block_pool, block_size);
664 new.next = offset + state_size;
665 new.end = offset + block_size;
666 old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
667 if (old.next != block.next)
668 futex_wake(&pool->block.end, INT_MAX);
669 return offset;
670 } else {
671 futex_wait(&pool->block.end, block.end, NULL);
672 goto restart;
673 }
674 }
675
676 static uint32_t
677 anv_state_pool_get_bucket(uint32_t size)
678 {
679 unsigned size_log2 = ilog2_round_up(size);
680 assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
681 if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
682 size_log2 = ANV_MIN_STATE_SIZE_LOG2;
683 return size_log2 - ANV_MIN_STATE_SIZE_LOG2;
684 }
685
686 static uint32_t
687 anv_state_pool_get_bucket_size(uint32_t bucket)
688 {
689 uint32_t size_log2 = bucket + ANV_MIN_STATE_SIZE_LOG2;
690 return 1 << size_log2;
691 }
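
/* Example bucket mapping for the two helpers above, assuming a 64-byte
 * minimum state size (ANV_MIN_STATE_SIZE_LOG2 == 6):
 *
 *    anv_state_pool_get_bucket(64)   == 0   ->  bucket size 64
 *    anv_state_pool_get_bucket(65)   == 1   ->  bucket size 128
 *    anv_state_pool_get_bucket(1024) == 4   ->  bucket size 1024
 */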
692
693 static struct anv_state
694 anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
695 uint32_t size, uint32_t align)
696 {
697 uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));
698
699 struct anv_state state;
700 state.alloc_size = anv_state_pool_get_bucket_size(bucket);
701
702 /* Try free list first. */
703 if (anv_free_list_pop(&pool->buckets[bucket].free_list,
704 &pool->block_pool.map, &state.offset)) {
705 assert(state.offset >= 0);
706 goto done;
707 }
708
709 /* Try to grab a chunk from some larger bucket and split it up */
710 for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
711 int32_t chunk_offset;
712 if (anv_free_list_pop(&pool->buckets[b].free_list,
713 &pool->block_pool.map, &chunk_offset)) {
714 unsigned chunk_size = anv_state_pool_get_bucket_size(b);
715
716 /* We've found a chunk that's larger than the requested state size.
717 * There are a couple of options as to what we do with it:
718 *
719 * 1) We could fully split the chunk into state.alloc_size sized
720 * pieces. However, this would mean that allocating a 16B
721 * state could potentially split a 2MB chunk into 512K smaller
722 * chunks. This would lead to unnecessary fragmentation.
723 *
724 * 2) The classic "buddy allocator" method would have us split the
725 * chunk in half and return one half. Then we would split the
726 * remaining half in half and return one half, and repeat as
727 * needed until we get down to the size we want. However, if
728 * you are allocating a bunch of the same size state (which is
729 * the common case), this means that every other allocation has
730 * to go up a level and every fourth goes up two levels, etc.
731 * This is not nearly as efficient as it could be if we did a
732 * little more work up-front.
733 *
734 * 3) Split the difference between (1) and (2) by doing a
735 * two-level split. If it's bigger than some fixed block_size,
736 * we split it into block_size sized chunks and return all but
737 * one of them. Then we split what remains into
738 * state.alloc_size sized chunks and return all but one.
739 *
740 * We choose option (3).
741 */
742 if (chunk_size > pool->block_size &&
743 state.alloc_size < pool->block_size) {
744 assert(chunk_size % pool->block_size == 0);
745 /* We don't want to split giant chunks into tiny chunks. Instead,
746 * break anything bigger than a block into block-sized chunks and
747 * then break it down into bucket-sized chunks from there. Return
748 * all but the first block of the chunk to the block bucket.
749 */
750 const uint32_t block_bucket =
751 anv_state_pool_get_bucket(pool->block_size);
752 anv_free_list_push(&pool->buckets[block_bucket].free_list,
753 pool->block_pool.map,
754 chunk_offset + pool->block_size,
755 pool->block_size,
756 (chunk_size / pool->block_size) - 1);
757 chunk_size = pool->block_size;
758 }
759
760 assert(chunk_size % state.alloc_size == 0);
761 anv_free_list_push(&pool->buckets[bucket].free_list,
762 pool->block_pool.map,
763 chunk_offset + state.alloc_size,
764 state.alloc_size,
765 (chunk_size / state.alloc_size) - 1);
766
767 state.offset = chunk_offset;
768 goto done;
769 }
770 }
771
772 state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
773 &pool->block_pool,
774 state.alloc_size,
775 pool->block_size);
776
777 done:
778 state.map = pool->block_pool.map + state.offset;
779 return state;
780 }
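
/* Worked example of the option (3) split above (sizes are hypothetical):
 * with block_size == 8192, a 64-byte request that pops a 32 KiB chunk at
 * offset 0 from a larger bucket is split in two stages: the three trailing
 * 8 KiB blocks (offsets 8192, 16384 and 24576) go on the 8 KiB bucket's free
 * list, the first 8 KiB block is cut into 64-byte states whose tail (offsets
 * 64 through 8128, 127 entries) goes on the 64-byte bucket's free list, and
 * offset 0 is returned to the caller.
 */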
781
782 struct anv_state
783 anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
784 {
785 if (size == 0)
786 return ANV_STATE_NULL;
787
788 struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
789 VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
790 return state;
791 }
792
793 struct anv_state
794 anv_state_pool_alloc_back(struct anv_state_pool *pool)
795 {
796 struct anv_state state;
797 state.alloc_size = pool->block_size;
798
799 if (anv_free_list_pop(&pool->back_alloc_free_list,
800 &pool->block_pool.map, &state.offset)) {
801 assert(state.offset < 0);
802 goto done;
803 }
804
805 state.offset = anv_block_pool_alloc_back(&pool->block_pool,
806 pool->block_size);
807
808 done:
809 state.map = pool->block_pool.map + state.offset;
810 VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, state.alloc_size));
811 return state;
812 }
813
814 static void
815 anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
816 {
817 assert(util_is_power_of_two_or_zero(state.alloc_size));
818 unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);
819
820 if (state.offset < 0) {
821 assert(state.alloc_size == pool->block_size);
822 anv_free_list_push(&pool->back_alloc_free_list,
823 pool->block_pool.map, state.offset,
824 state.alloc_size, 1);
825 } else {
826 anv_free_list_push(&pool->buckets[bucket].free_list,
827 pool->block_pool.map, state.offset,
828 state.alloc_size, 1);
829 }
830 }
831
832 void
833 anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
834 {
835 if (state.alloc_size == 0)
836 return;
837
838 VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
839 anv_state_pool_free_no_vg(pool, state);
840 }
841
842 struct anv_state_stream_block {
843 struct anv_state block;
844
845 /* The next block */
846 struct anv_state_stream_block *next;
847
848 #ifdef HAVE_VALGRIND
849 /* A pointer to the first user-allocated thing in this block. This is
850 * what valgrind sees as the start of the block.
851 */
852 void *_vg_ptr;
853 #endif
854 };
855
856 /* The state stream allocator is a one-shot, single threaded allocator for
857 * variable sized blocks. We use it for allocating dynamic state.
858 */
859 void
860 anv_state_stream_init(struct anv_state_stream *stream,
861 struct anv_state_pool *state_pool,
862 uint32_t block_size)
863 {
864 stream->state_pool = state_pool;
865 stream->block_size = block_size;
866
867 stream->block = ANV_STATE_NULL;
868
869 stream->block_list = NULL;
870
871 /* Ensure that next + whatever > block_size. This way the first call to
872 * state_stream_alloc fetches a new block.
873 */
874 stream->next = block_size;
875
876 VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
877 }
878
879 void
880 anv_state_stream_finish(struct anv_state_stream *stream)
881 {
882 struct anv_state_stream_block *next = stream->block_list;
883 while (next != NULL) {
884 struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
885 VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
886 VG(VALGRIND_MAKE_MEM_UNDEFINED(next, stream->block_size));
887 anv_state_pool_free_no_vg(stream->state_pool, sb.block);
888 next = sb.next;
889 }
890
891 VG(VALGRIND_DESTROY_MEMPOOL(stream));
892 }
893
894 struct anv_state
895 anv_state_stream_alloc(struct anv_state_stream *stream,
896 uint32_t size, uint32_t alignment)
897 {
898 if (size == 0)
899 return ANV_STATE_NULL;
900
901 assert(alignment <= PAGE_SIZE);
902
903 uint32_t offset = align_u32(stream->next, alignment);
904 if (offset + size > stream->block.alloc_size) {
905 uint32_t block_size = stream->block_size;
906 if (block_size < size)
907 block_size = round_to_power_of_two(size);
908
909 stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
910 block_size, PAGE_SIZE);
911
912 struct anv_state_stream_block *sb = stream->block.map;
913 VG_NOACCESS_WRITE(&sb->block, stream->block);
914 VG_NOACCESS_WRITE(&sb->next, stream->block_list);
915 stream->block_list = sb;
916 VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, NULL));
917
918 VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, stream->block_size));
919
920 /* Reset back to the start plus space for the header */
921 stream->next = sizeof(*sb);
922
923 offset = align_u32(stream->next, alignment);
924 assert(offset + size <= stream->block.alloc_size);
925 }
926
927 struct anv_state state = stream->block;
928 state.offset += offset;
929 state.alloc_size = size;
930 state.map += offset;
931
932 stream->next = offset + size;
933
934 #ifdef HAVE_VALGRIND
935 struct anv_state_stream_block *sb = stream->block_list;
936 void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
937 if (vg_ptr == NULL) {
938 vg_ptr = state.map;
939 VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
940 VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
941 } else {
942 void *state_end = state.map + state.alloc_size;
943 /* This only updates the mempool. The newly allocated chunk is still
944 * marked as NOACCESS. */
945 VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
946 /* Mark the newly allocated chunk as undefined */
947 VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
948 }
949 #endif
950
951 return state;
952 }
953
954 struct bo_pool_bo_link {
955 struct bo_pool_bo_link *next;
956 struct anv_bo bo;
957 };
958
959 void
960 anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
961 uint64_t bo_flags)
962 {
963 pool->device = device;
964 pool->bo_flags = bo_flags;
965 memset(pool->free_list, 0, sizeof(pool->free_list));
966
967 VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
968 }
969
970 void
971 anv_bo_pool_finish(struct anv_bo_pool *pool)
972 {
973 for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
974 struct bo_pool_bo_link *link = PFL_PTR(pool->free_list[i]);
975 while (link != NULL) {
976 struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);
977
978 anv_gem_munmap(link_copy.bo.map, link_copy.bo.size);
979 anv_gem_close(pool->device, link_copy.bo.gem_handle);
980 link = link_copy.next;
981 }
982 }
983
984 VG(VALGRIND_DESTROY_MEMPOOL(pool));
985 }
986
987 VkResult
988 anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo, uint32_t size)
989 {
990 VkResult result;
991
992 const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size);
993 const unsigned pow2_size = 1 << size_log2;
994 const unsigned bucket = size_log2 - 12;
995 assert(bucket < ARRAY_SIZE(pool->free_list));
996
997 void *next_free_void;
998 if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) {
999 struct bo_pool_bo_link *next_free = next_free_void;
1000 *bo = VG_NOACCESS_READ(&next_free->bo);
1001 assert(bo->gem_handle);
1002 assert(bo->map == next_free);
1003 assert(size <= bo->size);
1004
1005 VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
1006
1007 return VK_SUCCESS;
1008 }
1009
1010 struct anv_bo new_bo;
1011
1012 result = anv_bo_init_new(&new_bo, pool->device, pow2_size);
1013 if (result != VK_SUCCESS)
1014 return result;
1015
1016 new_bo.flags = pool->bo_flags;
1017
1018 assert(new_bo.size == pow2_size);
1019
1020 new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
1021 if (new_bo.map == MAP_FAILED) {
1022 anv_gem_close(pool->device, new_bo.gem_handle);
1023 return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
1024 }
1025
1026 *bo = new_bo;
1027
1028 VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
1029
1030 return VK_SUCCESS;
1031 }
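
/* Example (illustrative): a 5000-byte request rounds up to an 8192-byte BO
 * (size_log2 == 13, bucket 1), while anything up to 4096 bytes lands in
 * bucket 0.  When the BO is freed, its power-of-two size maps it back to the
 * same bucket, so a recycled BO is always big enough for any request that
 * hits that bucket.
 */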
1032
1033 void
1034 anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in)
1035 {
1036 /* Make a copy in case the anv_bo happens to be stored in the BO */
1037 struct anv_bo bo = *bo_in;
1038
1039 VG(VALGRIND_MEMPOOL_FREE(pool, bo.map));
1040
1041 struct bo_pool_bo_link *link = bo.map;
1042 VG_NOACCESS_WRITE(&link->bo, bo);
1043
1044 assert(util_is_power_of_two_or_zero(bo.size));
1045 const unsigned size_log2 = ilog2_round_up(bo.size);
1046 const unsigned bucket = size_log2 - 12;
1047 assert(bucket < ARRAY_SIZE(pool->free_list));
1048
1049 anv_ptr_free_list_push(&pool->free_list[bucket], link);
1050 }
1051
1052 // Scratch pool
1053
1054 void
1055 anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
1056 {
1057 memset(pool, 0, sizeof(*pool));
1058 }
1059
1060 void
1061 anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
1062 {
1063 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
1064 for (unsigned i = 0; i < 16; i++) {
1065 struct anv_scratch_bo *bo = &pool->bos[i][s];
1066 if (bo->exists > 0)
1067 anv_gem_close(device, bo->bo.gem_handle);
1068 }
1069 }
1070 }
1071
1072 struct anv_bo *
1073 anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
1074 gl_shader_stage stage, unsigned per_thread_scratch)
1075 {
1076 if (per_thread_scratch == 0)
1077 return NULL;
1078
1079 unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
1080 assert(scratch_size_log2 < 16);
1081
1082 struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage];
1083
1084 /* We can use "exists" to shortcut and ignore the critical section */
1085 if (bo->exists)
1086 return &bo->bo;
1087
1088 pthread_mutex_lock(&device->mutex);
1089
1090 __sync_synchronize();
1091 if (bo->exists) {
1092 pthread_mutex_unlock(&device->mutex);
1093 return &bo->bo;
1094 }
1095
1096 const struct anv_physical_device *physical_device =
1097 &device->instance->physicalDevice;
1098 const struct gen_device_info *devinfo = &physical_device->info;
1099
1100 const unsigned subslices = MAX2(physical_device->subslice_total, 1);
1101
1102 unsigned scratch_ids_per_subslice;
1103 if (devinfo->is_haswell) {
1104 /* WaCSScratchSize:hsw
1105 *
1106 * Haswell's scratch space address calculation appears to be sparse
1107 * rather than tightly packed. The Thread ID has bits indicating
1108 * which subslice, EU within a subslice, and thread within an EU it
1109 * is. There's a maximum of two slices and two subslices, so these
1110 * can be stored with a single bit. Even though there are only 10 EUs
1111 * per subslice, this is stored in 4 bits, so there's an effective
1112 * maximum value of 16 EUs. Similarly, although there are only 7
1113 * threads per EU, this is stored in a 3 bit number, giving an
1114 * effective maximum value of 8 threads per EU.
1115 *
1116 * This means that we need to use 16 * 8 instead of 10 * 7 for the
1117 * number of threads per subslice.
1118 */
1119 scratch_ids_per_subslice = 16 * 8;
1120 } else if (devinfo->is_cherryview) {
1121 /* Cherryview devices have either 6 or 8 EUs per subslice, and each EU
1122 * has 7 threads. The 6 EU devices appear to calculate thread IDs as if
1123 * it had 8 EUs.
1124 */
1125 scratch_ids_per_subslice = 8 * 7;
1126 } else {
1127 scratch_ids_per_subslice = devinfo->max_cs_threads;
1128 }
1129
1130 uint32_t max_threads[] = {
1131 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
1132 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
1133 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
1134 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
1135 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
1136 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslices,
1137 };
1138
1139 uint32_t size = per_thread_scratch * max_threads[stage];
1140
1141 anv_bo_init_new(&bo->bo, device, size);
1142
1143 /* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
1144 * are still relative to the general state base address. When we emit
1145 * STATE_BASE_ADDRESS, we set general state base address to 0 and the size
1146 * to the maximum (1 page under 4GB). This allows us to just place the
1147 * scratch buffers anywhere we wish in the bottom 32 bits of address space
1148 * and just set the scratch base pointer in 3DSTATE_*S using a relocation.
1149 * However, in order to do so, we need to ensure that the kernel does not
1150 * place the scratch BO above the 32-bit boundary.
1151 *
1152 * NOTE: Technically, it can't go "anywhere" because the top page is off
1153 * limits. However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
1154 * kernel allocates space using
1155 *
1156 * end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
1157 *
1158 * so nothing will ever touch the top page.
1159 */
1160 assert(!(bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS));
1161
1162 if (device->instance->physicalDevice.has_exec_async)
1163 bo->bo.flags |= EXEC_OBJECT_ASYNC;
1164
1165 /* Set the exists last because it may be read by other threads */
1166 __sync_synchronize();
1167 bo->exists = true;
1168
1169 pthread_mutex_unlock(&device->mutex);
1170
1171 return &bo->bo;
1172 }
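
/* Example (illustrative): for per_thread_scratch == 4096 we get
 * scratch_size_log2 == ffs(4096 / 2048) == 2, so the BO lives in
 * pool->bos[2][stage], and its size is 4096 * max_threads[stage], e.g.
 * 4096 * 56 = 224 KiB for a hypothetical 56-thread vertex shader limit.
 */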
1173
1174 struct anv_cached_bo {
1175 struct anv_bo bo;
1176
1177 uint32_t refcount;
1178 };
1179
1180 VkResult
1181 anv_bo_cache_init(struct anv_bo_cache *cache)
1182 {
1183 cache->bo_map = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1184 _mesa_key_pointer_equal);
1185 if (!cache->bo_map)
1186 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1187
1188 if (pthread_mutex_init(&cache->mutex, NULL)) {
1189 _mesa_hash_table_destroy(cache->bo_map, NULL);
1190 return vk_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
1191 "pthread_mutex_init failed: %m");
1192 }
1193
1194 return VK_SUCCESS;
1195 }
1196
1197 void
1198 anv_bo_cache_finish(struct anv_bo_cache *cache)
1199 {
1200 _mesa_hash_table_destroy(cache->bo_map, NULL);
1201 pthread_mutex_destroy(&cache->mutex);
1202 }
1203
1204 static struct anv_cached_bo *
1205 anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
1206 {
1207 struct hash_entry *entry =
1208 _mesa_hash_table_search(cache->bo_map,
1209 (const void *)(uintptr_t)gem_handle);
1210 if (!entry)
1211 return NULL;
1212
1213 struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
1214 assert(bo->bo.gem_handle == gem_handle);
1215
1216 return bo;
1217 }
1218
1219 UNUSED static struct anv_bo *
1220 anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
1221 {
1222 pthread_mutex_lock(&cache->mutex);
1223
1224 struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
1225
1226 pthread_mutex_unlock(&cache->mutex);
1227
1228 return bo ? &bo->bo : NULL;
1229 }
1230
1231 VkResult
1232 anv_bo_cache_alloc(struct anv_device *device,
1233 struct anv_bo_cache *cache,
1234 uint64_t size, struct anv_bo **bo_out)
1235 {
1236 struct anv_cached_bo *bo =
1237 vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
1238 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1239 if (!bo)
1240 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1241
1242 bo->refcount = 1;
1243
1244 /* The kernel is going to give us whole pages anyway */
1245 size = align_u64(size, 4096);
1246
1247 VkResult result = anv_bo_init_new(&bo->bo, device, size);
1248 if (result != VK_SUCCESS) {
1249 vk_free(&device->alloc, bo);
1250 return result;
1251 }
1252
1253 assert(bo->bo.gem_handle);
1254
1255 pthread_mutex_lock(&cache->mutex);
1256
1257 _mesa_hash_table_insert(cache->bo_map,
1258 (void *)(uintptr_t)bo->bo.gem_handle, bo);
1259
1260 pthread_mutex_unlock(&cache->mutex);
1261
1262 *bo_out = &bo->bo;
1263
1264 return VK_SUCCESS;
1265 }
1266
1267 VkResult
1268 anv_bo_cache_import(struct anv_device *device,
1269 struct anv_bo_cache *cache,
1270 int fd, struct anv_bo **bo_out)
1271 {
1272 pthread_mutex_lock(&cache->mutex);
1273
1274 uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
1275 if (!gem_handle) {
1276 pthread_mutex_unlock(&cache->mutex);
1277 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
1278 }
1279
1280 struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
1281 if (bo) {
1282 __sync_fetch_and_add(&bo->refcount, 1);
1283 } else {
1284 off_t size = lseek(fd, 0, SEEK_END);
1285 if (size == (off_t)-1) {
1286 anv_gem_close(device, gem_handle);
1287 pthread_mutex_unlock(&cache->mutex);
1288 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
1289 }
1290
1291 bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
1292 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1293 if (!bo) {
1294 anv_gem_close(device, gem_handle);
1295 pthread_mutex_unlock(&cache->mutex);
1296 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1297 }
1298
1299 bo->refcount = 1;
1300
1301 anv_bo_init(&bo->bo, gem_handle, size);
1302
1303 _mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
1304 }
1305
1306 pthread_mutex_unlock(&cache->mutex);
1307 *bo_out = &bo->bo;
1308
1309 return VK_SUCCESS;
1310 }
1311
1312 VkResult
1313 anv_bo_cache_export(struct anv_device *device,
1314 struct anv_bo_cache *cache,
1315 struct anv_bo *bo_in, int *fd_out)
1316 {
1317 assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
1318 struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
1319
1320 int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
1321 if (fd < 0)
1322 return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
1323
1324 *fd_out = fd;
1325
1326 return VK_SUCCESS;
1327 }
1328
1329 static bool
1330 atomic_dec_not_one(uint32_t *counter)
1331 {
1332 uint32_t old, val;
1333
1334 val = *counter;
1335 while (1) {
1336 if (val == 1)
1337 return false;
1338
1339 old = __sync_val_compare_and_swap(counter, val, val - 1);
1340 if (old == val)
1341 return true;
1342
1343 val = old;
1344 }
1345 }
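
/* Example (illustrative): with *counter == 3 this CAS loop stores 2 and
 * returns true; with *counter == 1 it returns false without writing, so the
 * caller knows it may be holding the last reference and must re-check under
 * the cache mutex (see anv_bo_cache_release() below).
 */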
1346
1347 void
1348 anv_bo_cache_release(struct anv_device *device,
1349 struct anv_bo_cache *cache,
1350 struct anv_bo *bo_in)
1351 {
1352 assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
1353 struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
1354
1355 /* Try to decrement the counter but don't go below one. If this succeeds
1356 * then the refcount has been decremented and we are not the last
1357 * reference.
1358 */
1359 if (atomic_dec_not_one(&bo->refcount))
1360 return;
1361
1362 pthread_mutex_lock(&cache->mutex);
1363
1364 /* We are probably the last reference since our attempt to decrement above
1365 * failed. However, we can't actually know until we are inside the mutex.
1366 * Otherwise, someone could import the BO between the decrement and our
1367 * taking the mutex.
1368 */
1369 if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
1370 /* Turns out we're not the last reference. Unlock and bail. */
1371 pthread_mutex_unlock(&cache->mutex);
1372 return;
1373 }
1374
1375 struct hash_entry *entry =
1376 _mesa_hash_table_search(cache->bo_map,
1377 (const void *)(uintptr_t)bo->bo.gem_handle);
1378 assert(entry);
1379 _mesa_hash_table_remove(cache->bo_map, entry);
1380
1381 if (bo->bo.map)
1382 anv_gem_munmap(bo->bo.map, bo->bo.size);
1383
1384 anv_gem_close(device, bo->bo.gem_handle);
1385
1386 /* Don't unlock until we've actually closed the BO. The whole point of
1387 * the BO cache is to ensure that we correctly handle races with creating
1388 * and releasing GEM handles and we don't want to let someone import the BO
1389 * again between mutex unlock and closing the GEM handle.
1390 */
1391 pthread_mutex_unlock(&cache->mutex);
1392
1393 vk_free(&device->alloc, bo);
1394 }