iris: precompute hashes for cache tracking
src/gallium/drivers/iris/iris_bufmgr.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_bufmgr.c
25 *
26 * The Iris buffer manager.
27 *
28 * XXX: write better comments
29 * - BOs
30 * - Explain BO cache
31 * - main interface to GEM in the kernel
32 */
33
34 #ifdef HAVE_CONFIG_H
35 #include "config.h"
36 #endif
37
38 #include <xf86drm.h>
39 #include <util/u_atomic.h>
40 #include <fcntl.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <string.h>
44 #include <unistd.h>
45 #include <assert.h>
46 #include <sys/ioctl.h>
47 #include <sys/mman.h>
48 #include <sys/stat.h>
49 #include <sys/types.h>
50 #include <stdbool.h>
51 #include <time.h>
52
53 #include "errno.h"
54 #ifndef ETIME
55 #define ETIME ETIMEDOUT
56 #endif
57 #include "common/gen_clflush.h"
58 #include "common/gen_debug.h"
59 #include "common/gen_gem.h"
60 #include "dev/gen_device_info.h"
61 #include "main/macros.h"
62 #include "util/debug.h"
63 #include "util/macros.h"
64 #include "util/hash_table.h"
65 #include "util/list.h"
66 #include "util/u_dynarray.h"
67 #include "util/vma.h"
68 #include "iris_bufmgr.h"
69 #include "iris_context.h"
70 #include "string.h"
71
72 #include "drm-uapi/i915_drm.h"
73
74 #ifdef HAVE_VALGRIND
75 #include <valgrind.h>
76 #include <memcheck.h>
77 #define VG(x) x
78 #else
79 #define VG(x)
80 #endif
81
82 /* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
83 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
84 * leaked. All because it does not call VG(cli_free) from its
85 * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
  86  * an allocation, we mark it available for use upon mmapping and remove
87 * it upon unmapping.
88 */
89 #define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
90 #define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
91
92 #define PAGE_SIZE 4096
93
94 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
95
96 /**
  97  * Call ioctl, restarting if it is interrupted
98 */
99 int
100 drm_ioctl(int fd, unsigned long request, void *arg)
101 {
102 int ret;
103
104 do {
105 ret = ioctl(fd, request, arg);
106 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
107 return ret;
108 }
109
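/**
 * Atomically add @add to @*v, unless *v currently equals @unless.
 *
 * Returns true if *v already equalled @unless (so no addition was performed).
 */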
110 static inline int
111 atomic_add_unless(int *v, int add, int unless)
112 {
113 int c, old;
114 c = p_atomic_read(v);
115 while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
116 c = old;
117 return c == unless;
118 }
119
120 /**
121 * Iris fixed-size bucketing VMA allocator.
122 *
123 * The BO cache maintains "cache buckets" for buffers of various sizes.
124 * All buffers in a given bucket are identically sized - when allocating,
125 * we always round up to the bucket size. This means that virtually all
126 * allocations are fixed-size; only buffers which are too large to fit in
127 * a bucket can be variably-sized.
128 *
129 * We create an allocator for each bucket. Each contains a free-list, where
130 * each node contains a <starting address, 64-bit bitmap> pair. Each bit
131 * represents a bucket-sized block of memory. (At the first level, each
132 * bit corresponds to a page. For the second bucket, bits correspond to
133 * two pages, and so on.) 1 means a block is free, and 0 means it's in-use.
134 * The lowest bit in the bitmap is for the first block.
135 *
136 * This makes allocations cheap - any bit of any node will do. We can pick
137 * the head of the list and use ffs() to find a free block. If there are
138 * none, we allocate 64 blocks from a larger allocator - either a bigger
139 * bucketing allocator, or a fallback top-level allocator for large objects.
140 */
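/* An illustrative example (not from the original comments): for the 4 KB
 * bucket, one vma_bucket_node tracks 64 * 4 KB = 256 KB of VMA.  A node with
 * start_address = 0x100000 and bitmap = ~1ull means the block at 0x100000 is
 * in use, while the blocks at 0x101000 ... 0x13f000 are free.
 */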
141 struct vma_bucket_node {
142 uint64_t start_address;
143 uint64_t bitmap;
144 };
145
146 struct bo_cache_bucket {
147 /** List of cached BOs. */
148 struct list_head head;
149
150 /** Size of this bucket, in bytes. */
151 uint64_t size;
152
153 /** List of vma_bucket_nodes. */
154 struct util_dynarray vma_list[IRIS_MEMZONE_COUNT];
155 };
156
157 struct iris_bufmgr {
158 int fd;
159
160 mtx_t lock;
161
 162    /** Array of lists of cached gem objects, one list per cache bucket size */
163 struct bo_cache_bucket cache_bucket[14 * 4];
164 int num_buckets;
165 time_t time;
166
167 struct hash_table *name_table;
168 struct hash_table *handle_table;
169
170 struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];
171
172 bool has_llc:1;
173 bool bo_reuse:1;
174 };
175
176 static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
177 uint32_t stride);
178
179 static void bo_free(struct iris_bo *bo);
180
181 static uint64_t vma_alloc(struct iris_bufmgr *bufmgr,
182 enum iris_memory_zone memzone,
183 uint64_t size, uint64_t alignment);
184
185 static uint32_t
186 key_hash_uint(const void *key)
187 {
188 return _mesa_hash_data(key, 4);
189 }
190
191 static bool
192 key_uint_equal(const void *a, const void *b)
193 {
194 return *((unsigned *) a) == *((unsigned *) b);
195 }
196
197 static struct iris_bo *
198 hash_find_bo(struct hash_table *ht, unsigned int key)
199 {
200 struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
201 return entry ? (struct iris_bo *) entry->data : NULL;
202 }
203
204 /**
205 * This function finds the correct bucket fit for the input size.
 206  * It computes the matching bucket index directly, in O(1) time, rather
 207  * than iterating through all the buckets looking for a fit.
208 */
209 static struct bo_cache_bucket *
210 bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size)
211 {
 212    /* Compute the number of pages needed, rounding up. */
213 const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
214
215 /* Row Bucket sizes clz((x-1) | 3) Row Column
216 * in pages stride size
217 * 0: 1 2 3 4 -> 30 30 30 30 4 1
218 * 1: 5 6 7 8 -> 29 29 29 29 4 1
219 * 2: 10 12 14 16 -> 28 28 28 28 8 2
220 * 3: 20 24 28 32 -> 27 27 27 27 16 4
221 */
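   /* Worked example (illustrative): a 20 KB request is 5 pages.
    * (5 - 1) | 3 = 7 and clz(7) = 29, so row = 30 - 29 = 1.  Then
    * prev_row_max_pages = 4 and col_size_log2 = 0, giving
    * col = (5 - 4) >> 0 = 1 and index = 1 * 4 + (1 - 1) = 4,
    * which is the 5-page (20 KB) bucket created by init_cache_buckets().
    */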
222 const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
223 const unsigned row_max_pages = 4 << row;
224
 225    /* The '& ~2' is the special case for row 0.  In row 0, max pages /
 226     * 2 is 2, but the previous row maximum is zero (because there is
227 * no previous row). All row maximum sizes are power of 2, so that
228 * is the only case where that bit will be set.
229 */
230 const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
231 int col_size_log2 = row - 1;
232 col_size_log2 += (col_size_log2 < 0);
233
234 const unsigned col = (pages - prev_row_max_pages +
235 ((1 << col_size_log2) - 1)) >> col_size_log2;
236
237 /* Calculating the index based on the row and column. */
238 const unsigned index = (row * 4) + (col - 1);
239
240 return (index < bufmgr->num_buckets) ?
241 &bufmgr->cache_bucket[index] : NULL;
242 }
243
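/**
 * Return the memory zone containing the given virtual address, by comparing
 * it against the per-zone start addresses and the two fixed single-buffer
 * addresses (the binder and the border color pool).
 */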
244 static enum iris_memory_zone
245 memzone_for_address(uint64_t address)
246 {
247 STATIC_ASSERT(IRIS_MEMZONE_OTHER_START > IRIS_MEMZONE_DYNAMIC_START);
248 STATIC_ASSERT(IRIS_MEMZONE_DYNAMIC_START > IRIS_MEMZONE_SURFACE_START);
249 STATIC_ASSERT(IRIS_MEMZONE_SURFACE_START > IRIS_MEMZONE_SHADER_START);
250 STATIC_ASSERT(IRIS_BINDER_ADDRESS == IRIS_MEMZONE_SURFACE_START);
251 STATIC_ASSERT(IRIS_BORDER_COLOR_POOL_ADDRESS == IRIS_MEMZONE_DYNAMIC_START);
252
253 if (address >= IRIS_MEMZONE_OTHER_START)
254 return IRIS_MEMZONE_OTHER;
255
256 if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
257 return IRIS_MEMZONE_BORDER_COLOR_POOL;
258
259 if (address > IRIS_MEMZONE_DYNAMIC_START)
260 return IRIS_MEMZONE_DYNAMIC;
261
262 if (address == IRIS_BINDER_ADDRESS)
263 return IRIS_MEMZONE_BINDER;
264
265 if (address > IRIS_MEMZONE_SURFACE_START)
266 return IRIS_MEMZONE_SURFACE;
267
268 return IRIS_MEMZONE_SHADER;
269 }
270
271 static uint64_t
272 bucket_vma_alloc(struct iris_bufmgr *bufmgr,
273 struct bo_cache_bucket *bucket,
274 enum iris_memory_zone memzone)
275 {
276 struct util_dynarray *vma_list = &bucket->vma_list[memzone];
277 struct vma_bucket_node *node;
278
279 if (vma_list->size == 0) {
280 /* This bucket allocator is out of space - allocate a new block of
281 * memory for 64 blocks from a larger allocator (either a larger
282 * bucket or util_vma).
283 *
284 * We align the address to the node size (64 blocks) so that
285 * bucket_vma_free can easily compute the starting address of this
286 * block by rounding any address we return down to the node size.
287 *
 288      * Mark the first block as in-use (clear its bit), and return the start address.
289 */
290 const uint64_t node_size = 64ull * bucket->size;
291 node = util_dynarray_grow(vma_list, sizeof(struct vma_bucket_node));
292
293 if (unlikely(!node))
294 return 0ull;
295
296 uint64_t addr = vma_alloc(bufmgr, memzone, node_size, node_size);
297 node->start_address = gen_48b_address(addr);
298 node->bitmap = ~1ull;
299 return node->start_address;
300 }
301
302 /* Pick any bit from any node - they're all the right size and free. */
303 node = util_dynarray_top_ptr(vma_list, struct vma_bucket_node);
304 int bit = ffsll(node->bitmap) - 1;
305 assert(bit >= 0 && bit <= 63);
306
307 /* Reserve the memory by clearing the bit. */
308 assert((node->bitmap & (1ull << bit)) != 0ull);
309 node->bitmap &= ~(1ull << bit);
310
311 uint64_t addr = node->start_address + bit * bucket->size;
312
313 /* If this node is now completely full, remove it from the free list. */
314 if (node->bitmap == 0ull) {
315 (void) util_dynarray_pop(vma_list, struct vma_bucket_node);
316 }
317
318 return addr;
319 }
320
321 static void
322 bucket_vma_free(struct bo_cache_bucket *bucket, uint64_t address)
323 {
324 enum iris_memory_zone memzone = memzone_for_address(address);
325 struct util_dynarray *vma_list = &bucket->vma_list[memzone];
326 const uint64_t node_bytes = 64ull * bucket->size;
327 struct vma_bucket_node *node = NULL;
328
 329    /* bucket_vma_alloc allocates 64 blocks at a time, and aligns the node to
 330     * that 64-block size, so we can round down to get its starting address.
331 */
332 uint64_t start = (address / node_bytes) * node_bytes;
333
334 /* Dividing the offset from start by bucket size gives us the bit index. */
335 int bit = (address - start) / bucket->size;
336
337 assert(start + bit * bucket->size == address);
338
339 util_dynarray_foreach(vma_list, struct vma_bucket_node, cur) {
340 if (cur->start_address == start) {
341 node = cur;
342 break;
343 }
344 }
345
346 if (!node) {
347 /* No node - the whole group of 64 blocks must have been in-use. */
348 node = util_dynarray_grow(vma_list, sizeof(struct vma_bucket_node));
349
350 if (unlikely(!node))
351 return; /* bogus, leaks some GPU VMA, but nothing we can do... */
352
353 node->start_address = start;
354 node->bitmap = 0ull;
355 }
356
357 /* Set the bit to return the memory. */
358 assert((node->bitmap & (1ull << bit)) == 0ull);
359 node->bitmap |= 1ull << bit;
360
361 /* The block might be entirely free now, and if so, we could return it
362 * to the larger allocator. But we may as well hang on to it, in case
363 * we get more allocations at this block size.
364 */
365 }
366
367 static struct bo_cache_bucket *
368 get_bucket_allocator(struct iris_bufmgr *bufmgr, uint64_t size)
369 {
370 /* Skip using the bucket allocator for very large sizes, as it allocates
371 * 64 of them and this can balloon rather quickly.
372 */
373 if (size > 1024 * PAGE_SIZE)
374 return NULL;
375
376 struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);
377
378 if (bucket && bucket->size == size)
379 return bucket;
380
381 return NULL;
382 }
383
384 /**
385 * Allocate a section of virtual memory for a buffer, assigning an address.
386 *
387 * This uses either the bucket allocator for the given size, or the large
388 * object allocator (util_vma).
389 */
390 static uint64_t
391 vma_alloc(struct iris_bufmgr *bufmgr,
392 enum iris_memory_zone memzone,
393 uint64_t size,
394 uint64_t alignment)
395 {
396 if (memzone == IRIS_MEMZONE_BINDER)
397 return IRIS_BINDER_ADDRESS;
398 else if (memzone == IRIS_MEMZONE_BORDER_COLOR_POOL)
399 return IRIS_BORDER_COLOR_POOL_ADDRESS;
400
401 struct bo_cache_bucket *bucket = get_bucket_allocator(bufmgr, size);
402 uint64_t addr;
403
404 if (bucket) {
405 addr = bucket_vma_alloc(bufmgr, bucket, memzone);
406 } else {
407 addr = util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size,
408 alignment);
409 }
410
411 assert((addr >> 48ull) == 0);
412 assert((addr % alignment) == 0);
413
414 return gen_canonical_address(addr);
415 }
416
417 static void
418 vma_free(struct iris_bufmgr *bufmgr,
419 uint64_t address,
420 uint64_t size)
421 {
422 if (address == IRIS_BINDER_ADDRESS ||
423 address == IRIS_BORDER_COLOR_POOL_ADDRESS)
424 return;
425
426 /* Un-canonicalize the address. */
427 address = gen_48b_address(address);
428
429 if (address == 0ull)
430 return;
431
432 struct bo_cache_bucket *bucket = get_bucket_allocator(bufmgr, size);
433
434 if (bucket) {
435 bucket_vma_free(bucket, address);
436 } else {
437 enum iris_memory_zone memzone = memzone_for_address(address);
438 util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
439 }
440 }
441
442 int
443 iris_bo_busy(struct iris_bo *bo)
444 {
445 struct iris_bufmgr *bufmgr = bo->bufmgr;
446 struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
447
448 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
449 if (ret == 0) {
450 bo->idle = !busy.busy;
451 return busy.busy;
452 }
453 return false;
454 }
455
456 int
457 iris_bo_madvise(struct iris_bo *bo, int state)
458 {
459 struct drm_i915_gem_madvise madv = {
460 .handle = bo->gem_handle,
461 .madv = state,
462 .retained = 1,
463 };
464
465 drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
466
467 return madv.retained;
468 }
469
470 /* drop the oldest entries that have been purged by the kernel */
471 static void
472 iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
473 struct bo_cache_bucket *bucket)
474 {
475 list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
476 if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
477 break;
478
479 list_del(&bo->head);
480 bo_free(bo);
481 }
482 }
483
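/**
 * Allocate a zeroed iris_bo, precomputing its pointer hash up front so that
 * cache-tracking code can use bo->hash without rehashing the BO each time.
 */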
484 static struct iris_bo *
485 bo_calloc(void)
486 {
487 struct iris_bo *bo = calloc(1, sizeof(*bo));
488 if (bo) {
489 bo->hash = _mesa_hash_pointer(bo);
490 }
491 return bo;
492 }
493
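/**
 * Allocate a buffer object, preferring to reuse an idle BO from the matching
 * cache bucket; otherwise a new GEM object is created.  Either way, the BO
 * ends up pinned at a VMA address in the requested memory zone.
 */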
494 static struct iris_bo *
495 bo_alloc_internal(struct iris_bufmgr *bufmgr,
496 const char *name,
497 uint64_t size,
498 enum iris_memory_zone memzone,
499 unsigned flags,
500 uint32_t tiling_mode,
501 uint32_t stride)
502 {
503 struct iris_bo *bo;
504 unsigned int page_size = getpagesize();
505 int ret;
506 struct bo_cache_bucket *bucket;
507 bool alloc_from_cache;
508 uint64_t bo_size;
509 bool zeroed = false;
510
511 if (flags & BO_ALLOC_ZEROED)
512 zeroed = true;
513
 514    /* Round the allocated size up to the nearest cache bucket size. */
515 bucket = bucket_for_size(bufmgr, size);
516
517 /* If we don't have caching at this size, don't actually round the
518 * allocation up.
519 */
520 if (bucket == NULL) {
521 bo_size = MAX2(ALIGN(size, page_size), page_size);
522 } else {
523 bo_size = bucket->size;
524 }
525
526 mtx_lock(&bufmgr->lock);
527 /* Get a buffer out of the cache if available */
528 retry:
529 alloc_from_cache = false;
530 if (bucket != NULL && !list_empty(&bucket->head)) {
 531       /* If the oldest BO in the cache is idle, then reuse it.  Otherwise,
532 * allocate a fresh buffer to avoid stalling.
533 */
534 bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
535 if (!iris_bo_busy(bo)) {
536 alloc_from_cache = true;
537 list_del(&bo->head);
538 }
539
540 if (alloc_from_cache) {
541 if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
542 bo_free(bo);
543 iris_bo_cache_purge_bucket(bufmgr, bucket);
544 goto retry;
545 }
546
547 if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
548 bo_free(bo);
549 goto retry;
550 }
551
552 if (zeroed) {
553 void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
554 if (!map) {
555 bo_free(bo);
556 goto retry;
557 }
558 memset(map, 0, bo_size);
559 }
560 }
561 }
562
563 if (alloc_from_cache) {
564 /* If the cached BO isn't in the right memory zone, free the old
565 * memory and assign it a new address.
566 */
567 if (memzone != memzone_for_address(bo->gtt_offset)) {
568 vma_free(bufmgr, bo->gtt_offset, bo_size);
569 bo->gtt_offset = 0ull;
570 }
571 } else {
572 bo = bo_calloc();
573 if (!bo)
574 goto err;
575
576 bo->size = bo_size;
577 bo->idle = true;
578
579 struct drm_i915_gem_create create = { .size = bo_size };
580
581 /* All new BOs we get from the kernel are zeroed, so we don't need to
582 * worry about that here.
583 */
584 ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
585 if (ret != 0) {
586 free(bo);
587 goto err;
588 }
589
590 bo->gem_handle = create.handle;
591
592 bo->bufmgr = bufmgr;
593
594 bo->tiling_mode = I915_TILING_NONE;
595 bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
596 bo->stride = 0;
597
598 if (bo_set_tiling_internal(bo, tiling_mode, stride))
599 goto err_free;
600
601 /* Calling set_domain() will allocate pages for the BO outside of the
602 * struct mutex lock in the kernel, which is more efficient than waiting
603 * to create them during the first execbuf that uses the BO.
604 */
605 struct drm_i915_gem_set_domain sd = {
606 .handle = bo->gem_handle,
607 .read_domains = I915_GEM_DOMAIN_CPU,
608 .write_domain = 0,
609 };
610
611 if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
612 goto err_free;
613 }
614
615 bo->name = name;
616 p_atomic_set(&bo->refcount, 1);
617 bo->reusable = true;
618 bo->cache_coherent = bufmgr->has_llc;
619 bo->index = -1;
620 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
621
622 if (bo->gtt_offset == 0ull) {
623 bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);
624
625 if (bo->gtt_offset == 0ull)
626 goto err_free;
627 }
628
629 mtx_unlock(&bufmgr->lock);
630
631 DBG("bo_create: buf %d (%s) %llub\n", bo->gem_handle, bo->name,
632 (unsigned long long) size);
633
634 return bo;
635
636 err_free:
637 bo_free(bo);
638 err:
639 mtx_unlock(&bufmgr->lock);
640 return NULL;
641 }
642
643 struct iris_bo *
644 iris_bo_alloc(struct iris_bufmgr *bufmgr,
645 const char *name,
646 uint64_t size,
647 enum iris_memory_zone memzone)
648 {
649 return bo_alloc_internal(bufmgr, name, size, memzone,
650 0, I915_TILING_NONE, 0);
651 }
652
653 struct iris_bo *
654 iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
655 uint64_t size, enum iris_memory_zone memzone,
656 uint32_t tiling_mode, uint32_t pitch, unsigned flags)
657 {
658 return bo_alloc_internal(bufmgr, name, size, memzone,
659 flags, tiling_mode, pitch);
660 }
661
662 struct iris_bo *
663 iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
664 void *ptr, size_t size,
665 enum iris_memory_zone memzone)
666 {
667 struct iris_bo *bo;
668
669 bo = bo_calloc();
670 if (!bo)
671 return NULL;
672
673 struct drm_i915_gem_userptr arg = {
674 .user_ptr = (uintptr_t)ptr,
675 .user_size = size,
676 };
677 if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
678 goto err_free;
679 bo->gem_handle = arg.handle;
680
 681    /* Check the buffer for validity before we try to use it in a batch */
682 struct drm_i915_gem_set_domain sd = {
683 .handle = bo->gem_handle,
684 .read_domains = I915_GEM_DOMAIN_CPU,
685 };
686 if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
687 goto err_close;
688
689 bo->name = name;
690 bo->size = size;
691 bo->map_cpu = ptr;
692
693 bo->bufmgr = bufmgr;
694 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
695 bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
696 if (bo->gtt_offset == 0ull)
697 goto err_close;
698
699 p_atomic_set(&bo->refcount, 1);
700 bo->userptr = true;
701 bo->cache_coherent = true;
702 bo->index = -1;
703 bo->idle = true;
704
705 return bo;
706
707 err_close:
708 drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &bo->gem_handle);
709 err_free:
710 free(bo);
711 return NULL;
712 }
713
714 /**
 715  * Returns an iris_bo wrapping the given buffer object handle.
716 *
717 * This can be used when one application needs to pass a buffer object
718 * to another.
719 */
720 struct iris_bo *
721 iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
722 const char *name, unsigned int handle)
723 {
724 struct iris_bo *bo;
725
 726  * At the moment most applications only have a few named BOs.
 727  * For instance, in a DRI client only the render buffers passed
 728  * between X and the client are named.  And since X returns the
 729  * alternating names for the front/back buffer, a linear search
730 * provides a sufficiently fast match.
731 */
732 mtx_lock(&bufmgr->lock);
733 bo = hash_find_bo(bufmgr->name_table, handle);
734 if (bo) {
735 iris_bo_reference(bo);
736 goto out;
737 }
738
739 struct drm_gem_open open_arg = { .name = handle };
740 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
741 if (ret != 0) {
742 DBG("Couldn't reference %s handle 0x%08x: %s\n",
743 name, handle, strerror(errno));
744 bo = NULL;
745 goto out;
746 }
747 /* Now see if someone has used a prime handle to get this
748 * object from the kernel before by looking through the list
749 * again for a matching gem_handle
750 */
751 bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
752 if (bo) {
753 iris_bo_reference(bo);
754 goto out;
755 }
756
757 bo = bo_calloc();
758 if (!bo)
759 goto out;
760
761 p_atomic_set(&bo->refcount, 1);
762
763 bo->size = open_arg.size;
764 bo->gtt_offset = 0;
765 bo->bufmgr = bufmgr;
766 bo->gem_handle = open_arg.handle;
767 bo->name = name;
768 bo->global_name = handle;
769 bo->reusable = false;
770 bo->external = true;
771 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
772 bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
773
774 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
775 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
776
777 struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
778 ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
779 if (ret != 0)
780 goto err_unref;
781
782 bo->tiling_mode = get_tiling.tiling_mode;
783 bo->swizzle_mode = get_tiling.swizzle_mode;
784 /* XXX stride is unknown */
785 DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
786
787 out:
788 mtx_unlock(&bufmgr->lock);
789 return bo;
790
791 err_unref:
792 bo_free(bo);
793 mtx_unlock(&bufmgr->lock);
794 return NULL;
795 }
796
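/**
 * Release a BO entirely: unmap any CPU/WC/GTT mappings, drop it from the
 * export hash tables if it was shared, close the GEM handle, and return its
 * VMA range to the allocator.
 */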
797 static void
798 bo_free(struct iris_bo *bo)
799 {
800 struct iris_bufmgr *bufmgr = bo->bufmgr;
801
802 if (bo->map_cpu && !bo->userptr) {
803 VG_NOACCESS(bo->map_cpu, bo->size);
804 munmap(bo->map_cpu, bo->size);
805 }
806 if (bo->map_wc) {
807 VG_NOACCESS(bo->map_wc, bo->size);
808 munmap(bo->map_wc, bo->size);
809 }
810 if (bo->map_gtt) {
811 VG_NOACCESS(bo->map_gtt, bo->size);
812 munmap(bo->map_gtt, bo->size);
813 }
814
815 if (bo->external) {
816 struct hash_entry *entry;
817
818 if (bo->global_name) {
819 entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
820 _mesa_hash_table_remove(bufmgr->name_table, entry);
821 }
822
823 entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
824 _mesa_hash_table_remove(bufmgr->handle_table, entry);
825 }
826
827 /* Close this object */
828 struct drm_gem_close close = { .handle = bo->gem_handle };
829 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
830 if (ret != 0) {
831 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
832 bo->gem_handle, bo->name, strerror(errno));
833 }
834
835 vma_free(bo->bufmgr, bo->gtt_offset, bo->size);
836
837 free(bo);
838 }
839
840 /** Frees all cached buffers significantly older than @time. */
841 static void
842 cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
843 {
844 int i;
845
846 if (bufmgr->time == time)
847 return;
848
849 for (i = 0; i < bufmgr->num_buckets; i++) {
850 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
851
852 list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
853 if (time - bo->free_time <= 1)
854 break;
855
856 list_del(&bo->head);
857
858 bo_free(bo);
859 }
860 }
861
862 bufmgr->time = time;
863 }
864
865 static void
866 bo_unreference_final(struct iris_bo *bo, time_t time)
867 {
868 struct iris_bufmgr *bufmgr = bo->bufmgr;
869 struct bo_cache_bucket *bucket;
870
871 DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
872
873 bucket = bucket_for_size(bufmgr, bo->size);
874 /* Put the buffer into our internal cache for reuse if we can. */
875 if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
876 iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
877 bo->free_time = time;
878 bo->name = NULL;
879
880 list_addtail(&bo->head, &bucket->head);
881 } else {
882 bo_free(bo);
883 }
884 }
885
886 void
887 iris_bo_unreference(struct iris_bo *bo)
888 {
889 if (bo == NULL)
890 return;
891
892 assert(p_atomic_read(&bo->refcount) > 0);
893
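   /* Fast path: drop a reference without taking the lock when it cannot be
    * the last one.  atomic_add_unless() refuses to decrement only when the
    * count is exactly 1; in that case take the lock and re-check before
    * freeing.
    */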
894 if (atomic_add_unless(&bo->refcount, -1, 1)) {
895 struct iris_bufmgr *bufmgr = bo->bufmgr;
896 struct timespec time;
897
898 clock_gettime(CLOCK_MONOTONIC, &time);
899
900 mtx_lock(&bufmgr->lock);
901
902 if (p_atomic_dec_zero(&bo->refcount)) {
903 bo_unreference_final(bo, time.tv_sec);
904 cleanup_bo_cache(bufmgr, time.tv_sec);
905 }
906
907 mtx_unlock(&bufmgr->lock);
908 }
909 }
910
911 static void
912 bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
913 struct iris_bo *bo,
914 const char *action)
915 {
916 bool busy = dbg && !bo->idle;
917 double elapsed = unlikely(busy) ? -get_time() : 0.0;
918
919 iris_bo_wait_rendering(bo);
920
921 if (unlikely(busy)) {
922 elapsed += get_time();
923 if (elapsed > 1e-5) /* 0.01ms */ {
924 perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
925 action, bo->name, elapsed * 1000);
926 }
927 }
928 }
929
930 static void
931 print_flags(unsigned flags)
932 {
933 if (flags & MAP_READ)
934 DBG("READ ");
935 if (flags & MAP_WRITE)
936 DBG("WRITE ");
937 if (flags & MAP_ASYNC)
938 DBG("ASYNC ");
939 if (flags & MAP_PERSISTENT)
940 DBG("PERSISTENT ");
941 if (flags & MAP_COHERENT)
942 DBG("COHERENT ");
943 if (flags & MAP_RAW)
944 DBG("RAW ");
945 DBG("\n");
946 }
947
948 static void *
949 iris_bo_map_cpu(struct pipe_debug_callback *dbg,
950 struct iris_bo *bo, unsigned flags)
951 {
952 struct iris_bufmgr *bufmgr = bo->bufmgr;
953
954 /* We disallow CPU maps for writing to non-coherent buffers, as the
955 * CPU map can become invalidated when a batch is flushed out, which
956 * can happen at unpredictable times. You should use WC maps instead.
957 */
958 assert(bo->cache_coherent || !(flags & MAP_WRITE));
959
960 if (!bo->map_cpu) {
961 DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);
962
963 struct drm_i915_gem_mmap mmap_arg = {
964 .handle = bo->gem_handle,
965 .size = bo->size,
966 };
967 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
968 if (ret != 0) {
969 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
970 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
971 return NULL;
972 }
973 void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
974 VG_DEFINED(map, bo->size);
975
976 if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
977 VG_NOACCESS(map, bo->size);
978 munmap(map, bo->size);
979 }
980 }
981 assert(bo->map_cpu);
982
983 DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
984 bo->map_cpu);
985 print_flags(flags);
986
987 if (!(flags & MAP_ASYNC)) {
988 bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
989 }
990
991 if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
992 /* If we're reusing an existing CPU mapping, the CPU caches may
993 * contain stale data from the last time we read from that mapping.
994 * (With the BO cache, it might even be data from a previous buffer!)
995 * Even if it's a brand new mapping, the kernel may have zeroed the
996 * buffer via CPU writes.
997 *
998 * We need to invalidate those cachelines so that we see the latest
999 * contents, and so long as we only read from the CPU mmap we do not
1000 * need to write those cachelines back afterwards.
1001 *
1002       * On LLC, the empirical evidence suggests that writes from the GPU
1003 * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
1004 * cachelines. (Other reads, such as the display engine, bypass the
1005 * LLC entirely requiring us to keep dirty pixels for the scanout
1006 * out of any cache.)
1007 */
1008 gen_invalidate_range(bo->map_cpu, bo->size);
1009 }
1010
1011 return bo->map_cpu;
1012 }
1013
1014 static void *
1015 iris_bo_map_wc(struct pipe_debug_callback *dbg,
1016 struct iris_bo *bo, unsigned flags)
1017 {
1018 struct iris_bufmgr *bufmgr = bo->bufmgr;
1019
1020 if (!bo->map_wc) {
1021 DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
1022
1023 struct drm_i915_gem_mmap mmap_arg = {
1024 .handle = bo->gem_handle,
1025 .size = bo->size,
1026 .flags = I915_MMAP_WC,
1027 };
1028 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
1029 if (ret != 0) {
1030 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1031 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1032 return NULL;
1033 }
1034
1035 void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
1036 VG_DEFINED(map, bo->size);
1037
1038 if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
1039 VG_NOACCESS(map, bo->size);
1040 munmap(map, bo->size);
1041 }
1042 }
1043 assert(bo->map_wc);
1044
1045 DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
1046 print_flags(flags);
1047
1048 if (!(flags & MAP_ASYNC)) {
1049 bo_wait_with_stall_warning(dbg, bo, "WC mapping");
1050 }
1051
1052 return bo->map_wc;
1053 }
1054
1055 /**
1056 * Perform an uncached mapping via the GTT.
1057 *
1058 * Write access through the GTT is not quite fully coherent. On low power
1059 * systems especially, like modern Atoms, we can observe reads from RAM before
1060 * the write via GTT has landed. A write memory barrier that flushes the Write
1061 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
1062 * read after the write as the GTT write suffers a small delay through the GTT
1063 * indirection. The kernel uses an uncached mmio read to ensure the GTT write
1064 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
1065 * flushes prior to execbuf submission. However, if we are not informing the
1066 * kernel about our GTT writes, it will not flush before earlier access, such
1067 * as when using the cmdparser. Similarly, we need to be careful if we should
1068 * ever issue a CPU read immediately following a GTT write.
1069 *
1070 * Telling the kernel about write access also has one more important
1071 * side-effect. Upon receiving notification about the write, it cancels any
1072 * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
1073 * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
1074  * actual scanout via a mmapping, only to a backbuffer and so all the FBC/PSR
1075 * tracking is handled on the buffer exchange instead.
1076 */
1077 static void *
1078 iris_bo_map_gtt(struct pipe_debug_callback *dbg,
1079 struct iris_bo *bo, unsigned flags)
1080 {
1081 struct iris_bufmgr *bufmgr = bo->bufmgr;
1082
1083 /* Get a mapping of the buffer if we haven't before. */
1084 if (bo->map_gtt == NULL) {
1085 DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);
1086
1087 struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };
1088
1089 /* Get the fake offset back... */
1090 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
1091 if (ret != 0) {
1092 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1093 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1094 return NULL;
1095 }
1096
1097 /* and mmap it. */
1098 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1099 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
1100 if (map == MAP_FAILED) {
1101 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1102 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1103 return NULL;
1104 }
1105
1106 /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
1107 * already intercept this mmap call. However, for consistency between
1108 * all the mmap paths, we mark the pointer as defined now and mark it
1109 * as inaccessible afterwards.
1110 */
1111 VG_DEFINED(map, bo->size);
1112
1113 if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
1114 VG_NOACCESS(map, bo->size);
1115 munmap(map, bo->size);
1116 }
1117 }
1118 assert(bo->map_gtt);
1119
1120 DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
1121 print_flags(flags);
1122
1123 if (!(flags & MAP_ASYNC)) {
1124 bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
1125 }
1126
1127 return bo->map_gtt;
1128 }
1129
1130 static bool
1131 can_map_cpu(struct iris_bo *bo, unsigned flags)
1132 {
1133 if (bo->cache_coherent)
1134 return true;
1135
1136 /* Even if the buffer itself is not cache-coherent (such as a scanout), on
1137     * an LLC platform reads are always coherent (as they are performed via the
1138     * central system agent).  It is just writes that need special care, to
1139     * ensure they land in main memory rather than sticking in the CPU cache.
1140 */
1141 if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
1142 return true;
1143
1144 /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
1145 * across batch flushes where the kernel will change cache domains of the
1146     * bo, invalidating continued access to the CPU mmap on non-LLC devices.
1147 *
1148 * Similarly, ASYNC typically means that the buffer will be accessed via
1149 * both the CPU and the GPU simultaneously. Batches may be executed that
1150 * use the BO even while it is mapped. While OpenGL technically disallows
1151 * most drawing while non-persistent mappings are active, we may still use
1152 * the GPU for blits or other operations, causing batches to happen at
1153 * inconvenient times.
1154 */
1155 if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC))
1156 return false;
1157
1158 return !(flags & MAP_WRITE);
1159 }
1160
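/**
 * Map a buffer for CPU access, picking the most suitable mapping type:
 * tiled buffers (unless MAP_RAW is set) go through the GTT for detiling;
 * otherwise we prefer a cached CPU map when coherency allows, then a WC map,
 * and finally fall back to a GTT map if the direct mmap fails.
 */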
1161 void *
1162 iris_bo_map(struct pipe_debug_callback *dbg,
1163 struct iris_bo *bo, unsigned flags)
1164 {
1165 if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
1166 return iris_bo_map_gtt(dbg, bo, flags);
1167
1168 void *map;
1169
1170 if (can_map_cpu(bo, flags))
1171 map = iris_bo_map_cpu(dbg, bo, flags);
1172 else
1173 map = iris_bo_map_wc(dbg, bo, flags);
1174
1175 /* Allow the attempt to fail by falling back to the GTT where necessary.
1176 *
1177     * Not every buffer can be mmapped directly using the CPU (or WC), for
1178 * example buffers that wrap stolen memory or are imported from other
1179 * devices. For those, we have little choice but to use a GTT mmapping.
1180 * However, if we use a slow GTT mmapping for reads where we expected fast
1181 * access, that order of magnitude difference in throughput will be clearly
1182 * expressed by angry users.
1183 *
1184 * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
1185 */
1186 if (!map && !(flags & MAP_RAW)) {
1187 perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
1188 bo->name, flags);
1189 map = iris_bo_map_gtt(dbg, bo, flags);
1190 }
1191
1192 return map;
1193 }
1194
1195 /** Waits for all GPU rendering with the object to have completed. */
1196 void
1197 iris_bo_wait_rendering(struct iris_bo *bo)
1198 {
1199 /* We require a kernel recent enough for WAIT_IOCTL support.
1200 * See intel_init_bufmgr()
1201 */
1202 iris_bo_wait(bo, -1);
1203 }
1204
1205 /**
1206 * Waits on a BO for the given amount of time.
1207 *
1208 * @bo: buffer object to wait for
1209 * @timeout_ns: amount of time to wait in nanoseconds.
1210 * If value is less than 0, an infinite wait will occur.
1211 *
1212  * Returns 0 if the wait was successful, i.e. the last batch referencing the
1213 * object has completed within the allotted time. Otherwise some negative return
1214 * value describes the error. Of particular interest is -ETIME when the wait has
1215 * failed to yield the desired result.
1216 *
1217 * Similar to iris_bo_wait_rendering except a timeout parameter allows
1218 * the operation to give up after a certain amount of time. Another subtle
1219 * difference is the internal locking semantics are different (this variant does
1220 * not hold the lock for the duration of the wait). This makes the wait subject
1221 * to a larger userspace race window.
1222 *
1223 * The implementation shall wait until the object is no longer actively
1224  * referenced within a batch buffer at the time of the call.  The wait does
1225  * not guarantee that the buffer will not be re-issued via another thread, or
1226  * via a flinked handle.  Userspace must make sure this race does not occur
1227  * if such precision is important.
1228 *
1229  * Note that some kernels have broken the infinite wait for negative values
1230  * promise; upgrade to the latest stable kernel if this is the case.
1231 */
1232 int
1233 iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
1234 {
1235 struct iris_bufmgr *bufmgr = bo->bufmgr;
1236
1237 /* If we know it's idle, don't bother with the kernel round trip */
1238 if (bo->idle && !bo->external)
1239 return 0;
1240
1241 struct drm_i915_gem_wait wait = {
1242 .bo_handle = bo->gem_handle,
1243 .timeout_ns = timeout_ns,
1244 };
1245 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1246 if (ret != 0)
1247 return -errno;
1248
1249 bo->idle = true;
1250
1251 return ret;
1252 }
1253
1254 void
1255 iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
1256 {
1257 mtx_destroy(&bufmgr->lock);
1258
1259 /* Free any cached buffer objects we were going to reuse */
1260 for (int i = 0; i < bufmgr->num_buckets; i++) {
1261 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
1262
1263 list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
1264 list_del(&bo->head);
1265
1266 bo_free(bo);
1267 }
1268
1269 for (int z = 0; z < IRIS_MEMZONE_COUNT; z++)
1270 util_dynarray_fini(&bucket->vma_list[z]);
1271 }
1272
1273 _mesa_hash_table_destroy(bufmgr->name_table, NULL);
1274 _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
1275
1276 for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
1277 util_vma_heap_finish(&bufmgr->vma_allocator[z]);
1278 }
1279
1280 free(bufmgr);
1281 }
1282
1283 static int
1284 bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
1285 uint32_t stride)
1286 {
1287 struct iris_bufmgr *bufmgr = bo->bufmgr;
1288 struct drm_i915_gem_set_tiling set_tiling;
1289 int ret;
1290
1291 if (bo->global_name == 0 &&
1292 tiling_mode == bo->tiling_mode && stride == bo->stride)
1293 return 0;
1294
1295 memset(&set_tiling, 0, sizeof(set_tiling));
1296 do {
1297 /* set_tiling is slightly broken and overwrites the
1298 * input on the error path, so we have to open code
1299 * drm_ioctl.
1300 */
1301 set_tiling.handle = bo->gem_handle;
1302 set_tiling.tiling_mode = tiling_mode;
1303 set_tiling.stride = stride;
1304
1305 ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1306 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1307 if (ret == -1)
1308 return -errno;
1309
1310 bo->tiling_mode = set_tiling.tiling_mode;
1311 bo->swizzle_mode = set_tiling.swizzle_mode;
1312 bo->stride = set_tiling.stride;
1313 return 0;
1314 }
1315
1316 int
1317 iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
1318 uint32_t *swizzle_mode)
1319 {
1320 *tiling_mode = bo->tiling_mode;
1321 *swizzle_mode = bo->swizzle_mode;
1322 return 0;
1323 }
1324
1325 struct iris_bo *
1326 iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
1327 {
1328 uint32_t handle;
1329 struct iris_bo *bo;
1330
1331 mtx_lock(&bufmgr->lock);
1332 int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1333 if (ret) {
1334 DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
1335 strerror(errno));
1336 mtx_unlock(&bufmgr->lock);
1337 return NULL;
1338 }
1339
1340 /*
1341 * See if the kernel has already returned this buffer to us. Just as
1342     * for named buffers, we must not create two BOs pointing at the same
1343     * kernel object.
1344 */
1345 bo = hash_find_bo(bufmgr->handle_table, handle);
1346 if (bo) {
1347 iris_bo_reference(bo);
1348 goto out;
1349 }
1350
1351 bo = bo_calloc();
1352 if (!bo)
1353 goto out;
1354
1355 p_atomic_set(&bo->refcount, 1);
1356
1357 /* Determine size of bo. The fd-to-handle ioctl really should
1358 * return the size, but it doesn't. If we have kernel 3.12 or
1359 * later, we can lseek on the prime fd to get the size. Older
1360 * kernels will just fail, in which case we fall back to the
1361     * provided (estimated or guessed) size. */
1362 ret = lseek(prime_fd, 0, SEEK_END);
1363 if (ret != -1)
1364 bo->size = ret;
1365
1366 bo->bufmgr = bufmgr;
1367
1368 bo->gem_handle = handle;
1369 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1370
1371 bo->name = "prime";
1372 bo->reusable = false;
1373 bo->external = true;
1374 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
1375 bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
1376
1377 struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
1378 if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
1379 goto err;
1380
1381 bo->tiling_mode = get_tiling.tiling_mode;
1382 bo->swizzle_mode = get_tiling.swizzle_mode;
1383 /* XXX stride is unknown */
1384
1385 out:
1386 mtx_unlock(&bufmgr->lock);
1387 return bo;
1388
1389 err:
1390 bo_free(bo);
1391 mtx_unlock(&bufmgr->lock);
1392 return NULL;
1393 }
1394
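/**
 * Mark a BO as shared outside the driver, adding it to the handle table
 * (double-checked under the bufmgr lock) so future imports of the same GEM
 * handle resolve to this BO rather than creating a duplicate.
 */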
1395 static void
1396 iris_bo_make_external(struct iris_bo *bo)
1397 {
1398 struct iris_bufmgr *bufmgr = bo->bufmgr;
1399
1400 if (!bo->external) {
1401 mtx_lock(&bufmgr->lock);
1402 if (!bo->external) {
1403 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1404 bo->external = true;
1405 }
1406 mtx_unlock(&bufmgr->lock);
1407 }
1408 }
1409
1410 int
1411 iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
1412 {
1413 struct iris_bufmgr *bufmgr = bo->bufmgr;
1414
1415 iris_bo_make_external(bo);
1416
1417 if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
1418 DRM_CLOEXEC, prime_fd) != 0)
1419 return -errno;
1420
1421 bo->reusable = false;
1422
1423 return 0;
1424 }
1425
1426 uint32_t
1427 iris_bo_export_gem_handle(struct iris_bo *bo)
1428 {
1429 iris_bo_make_external(bo);
1430
1431 return bo->gem_handle;
1432 }
1433
1434 int
1435 iris_bo_flink(struct iris_bo *bo, uint32_t *name)
1436 {
1437 struct iris_bufmgr *bufmgr = bo->bufmgr;
1438
1439 if (!bo->global_name) {
1440 struct drm_gem_flink flink = { .handle = bo->gem_handle };
1441
1442 if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
1443 return -errno;
1444
1445 iris_bo_make_external(bo);
1446 mtx_lock(&bufmgr->lock);
1447 if (!bo->global_name) {
1448 bo->global_name = flink.name;
1449 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
1450 }
1451 mtx_unlock(&bufmgr->lock);
1452
1453 bo->reusable = false;
1454 }
1455
1456 *name = bo->global_name;
1457 return 0;
1458 }
1459
1460 static void
1461 add_bucket(struct iris_bufmgr *bufmgr, int size)
1462 {
1463 unsigned int i = bufmgr->num_buckets;
1464
1465 assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
1466
1467 list_inithead(&bufmgr->cache_bucket[i].head);
1468 for (int z = 0; z < IRIS_MEMZONE_COUNT; z++)
1469 util_dynarray_init(&bufmgr->cache_bucket[i].vma_list[z], NULL);
1470 bufmgr->cache_bucket[i].size = size;
1471 bufmgr->num_buckets++;
1472
1473 assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
1474 assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
1475 assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
1476 }
1477
1478 static void
1479 init_cache_buckets(struct iris_bufmgr *bufmgr)
1480 {
1481 uint64_t size, cache_max_size = 64 * 1024 * 1024;
1482
1483    /* OK, so power-of-two buckets were too wasteful of memory.
1484 * Give 3 other sizes between each power of two, to hopefully
1485 * cover things accurately enough. (The alternative is
1486 * probably to just go for exact matching of sizes, and assume
1487 * that for things like composited window resize the tiled
1488 * width/height alignment and rounding of sizes to pages will
1489 * get us useful cache hit rates anyway)
1490 */
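   /* The resulting bucket sizes (illustrative): 4, 8, and 12 KB, then
    * 16/20/24/28 KB, 32/40/48/56 KB, and so on -- each power-of-two size plus
    * three intermediate steps -- up through the final 64 MB step and its
    * intermediates.
    */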
1491 add_bucket(bufmgr, PAGE_SIZE);
1492 add_bucket(bufmgr, PAGE_SIZE * 2);
1493 add_bucket(bufmgr, PAGE_SIZE * 3);
1494
1495 /* Initialize the linked lists for BO reuse cache. */
1496 for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
1497 add_bucket(bufmgr, size);
1498
1499 add_bucket(bufmgr, size + size * 1 / 4);
1500 add_bucket(bufmgr, size + size * 2 / 4);
1501 add_bucket(bufmgr, size + size * 3 / 4);
1502 }
1503 }
1504
1505 uint32_t
1506 iris_create_hw_context(struct iris_bufmgr *bufmgr)
1507 {
1508 struct drm_i915_gem_context_create create = { };
1509 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
1510 if (ret != 0) {
1511 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
1512 return 0;
1513 }
1514
1515 return create.ctx_id;
1516 }
1517
1518 int
1519 iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
1520 uint32_t ctx_id,
1521 int priority)
1522 {
1523 struct drm_i915_gem_context_param p = {
1524 .ctx_id = ctx_id,
1525 .param = I915_CONTEXT_PARAM_PRIORITY,
1526 .value = priority,
1527 };
1528 int err;
1529
1530 err = 0;
1531 if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
1532 err = -errno;
1533
1534 return err;
1535 }
1536
1537 void
1538 iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
1539 {
1540 struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
1541
1542 if (ctx_id != 0 &&
1543 drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
1544 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
1545 strerror(errno));
1546 }
1547 }
1548
1549 int
1550 iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
1551 {
1552 struct drm_i915_reg_read reg_read = { .offset = offset };
1553 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
1554
1555 *result = reg_read.val;
1556 return ret;
1557 }
1558
1559 /**
1560 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 1561  * and manage buffer objects.
1562 *
1563 * \param fd File descriptor of the opened DRM device.
1564 */
1565 struct iris_bufmgr *
1566 iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
1567 {
1568 struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
1569 if (bufmgr == NULL)
1570 return NULL;
1571
1572 /* Handles to buffer objects belong to the device fd and are not
1573 * reference counted by the kernel. If the same fd is used by
1574 * multiple parties (threads sharing the same screen bufmgr, or
1575 * even worse the same device fd passed to multiple libraries)
1576 * ownership of those handles is shared by those independent parties.
1577 *
1578 * Don't do this! Ensure that each library/bufmgr has its own device
1579 * fd so that its namespace does not clash with another.
1580 */
1581 bufmgr->fd = fd;
1582
1583 if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
1584 free(bufmgr);
1585 return NULL;
1586 }
1587
1588 bufmgr->has_llc = devinfo->has_llc;
1589
1590 STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
1591 const uint64_t _4GB = 1ull << 32;
1592
1593 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
1594 PAGE_SIZE, _4GB);
1595 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
1596 IRIS_MEMZONE_SURFACE_START + IRIS_BINDER_SIZE,
1597 _4GB - IRIS_BINDER_SIZE);
1598 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
1599 IRIS_MEMZONE_DYNAMIC_START + IRIS_BORDER_COLOR_POOL_SIZE,
1600 _4GB - IRIS_BORDER_COLOR_POOL_SIZE);
1601 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
1602 IRIS_MEMZONE_OTHER_START,
1603 (1ull << 48) - IRIS_MEMZONE_OTHER_START);
1604
1605 // XXX: driconf
1606 bufmgr->bo_reuse = env_var_as_boolean("bo_reuse", true);
1607
1608 init_cache_buckets(bufmgr);
1609
1610 bufmgr->name_table =
1611 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1612 bufmgr->handle_table =
1613 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1614
1615 return bufmgr;
1616 }