bc9ad13c171abf97238fac30f91c810141623dfc
[mesa.git] / src / gallium / drivers / iris / iris_bufmgr.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_bufmgr.c
25 *
26 * The Iris buffer manager.
27 *
28 * This is the driver's main interface to GEM in the kernel: it allocates
29 * buffer objects (BOs), assigns them virtual addresses from per-memzone
30 * VMA allocators, keeps freed BOs in size buckets for reuse, and provides
31 * CPU, WC, and GTT mappings.
32 */
33
34 #ifdef HAVE_CONFIG_H
35 #include "config.h"
36 #endif
37
38 #include <xf86drm.h>
39 #include <util/u_atomic.h>
40 #include <fcntl.h>
41 #include <stdio.h>
42 #include <stdlib.h>
43 #include <string.h>
44 #include <unistd.h>
45 #include <assert.h>
46 #include <sys/ioctl.h>
47 #include <sys/mman.h>
48 #include <sys/stat.h>
49 #include <sys/types.h>
50 #include <stdbool.h>
51 #include <time.h>
52 #include <unistd.h>
53
54 #include "errno.h"
55 #include "common/gen_aux_map.h"
56 #include "common/gen_clflush.h"
57 #include "dev/gen_debug.h"
58 #include "common/gen_gem.h"
59 #include "dev/gen_device_info.h"
60 #include "main/macros.h"
61 #include "os/os_mman.h"
62 #include "util/debug.h"
63 #include "util/macros.h"
64 #include "util/hash_table.h"
65 #include "util/list.h"
66 #include "util/os_file.h"
67 #include "util/u_dynarray.h"
68 #include "util/vma.h"
69 #include "iris_bufmgr.h"
70 #include "iris_context.h"
71 #include "string.h"
72
73 #include "drm-uapi/i915_drm.h"
74
75 #ifdef HAVE_VALGRIND
76 #include <valgrind.h>
77 #include <memcheck.h>
78 #define VG(x) x
79 #else
80 #define VG(x)
81 #endif
82
83 /* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
84 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
85 * leaked. All because it does not call VG(cli_free) from its
86 * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
87 * and allocation, we mark it available for use upon mmapping and remove
88 * it upon unmapping.
89 */
90 #define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
91 #define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
92
93 #define PAGE_SIZE 4096
94
95 #define WARN_ONCE(cond, fmt...) do { \
96 if (unlikely(cond)) { \
97 static bool _warned = false; \
98 if (!_warned) { \
99 fprintf(stderr, "WARNING: "); \
100 fprintf(stderr, fmt); \
101 _warned = true; \
102 } \
103 } \
104 } while (0)
105
106 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
107
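/* Atomically add @add to *v, unless *v already equals @unless; returns
 * nonzero if *v equalled @unless (and no addition was performed). This is
 * how iris_bo_unreference() detects a refcount that is about to hit zero.
 */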
108 static inline int
109 atomic_add_unless(int *v, int add, int unless)
110 {
111 int c, old;
112 c = p_atomic_read(v);
113 while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
114 c = old;
115 return c == unless;
116 }
117
118 static const char *
119 memzone_name(enum iris_memory_zone memzone)
120 {
121 const char *names[] = {
122 [IRIS_MEMZONE_SHADER] = "shader",
123 [IRIS_MEMZONE_BINDER] = "binder",
124 [IRIS_MEMZONE_SURFACE] = "surface",
125 [IRIS_MEMZONE_DYNAMIC] = "dynamic",
126 [IRIS_MEMZONE_OTHER] = "other",
127 [IRIS_MEMZONE_BORDER_COLOR_POOL] = "bordercolor",
128 };
129 assert(memzone < ARRAY_SIZE(names));
130 return names[memzone];
131 }
132
133 struct bo_cache_bucket {
134 /** List of cached BOs. */
135 struct list_head head;
136
137 /** Size of this bucket, in bytes. */
138 uint64_t size;
139 };
140
141 struct bo_export {
142 /** File descriptor associated with a handle export. */
143 int drm_fd;
144
145 /** GEM handle in drm_fd */
146 uint32_t gem_handle;
147
148 struct list_head link;
149 };
150
151 struct iris_bufmgr {
152 /**
153 * Link in the global list of buffer managers (global_bufmgr_list).
154 */
155 struct list_head link;
156
157 uint32_t refcount;
158
159 int fd;
160
161 mtx_t lock;
162
163 /** Array of lists of cached gem objects of power-of-two sizes */
164 struct bo_cache_bucket cache_bucket[14 * 4];
165 int num_buckets;
166 time_t time;
167
168 struct hash_table *name_table;
169 struct hash_table *handle_table;
170
171 /**
172 * List of BOs which we've effectively freed, but are hanging on to
173 * until they're idle before closing and returning the VMA.
174 */
175 struct list_head zombie_list;
176
177 struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];
178
179 bool has_llc:1;
180 bool has_mmap_offset:1;
181 bool bo_reuse:1;
182
183 struct gen_aux_map_context *aux_map_ctx;
184 };
185
186 static mtx_t global_bufmgr_list_mutex = _MTX_INITIALIZER_NP;
187 static struct list_head global_bufmgr_list = {
188 .next = &global_bufmgr_list,
189 .prev = &global_bufmgr_list,
190 };
191
192 static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
193 uint32_t stride);
194
195 static void bo_free(struct iris_bo *bo);
196
197 static struct iris_bo *
198 find_and_ref_external_bo(struct hash_table *ht, unsigned int key)
199 {
200 struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
201 struct iris_bo *bo = entry ? entry->data : NULL;
202
203 if (bo) {
204 assert(bo->external);
205 assert(!bo->reusable);
206
207 /* Being non-reusable, the BO cannot be in the cache lists, but it
208 * may be in the zombie list if it had reached zero references, but
209 * we hadn't yet closed it...and then reimported the same BO. If it
210 * is, then remove it since it's now been resurrected.
211 */
212 if (bo->head.prev || bo->head.next)
213 list_del(&bo->head);
214
215 iris_bo_reference(bo);
216 }
217
218 return bo;
219 }
220
221 /**
222 * This function finds the correct bucket fit for the input size.
223 * It works in O(1) time by computing the bucket index directly from the
224 * requested size instead of iterating through all the buckets.
225 */
226 static struct bo_cache_bucket *
227 bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size)
228 {
229 /* Calculating the pages and rounding up to the page size. */
230 const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
231
232 *   Row  Bucket sizes     clz((x-1) | 3)   Row     Column
233 *        in pages                           stride  size
234 *    0:   1  2  3  4  ->   30 30 30 30      4       1
235 *    1:   5  6  7  8  ->   29 29 29 29      4       1
236 *    2:  10 12 14 16  ->   28 28 28 28      8       2
237 *    3:  20 24 28 32  ->   27 27 27 27     16       4
238 */
239 const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
240 const unsigned row_max_pages = 4 << row;
241
242 /* The '& ~2' is the special case for row 0. In row 0, max pages /
243 * 2 is 2, but the previous row maximum is zero (because there is
244 * no previous row). All row maximum sizes are powers of 2, so that
245 * is the only case where that bit will be set.
246 */
247 const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
248 int col_size_log2 = row - 1;
249 col_size_log2 += (col_size_log2 < 0);
250
251 const unsigned col = (pages - prev_row_max_pages +
252 ((1 << col_size_log2) - 1)) >> col_size_log2;
253
254 /* Calculating the index based on the row and column. */
255 const unsigned index = (row * 4) + (col - 1);
256
257 return (index < bufmgr->num_buckets) ?
258 &bufmgr->cache_bucket[index] : NULL;
259 }
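/* Worked example (illustrative, assuming the 4096-byte PAGE_SIZE above):
 * a 300 KB request is 75 pages.  (75 - 1) | 3 = 75 and clz(75) = 25, so
 * row = 5, which covers 65..128 pages in columns of 16 pages.  Then
 * col = (75 - 64 + 15) >> 4 = 1 and index = 5 * 4 + (1 - 1) = 20, which
 * init_cache_buckets() below fills with the 80-page (320 KB) bucket.
 */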
260
261 enum iris_memory_zone
262 iris_memzone_for_address(uint64_t address)
263 {
264 STATIC_ASSERT(IRIS_MEMZONE_OTHER_START > IRIS_MEMZONE_DYNAMIC_START);
265 STATIC_ASSERT(IRIS_MEMZONE_DYNAMIC_START > IRIS_MEMZONE_SURFACE_START);
266 STATIC_ASSERT(IRIS_MEMZONE_SURFACE_START > IRIS_MEMZONE_BINDER_START);
267 STATIC_ASSERT(IRIS_MEMZONE_BINDER_START > IRIS_MEMZONE_SHADER_START);
268 STATIC_ASSERT(IRIS_BORDER_COLOR_POOL_ADDRESS == IRIS_MEMZONE_DYNAMIC_START);
269
270 if (address >= IRIS_MEMZONE_OTHER_START)
271 return IRIS_MEMZONE_OTHER;
272
273 if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
274 return IRIS_MEMZONE_BORDER_COLOR_POOL;
275
276 if (address > IRIS_MEMZONE_DYNAMIC_START)
277 return IRIS_MEMZONE_DYNAMIC;
278
279 if (address >= IRIS_MEMZONE_SURFACE_START)
280 return IRIS_MEMZONE_SURFACE;
281
282 if (address >= IRIS_MEMZONE_BINDER_START)
283 return IRIS_MEMZONE_BINDER;
284
285 return IRIS_MEMZONE_SHADER;
286 }
287
288 /**
289 * Allocate a section of virtual memory for a buffer, assigning an address.
290 *
291 * This uses either the bucket allocator for the given size, or the large
292 * object allocator (util_vma).
293 */
294 static uint64_t
295 vma_alloc(struct iris_bufmgr *bufmgr,
296 enum iris_memory_zone memzone,
297 uint64_t size,
298 uint64_t alignment)
299 {
300 /* Force alignment to be some number of pages */
301 alignment = ALIGN(alignment, PAGE_SIZE);
302
303 if (memzone == IRIS_MEMZONE_BORDER_COLOR_POOL)
304 return IRIS_BORDER_COLOR_POOL_ADDRESS;
305
306 /* The binder handles its own allocations. Return non-zero here. */
307 if (memzone == IRIS_MEMZONE_BINDER)
308 return IRIS_MEMZONE_BINDER_START;
309
310 uint64_t addr =
311 util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size, alignment);
312
313 assert((addr >> 48ull) == 0);
314 assert((addr % alignment) == 0);
315
316 return gen_canonical_address(addr);
317 }
318
319 static void
320 vma_free(struct iris_bufmgr *bufmgr,
321 uint64_t address,
322 uint64_t size)
323 {
324 if (address == IRIS_BORDER_COLOR_POOL_ADDRESS)
325 return;
326
327 /* Un-canonicalize the address. */
328 address = gen_48b_address(address);
329
330 if (address == 0ull)
331 return;
332
333 enum iris_memory_zone memzone = iris_memzone_for_address(address);
334
335 /* The binder handles its own allocations. */
336 if (memzone == IRIS_MEMZONE_BINDER)
337 return;
338
339 util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
340 }
341
342 int
343 iris_bo_busy(struct iris_bo *bo)
344 {
345 struct iris_bufmgr *bufmgr = bo->bufmgr;
346 struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
347
348 int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
349 if (ret == 0) {
350 bo->idle = !busy.busy;
351 return busy.busy;
352 }
353 return false;
354 }
355
356 int
357 iris_bo_madvise(struct iris_bo *bo, int state)
358 {
359 struct drm_i915_gem_madvise madv = {
360 .handle = bo->gem_handle,
361 .madv = state,
362 .retained = 1,
363 };
364
365 gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
366
367 return madv.retained;
368 }
369
370 static struct iris_bo *
371 bo_calloc(void)
372 {
373 struct iris_bo *bo = calloc(1, sizeof(*bo));
374 if (!bo)
375 return NULL;
376
377 list_inithead(&bo->exports);
378
379 bo->hash = _mesa_hash_pointer(bo);
380
381 return bo;
382 }
383
384 static struct iris_bo *
385 alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
386 struct bo_cache_bucket *bucket,
387 uint32_t alignment,
388 enum iris_memory_zone memzone,
389 unsigned flags,
390 bool match_zone)
391 {
392 if (!bucket)
393 return NULL;
394
395 struct iris_bo *bo = NULL;
396
397 list_for_each_entry_safe(struct iris_bo, cur, &bucket->head, head) {
398 /* Try a little harder to find one that's already in the right memzone */
399 if (match_zone && memzone != iris_memzone_for_address(cur->gtt_offset))
400 continue;
401
402 /* If the last BO in the cache is busy, there are no idle BOs. Bail,
403 * either falling back to a non-matching memzone, or if that fails,
404 * allocating a fresh buffer.
405 */
406 if (iris_bo_busy(cur))
407 return NULL;
408
409 list_del(&cur->head);
410
411 /* Tell the kernel we need this BO. If it still exists, we're done! */
412 if (iris_bo_madvise(cur, I915_MADV_WILLNEED)) {
413 bo = cur;
414 break;
415 }
416
417 /* This BO was purged, throw it out and keep looking. */
418 bo_free(cur);
419 }
420
421 if (!bo)
422 return NULL;
423
424 if (bo->aux_map_address) {
425 /* This buffer was associated with an aux-buffer range. We make sure
426 * buffers are not reused from the cache while they are busy being used
427 * by an executing batch. Since we are here, the buffer is no longer in
428 * use by any batch, and it was freed (that is how it ended up in the
429 * cache). Therefore its old aux-buffer range can safely be removed
430 * from the aux-map.
431 */
432 if (bo->bufmgr->aux_map_ctx)
433 gen_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
434 bo->size);
435 bo->aux_map_address = 0;
436 }
437
438 /* If the cached BO isn't in the right memory zone, or the alignment
439 * isn't sufficient, free the old memory and assign it a new address.
440 */
441 if (memzone != iris_memzone_for_address(bo->gtt_offset) ||
442 bo->gtt_offset % alignment != 0) {
443 vma_free(bufmgr, bo->gtt_offset, bo->size);
444 bo->gtt_offset = 0ull;
445 }
446
447 /* Zero the contents if necessary. If this fails, fall back to
448 * allocating a fresh BO, which will always be zeroed by the kernel.
449 */
450 if (flags & BO_ALLOC_ZEROED) {
451 void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
452 if (map) {
453 memset(map, 0, bo->size);
454 } else {
455 bo_free(bo);
456 return NULL;
457 }
458 }
459
460 return bo;
461 }
462
463 static struct iris_bo *
464 alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size)
465 {
466 struct iris_bo *bo = bo_calloc();
467 if (!bo)
468 return NULL;
469
470 struct drm_i915_gem_create create = { .size = bo_size };
471
472 /* All new BOs we get from the kernel are zeroed, so we don't need to
473 * worry about that here.
474 */
475 if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0) {
476 free(bo);
477 return NULL;
478 }
479
480 bo->gem_handle = create.handle;
481 bo->bufmgr = bufmgr;
482 bo->size = bo_size;
483 bo->idle = true;
484 bo->tiling_mode = I915_TILING_NONE;
485 bo->stride = 0;
486
487 /* Calling set_domain() will allocate pages for the BO outside of the
488 * struct mutex lock in the kernel, which is more efficient than waiting
489 * to create them during the first execbuf that uses the BO.
490 */
491 struct drm_i915_gem_set_domain sd = {
492 .handle = bo->gem_handle,
493 .read_domains = I915_GEM_DOMAIN_CPU,
494 .write_domain = 0,
495 };
496
497 if (gen_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
498 bo_free(bo);
499 return NULL;
500 }
501
502 return bo;
503 }
504
505 static struct iris_bo *
506 bo_alloc_internal(struct iris_bufmgr *bufmgr,
507 const char *name,
508 uint64_t size,
509 uint32_t alignment,
510 enum iris_memory_zone memzone,
511 unsigned flags,
512 uint32_t tiling_mode,
513 uint32_t stride)
514 {
515 struct iris_bo *bo;
516 unsigned int page_size = getpagesize();
517 struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);
518
519 /* Round the size up to the bucket size, or if we don't have caching
520 * at this size, a multiple of the page size.
521 */
522 uint64_t bo_size =
523 bucket ? bucket->size : MAX2(ALIGN(size, page_size), page_size);
524
525 mtx_lock(&bufmgr->lock);
526
527 /* Get a buffer out of the cache if available. First, we try to find
528 * one with a matching memory zone so we can avoid reallocating VMA.
529 */
530 bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags, true);
531
532 /* If that fails, we try for any cached BO, without matching memzone. */
533 if (!bo) {
534 bo = alloc_bo_from_cache(bufmgr, bucket, alignment, memzone, flags,
535 false);
536 }
537
538 mtx_unlock(&bufmgr->lock);
539
540 if (!bo) {
541 bo = alloc_fresh_bo(bufmgr, bo_size);
542 if (!bo)
543 return NULL;
544 }
545
546 if (bo->gtt_offset == 0ull) {
547 mtx_lock(&bufmgr->lock);
548 bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, alignment);
549 mtx_unlock(&bufmgr->lock);
550
551 if (bo->gtt_offset == 0ull)
552 goto err_free;
553 }
554
555 if (bo_set_tiling_internal(bo, tiling_mode, stride))
556 goto err_free;
557
558 bo->name = name;
559 p_atomic_set(&bo->refcount, 1);
560 bo->reusable = bucket && bufmgr->bo_reuse;
561 bo->cache_coherent = bufmgr->has_llc;
562 bo->index = -1;
563 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
564
565 /* By default, capture all driver-internal buffers like shader kernels,
566 * surface states, dynamic states, border colors, and so on.
567 */
568 if (memzone < IRIS_MEMZONE_OTHER)
569 bo->kflags |= EXEC_OBJECT_CAPTURE;
570
571 if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
572 struct drm_i915_gem_caching arg = {
573 .handle = bo->gem_handle,
574 .caching = 1,
575 };
576 if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
577 bo->cache_coherent = true;
578 bo->reusable = false;
579 }
580 }
581
582 DBG("bo_create: buf %d (%s) (%s memzone) %llub\n", bo->gem_handle,
583 bo->name, memzone_name(memzone), (unsigned long long) size);
584
585 return bo;
586
587 err_free:
588 bo_free(bo);
589 return NULL;
590 }
591
592 struct iris_bo *
593 iris_bo_alloc(struct iris_bufmgr *bufmgr,
594 const char *name,
595 uint64_t size,
596 enum iris_memory_zone memzone)
597 {
598 return bo_alloc_internal(bufmgr, name, size, 1, memzone,
599 0, I915_TILING_NONE, 0);
600 }
601
602 struct iris_bo *
603 iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
604 uint64_t size, uint32_t alignment,
605 enum iris_memory_zone memzone,
606 uint32_t tiling_mode, uint32_t pitch, unsigned flags)
607 {
608 return bo_alloc_internal(bufmgr, name, size, alignment, memzone,
609 flags, tiling_mode, pitch);
610 }
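/* Illustrative usage sketch (not called anywhere in this file): a typical
 * allocate/fill/release sequence using the public API above.  The debug
 * callback may be NULL, and mappings are cached on the BO (torn down in
 * bo_free()), so there is no explicit unmap call.
 *
 *    struct iris_bo *bo =
 *       iris_bo_alloc(bufmgr, "scratch", 4096, IRIS_MEMZONE_OTHER);
 *    void *map = bo ? iris_bo_map(NULL, bo, MAP_WRITE) : NULL;
 *    if (map)
 *       memset(map, 0xa5, 4096);
 *    iris_bo_unreference(bo);
 */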
611
612 struct iris_bo *
613 iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
614 void *ptr, size_t size,
615 enum iris_memory_zone memzone)
616 {
617 struct drm_gem_close close = { 0, };
618 struct iris_bo *bo;
619
620 bo = bo_calloc();
621 if (!bo)
622 return NULL;
623
624 struct drm_i915_gem_userptr arg = {
625 .user_ptr = (uintptr_t)ptr,
626 .user_size = size,
627 };
628 if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
629 goto err_free;
630 bo->gem_handle = arg.handle;
631
632 /* Check the buffer for validity before we try and use it in a batch */
633 struct drm_i915_gem_set_domain sd = {
634 .handle = bo->gem_handle,
635 .read_domains = I915_GEM_DOMAIN_CPU,
636 };
637 if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
638 goto err_close;
639
640 bo->name = name;
641 bo->size = size;
642 bo->map_cpu = ptr;
643
644 bo->bufmgr = bufmgr;
645 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
646
647 mtx_lock(&bufmgr->lock);
648 bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
649 mtx_unlock(&bufmgr->lock);
650
651 if (bo->gtt_offset == 0ull)
652 goto err_close;
653
654 p_atomic_set(&bo->refcount, 1);
655 bo->userptr = true;
656 bo->cache_coherent = true;
657 bo->index = -1;
658 bo->idle = true;
659
660 return bo;
661
662 err_close:
663 close.handle = bo->gem_handle;
664 gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
665 err_free:
666 free(bo);
667 return NULL;
668 }
669
670 /**
671 * Returns an iris_bo wrapping the given buffer object handle.
672 *
673 * This can be used when one application needs to pass a buffer object
674 * to another.
675 */
676 struct iris_bo *
677 iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
678 const char *name, unsigned int handle)
679 {
680 struct iris_bo *bo;
681
682 /* At the moment most applications only have a few named BOs.
683 * For instance, in a DRI client only the render buffers passed
684 * between X and the client are named. And since X returns the
685 * alternating names for the front/back buffers, a linear search
686 * provides a sufficiently fast match.
687 */
688 mtx_lock(&bufmgr->lock);
689 bo = find_and_ref_external_bo(bufmgr->name_table, handle);
690 if (bo)
691 goto out;
692
693 struct drm_gem_open open_arg = { .name = handle };
694 int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
695 if (ret != 0) {
696 DBG("Couldn't reference %s handle 0x%08x: %s\n",
697 name, handle, strerror(errno));
698 bo = NULL;
699 goto out;
700 }
701 /* Now see if someone has used a prime handle to get this
702 * object from the kernel before by looking through the list
703 * again for a matching gem_handle
704 */
705 bo = find_and_ref_external_bo(bufmgr->handle_table, open_arg.handle);
706 if (bo)
707 goto out;
708
709 bo = bo_calloc();
710 if (!bo)
711 goto out;
712
713 p_atomic_set(&bo->refcount, 1);
714
715 bo->size = open_arg.size;
716 bo->bufmgr = bufmgr;
717 bo->gem_handle = open_arg.handle;
718 bo->name = name;
719 bo->global_name = handle;
720 bo->reusable = false;
721 bo->external = true;
722 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
723 bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
724
725 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
726 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
727
728 struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
729 ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
730 if (ret != 0)
731 goto err_unref;
732
733 bo->tiling_mode = get_tiling.tiling_mode;
734
735 /* XXX stride is unknown */
736 DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
737
738 out:
739 mtx_unlock(&bufmgr->lock);
740 return bo;
741
742 err_unref:
743 bo_free(bo);
744 mtx_unlock(&bufmgr->lock);
745 return NULL;
746 }
747
748 static void
749 bo_close(struct iris_bo *bo)
750 {
751 struct iris_bufmgr *bufmgr = bo->bufmgr;
752
753 if (bo->external) {
754 struct hash_entry *entry;
755
756 if (bo->global_name) {
757 entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
758 _mesa_hash_table_remove(bufmgr->name_table, entry);
759 }
760
761 entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
762 _mesa_hash_table_remove(bufmgr->handle_table, entry);
763
764 list_for_each_entry_safe(struct bo_export, export, &bo->exports, link) {
765 struct drm_gem_close close = { .handle = export->gem_handle };
766 gen_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);
767
768 list_del(&export->link);
769 free(export);
770 }
771 } else {
772 assert(list_is_empty(&bo->exports));
773 }
774
775 /* Close this object */
776 struct drm_gem_close close = { .handle = bo->gem_handle };
777 int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
778 if (ret != 0) {
779 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
780 bo->gem_handle, bo->name, strerror(errno));
781 }
782
783 if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
784 gen_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
785 bo->size);
786 }
787
788 /* Return the VMA for reuse */
789 vma_free(bo->bufmgr, bo->gtt_offset, bo->size);
790
791 free(bo);
792 }
793
794 static void
795 bo_free(struct iris_bo *bo)
796 {
797 struct iris_bufmgr *bufmgr = bo->bufmgr;
798
799 if (bo->map_cpu && !bo->userptr) {
800 VG_NOACCESS(bo->map_cpu, bo->size);
801 os_munmap(bo->map_cpu, bo->size);
802 }
803 if (bo->map_wc) {
804 VG_NOACCESS(bo->map_wc, bo->size);
805 os_munmap(bo->map_wc, bo->size);
806 }
807 if (bo->map_gtt) {
808 VG_NOACCESS(bo->map_gtt, bo->size);
809 os_munmap(bo->map_gtt, bo->size);
810 }
811
812 if (bo->idle) {
813 bo_close(bo);
814 } else {
815 /* Defer closing the GEM BO and returning the VMA for reuse until the
816 * BO is idle. Just move it to the dead list for now.
817 */
818 list_addtail(&bo->head, &bufmgr->zombie_list);
819 }
820 }
821
822 /** Frees all cached buffers significantly older than @time. */
823 static void
824 cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
825 {
826 int i;
827
828 if (bufmgr->time == time)
829 return;
830
831 for (i = 0; i < bufmgr->num_buckets; i++) {
832 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
833
834 list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
835 if (time - bo->free_time <= 1)
836 break;
837
838 list_del(&bo->head);
839
840 bo_free(bo);
841 }
842 }
843
844 list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
845 /* Stop once we reach a busy BO - all others past this point were
846 * freed more recently so are likely also busy.
847 */
848 if (!bo->idle && iris_bo_busy(bo))
849 break;
850
851 list_del(&bo->head);
852 bo_close(bo);
853 }
854
855 bufmgr->time = time;
856 }
857
858 static void
859 bo_unreference_final(struct iris_bo *bo, time_t time)
860 {
861 struct iris_bufmgr *bufmgr = bo->bufmgr;
862 struct bo_cache_bucket *bucket;
863
864 DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
865
866 bucket = NULL;
867 if (bo->reusable)
868 bucket = bucket_for_size(bufmgr, bo->size);
869 /* Put the buffer into our internal cache for reuse if we can. */
870 if (bucket && iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
871 bo->free_time = time;
872 bo->name = NULL;
873
874 list_addtail(&bo->head, &bucket->head);
875 } else {
876 bo_free(bo);
877 }
878 }
879
880 void
881 iris_bo_unreference(struct iris_bo *bo)
882 {
883 if (bo == NULL)
884 return;
885
886 assert(p_atomic_read(&bo->refcount) > 0);
887
888 if (atomic_add_unless(&bo->refcount, -1, 1)) {
889 struct iris_bufmgr *bufmgr = bo->bufmgr;
890 struct timespec time;
891
892 clock_gettime(CLOCK_MONOTONIC, &time);
893
894 mtx_lock(&bufmgr->lock);
895
896 if (p_atomic_dec_zero(&bo->refcount)) {
897 bo_unreference_final(bo, time.tv_sec);
898 cleanup_bo_cache(bufmgr, time.tv_sec);
899 }
900
901 mtx_unlock(&bufmgr->lock);
902 }
903 }
904
905 static void
906 bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
907 struct iris_bo *bo,
908 const char *action)
909 {
910 bool busy = dbg && !bo->idle;
911 double elapsed = unlikely(busy) ? -get_time() : 0.0;
912
913 iris_bo_wait_rendering(bo);
914
915 if (unlikely(busy)) {
916 elapsed += get_time();
917 if (elapsed > 1e-5) /* 0.01ms */ {
918 perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
919 action, bo->name, elapsed * 1000);
920 }
921 }
922 }
923
924 static void
925 print_flags(unsigned flags)
926 {
927 if (flags & MAP_READ)
928 DBG("READ ");
929 if (flags & MAP_WRITE)
930 DBG("WRITE ");
931 if (flags & MAP_ASYNC)
932 DBG("ASYNC ");
933 if (flags & MAP_PERSISTENT)
934 DBG("PERSISTENT ");
935 if (flags & MAP_COHERENT)
936 DBG("COHERENT ");
937 if (flags & MAP_RAW)
938 DBG("RAW ");
939 DBG("\n");
940 }
941
942 static void *
943 iris_bo_gem_mmap_legacy(struct pipe_debug_callback *dbg,
944 struct iris_bo *bo, bool wc)
945 {
946 struct iris_bufmgr *bufmgr = bo->bufmgr;
947
948 struct drm_i915_gem_mmap mmap_arg = {
949 .handle = bo->gem_handle,
950 .size = bo->size,
951 .flags = wc ? I915_MMAP_WC : 0,
952 };
953
954 int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
955 if (ret != 0) {
956 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
957 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
958 return NULL;
959 }
960 void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
961
962 return map;
963 }
964
965 static void *
966 iris_bo_gem_mmap_offset(struct pipe_debug_callback *dbg, struct iris_bo *bo,
967 bool wc)
968 {
969 struct iris_bufmgr *bufmgr = bo->bufmgr;
970
971 struct drm_i915_gem_mmap_offset mmap_arg = {
972 .handle = bo->gem_handle,
973 .flags = wc ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
974 };
975
976 /* Get the fake offset back */
977 int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg);
978 if (ret != 0) {
979 DBG("%s:%d: Error preparing buffer %d (%s): %s .\n",
980 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
981 return NULL;
982 }
983
984 /* And map it */
985 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
986 bufmgr->fd, mmap_arg.offset);
987 if (map == MAP_FAILED) {
988 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
989 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
990 return NULL;
991 }
992
993 return map;
994 }
995
996 static void *
997 iris_bo_gem_mmap(struct pipe_debug_callback *dbg, struct iris_bo *bo, bool wc)
998 {
999 struct iris_bufmgr *bufmgr = bo->bufmgr;
1000
1001 if (bufmgr->has_mmap_offset)
1002 return iris_bo_gem_mmap_offset(dbg, bo, wc);
1003 else
1004 return iris_bo_gem_mmap_legacy(dbg, bo, wc);
1005 }
1006
1007 static void *
1008 iris_bo_map_cpu(struct pipe_debug_callback *dbg,
1009 struct iris_bo *bo, unsigned flags)
1010 {
1011 /* We disallow CPU maps for writing to non-coherent buffers, as the
1012 * CPU map can become invalidated when a batch is flushed out, which
1013 * can happen at unpredictable times. You should use WC maps instead.
1014 */
1015 assert(bo->cache_coherent || !(flags & MAP_WRITE));
1016
1017 if (!bo->map_cpu) {
1018 DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);
1019 void *map = iris_bo_gem_mmap(dbg, bo, false);
1020 if (!map) {
1021 return NULL;
1022 }
1023
1024 VG_DEFINED(map, bo->size);
1025
1026 if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
1027 VG_NOACCESS(map, bo->size);
1028 os_munmap(map, bo->size);
1029 }
1030 }
1031 assert(bo->map_cpu);
1032
1033 DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
1034 bo->map_cpu);
1035 print_flags(flags);
1036
1037 if (!(flags & MAP_ASYNC)) {
1038 bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
1039 }
1040
1041 if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
1042 /* If we're reusing an existing CPU mapping, the CPU caches may
1043 * contain stale data from the last time we read from that mapping.
1044 * (With the BO cache, it might even be data from a previous buffer!)
1045 * Even if it's a brand new mapping, the kernel may have zeroed the
1046 * buffer via CPU writes.
1047 *
1048 * We need to invalidate those cachelines so that we see the latest
1049 * contents, and so long as we only read from the CPU mmap we do not
1050 * need to write those cachelines back afterwards.
1051 *
1052 * On LLC, the empirical evidence suggests that writes from the GPU
1053 * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
1054 * cachelines. (Other reads, such as the display engine, bypass the
1055 * LLC entirely requiring us to keep dirty pixels for the scanout
1056 * out of any cache.)
1057 */
1058 gen_invalidate_range(bo->map_cpu, bo->size);
1059 }
1060
1061 return bo->map_cpu;
1062 }
1063
1064 static void *
1065 iris_bo_map_wc(struct pipe_debug_callback *dbg,
1066 struct iris_bo *bo, unsigned flags)
1067 {
1068 if (!bo->map_wc) {
1069 DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
1070 void *map = iris_bo_gem_mmap(dbg, bo, true);
1071 if (!map) {
1072 return NULL;
1073 }
1074
1075 VG_DEFINED(map, bo->size);
1076
1077 if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
1078 VG_NOACCESS(map, bo->size);
1079 os_munmap(map, bo->size);
1080 }
1081 }
1082 assert(bo->map_wc);
1083
1084 DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
1085 print_flags(flags);
1086
1087 if (!(flags & MAP_ASYNC)) {
1088 bo_wait_with_stall_warning(dbg, bo, "WC mapping");
1089 }
1090
1091 return bo->map_wc;
1092 }
1093
1094 /**
1095 * Perform an uncached mapping via the GTT.
1096 *
1097 * Write access through the GTT is not quite fully coherent. On low power
1098 * systems especially, like modern Atoms, we can observe reads from RAM before
1099 * the write via GTT has landed. A write memory barrier that flushes the Write
1100 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
1101 * read after the write as the GTT write suffers a small delay through the GTT
1102 * indirection. The kernel uses an uncached mmio read to ensure the GTT write
1103 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
1104 * flushes prior to execbuf submission. However, if we are not informing the
1105 * kernel about our GTT writes, it will not flush before earlier access, such
1106 * as when using the cmdparser. Similarly, we need to be careful if we should
1107 * ever issue a CPU read immediately following a GTT write.
1108 *
1109 * Telling the kernel about write access also has one more important
1110 * side-effect. Upon receiving notification about the write, it cancels any
1111 * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
1112 * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
1113 * actual scanout via an mmapping, only to a backbuffer and so all the FBC/PSR
1114 * tracking is handled on the buffer exchange instead.
1115 */
1116 static void *
1117 iris_bo_map_gtt(struct pipe_debug_callback *dbg,
1118 struct iris_bo *bo, unsigned flags)
1119 {
1120 struct iris_bufmgr *bufmgr = bo->bufmgr;
1121
1122 /* Get a mapping of the buffer if we haven't before. */
1123 if (bo->map_gtt == NULL) {
1124 DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);
1125
1126 struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };
1127
1128 /* Get the fake offset back... */
1129 int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
1130 if (ret != 0) {
1131 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1132 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1133 return NULL;
1134 }
1135
1136 /* and mmap it. */
1137 void *map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1138 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
1139 if (map == MAP_FAILED) {
1140 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1141 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1142 return NULL;
1143 }
1144
1145 /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
1146 * already intercept this mmap call. However, for consistency between
1147 * all the mmap paths, we mark the pointer as defined now and mark it
1148 * as inaccessible afterwards.
1149 */
1150 VG_DEFINED(map, bo->size);
1151
1152 if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
1153 VG_NOACCESS(map, bo->size);
1154 os_munmap(map, bo->size);
1155 }
1156 }
1157 assert(bo->map_gtt);
1158
1159 DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
1160 print_flags(flags);
1161
1162 if (!(flags & MAP_ASYNC)) {
1163 bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
1164 }
1165
1166 return bo->map_gtt;
1167 }
1168
1169 static bool
1170 can_map_cpu(struct iris_bo *bo, unsigned flags)
1171 {
1172 if (bo->cache_coherent)
1173 return true;
1174
1175 /* Even if the buffer itself is not cache-coherent (such as a scanout), on
1176 * an LLC platform reads are always coherent (as they are performed via the
1177 * central system agent). It is only writes that need special care to
1178 * ensure they land in main memory and do not stick in the CPU cache.
1179 */
1180 if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
1181 return true;
1182
1183 /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
1184 * across batch flushes where the kernel will change cache domains of the
1185 * bo, invalidating continued access to the CPU mmap on non-LLC device.
1186 *
1187 * Similarly, ASYNC typically means that the buffer will be accessed via
1188 * both the CPU and the GPU simultaneously. Batches may be executed that
1189 * use the BO even while it is mapped. While OpenGL technically disallows
1190 * most drawing while non-persistent mappings are active, we may still use
1191 * the GPU for blits or other operations, causing batches to happen at
1192 * inconvenient times.
1193 *
1194 * If RAW is set, we expect the caller to be able to handle a WC buffer
1195 * more efficiently than the involuntary clflushes.
1196 */
1197 if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC | MAP_RAW))
1198 return false;
1199
1200 return !(flags & MAP_WRITE);
1201 }
1202
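/* Summary of the mapping strategy implemented below: tiled BOs mapped
 * without MAP_RAW go through the GTT so the fence detiles them; otherwise
 * we prefer a cached CPU map when can_map_cpu() allows it, fall back to a
 * WC map, and finally retry via the GTT if the direct mmap fails (e.g. for
 * imported or stolen-memory buffers).
 */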
1203 void *
1204 iris_bo_map(struct pipe_debug_callback *dbg,
1205 struct iris_bo *bo, unsigned flags)
1206 {
1207 if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
1208 return iris_bo_map_gtt(dbg, bo, flags);
1209
1210 void *map;
1211
1212 if (can_map_cpu(bo, flags))
1213 map = iris_bo_map_cpu(dbg, bo, flags);
1214 else
1215 map = iris_bo_map_wc(dbg, bo, flags);
1216
1217 /* Allow the attempt to fail by falling back to the GTT where necessary.
1218 *
1219 * Not every buffer can be mmaped directly using the CPU (or WC), for
1220 * example buffers that wrap stolen memory or are imported from other
1221 * devices. For those, we have little choice but to use a GTT mmapping.
1222 * However, if we use a slow GTT mmapping for reads where we expected fast
1223 * access, that order of magnitude difference in throughput will be clearly
1224 * expressed by angry users.
1225 *
1226 * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
1227 */
1228 if (!map && !(flags & MAP_RAW)) {
1229 perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
1230 bo->name, flags);
1231 map = iris_bo_map_gtt(dbg, bo, flags);
1232 }
1233
1234 return map;
1235 }
1236
1237 /** Waits for all GPU rendering with the object to have completed. */
1238 void
1239 iris_bo_wait_rendering(struct iris_bo *bo)
1240 {
1241 /* We require a kernel recent enough for WAIT_IOCTL support.
1242 * See intel_init_bufmgr()
1243 */
1244 iris_bo_wait(bo, -1);
1245 }
1246
1247 /**
1248 * Waits on a BO for the given amount of time.
1249 *
1250 * @bo: buffer object to wait for
1251 * @timeout_ns: amount of time to wait in nanoseconds.
1252 * If value is less than 0, an infinite wait will occur.
1253 *
1254 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1255 * object has completed within the allotted time. Otherwise some negative return
1256 * value describes the error. Of particular interest is -ETIME when the wait has
1257 * failed to yield the desired result.
1258 *
1259 * Similar to iris_bo_wait_rendering except that a timeout parameter allows
1260 * the operation to give up after a certain amount of time. Another subtle
1261 * difference is the internal locking semantics are different (this variant does
1262 * not hold the lock for the duration of the wait). This makes the wait subject
1263 * to a larger userspace race window.
1264 *
1265 * The implementation shall wait until the object is no longer actively
1266 * referenced within a batch buffer at the time of the call. The wait does
1267 * not guarantee that the buffer won't be re-issued by another thread or via
1268 * a flinked handle. Userspace must make sure this race does not occur if such precision
1269 * is important.
1270 *
1271 * Note that some kernels have broken the promise of an infinite wait for
1272 * negative values; upgrade to the latest stable kernel if this is the case.
1273 */
1274 int
1275 iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
1276 {
1277 struct iris_bufmgr *bufmgr = bo->bufmgr;
1278
1279 /* If we know it's idle, don't bother with the kernel round trip */
1280 if (bo->idle && !bo->external)
1281 return 0;
1282
1283 struct drm_i915_gem_wait wait = {
1284 .bo_handle = bo->gem_handle,
1285 .timeout_ns = timeout_ns,
1286 };
1287 int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1288 if (ret != 0)
1289 return -errno;
1290
1291 bo->idle = true;
1292
1293 return ret;
1294 }
1295
1296 static void
1297 iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
1298 {
1299 /* Free aux-map buffers */
1300 gen_aux_map_finish(bufmgr->aux_map_ctx);
1301
1302 /* bufmgr will no longer try to free VMA entries in the aux-map */
1303 bufmgr->aux_map_ctx = NULL;
1304
1305 mtx_destroy(&bufmgr->lock);
1306
1307 /* Free any cached buffer objects we were going to reuse */
1308 for (int i = 0; i < bufmgr->num_buckets; i++) {
1309 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
1310
1311 list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
1312 list_del(&bo->head);
1313
1314 bo_free(bo);
1315 }
1316 }
1317
1318 /* Close any buffer objects on the dead list. */
1319 list_for_each_entry_safe(struct iris_bo, bo, &bufmgr->zombie_list, head) {
1320 list_del(&bo->head);
1321 bo_close(bo);
1322 }
1323
1324 _mesa_hash_table_destroy(bufmgr->name_table, NULL);
1325 _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
1326
1327 for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
1328 if (z != IRIS_MEMZONE_BINDER)
1329 util_vma_heap_finish(&bufmgr->vma_allocator[z]);
1330 }
1331
1332 close(bufmgr->fd);
1333
1334 free(bufmgr);
1335 }
1336
1337 static int
1338 bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
1339 uint32_t stride)
1340 {
1341 struct iris_bufmgr *bufmgr = bo->bufmgr;
1342 struct drm_i915_gem_set_tiling set_tiling;
1343 int ret;
1344
1345 if (bo->global_name == 0 &&
1346 tiling_mode == bo->tiling_mode && stride == bo->stride)
1347 return 0;
1348
1349 memset(&set_tiling, 0, sizeof(set_tiling));
1350 do {
1351 /* set_tiling is slightly broken and overwrites the
1352 * input on the error path, so we have to open code
1353 * drm_ioctl.
1354 */
1355 set_tiling.handle = bo->gem_handle;
1356 set_tiling.tiling_mode = tiling_mode;
1357 set_tiling.stride = stride;
1358
1359 ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1360 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1361 if (ret == -1)
1362 return -errno;
1363
1364 bo->tiling_mode = set_tiling.tiling_mode;
1365 bo->stride = set_tiling.stride;
1366 return 0;
1367 }
1368
1369 struct iris_bo *
1370 iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd,
1371 uint32_t tiling, uint32_t stride)
1372 {
1373 uint32_t handle;
1374 struct iris_bo *bo;
1375
1376 mtx_lock(&bufmgr->lock);
1377 int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1378 if (ret) {
1379 DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
1380 strerror(errno));
1381 mtx_unlock(&bufmgr->lock);
1382 return NULL;
1383 }
1384
1385 /*
1386 * See if the kernel has already returned this buffer to us. Just as
1387 * for named buffers, we must not create two bo's pointing at the same
1388 * kernel object
1389 */
1390 bo = find_and_ref_external_bo(bufmgr->handle_table, handle);
1391 if (bo)
1392 goto out;
1393
1394 bo = bo_calloc();
1395 if (!bo)
1396 goto out;
1397
1398 p_atomic_set(&bo->refcount, 1);
1399
1400 /* Determine size of bo. The fd-to-handle ioctl really should
1401 * return the size, but it doesn't. If we have kernel 3.12 or
1402 * later, we can lseek on the prime fd to get the size. Older
1403 * kernels will just fail, in which case we fall back to the
1404 * provided (estimated or guessed) size. */
1405 ret = lseek(prime_fd, 0, SEEK_END);
1406 if (ret != -1)
1407 bo->size = ret;
1408
1409 bo->bufmgr = bufmgr;
1410 bo->name = "prime";
1411 bo->reusable = false;
1412 bo->external = true;
1413 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
1414 bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
1415 bo->gem_handle = handle;
1416 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1417
1418 struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
1419 if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
1420 goto err;
1421
1422 if (get_tiling.tiling_mode == tiling || tiling > I915_TILING_LAST) {
1423 bo->tiling_mode = get_tiling.tiling_mode;
1424 /* XXX stride is unknown */
1425 } else {
1426 if (bo_set_tiling_internal(bo, tiling, stride)) {
1427 goto err;
1428 }
1429 }
1430
1431 out:
1432 mtx_unlock(&bufmgr->lock);
1433 return bo;
1434
1435 err:
1436 bo_free(bo);
1437 mtx_unlock(&bufmgr->lock);
1438 return NULL;
1439 }
1440
1441 static void
1442 iris_bo_make_external_locked(struct iris_bo *bo)
1443 {
1444 if (!bo->external) {
1445 _mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
1446 /* If a BO is going to be used externally, it could be sent to the
1447 * display HW. So make sure our CPU mappings don't assume cache
1448 * coherency since display is outside that cache.
1449 */
1450 bo->cache_coherent = false;
1451 bo->external = true;
1452 bo->reusable = false;
1453 }
1454 }
1455
1456 void
1457 iris_bo_make_external(struct iris_bo *bo)
1458 {
1459 struct iris_bufmgr *bufmgr = bo->bufmgr;
1460
1461 if (bo->external) {
1462 assert(!bo->reusable);
1463 return;
1464 }
1465
1466 mtx_lock(&bufmgr->lock);
1467 iris_bo_make_external_locked(bo);
1468 mtx_unlock(&bufmgr->lock);
1469 }
1470
1471 int
1472 iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
1473 {
1474 struct iris_bufmgr *bufmgr = bo->bufmgr;
1475
1476 iris_bo_make_external(bo);
1477
1478 if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
1479 DRM_CLOEXEC, prime_fd) != 0)
1480 return -errno;
1481
1482 return 0;
1483 }
1484
1485 uint32_t
1486 iris_bo_export_gem_handle(struct iris_bo *bo)
1487 {
1488 iris_bo_make_external(bo);
1489
1490 return bo->gem_handle;
1491 }
1492
1493 int
1494 iris_bo_flink(struct iris_bo *bo, uint32_t *name)
1495 {
1496 struct iris_bufmgr *bufmgr = bo->bufmgr;
1497
1498 if (!bo->global_name) {
1499 struct drm_gem_flink flink = { .handle = bo->gem_handle };
1500
1501 if (gen_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
1502 return -errno;
1503
1504 mtx_lock(&bufmgr->lock);
1505 if (!bo->global_name) {
1506 iris_bo_make_external_locked(bo);
1507 bo->global_name = flink.name;
1508 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
1509 }
1510 mtx_unlock(&bufmgr->lock);
1511 }
1512
1513 *name = bo->global_name;
1514 return 0;
1515 }
1516
1517 int
1518 iris_bo_export_gem_handle_for_device(struct iris_bo *bo, int drm_fd,
1519 uint32_t *out_handle)
1520 {
1521 /* Only add the new GEM handle to the list of exports if it belongs to a
1522 * different GEM device. Otherwise we might close the same buffer multiple
1523 * times.
1524 */
1525 struct iris_bufmgr *bufmgr = bo->bufmgr;
1526 int ret = os_same_file_description(drm_fd, bufmgr->fd);
1527 WARN_ONCE(ret < 0,
1528 "Kernel has no file descriptor comparison support: %s\n",
1529 strerror(errno));
1530 if (ret == 0) {
1531 *out_handle = iris_bo_export_gem_handle(bo);
1532 return 0;
1533 }
1534
1535 struct bo_export *export = calloc(1, sizeof(*export));
1536 if (!export)
1537 return -ENOMEM;
1538
1539 export->drm_fd = drm_fd;
1540
1541 int dmabuf_fd = -1;
1542 int err = iris_bo_export_dmabuf(bo, &dmabuf_fd);
1543 if (err) {
1544 free(export);
1545 return err;
1546 }
1547
1548 mtx_lock(&bufmgr->lock);
1549 err = drmPrimeFDToHandle(drm_fd, dmabuf_fd, &export->gem_handle);
1550 close(dmabuf_fd);
1551 if (err) {
1552 mtx_unlock(&bufmgr->lock);
1553 free(export);
1554 return err;
1555 }
1556
1557 bool found = false;
1558 list_for_each_entry(struct bo_export, iter, &bo->exports, link) {
1559 if (iter->drm_fd != drm_fd)
1560 continue;
1561 /* Here we assume that for a given DRM fd, we'll always get back the
1562 * same GEM handle for a given buffer.
1563 */
1564 assert(iter->gem_handle == export->gem_handle);
1565 free(export);
1566 export = iter;
1567 found = true;
1568 break;
1569 }
1570 if (!found)
1571 list_addtail(&export->link, &bo->exports);
1572
1573 mtx_unlock(&bufmgr->lock);
1574
1575 *out_handle = export->gem_handle;
1576
1577 return 0;
1578 }
1579
1580 static void
1581 add_bucket(struct iris_bufmgr *bufmgr, int size)
1582 {
1583 unsigned int i = bufmgr->num_buckets;
1584
1585 assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
1586
1587 list_inithead(&bufmgr->cache_bucket[i].head);
1588 bufmgr->cache_bucket[i].size = size;
1589 bufmgr->num_buckets++;
1590
1591 assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
1592 assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
1593 assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
1594 }
1595
1596 static void
1597 init_cache_buckets(struct iris_bufmgr *bufmgr)
1598 {
1599 uint64_t size, cache_max_size = 64 * 1024 * 1024;
1600
1601 /* OK, so power of two buckets was too wasteful of memory.
1602 * Give 3 other sizes between each power of two, to hopefully
1603 * cover things accurately enough. (The alternative is
1604 * probably to just go for exact matching of sizes, and assume
1605 * that for things like composited window resize the tiled
1606 * width/height alignment and rounding of sizes to pages will
1607 * get us useful cache hit rates anyway)
1608 */
1609 add_bucket(bufmgr, PAGE_SIZE);
1610 add_bucket(bufmgr, PAGE_SIZE * 2);
1611 add_bucket(bufmgr, PAGE_SIZE * 3);
1612
1613 /* Initialize the linked lists for BO reuse cache. */
1614 for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
1615 add_bucket(bufmgr, size);
1616
1617 add_bucket(bufmgr, size + size * 1 / 4);
1618 add_bucket(bufmgr, size + size * 2 / 4);
1619 add_bucket(bufmgr, size + size * 3 / 4);
1620 }
1621 }
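/* For reference (derived from the code above): this produces buckets of
 * 4, 8, and 12 KB, then four buckets per power-of-two step -- 16, 20, 24,
 * 28 KB; 32, 40, 48, 56 KB; and so on -- with the final 64 MB iteration
 * adding 64, 80, 96, and 112 MB buckets, for 55 buckets in total (the
 * cache_bucket array holds 14 * 4 = 56 entries).
 */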
1622
1623 uint32_t
1624 iris_create_hw_context(struct iris_bufmgr *bufmgr)
1625 {
1626 struct drm_i915_gem_context_create create = { };
1627 int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
1628 if (ret != 0) {
1629 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
1630 return 0;
1631 }
1632
1633 /* Upon declaring a GPU hang, the kernel will zap the guilty context
1634 * back to the default logical HW state and attempt to continue on to
1635 * our next submitted batchbuffer. However, our render batches assume
1636 * the previous GPU state is preserved, and only emit commands needed
1637 * to incrementally change that state. In particular, we inherit the
1638 * STATE_BASE_ADDRESS and PIPELINE_SELECT settings, which are critical.
1639 * With default base addresses, our next batches will almost certainly
1640 * cause more GPU hangs, leading to repeated hangs until we're banned
1641 * or the machine is dead.
1642 *
1643 * Here we tell the kernel not to attempt to recover our context but
1644 * immediately (on the next batchbuffer submission) report that the
1645 * context is lost, and we will do the recovery ourselves. Ideally,
1646 * we'll have two lost batches instead of a continual stream of hangs.
1647 */
1648 struct drm_i915_gem_context_param p = {
1649 .ctx_id = create.ctx_id,
1650 .param = I915_CONTEXT_PARAM_RECOVERABLE,
1651 .value = false,
1652 };
1653 drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
1654
1655 return create.ctx_id;
1656 }
1657
1658 static int
1659 iris_hw_context_get_priority(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
1660 {
1661 struct drm_i915_gem_context_param p = {
1662 .ctx_id = ctx_id,
1663 .param = I915_CONTEXT_PARAM_PRIORITY,
1664 };
1665 drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
1666 return p.value; /* on error, return 0 i.e. default priority */
1667 }
1668
1669 int
1670 iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
1671 uint32_t ctx_id,
1672 int priority)
1673 {
1674 struct drm_i915_gem_context_param p = {
1675 .ctx_id = ctx_id,
1676 .param = I915_CONTEXT_PARAM_PRIORITY,
1677 .value = priority,
1678 };
1679 int err;
1680
1681 err = 0;
1682 if (gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
1683 err = -errno;
1684
1685 return err;
1686 }
1687
1688 uint32_t
1689 iris_clone_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
1690 {
1691 uint32_t new_ctx = iris_create_hw_context(bufmgr);
1692
1693 if (new_ctx) {
1694 int priority = iris_hw_context_get_priority(bufmgr, ctx_id);
1695 iris_hw_context_set_priority(bufmgr, new_ctx, priority);
1696 }
1697
1698 return new_ctx;
1699 }
1700
1701 void
1702 iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
1703 {
1704 struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
1705
1706 if (ctx_id != 0 &&
1707 gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
1708 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
1709 strerror(errno));
1710 }
1711 }
1712
1713 int
1714 iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
1715 {
1716 struct drm_i915_reg_read reg_read = { .offset = offset };
1717 int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
1718
1719 *result = reg_read.val;
1720 return ret;
1721 }
1722
1723 static uint64_t
1724 iris_gtt_size(int fd)
1725 {
1726 /* We use the default (already allocated) context to determine
1727 * the default configuration of the virtual address space.
1728 */
1729 struct drm_i915_gem_context_param p = {
1730 .param = I915_CONTEXT_PARAM_GTT_SIZE,
1731 };
1732 if (!gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
1733 return p.value;
1734
1735 return 0;
1736 }
1737
1738 static struct gen_buffer *
1739 gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
1740 {
1741 struct gen_buffer *buf = malloc(sizeof(struct gen_buffer));
1742 if (!buf)
1743 return NULL;
1744
1745 struct iris_bufmgr *bufmgr = (struct iris_bufmgr *)driver_ctx;
1746
1747 struct iris_bo *bo =
1748 iris_bo_alloc_tiled(bufmgr, "aux-map", size, 64 * 1024,
1749 IRIS_MEMZONE_OTHER, I915_TILING_NONE, 0, 0);
1750
1751 buf->driver_bo = bo;
1752 buf->gpu = bo->gtt_offset;
1753 buf->gpu_end = buf->gpu + bo->size;
1754 buf->map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
1755 return buf;
1756 }
1757
1758 static void
1759 gen_aux_map_buffer_free(void *driver_ctx, struct gen_buffer *buffer)
1760 {
1761 iris_bo_unreference((struct iris_bo*)buffer->driver_bo);
1762 free(buffer);
1763 }
1764
1765 static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
1766 .alloc = gen_aux_map_buffer_alloc,
1767 .free = gen_aux_map_buffer_free,
1768 };
1769
1770 static int
1771 gem_param(int fd, int name)
1772 {
1773 int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */
1774
1775 struct drm_i915_getparam gp = { .param = name, .value = &v };
1776 if (gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
1777 return -1;
1778
1779 return v;
1780 }
1781
1782 /**
1783 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1784 * and manage buffer objects.
1785 *
1786 * \param fd File descriptor of the opened DRM device.
1787 */
1788 static struct iris_bufmgr *
1789 iris_bufmgr_create(struct gen_device_info *devinfo, int fd, bool bo_reuse)
1790 {
1791 uint64_t gtt_size = iris_gtt_size(fd);
1792 if (gtt_size <= IRIS_MEMZONE_OTHER_START)
1793 return NULL;
1794
1795 struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
1796 if (bufmgr == NULL)
1797 return NULL;
1798
1799 /* Handles to buffer objects belong to the device fd and are not
1800 * reference counted by the kernel. If the same fd is used by
1801 * multiple parties (threads sharing the same screen bufmgr, or
1802 * even worse the same device fd passed to multiple libraries)
1803 * ownership of those handles is shared by those independent parties.
1804 *
1805 * Don't do this! Ensure that each library/bufmgr has its own device
1806 * fd so that its namespace does not clash with another.
1807 */
1808 bufmgr->fd = dup(fd);
1809
1810 p_atomic_set(&bufmgr->refcount, 1);
1811
1812 if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
1813 close(bufmgr->fd);
1814 free(bufmgr);
1815 return NULL;
1816 }
1817
1818 list_inithead(&bufmgr->zombie_list);
1819
1820 bufmgr->has_llc = devinfo->has_llc;
1821 bufmgr->bo_reuse = bo_reuse;
1822 bufmgr->has_mmap_offset = gem_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;
1823
1824 STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
1825 const uint64_t _4GB = 1ull << 32;
1826 const uint64_t _2GB = 1ull << 31;
1827
1828 /* The STATE_BASE_ADDRESS size field can only hold 1 page shy of 4GB */
1829 const uint64_t _4GB_minus_1 = _4GB - PAGE_SIZE;
1830
1831 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
1832 PAGE_SIZE, _4GB_minus_1 - PAGE_SIZE);
1833 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
1834 IRIS_MEMZONE_SURFACE_START,
1835 _4GB_minus_1 - IRIS_MAX_BINDERS * IRIS_BINDER_SIZE);
1836 /* TODO: Why does limiting to 2GB help some state items on gen12?
1837 * - CC Viewport Pointer
1838 * - Blend State Pointer
1839 * - Color Calc State Pointer
1840 */
1841 const uint64_t dynamic_pool_size =
1842 (devinfo->gen >= 12 ? _2GB : _4GB_minus_1) - IRIS_BORDER_COLOR_POOL_SIZE;
1843 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
1844 IRIS_MEMZONE_DYNAMIC_START + IRIS_BORDER_COLOR_POOL_SIZE,
1845 dynamic_pool_size);
1846
1847 /* Leave the last 4GB out of the high vma range, so that no state
1848 * base address + size can overflow 48 bits.
1849 */
1850 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
1851 IRIS_MEMZONE_OTHER_START,
1852 (gtt_size - _4GB) - IRIS_MEMZONE_OTHER_START);
1853
1854 init_cache_buckets(bufmgr);
1855
1856 bufmgr->name_table =
1857 _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
1858 bufmgr->handle_table =
1859 _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
1860
1861 if (devinfo->gen >= 12) {
1862 bufmgr->aux_map_ctx = gen_aux_map_init(bufmgr, &aux_map_allocator,
1863 devinfo);
1864 assert(bufmgr->aux_map_ctx);
1865 }
1866
1867 return bufmgr;
1868 }
1869
1870 static struct iris_bufmgr *
1871 iris_bufmgr_ref(struct iris_bufmgr *bufmgr)
1872 {
1873 p_atomic_inc(&bufmgr->refcount);
1874 return bufmgr;
1875 }
1876
1877 void
1878 iris_bufmgr_unref(struct iris_bufmgr *bufmgr)
1879 {
1880 mtx_lock(&global_bufmgr_list_mutex);
1881 if (p_atomic_dec_zero(&bufmgr->refcount)) {
1882 list_del(&bufmgr->link);
1883 iris_bufmgr_destroy(bufmgr);
1884 }
1885 mtx_unlock(&global_bufmgr_list_mutex);
1886 }
1887
1888 /**
1889 * Gets an already existing GEM buffer manager or creates a new one.
1890 *
1891 * \param fd File descriptor of the opened DRM device.
1892 */
1893 struct iris_bufmgr *
1894 iris_bufmgr_get_for_fd(struct gen_device_info *devinfo, int fd, bool bo_reuse)
1895 {
1896 struct stat st;
1897
1898 if (fstat(fd, &st))
1899 return NULL;
1900
1901 struct iris_bufmgr *bufmgr = NULL;
1902
1903 mtx_lock(&global_bufmgr_list_mutex);
1904 list_for_each_entry(struct iris_bufmgr, iter_bufmgr, &global_bufmgr_list, link) {
1905 struct stat iter_st;
1906 if (fstat(iter_bufmgr->fd, &iter_st))
1907 continue;
1908
1909 if (st.st_rdev == iter_st.st_rdev) {
1910 assert(iter_bufmgr->bo_reuse == bo_reuse);
1911 bufmgr = iris_bufmgr_ref(iter_bufmgr);
1912 goto unlock;
1913 }
1914 }
1915
1916 bufmgr = iris_bufmgr_create(devinfo, fd, bo_reuse);
1917 if (bufmgr) list_addtail(&bufmgr->link, &global_bufmgr_list);
1918
1919 unlock:
1920 mtx_unlock(&global_bufmgr_list_mutex);
1921
1922 return bufmgr;
1923 }
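/* Illustrative usage sketch (hypothetical caller, e.g. during screen
 * creation; not part of this file):
 *
 *    struct iris_bufmgr *bufmgr =
 *       iris_bufmgr_get_for_fd(devinfo, fd, bo_reuse);
 *    if (!bufmgr)
 *       return false;
 *    uint32_t ctx_id = iris_create_hw_context(bufmgr);
 *    ...
 *    iris_destroy_hw_context(bufmgr, ctx_id);
 *    iris_bufmgr_unref(bufmgr);
 */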
1924
1925 int
1926 iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr)
1927 {
1928 return bufmgr->fd;
1929 }
1930
1931 void*
1932 iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr)
1933 {
1934 return bufmgr->aux_map_ctx;
1935 }