src/gallium/drivers/iris/iris_bufmgr.c (mesa.git, commit 68f092369605a62f0ab9715b94f29e7c2c02478f)
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifdef HAVE_CONFIG_H
25 #include "config.h"
26 #endif
27
28 #include <xf86drm.h>
29 #include <util/u_atomic.h>
30 #include <fcntl.h>
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <unistd.h>
35 #include <assert.h>
36 #include <sys/ioctl.h>
37 #include <sys/mman.h>
38 #include <sys/stat.h>
39 #include <sys/types.h>
40 #include <stdbool.h>
41 #include <time.h>
42
43 #include "errno.h"
44 #ifndef ETIME
45 #define ETIME ETIMEDOUT
46 #endif
47 #include "common/gen_clflush.h"
48 #include "common/gen_debug.h"
49 #include "dev/gen_device_info.h"
50 #include "main/macros.h"
51 #include "util/debug.h"
52 #include "util/macros.h"
53 #include "util/hash_table.h"
54 #include "util/list.h"
55 #include "util/u_dynarray.h"
56 #include "util/vma.h"
57 #include "iris_bufmgr.h"
58 #include "iris_context.h"
59 #include "string.h"
60
61 #include "drm-uapi/i915_drm.h"
62
63 #ifdef HAVE_VALGRIND
64 #include <valgrind.h>
65 #include <memcheck.h>
66 #define VG(x) x
67 #else
68 #define VG(x)
69 #endif
70
71 /* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
72 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
73 * leaked. All because it does not call VG(cli_free) from its
74 * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
75  * an allocation, we mark it available for use upon mmapping and remove
76 * it upon unmapping.
77 */
78 #define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
79 #define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
80
81 #define PAGE_SIZE 4096
82
83 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
84
85 /**
86  * Call ioctl, restarting if it is interrupted
87 */
88 int
89 drm_ioctl(int fd, unsigned long request, void *arg)
90 {
91 int ret;
92
93 do {
94 ret = ioctl(fd, request, arg);
95 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
96 return ret;
97 }
98
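/* Atomically add 'add' to '*v', unless '*v' currently equals 'unless'.
 *
 * Returns true if '*v' equaled 'unless' (and was therefore left untouched),
 * and false if the addition was performed.
 */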
99 static inline int
100 atomic_add_unless(int *v, int add, int unless)
101 {
102 int c, old;
103 c = p_atomic_read(v);
104 while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
105 c = old;
106 return c == unless;
107 }
108
109 /*
110 * Idea:
111 *
112 * Have a bitmap-allocator for each BO cache bucket size. Because bo_alloc
113 * rounds up allocations to the bucket size anyway, we can make 1 bit in the
114 * bitmap represent N pages of memory, where N = <bucket size / page size>.
115 * Allocations and frees always set/unset a single bit. Because ffsll only
116 * works on uint64_t, use a tree(?) of those.
117 *
118 * Nodes contain a starting address and a uint64_t bitmap. (pair-of-uint64_t)
119 * Bitmap uses 1 for a free block, 0 for in-use.
120 *
121 * Bucket contains...
122 *
123 * Dynamic array of nodes. (pointer, two ints)
124 */
125
126 struct vma_bucket_node {
127 uint64_t start_address;
128 uint64_t bitmap;
129 };
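/* For example, with an 8192-byte bucket, each vma_bucket_node covers a
 * 64 * 8192 = 512KB block of address space; bit i of the bitmap tracks the
 * 8KB slot at start_address + i * 8192 (1 = free, 0 = in use).
 */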
130
131 struct bo_cache_bucket {
132 /** List of cached BOs. */
133 struct list_head head;
134
135 /** Size of this bucket, in bytes. */
136 uint64_t size;
137
138 /** List of vma_bucket_nodes */
139 struct util_dynarray vma_list[IRIS_MEMZONE_COUNT];
140 };
141
142 struct iris_bufmgr {
143 int fd;
144
145 mtx_t lock;
146
147 /** Array of lists of cached gem objects of power-of-two sizes */
148 struct bo_cache_bucket cache_bucket[14 * 4];
149 int num_buckets;
150 time_t time;
151
152 struct hash_table *name_table;
153 struct hash_table *handle_table;
154
155 struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];
156
157 bool has_llc:1;
158 bool bo_reuse:1;
159 };
160
161 static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
162 uint32_t stride);
163
164 static void bo_free(struct iris_bo *bo);
165
166 static uint64_t __vma_alloc(struct iris_bufmgr *bufmgr,
167 enum iris_memory_zone memzone,
168 uint64_t size, uint64_t alignment);
169
170 static uint32_t
171 key_hash_uint(const void *key)
172 {
173 return _mesa_hash_data(key, 4);
174 }
175
176 static bool
177 key_uint_equal(const void *a, const void *b)
178 {
179 return *((unsigned *) a) == *((unsigned *) b);
180 }
181
182 static struct iris_bo *
183 hash_find_bo(struct hash_table *ht, unsigned int key)
184 {
185 struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
186 return entry ? (struct iris_bo *) entry->data : NULL;
187 }
188
189 /**
190 * This function finds the correct bucket fit for the input size.
191  * The bucket index is computed in O(1) directly from the requested size,
192  * rather than by iterating through all the buckets.
193 */
194 static struct bo_cache_bucket *
195 bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size)
196 {
197    /* Compute the number of pages, rounding the size up to a page boundary. */
198 const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
199
200 /* Row Bucket sizes clz((x-1) | 3) Row Column
201 * in pages stride size
202 * 0: 1 2 3 4 -> 30 30 30 30 4 1
203 * 1: 5 6 7 8 -> 29 29 29 29 4 1
204 * 2: 10 12 14 16 -> 28 28 28 28 8 2
205 * 3: 20 24 28 32 -> 27 27 27 27 16 4
206 */
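   /* Worked example: size = 45056 bytes -> pages = 11.  clz((11 - 1) | 3) = 28,
    * so row = 2, row_max_pages = 16, prev_row_max_pages = 8, col_size_log2 = 1,
    * col = (11 - 8 + 1) >> 1 = 2, and index = 2 * 4 + (2 - 1) = 9, which is
    * the 12-page (49152-byte) bucket.
    */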
207 const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
208 const unsigned row_max_pages = 4 << row;
209
210 /* The '& ~2' is the special case for row 1. In row 1, max pages /
211 * 2 is 2, but the previous row maximum is zero (because there is
212     * no previous row). All row maximum sizes are powers of 2, so that
213 * is the only case where that bit will be set.
214 */
215 const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
216 int col_size_log2 = row - 1;
217 col_size_log2 += (col_size_log2 < 0);
218
219 const unsigned col = (pages - prev_row_max_pages +
220 ((1 << col_size_log2) - 1)) >> col_size_log2;
221
222    /* Compute the bucket index from the row and column. */
223 const unsigned index = (row * 4) + (col - 1);
224
225 return (index < bufmgr->num_buckets) ?
226 &bufmgr->cache_bucket[index] : NULL;
227 }
228
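/* Map an address back to the memory zone it was allocated from.  The ranges
 * mirror the heaps set up in iris_bufmgr_init(): shaders in [0, 4GB), the
 * binder at exactly 4GB, surface state in (4GB, 8GB), dynamic state in
 * [8GB, 12GB), and everything else in [12GB, 2^48).
 */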
229 static enum iris_memory_zone
230 memzone_for_address(uint64_t address)
231 {
232 const uint64_t _4GB = 1ull << 32;
233
234 if (address >= 3 * _4GB)
235 return IRIS_MEMZONE_OTHER;
236
237 if (address >= 2 * _4GB)
238 return IRIS_MEMZONE_DYNAMIC;
239
240 if (address > 1 * _4GB)
241 return IRIS_MEMZONE_SURFACE;
242
243 /* The binder isn't in any memory zone. */
244 if (address == 1 * _4GB)
245 return IRIS_MEMZONE_BINDER;
246
247 return IRIS_MEMZONE_SHADER;
248 }
249
250 static uint64_t
251 bucket_vma_alloc(struct iris_bufmgr *bufmgr,
252 struct bo_cache_bucket *bucket,
253 enum iris_memory_zone memzone)
254 {
255 struct util_dynarray *vma_list = &bucket->vma_list[memzone];
256 struct vma_bucket_node *node;
257
258 if (vma_list->size == 0) {
259 /* This bucket allocator is out of space - allocate a new block of
260 * memory from a larger allocator (either another bucket or util_vma).
261 *
262 * Set the first bit used, and return the start address.
263 */
264 uint64_t node_size = 64ull * bucket->size;
265 node = util_dynarray_grow(vma_list, sizeof(struct vma_bucket_node));
266 node->start_address = __vma_alloc(bufmgr, memzone, node_size, node_size);
267 node->bitmap = ~1ull;
268 return node->start_address;
269 }
270
271 /* Pick any bit from any node - they're all the right size and free. */
272 node = util_dynarray_top_ptr(vma_list, struct vma_bucket_node);
273 int bit = ffsll(node->bitmap) - 1;
274 assert(bit >= 0 && bit <= 63);
275
276 /* Reserve the memory by clearing the bit. */
277 assert((node->bitmap & (1ull << bit)) != 0ull);
278 node->bitmap &= ~(1ull << bit);
279
280 /* If this node is now completely full, remove it from the free list. */
281 if (node->bitmap == 0ull) {
282 (void) util_dynarray_pop(vma_list, struct vma_bucket_node);
283 }
284
285 return node->start_address + bit * bucket->size;
286 }
287
288 static void
289 bucket_vma_free(struct bo_cache_bucket *bucket,
290 uint64_t address,
291 uint64_t size)
292 {
293 enum iris_memory_zone memzone = memzone_for_address(address);
294 struct util_dynarray *vma_list = &bucket->vma_list[memzone];
295 const uint64_t node_bytes = 64ull * bucket->size;
296 struct vma_bucket_node *node = NULL;
297
298 uint64_t start = (address / node_bytes) * node_bytes;
299 int bit = (address - start) / bucket->size;
300
301 assert(start + bit * bucket->size == address);
302
303 util_dynarray_foreach(vma_list, struct vma_bucket_node, cur) {
304 if (cur->start_address == start) {
305 node = cur;
306 break;
307 }
308 }
309
310 if (!node) {
311 node = util_dynarray_grow(vma_list, sizeof(struct vma_bucket_node));
312 node->start_address = start;
313 node->bitmap = 0ull;
314 }
315
316 /* Set the bit to return the memory. */
317 assert((node->bitmap & (1ull << bit)) != 0ull);
318 node->bitmap |= 1ull << bit;
319
320 /* The block might be entirely free now, and if so, we could return it
321 * to the larger allocator. But we may as well hang on to it, in case
322 * we get more allocations at this block size.
323 */
324 }
325
326 static struct bo_cache_bucket *
327 get_bucket_allocator(struct iris_bufmgr *bufmgr, uint64_t size)
328 {
329 /* Skip using the bucket allocator for very large sizes, as it allocates
330 * 64 of them and this can balloon rather quickly.
331 */
332 if (size > 1024 * PAGE_SIZE)
333 return NULL;
334
335 struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);
336
337 if (bucket && bucket->size == size)
338 return bucket;
339
340 return NULL;
341 }
342
343 /** Like vma_alloc, but returns a non-canonicalized address. */
344 static uint64_t
345 __vma_alloc(struct iris_bufmgr *bufmgr,
346 enum iris_memory_zone memzone,
347 uint64_t size,
348 uint64_t alignment)
349 {
350 if (memzone == IRIS_MEMZONE_BINDER)
351 return 1ull << 32;
352
353 struct bo_cache_bucket *bucket = get_bucket_allocator(bufmgr, size);
354 uint64_t addr;
355
356 if (bucket) {
357 addr = bucket_vma_alloc(bufmgr, bucket, memzone);
358 } else {
359 addr = util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size,
360 alignment);
361 }
362
363 assert((addr >> 48ull) == 0);
364 return addr;
365 }
366
367 /**
368 * Allocate a section of virtual memory for a buffer, assigning an address.
369 *
370 * This uses either the bucket allocator for the given size, or the large
371 * object allocator (util_vma).
372 */
373 static uint64_t
374 vma_alloc(struct iris_bufmgr *bufmgr,
375 enum iris_memory_zone memzone,
376 uint64_t size,
377 uint64_t alignment)
378 {
379 uint64_t addr = __vma_alloc(bufmgr, memzone, size, alignment);
380
381 /* Canonicalize the address.
382 *
383 * The Broadwell PRM Vol. 2a, MI_LOAD_REGISTER_MEM::MemoryAddress says:
384 *
385 * "This field specifies the address of the memory location where the
386 * register value specified in the DWord above will read from. The
387 * address specifies the DWord location of the data. Range =
388 * GraphicsVirtualAddress[63:2] for a DWord register GraphicsAddress
389 * [63:48] are ignored by the HW and assumed to be in correct
390 * canonical form [63:48] == [47]."
391 */
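   /* With shift = 16, an address such as 0x0000800000000000 (bit 47 set)
    * becomes 0xffff800000000000, while addresses below 1ull << 47 pass
    * through unchanged.
    */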
392 const int shift = 63 - 47;
393 addr = (((int64_t) addr) << shift) >> shift;
394
395 return addr;
396 }
397
398 static void
399 vma_free(struct iris_bufmgr *bufmgr,
400 uint64_t address,
401 uint64_t size)
402 {
403 /* Un-canonicalize the address; our allocators expect 0 in the high bits */
404 address &= (1ull << 48) - 1;
405
406 struct bo_cache_bucket *bucket = get_bucket_allocator(bufmgr, size);
407
408 if (bucket) {
409 bucket_vma_free(bucket, address, size);
410 } else {
411 enum iris_memory_zone memzone = memzone_for_address(address);
412 util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
413 }
414 }
415
416 int
417 iris_bo_busy(struct iris_bo *bo)
418 {
419 struct iris_bufmgr *bufmgr = bo->bufmgr;
420 struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
421
422 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
423 if (ret == 0) {
424 bo->idle = !busy.busy;
425 return busy.busy;
426 }
427 return false;
428 }
429
430 int
431 iris_bo_madvise(struct iris_bo *bo, int state)
432 {
433 struct drm_i915_gem_madvise madv = {
434 .handle = bo->gem_handle,
435 .madv = state,
436 .retained = 1,
437 };
438
439 drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
440
441 return madv.retained;
442 }
443
444 /* drop the oldest entries that have been purged by the kernel */
445 static void
446 iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
447 struct bo_cache_bucket *bucket)
448 {
449 list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
450 if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
451 break;
452
453 list_del(&bo->head);
454 bo_free(bo);
455 }
456 }
457
458 static struct iris_bo *
459 bo_alloc_internal(struct iris_bufmgr *bufmgr,
460 const char *name,
461 uint64_t size,
462 enum iris_memory_zone memzone,
463 unsigned flags,
464 uint32_t tiling_mode,
465 uint32_t stride)
466 {
467 struct iris_bo *bo;
468 unsigned int page_size = getpagesize();
469 int ret;
470 struct bo_cache_bucket *bucket;
471 bool alloc_from_cache;
472 uint64_t bo_size;
473 bool zeroed = false;
474
475 if (flags & BO_ALLOC_ZEROED)
476 zeroed = true;
477
478 /* Round the allocated size up to a power of two number of pages. */
479 bucket = bucket_for_size(bufmgr, size);
480
481 /* If we don't have caching at this size, don't actually round the
482 * allocation up.
483 */
484 if (bucket == NULL) {
485 bo_size = size;
486 if (bo_size < page_size)
487 bo_size = page_size;
488 } else {
489 bo_size = bucket->size;
490 }
491
492 mtx_lock(&bufmgr->lock);
493 /* Get a buffer out of the cache if available */
494 retry:
495 alloc_from_cache = false;
496 if (bucket != NULL && !list_empty(&bucket->head)) {
497 /* If the last BO in the cache is idle, then reuse it. Otherwise,
498 * allocate a fresh buffer to avoid stalling.
499 */
500 bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
501 if (!iris_bo_busy(bo)) {
502 alloc_from_cache = true;
503 list_del(&bo->head);
504 }
505
506 if (alloc_from_cache) {
507 if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
508 bo_free(bo);
509 iris_bo_cache_purge_bucket(bufmgr, bucket);
510 goto retry;
511 }
512
513 if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
514 bo_free(bo);
515 goto retry;
516 }
517
518 if (zeroed) {
519 void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
520 if (!map) {
521 bo_free(bo);
522 goto retry;
523 }
524 memset(map, 0, bo_size);
525 }
526 }
527 }
528
529 if (alloc_from_cache) {
530 /* If the cached BO isn't in the right memory zone, free the old
531 * memory and assign it a new address.
532 */
533 if (memzone != memzone_for_address(bo->gtt_offset)) {
534 vma_free(bufmgr, bo->gtt_offset, bo_size);
535 bo->gtt_offset = 0ull;
536 }
537 } else {
538 bo = calloc(1, sizeof(*bo));
539 if (!bo)
540 goto err;
541
542 bo->size = bo_size;
543 bo->idle = true;
544
545 struct drm_i915_gem_create create = { .size = bo_size };
546
547 /* All new BOs we get from the kernel are zeroed, so we don't need to
548 * worry about that here.
549 */
550 ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
551 if (ret != 0) {
552 free(bo);
553 goto err;
554 }
555
556 bo->gem_handle = create.handle;
557
558 bo->bufmgr = bufmgr;
559 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
560
561 bo->tiling_mode = I915_TILING_NONE;
562 bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
563 bo->stride = 0;
564
565 if (bo_set_tiling_internal(bo, tiling_mode, stride))
566 goto err_free;
567
568 /* Calling set_domain() will allocate pages for the BO outside of the
569 * struct mutex lock in the kernel, which is more efficient than waiting
570 * to create them during the first execbuf that uses the BO.
571 */
572 struct drm_i915_gem_set_domain sd = {
573 .handle = bo->gem_handle,
574 .read_domains = I915_GEM_DOMAIN_CPU,
575 .write_domain = 0,
576 };
577
578 if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
579 goto err_free;
580 }
581
582 if (bo->gtt_offset == 0ull) {
583 bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);
584
585 if (bo->gtt_offset == 0ull)
586 goto err_free;
587 }
588
589 bo->name = name;
590 p_atomic_set(&bo->refcount, 1);
591 bo->reusable = true;
592 bo->cache_coherent = bufmgr->has_llc;
593 bo->index = -1;
594
595 mtx_unlock(&bufmgr->lock);
596
597 DBG("bo_create: buf %d (%s) %llub\n", bo->gem_handle, bo->name,
598 (unsigned long long) size);
599
600 return bo;
601
602 err_free:
603 bo_free(bo);
604 err:
605 mtx_unlock(&bufmgr->lock);
606 return NULL;
607 }
608
609 struct iris_bo *
610 iris_bo_alloc(struct iris_bufmgr *bufmgr,
611 const char *name,
612 uint64_t size,
613 enum iris_memory_zone memzone)
614 {
615 return bo_alloc_internal(bufmgr, name, size, memzone,
616 0, I915_TILING_NONE, 0);
617 }
618
619 struct iris_bo *
620 iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
621 uint64_t size, enum iris_memory_zone memzone,
622 uint32_t tiling_mode, uint32_t pitch, unsigned flags)
623 {
624 return bo_alloc_internal(bufmgr, name, size, memzone,
625 flags, tiling_mode, pitch);
626 }
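/* Illustrative usage of the allocation API above (not taken from any caller
 * in this file):
 *
 *    struct iris_bo *bo =
 *       iris_bo_alloc(bufmgr, "scratch", 4096, IRIS_MEMZONE_OTHER);
 *    void *map = iris_bo_map(NULL, bo, MAP_WRITE);
 *    memset(map, 0, bo->size);
 *    iris_bo_unreference(bo);
 */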
627
628 /**
629  * Returns an iris_bo wrapping the given buffer object handle.
630 *
631 * This can be used when one application needs to pass a buffer object
632 * to another.
633 */
634 struct iris_bo *
635 iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
636 const char *name, unsigned int handle)
637 {
638 struct iris_bo *bo;
639
640    /* At the moment most applications only have a few named BOs.
641 * For instance, in a DRI client only the render buffers passed
642 * between X and the client are named. And since X returns the
643     * alternating names for the front/back buffer, a linear search
644 * provides a sufficiently fast match.
645 */
646 mtx_lock(&bufmgr->lock);
647 bo = hash_find_bo(bufmgr->name_table, handle);
648 if (bo) {
649 iris_bo_reference(bo);
650 goto out;
651 }
652
653 struct drm_gem_open open_arg = { .name = handle };
654 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
655 if (ret != 0) {
656 DBG("Couldn't reference %s handle 0x%08x: %s\n",
657 name, handle, strerror(errno));
658 bo = NULL;
659 goto out;
660 }
661 /* Now see if someone has used a prime handle to get this
662 * object from the kernel before by looking through the list
663 * again for a matching gem_handle
664 */
665 bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
666 if (bo) {
667 iris_bo_reference(bo);
668 goto out;
669 }
670
671 bo = calloc(1, sizeof(*bo));
672 if (!bo)
673 goto out;
674
675 p_atomic_set(&bo->refcount, 1);
676
677 bo->size = open_arg.size;
678 bo->gtt_offset = 0;
679 bo->bufmgr = bufmgr;
680 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
681 bo->gem_handle = open_arg.handle;
682 bo->name = name;
683 bo->global_name = handle;
684 bo->reusable = false;
685 bo->external = true;
686 bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
687
688 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
689 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
690
691 struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
692 ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
693 if (ret != 0)
694 goto err_unref;
695
696 bo->tiling_mode = get_tiling.tiling_mode;
697 bo->swizzle_mode = get_tiling.swizzle_mode;
698 /* XXX stride is unknown */
699 DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
700
701 out:
702 mtx_unlock(&bufmgr->lock);
703 return bo;
704
705 err_unref:
706 bo_free(bo);
707 mtx_unlock(&bufmgr->lock);
708 return NULL;
709 }
710
711 static void
712 bo_free(struct iris_bo *bo)
713 {
714 struct iris_bufmgr *bufmgr = bo->bufmgr;
715
716 if (bo->map_cpu) {
717 VG_NOACCESS(bo->map_cpu, bo->size);
718 munmap(bo->map_cpu, bo->size);
719 }
720 if (bo->map_wc) {
721 VG_NOACCESS(bo->map_wc, bo->size);
722 munmap(bo->map_wc, bo->size);
723 }
724 if (bo->map_gtt) {
725 VG_NOACCESS(bo->map_gtt, bo->size);
726 munmap(bo->map_gtt, bo->size);
727 }
728
729 if (bo->external) {
730 struct hash_entry *entry;
731
732 if (bo->global_name) {
733 entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
734 _mesa_hash_table_remove(bufmgr->name_table, entry);
735 }
736
737 entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
738 _mesa_hash_table_remove(bufmgr->handle_table, entry);
739 }
740
741 vma_free(bo->bufmgr, bo->gtt_offset, bo->size);
742
743 /* Close this object */
744 struct drm_gem_close close = { .handle = bo->gem_handle };
745 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
746 if (ret != 0) {
747 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
748 bo->gem_handle, bo->name, strerror(errno));
749 }
750 free(bo);
751 }
752
753 /** Frees all cached buffers significantly older than @time. */
754 static void
755 cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
756 {
757 int i;
758
759 if (bufmgr->time == time)
760 return;
761
762 for (i = 0; i < bufmgr->num_buckets; i++) {
763 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
764
765 list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
766 if (time - bo->free_time <= 1)
767 break;
768
769 list_del(&bo->head);
770
771 bo_free(bo);
772 }
773 }
774
775 bufmgr->time = time;
776 }
777
778 static void
779 bo_unreference_final(struct iris_bo *bo, time_t time)
780 {
781 struct iris_bufmgr *bufmgr = bo->bufmgr;
782 struct bo_cache_bucket *bucket;
783
784 DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
785
786 bucket = bucket_for_size(bufmgr, bo->size);
787 /* Put the buffer into our internal cache for reuse if we can. */
788 if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
789 iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
790 bo->free_time = time;
791 bo->name = NULL;
792
793 list_addtail(&bo->head, &bucket->head);
794 } else {
795 bo_free(bo);
796 }
797 }
798
799 void
800 iris_bo_unreference(struct iris_bo *bo)
801 {
802 if (bo == NULL)
803 return;
804
805 assert(p_atomic_read(&bo->refcount) > 0);
806
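   /* Fast path: atomic_add_unless() drops the reference without taking the
    * lock as long as this is not the last reference.  Only when the count is 1
    * do we take the lock and perform the final decrement (p_atomic_dec_zero),
    * so that freeing and cache cleanup are serialized with other threads.
    */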
807 if (atomic_add_unless(&bo->refcount, -1, 1)) {
808 struct iris_bufmgr *bufmgr = bo->bufmgr;
809 struct timespec time;
810
811 clock_gettime(CLOCK_MONOTONIC, &time);
812
813 mtx_lock(&bufmgr->lock);
814
815 if (p_atomic_dec_zero(&bo->refcount)) {
816 bo_unreference_final(bo, time.tv_sec);
817 cleanup_bo_cache(bufmgr, time.tv_sec);
818 }
819
820 mtx_unlock(&bufmgr->lock);
821 }
822 }
823
824 static void
825 bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
826 struct iris_bo *bo,
827 const char *action)
828 {
829 bool busy = dbg && !bo->idle;
830 double elapsed = unlikely(busy) ? -get_time() : 0.0;
831
832 iris_bo_wait_rendering(bo);
833
834 if (unlikely(busy)) {
835 elapsed += get_time();
836 if (elapsed > 1e-5) /* 0.01ms */ {
837 perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
838 action, bo->name, elapsed * 1000);
839 }
840 }
841 }
842
843 static void
844 print_flags(unsigned flags)
845 {
846 if (flags & MAP_READ)
847 DBG("READ ");
848 if (flags & MAP_WRITE)
849 DBG("WRITE ");
850 if (flags & MAP_ASYNC)
851 DBG("ASYNC ");
852 if (flags & MAP_PERSISTENT)
853 DBG("PERSISTENT ");
854 if (flags & MAP_COHERENT)
855 DBG("COHERENT ");
856 if (flags & MAP_RAW)
857 DBG("RAW ");
858 DBG("\n");
859 }
860
861 static void *
862 iris_bo_map_cpu(struct pipe_debug_callback *dbg,
863 struct iris_bo *bo, unsigned flags)
864 {
865 struct iris_bufmgr *bufmgr = bo->bufmgr;
866
867 /* We disallow CPU maps for writing to non-coherent buffers, as the
868 * CPU map can become invalidated when a batch is flushed out, which
869 * can happen at unpredictable times. You should use WC maps instead.
870 */
871 assert(bo->cache_coherent || !(flags & MAP_WRITE));
872
873 if (!bo->map_cpu) {
874 DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);
875
876 struct drm_i915_gem_mmap mmap_arg = {
877 .handle = bo->gem_handle,
878 .size = bo->size,
879 };
880 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
881 if (ret != 0) {
882 ret = -errno;
883 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
884 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
885 return NULL;
886 }
887 void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
888 VG_DEFINED(map, bo->size);
889
890 if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
891 VG_NOACCESS(map, bo->size);
892 munmap(map, bo->size);
893 }
894 }
895 assert(bo->map_cpu);
896
897 DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
898 bo->map_cpu);
899 print_flags(flags);
900
901 if (!(flags & MAP_ASYNC)) {
902 bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
903 }
904
905 if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
906 /* If we're reusing an existing CPU mapping, the CPU caches may
907 * contain stale data from the last time we read from that mapping.
908 * (With the BO cache, it might even be data from a previous buffer!)
909 * Even if it's a brand new mapping, the kernel may have zeroed the
910 * buffer via CPU writes.
911 *
912 * We need to invalidate those cachelines so that we see the latest
913 * contents, and so long as we only read from the CPU mmap we do not
914 * need to write those cachelines back afterwards.
915 *
916        * On LLC, the empirical evidence suggests that writes from the GPU
917        * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
918        * cachelines. (Other reads, such as those from the display engine, bypass
919        * the LLC entirely, requiring us to keep dirty pixels for the scanout
920        * out of any cache.)
921 */
922 gen_invalidate_range(bo->map_cpu, bo->size);
923 }
924
925 return bo->map_cpu;
926 }
927
928 static void *
929 iris_bo_map_wc(struct pipe_debug_callback *dbg,
930 struct iris_bo *bo, unsigned flags)
931 {
932 struct iris_bufmgr *bufmgr = bo->bufmgr;
933
934 if (!bo->map_wc) {
935 DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
936
937 struct drm_i915_gem_mmap mmap_arg = {
938 .handle = bo->gem_handle,
939 .size = bo->size,
940 .flags = I915_MMAP_WC,
941 };
942 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
943 if (ret != 0) {
944 ret = -errno;
945 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
946 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
947 return NULL;
948 }
949
950 void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
951 VG_DEFINED(map, bo->size);
952
953 if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
954 VG_NOACCESS(map, bo->size);
955 munmap(map, bo->size);
956 }
957 }
958 assert(bo->map_wc);
959
960 DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
961 print_flags(flags);
962
963 if (!(flags & MAP_ASYNC)) {
964 bo_wait_with_stall_warning(dbg, bo, "WC mapping");
965 }
966
967 return bo->map_wc;
968 }
969
970 /**
971 * Perform an uncached mapping via the GTT.
972 *
973 * Write access through the GTT is not quite fully coherent. On low power
974 * systems especially, like modern Atoms, we can observe reads from RAM before
975 * the write via GTT has landed. A write memory barrier that flushes the Write
976 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
977 * read after the write as the GTT write suffers a small delay through the GTT
978 * indirection. The kernel uses an uncached mmio read to ensure the GTT write
979 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
980 * flushes prior to execbuf submission. However, if we are not informing the
981 * kernel about our GTT writes, it will not flush before earlier access, such
982 * as when using the cmdparser. Similarly, we need to be careful if we should
983 * ever issue a CPU read immediately following a GTT write.
984 *
985 * Telling the kernel about write access also has one more important
986 * side-effect. Upon receiving notification about the write, it cancels any
987 * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
988 * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
989  * actual scanout via a mmapping, only to a backbuffer, and so all the FBC/PSR
990 * tracking is handled on the buffer exchange instead.
991 */
992 static void *
993 iris_bo_map_gtt(struct pipe_debug_callback *dbg,
994 struct iris_bo *bo, unsigned flags)
995 {
996 struct iris_bufmgr *bufmgr = bo->bufmgr;
997
998 /* Get a mapping of the buffer if we haven't before. */
999 if (bo->map_gtt == NULL) {
1000 DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);
1001
1002 struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };
1003
1004 /* Get the fake offset back... */
1005 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
1006 if (ret != 0) {
1007 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1008 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1009 return NULL;
1010 }
1011
1012 /* and mmap it. */
1013 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1014 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
1015 if (map == MAP_FAILED) {
1016 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1017 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1018 return NULL;
1019 }
1020
1021 /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
1022 * already intercept this mmap call. However, for consistency between
1023 * all the mmap paths, we mark the pointer as defined now and mark it
1024 * as inaccessible afterwards.
1025 */
1026 VG_DEFINED(map, bo->size);
1027
1028 if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
1029 VG_NOACCESS(map, bo->size);
1030 munmap(map, bo->size);
1031 }
1032 }
1033 assert(bo->map_gtt);
1034
1035 DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
1036 print_flags(flags);
1037
1038 if (!(flags & MAP_ASYNC)) {
1039 bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
1040 }
1041
1042 return bo->map_gtt;
1043 }
1044
1045 static bool
1046 can_map_cpu(struct iris_bo *bo, unsigned flags)
1047 {
1048 if (bo->cache_coherent)
1049 return true;
1050
1051 /* Even if the buffer itself is not cache-coherent (such as a scanout), on
1052     * an LLC platform reads are always coherent (as they are performed via the
1053     * central system agent). It is just writes that we need to take special care
1054     * with, to ensure they land in main memory and do not stick in the CPU cache.
1055 */
1056 if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
1057 return true;
1058
1059 /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
1060 * across batch flushes where the kernel will change cache domains of the
1061     * bo, invalidating continued access to the CPU mmap on non-LLC devices.
1062 *
1063 * Similarly, ASYNC typically means that the buffer will be accessed via
1064 * both the CPU and the GPU simultaneously. Batches may be executed that
1065 * use the BO even while it is mapped. While OpenGL technically disallows
1066 * most drawing while non-persistent mappings are active, we may still use
1067 * the GPU for blits or other operations, causing batches to happen at
1068 * inconvenient times.
1069 */
1070 if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC))
1071 return false;
1072
1073 return !(flags & MAP_WRITE);
1074 }
1075
1076 void *
1077 iris_bo_map(struct pipe_debug_callback *dbg,
1078 struct iris_bo *bo, unsigned flags)
1079 {
1080 if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
1081 return iris_bo_map_gtt(dbg, bo, flags);
1082
1083 void *map;
1084
1085 if (can_map_cpu(bo, flags))
1086 map = iris_bo_map_cpu(dbg, bo, flags);
1087 else
1088 map = iris_bo_map_wc(dbg, bo, flags);
1089
1090 /* Allow the attempt to fail by falling back to the GTT where necessary.
1091 *
1092     * Not every buffer can be mmapped directly using the CPU (or WC), for
1093 * example buffers that wrap stolen memory or are imported from other
1094 * devices. For those, we have little choice but to use a GTT mmapping.
1095 * However, if we use a slow GTT mmapping for reads where we expected fast
1096 * access, that order of magnitude difference in throughput will be clearly
1097 * expressed by angry users.
1098 *
1099 * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
1100 */
1101 if (!map && !(flags & MAP_RAW)) {
1102 perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
1103 bo->name, flags);
1104 map = iris_bo_map_gtt(dbg, bo, flags);
1105 }
1106
1107 return map;
1108 }
1109
1110 int
1111 iris_bo_subdata(struct iris_bo *bo, uint64_t offset,
1112 uint64_t size, const void *data)
1113 {
1114 struct iris_bufmgr *bufmgr = bo->bufmgr;
1115
1116 struct drm_i915_gem_pwrite pwrite = {
1117 .handle = bo->gem_handle,
1118 .offset = offset,
1119 .size = size,
1120 .data_ptr = (uint64_t) (uintptr_t) data,
1121 };
1122
1123 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
1124 if (ret != 0) {
1125 ret = -errno;
1126 DBG("%s:%d: Error writing data to buffer %d: "
1127 "(%"PRIu64" %"PRIu64") %s .\n",
1128 __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
1129 }
1130
1131 return ret;
1132 }
1133
1134 /** Waits for all GPU rendering with the object to have completed. */
1135 void
1136 iris_bo_wait_rendering(struct iris_bo *bo)
1137 {
1138 /* We require a kernel recent enough for WAIT_IOCTL support.
1139 * See intel_init_bufmgr()
1140 */
1141 iris_bo_wait(bo, -1);
1142 }
1143
1144 /**
1145 * Waits on a BO for the given amount of time.
1146 *
1147 * @bo: buffer object to wait for
1148 * @timeout_ns: amount of time to wait in nanoseconds.
1149 * If value is less than 0, an infinite wait will occur.
1150 *
1151  * Returns 0 if the wait was successful, i.e. the last batch referencing the
1152  * object has completed within the allotted time. Otherwise, some negative return
1153 * value describes the error. Of particular interest is -ETIME when the wait has
1154 * failed to yield the desired result.
1155 *
1156  * Similar to iris_bo_wait_rendering, except that a timeout parameter allows
1157  * the operation to give up after a certain amount of time. Another subtle
1158  * difference is in the internal locking semantics (this variant does
1159 * not hold the lock for the duration of the wait). This makes the wait subject
1160 * to a larger userspace race window.
1161 *
1162 * The implementation shall wait until the object is no longer actively
1163  * referenced within a batch buffer at the time of the call. The wait does
1164  * not guarantee that the buffer will not be re-issued via another thread or
1165  * a flinked handle. Userspace must make sure this race does not occur if
1166  * such precision is important.
1167 *
1168  * Note that some kernels have broken the infinite wait for negative values
1169  * promise; upgrade to the latest stable kernel if this is the case.
1170 */
1171 int
1172 iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
1173 {
1174 struct iris_bufmgr *bufmgr = bo->bufmgr;
1175
1176 /* If we know it's idle, don't bother with the kernel round trip */
1177 if (bo->idle && !bo->external)
1178 return 0;
1179
1180 struct drm_i915_gem_wait wait = {
1181 .bo_handle = bo->gem_handle,
1182 .timeout_ns = timeout_ns,
1183 };
1184 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1185 if (ret == -1)
1186 return -errno;
1187
1188 bo->idle = true;
1189
1190 return ret;
1191 }
1192
1193 void
1194 iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
1195 {
1196 mtx_destroy(&bufmgr->lock);
1197
1198 /* Free any cached buffer objects we were going to reuse */
1199 for (int i = 0; i < bufmgr->num_buckets; i++) {
1200 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
1201
1202 list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
1203 list_del(&bo->head);
1204
1205 bo_free(bo);
1206 }
1207
1208 for (int i = 0; i < IRIS_MEMZONE_COUNT; i++)
1209 util_dynarray_fini(&bucket->vma_list[i]);
1210 }
1211
1212 _mesa_hash_table_destroy(bufmgr->name_table, NULL);
1213 _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
1214
1215 free(bufmgr);
1216 }
1217
1218 static int
1219 bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
1220 uint32_t stride)
1221 {
1222 struct iris_bufmgr *bufmgr = bo->bufmgr;
1223 struct drm_i915_gem_set_tiling set_tiling;
1224 int ret;
1225
1226 if (bo->global_name == 0 &&
1227 tiling_mode == bo->tiling_mode && stride == bo->stride)
1228 return 0;
1229
1230 memset(&set_tiling, 0, sizeof(set_tiling));
1231 do {
1232 /* set_tiling is slightly broken and overwrites the
1233 * input on the error path, so we have to open code
1234 * drm_ioctl.
1235 */
1236 set_tiling.handle = bo->gem_handle;
1237 set_tiling.tiling_mode = tiling_mode;
1238 set_tiling.stride = stride;
1239
1240 ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1241 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1242 if (ret == -1)
1243 return -errno;
1244
1245 bo->tiling_mode = set_tiling.tiling_mode;
1246 bo->swizzle_mode = set_tiling.swizzle_mode;
1247 bo->stride = set_tiling.stride;
1248 return 0;
1249 }
1250
1251 int
1252 iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
1253 uint32_t *swizzle_mode)
1254 {
1255 *tiling_mode = bo->tiling_mode;
1256 *swizzle_mode = bo->swizzle_mode;
1257 return 0;
1258 }
1259
1260 struct iris_bo *
1261 iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
1262 {
1263 uint32_t handle;
1264 struct iris_bo *bo;
1265
1266 mtx_lock(&bufmgr->lock);
1267 int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1268 if (ret) {
1269 DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
1270 strerror(errno));
1271 mtx_unlock(&bufmgr->lock);
1272 return NULL;
1273 }
1274
1275 /*
1276 * See if the kernel has already returned this buffer to us. Just as
1277 * for named buffers, we must not create two bo's pointing at the same
1278 * kernel object
1279 */
1280 bo = hash_find_bo(bufmgr->handle_table, handle);
1281 if (bo) {
1282 iris_bo_reference(bo);
1283 goto out;
1284 }
1285
1286 bo = calloc(1, sizeof(*bo));
1287 if (!bo)
1288 goto out;
1289
1290 p_atomic_set(&bo->refcount, 1);
1291
1292 /* Determine size of bo. The fd-to-handle ioctl really should
1293 * return the size, but it doesn't. If we have kernel 3.12 or
1294 * later, we can lseek on the prime fd to get the size. Older
1295 * kernels will just fail, in which case we fall back to the
1296     * provided (estimated or guessed) size. */
1297 ret = lseek(prime_fd, 0, SEEK_END);
1298 if (ret != -1)
1299 bo->size = ret;
1300
1301 bo->bufmgr = bufmgr;
1302 bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
1303
1304 bo->gem_handle = handle;
1305 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1306
1307 bo->name = "prime";
1308 bo->reusable = false;
1309 bo->external = true;
1310 bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
1311
1312 struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
1313 if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
1314 goto err;
1315
1316 bo->tiling_mode = get_tiling.tiling_mode;
1317 bo->swizzle_mode = get_tiling.swizzle_mode;
1318 /* XXX stride is unknown */
1319
1320 out:
1321 mtx_unlock(&bufmgr->lock);
1322 return bo;
1323
1324 err:
1325 bo_free(bo);
1326 mtx_unlock(&bufmgr->lock);
1327 return NULL;
1328 }
1329
1330 static void
1331 iris_bo_make_external(struct iris_bo *bo)
1332 {
1333 struct iris_bufmgr *bufmgr = bo->bufmgr;
1334
1335 if (!bo->external) {
1336 mtx_lock(&bufmgr->lock);
1337 if (!bo->external) {
1338 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1339 bo->external = true;
1340 }
1341 mtx_unlock(&bufmgr->lock);
1342 }
1343 }
1344
1345 int
1346 iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
1347 {
1348 struct iris_bufmgr *bufmgr = bo->bufmgr;
1349
1350 iris_bo_make_external(bo);
1351
1352 if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
1353 DRM_CLOEXEC, prime_fd) != 0)
1354 return -errno;
1355
1356 bo->reusable = false;
1357
1358 return 0;
1359 }
1360
1361 uint32_t
1362 iris_bo_export_gem_handle(struct iris_bo *bo)
1363 {
1364 iris_bo_make_external(bo);
1365
1366 return bo->gem_handle;
1367 }
1368
1369 int
1370 iris_bo_flink(struct iris_bo *bo, uint32_t *name)
1371 {
1372 struct iris_bufmgr *bufmgr = bo->bufmgr;
1373
1374 if (!bo->global_name) {
1375 struct drm_gem_flink flink = { .handle = bo->gem_handle };
1376
1377 if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
1378 return -errno;
1379
1380 iris_bo_make_external(bo);
1381 mtx_lock(&bufmgr->lock);
1382 if (!bo->global_name) {
1383 bo->global_name = flink.name;
1384 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
1385 }
1386 mtx_unlock(&bufmgr->lock);
1387
1388 bo->reusable = false;
1389 }
1390
1391 *name = bo->global_name;
1392 return 0;
1393 }
1394
1395 static void
1396 add_bucket(struct iris_bufmgr *bufmgr, int size)
1397 {
1398 unsigned int i = bufmgr->num_buckets;
1399
1400 assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
1401
1402 list_inithead(&bufmgr->cache_bucket[i].head);
1403 for (int z = 0; z < IRIS_MEMZONE_COUNT; z++)
1404 util_dynarray_init(&bufmgr->cache_bucket[i].vma_list[z], NULL);
1405 bufmgr->cache_bucket[i].size = size;
1406 bufmgr->num_buckets++;
1407
1408 assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
1409 assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
1410 assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
1411 }
1412
1413 static void
1414 init_cache_buckets(struct iris_bufmgr *bufmgr)
1415 {
1416 uint64_t size, cache_max_size = 64 * 1024 * 1024;
1417
1418 /* OK, so power of two buckets was too wasteful of memory.
1419 * Give 3 other sizes between each power of two, to hopefully
1420 * cover things accurately enough. (The alternative is
1421 * probably to just go for exact matching of sizes, and assume
1422 * that for things like composited window resize the tiled
1423 * width/height alignment and rounding of sizes to pages will
1424 * get us useful cache hit rates anyway)
1425 */
1426 add_bucket(bufmgr, PAGE_SIZE);
1427 add_bucket(bufmgr, PAGE_SIZE * 2);
1428 add_bucket(bufmgr, PAGE_SIZE * 3);
1429
1430 /* Initialize the linked lists for BO reuse cache. */
1431 for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
1432 add_bucket(bufmgr, size);
1433
1434 add_bucket(bufmgr, size + size * 1 / 4);
1435 add_bucket(bufmgr, size + size * 2 / 4);
1436 add_bucket(bufmgr, size + size * 3 / 4);
1437 }
1438 }
1439
1440 uint32_t
1441 iris_create_hw_context(struct iris_bufmgr *bufmgr)
1442 {
1443 struct drm_i915_gem_context_create create = { };
1444 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
1445 if (ret != 0) {
1446 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
1447 return 0;
1448 }
1449
1450 return create.ctx_id;
1451 }
1452
1453 int
1454 iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
1455 uint32_t ctx_id,
1456 int priority)
1457 {
1458 struct drm_i915_gem_context_param p = {
1459 .ctx_id = ctx_id,
1460 .param = I915_CONTEXT_PARAM_PRIORITY,
1461 .value = priority,
1462 };
1463 int err;
1464
1465 err = 0;
1466 if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
1467 err = -errno;
1468
1469 return err;
1470 }
1471
1472 void
1473 iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
1474 {
1475 struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
1476
1477 if (ctx_id != 0 &&
1478 drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
1479 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
1480 strerror(errno));
1481 }
1482 }
1483
1484 int
1485 iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
1486 {
1487 struct drm_i915_reg_read reg_read = { .offset = offset };
1488 int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
1489
1490 *result = reg_read.val;
1491 return ret;
1492 }
1493
1494 /**
1495 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1496  * and manage buffer objects.
1497 *
1498 * \param fd File descriptor of the opened DRM device.
1499 */
1500 struct iris_bufmgr *
1501 iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
1502 {
1503 struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
1504 if (bufmgr == NULL)
1505 return NULL;
1506
1507 /* Handles to buffer objects belong to the device fd and are not
1508 * reference counted by the kernel. If the same fd is used by
1509 * multiple parties (threads sharing the same screen bufmgr, or
1510 * even worse the same device fd passed to multiple libraries)
1511 * ownership of those handles is shared by those independent parties.
1512 *
1513 * Don't do this! Ensure that each library/bufmgr has its own device
1514 * fd so that its namespace does not clash with another.
1515 */
1516 bufmgr->fd = fd;
1517
1518 if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
1519 free(bufmgr);
1520 return NULL;
1521 }
1522
1523 bufmgr->has_llc = devinfo->has_llc;
1524
1525 const uint64_t _4GB = 1ull << 32;
1526
1527 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SHADER],
1528 PAGE_SIZE, _4GB);
1529 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_SURFACE],
1530 1 * _4GB, _4GB);
1531 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_DYNAMIC],
1532 2 * _4GB, _4GB);
1533 util_vma_heap_init(&bufmgr->vma_allocator[IRIS_MEMZONE_OTHER],
1534 3 * _4GB, (1ull << 48) - 3 * _4GB);
1535
1536 // XXX: driconf
1537 bufmgr->bo_reuse = env_var_as_boolean("bo_reuse", true);
1538
1539 init_cache_buckets(bufmgr);
1540
1541 bufmgr->name_table =
1542 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1543 bufmgr->handle_table =
1544 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1545
1546 return bufmgr;
1547 }