i965: replace all dup() with os_dupfd_cloexec()
[mesa.git] / src / mesa / drivers / dri / i965 / brw_bufmgr.c
1 /*
2 * Copyright © 2007 Red Hat Inc.
3 * Copyright © 2007-2017 Intel Corporation
4 * Copyright © 2006 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * IN THE SOFTWARE.
25 */
26
27 /*
28 * Authors: Thomas Hellström <thellstrom@vmware.com>
29 * Keith Whitwell <keithw@vmware.com>
30 * Eric Anholt <eric@anholt.net>
31 * Dave Airlie <airlied@linux.ie>
32 */
33
34 #include <xf86drm.h>
35 #include <util/u_atomic.h>
36 #include <fcntl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <unistd.h>
41 #include <assert.h>
42 #include <sys/ioctl.h>
43 #include <sys/stat.h>
44 #include <sys/types.h>
45 #include <stdbool.h>
46
47 #include "errno.h"
48 #include "common/gen_clflush.h"
49 #include "dev/gen_debug.h"
50 #include "common/gen_gem.h"
51 #include "dev/gen_device_info.h"
52 #include "libdrm_macros.h"
53 #include "main/macros.h"
54 #include "util/macros.h"
55 #include "util/hash_table.h"
56 #include "util/list.h"
57 #include "util/os_file.h"
58 #include "util/u_dynarray.h"
59 #include "util/vma.h"
60 #include "brw_bufmgr.h"
61 #include "brw_context.h"
62 #include "string.h"
63
64 #include "drm-uapi/i915_drm.h"
65
66 #ifdef HAVE_VALGRIND
67 #include <valgrind.h>
68 #include <memcheck.h>
69 #define VG(x) x
70 #else
71 #define VG(x)
72 #endif
73
74 /* Bufmgr is not aware of brw_context. */
75 #undef WARN_ONCE
76 #define WARN_ONCE(cond, fmt...) do { \
77 if (unlikely(cond)) { \
78 static bool _warned = false; \
79 if (!_warned) { \
80 fprintf(stderr, "WARNING: "); \
81 fprintf(stderr, fmt); \
82 _warned = true; \
83 } \
84 } \
85 } while (0)
86
87
88 /* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
89 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
90 * leaked. All because it does not call VG(cli_free) from its
91 * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
92  * an allocation, we mark it available for use upon mmapping and remove
93 * it upon unmapping.
94 */
95 #define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
96 #define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
97
98 #define PAGE_SIZE 4096
99
100 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
101
102 static inline int
103 atomic_add_unless(int *v, int add, int unless)
104 {
105 int c, old;
106 c = p_atomic_read(v);
107 while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
108 c = old;
109 return c == unless;
110 }
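
/* Illustrative note, not part of the driver: atomic_add_unless() only
 * performs the add when the current value differs from `unless`, and
 * returns true when the value already equals `unless` (so no add
 * happened). brw_bo_unreference() below uses exactly this shape to
 * detect the final reference drop without taking the bufmgr lock on
 * the fast path:
 *
 *    if (atomic_add_unless(&bo->refcount, -1, 1)) {
 *       mtx_lock(&bufmgr->lock);
 *       if (p_atomic_dec_zero(&bo->refcount))
 *          ...free or cache the BO...
 *       mtx_unlock(&bufmgr->lock);
 *    }
 */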
111
112 /**
113 * i965 fixed-size bucketing VMA allocator.
114 *
115 * The BO cache maintains "cache buckets" for buffers of various sizes.
116 * All buffers in a given bucket are identically sized - when allocating,
117 * we always round up to the bucket size. This means that virtually all
118 * allocations are fixed-size; only buffers which are too large to fit in
119 * a bucket can be variably-sized.
120 *
121 * We create an allocator for each bucket. Each contains a free-list, where
122 * each node contains a <starting address, 64-bit bitmap> pair. Each bit
123 * represents a bucket-sized block of memory. (At the first level, each
124 * bit corresponds to a page. For the second bucket, bits correspond to
125 * two pages, and so on.) 1 means a block is free, and 0 means it's in-use.
126 * The lowest bit in the bitmap is for the first block.
127 *
128 * This makes allocations cheap - any bit of any node will do. We can pick
129 * the head of the list and use ffs() to find a free block. If there are
130 * none, we allocate 64 blocks from a larger allocator - either a bigger
131 * bucketing allocator, or a fallback top-level allocator for large objects.
132 */
133 struct vma_bucket_node {
134 uint64_t start_address;
135 uint64_t bitmap;
136 };
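
/* Worked example, illustrative only: in the 4 KB bucket, one
 * vma_bucket_node spans 64 * 4096 = 256 KB of address space. A freshly
 * grown node has bitmap = ~1ull, meaning bit 0 (the first block) was
 * just handed out. The allocator always takes the lowest set bit, so
 * the next allocation clears bit 1 and returns start_address + 4096;
 * freeing that block later sets bit 1 again.
 */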
137
138 struct bo_cache_bucket {
139 /** List of cached BOs. */
140 struct list_head head;
141
142 /** Size of this bucket, in bytes. */
143 uint64_t size;
144
145 /** List of vma_bucket_nodes. */
146 struct util_dynarray vma_list[BRW_MEMZONE_COUNT];
147 };
148
149 struct bo_export {
150 /** File descriptor associated with a handle export. */
151 int drm_fd;
152
153 /** GEM handle in drm_fd */
154 uint32_t gem_handle;
155
156 struct list_head link;
157 };
158
159 struct brw_bufmgr {
160 uint32_t refcount;
161
162 struct list_head link;
163
164 int fd;
165
166 mtx_t lock;
167
168 /** Array of lists of cached gem objects of power-of-two sizes */
169 struct bo_cache_bucket cache_bucket[14 * 4];
170 int num_buckets;
171 time_t time;
172
173 struct hash_table *name_table;
174 struct hash_table *handle_table;
175
176 struct util_vma_heap vma_allocator[BRW_MEMZONE_COUNT];
177
178 bool has_llc:1;
179 bool has_mmap_wc:1;
180 bool has_mmap_offset:1;
181 bool bo_reuse:1;
182
183 uint64_t initial_kflags;
184 };
185
186 static mtx_t global_bufmgr_list_mutex = _MTX_INITIALIZER_NP;
187 static struct list_head global_bufmgr_list = {
188 .next = &global_bufmgr_list,
189 .prev = &global_bufmgr_list,
190 };
191
192 static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
193 uint32_t stride);
194
195 static void bo_free(struct brw_bo *bo);
196
197 static uint64_t vma_alloc(struct brw_bufmgr *bufmgr,
198 enum brw_memory_zone memzone,
199 uint64_t size, uint64_t alignment);
200
201 static struct brw_bo *
202 hash_find_bo(struct hash_table *ht, unsigned int key)
203 {
204 struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
205 return entry ? (struct brw_bo *) entry->data : NULL;
206 }
207
208 static uint64_t
209 bo_tile_size(struct brw_bufmgr *bufmgr, uint64_t size, uint32_t tiling)
210 {
211 if (tiling == I915_TILING_NONE)
212 return size;
213
214 /* 965+ just need multiples of page size for tiling */
215 return ALIGN(size, PAGE_SIZE);
216 }
217
218 /*
219 * Round a given pitch up to the minimum required for X tiling on a
220 * given chip. We use 512 as the minimum to allow for a later tiling
221 * change.
222 */
223 static uint32_t
224 bo_tile_pitch(struct brw_bufmgr *bufmgr, uint32_t pitch, uint32_t tiling)
225 {
226 unsigned long tile_width;
227
228 /* If untiled, then just align it so that we can do rendering
229 * to it with the 3D engine.
230 */
231 if (tiling == I915_TILING_NONE)
232 return ALIGN(pitch, 64);
233
234 if (tiling == I915_TILING_X)
235 tile_width = 512;
236 else
237 tile_width = 128;
238
239 /* 965 is flexible */
240 return ALIGN(pitch, tile_width);
241 }
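
/* Worked example, illustrative only: an untiled pitch of 100 bytes
 * rounds to ALIGN(100, 64) = 128, while a 500-byte pitch with
 * I915_TILING_X rounds to ALIGN(500, 512) = 512, the X-tile width
 * chosen above.
 */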
242
243 /**
244 * This function finds the correct bucket fit for the input size.
245  * It computes the bucket index in O(1) from the requested size,
246  * instead of iterating over all the buckets.
247 */
248 static struct bo_cache_bucket *
249 bucket_for_size(struct brw_bufmgr *bufmgr, uint64_t size)
250 {
251    /* Compute the number of pages, rounding the size up to the page size. */
252 const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
253
254    /* Row  Bucket sizes    clz((x-1) | 3)   Row    Column
255     *  in pages                            stride   size
256     *   0:   1  2  3  4 -> 30 30 30 30        4       1
257     *   1:   5  6  7  8 -> 29 29 29 29        4       1
258     *   2:  10 12 14 16 -> 28 28 28 28        8       2
259     *   3:  20 24 28 32 -> 27 27 27 27       16       4
260     */
261 const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
262 const unsigned row_max_pages = 4 << row;
263
264 /* The '& ~2' is the special case for row 1. In row 1, max pages /
265 * 2 is 2, but the previous row maximum is zero (because there is
266 * no previous row). All row maximum sizes are power of 2, so that
267 * is the only case where that bit will be set.
268 */
269 const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
270 int col_size_log2 = row - 1;
271 col_size_log2 += (col_size_log2 < 0);
272
273 const unsigned col = (pages - prev_row_max_pages +
274 ((1 << col_size_log2) - 1)) >> col_size_log2;
275
276 /* Calculating the index based on the row and column. */
277 const unsigned index = (row * 4) + (col - 1);
278
279 return (index < bufmgr->num_buckets) ?
280 &bufmgr->cache_bucket[index] : NULL;
281 }
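
/* Worked example, illustrative only: size = 30 pages (122880 bytes).
 * (pages - 1) | 3 = 31 and clz(31) = 27, so row = 30 - 27 = 3;
 * prev_row_max_pages = (32 / 2) & ~2 = 16 and col_size_log2 = 2, so
 * col = (30 - 16 + 3) >> 2 = 4 and index = 3 * 4 + (4 - 1) = 15, the
 * 32-page (128 KB) bucket created by init_cache_buckets().
 */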
282
283 static enum brw_memory_zone
284 memzone_for_address(uint64_t address)
285 {
286 const uint64_t _4GB = 1ull << 32;
287
288 if (address >= _4GB)
289 return BRW_MEMZONE_OTHER;
290
291 return BRW_MEMZONE_LOW_4G;
292 }
293
294 static uint64_t
295 bucket_vma_alloc(struct brw_bufmgr *bufmgr,
296 struct bo_cache_bucket *bucket,
297 enum brw_memory_zone memzone)
298 {
299 struct util_dynarray *vma_list = &bucket->vma_list[memzone];
300 struct vma_bucket_node *node;
301
302 if (vma_list->size == 0) {
303 /* This bucket allocator is out of space - allocate a new block of
304 * memory for 64 blocks from a larger allocator (either a larger
305 * bucket or util_vma).
306 *
307 * We align the address to the node size (64 blocks) so that
308 * bucket_vma_free can easily compute the starting address of this
309 * block by rounding any address we return down to the node size.
310 *
311 * Set the first bit used, and return the start address.
312 */
313 uint64_t node_size = 64ull * bucket->size;
314 node = util_dynarray_grow(vma_list, struct vma_bucket_node, 1);
315
316 if (unlikely(!node))
317 return 0ull;
318
319 uint64_t addr = vma_alloc(bufmgr, memzone, node_size, node_size);
320 node->start_address = gen_48b_address(addr);
321 node->bitmap = ~1ull;
322 return node->start_address;
323 }
324
325 /* Pick any bit from any node - they're all the right size and free. */
326 node = util_dynarray_top_ptr(vma_list, struct vma_bucket_node);
327 int bit = ffsll(node->bitmap) - 1;
328 assert(bit >= 0 && bit <= 63);
329
330 /* Reserve the memory by clearing the bit. */
331 assert((node->bitmap & (1ull << bit)) != 0ull);
332 node->bitmap &= ~(1ull << bit);
333
334 uint64_t addr = node->start_address + bit * bucket->size;
335
336 /* If this node is now completely full, remove it from the free list. */
337 if (node->bitmap == 0ull) {
338 (void) util_dynarray_pop(vma_list, struct vma_bucket_node);
339 }
340
341 return addr;
342 }
343
344 static void
345 bucket_vma_free(struct bo_cache_bucket *bucket, uint64_t address)
346 {
347 enum brw_memory_zone memzone = memzone_for_address(address);
348 struct util_dynarray *vma_list = &bucket->vma_list[memzone];
349 const uint64_t node_bytes = 64ull * bucket->size;
350 struct vma_bucket_node *node = NULL;
351
352 /* bucket_vma_alloc allocates 64 blocks at a time, and aligns it to
353 * that 64 block size. So, we can round down to get the starting address.
354 */
355 uint64_t start = (address / node_bytes) * node_bytes;
356
357 /* Dividing the offset from start by bucket size gives us the bit index. */
358 int bit = (address - start) / bucket->size;
359
360 assert(start + bit * bucket->size == address);
361
362 util_dynarray_foreach(vma_list, struct vma_bucket_node, cur) {
363 if (cur->start_address == start) {
364 node = cur;
365 break;
366 }
367 }
368
369 if (!node) {
370 /* No node - the whole group of 64 blocks must have been in-use. */
371 node = util_dynarray_grow(vma_list, struct vma_bucket_node, 1);
372
373 if (unlikely(!node))
374 return; /* bogus, leaks some GPU VMA, but nothing we can do... */
375
376 node->start_address = start;
377 node->bitmap = 0ull;
378 }
379
380 /* Set the bit to return the memory. */
381 assert((node->bitmap & (1ull << bit)) == 0ull);
382 node->bitmap |= 1ull << bit;
383
384 /* The block might be entirely free now, and if so, we could return it
385 * to the larger allocator. But we may as well hang on to it, in case
386 * we get more allocations at this block size.
387 */
388 }
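
/* Worked example, illustrative only: freeing address 0x15000 from the
 * 4 KB bucket. node_bytes = 256 KB, so start rounds down to 0x0 and
 * bit = (0x15000 - 0x0) / 4096 = 21; bit 21 of that node's bitmap is
 * set again, making the block available to bucket_vma_alloc().
 */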
389
390 static struct bo_cache_bucket *
391 get_bucket_allocator(struct brw_bufmgr *bufmgr, uint64_t size)
392 {
393 /* Skip using the bucket allocator for very large sizes, as it allocates
394 * 64 of them and this can balloon rather quickly.
395 */
396 if (size > 1024 * PAGE_SIZE)
397 return NULL;
398
399 struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);
400
401 if (bucket && bucket->size == size)
402 return bucket;
403
404 return NULL;
405 }
406
407 /**
408 * Allocate a section of virtual memory for a buffer, assigning an address.
409 *
410 * This uses either the bucket allocator for the given size, or the large
411 * object allocator (util_vma).
412 */
413 static uint64_t
414 vma_alloc(struct brw_bufmgr *bufmgr,
415 enum brw_memory_zone memzone,
416 uint64_t size,
417 uint64_t alignment)
418 {
419 /* Without softpin support, we let the kernel assign addresses. */
420 assert(brw_using_softpin(bufmgr));
421
422 alignment = ALIGN(alignment, PAGE_SIZE);
423
424 struct bo_cache_bucket *bucket = get_bucket_allocator(bufmgr, size);
425 uint64_t addr;
426
427 if (bucket) {
428 addr = bucket_vma_alloc(bufmgr, bucket, memzone);
429 } else {
430 addr = util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size,
431 alignment);
432 }
433
434 assert((addr >> 48ull) == 0);
435 assert((addr % alignment) == 0);
436
437 return gen_canonical_address(addr);
438 }
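
/* Illustrative note, not part of the driver: gen_canonical_address()
 * sign-extends bit 47 (x86-64 canonical form), so an allocation at
 * 0x0000900000000000 is returned as 0xffff900000000000. vma_free()
 * undoes this with gen_48b_address() before freeing.
 */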
439
440 /**
441 * Free a virtual memory area, allowing the address to be reused.
442 */
443 static void
444 vma_free(struct brw_bufmgr *bufmgr,
445 uint64_t address,
446 uint64_t size)
447 {
448 assert(brw_using_softpin(bufmgr));
449
450 /* Un-canonicalize the address. */
451 address = gen_48b_address(address);
452
453 if (address == 0ull)
454 return;
455
456 struct bo_cache_bucket *bucket = get_bucket_allocator(bufmgr, size);
457
458 if (bucket) {
459 bucket_vma_free(bucket, address);
460 } else {
461 enum brw_memory_zone memzone = memzone_for_address(address);
462 util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
463 }
464 }
465
466 int
467 brw_bo_busy(struct brw_bo *bo)
468 {
469 struct brw_bufmgr *bufmgr = bo->bufmgr;
470 struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
471
472 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
473 if (ret == 0) {
474 bo->idle = !busy.busy;
475 return busy.busy;
476 }
477 return false;
478 }
479
480 int
481 brw_bo_madvise(struct brw_bo *bo, int state)
482 {
483 struct drm_i915_gem_madvise madv = {
484 .handle = bo->gem_handle,
485 .madv = state,
486 .retained = 1,
487 };
488
489 drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
490
491 return madv.retained;
492 }
493
494 /* drop the oldest entries that have been purged by the kernel */
495 static void
496 brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
497 struct bo_cache_bucket *bucket)
498 {
499 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
500 if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
501 break;
502
503 list_del(&bo->head);
504 bo_free(bo);
505 }
506 }
507
508 static struct brw_bo *
509 bo_calloc(void)
510 {
511 struct brw_bo *bo = calloc(1, sizeof(*bo));
512 if (!bo)
513 return NULL;
514
515 list_inithead(&bo->exports);
516
517 return bo;
518 }
519
520 static struct brw_bo *
521 bo_alloc_internal(struct brw_bufmgr *bufmgr,
522 const char *name,
523 uint64_t size,
524 enum brw_memory_zone memzone,
525 unsigned flags,
526 uint32_t tiling_mode,
527 uint32_t stride)
528 {
529 struct brw_bo *bo;
530 int ret;
531 struct bo_cache_bucket *bucket;
532 bool alloc_from_cache;
533 uint64_t bo_size;
534 bool busy = false;
535 bool zeroed = false;
536
537 if (flags & BO_ALLOC_BUSY)
538 busy = true;
539
540 if (flags & BO_ALLOC_ZEROED)
541 zeroed = true;
542
543    /* BUSY doesn't really jive with ZEROED as we have to wait for it to
544 * be idle before we can memset. Just disallow that combination.
545 */
546 assert(!(busy && zeroed));
547
548 /* Round the allocated size up to a power of two number of pages. */
549 bucket = bucket_for_size(bufmgr, size);
550
551 /* If we don't have caching at this size, don't actually round the
552 * allocation up.
553 */
554 if (bucket == NULL) {
555 unsigned int page_size = getpagesize();
556 bo_size = size == 0 ? page_size : ALIGN(size, page_size);
557 } else {
558 bo_size = bucket->size;
559 }
560 assert(bo_size);
561
562 mtx_lock(&bufmgr->lock);
563 /* Get a buffer out of the cache if available */
564 retry:
565 alloc_from_cache = false;
566 if (bucket != NULL && !list_is_empty(&bucket->head)) {
567 if (busy && !zeroed) {
568 /* Allocate new render-target BOs from the tail (MRU)
569 * of the list, as it will likely be hot in the GPU
570 * cache and in the aperture for us. If the caller
571 * asked us to zero the buffer, we don't want this
572 * because we are going to mmap it.
573 */
574 bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
575 list_del(&bo->head);
576 alloc_from_cache = true;
577 } else {
578 /* For non-render-target BOs (where we're probably
579 * going to map it first thing in order to fill it
580 * with data), check if the last BO in the cache is
581 * unbusy, and only reuse in that case. Otherwise,
582 * allocating a new buffer is probably faster than
583 * waiting for the GPU to finish.
584 */
585 bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
586 if (!brw_bo_busy(bo)) {
587 alloc_from_cache = true;
588 list_del(&bo->head);
589 }
590 }
591
592 if (alloc_from_cache) {
593 assert(list_is_empty(&bo->exports));
594 if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
595 bo_free(bo);
596 brw_bo_cache_purge_bucket(bufmgr, bucket);
597 goto retry;
598 }
599
600 if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
601 bo_free(bo);
602 goto retry;
603 }
604
605 if (zeroed) {
606 void *map = brw_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
607 if (!map) {
608 bo_free(bo);
609 goto retry;
610 }
611 memset(map, 0, bo_size);
612 }
613 }
614 }
615
616 if (alloc_from_cache) {
617 /* If the cache BO isn't in the right memory zone, free the old
618 * memory and assign it a new address.
619 */
620 if ((bo->kflags & EXEC_OBJECT_PINNED) &&
621 memzone != memzone_for_address(bo->gtt_offset)) {
622 vma_free(bufmgr, bo->gtt_offset, bo->size);
623 bo->gtt_offset = 0ull;
624 }
625 } else {
626 bo = bo_calloc();
627 if (!bo)
628 goto err;
629
630 bo->size = bo_size;
631 bo->idle = true;
632
633 struct drm_i915_gem_create create = { .size = bo_size };
634
635 /* All new BOs we get from the kernel are zeroed, so we don't need to
636 * worry about that here.
637 */
638 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
639 if (ret != 0) {
640 free(bo);
641 goto err;
642 }
643
644 bo->gem_handle = create.handle;
645
646 bo->bufmgr = bufmgr;
647
648 bo->tiling_mode = I915_TILING_NONE;
649 bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
650 bo->stride = 0;
651
652 if (bo_set_tiling_internal(bo, tiling_mode, stride))
653 goto err_free;
654
655 /* Calling set_domain() will allocate pages for the BO outside of the
656 * struct mutex lock in the kernel, which is more efficient than waiting
657 * to create them during the first execbuf that uses the BO.
658 */
659 struct drm_i915_gem_set_domain sd = {
660 .handle = bo->gem_handle,
661 .read_domains = I915_GEM_DOMAIN_CPU,
662 .write_domain = 0,
663 };
664
665 if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
666 goto err_free;
667 }
668
669 bo->name = name;
670 p_atomic_set(&bo->refcount, 1);
671 bo->reusable = true;
672 bo->cache_coherent = bufmgr->has_llc;
673 bo->index = -1;
674 bo->kflags = bufmgr->initial_kflags;
675
676 if ((bo->kflags & EXEC_OBJECT_PINNED) && bo->gtt_offset == 0ull) {
677 bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);
678
679 if (bo->gtt_offset == 0ull)
680 goto err_free;
681 }
682
683 mtx_unlock(&bufmgr->lock);
684
685 DBG("bo_create: buf %d (%s) %llub\n", bo->gem_handle, bo->name,
686 (unsigned long long) size);
687
688 return bo;
689
690 err_free:
691 bo_free(bo);
692 err:
693 mtx_unlock(&bufmgr->lock);
694 return NULL;
695 }
696
697 struct brw_bo *
698 brw_bo_alloc(struct brw_bufmgr *bufmgr,
699 const char *name, uint64_t size,
700 enum brw_memory_zone memzone)
701 {
702 return bo_alloc_internal(bufmgr, name, size, memzone,
703 0, I915_TILING_NONE, 0);
704 }
705
706 struct brw_bo *
707 brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
708 uint64_t size, enum brw_memory_zone memzone,
709 uint32_t tiling_mode, uint32_t pitch,
710 unsigned flags)
711 {
712 return bo_alloc_internal(bufmgr, name, size, memzone,
713 flags, tiling_mode, pitch);
714 }
715
716 struct brw_bo *
717 brw_bo_alloc_tiled_2d(struct brw_bufmgr *bufmgr, const char *name,
718 int x, int y, int cpp, enum brw_memory_zone memzone,
719 uint32_t tiling, uint32_t *pitch, unsigned flags)
720 {
721 uint64_t size;
722 uint32_t stride;
723 unsigned long aligned_y, height_alignment;
724
725 /* If we're tiled, our allocations are in 8 or 32-row blocks,
726 * so failure to align our height means that we won't allocate
727 * enough pages.
728 *
729 * If we're untiled, we still have to align to 2 rows high
730 * because the data port accesses 2x2 blocks even if the
731 * bottom row isn't to be rendered, so failure to align means
732 * we could walk off the end of the GTT and fault. This is
733 * documented on 965, and may be the case on older chipsets
734 * too so we try to be careful.
735 */
736 aligned_y = y;
737 height_alignment = 2;
738
739 if (tiling == I915_TILING_X)
740 height_alignment = 8;
741 else if (tiling == I915_TILING_Y)
742 height_alignment = 32;
743 aligned_y = ALIGN(y, height_alignment);
744
745 stride = x * cpp;
746 stride = bo_tile_pitch(bufmgr, stride, tiling);
747 size = stride * aligned_y;
748 size = bo_tile_size(bufmgr, size, tiling);
749 *pitch = stride;
750
751 if (tiling == I915_TILING_NONE)
752 stride = 0;
753
754 return bo_alloc_internal(bufmgr, name, size, memzone,
755 flags, tiling, stride);
756 }
757
758 /**
759 * Returns a brw_bo wrapping the given buffer object handle.
760 *
761 * This can be used when one application needs to pass a buffer object
762 * to another.
763 */
764 struct brw_bo *
765 brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
766 const char *name, unsigned int handle)
767 {
768 struct brw_bo *bo;
769
770    /* At the moment most applications only have a few named BOs.
771 * For instance, in a DRI client only the render buffers passed
772 * between X and the client are named. And since X returns the
773     * alternating names for the front/back buffer, a linear search
774 * provides a sufficiently fast match.
775 */
776 mtx_lock(&bufmgr->lock);
777 bo = hash_find_bo(bufmgr->name_table, handle);
778 if (bo) {
779 brw_bo_reference(bo);
780 goto out;
781 }
782
783 struct drm_gem_open open_arg = { .name = handle };
784 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
785 if (ret != 0) {
786 DBG("Couldn't reference %s handle 0x%08x: %s\n",
787 name, handle, strerror(errno));
788 bo = NULL;
789 goto out;
790 }
791 /* Now see if someone has used a prime handle to get this
792 * object from the kernel before by looking through the list
793 * again for a matching gem_handle
794 */
795 bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
796 if (bo) {
797 assert(list_is_empty(&bo->exports));
798 brw_bo_reference(bo);
799 goto out;
800 }
801
802 bo = bo_calloc();
803 if (!bo)
804 goto out;
805
806 p_atomic_set(&bo->refcount, 1);
807
808 bo->size = open_arg.size;
809 bo->gtt_offset = 0;
810 bo->bufmgr = bufmgr;
811 bo->gem_handle = open_arg.handle;
812 bo->name = name;
813 bo->global_name = handle;
814 bo->reusable = false;
815 bo->external = true;
816 bo->kflags = bufmgr->initial_kflags;
817
818 if (bo->kflags & EXEC_OBJECT_PINNED)
819 bo->gtt_offset = vma_alloc(bufmgr, BRW_MEMZONE_OTHER, bo->size, 1);
820
821 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
822 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
823
824 struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
825 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
826 if (ret != 0)
827 goto err_unref;
828
829 bo->tiling_mode = get_tiling.tiling_mode;
830 bo->swizzle_mode = get_tiling.swizzle_mode;
831 /* XXX stride is unknown */
832 DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
833
834 out:
835 mtx_unlock(&bufmgr->lock);
836 return bo;
837
838 err_unref:
839 bo_free(bo);
840 mtx_unlock(&bufmgr->lock);
841 return NULL;
842 }
843
844 static void
845 bo_free(struct brw_bo *bo)
846 {
847 struct brw_bufmgr *bufmgr = bo->bufmgr;
848
849 if (bo->map_cpu) {
850 VG_NOACCESS(bo->map_cpu, bo->size);
851 drm_munmap(bo->map_cpu, bo->size);
852 }
853 if (bo->map_wc) {
854 VG_NOACCESS(bo->map_wc, bo->size);
855 drm_munmap(bo->map_wc, bo->size);
856 }
857 if (bo->map_gtt) {
858 VG_NOACCESS(bo->map_gtt, bo->size);
859 drm_munmap(bo->map_gtt, bo->size);
860 }
861
862 if (bo->external) {
863 struct hash_entry *entry;
864
865 if (bo->global_name) {
866 entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
867 _mesa_hash_table_remove(bufmgr->name_table, entry);
868 }
869
870 entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
871 _mesa_hash_table_remove(bufmgr->handle_table, entry);
872 } else {
873 assert(list_is_empty(&bo->exports));
874 }
875
876 /* Close this object */
877 struct drm_gem_close close = { .handle = bo->gem_handle };
878 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
879 if (ret != 0) {
880 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
881 bo->gem_handle, bo->name, strerror(errno));
882 }
883
884 if (bo->kflags & EXEC_OBJECT_PINNED)
885 vma_free(bo->bufmgr, bo->gtt_offset, bo->size);
886
887 free(bo);
888 }
889
890 /** Frees all cached buffers significantly older than @time. */
891 static void
892 cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
893 {
894 int i;
895
896 if (bufmgr->time == time)
897 return;
898
899 for (i = 0; i < bufmgr->num_buckets; i++) {
900 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
901
902 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
903 if (time - bo->free_time <= 1)
904 break;
905
906 list_del(&bo->head);
907
908 bo_free(bo);
909 }
910 }
911
912 bufmgr->time = time;
913 }
914
915 static void
916 bo_unreference_final(struct brw_bo *bo, time_t time)
917 {
918 struct brw_bufmgr *bufmgr = bo->bufmgr;
919 struct bo_cache_bucket *bucket;
920
921 DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
922
923 list_for_each_entry_safe(struct bo_export, export, &bo->exports, link) {
924 struct drm_gem_close close = { .handle = export->gem_handle };
925 gen_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);
926
927 list_del(&export->link);
928 free(export);
929 }
930
931 bucket = bucket_for_size(bufmgr, bo->size);
932 /* Put the buffer into our internal cache for reuse if we can. */
933 if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
934 brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
935 bo->free_time = time;
936
937 bo->name = NULL;
938
939 list_addtail(&bo->head, &bucket->head);
940 } else {
941 bo_free(bo);
942 }
943 }
944
945 void
946 brw_bo_unreference(struct brw_bo *bo)
947 {
948 if (bo == NULL)
949 return;
950
951 assert(p_atomic_read(&bo->refcount) > 0);
952
953 if (atomic_add_unless(&bo->refcount, -1, 1)) {
954 struct brw_bufmgr *bufmgr = bo->bufmgr;
955 struct timespec time;
956
957 clock_gettime(CLOCK_MONOTONIC, &time);
958
959 mtx_lock(&bufmgr->lock);
960
961 if (p_atomic_dec_zero(&bo->refcount)) {
962 bo_unreference_final(bo, time.tv_sec);
963 cleanup_bo_cache(bufmgr, time.tv_sec);
964 }
965
966 mtx_unlock(&bufmgr->lock);
967 }
968 }
969
970 static void
971 bo_wait_with_stall_warning(struct brw_context *brw,
972 struct brw_bo *bo,
973 const char *action)
974 {
975 bool busy = brw && brw->perf_debug && !bo->idle;
976 double elapsed = unlikely(busy) ? -get_time() : 0.0;
977
978 brw_bo_wait_rendering(bo);
979
980 if (unlikely(busy)) {
981 elapsed += get_time();
982 if (elapsed > 1e-5) /* 0.01ms */
983 perf_debug("%s a busy \"%s\" BO stalled and took %.03f ms.\n",
984 action, bo->name, elapsed * 1000);
985 }
986 }
987
988 static void
989 print_flags(unsigned flags)
990 {
991 if (flags & MAP_READ)
992 DBG("READ ");
993 if (flags & MAP_WRITE)
994 DBG("WRITE ");
995 if (flags & MAP_ASYNC)
996 DBG("ASYNC ");
997 if (flags & MAP_PERSISTENT)
998 DBG("PERSISTENT ");
999 if (flags & MAP_COHERENT)
1000 DBG("COHERENT ");
1001 if (flags & MAP_RAW)
1002 DBG("RAW ");
1003 DBG("\n");
1004 }
1005
1006 static void *
1007 brw_bo_gem_mmap_legacy(struct brw_context *brw, struct brw_bo *bo, bool wc)
1008 {
1009 struct brw_bufmgr *bufmgr = bo->bufmgr;
1010
1011 struct drm_i915_gem_mmap mmap_arg = {
1012 .handle = bo->gem_handle,
1013 .size = bo->size,
1014 .flags = wc ? I915_MMAP_WC : 0,
1015 };
1016
1017 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
1018 if (ret != 0) {
1019 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1020 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1021 return NULL;
1022 }
1023 void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
1024
1025 return map;
1026 }
1027
1028 static void *
1029 brw_bo_gem_mmap_offset(struct brw_context *brw, struct brw_bo *bo, bool wc)
1030 {
1031 struct brw_bufmgr *bufmgr = bo->bufmgr;
1032
1033 struct drm_i915_gem_mmap_offset mmap_arg = {
1034 .handle = bo->gem_handle,
1035 .flags = wc ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
1036 };
1037
1038 /* Get the fake offset back */
1039 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg);
1040 if (ret != 0) {
1041 DBG("%s:%d: Error preparing buffer %d (%s): %s .\n",
1042 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1043 return NULL;
1044 }
1045
1046 /* And map it */
1047 void *map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
1048 bufmgr->fd, mmap_arg.offset);
1049 if (map == MAP_FAILED) {
1050 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1051 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1052 return NULL;
1053 }
1054
1055 return map;
1056 }
1057
1058 static void *
1059 brw_bo_gem_mmap(struct brw_context *brw, struct brw_bo *bo, bool wc)
1060 {
1061 struct brw_bufmgr *bufmgr = bo->bufmgr;
1062
1063 if (bufmgr->has_mmap_offset)
1064 return brw_bo_gem_mmap_offset(brw, bo, wc);
1065 else
1066 return brw_bo_gem_mmap_legacy(brw, bo, wc);
1067 }
1068
1069 static void *
1070 brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
1071 {
1072 /* We disallow CPU maps for writing to non-coherent buffers, as the
1073 * CPU map can become invalidated when a batch is flushed out, which
1074 * can happen at unpredictable times. You should use WC maps instead.
1075 */
1076 assert(bo->cache_coherent || !(flags & MAP_WRITE));
1077
1078 if (!bo->map_cpu) {
1079 DBG("brw_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);
1080
1081 void *map = brw_bo_gem_mmap(brw, bo, false);
1082 VG_DEFINED(map, bo->size);
1083
1084 if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
1085 VG_NOACCESS(map, bo->size);
1086 drm_munmap(map, bo->size);
1087 }
1088 }
1089 assert(bo->map_cpu);
1090
1091 DBG("brw_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
1092 bo->map_cpu);
1093 print_flags(flags);
1094
1095 if (!(flags & MAP_ASYNC)) {
1096 bo_wait_with_stall_warning(brw, bo, "CPU mapping");
1097 }
1098
1099 if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
1100 /* If we're reusing an existing CPU mapping, the CPU caches may
1101 * contain stale data from the last time we read from that mapping.
1102 * (With the BO cache, it might even be data from a previous buffer!)
1103 * Even if it's a brand new mapping, the kernel may have zeroed the
1104 * buffer via CPU writes.
1105 *
1106 * We need to invalidate those cachelines so that we see the latest
1107 * contents, and so long as we only read from the CPU mmap we do not
1108 * need to write those cachelines back afterwards.
1109 *
1110        * On LLC, the empirical evidence suggests that writes from the GPU
1111 * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
1112 * cachelines. (Other reads, such as the display engine, bypass the
1113 * LLC entirely requiring us to keep dirty pixels for the scanout
1114 * out of any cache.)
1115 */
1116 gen_invalidate_range(bo->map_cpu, bo->size);
1117 }
1118
1119 return bo->map_cpu;
1120 }
1121
1122 static void *
1123 brw_bo_map_wc(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
1124 {
1125 struct brw_bufmgr *bufmgr = bo->bufmgr;
1126
1127 if (!bufmgr->has_mmap_wc)
1128 return NULL;
1129
1130 if (!bo->map_wc) {
1131 DBG("brw_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
1132 void *map = brw_bo_gem_mmap(brw, bo, true);
1133 VG_DEFINED(map, bo->size);
1134
1135 if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
1136 VG_NOACCESS(map, bo->size);
1137 drm_munmap(map, bo->size);
1138 }
1139 }
1140 assert(bo->map_wc);
1141
1142 DBG("brw_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
1143 print_flags(flags);
1144
1145 if (!(flags & MAP_ASYNC)) {
1146 bo_wait_with_stall_warning(brw, bo, "WC mapping");
1147 }
1148
1149 return bo->map_wc;
1150 }
1151
1152 /**
1153 * Perform an uncached mapping via the GTT.
1154 *
1155 * Write access through the GTT is not quite fully coherent. On low power
1156 * systems especially, like modern Atoms, we can observe reads from RAM before
1157 * the write via GTT has landed. A write memory barrier that flushes the Write
1158 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
1159 * read after the write as the GTT write suffers a small delay through the GTT
1160 * indirection. The kernel uses an uncached mmio read to ensure the GTT write
1161 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
1162 * flushes prior to execbuf submission. However, if we are not informing the
1163 * kernel about our GTT writes, it will not flush before earlier access, such
1164 * as when using the cmdparser. Similarly, we need to be careful if we should
1165 * ever issue a CPU read immediately following a GTT write.
1166 *
1167 * Telling the kernel about write access also has one more important
1168 * side-effect. Upon receiving notification about the write, it cancels any
1169 * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
1170 * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
1171 * actual scanout via a mmaping, only to a backbuffer and so all the FBC/PSR
1172 * tracking is handled on the buffer exchange instead.
1173 */
1174 static void *
1175 brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
1176 {
1177 struct brw_bufmgr *bufmgr = bo->bufmgr;
1178
1179 /* Get a mapping of the buffer if we haven't before. */
1180 if (bo->map_gtt == NULL) {
1181 DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);
1182
1183 struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };
1184
1185 /* Get the fake offset back... */
1186 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
1187 if (ret != 0) {
1188 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1189 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1190 return NULL;
1191 }
1192
1193 /* and mmap it. */
1194 void *map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1195 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
1196 if (map == MAP_FAILED) {
1197 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1198 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1199 return NULL;
1200 }
1201
1202 /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
1203 * already intercept this mmap call. However, for consistency between
1204 * all the mmap paths, we mark the pointer as defined now and mark it
1205 * as inaccessible afterwards.
1206 */
1207 VG_DEFINED(map, bo->size);
1208
1209 if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
1210 VG_NOACCESS(map, bo->size);
1211 drm_munmap(map, bo->size);
1212 }
1213 }
1214 assert(bo->map_gtt);
1215
1216 DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
1217 print_flags(flags);
1218
1219 if (!(flags & MAP_ASYNC)) {
1220 bo_wait_with_stall_warning(brw, bo, "GTT mapping");
1221 }
1222
1223 return bo->map_gtt;
1224 }
1225
1226 static bool
1227 can_map_cpu(struct brw_bo *bo, unsigned flags)
1228 {
1229 if (bo->cache_coherent)
1230 return true;
1231
1232 /* Even if the buffer itself is not cache-coherent (such as a scanout), on
1233 * an LLC platform reads always are coherent (as they are performed via the
1234 * central system agent). It is just the writes that we need to take special
1235 * care to ensure that land in main memory and not stick in the CPU cache.
1236 */
1237 if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
1238 return true;
1239
1240 /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
1241 * across batch flushes where the kernel will change cache domains of the
1242 * bo, invalidating continued access to the CPU mmap on non-LLC device.
1243 *
1244 * Similarly, ASYNC typically means that the buffer will be accessed via
1245 * both the CPU and the GPU simultaneously. Batches may be executed that
1246 * use the BO even while it is mapped. While OpenGL technically disallows
1247 * most drawing while non-persistent mappings are active, we may still use
1248 * the GPU for blits or other operations, causing batches to happen at
1249 * inconvenient times.
1250 */
1251 if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC))
1252 return false;
1253
1254 return !(flags & MAP_WRITE);
1255 }
1256
1257 void *
1258 brw_bo_map(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
1259 {
1260 if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
1261 return brw_bo_map_gtt(brw, bo, flags);
1262
1263 void *map;
1264
1265 if (can_map_cpu(bo, flags))
1266 map = brw_bo_map_cpu(brw, bo, flags);
1267 else
1268 map = brw_bo_map_wc(brw, bo, flags);
1269
1270 /* Allow the attempt to fail by falling back to the GTT where necessary.
1271 *
1272 * Not every buffer can be mmaped directly using the CPU (or WC), for
1273 * example buffers that wrap stolen memory or are imported from other
1274 * devices. For those, we have little choice but to use a GTT mmapping.
1275 * However, if we use a slow GTT mmapping for reads where we expected fast
1276 * access, that order of magnitude difference in throughput will be clearly
1277 * expressed by angry users.
1278 *
1279 * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
1280 */
1281 if (!map && !(flags & MAP_RAW)) {
1282 if (brw) {
1283 perf_debug("Fallback GTT mapping for %s with access flags %x\n",
1284 bo->name, flags);
1285 }
1286 map = brw_bo_map_gtt(brw, bo, flags);
1287 }
1288
1289 return map;
1290 }
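
/* Mapping-strategy summary for brw_bo_map() above, illustrative only:
 *
 *    tiled && !MAP_RAW   -> GTT map (the fence does the detiling)
 *    can_map_cpu()       -> CPU map (coherent BOs, plus most read-only
 *                           access patterns)
 *    otherwise           -> WC map, with a GTT fallback if the WC/CPU
 *                           mmap is unavailable for this BO
 */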
1291
1292 int
1293 brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
1294 uint64_t size, const void *data)
1295 {
1296 struct brw_bufmgr *bufmgr = bo->bufmgr;
1297
1298 struct drm_i915_gem_pwrite pwrite = {
1299 .handle = bo->gem_handle,
1300 .offset = offset,
1301 .size = size,
1302 .data_ptr = (uint64_t) (uintptr_t) data,
1303 };
1304
1305 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
1306 if (ret != 0) {
1307 ret = -errno;
1308 DBG("%s:%d: Error writing data to buffer %d: "
1309 "(%"PRIu64" %"PRIu64") %s .\n",
1310 __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
1311 }
1312
1313 return ret;
1314 }
1315
1316 /** Waits for all GPU rendering with the object to have completed. */
1317 void
1318 brw_bo_wait_rendering(struct brw_bo *bo)
1319 {
1320 /* We require a kernel recent enough for WAIT_IOCTL support.
1321 * See intel_init_bufmgr()
1322 */
1323 brw_bo_wait(bo, -1);
1324 }
1325
1326 /**
1327 * Waits on a BO for the given amount of time.
1328 *
1329 * @bo: buffer object to wait for
1330 * @timeout_ns: amount of time to wait in nanoseconds.
1331 * If value is less than 0, an infinite wait will occur.
1332 *
1333  * Returns 0 if the wait was successful, i.e. the last batch referencing the
1334 * object has completed within the allotted time. Otherwise some negative return
1335 * value describes the error. Of particular interest is -ETIME when the wait has
1336 * failed to yield the desired result.
1337 *
1338 * Similar to brw_bo_wait_rendering except a timeout parameter allows
1339 * the operation to give up after a certain amount of time. Another subtle
1340  * difference is that the internal locking semantics differ (this variant does
1341 * not hold the lock for the duration of the wait). This makes the wait subject
1342 * to a larger userspace race window.
1343 *
1344 * The implementation shall wait until the object is no longer actively
1345  * referenced within a batch buffer at the time of the call. The wait does
1346  * not guard against the buffer being re-issued via another thread or a
1347  * flinked handle. Userspace must make sure this race does not occur if
1348  * such precision is important.
1349 *
1350  * Note that some kernels have broken the promise of an infinite wait for
1351  * negative values; upgrade to the latest stable kernel if this is the case.
1352 */
1353 int
1354 brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
1355 {
1356 struct brw_bufmgr *bufmgr = bo->bufmgr;
1357
1358 /* If we know it's idle, don't bother with the kernel round trip */
1359 if (bo->idle && !bo->external)
1360 return 0;
1361
1362 struct drm_i915_gem_wait wait = {
1363 .bo_handle = bo->gem_handle,
1364 .timeout_ns = timeout_ns,
1365 };
1366 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1367 if (ret != 0)
1368 return -errno;
1369
1370 bo->idle = true;
1371
1372 return ret;
1373 }
1374
1375 void
1376 brw_bufmgr_unref(struct brw_bufmgr *bufmgr)
1377 {
1378 mtx_lock(&global_bufmgr_list_mutex);
1379 if (p_atomic_dec_zero(&bufmgr->refcount)) {
1380 list_del(&bufmgr->link);
1381 } else {
1382 bufmgr = NULL;
1383 }
1384 mtx_unlock(&global_bufmgr_list_mutex);
1385
1386 if (!bufmgr)
1387 return;
1388
1389 mtx_destroy(&bufmgr->lock);
1390
1391 /* Free any cached buffer objects we were going to reuse */
1392 for (int i = 0; i < bufmgr->num_buckets; i++) {
1393 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
1394
1395 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
1396 list_del(&bo->head);
1397
1398 bo_free(bo);
1399 }
1400
1401 if (brw_using_softpin(bufmgr)) {
1402 for (int z = 0; z < BRW_MEMZONE_COUNT; z++) {
1403 util_dynarray_fini(&bucket->vma_list[z]);
1404 }
1405 }
1406 }
1407
1408 _mesa_hash_table_destroy(bufmgr->name_table, NULL);
1409 _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
1410
1411 if (brw_using_softpin(bufmgr)) {
1412 for (int z = 0; z < BRW_MEMZONE_COUNT; z++) {
1413 util_vma_heap_finish(&bufmgr->vma_allocator[z]);
1414 }
1415 }
1416
1417 close(bufmgr->fd);
1418 bufmgr->fd = -1;
1419
1420 free(bufmgr);
1421 }
1422
1423 static int
1424 bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
1425 uint32_t stride)
1426 {
1427 struct brw_bufmgr *bufmgr = bo->bufmgr;
1428 struct drm_i915_gem_set_tiling set_tiling;
1429 int ret;
1430
1431 if (bo->global_name == 0 &&
1432 tiling_mode == bo->tiling_mode && stride == bo->stride)
1433 return 0;
1434
1435 memset(&set_tiling, 0, sizeof(set_tiling));
1436 do {
1437 /* set_tiling is slightly broken and overwrites the
1438 * input on the error path, so we have to open code
1439       * drmIoctl.
1440 */
1441 set_tiling.handle = bo->gem_handle;
1442 set_tiling.tiling_mode = tiling_mode;
1443 set_tiling.stride = stride;
1444
1445 ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1446 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1447 if (ret == -1)
1448 return -errno;
1449
1450 bo->tiling_mode = set_tiling.tiling_mode;
1451 bo->swizzle_mode = set_tiling.swizzle_mode;
1452 bo->stride = set_tiling.stride;
1453 return 0;
1454 }
1455
1456 int
1457 brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
1458 uint32_t *swizzle_mode)
1459 {
1460 *tiling_mode = bo->tiling_mode;
1461 *swizzle_mode = bo->swizzle_mode;
1462 return 0;
1463 }
1464
1465 static struct brw_bo *
1466 brw_bo_gem_create_from_prime_internal(struct brw_bufmgr *bufmgr, int prime_fd,
1467 int tiling_mode, uint32_t stride)
1468 {
1469 uint32_t handle;
1470 struct brw_bo *bo;
1471
1472 mtx_lock(&bufmgr->lock);
1473 int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1474 if (ret) {
1475 DBG("create_from_prime: failed to obtain handle from fd: %s\n",
1476 strerror(errno));
1477 mtx_unlock(&bufmgr->lock);
1478 return NULL;
1479 }
1480
1481 /*
1482 * See if the kernel has already returned this buffer to us. Just as
1483 * for named buffers, we must not create two bo's pointing at the same
1484 * kernel object
1485 */
1486 bo = hash_find_bo(bufmgr->handle_table, handle);
1487 if (bo) {
1488 assert(list_is_empty(&bo->exports));
1489 brw_bo_reference(bo);
1490 goto out;
1491 }
1492
1493 bo = bo_calloc();
1494 if (!bo)
1495 goto out;
1496
1497 p_atomic_set(&bo->refcount, 1);
1498
1499 /* Determine size of bo. The fd-to-handle ioctl really should
1500 * return the size, but it doesn't. If we have kernel 3.12 or
1501 * later, we can lseek on the prime fd to get the size. Older
1502 * kernels will just fail, in which case we fall back to the
1503    * provided (estimated or guessed) size. */
1504 ret = lseek(prime_fd, 0, SEEK_END);
1505 if (ret != -1)
1506 bo->size = ret;
1507
1508 bo->bufmgr = bufmgr;
1509
1510 bo->gem_handle = handle;
1511 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1512
1513 bo->name = "prime";
1514 bo->reusable = false;
1515 bo->external = true;
1516 bo->kflags = bufmgr->initial_kflags;
1517
1518 if (bo->kflags & EXEC_OBJECT_PINNED) {
1519 assert(bo->size > 0);
1520 bo->gtt_offset = vma_alloc(bufmgr, BRW_MEMZONE_OTHER, bo->size, 1);
1521 }
1522
1523 if (tiling_mode < 0) {
1524 struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
1525 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
1526 goto err;
1527
1528 bo->tiling_mode = get_tiling.tiling_mode;
1529 bo->swizzle_mode = get_tiling.swizzle_mode;
1530 /* XXX stride is unknown */
1531 } else {
1532 bo_set_tiling_internal(bo, tiling_mode, stride);
1533 }
1534
1535 out:
1536 mtx_unlock(&bufmgr->lock);
1537 return bo;
1538
1539 err:
1540 bo_free(bo);
1541 mtx_unlock(&bufmgr->lock);
1542 return NULL;
1543 }
1544
1545 struct brw_bo *
1546 brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd)
1547 {
1548 return brw_bo_gem_create_from_prime_internal(bufmgr, prime_fd, -1, 0);
1549 }
1550
1551 struct brw_bo *
1552 brw_bo_gem_create_from_prime_tiled(struct brw_bufmgr *bufmgr, int prime_fd,
1553 uint32_t tiling_mode, uint32_t stride)
1554 {
1555 assert(tiling_mode == I915_TILING_NONE ||
1556 tiling_mode == I915_TILING_X ||
1557 tiling_mode == I915_TILING_Y);
1558
1559 return brw_bo_gem_create_from_prime_internal(bufmgr, prime_fd,
1560 tiling_mode, stride);
1561 }
1562
1563 static void
1564 brw_bo_make_external(struct brw_bo *bo)
1565 {
1566 struct brw_bufmgr *bufmgr = bo->bufmgr;
1567
1568 if (!bo->external) {
1569 mtx_lock(&bufmgr->lock);
1570 if (!bo->external) {
1571 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1572 bo->external = true;
1573 }
1574 mtx_unlock(&bufmgr->lock);
1575 }
1576 }
1577
1578 int
1579 brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
1580 {
1581 struct brw_bufmgr *bufmgr = bo->bufmgr;
1582
1583 brw_bo_make_external(bo);
1584
1585 if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
1586 DRM_CLOEXEC, prime_fd) != 0)
1587 return -errno;
1588
1589 bo->reusable = false;
1590
1591 return 0;
1592 }
1593
1594 uint32_t
1595 brw_bo_export_gem_handle(struct brw_bo *bo)
1596 {
1597 brw_bo_make_external(bo);
1598
1599 return bo->gem_handle;
1600 }
1601
1602 int
1603 brw_bo_flink(struct brw_bo *bo, uint32_t *name)
1604 {
1605 struct brw_bufmgr *bufmgr = bo->bufmgr;
1606
1607 if (!bo->global_name) {
1608 struct drm_gem_flink flink = { .handle = bo->gem_handle };
1609
1610 if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
1611 return -errno;
1612
1613 brw_bo_make_external(bo);
1614 mtx_lock(&bufmgr->lock);
1615 if (!bo->global_name) {
1616 bo->global_name = flink.name;
1617 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
1618 }
1619 mtx_unlock(&bufmgr->lock);
1620
1621 bo->reusable = false;
1622 }
1623
1624 *name = bo->global_name;
1625 return 0;
1626 }
1627
1628 int
1629 brw_bo_export_gem_handle_for_device(struct brw_bo *bo, int drm_fd,
1630 uint32_t *out_handle)
1631 {
1632 struct brw_bufmgr *bufmgr = bo->bufmgr;
1633
1634 /* Only add the new GEM handle to the list of export if it belongs to a
1635 * different GEM device. Otherwise we might close the same buffer multiple
1636 * times.
1637 */
1638 int ret = os_same_file_description(drm_fd, bufmgr->fd);
1639 WARN_ONCE(ret < 0,
1640 "Kernel has no file descriptor comparison support: %s\n",
1641 strerror(errno));
1642 if (ret == 0) {
1643 *out_handle = brw_bo_export_gem_handle(bo);
1644 return 0;
1645 }
1646
1647 struct bo_export *export = calloc(1, sizeof(*export));
1648 if (!export)
1649 return -ENOMEM;
1650
1651 export->drm_fd = drm_fd;
1652
1653 int dmabuf_fd = -1;
1654 int err = brw_bo_gem_export_to_prime(bo, &dmabuf_fd);
1655 if (err) {
1656 free(export);
1657 return err;
1658 }
1659
1660 mtx_lock(&bufmgr->lock);
1661 err = drmPrimeFDToHandle(drm_fd, dmabuf_fd, &export->gem_handle);
1662 close(dmabuf_fd);
1663 if (err) {
1664 mtx_unlock(&bufmgr->lock);
1665 free(export);
1666 return err;
1667 }
1668
1669 bool found = false;
1670 list_for_each_entry(struct bo_export, iter, &bo->exports, link) {
1671 if (iter->drm_fd != drm_fd)
1672 continue;
1673 /* Here we assume that for a given DRM fd, we'll always get back the
1674 * same GEM handle for a given buffer.
1675 */
1676 assert(iter->gem_handle == export->gem_handle);
1677 free(export);
1678 export = iter;
1679 found = true;
1680 break;
1681 }
1682 if (!found)
1683 list_addtail(&export->link, &bo->exports);
1684
1685 mtx_unlock(&bufmgr->lock);
1686
1687 *out_handle = export->gem_handle;
1688
1689 return 0;
1690 }
1691
1692 static void
1693 add_bucket(struct brw_bufmgr *bufmgr, int size)
1694 {
1695 unsigned int i = bufmgr->num_buckets;
1696
1697 assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
1698
1699 list_inithead(&bufmgr->cache_bucket[i].head);
1700 if (brw_using_softpin(bufmgr)) {
1701 for (int z = 0; z < BRW_MEMZONE_COUNT; z++)
1702 util_dynarray_init(&bufmgr->cache_bucket[i].vma_list[z], NULL);
1703 }
1704 bufmgr->cache_bucket[i].size = size;
1705 bufmgr->num_buckets++;
1706
1707 assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
1708 assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
1709 assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
1710 }
1711
1712 static void
1713 init_cache_buckets(struct brw_bufmgr *bufmgr)
1714 {
1715 uint64_t size, cache_max_size = 64 * 1024 * 1024;
1716
1717 /* OK, so power of two buckets was too wasteful of memory.
1718 * Give 3 other sizes between each power of two, to hopefully
1719 * cover things accurately enough. (The alternative is
1720 * probably to just go for exact matching of sizes, and assume
1721 * that for things like composited window resize the tiled
1722 * width/height alignment and rounding of sizes to pages will
1723 * get us useful cache hit rates anyway)
1724 */
1725 add_bucket(bufmgr, PAGE_SIZE);
1726 add_bucket(bufmgr, PAGE_SIZE * 2);
1727 add_bucket(bufmgr, PAGE_SIZE * 3);
1728
1729 /* Initialize the linked lists for BO reuse cache. */
1730 for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
1731 add_bucket(bufmgr, size);
1732
1733 add_bucket(bufmgr, size + size * 1 / 4);
1734 add_bucket(bufmgr, size + size * 2 / 4);
1735 add_bucket(bufmgr, size + size * 3 / 4);
1736 }
1737 }
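
/* Illustrative note, not part of the driver: the buckets created above
 * are, in pages: 1, 2, 3, then 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, etc.,
 * i.e. four evenly spaced sizes per power of two up to 64 MB. That is
 * 3 + 13 * 4 = 55 buckets, which fits in the cache_bucket[14 * 4] array.
 */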
1738
1739 uint32_t
1740 brw_create_hw_context(struct brw_bufmgr *bufmgr)
1741 {
1742 struct drm_i915_gem_context_create create = { };
1743 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
1744 if (ret != 0) {
1745 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
1746 return 0;
1747 }
1748
1749 return create.ctx_id;
1750 }
1751
1752 int
1753 brw_hw_context_set_priority(struct brw_bufmgr *bufmgr,
1754 uint32_t ctx_id,
1755 int priority)
1756 {
1757 struct drm_i915_gem_context_param p = {
1758 .ctx_id = ctx_id,
1759 .param = I915_CONTEXT_PARAM_PRIORITY,
1760 .value = priority,
1761 };
1762 int err;
1763
1764 err = 0;
1765 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
1766 err = -errno;
1767
1768 return err;
1769 }
1770
1771 void
1772 brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
1773 {
1774 struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
1775
1776 if (ctx_id != 0 &&
1777 drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
1778 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
1779 strerror(errno));
1780 }
1781 }
1782
1783 int
1784 brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
1785 {
1786 struct drm_i915_reg_read reg_read = { .offset = offset };
1787 int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
1788
1789 *result = reg_read.val;
1790 return ret;
1791 }
1792
1793 static int
1794 gem_param(int fd, int name)
1795 {
1796 int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */
1797
1798 struct drm_i915_getparam gp = { .param = name, .value = &v };
1799 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
1800 return -1;
1801
1802 return v;
1803 }
1804
1805 static int
1806 gem_context_getparam(int fd, uint32_t context, uint64_t param, uint64_t *value)
1807 {
1808 struct drm_i915_gem_context_param gp = {
1809 .ctx_id = context,
1810 .param = param,
1811 };
1812
1813 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp))
1814 return -1;
1815
1816 *value = gp.value;
1817
1818 return 0;
1819 }
1820
1821 bool
1822 brw_using_softpin(struct brw_bufmgr *bufmgr)
1823 {
1824 return bufmgr->initial_kflags & EXEC_OBJECT_PINNED;
1825 }
1826
1827 static struct brw_bufmgr *
1828 brw_bufmgr_ref(struct brw_bufmgr *bufmgr)
1829 {
1830 p_atomic_inc(&bufmgr->refcount);
1831 return bufmgr;
1832 }
1833
1834 /**
1835 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1836  * and manage buffer objects.
1837 *
1838 * \param fd File descriptor of the opened DRM device.
1839 */
1840 static struct brw_bufmgr *
1841 brw_bufmgr_create(struct gen_device_info *devinfo, int fd, bool bo_reuse)
1842 {
1843 struct brw_bufmgr *bufmgr;
1844
1845 bufmgr = calloc(1, sizeof(*bufmgr));
1846 if (bufmgr == NULL)
1847 return NULL;
1848
1849 /* Handles to buffer objects belong to the device fd and are not
1850 * reference counted by the kernel. If the same fd is used by
1851 * multiple parties (threads sharing the same screen bufmgr, or
1852 * even worse the same device fd passed to multiple libraries)
1853 * ownership of those handles is shared by those independent parties.
1854 *
1855 * Don't do this! Ensure that each library/bufmgr has its own device
1856 * fd so that its namespace does not clash with another.
1857 */
1858 bufmgr->fd = os_dupfd_cloexec(fd);
1859 if (bufmgr->fd < 0) {
1860 free(bufmgr);
1861 return NULL;
1862 }
1863
1864 p_atomic_set(&bufmgr->refcount, 1);
1865
1866 if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
1867 close(bufmgr->fd);
1868 free(bufmgr);
1869 return NULL;
1870 }
1871
1872 uint64_t gtt_size;
1873 if (gem_context_getparam(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE, &gtt_size))
1874 gtt_size = 0;
1875
1876 bufmgr->has_llc = devinfo->has_llc;
1877 bufmgr->has_mmap_wc = gem_param(fd, I915_PARAM_MMAP_VERSION) > 0;
1878 bufmgr->bo_reuse = bo_reuse;
1879 bufmgr->has_mmap_offset = gem_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;
1880
1881 const uint64_t _4GB = 4ull << 30;
1882
1883 /* The STATE_BASE_ADDRESS size field can only hold 1 page shy of 4GB */
1884 const uint64_t _4GB_minus_1 = _4GB - PAGE_SIZE;
1885
1886 if (devinfo->gen >= 8 && gtt_size > _4GB) {
1887 bufmgr->initial_kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
1888
1889 /* Allocate VMA in userspace if we have softpin and full PPGTT. */
1890 if (gem_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN) > 0 &&
1891 gem_param(fd, I915_PARAM_HAS_ALIASING_PPGTT) > 1) {
1892 bufmgr->initial_kflags |= EXEC_OBJECT_PINNED;
1893
1894 util_vma_heap_init(&bufmgr->vma_allocator[BRW_MEMZONE_LOW_4G],
1895 PAGE_SIZE, _4GB_minus_1);
1896
1897 /* Leave the last 4GB out of the high vma range, so that no state
1898 * base address + size can overflow 48 bits.
1899 */
1900 util_vma_heap_init(&bufmgr->vma_allocator[BRW_MEMZONE_OTHER],
1901 1 * _4GB, gtt_size - 2 * _4GB);
1902 } else if (devinfo->gen >= 10) {
1903 /* Softpin landed in 4.5, but GVT used an aliasing PPGTT until
1904 * kernel commit 6b3816d69628becb7ff35978aa0751798b4a940a in
1905 * 4.14. Gen10+ GVT hasn't landed yet, so it's not actually a
1906 * problem - but extending this requirement back to earlier gens
1907 * might actually mean requiring 4.14.
1908 */
1909          fprintf(stderr, "i965 requires softpin (Kernel 4.5) on Gen10+.\n");
1910 close(bufmgr->fd);
1911 free(bufmgr);
1912 return NULL;
1913 }
1914 }
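
   /* Illustrative note, not part of the driver: with a full 48-bit PPGTT
    * (gtt_size = 256 TB) the softpin layout configured above is
    *
    *    BRW_MEMZONE_LOW_4G: [PAGE_SIZE, 4 GB)
    *    BRW_MEMZONE_OTHER:  [4 GB, 256 TB - 4 GB)
    *
    * leaving the top 4 GB unused so that no base address plus a 4 GB-wide
    * state range can overflow 48 bits.
    */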
1915
1916 init_cache_buckets(bufmgr);
1917
1918 bufmgr->name_table =
1919 _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
1920 bufmgr->handle_table =
1921 _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
1922
1923 return bufmgr;
1924 }
1925
1926 struct brw_bufmgr *
1927 brw_bufmgr_get_for_fd(struct gen_device_info *devinfo, int fd, bool bo_reuse)
1928 {
1929 struct stat st;
1930
1931 if (fstat(fd, &st))
1932 return NULL;
1933
1934 struct brw_bufmgr *bufmgr = NULL;
1935
1936 mtx_lock(&global_bufmgr_list_mutex);
1937 list_for_each_entry(struct brw_bufmgr, iter_bufmgr, &global_bufmgr_list, link) {
1938 struct stat iter_st;
1939 if (fstat(iter_bufmgr->fd, &iter_st))
1940 continue;
1941
1942 if (st.st_rdev == iter_st.st_rdev) {
1943 assert(iter_bufmgr->bo_reuse == bo_reuse);
1944 bufmgr = brw_bufmgr_ref(iter_bufmgr);
1945 goto unlock;
1946 }
1947 }
1948
1949 bufmgr = brw_bufmgr_create(devinfo, fd, bo_reuse);
1950 list_addtail(&bufmgr->link, &global_bufmgr_list);
1951
1952 unlock:
1953 mtx_unlock(&global_bufmgr_list_mutex);
1954
1955 return bufmgr;
1956 }
1957
1958 int
1959 brw_bufmgr_get_fd(struct brw_bufmgr *bufmgr)
1960 {
1961 return bufmgr->fd;
1962 }