/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xf86drm.h>
#include <util/u_atomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <inttypes.h>
#include <errno.h>
#include <time.h>

#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

#include "common/gen_clflush.h"
#include "common/gen_debug.h"
#include "dev/gen_device_info.h"
#include "main/macros.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "iris_bufmgr.h"
#include "iris_context.h"

#include "drm-uapi/i915_drm.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
 * leaked.  All because it does not call VG(cli_free) from its
 * VG_USERREQ__FREELIKE_BLOCK handler.  Instead of treating the memory like
 * an allocation, we mark it available for use upon mmapping and remove
 * it upon munmapping.
 */
#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))

#define PAGE_SIZE 4096

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

/**
 * Call ioctl, restarting if it is interrupted
 */
static int
drm_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   return ret;
}

static inline int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}

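/* Usage note (illustration, not from the original source): iris_bo_unreference()
 * below relies on this helper as
 *
 *    if (atomic_add_unless(&bo->refcount, -1, 1)) { ... }
 *
 * i.e. the refcount is only decremented lock-free while it is not the last
 * reference; the final decrement is redone with p_atomic_dec_zero() under
 * bufmgr->lock, so the free path cannot race with a lookup that takes a new
 * reference.
 */
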
struct bo_cache_bucket {
   struct list_head head;
   uint64_t size;
};

struct iris_bufmgr {
   int fd;

   mtx_t lock;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct bo_cache_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;

   struct hash_table *name_table;
   struct hash_table *handle_table;

   bool has_llc;
   bool bo_reuse;
};

static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
                                  uint32_t stride);

static void bo_free(struct iris_bo *bo);

static uint32_t
key_hash_uint(const void *key)
{
   return _mesa_hash_data(key, 4);
}

static bool
key_uint_equal(const void *a, const void *b)
{
   return *((unsigned *) a) == *((unsigned *) b);
}

static struct iris_bo *
hash_find_bo(struct hash_table *ht, unsigned int key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
   return entry ? (struct iris_bo *) entry->data : NULL;
}

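/* Illustration (assumption based on the lookups below, not original text):
 * both tables are created with key_hash_uint/key_uint_equal over a 4-byte
 * key, so a lookup is simply
 *
 *    struct iris_bo *bo = hash_find_bo(bufmgr->handle_table, gem_handle);
 *
 * with handle_table keyed by GEM handle and name_table keyed by flink name.
 */
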
/**
 * This function finds the correct bucket fit for the input size.
 * It works in O(1) time: the bucket index is computed directly from the
 * requested size rather than by iterating over all the buckets.
 */
static struct bo_cache_bucket *
bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size)
{
   /* Round the size up to a whole number of pages. */
   const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
   /* Row  Bucket sizes    clz((x-1) | 3)   Row    Column
    *      in pages                        stride   size
    *   0:   1  2  3  4 -> 30 30 30 30       4       1
    *   1:   5  6  7  8 -> 29 29 29 29       4       1
    *   2:  10 12 14 16 -> 28 28 28 28       8       2
    *   3:  20 24 28 32 -> 27 27 27 27      16       4
    */
   const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
   const unsigned row_max_pages = 4 << row;
   /* The '& ~2' is the special case for row 1.  In row 1, max pages /
    * 2 is 2, but the previous row maximum is zero (because there is
    * no previous row).  All row maximum sizes are powers of 2, so that
    * is the only case where that bit will be set.
    */
   const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
   int col_size_log2 = row - 1;
   col_size_log2 += (col_size_log2 < 0);
   const unsigned col = (pages - prev_row_max_pages +
                         ((1 << col_size_log2) - 1)) >> col_size_log2;

   /* Calculating the index based on the row and column. */
   const unsigned index = (row * 4) + (col - 1);

   return (index < bufmgr->num_buckets) ?
          &bufmgr->cache_bucket[index] : NULL;
}

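/* Worked example (illustration only): an 80 KiB request is 20 pages, so
 * row = 30 - clz((20 - 1) | 3) = 30 - 27 = 3, row_max_pages = 32,
 * prev_row_max_pages = 16, col_size_log2 = 2,
 * col = (20 - 16 + 3) >> 2 = 1 and index = 3 * 4 + (1 - 1) = 12,
 * matching the first entry of row 3 in the table above.
 */
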
bool
iris_bo_busy(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };

   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0) {
      bo->idle = !busy.busy;
      return busy.busy;
   }
   return false;
}

int
iris_bo_madvise(struct iris_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv = {
      .handle = bo->gem_handle,
      .madv = state,
      .retained = 1,
   };

   drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}

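/* Usage note (illustration): the BO cache below uses this in both directions.
 * bo_unreference_final() marks cached buffers I915_MADV_DONTNEED so the kernel
 * may reclaim their pages under memory pressure, and bo_alloc_internal() marks
 * a cache hit I915_MADV_WILLNEED; a zero return (not retained) means the pages
 * were already purged and the BO must be freed instead of reused.
 */
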
/* drop the oldest entries that have been purged by the kernel */
static void
iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
                           struct bo_cache_bucket *bucket)
{
   list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
      if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      bo_free(bo);
   }
}

static struct iris_bo *
bo_alloc_internal(struct iris_bufmgr *bufmgr,
                  const char *name,
                  uint64_t size,
                  unsigned flags,
                  uint32_t tiling_mode,
                  uint32_t stride, uint64_t alignment)
{
   struct iris_bo *bo;
   unsigned int page_size = getpagesize();
   int ret;
   struct bo_cache_bucket *bucket;
   bool alloc_from_cache;
   uint64_t bo_size;
   bool busy = false;
   bool zeroed = false;

   if (flags & BO_ALLOC_BUSY)
      busy = true;

   if (flags & BO_ALLOC_ZEROED)
      zeroed = true;

   /* BUSY doesn't really jive with ZEROED as we have to wait for it to
    * be idle before we can memset.  Just disallow that combination.
    */
   assert(!(busy && zeroed));

   /* Round the allocated size up to a power of two number of pages. */
   bucket = bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      bo_size = size;
      if (bo_size < page_size)
         bo_size = page_size;
   } else {
      bo_size = bucket->size;
   }

   mtx_lock(&bufmgr->lock);

   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      if (busy && !zeroed) {
         /* Allocate new render-target BOs from the tail (MRU)
          * of the list, as it will likely be hot in the GPU
          * cache and in the aperture for us.  If the caller
          * asked us to zero the buffer, we don't want this
          * because we are going to mmap it.
          */
         bo = LIST_ENTRY(struct iris_bo, bucket->head.prev, head);
         list_del(&bo->head);
         alloc_from_cache = true;
         bo->align = alignment;
      } else {
         assert(alignment == 0);
         /* For non-render-target BOs (where we're probably
          * going to map it first thing in order to fill it
          * with data), check if the last BO in the cache is
          * unbusy, and only reuse in that case.  Otherwise,
          * allocating a new buffer is probably faster than
          * waiting for the GPU to finish.
          */
         bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
         if (!iris_bo_busy(bo)) {
            alloc_from_cache = true;
            list_del(&bo->head);
         }
      }

      if (alloc_from_cache) {
         if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
            bo_free(bo);
            iris_bo_cache_purge_bucket(bufmgr, bucket);
            goto retry;
         }

         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
            bo_free(bo);
            goto retry;
         }

         if (zeroed) {
            void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
            if (!map) {
               bo_free(bo);
               goto retry;
            }
            memset(map, 0, bo_size);
         }
      }
   }

   if (!alloc_from_cache) {
      bo = calloc(1, sizeof(*bo));
      if (!bo)
         goto err;

      bo->size = bo_size;
      bo->idle = true;

      struct drm_i915_gem_create create = { .size = bo_size };

      /* All new BOs we get from the kernel are zeroed, so we don't need to
       * worry about that here.
       */
      ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;

      bo->bufmgr = bufmgr;
      bo->align = alignment;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
      bo->stride = 0;

      if (bo_set_tiling_internal(bo, tiling_mode, stride))
         goto err_free;

      /* Calling set_domain() will allocate pages for the BO outside of the
       * struct mutex lock in the kernel, which is more efficient than waiting
       * to create them during the first execbuf that uses the BO.
       */
      struct drm_i915_gem_set_domain sd = {
         .handle = bo->gem_handle,
         .read_domains = I915_GEM_DOMAIN_CPU,
         .write_domain = 0,
      };

      if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
         goto err_free;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = true;
   bo->cache_coherent = bufmgr->has_llc;

   mtx_unlock(&bufmgr->lock);

   DBG("bo_create: buf %d (%s) %llub\n", bo->gem_handle, bo->name,
       (unsigned long long) size);

   return bo;

err_free:
   bo_free(bo);
err:
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

struct iris_bo *
iris_bo_alloc(struct iris_bufmgr *bufmgr,
              const char *name, uint64_t size, uint64_t alignment)
{
   return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
}

struct iris_bo *
iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
                    uint64_t size, uint32_t tiling_mode, uint32_t pitch,
                    unsigned flags)
{
   return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch, 0);
}

/**
 * Returns an iris_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct iris_bo *
iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                             const char *name, unsigned int handle)
{
   struct iris_bo *bo;

   /* At the moment most applications only have a few named bo.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named.  And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   mtx_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   struct drm_gem_open open_arg = { .name = handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;
   bo->external = true;

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
bo_free(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->map_cpu) {
      VG_NOACCESS(bo->map_cpu, bo->size);
      munmap(bo->map_cpu, bo->size);
   }
   if (bo->map_wc) {
      VG_NOACCESS(bo->map_wc, bo->size);
      munmap(bo->map_wc, bo->size);
   }
   if (bo->map_gtt) {
      VG_NOACCESS(bo->map_gtt, bo->size);
      munmap(bo->map_gtt, bo->size);
   }

   if (bo->external) {
      struct hash_entry *entry;

      if (bo->global_name) {
         entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
         _mesa_hash_table_remove(bufmgr->name_table, entry);
      }

      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
      _mesa_hash_table_remove(bufmgr->handle_table, entry);
   }

   /* Close this object */
   struct drm_gem_close close = { .handle = bo->gem_handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);
         bo_free(bo);
      }
   }

   bufmgr->time = time;
}

static void
bo_unreference_final(struct iris_bo *bo, time_t time)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
       iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}

void
iris_bo_unreference(struct iris_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct iris_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      mtx_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      mtx_unlock(&bufmgr->lock);
   }
}

static void
bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
                           struct iris_bo *bo,
                           const char *action)
{
   bool busy = dbg && !bo->idle;
   double elapsed = unlikely(busy) ? -get_time() : 0.0;

   iris_bo_wait_rendering(bo);

   if (unlikely(busy)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */ {
         perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
      }
   }
}

static void
print_flags(unsigned flags)
{
   if (flags & MAP_READ)
      DBG("READ ");
   if (flags & MAP_WRITE)
      DBG("WRITE ");
   if (flags & MAP_ASYNC)
      DBG("ASYNC ");
   if (flags & MAP_PERSISTENT)
      DBG("PERSISTENT ");
   if (flags & MAP_COHERENT)
      DBG("COHERENT ");
   DBG("\n");
}

static void *
iris_bo_map_cpu(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* We disallow CPU maps for writing to non-coherent buffers, as the
    * CPU map can become invalidated when a batch is flushed out, which
    * can happen at unpredictable times.  You should use WC maps instead.
    */
   assert(bo->cache_coherent || !(flags & MAP_WRITE));

   if (!bo->map_cpu) {
      DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }
      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }

   DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
       bo->map_cpu);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
   }

   if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
      /* If we're reusing an existing CPU mapping, the CPU caches may
       * contain stale data from the last time we read from that mapping.
       * (With the BO cache, it might even be data from a previous buffer!)
       * Even if it's a brand new mapping, the kernel may have zeroed the
       * buffer via CPU writes.
       *
       * We need to invalidate those cachelines so that we see the latest
       * contents, and so long as we only read from the CPU mmap we do not
       * need to write those cachelines back afterwards.
       *
       * On LLC, the empirical evidence suggests that writes from the GPU
       * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
       * cachelines.  (Other reads, such as the display engine, bypass the
       * LLC entirely requiring us to keep dirty pixels for the scanout
       * out of any cache.)
       */
      gen_invalidate_range(bo->map_cpu, bo->size);
   }

   return bo->map_cpu;
}

static void *
iris_bo_map_wc(struct pipe_debug_callback *dbg,
               struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->map_wc) {
      DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
         .flags = I915_MMAP_WC,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }

   DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "WC mapping");
   }

   return bo->map_wc;
}

/**
 * Perform an uncached mapping via the GTT.
 *
 * Write access through the GTT is not quite fully coherent.  On low power
 * systems especially, like modern Atoms, we can observe reads from RAM before
 * the write via GTT has landed.  A write memory barrier that flushes the Write
 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
 * read after the write as the GTT write suffers a small delay through the GTT
 * indirection.  The kernel uses an uncached mmio read to ensure the GTT write
 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
 * flushes prior to execbuf submission.  However, if we are not informing the
 * kernel about our GTT writes, it will not flush before earlier access, such
 * as when using the cmdparser.  Similarly, we need to be careful if we should
 * ever issue a CPU read immediately following a GTT write.
 *
 * Telling the kernel about write access also has one more important
 * side-effect.  Upon receiving notification about the write, it cancels any
 * scanout buffering for FBC/PSR and friends.  Later FBC/PSR is then flushed by
 * either SW_FINISH or DIRTYFB.  The presumption is that we never write to the
 * actual scanout via a mmapping, only to a backbuffer and so all the FBC/PSR
 * tracking is handled on the buffer exchange instead.
 */
static void *
iris_bo_map_gtt(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->map_gtt == NULL) {
      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };

      /* Get the fake offset back... */
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (map == MAP_FAILED) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
       * already intercept this mmap call.  However, for consistency between
       * all the mmap paths, we mark the pointer as defined now and mark it
       * as inaccessible afterwards.
       */
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }

   DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
   }

   return bo->map_gtt;
}

static bool
can_map_cpu(struct iris_bo *bo, unsigned flags)
{
   if (bo->cache_coherent)
      return true;

   /* Even if the buffer itself is not cache-coherent (such as a scanout), on
    * an LLC platform reads always are coherent (as they are performed via the
    * central system agent).  It is just the writes that we need to take special
    * care to ensure that land in main memory and not stick in the CPU cache.
    */
   if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
      return true;

   /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
    * across batch flushes where the kernel will change cache domains of the
    * bo, invalidating continued access to the CPU mmap on non-LLC device.
    *
    * Similarly, ASYNC typically means that the buffer will be accessed via
    * both the CPU and the GPU simultaneously.  Batches may be executed that
    * use the BO even while it is mapped.  While OpenGL technically disallows
    * most drawing while non-persistent mappings are active, we may still use
    * the GPU for blits or other operations, causing batches to happen at
    * inconvenient times.
    */
   if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC))
      return false;

   return !(flags & MAP_WRITE);
}

void *
iris_bo_map(struct pipe_debug_callback *dbg,
            struct iris_bo *bo, unsigned flags)
{
   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
      return iris_bo_map_gtt(dbg, bo, flags);

   void *map;

   if (can_map_cpu(bo, flags))
      map = iris_bo_map_cpu(dbg, bo, flags);
   else
      map = iris_bo_map_wc(dbg, bo, flags);

   /* Allow the attempt to fail by falling back to the GTT where necessary.
    *
    * Not every buffer can be mmapped directly using the CPU (or WC), for
    * example buffers that wrap stolen memory or are imported from other
    * devices.  For those, we have little choice but to use a GTT mmapping.
    * However, if we use a slow GTT mmapping for reads where we expected fast
    * access, that order of magnitude difference in throughput will be clearly
    * expressed by angry users.
    *
    * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
    */
   if (!map && !(flags & MAP_RAW)) {
      perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
                 bo->name, flags);
      map = iris_bo_map_gtt(dbg, bo, flags);
   }

   return map;
}

int
iris_bo_subdata(struct iris_bo *bo, uint64_t offset,
                uint64_t size, const void *data)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   struct drm_i915_gem_pwrite pwrite = {
      .handle = bo->gem_handle,
      .offset = offset,
      .size = size,
      .data_ptr = (uint64_t) (uintptr_t) data,
   };

   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
   if (ret != 0) {
      DBG("%s:%d: Error writing data to buffer %d: "
          "(%"PRIu64" %"PRIu64") %s .\n",
          __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
   }

   return ret;
}

/** Waits for all GPU rendering with the object to have completed. */
void
iris_bo_wait_rendering(struct iris_bo *bo)
{
   /* We require a kernel recent enough for WAIT_IOCTL support.
    * See intel_init_bufmgr()
    */
   iris_bo_wait(bo, -1);
}

/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time.  Otherwise some negative return
 * value describes the error.  Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to iris_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time.  Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait).  This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call.  The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle.  Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise; upgrade to the latest stable kernels if this is the case.
 */
int
iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* If we know it's idle, don't bother with the kernel round trip */
   if (bo->idle && !bo->external)
      return 0;

   struct drm_i915_gem_wait wait = {
      .bo_handle = bo->gem_handle,
      .timeout_ns = timeout_ns,
   };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return -errno;

   bo->idle = true;

   return ret;
}

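/* Usage note (illustration): iris_bo_wait_rendering() above passes
 * timeout_ns = -1 for an effectively unbounded wait; a caller that only
 * wants to poll could pass 0 and treat a -ETIME return as "still busy".
 */
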
void
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
{
   mtx_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         list_del(&bo->head);
         bo_free(bo);
      }
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   free(bufmgr);
}

static int
bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drm_ioctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}

int
iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
                   uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
   return 0;
}

struct iris_bo *
iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
{
   uint32_t handle;
   struct iris_bo *bo;

   mtx_lock(&bufmgr->lock);
   int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
          strerror(errno));
      mtx_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guessed) size.
    */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   bo->name = "prime";
   bo->reusable = false;
   bo->external = true;

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
iris_bo_make_external(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->external) {
      mtx_lock(&bufmgr->lock);
      if (!bo->external) {
         _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
         bo->external = true;
      }
      mtx_unlock(&bufmgr->lock);
   }
}

int
iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   iris_bo_make_external(bo);

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}

uint32_t
iris_bo_export_gem_handle(struct iris_bo *bo)
{
   iris_bo_make_external(bo);

   return bo->gem_handle;
}

int
iris_bo_flink(struct iris_bo *bo, uint32_t *name)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink = { .handle = bo->gem_handle };

      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      iris_bo_make_external(bo);
      mtx_lock(&bufmgr->lock);
      if (!bo->global_name) {
         bo->global_name = flink.name;
         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      mtx_unlock(&bufmgr->lock);

      bo->reusable = false;
   }

   *name = bo->global_name;
   return 0;
}

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
iris_bufmgr_enable_reuse(struct iris_bufmgr *bufmgr)
{
   bufmgr->bo_reuse = true;
}

static void
add_bucket(struct iris_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;

   assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
}

static void
init_cache_buckets(struct iris_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, 4096);
   add_bucket(bufmgr, 4096 * 2);
   add_bucket(bufmgr, 4096 * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}

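/* Resulting bucket sizes (illustration): 4 KiB, 8 KiB, 12 KiB, then for each
 * power of two from 16 KiB up to the 64 MiB cache_max_size the size itself
 * plus 1/4, 2/4 and 3/4 steps, e.g. 16, 20, 24, 28, 32, 40, 48, 56, 64 KiB,
 * and so on; this is exactly the ladder that bucket_for_size() indexes in O(1).
 */
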
uint32_t
iris_create_hw_context(struct iris_bufmgr *bufmgr)
{
   struct drm_i915_gem_context_create create = { };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret != 0) {
      DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
      return 0;
   }

   return create.ctx_id;
}

int
iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
                             uint32_t ctx_id,
                             int priority)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = ctx_id,
      .param = I915_CONTEXT_PARAM_PRIORITY,
      .value = priority,
   };
   int err = 0;

   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;

   return err;
}

void
iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
{
   struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };

   if (ctx_id != 0 &&
       drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
      fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
              strerror(errno));
   }
}

int
iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read reg_read = { .offset = offset };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

   *result = reg_read.val;
   return ret;
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
struct iris_bufmgr *
iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
{
   struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel.  If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this!  Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = fd;

   if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
      free(bufmgr);
      return NULL;
   }

   bufmgr->has_llc = devinfo->has_llc;

   init_cache_buckets(bufmgr);

   bufmgr->name_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);

   return bufmgr;
}