/*
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2017 Intel Corporation
 * Copyright © 2006 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors: Thomas Hellström <thellstrom@vmware.com>
 *          Keith Whitwell <keithw@vmware.com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */
#include <util/u_atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>

#define ETIME ETIMEDOUT

#include "common/gen_clflush.h"
#include "common/gen_debug.h"
#include "common/gen_device_info.h"
#include "libdrm_macros.h"
#include "main/macros.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "brw_bufmgr.h"
#include "brw_context.h"
/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
 * leaked. All because it does not call VG(cli_free) from its
 * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
 * an allocation, we mark it available for use upon mmapping and remove
 * it upon unmapping.
 */
#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))

#define memclear(s) memset(&s, 0, sizeof(s))

#define FILE_DEBUG_FLAG DEBUG_BUFMGR
static inline int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}
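/* Illustrative note (added; not part of the original source): the cmpxchg
 * loop above only performs the add while the counter differs from `unless`,
 * and reports whether the counter was left at `unless`. A hypothetical
 * caller can use it to take a slow path only for the final reference:
 *
 *    if (atomic_add_unless(&obj->refcount, -1, 1)) {
 *       // refcount was exactly 1: take the lock and run the teardown path
 *    }
 */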
struct bo_cache_bucket {
   struct list_head head;
   uint64_t size;
};

struct brw_bufmgr {
   int fd;

   mtx_t lock;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct bo_cache_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;

   struct hash_table *name_table;
   struct hash_table *handle_table;

   bool has_llc;
   bool has_mmap_wc;
   bool bo_reuse;
};
static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
                                  uint32_t stride);

static void bo_free(struct brw_bo *bo);
static uint32_t
key_hash_uint(const void *key)
{
   return _mesa_hash_data(key, 4);
}

static bool
key_uint_equal(const void *a, const void *b)
{
   return *((unsigned *) a) == *((unsigned *) b);
}
static struct brw_bo *
hash_find_bo(struct hash_table *ht, unsigned int key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
   return entry ? (struct brw_bo *) entry->data : NULL;
}
static uint64_t
bo_tile_size(struct brw_bufmgr *bufmgr, uint64_t size, uint32_t tiling)
{
   if (tiling == I915_TILING_NONE)
      return size;

   /* 965+ just need multiples of page size for tiling */
   return ALIGN(size, 4096);
}
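/* Worked example (added for illustration; not in the original source): a
 * tiled buffer of 5000 bytes rounds up to the next page-size multiple,
 * ALIGN(5000, 4096) == 8192, while an untiled buffer keeps its exact size.
 */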
/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip. We use 512 as the minimum to allow for a later tiling
 * change.
 */
static uint32_t
bo_tile_pitch(struct brw_bufmgr *bufmgr, uint32_t pitch, uint32_t tiling)
{
   unsigned long tile_width;

   /* If untiled, then just align it so that we can do rendering
    * to it with the 3D engine.
    */
   if (tiling == I915_TILING_NONE)
      return ALIGN(pitch, 64);

   if (tiling == I915_TILING_X)
      tile_width = 512;
   else
      tile_width = 128;

   /* 965 is flexible */
   return ALIGN(pitch, tile_width);
}
static struct bo_cache_bucket *
bucket_for_size(struct brw_bufmgr *bufmgr, uint64_t size)
{
   int i;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
      if (bucket->size >= size) {
         return bucket;
      }
   }

   return NULL;
}
bool
brw_bo_busy(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy;

   memclear(busy);
   busy.handle = bo->gem_handle;

   int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0) {
      bo->idle = !busy.busy;
      return busy.busy;
   }
   return false;
}
int
brw_bo_madvise(struct brw_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv;

   memclear(madv);
   madv.handle = bo->gem_handle;
   madv.madv = state;
   madv.retained = 1;

   drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}
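/* Added note (not part of the original file): cached, idle BOs are marked
 * I915_MADV_DONTNEED so the kernel may reclaim their pages under memory
 * pressure, and are switched back to I915_MADV_WILLNEED before reuse.
 * madv.retained coming back as 0 means the pages were already purged and
 * the cached BO has to be freed rather than reused.
 */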
/* drop the oldest entries that have been purged by the kernel */
static void
brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
                          struct bo_cache_bucket *bucket)
{
   list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
      if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      bo_free(bo);
   }
}
*
245 bo_alloc_internal(struct brw_bufmgr
*bufmgr
,
249 uint32_t tiling_mode
,
250 uint32_t stride
, uint64_t alignment
)
253 unsigned int page_size
= getpagesize();
255 struct bo_cache_bucket
*bucket
;
256 bool alloc_from_cache
;
261 if (flags
& BO_ALLOC_BUSY
)
264 if (flags
& BO_ALLOC_ZEROED
)
267 /* BUSY does doesn't really jive with ZEROED as we have to wait for it to
268 * be idle before we can memset. Just disallow that combination.
270 assert(!(busy
&& zeroed
));
272 /* Round the allocated size up to a power of two number of pages. */
273 bucket
= bucket_for_size(bufmgr
, size
);
275 /* If we don't have caching at this size, don't actually round the
278 if (bucket
== NULL
) {
280 if (bo_size
< page_size
)
283 bo_size
= bucket
->size
;
   mtx_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      if (busy && !zeroed) {
         /* Allocate new render-target BOs from the tail (MRU)
          * of the list, as it will likely be hot in the GPU
          * cache and in the aperture for us. If the caller
          * asked us to zero the buffer, we don't want this
          * because we are going to mmap it.
          */
         bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
         list_del(&bo->head);
         alloc_from_cache = true;
         bo->align = alignment;
      } else {
         assert(alignment == 0);
         /* For non-render-target BOs (where we're probably
          * going to map it first thing in order to fill it
          * with data), check if the last BO in the cache is
          * unbusy, and only reuse in that case. Otherwise,
          * allocating a new buffer is probably faster than
          * waiting for the GPU to finish.
          */
         bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
         if (!brw_bo_busy(bo)) {
            alloc_from_cache = true;
            list_del(&bo->head);
         }
      }

      if (alloc_from_cache) {
         if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
            bo_free(bo);
            brw_bo_cache_purge_bucket(bufmgr, bucket);
            goto retry;
         }

         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
            bo_free(bo);
            goto retry;
         }

         if (zeroed) {
            void *map = brw_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
            memset(map, 0, bo_size);
         }
      }
   }
   if (!alloc_from_cache) {
      struct drm_i915_gem_create create;

      bo = calloc(1, sizeof(*bo));
      if (!bo)
         goto err;

      memclear(create);
      create.size = bo_size;

      /* All new BOs we get from the kernel are zeroed, so we don't need to
       * worry about that here.
       */
      ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;

      bo->align = alignment;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;

      if (bo_set_tiling_internal(bo, tiling_mode, stride))
         goto err_free;

      /* Calling set_domain() will allocate pages for the BO outside of the
       * struct mutex lock in the kernel, which is more efficient than waiting
       * to create them during the first execbuf that uses the BO.
       */
      struct drm_i915_gem_set_domain sd = {
         .handle = bo->gem_handle,
         .read_domains = I915_GEM_DOMAIN_CPU,
      };

      if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
         goto err_free;
   }

   p_atomic_set(&bo->refcount, 1);

   bo->cache_coherent = bufmgr->has_llc;

   mtx_unlock(&bufmgr->lock);

   DBG("bo_create: buf %d (%s) %llub\n", bo->gem_handle, bo->name,
       (unsigned long long) size);

   return bo;

err_free:
   bo_free(bo);
err:
   mtx_unlock(&bufmgr->lock);
   return NULL;
}
struct brw_bo *
brw_bo_alloc(struct brw_bufmgr *bufmgr,
             const char *name, uint64_t size, uint64_t alignment)
{
   return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
}
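/* Illustrative usage sketch (added; not part of the original file). The
 * "query_bo" name is hypothetical and error handling is omitted:
 *
 *    struct brw_bo *query_bo =
 *       brw_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
 *    uint64_t *results = brw_bo_map(brw, query_bo, MAP_READ | MAP_WRITE);
 *    ...
 *    brw_bo_unreference(query_bo);
 */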
struct brw_bo *
brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
                   uint64_t size, uint32_t tiling_mode, uint32_t pitch,
                   unsigned flags)
{
   return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch, 0);
}
struct brw_bo *
brw_bo_alloc_tiled_2d(struct brw_bufmgr *bufmgr, const char *name,
                      int x, int y, int cpp, uint32_t tiling,
                      uint32_t *pitch, unsigned flags)
{
   uint64_t size;
   uint32_t stride;
   unsigned long aligned_y, height_alignment;

   /* If we're tiled, our allocations are in 8 or 32-row blocks,
    * so failure to align our height means that we won't allocate
    * enough pages.
    *
    * If we're untiled, we still have to align to 2 rows high
    * because the data port accesses 2x2 blocks even if the
    * bottom row isn't to be rendered, so failure to align means
    * we could walk off the end of the GTT and fault. This is
    * documented on 965, and may be the case on older chipsets
    * too so we try to be careful.
    */
   height_alignment = 2;

   if (tiling == I915_TILING_X)
      height_alignment = 8;
   else if (tiling == I915_TILING_Y)
      height_alignment = 32;
   aligned_y = ALIGN(y, height_alignment);

   stride = x * cpp;
   stride = bo_tile_pitch(bufmgr, stride, tiling);
   size = stride * aligned_y;
   size = bo_tile_size(bufmgr, size, tiling);
   *pitch = stride;

   if (tiling == I915_TILING_NONE)
      stride = 0;

   return bo_alloc_internal(bufmgr, name, size, flags, tiling, stride, 0);
}
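/* Worked example (added for illustration; not part of the original file):
 * a 1920x1080, 4 bytes-per-pixel, X-tiled surface. The height is aligned to
 * 8 rows (1080 is already a multiple of 8), the 1920 * 4 = 7680-byte pitch is
 * already a multiple of the 512-byte X-tile width, and the resulting
 * 7680 * 1080 = 8,294,400-byte size is already a whole number of 4096-byte
 * pages, so no further rounding happens.
 */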
/**
 * Returns a brw_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct brw_bo *
brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
                            const char *name, unsigned int handle)
{
   struct brw_bo *bo;
   int ret;
   struct drm_gem_open open_arg;
   struct drm_i915_gem_get_tiling get_tiling;

   /* At the moment most applications only have a few named bo.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named. And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   mtx_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   memclear(open_arg);
   open_arg.name = handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }

   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle.
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->gem_handle = open_arg.handle;
   bo->global_name = handle;
   bo->reusable = false;

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   memclear(get_tiling);
   get_tiling.handle = bo->gem_handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}
static void
bo_free(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_gem_close close;
   int ret;

   if (bo->map_cpu) {
      VG_NOACCESS(bo->map_cpu, bo->size);
      drm_munmap(bo->map_cpu, bo->size);
   }
   if (bo->map_wc) {
      VG_NOACCESS(bo->map_wc, bo->size);
      drm_munmap(bo->map_wc, bo->size);
   }
   if (bo->map_gtt) {
      VG_NOACCESS(bo->map_gtt, bo->size);
      drm_munmap(bo->map_gtt, bo->size);
   }

   if (bo->external) {
      struct hash_entry *entry;

      if (bo->global_name) {
         entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
         _mesa_hash_table_remove(bufmgr->name_table, entry);
      }

      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
      _mesa_hash_table_remove(bufmgr->handle_table, entry);
   }

   /* Close this object */
   memclear(close);
   close.handle = bo->gem_handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   free(bo);
}
/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);
         bo_free(bo);
      }
   }

   bufmgr->time = time;
}
static void
bo_unreference_final(struct brw_bo *bo, time_t time)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
       brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;
      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}
void
brw_bo_unreference(struct brw_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct brw_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      mtx_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      mtx_unlock(&bufmgr->lock);
   }
}
static void
bo_wait_with_stall_warning(struct brw_context *brw,
                           struct brw_bo *bo,
                           const char *action)
{
   bool busy = brw && brw->perf_debug && !bo->idle;
   double elapsed = unlikely(busy) ? -get_time() : 0.0;

   brw_bo_wait_rendering(bo);

   if (unlikely(busy)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */
         perf_debug("%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
   }
}
static void
print_flags(unsigned flags)
{
   if (flags & MAP_READ)
      DBG("READ ");
   if (flags & MAP_WRITE)
      DBG("WRITE ");
   if (flags & MAP_ASYNC)
      DBG("ASYNC ");
   if (flags & MAP_PERSISTENT)
      DBG("PERSISTENT ");
   if (flags & MAP_COHERENT)
      DBG("COHERENT ");
   DBG("\n");
}
static void *
brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   /* We disallow CPU maps for writing to non-coherent buffers, as the
    * CPU map can become invalidated when a batch is flushed out, which
    * can happen at unpredictable times. You should use WC maps instead.
    */
   assert(bo->cache_coherent || !(flags & MAP_WRITE));

   if (!bo->map_cpu) {
      struct drm_i915_gem_mmap mmap_arg;
      void *map;

      DBG("brw_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;
      mmap_arg.size = bo->size;
      int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }
      map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         drm_munmap(map, bo->size);
      }
   }

   DBG("brw_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
       bo->map_cpu);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(brw, bo, "CPU mapping");
   }

   if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
      /* If we're reusing an existing CPU mapping, the CPU caches may
       * contain stale data from the last time we read from that mapping.
       * (With the BO cache, it might even be data from a previous buffer!)
       * Even if it's a brand new mapping, the kernel may have zeroed the
       * buffer via CPU writes.
       *
       * We need to invalidate those cachelines so that we see the latest
       * contents, and so long as we only read from the CPU mmap we do not
       * need to write those cachelines back afterwards.
       *
       * On LLC, the empirical evidence suggests that writes from the GPU
       * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
       * cachelines. (Other reads, such as the display engine, bypass the
       * LLC entirely requiring us to keep dirty pixels for the scanout
       * out of any cache.)
       */
      gen_invalidate_range(bo->map_cpu, bo->size);
   }

   return bo->map_cpu;
}
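/* Illustrative usage sketch (added; not part of the original file): reading
 * back a non-coherent buffer through a CPU map. A read-only mapping is
 * allowed even when cache_coherent is false; the invalidation above ensures
 * no stale cachelines are read. "result_bo" and "dst" are hypothetical:
 *
 *    const uint32_t *p = brw_bo_map(brw, result_bo, MAP_READ);
 *    memcpy(dst, p, result_bo->size);
 */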
static void *
brw_bo_map_wc(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (!bufmgr->has_mmap_wc)
      return NULL;

   if (!bo->map_wc) {
      struct drm_i915_gem_mmap mmap_arg;
      void *map;

      DBG("brw_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;
      mmap_arg.size = bo->size;
      mmap_arg.flags = I915_MMAP_WC;
      int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         drm_munmap(map, bo->size);
      }
   }

   DBG("brw_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(brw, bo, "WC mapping");
   }

   return bo->map_wc;
}
/**
 * Perform an uncached mapping via the GTT.
 *
 * Write access through the GTT is not quite fully coherent. On low power
 * systems especially, like modern Atoms, we can observe reads from RAM before
 * the write via GTT has landed. A write memory barrier that flushes the Write
 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
 * read after the write as the GTT write suffers a small delay through the GTT
 * indirection. The kernel uses an uncached mmio read to ensure the GTT write
 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
 * flushes prior to execbuf submission. However, if we are not informing the
 * kernel about our GTT writes, it will not flush before earlier access, such
 * as when using the cmdparser. Similarly, we need to be careful if we should
 * ever issue a CPU read immediately following a GTT write.
 *
 * Telling the kernel about write access also has one more important
 * side-effect. Upon receiving notification about the write, it cancels any
 * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
 * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
 * actual scanout via a mmapping, only to a backbuffer and so all the FBC/PSR
 * tracking is handled on the buffer exchange instead.
 */
static void *
brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->map_gtt == NULL) {
      struct drm_i915_gem_mmap_gtt mmap_arg;
      void *map;

      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;

      /* Get the fake offset back... */
      int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (map == MAP_FAILED) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
       * already intercept this mmap call. However, for consistency between
       * all the mmap paths, we mark the pointer as defined now and mark it
       * as inaccessible afterwards.
       */
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         drm_munmap(map, bo->size);
      }
   }

   DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(brw, bo, "GTT mapping");
   }

   return bo->map_gtt;
}
static bool
can_map_cpu(struct brw_bo *bo, unsigned flags)
{
   if (bo->cache_coherent)
      return true;

   /* Even if the buffer itself is not cache-coherent (such as a scanout), on
    * an LLC platform reads always are coherent (as they are performed via the
    * central system agent). It is just the writes that we need to take special
    * care to ensure that land in main memory and not stick in the CPU cache.
    */
   if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
      return true;

   /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
    * across batch flushes where the kernel will change cache domains of the
    * bo, invalidating continued access to the CPU mmap on non-LLC device.
    *
    * Similarly, ASYNC typically means that the buffer will be accessed via
    * both the CPU and the GPU simultaneously. Batches may be executed that
    * use the BO even while it is mapped. While OpenGL technically disallows
    * most drawing while non-persistent mappings are active, we may still use
    * the GPU for blits or other operations, causing batches to happen at
    * inconvenient times.
    */
   if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC))
      return false;

   return !(flags & MAP_WRITE);
}
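/* Summary table (added for illustration; not part of the original file):
 *
 *    cache-coherent BO                          -> CPU map OK
 *    read-only map on an LLC platform           -> CPU map OK
 *    MAP_PERSISTENT / MAP_COHERENT / MAP_ASYNC  -> no CPU map (use WC/GTT)
 *    anything else                              -> CPU map only if read-only
 */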
void *
brw_bo_map(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
      return brw_bo_map_gtt(brw, bo, flags);

   void *map;

   if (can_map_cpu(bo, flags))
      map = brw_bo_map_cpu(brw, bo, flags);
   else
      map = brw_bo_map_wc(brw, bo, flags);

   /* Allow the attempt to fail by falling back to the GTT where necessary.
    *
    * Not every buffer can be mmaped directly using the CPU (or WC), for
    * example buffers that wrap stolen memory or are imported from other
    * devices. For those, we have little choice but to use a GTT mmapping.
    * However, if we use a slow GTT mmapping for reads where we expected fast
    * access, that order of magnitude difference in throughput will be clearly
    * expressed by angry users.
    *
    * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
    */
   if (!map && !(flags & MAP_RAW)) {
      perf_debug("Fallback GTT mapping for %s with access flags %x\n",
                 bo->name, flags);
      map = brw_bo_map_gtt(brw, bo, flags);
   }

   return map;
}
int
brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
               uint64_t size, const void *data)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_pwrite pwrite;
   int ret;

   memclear(pwrite);
   pwrite.handle = bo->gem_handle;
   pwrite.offset = offset;
   pwrite.size = size;
   pwrite.data_ptr = (uint64_t) (uintptr_t) data;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error writing data to buffer %d: "
          "(%" PRIu64 " %" PRIu64 ") %s .\n",
          __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
   }

   return ret;
}
/** Waits for all GPU rendering with the object to have completed. */
void
brw_bo_wait_rendering(struct brw_bo *bo)
{
   /* We require a kernel recent enough for WAIT_IOCTL support.
    * See intel_init_bufmgr()
    */
   brw_bo_wait(bo, -1);
}

/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If the value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise a negative return
 * value describes the error; of particular interest is -ETIME, meaning the
 * wait failed to yield the desired result.
 *
 * Similar to brw_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is that the internal locking semantics differ (this variant does
 * not hold the lock for the duration of the wait). This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait does not
 * guard against the buffer being re-issued by another thread or through a
 * flinked handle. Userspace must make sure this race does not occur if such
 * precision is important for serialization.
 *
 * Note that some kernels have broken the infinite-wait-for-negative-values
 * promise; upgrade to the latest stable kernel if this is the case.
 */
int
brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_wait wait;
   int ret;

   /* If we know it's idle, don't bother with the kernel round trip */
   if (bo->idle && !bo->external)
      return 0;

   memclear(wait);
   wait.bo_handle = bo->gem_handle;
   wait.timeout_ns = timeout_ns;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret == -1)
      return -errno;

   return ret;
}
void
brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
{
   mtx_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
         list_del(&bo->head);
         bo_free(bo);
      }
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   free(bufmgr);
}
static int
bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drmIoctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}
void
brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
                  uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
}
struct brw_bo *
brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd)
{
   int ret;
   uint32_t handle;
   struct brw_bo *bo;
   struct drm_i915_gem_get_tiling get_tiling;

   mtx_lock(&bufmgr->lock);
   ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("create_from_prime: failed to obtain handle from fd: %s\n",
          strerror(errno));
      mtx_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object.
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo. The fd-to-handle ioctl really should
    * return the size, but it doesn't. If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size. Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guessed) size.
    */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   bo->reusable = false;
   bo->external = true;

   memclear(get_tiling);
   get_tiling.handle = bo->gem_handle;
   if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}
static void
brw_bo_make_external(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->external) {
      mtx_lock(&bufmgr->lock);
      if (!bo->external) {
         _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
         bo->external = true;
      }
      mtx_unlock(&bufmgr->lock);
   }
}
int
brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   brw_bo_make_external(bo);

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}
uint32_t
brw_bo_export_gem_handle(struct brw_bo *bo)
{
   brw_bo_make_external(bo);

   return bo->gem_handle;
}
int
brw_bo_flink(struct brw_bo *bo, uint32_t *name)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink;

      memclear(flink);
      flink.handle = bo->gem_handle;
      if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      brw_bo_make_external(bo);
      mtx_lock(&bufmgr->lock);
      if (!bo->global_name) {
         bo->global_name = flink.name;
         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      mtx_unlock(&bufmgr->lock);

      bo->reusable = false;
   }

   *name = bo->global_name;
   return 0;
}
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr)
{
   bufmgr->bo_reuse = true;
}
static void
add_bucket(struct brw_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;
}
static void
init_cache_buckets(struct brw_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough. (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, 4096);
   add_bucket(bufmgr, 4096 * 2);
   add_bucket(bufmgr, 4096 * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}
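/* Added illustration (not part of the original file): the resulting bucket
 * sizes are 4 KiB, 8 KiB, 12 KiB, then 16/20/24/28 KiB, 32/40/48/56 KiB,
 * 64/80/96/112 KiB, and so on (each power of two plus three quarter-steps)
 * up to the 64 MiB cache_max_size.
 */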
uint32_t
brw_create_hw_context(struct brw_bufmgr *bufmgr)
{
   struct drm_i915_gem_context_create create;
   int ret;

   memclear(create);
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret != 0) {
      DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
      return 0;
   }

   return create.ctx_id;
}
int
brw_hw_context_set_priority(struct brw_bufmgr *bufmgr,
                            uint32_t ctx_id,
                            int priority)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = ctx_id,
      .param = I915_CONTEXT_PARAM_PRIORITY,
      .value = priority,
   };
   int err = 0;

   if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;

   return err;
}
void
brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
{
   struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };

   if (ctx_id != 0 &&
       drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
      fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
              strerror(errno));
   }
}
*bufmgr
, uint32_t offset
, uint64_t *result
)
1349 struct drm_i915_reg_read reg_read
;
1353 reg_read
.offset
= offset
;
1355 ret
= drmIoctl(bufmgr
->fd
, DRM_IOCTL_I915_REG_READ
, ®_read
);
1357 *result
= reg_read
.val
;
static int
gem_param(int fd, int name)
{
   drm_i915_getparam_t gp;
   int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */

   memset(&gp, 0, sizeof(gp));
   gp.param = name;
   gp.value = &v;
   if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
      return -1;

   return v;
}
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
struct brw_bufmgr *
brw_bufmgr_init(struct gen_device_info *devinfo, int fd)
{
   struct brw_bufmgr *bufmgr;

   bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel. If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this! Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = fd;

   if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
      free(bufmgr);
      return NULL;
   }

   bufmgr->has_llc = devinfo->has_llc;
   bufmgr->has_mmap_wc = gem_param(fd, I915_PARAM_MMAP_VERSION) > 0;

   init_cache_buckets(bufmgr);

   bufmgr->name_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);

   return bufmgr;
}