/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#include <util/u_atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>

#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

#include "common/gen_debug.h"
#include "common/gen_device_info.h"
#include "libdrm_macros.h"
#include "main/macros.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "brw_bufmgr.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define memclear(s) memset(&s, 0, sizeof(s))

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

static inline int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}

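/* Note: atomic_add_unless() applies "add" to *v only when *v != unless, and
 * returns non-zero when *v already equalled "unless" (in which case nothing
 * was updated).  drm_bacon_bo_unreference() below relies on this: the common
 * decrement stays lock-free, and only the final 1 -> 0 transition takes the
 * bufmgr lock.  Illustrative pattern only, not additional code in this file:
 *
 *    if (atomic_add_unless(&bo->refcount, -1, 1))
 *       free_under_lock(bo);   (hypothetical slow-path helper)
 */
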
struct _drm_bacon_context {
   unsigned int ctx_id;
   struct _drm_bacon_bufmgr *bufmgr;
};

struct drm_bacon_gem_bo_bucket {
   struct list_head head;
   unsigned long size;
};

typedef struct _drm_bacon_bufmgr {
   int fd;

   pthread_mutex_t lock;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct drm_bacon_gem_bo_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;

   struct hash_table *name_table;
   struct hash_table *handle_table;

   struct list_head vma_cache;
   int vma_count, vma_open, vma_max;

   unsigned int has_llc : 1;
   unsigned int bo_reuse : 1;
} drm_bacon_bufmgr;

static int
drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride);

static void drm_bacon_gem_bo_free(drm_bacon_bo *bo);

static uint32_t
key_hash_uint(const void *key)
{
   return _mesa_hash_data(key, 4);
}

static bool
key_uint_equal(const void *a, const void *b)
{
   return *((unsigned *) a) == *((unsigned *) b);
}

static drm_bacon_bo *
hash_find_bo(struct hash_table *ht, unsigned int key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
   return entry ? (drm_bacon_bo *) entry->data : NULL;
}

static unsigned long
drm_bacon_gem_bo_tile_size(drm_bacon_bufmgr *bufmgr, unsigned long size,
                           uint32_t *tiling_mode)
{
   if (*tiling_mode == I915_TILING_NONE)
      return size;

   /* 965+ just need multiples of page size for tiling */
   return ALIGN(size, 4096);
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_bacon_gem_bo_tile_pitch(drm_bacon_bufmgr *bufmgr,
                            unsigned long pitch, uint32_t *tiling_mode)
{
   unsigned long tile_width;

   /* If untiled, then just align it so that we can do rendering
    * to it with the 3D engine.
    */
   if (*tiling_mode == I915_TILING_NONE)
      return ALIGN(pitch, 64);

   if (*tiling_mode == I915_TILING_X)
      tile_width = 512;
   else
      tile_width = 128;

   /* 965 is flexible */
   return ALIGN(pitch, tile_width);
}

static struct drm_bacon_gem_bo_bucket *
drm_bacon_gem_bo_bucket_for_size(drm_bacon_bufmgr *bufmgr,
                                 unsigned long size)
{
   int i;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct drm_bacon_gem_bo_bucket *bucket =
         &bufmgr->cache_bucket[i];
      if (bucket->size >= size) {
         return bucket;
      }
   }

   return NULL;
}

void
drm_bacon_bo_reference(drm_bacon_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}

int
drm_bacon_bo_busy(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy;
   int ret;

   memclear(busy);
   busy.handle = bo->gem_handle;

   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0)
      bo->idle = !busy.busy;

   return (ret == 0 && busy.busy);
}

static int
drm_bacon_gem_bo_madvise_internal(drm_bacon_bufmgr *bufmgr,
                                  drm_bacon_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv;

   memclear(madv);
   madv.handle = bo->gem_handle;
   madv.madv = state;
   madv.retained = 1;
   drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}

int
drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv)
{
   return drm_bacon_gem_bo_madvise_internal(bo->bufmgr, bo, madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_bacon_gem_bo_cache_purge_bucket(drm_bacon_bufmgr *bufmgr,
                                    struct drm_bacon_gem_bo_bucket *bucket)
{
   while (!list_empty(&bucket->head)) {
      drm_bacon_bo *bo;

      bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
      if (drm_bacon_gem_bo_madvise_internal
          (bufmgr, bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      drm_bacon_gem_bo_free(bo);
   }
}

static drm_bacon_bo *
drm_bacon_gem_bo_alloc_internal(drm_bacon_bufmgr *bufmgr,
                                const char *name,
                                unsigned long size,
                                unsigned long flags,
                                uint32_t tiling_mode,
                                unsigned long stride,
                                unsigned int alignment)
{
   drm_bacon_bo *bo;
   unsigned int page_size = getpagesize();
   int ret;
   struct drm_bacon_gem_bo_bucket *bucket;
   bool alloc_from_cache;
   unsigned long bo_size;
   bool for_render = false;

   if (flags & BO_ALLOC_FOR_RENDER)
      for_render = true;

   /* Round the allocated size up to a power of two number of pages. */
   bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      bo_size = size;
      if (bo_size < page_size)
         bo_size = page_size;
   } else {
      bo_size = bucket->size;
   }

   pthread_mutex_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      if (for_render) {
         /* Allocate new render-target BOs from the tail (MRU)
          * of the list, as it will likely be hot in the GPU
          * cache and in the aperture for us.
          */
         bo = LIST_ENTRY(drm_bacon_bo, bucket->head.prev, head);
         list_del(&bo->head);
         alloc_from_cache = true;
         bo->align = alignment;
      } else {
         assert(alignment == 0);
         /* For non-render-target BOs (where we're probably
          * going to map it first thing in order to fill it
          * with data), check if the last BO in the cache is
          * unbusy, and only reuse in that case. Otherwise,
          * allocating a new buffer is probably faster than
          * waiting for the GPU to finish.
          */
         bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
         if (!drm_bacon_bo_busy(bo)) {
            alloc_from_cache = true;
            list_del(&bo->head);
         }
      }

      if (alloc_from_cache) {
         if (!drm_bacon_gem_bo_madvise_internal
             (bufmgr, bo, I915_MADV_WILLNEED)) {
            drm_bacon_gem_bo_free(bo);
            drm_bacon_gem_bo_cache_purge_bucket(bufmgr,
                                                bucket);
            goto retry;
         }

         if (drm_bacon_gem_bo_set_tiling_internal(bo,
                                                  tiling_mode,
                                                  stride)) {
            drm_bacon_gem_bo_free(bo);
            goto retry;
         }
      }
   }

   if (!alloc_from_cache) {
      struct drm_i915_gem_create create;

      bo = calloc(1, sizeof(*bo));
      if (!bo)
         goto err;

      /* drm_bacon_gem_bo_free calls list_del() for an uninitialized
         list (vma_list), so better set the list head here */
      list_inithead(&bo->vma_list);

      bo->size = bo_size;

      memclear(create);
      create.size = bo_size;

      ret = drmIoctl(bufmgr->fd,
                     DRM_IOCTL_I915_GEM_CREATE,
                     &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;
      _mesa_hash_table_insert(bufmgr->handle_table,
                              &bo->gem_handle, bo);

      bo->bufmgr = bufmgr;
      bo->align = alignment;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
      bo->stride = 0;

      if (drm_bacon_gem_bo_set_tiling_internal(bo,
                                               tiling_mode,
                                               stride))
         goto err_free;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = true;

   pthread_mutex_unlock(&bufmgr->lock);

   DBG("bo_create: buf %d (%s) %ldb\n",
       bo->gem_handle, bo->name, size);

   return bo;

err_free:
   drm_bacon_gem_bo_free(bo);
err:
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}

drm_bacon_bo *
drm_bacon_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
                              const char *name,
                              unsigned long size,
                              unsigned int alignment)
{
   return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size,
                                          BO_ALLOC_FOR_RENDER,
                                          I915_TILING_NONE, 0,
                                          alignment);
}

drm_bacon_bo *
drm_bacon_bo_alloc(drm_bacon_bufmgr *bufmgr,
                   const char *name,
                   unsigned long size,
                   unsigned int alignment)
{
   return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, 0,
                                          I915_TILING_NONE, 0, 0);
}

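/* Usage sketch (illustrative only, not part of this file's code): a typical
 * allocate/fill/release cycle with the linear allocator above, assuming a
 * valid bufmgr from drm_bacon_bufmgr_gem_init():
 *
 *    drm_bacon_bo *bo = drm_bacon_bo_alloc(bufmgr, "scratch", 8192, 0);
 *    if (bo != NULL && drm_bacon_bo_map(bo, 1) == 0) {
 *       memset(bo->virtual, 0, 8192);
 *       drm_bacon_bo_unmap(bo);
 *    }
 *    drm_bacon_bo_unreference(bo);
 */
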
drm_bacon_bo *
drm_bacon_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
                         int x, int y, int cpp, uint32_t *tiling_mode,
                         unsigned long *pitch, unsigned long flags)
{
   unsigned long size, stride;
   uint32_t tiling;

   do {
      unsigned long aligned_y, height_alignment;

      tiling = *tiling_mode;

      /* If we're tiled, our allocations are in 8 or 32-row blocks,
       * so failure to align our height means that we won't allocate
       * enough pages.
       *
       * If we're untiled, we still have to align to 2 rows high
       * because the data port accesses 2x2 blocks even if the
       * bottom row isn't to be rendered, so failure to align means
       * we could walk off the end of the GTT and fault.  This is
       * documented on 965, and may be the case on older chipsets
       * too so we try to be careful.
       */
      aligned_y = y;
      height_alignment = 2;

      if (tiling == I915_TILING_X)
         height_alignment = 8;
      else if (tiling == I915_TILING_Y)
         height_alignment = 32;
      aligned_y = ALIGN(y, height_alignment);

      stride = x * cpp;
      stride = drm_bacon_gem_bo_tile_pitch(bufmgr, stride, tiling_mode);
      size = stride * aligned_y;
      size = drm_bacon_gem_bo_tile_size(bufmgr, size, tiling_mode);
   } while (*tiling_mode != tiling);
   *pitch = stride;

   if (tiling == I915_TILING_NONE)
      stride = 0;

   return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, flags,
                                          tiling, stride, 0);
}

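/* Usage sketch (illustrative only): the tiled allocator negotiates the tiling
 * mode and returns the pitch it chose, so callers should treat both as
 * in/out values (width, height, and the 4-byte cpp below are caller-provided
 * values used here purely for illustration):
 *
 *    uint32_t tiling = I915_TILING_X;
 *    unsigned long pitch = 0;
 *    drm_bacon_bo *bo =
 *       drm_bacon_bo_alloc_tiled(bufmgr, "miptree", width, height, 4,
 *                                &tiling, &pitch, 0);
 *
 * On return, "tiling" holds the mode actually applied and "pitch" the stride
 * in bytes the buffer was allocated with.
 */
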
/**
 * Returns a drm_bacon_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_bacon_bo *
drm_bacon_bo_gem_create_from_name(drm_bacon_bufmgr *bufmgr,
                                  const char *name,
                                  unsigned int handle)
{
   drm_bacon_bo *bo;
   int ret;
   struct drm_gem_open open_arg;
   struct drm_i915_gem_get_tiling get_tiling;

   /* At the moment most applications only have a few named bo.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named. And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   pthread_mutex_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      drm_bacon_bo_reference(bo);
      goto out;
   }

   memclear(open_arg);
   open_arg.name = handle;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_GEM_OPEN,
                  &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      drm_bacon_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);
   list_inithead(&bo->vma_list);

   bo->size = open_arg.size;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   memclear(get_tiling);
   get_tiling.handle = bo->gem_handle;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_GET_TILING,
                  &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   pthread_mutex_unlock(&bufmgr->lock);
   return bo;

err_unref:
   drm_bacon_gem_bo_free(bo);
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}

static void
drm_bacon_gem_bo_free(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_gem_close close;
   struct hash_entry *entry;
   int ret;

   list_del(&bo->vma_list);
   if (bo->mem_virtual) {
      VG(VALGRIND_FREELIKE_BLOCK(bo->mem_virtual, 0));
      drm_munmap(bo->mem_virtual, bo->size);
      bufmgr->vma_count--;
   }
   if (bo->wc_virtual) {
      VG(VALGRIND_FREELIKE_BLOCK(bo->wc_virtual, 0));
      drm_munmap(bo->wc_virtual, bo->size);
      bufmgr->vma_count--;
   }
   if (bo->gtt_virtual) {
      drm_munmap(bo->gtt_virtual, bo->size);
      bufmgr->vma_count--;
   }

   if (bo->global_name) {
      entry = _mesa_hash_table_search(bufmgr->name_table,
                                      &bo->global_name);
      _mesa_hash_table_remove(bufmgr->name_table, entry);
   }
   entry = _mesa_hash_table_search(bufmgr->handle_table,
                                   &bo->gem_handle);
   _mesa_hash_table_remove(bufmgr->handle_table, entry);

   /* Close this object */
   memclear(close);
   close.handle = bo->gem_handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }
   free(bo);
}

static void
drm_bacon_gem_bo_mark_mmaps_incoherent(drm_bacon_bo *bo)
{
#ifdef HAVE_VALGRIND
   if (bo->mem_virtual)
      VALGRIND_MAKE_MEM_NOACCESS(bo->mem_virtual, bo->size);

   if (bo->wc_virtual)
      VALGRIND_MAKE_MEM_NOACCESS(bo->wc_virtual, bo->size);

   if (bo->gtt_virtual)
      VALGRIND_MAKE_MEM_NOACCESS(bo->gtt_virtual, bo->size);
#endif
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_bacon_gem_cleanup_bo_cache(drm_bacon_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct drm_bacon_gem_bo_bucket *bucket =
         &bufmgr->cache_bucket[i];

      while (!list_empty(&bucket->head)) {
         drm_bacon_bo *bo;

         bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);

         drm_bacon_gem_bo_free(bo);
      }
   }

   bufmgr->time = time;
}

static void drm_bacon_gem_bo_purge_vma_cache(drm_bacon_bufmgr *bufmgr)
{
   int limit;

   DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
       bufmgr->vma_count, bufmgr->vma_open, bufmgr->vma_max);

   if (bufmgr->vma_max < 0)
      return;

   /* We may need to evict a few entries in order to create new mmaps */
   limit = bufmgr->vma_max - 2*bufmgr->vma_open;
   if (limit < 0)
      limit = 0;

   while (bufmgr->vma_count > limit) {
      drm_bacon_bo *bo;

      bo = LIST_ENTRY(drm_bacon_bo, bufmgr->vma_cache.next, vma_list);
      assert(bo->map_count == 0);
      list_delinit(&bo->vma_list);

      if (bo->mem_virtual) {
         drm_munmap(bo->mem_virtual, bo->size);
         bo->mem_virtual = NULL;
         bufmgr->vma_count--;
      }
      if (bo->wc_virtual) {
         drm_munmap(bo->wc_virtual, bo->size);
         bo->wc_virtual = NULL;
         bufmgr->vma_count--;
      }
      if (bo->gtt_virtual) {
         drm_munmap(bo->gtt_virtual, bo->size);
         bo->gtt_virtual = NULL;
         bufmgr->vma_count--;
      }
   }
}

static void drm_bacon_gem_bo_close_vma(drm_bacon_bufmgr *bufmgr,
                                       drm_bacon_bo *bo)
{
   bufmgr->vma_open--;
   list_addtail(&bo->vma_list, &bufmgr->vma_cache);
   if (bo->mem_virtual)
      bufmgr->vma_count++;
   if (bo->wc_virtual)
      bufmgr->vma_count++;
   if (bo->gtt_virtual)
      bufmgr->vma_count++;
   drm_bacon_gem_bo_purge_vma_cache(bufmgr);
}

static void drm_bacon_gem_bo_open_vma(drm_bacon_bufmgr *bufmgr,
                                      drm_bacon_bo *bo)
{
   bufmgr->vma_open++;
   list_del(&bo->vma_list);
   if (bo->mem_virtual)
      bufmgr->vma_count--;
   if (bo->wc_virtual)
      bufmgr->vma_count--;
   if (bo->gtt_virtual)
      bufmgr->vma_count--;
   drm_bacon_gem_bo_purge_vma_cache(bufmgr);
}

static void
drm_bacon_gem_bo_unreference_final(drm_bacon_bo *bo, time_t time)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_bacon_gem_bo_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n",
       bo->gem_handle, bo->name);

   /* Clear any left-over mappings */
   if (bo->map_count) {
      DBG("bo freed with non-zero map-count %d\n", bo->map_count);
      bo->map_count = 0;
      drm_bacon_gem_bo_close_vma(bufmgr, bo);
      drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
   }

   bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
       drm_bacon_gem_bo_madvise_internal(bufmgr, bo,
                                         I915_MADV_DONTNEED)) {
      bo->free_time = time;

      bo->name = NULL;

      list_addtail(&bo->head, &bucket->head);
   } else {
      drm_bacon_gem_bo_free(bo);
   }
}

void
drm_bacon_bo_unreference(drm_bacon_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      drm_bacon_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      pthread_mutex_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         drm_bacon_gem_bo_unreference_final(bo, time.tv_sec);
         drm_bacon_gem_cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      pthread_mutex_unlock(&bufmgr->lock);
   }
}

int
drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_domain set_domain;
   int ret;

   pthread_mutex_lock(&bufmgr->lock);

   if (bo->map_count++ == 0)
      drm_bacon_gem_bo_open_vma(bufmgr, bo);

   if (!bo->mem_virtual) {
      struct drm_i915_gem_mmap mmap_arg;

      DBG("bo_map: %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;
      mmap_arg.size = bo->size;
      ret = drmIoctl(bufmgr->fd,
                     DRM_IOCTL_I915_GEM_MMAP,
                     &mmap_arg);
      if (ret != 0) {
         ret = -errno;
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle,
             bo->name, strerror(errno));
         if (--bo->map_count == 0)
            drm_bacon_gem_bo_close_vma(bufmgr, bo);
         pthread_mutex_unlock(&bufmgr->lock);
         return ret;
      }
      VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
      bo->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
   }
   DBG("bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name,
       bo->mem_virtual);
   bo->virtual = bo->mem_virtual;

   memclear(set_domain);
   set_domain.handle = bo->gem_handle;
   set_domain.read_domains = I915_GEM_DOMAIN_CPU;
   if (write_enable)
      set_domain.write_domain = I915_GEM_DOMAIN_CPU;
   else
      set_domain.write_domain = 0;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_SET_DOMAIN,
                  &set_domain);
   if (ret != 0) {
      DBG("%s:%d: Error setting to CPU domain %d: %s\n",
          __FILE__, __LINE__, bo->gem_handle,
          strerror(errno));
   }

   drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
   VG(VALGRIND_MAKE_MEM_DEFINED(bo->mem_virtual, bo->size));
   pthread_mutex_unlock(&bufmgr->lock);

   return 0;
}

static int
map_gtt(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   int ret;

   if (bo->map_count++ == 0)
      drm_bacon_gem_bo_open_vma(bufmgr, bo);

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->gtt_virtual == NULL) {
      struct drm_i915_gem_mmap_gtt mmap_arg;

      DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;

      /* Get the fake offset back... */
      ret = drmIoctl(bufmgr->fd,
                     DRM_IOCTL_I915_GEM_MMAP_GTT,
                     &mmap_arg);
      if (ret != 0) {
         ret = -errno;
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__,
             bo->gem_handle, bo->name,
             strerror(errno));
         if (--bo->map_count == 0)
            drm_bacon_gem_bo_close_vma(bufmgr, bo);
         return ret;
      }

      /* and mmap it */
      bo->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, bufmgr->fd,
                                 mmap_arg.offset);
      if (bo->gtt_virtual == MAP_FAILED) {
         bo->gtt_virtual = NULL;
         ret = -errno;
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__,
             bo->gem_handle, bo->name,
             strerror(errno));
         if (--bo->map_count == 0)
            drm_bacon_gem_bo_close_vma(bufmgr, bo);
         return ret;
      }
   }

   bo->virtual = bo->gtt_virtual;

   DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
       bo->gtt_virtual);

   return 0;
}

int
drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_domain set_domain;
   int ret;

   pthread_mutex_lock(&bufmgr->lock);

   ret = map_gtt(bo);
   if (ret != 0) {
      pthread_mutex_unlock(&bufmgr->lock);
      return ret;
   }

   /* Now move it to the GTT domain so that the GPU and CPU
    * caches are flushed and the GPU isn't actively using the
    * buffer.
    *
    * The pagefault handler does this domain change for us when
    * it has unbound the BO from the GTT, but it's up to us to
    * tell it when we're about to use things if we had done
    * rendering and it still happens to be bound to the GTT.
    */
   memclear(set_domain);
   set_domain.handle = bo->gem_handle;
   set_domain.read_domains = I915_GEM_DOMAIN_GTT;
   set_domain.write_domain = I915_GEM_DOMAIN_GTT;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_SET_DOMAIN,
                  &set_domain);
   if (ret != 0) {
      DBG("%s:%d: Error setting domain %d: %s\n",
          __FILE__, __LINE__, bo->gem_handle,
          strerror(errno));
   }

   drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
   VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
   pthread_mutex_unlock(&bufmgr->lock);

   return 0;
}

/**
 * Performs a mapping of the buffer object like the normal GTT
 * mapping, but avoids waiting for the GPU to be done reading from or
 * rendering to the buffer.
 *
 * This is used in the implementation of GL_ARB_map_buffer_range: The
 * user asks to create a buffer, then does a mapping, fills some
 * space, runs a drawing command, then asks to map it again without
 * synchronizing because it guarantees that it won't write over the
 * data that the GPU is busy using (or, more specifically, that if it
 * does write over the data, it acknowledges that rendering is
 * undefined).
 */
int
drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   int ret;

   /* If the CPU cache isn't coherent with the GTT, then use a
    * regular synchronized mapping.  The problem is that we don't
    * track where the buffer was last used on the CPU side in
    * terms of drm_bacon_bo_map vs drm_bacon_gem_bo_map_gtt, so
    * we would potentially corrupt the buffer even when the user
    * does reasonable things.
    */
   if (!bufmgr->has_llc)
      return drm_bacon_gem_bo_map_gtt(bo);

   pthread_mutex_lock(&bufmgr->lock);

   ret = map_gtt(bo);
   if (ret == 0) {
      drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
      VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
   }

   pthread_mutex_unlock(&bufmgr->lock);

   return ret;
}

int
drm_bacon_bo_unmap(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;

   if (bo == NULL)
      return 0;

   pthread_mutex_lock(&bufmgr->lock);

   if (bo->map_count <= 0) {
      DBG("attempted to unmap an unmapped bo\n");
      pthread_mutex_unlock(&bufmgr->lock);
      /* Preserve the old behaviour of just treating this as a
       * no-op rather than reporting the error.
       */
      return 0;
   }

   /* We need to unmap after every invocation as we cannot track
    * an open vma for every bo as that will exhaust the system
    * limits and cause later failures.
    */
   if (--bo->map_count == 0) {
      drm_bacon_gem_bo_close_vma(bufmgr, bo);
      drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
      bo->virtual = NULL;
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return 0;
}

int
drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
                     unsigned long size, const void *data)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_pwrite pwrite;
   int ret;

   memclear(pwrite);
   pwrite.handle = bo->gem_handle;
   pwrite.offset = offset;
   pwrite.size = size;
   pwrite.data_ptr = (uint64_t) (uintptr_t) data;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_PWRITE,
                  &pwrite);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
          __FILE__, __LINE__, bo->gem_handle, (int)offset,
          (int)size, strerror(errno));
   }

   return ret;
}

int
drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
                         unsigned long size, void *data)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_pread pread;
   int ret;

   memclear(pread);
   pread.handle = bo->gem_handle;
   pread.offset = offset;
   pread.size = size;
   pread.data_ptr = (uint64_t) (uintptr_t) data;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_PREAD,
                  &pread);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
          __FILE__, __LINE__, bo->gem_handle, (int)offset,
          (int)size, strerror(errno));
   }

   return ret;
}

/** Waits for all GPU rendering with the object to have completed. */
void
drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
{
   drm_bacon_gem_bo_start_gtt_access(bo, 1);
}

/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time.  Otherwise some negative
 * return value describes the error.  Of particular interest is -ETIME when
 * the wait has failed to yield the desired result.
 *
 * Similar to drm_bacon_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time.  Another subtle
 * difference is that the internal locking semantics differ (this variant does
 * not hold the lock for the duration of the wait).  This makes the wait
 * subject to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call.  The wait does
 * not guarantee that the buffer won't be re-issued via another thread or a
 * flinked handle.  Userspace must make sure this race does not occur if such
 * precision is important.
 *
 * Note that some kernels have broken the infinite-wait-for-negative-values
 * promise; upgrade to the latest stable kernel if this is the case.
 */
int
drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_wait wait;
   int ret;

   memclear(wait);
   wait.bo_handle = bo->gem_handle;
   wait.timeout_ns = timeout_ns;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret == -1)
      return -errno;

   return ret;
}

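/* Usage sketch (illustrative only): a bounded wait before falling back to
 * other work, e.g. one millisecond expressed in nanoseconds:
 *
 *    if (drm_bacon_gem_bo_wait(bo, 1000000) != 0) {
 *       ... timed out (typically -ETIME); drm_bacon_bo_busy(bo) will
 *       ... normally still report the BO as busy here
 *    }
 *
 * A negative timeout_ns requests an infinite wait (subject to the kernel
 * caveat noted above).
 */
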
/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_bacon_gem_bo_map_gtt().
 *
 * In combination with drm_bacon_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_domain set_domain;
   int ret;

   memclear(set_domain);
   set_domain.handle = bo->gem_handle;
   set_domain.read_domains = I915_GEM_DOMAIN_GTT;
   set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_SET_DOMAIN,
                  &set_domain);
   if (ret != 0) {
      DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
          __FILE__, __LINE__, bo->gem_handle,
          set_domain.read_domains, set_domain.write_domain,
          strerror(errno));
   }
}

void
drm_bacon_bufmgr_destroy(drm_bacon_bufmgr *bufmgr)
{
   pthread_mutex_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct drm_bacon_gem_bo_bucket *bucket =
         &bufmgr->cache_bucket[i];
      drm_bacon_bo *bo;

      while (!list_empty(&bucket->head)) {
         bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
         list_del(&bo->head);

         drm_bacon_gem_bo_free(bo);
      }
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   free(bufmgr);
}

static int
drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode &&
       stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drmIoctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_SET_TILING,
                  &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}

int
drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t *tiling_mode,
                        uint32_t stride)
{
   int ret;

   /* Linear buffers have no stride. By ensuring that we only ever use
    * stride 0 with linear buffers, we simplify our code.
    */
   if (*tiling_mode == I915_TILING_NONE)
      stride = 0;

   ret = drm_bacon_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);

   *tiling_mode = bo->tiling_mode;
   return ret;
}

int
drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t *tiling_mode,
                        uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
   return 0;
}

drm_bacon_bo *
drm_bacon_bo_gem_create_from_prime(drm_bacon_bufmgr *bufmgr, int prime_fd, int size)
{
   int ret;
   uint32_t handle;
   drm_bacon_bo *bo;
   struct drm_i915_gem_get_tiling get_tiling;

   pthread_mutex_lock(&bufmgr->lock);
   ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
      pthread_mutex_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      drm_bacon_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);
   list_inithead(&bo->vma_list);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guess size). */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;
   else
      bo->size = size;

   bo->bufmgr = bufmgr;

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table,
                           &bo->gem_handle, bo);

   bo->name = "prime";
   bo->reusable = false;

   memclear(get_tiling);
   get_tiling.handle = bo->gem_handle;
   if (drmIoctl(bufmgr->fd,
                DRM_IOCTL_I915_GEM_GET_TILING,
                &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */

out:
   pthread_mutex_unlock(&bufmgr->lock);
   return bo;

err:
   drm_bacon_gem_bo_free(bo);
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}

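/* Usage sketch (illustrative only): passing a BO between two bufmgr instances
 * (or processes) with PRIME file descriptors, using the export function
 * defined just below:
 *
 *    int fd;
 *    if (drm_bacon_bo_gem_export_to_prime(bo, &fd) == 0) {
 *       drm_bacon_bo *imported =
 *          drm_bacon_bo_gem_create_from_prime(other_bufmgr, fd, bo->size);
 *       close(fd);
 *    }
 *
 * The size argument is only a fallback; on kernels that support lseek() on
 * dma-buf fds, the real size is taken from the fd (see the comment above).
 */
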
int
drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}

int
drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t *name)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink;

      memclear(flink);
      flink.handle = bo->gem_handle;
      if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      pthread_mutex_lock(&bufmgr->lock);
      if (!bo->global_name) {
         bo->global_name = flink.name;
         bo->reusable = false;

         _mesa_hash_table_insert(bufmgr->name_table,
                                 &bo->global_name, bo);
      }
      pthread_mutex_unlock(&bufmgr->lock);
   }

   *name = bo->global_name;
   return 0;
}

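/* Usage sketch (illustrative only): the flink/global-name path mirrors the
 * PRIME path above, but uses a global GEM name instead of a file descriptor:
 *
 *    uint32_t name;
 *    if (drm_bacon_bo_flink(bo, &name) == 0) {
 *       drm_bacon_bo *shared =
 *          drm_bacon_bo_gem_create_from_name(other_bufmgr, "shared", name);
 *    }
 *
 * Flinked buffers are marked non-reusable above, since another process may
 * still look up the name after we drop our last reference.
 */
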
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_bacon_bufmgr_gem_enable_reuse(drm_bacon_bufmgr *bufmgr)
{
   bufmgr->bo_reuse = true;
}

/**
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
int
drm_bacon_bo_disable_reuse(drm_bacon_bo *bo)
{
   bo->reusable = false;
   return 0;
}

int
drm_bacon_bo_is_reusable(drm_bacon_bo *bo)
{
   return bo->reusable;
}

static void
add_bucket(drm_bacon_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;
}

static void
init_cache_buckets(drm_bacon_bufmgr *bufmgr)
{
   unsigned long size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, 4096);
   add_bucket(bufmgr, 4096 * 2);
   add_bucket(bufmgr, 4096 * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}

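/* For reference (illustrative note, derived from the loop above): the bucket
 * sizes come out as 4KB, 8KB, 12KB, then every power of two from 16KB up to
 * the 64MB cache_max_size plus three evenly spaced intermediate sizes between
 * consecutive powers of two, so drm_bacon_gem_bo_bucket_for_size() rounds a
 * request up by roughly 25% at most.
 */
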
void
drm_bacon_bufmgr_gem_set_vma_cache_size(drm_bacon_bufmgr *bufmgr, int limit)
{
   bufmgr->vma_max = limit;

   drm_bacon_gem_bo_purge_vma_cache(bufmgr);
}

drm_bacon_context *
drm_bacon_gem_context_create(drm_bacon_bufmgr *bufmgr)
{
   struct drm_i915_gem_context_create create;
   drm_bacon_context *context = NULL;
   int ret;

   context = calloc(1, sizeof(*context));
   if (!context)
      return NULL;

   memclear(create);
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret != 0) {
      DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
          strerror(errno));
      free(context);
      return NULL;
   }

   context->ctx_id = create.ctx_id;
   context->bufmgr = bufmgr;

   return context;
}

int
drm_bacon_gem_context_get_id(drm_bacon_context *ctx, uint32_t *ctx_id)
{
   if (ctx == NULL)
      return -EINVAL;

   *ctx_id = ctx->ctx_id;
   return 0;
}

void
drm_bacon_gem_context_destroy(drm_bacon_context *ctx)
{
   struct drm_i915_gem_context_destroy destroy;
   int ret;

   if (ctx == NULL)
      return;

   memclear(destroy);

   destroy.ctx_id = ctx->ctx_id;
   ret = drmIoctl(ctx->bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
                  &destroy);
   if (ret != 0)
      fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
              strerror(errno));

   free(ctx);
}

int
drm_bacon_get_reset_stats(drm_bacon_context *ctx,
                          uint32_t *reset_count,
                          uint32_t *active,
                          uint32_t *pending)
{
   struct drm_i915_reset_stats stats;
   int ret;

   if (ctx == NULL)
      return -EINVAL;

   memclear(stats);

   stats.ctx_id = ctx->ctx_id;
   ret = drmIoctl(ctx->bufmgr->fd,
                  DRM_IOCTL_I915_GET_RESET_STATS,
                  &stats);
   if (ret == 0) {
      if (reset_count != NULL)
         *reset_count = stats.reset_count;

      if (active != NULL)
         *active = stats.batch_active;

      if (pending != NULL)
         *pending = stats.batch_pending;
   }

   return ret;
}

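/* Usage sketch (illustrative only): polling for GPU resets that affected this
 * context, e.g. to drive ARB_robustness-style reset notification:
 *
 *    uint32_t reset_count, active, pending;
 *    if (drm_bacon_get_reset_stats(ctx, &reset_count, &active, &pending) == 0
 *        && (active || pending))
 *       handle_context_reset();   (hypothetical caller-side handler)
 */
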
int
drm_bacon_reg_read(drm_bacon_bufmgr *bufmgr,
                   uint32_t offset,
                   uint64_t *result)
{
   struct drm_i915_reg_read reg_read;
   int ret;

   memclear(reg_read);
   reg_read.offset = offset;

   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

   *result = reg_read.val;
   return ret;
}

void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;

   if (bo->gtt_virtual)
      return bo->gtt_virtual;

   pthread_mutex_lock(&bufmgr->lock);
   if (bo->gtt_virtual == NULL) {
      struct drm_i915_gem_mmap_gtt mmap_arg;
      void *ptr;

      DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      if (bo->map_count++ == 0)
         drm_bacon_gem_bo_open_vma(bufmgr, bo);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;

      /* Get the fake offset back... */
      ptr = MAP_FAILED;
      if (drmIoctl(bufmgr->fd,
                   DRM_IOCTL_I915_GEM_MMAP_GTT,
                   &mmap_arg) == 0) {
         /* and mmap it */
         ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, bufmgr->fd,
                        mmap_arg.offset);
      }
      if (ptr == MAP_FAILED) {
         if (--bo->map_count == 0)
            drm_bacon_gem_bo_close_vma(bufmgr, bo);
         ptr = NULL;
      }

      bo->gtt_virtual = ptr;
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return bo->gtt_virtual;
}

void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;

   if (bo->mem_virtual)
      return bo->mem_virtual;

   pthread_mutex_lock(&bufmgr->lock);
   if (!bo->mem_virtual) {
      struct drm_i915_gem_mmap mmap_arg;

      if (bo->map_count++ == 0)
         drm_bacon_gem_bo_open_vma(bufmgr, bo);

      DBG("bo_map: %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;
      mmap_arg.size = bo->size;
      if (drmIoctl(bufmgr->fd,
                   DRM_IOCTL_I915_GEM_MMAP,
                   &mmap_arg)) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle,
             bo->name, strerror(errno));
         if (--bo->map_count == 0)
            drm_bacon_gem_bo_close_vma(bufmgr, bo);
      } else {
         VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
         bo->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
      }
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return bo->mem_virtual;
}

void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;

   if (bo->wc_virtual)
      return bo->wc_virtual;

   pthread_mutex_lock(&bufmgr->lock);
   if (!bo->wc_virtual) {
      struct drm_i915_gem_mmap mmap_arg;

      if (bo->map_count++ == 0)
         drm_bacon_gem_bo_open_vma(bufmgr, bo);

      DBG("bo_map: %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;
      mmap_arg.size = bo->size;
      mmap_arg.flags = I915_MMAP_WC;
      if (drmIoctl(bufmgr->fd,
                   DRM_IOCTL_I915_GEM_MMAP,
                   &mmap_arg)) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle,
             bo->name, strerror(errno));
         if (--bo->map_count == 0)
            drm_bacon_gem_bo_close_vma(bufmgr, bo);
      } else {
         VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
         bo->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
      }
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return bo->wc_virtual;
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_bacon_bufmgr *
drm_bacon_bufmgr_gem_init(struct gen_device_info *devinfo,
                          int fd, int batch_size)
{
   drm_bacon_bufmgr *bufmgr;

   bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel.  If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this! Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = fd;

   if (pthread_mutex_init(&bufmgr->lock, NULL) != 0) {
      free(bufmgr);
      return NULL;
   }

   bufmgr->has_llc = devinfo->has_llc;

   init_cache_buckets(bufmgr);

   list_inithead(&bufmgr->vma_cache);
   bufmgr->vma_max = -1; /* unlimited by default */

   bufmgr->name_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);

   return bufmgr;
}
