/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */
#include <util/u_atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "common/gen_debug.h"
#include "common/gen_device_info.h"
#include "libdrm_macros.h"
#include "main/macros.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "brw_bufmgr.h"
#include "brw_context.h"

#define memclear(s) memset(&s, 0, sizeof(s))

#define FILE_DEBUG_FLAG DEBUG_BUFMGR
/**
 * Atomically add @add to *@v unless *@v already equals @unless.
 * Returns true if *@v equalled @unless (i.e. no add was performed).
 */
static inline int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}
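/*
 * Illustrative sketch (not part of the original code): atomic_add_unless()
 * lets a caller drop a reference without taking the bufmgr lock unless the
 * count might actually reach zero.  A hypothetical caller could look like:
 *
 *    if (atomic_add_unless(&bo->refcount, -1, 1)) {
 *       // refcount was 1: take the lock and do the final teardown
 *    }
 */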
struct bo_cache_bucket {
   struct list_head head;
   unsigned long size;
};

struct brw_bufmgr {
   int fd;

   pthread_mutex_t lock;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct bo_cache_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;

   struct hash_table *name_table;
   struct hash_table *handle_table;

   unsigned int has_llc:1;
   unsigned int bo_reuse:1;
};

static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
                                  uint32_t stride);

static void bo_free(struct brw_bo *bo);
static uint32_t
key_hash_uint(const void *key)
{
   return _mesa_hash_data(key, 4);
}

static bool
key_uint_equal(const void *a, const void *b)
{
   return *((unsigned *) a) == *((unsigned *) b);
}

static struct brw_bo *
hash_find_bo(struct hash_table *ht, unsigned int key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
   return entry ? (struct brw_bo *) entry->data : NULL;
}
static unsigned long
bo_tile_size(struct brw_bufmgr *bufmgr, unsigned long size,
             uint32_t *tiling_mode)
{
   if (*tiling_mode == I915_TILING_NONE)
      return size;

   /* 965+ just need multiples of page size for tiling */
   return ALIGN(size, 4096);
}
/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip. We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
bo_tile_pitch(struct brw_bufmgr *bufmgr,
              unsigned long pitch, uint32_t *tiling_mode)
{
   unsigned long tile_width;

   /* If untiled, then just align it so that we can do rendering
    * to it with the 3D engine.
    */
   if (*tiling_mode == I915_TILING_NONE)
      return ALIGN(pitch, 64);

   if (*tiling_mode == I915_TILING_X)
      tile_width = 512;
   else
      tile_width = 128;

   /* 965 is flexible */
   return ALIGN(pitch, tile_width);
}
static struct bo_cache_bucket *
bucket_for_size(struct brw_bufmgr *bufmgr, unsigned long size)
{
   int i;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
      if (bucket->size >= size)
         return bucket;
   }

   return NULL;
}
void
brw_bo_reference(struct brw_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}
bool
brw_bo_busy(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy;
   int ret;

   memclear(busy);
   busy.handle = bo->gem_handle;

   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0)
      bo->idle = !busy.busy;

   return (ret == 0 && busy.busy);
}
int
brw_bo_madvise(struct brw_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv;

   memclear(madv);
   madv.handle = bo->gem_handle;
   madv.madv = state;
   madv.retained = 1;

   drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}
/* drop the oldest entries that have been purged by the kernel */
static void
brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
                          struct bo_cache_bucket *bucket)
{
   list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
      if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      bo_free(bo);
   }
}
static struct brw_bo *
bo_alloc_internal(struct brw_bufmgr *bufmgr,
                  const char *name,
                  unsigned long size,
                  unsigned long flags,
                  uint32_t tiling_mode,
                  unsigned long stride, unsigned int alignment)
{
   struct brw_bo *bo;
   unsigned int page_size = getpagesize();
   int ret;
   struct bo_cache_bucket *bucket;
   bool alloc_from_cache;
   unsigned long bo_size;
   bool for_render = false;

   if (flags & BO_ALLOC_FOR_RENDER)
      for_render = true;

   /* Round the allocated size up to a power of two number of pages. */
   bucket = bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      bo_size = size;
      if (bo_size < page_size)
         bo_size = page_size;
   } else {
      bo_size = bucket->size;
   }

   pthread_mutex_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      if (for_render) {
         /* Allocate new render-target BOs from the tail (MRU)
          * of the list, as it will likely be hot in the GPU
          * cache and in the aperture for us.
          */
         bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
         list_del(&bo->head);
         alloc_from_cache = true;
         bo->align = alignment;
      } else {
         assert(alignment == 0);
         /* For non-render-target BOs (where we're probably
          * going to map it first thing in order to fill it
          * with data), check if the last BO in the cache is
          * unbusy, and only reuse in that case. Otherwise,
          * allocating a new buffer is probably faster than
          * waiting for the GPU to finish.
          */
         bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
         if (!brw_bo_busy(bo)) {
            alloc_from_cache = true;
            list_del(&bo->head);
         }
      }

      if (alloc_from_cache) {
         if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
            bo_free(bo);
            brw_bo_cache_purge_bucket(bufmgr, bucket);
            goto retry;
         }

         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
            bo_free(bo);
            goto retry;
         }
      }
   }

   if (!alloc_from_cache) {
      struct drm_i915_gem_create create;

      bo = calloc(1, sizeof(*bo));
      if (!bo)
         goto err;

      bo->size = bo_size;

      memclear(create);
      create.size = bo_size;

      ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;
      _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

      bo->bufmgr = bufmgr;
      bo->align = alignment;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;

      if (bo_set_tiling_internal(bo, tiling_mode, stride))
         goto err_free;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = true;

   pthread_mutex_unlock(&bufmgr->lock);

   DBG("bo_create: buf %d (%s) %ldb\n", bo->gem_handle, bo->name, size);

   return bo;

err_free:
   bo_free(bo);
err:
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}
struct brw_bo *
brw_bo_alloc(struct brw_bufmgr *bufmgr,
             const char *name, unsigned long size, unsigned int alignment)
{
   return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
}
struct brw_bo *
brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
                   int x, int y, int cpp, uint32_t *tiling_mode,
                   unsigned long *pitch, unsigned long flags)
{
   unsigned long size, stride;
   uint32_t tiling;

   do {
      unsigned long aligned_y, height_alignment;

      tiling = *tiling_mode;

      /* If we're tiled, our allocations are in 8 or 32-row blocks,
       * so failure to align our height means that we won't allocate
       * enough pages.
       *
       * If we're untiled, we still have to align to 2 rows high
       * because the data port accesses 2x2 blocks even if the
       * bottom row isn't to be rendered, so failure to align means
       * we could walk off the end of the GTT and fault. This is
       * documented on 965, and may be the case on older chipsets
       * too so we try to be careful.
       */
      height_alignment = 2;

      if (tiling == I915_TILING_X)
         height_alignment = 8;
      else if (tiling == I915_TILING_Y)
         height_alignment = 32;
      aligned_y = ALIGN(y, height_alignment);

      stride = x * cpp;
      stride = bo_tile_pitch(bufmgr, stride, tiling_mode);
      size = stride * aligned_y;
      size = bo_tile_size(bufmgr, size, tiling_mode);
   } while (*tiling_mode != tiling);

   if (tiling == I915_TILING_NONE)
      stride = 0;

   *pitch = stride;

   return bo_alloc_internal(bufmgr, name, size, flags, tiling, stride, 0);
}
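/*
 * Example (illustrative only; the name and dimensions are hypothetical):
 * allocating an X-tiled render target and letting the bufmgr pick the final
 * tiling and pitch for us:
 *
 *    uint32_t tiling = I915_TILING_X;
 *    unsigned long pitch = 0;
 *    struct brw_bo *rt =
 *       brw_bo_alloc_tiled(bufmgr, "color buffer", 1024, 768, 4,
 *                          &tiling, &pitch, BO_ALLOC_FOR_RENDER);
 */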
/**
 * Returns a brw_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct brw_bo *
brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
                            const char *name, unsigned int handle)
{
   struct brw_bo *bo;
   int ret;
   struct drm_gem_open open_arg;
   struct drm_i915_gem_get_tiling get_tiling;

   /* At the moment most applications only have a few named bo.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named. And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   pthread_mutex_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   memclear(open_arg);
   open_arg.name = handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   memclear(get_tiling);
   get_tiling.handle = bo->gem_handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   pthread_mutex_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}
static void
bo_free(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_gem_close close;
   struct hash_entry *entry;
   int ret;

   if (bo->mem_virtual) {
      VG(VALGRIND_FREELIKE_BLOCK(bo->mem_virtual, 0));
      drm_munmap(bo->mem_virtual, bo->size);
   }
   if (bo->wc_virtual) {
      VG(VALGRIND_FREELIKE_BLOCK(bo->wc_virtual, 0));
      drm_munmap(bo->wc_virtual, bo->size);
   }
   if (bo->gtt_virtual) {
      drm_munmap(bo->gtt_virtual, bo->size);
   }

   if (bo->global_name) {
      entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
      _mesa_hash_table_remove(bufmgr->name_table, entry);
   }
   entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
   _mesa_hash_table_remove(bufmgr->handle_table, entry);

   /* Close this object */
   memclear(close);
   close.handle = bo->gem_handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   free(bo);
}
static void
bo_mark_mmaps_incoherent(struct brw_bo *bo)
{
#if HAVE_VALGRIND
   if (bo->mem_virtual)
      VALGRIND_MAKE_MEM_NOACCESS(bo->mem_virtual, bo->size);

   if (bo->wc_virtual)
      VALGRIND_MAKE_MEM_NOACCESS(bo->wc_virtual, bo->size);

   if (bo->gtt_virtual)
      VALGRIND_MAKE_MEM_NOACCESS(bo->gtt_virtual, bo->size);
#endif
}
/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);
         bo_free(bo);
      }
   }

   bufmgr->time = time;
}
static void
bo_unreference_final(struct brw_bo *bo, time_t time)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   /* Clear any left-over mappings */
   if (bo->map_count) {
      DBG("bo freed with non-zero map-count %d\n", bo->map_count);
      bo->map_count = 0;
      bo_mark_mmaps_incoherent(bo);
   }

   bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
       brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;
      bo->name = NULL;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}
void
brw_bo_unreference(struct brw_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct brw_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      pthread_mutex_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      pthread_mutex_unlock(&bufmgr->lock);
   }
}
static void
set_domain(struct brw_context *brw, const char *action,
           struct brw_bo *bo, uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain sd = {
      .handle = bo->gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   double elapsed = unlikely(brw && brw->perf_debug) ? -get_time() : 0.0;

   if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
      DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s.\n",
          __FILE__, __LINE__, bo->gem_handle, read_domains, write_domain,
          strerror(errno));
   }

   if (unlikely(brw && brw->perf_debug)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */
         perf_debug("%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
   }
}
int
brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   int ret;

   pthread_mutex_lock(&bufmgr->lock);

   if (!bo->mem_virtual) {
      struct drm_i915_gem_mmap mmap_arg;

      DBG("bo_map: %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;
      mmap_arg.size = bo->size;
      ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         ret = -errno;
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         pthread_mutex_unlock(&bufmgr->lock);
         return ret;
      }
      bo->map_count++;
      VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
      bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
   }
   DBG("bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->mem_virtual);
   bo->virtual = bo->mem_virtual;

   set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU,
              write_enable ? I915_GEM_DOMAIN_CPU : 0);

   bo_mark_mmaps_incoherent(bo);
   VG(VALGRIND_MAKE_MEM_DEFINED(bo->mem_virtual, bo->size));
   pthread_mutex_unlock(&bufmgr->lock);

   return 0;
}
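/*
 * Usage sketch (illustrative, not part of the original file): filling a BO
 * through a CPU mapping.  brw_bo_map() takes the bufmgr lock, mmaps the
 * object on first use, and moves it to the CPU domain before returning.
 * "data" and "data_size" below are hypothetical.
 *
 *    if (brw_bo_map(brw, bo, true) == 0) {
 *       memcpy(bo->virtual, data, data_size);
 *       brw_bo_unmap(bo);
 *    }
 */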
static int
map_gtt(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   int ret;

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->gtt_virtual == NULL) {
      struct drm_i915_gem_mmap_gtt mmap_arg;

      DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;

      /* Get the fake offset back... */
      ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         ret = -errno;
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return ret;
      }

      /* and mmap it */
      bo->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (bo->gtt_virtual == MAP_FAILED) {
         bo->gtt_virtual = NULL;
         ret = -errno;
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return ret;
      }
   }

   bo->map_count++;
   bo->virtual = bo->gtt_virtual;

   DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
       bo->gtt_virtual);

   return 0;
}
int
brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   int ret;

   pthread_mutex_lock(&bufmgr->lock);

   ret = map_gtt(bo);
   if (ret) {
      pthread_mutex_unlock(&bufmgr->lock);
      return ret;
   }

   /* Now move it to the GTT domain so that the GPU and CPU
    * caches are flushed and the GPU isn't actively using the
    * buffer.
    *
    * The pagefault handler does this domain change for us when
    * it has unbound the BO from the GTT, but it's up to us to
    * tell it when we're about to use things if we had done
    * rendering and it still happens to be bound to the GTT.
    */
   set_domain(brw, "GTT mapping", bo,
              I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

   bo_mark_mmaps_incoherent(bo);
   VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
   pthread_mutex_unlock(&bufmgr->lock);

   return 0;
}
/**
 * Performs a mapping of the buffer object like the normal GTT
 * mapping, but avoids waiting for the GPU to be done reading from or
 * rendering to the buffer.
 *
 * This is used in the implementation of GL_ARB_map_buffer_range: The
 * user asks to create a buffer, then does a mapping, fills some
 * space, runs a drawing command, then asks to map it again without
 * synchronizing because it guarantees that it won't write over the
 * data that the GPU is busy using (or, more specifically, that if it
 * does write over the data, it acknowledges that rendering is
 * undefined).
 */
int
brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   int ret;

   /* If the CPU cache isn't coherent with the GTT, then use a
    * regular synchronized mapping. The problem is that we don't
    * track where the buffer was last used on the CPU side in
    * terms of brw_bo_map vs brw_bo_map_gtt, so
    * we would potentially corrupt the buffer even when the user
    * does reasonable things.
    */
   if (!bufmgr->has_llc)
      return brw_bo_map_gtt(brw, bo);

   pthread_mutex_lock(&bufmgr->lock);

   ret = map_gtt(bo);
   if (ret == 0) {
      bo_mark_mmaps_incoherent(bo);
      VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
   }

   pthread_mutex_unlock(&bufmgr->lock);

   return ret;
}
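/*
 * Sketch of the GL_ARB_map_buffer_range pattern described above
 * (illustrative only): append new vertex data while the GPU may still be
 * reading earlier parts of the same buffer.  "vbo", "offset", "src" and
 * "bytes" are hypothetical.
 *
 *    if (brw_bo_map_unsynchronized(brw, vbo) == 0) {
 *       memcpy((char *) vbo->virtual + offset, src, bytes);
 *       brw_bo_unmap(vbo);
 *    }
 */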
int
brw_bo_unmap(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   int ret = 0;

   pthread_mutex_lock(&bufmgr->lock);

   if (bo->map_count <= 0) {
      DBG("attempted to unmap an unmapped bo\n");
      pthread_mutex_unlock(&bufmgr->lock);
      /* Preserve the old behaviour of just treating this as a
       * no-op rather than reporting the error.
       */
      return 0;
   }

   if (--bo->map_count == 0) {
      bo_mark_mmaps_incoherent(bo);
      bo->virtual = NULL;
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return ret;
}
int
brw_bo_subdata(struct brw_bo *bo, unsigned long offset,
               unsigned long size, const void *data)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_pwrite pwrite;
   int ret;

   memclear(pwrite);
   pwrite.handle = bo->gem_handle;
   pwrite.offset = offset;
   pwrite.size = size;
   pwrite.data_ptr = (uint64_t) (uintptr_t) data;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
          __FILE__, __LINE__, bo->gem_handle, (int) offset,
          (int) size, strerror(errno));
   }

   return ret;
}
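/*
 * Usage sketch (illustrative): uploading a small amount of data without
 * creating a CPU mapping of the whole object.  The constants array is
 * hypothetical.
 *
 *    static const float constants[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
 *    brw_bo_subdata(bo, 0, sizeof(constants), constants);
 */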
int
brw_bo_get_subdata(struct brw_bo *bo, unsigned long offset,
                   unsigned long size, void *data)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_pread pread;
   int ret;

   memclear(pread);
   pread.handle = bo->gem_handle;
   pread.offset = offset;
   pread.size = size;
   pread.data_ptr = (uint64_t) (uintptr_t) data;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
          __FILE__, __LINE__, bo->gem_handle, (int) offset,
          (int) size, strerror(errno));
   }

   return ret;
}
/** Waits for all GPU rendering with the object to have completed. */
void
brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo)
{
   set_domain(brw, "waiting for",
              bo, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
}
/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative
 * return value describes the error. Of particular interest is -ETIME when
 * the wait has failed to yield the desired result.
 *
 * Similar to brw_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is the internal locking semantics: this variant does not hold
 * the lock for the duration of the wait, which makes the wait subject to a
 * larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait does
 * not guarantee that the buffer won't be re-issued via another thread or a
 * flinked handle. Userspace must make sure this race does not occur if such
 * precision is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise; upgrade to the latest stable kernels if this is the case.
 */
int
brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_wait wait;
   int ret;

   memclear(wait);
   wait.bo_handle = bo->gem_handle;
   wait.timeout_ns = timeout_ns;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret == -1)
      return -errno;

   return ret;
}
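/*
 * Usage sketch (illustrative): bounded wait with a fallback.  A negative
 * timeout requests an infinite wait, subject to the kernel caveat noted
 * above.
 *
 *    if (brw_bo_wait(bo, 1000 * 1000) == -ETIME) {
 *       // still busy after 1 ms; back off or keep rendering elsewhere
 *    }
 */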
void
brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
{
   pthread_mutex_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
         list_del(&bo->head);
         bo_free(bo);
      }
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   free(bufmgr);
}
static int
bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drmIoctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}
int
brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
                  uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
   return 0;
}
struct brw_bo *
brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd,
                             int size)
{
   int ret;
   uint32_t handle;
   struct brw_bo *bo;
   struct drm_i915_gem_get_tiling get_tiling;

   pthread_mutex_lock(&bufmgr->lock);
   ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("create_from_prime: failed to obtain handle from fd: %s\n",
          strerror(errno));
      pthread_mutex_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo. The fd-to-handle ioctl really should
    * return the size, but it doesn't. If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size. Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guess size). */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;
   else
      bo->size = size;

   bo->bufmgr = bufmgr;

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   bo->name = "prime";
   bo->reusable = false;

   memclear(get_tiling);
   get_tiling.handle = bo->gem_handle;
   if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */

out:
   pthread_mutex_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}
int
brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}
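/*
 * Sketch of a prime (dma-buf) round trip between two bufmgr instances
 * (illustrative only; error handling omitted and "other_bufmgr" is
 * hypothetical):
 *
 *    int fd;
 *    if (brw_bo_gem_export_to_prime(bo, &fd) == 0) {
 *       struct brw_bo *imported =
 *          brw_bo_gem_create_from_prime(other_bufmgr, fd, bo->size);
 *       close(fd);   // the imported handle keeps its own reference
 *    }
 */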
int
brw_bo_flink(struct brw_bo *bo, uint32_t *name)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink;

      memclear(flink);
      flink.handle = bo->gem_handle;
      if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      pthread_mutex_lock(&bufmgr->lock);
      if (!bo->global_name) {
         bo->global_name = flink.name;
         bo->reusable = false;

         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      pthread_mutex_unlock(&bufmgr->lock);
   }

   *name = bo->global_name;
   return 0;
}
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr)
{
   bufmgr->bo_reuse = true;
}
static void
add_bucket(struct brw_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;
}
static void
init_cache_buckets(struct brw_bufmgr *bufmgr)
{
   unsigned long size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough. (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, 4096);
   add_bucket(bufmgr, 4096 * 2);
   add_bucket(bufmgr, 4096 * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}
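/*
 * For reference, the bucket sizes produced above work out to (in KB):
 *
 *    4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, ...
 *
 * that is, 4 KB, 8 KB and 12 KB, then four buckets per power of two up to
 * the 64 MB cache_max_size: 55 buckets in total, which fits within the
 * 14 * 4 entry cache_bucket array.
 */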
uint32_t
brw_create_hw_context(struct brw_bufmgr *bufmgr)
{
   struct drm_i915_gem_context_create create;
   int ret;

   memclear(create);
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret != 0) {
      DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
      return 0;
   }

   return create.ctx_id;
}
void
brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
{
   struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };

   if (ctx_id != 0 &&
       drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
      fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
              strerror(errno));
   }
}
int
brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read reg_read;
   int ret;

   memclear(reg_read);
   reg_read.offset = offset;

   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

   *result = reg_read.val;
   return ret;
}
void *
brw_bo_map__gtt(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (bo->gtt_virtual)
      return bo->gtt_virtual;

   pthread_mutex_lock(&bufmgr->lock);
   if (bo->gtt_virtual == NULL) {
      struct drm_i915_gem_mmap_gtt mmap_arg;
      void *ptr = MAP_FAILED;

      DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;

      /* Get the fake offset back... */
      if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg) == 0) {
         /* and mmap it */
         ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      }
      if (ptr == MAP_FAILED) {
         ptr = NULL;
      }

      bo->gtt_virtual = ptr;
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return bo->gtt_virtual;
}
void *
brw_bo_map__cpu(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (bo->mem_virtual)
      return bo->mem_virtual;

   pthread_mutex_lock(&bufmgr->lock);
   if (!bo->mem_virtual) {
      struct drm_i915_gem_mmap mmap_arg;

      DBG("bo_map: %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;
      mmap_arg.size = bo->size;
      if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
      } else {
         VG(VALGRIND_MALLOCLIKE_BLOCK
            (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
         bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
      }
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return bo->mem_virtual;
}
void *
brw_bo_map__wc(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (bo->wc_virtual)
      return bo->wc_virtual;

   pthread_mutex_lock(&bufmgr->lock);
   if (!bo->wc_virtual) {
      struct drm_i915_gem_mmap mmap_arg;

      DBG("bo_map: %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;
      mmap_arg.size = bo->size;
      mmap_arg.flags = I915_MMAP_WC;
      if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
      } else {
         VG(VALGRIND_MALLOCLIKE_BLOCK
            (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
         bo->wc_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
      }
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return bo->wc_virtual;
}
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
struct brw_bufmgr *
brw_bufmgr_init(struct gen_device_info *devinfo, int fd, int batch_size)
{
   struct brw_bufmgr *bufmgr;

   bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel. If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this! Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = fd;

   if (pthread_mutex_init(&bufmgr->lock, NULL) != 0) {
      free(bufmgr);
      return NULL;
   }

   bufmgr->has_llc = devinfo->has_llc;

   init_cache_buckets(bufmgr);

   bufmgr->name_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);

   return bufmgr;
}