/*
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2017 Intel Corporation
 * Copyright © 2006 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors: Thomas Hellström <thellstrom@vmware.com>
 *          Keith Whitwell <keithw@vmware.com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */
#include <util/u_atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>

#include "common/gen_clflush.h"
#include "dev/gen_debug.h"
#include "common/gen_gem.h"
#include "dev/gen_device_info.h"
#include "libdrm_macros.h"
#include "main/macros.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/os_file.h"
#include "util/u_dynarray.h"

#include "brw_bufmgr.h"
#include "brw_context.h"

#include "drm-uapi/i915_drm.h"
/* Bufmgr is not aware of brw_context. */
#define WARN_ONCE(cond, fmt...) do {                            \
   if (unlikely(cond)) {                                        \
      static bool _warned = false;                              \
      if (!_warned) {                                           \
         fprintf(stderr, "WARNING: ");                          \
         fprintf(stderr, fmt);                                  \
         _warned = true;                                        \
      }                                                         \
   }                                                            \
} while (0)
/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
 * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
 * leaked.  All because it does not call VG(cli_free) from its
 * VG_USERREQ__FREELIKE_BLOCK handler.  Instead of treating the memory like
 * an allocation, we mark it available for use upon mmapping and remove
 * it upon munmapping.
 */
#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))

#define PAGE_SIZE 4096

#define FILE_DEBUG_FLAG DEBUG_BUFMGR
static inline int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}
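
/* Usage sketch (illustrative, not part of the original file): this returns
 * true only when the counter already equals 'unless', in which case it is
 * left unmodified, e.g.
 *
 *    int refcount = 1;
 *    atomic_add_unless(&refcount, -1, 1);   // true, refcount stays 1
 *
 *    int refcount2 = 3;
 *    atomic_add_unless(&refcount2, -1, 1);  // false, refcount2 becomes 2
 *
 * brw_bo_unreference() below relies on this to drop references locklessly,
 * taking the bufmgr lock only for the final reference.
 */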
/**
 * i965 fixed-size bucketing VMA allocator.
 *
 * The BO cache maintains "cache buckets" for buffers of various sizes.
 * All buffers in a given bucket are identically sized - when allocating,
 * we always round up to the bucket size.  This means that virtually all
 * allocations are fixed-size; only buffers which are too large to fit in
 * a bucket can be variably-sized.
 *
 * We create an allocator for each bucket.  Each contains a free-list, where
 * each node contains a <starting address, 64-bit bitmap> pair.  Each bit
 * represents a bucket-sized block of memory.  (At the first level, each
 * bit corresponds to a page.  For the second bucket, bits correspond to
 * two pages, and so on.)  1 means a block is free, and 0 means it's in-use.
 * The lowest bit in the bitmap is for the first block.
 *
 * This makes allocations cheap - any bit of any node will do.  We can pick
 * the head of the list and use ffs() to find a free block.  If there are
 * none, we allocate 64 blocks from a larger allocator - either a bigger
 * bucketing allocator, or a fallback top-level allocator for large objects.
 */
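
/* Worked example (added for illustration; addresses are assumed, not from
 * the original comment): for a 4 KB bucket, one node covers 64 * 4 KB =
 * 256 KB.  A fresh node at start_address 0x100000 has bitmap ~1ull (bit 0
 * already handed out).  The next allocation finds bit 1 via ffsll(),
 * clears it, and returns 0x100000 + 1 * 4096 = 0x101000.  Freeing that
 * address rounds down to the 256 KB node base to locate the node, then
 * sets bit 1 again.
 */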
struct vma_bucket_node {
   uint64_t start_address;
   uint64_t bitmap;
};

struct bo_cache_bucket {
   /** List of cached BOs. */
   struct list_head head;

   /** Size of this bucket, in bytes. */
   uint64_t size;

   /** List of vma_bucket_nodes. */
   struct util_dynarray vma_list[BRW_MEMZONE_COUNT];
};
struct bo_export {
   /** File descriptor associated with a handle export. */
   int drm_fd;

   /** GEM handle in drm_fd */
   uint32_t gem_handle;

   struct list_head link;
};

struct brw_bufmgr {
   uint32_t refcount;

   struct list_head link;

   int fd;

   mtx_t lock;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct bo_cache_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;

   struct hash_table *name_table;
   struct hash_table *handle_table;

   struct util_vma_heap vma_allocator[BRW_MEMZONE_COUNT];

   bool has_llc:1;
   bool has_mmap_wc:1;
   bool has_mmap_offset:1;
   bool bo_reuse:1;

   uint64_t initial_kflags;
};
static mtx_t global_bufmgr_list_mutex = _MTX_INITIALIZER_NP;
static struct list_head global_bufmgr_list = {
   .next = &global_bufmgr_list,
   .prev = &global_bufmgr_list,
};
static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
                                  uint32_t stride);

static void bo_free(struct brw_bo *bo);

static uint64_t vma_alloc(struct brw_bufmgr *bufmgr,
                          enum brw_memory_zone memzone,
                          uint64_t size, uint64_t alignment);
static struct brw_bo *
hash_find_bo(struct hash_table *ht, unsigned int key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
   return entry ? (struct brw_bo *) entry->data : NULL;
}
static uint64_t
bo_tile_size(struct brw_bufmgr *bufmgr, uint64_t size, uint32_t tiling)
{
   if (tiling == I915_TILING_NONE)
      return size;

   /* 965+ just need multiples of page size for tiling */
   return ALIGN(size, PAGE_SIZE);
}
/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static uint32_t
bo_tile_pitch(struct brw_bufmgr *bufmgr, uint32_t pitch, uint32_t tiling)
{
   unsigned long tile_width;

   /* If untiled, then just align it so that we can do rendering
    * to it with the 3D engine.
    */
   if (tiling == I915_TILING_NONE)
      return ALIGN(pitch, 64);

   if (tiling == I915_TILING_X)
      tile_width = 512;
   else
      tile_width = 128;

   /* 965 is flexible */
   return ALIGN(pitch, tile_width);
}
/**
 * This function finds the correct bucket fit for the input size.
 * The function works with O(1) complexity when the requested size
 * was queried instead of iterating the size through all the buckets.
 */
static struct bo_cache_bucket *
bucket_for_size(struct brw_bufmgr *bufmgr, uint64_t size)
{
   /* Calculating the pages and rounding up to the page size. */
   const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

   /* Row  Bucket sizes    clz((x-1) | 3)   Row    Column
    *      in pages                        stride   size
    *   0:   1  2  3  4 -> 30 30 30 30        4       1
    *   1:   5  6  7  8 -> 29 29 29 29        4       1
    *   2:  10 12 14 16 -> 28 28 28 28        8       2
    *   3:  20 24 28 32 -> 27 27 27 27       16       4
    */
   const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
   const unsigned row_max_pages = 4 << row;

   /* The '& ~2' is the special case for row 1.  In row 1, max pages /
    * 2 is 2, but the previous row maximum is zero (because there is
    * no previous row).  All row maximum sizes are power of 2, so that
    * is the only case where that bit will be set.
    */
   const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
   int col_size_log2 = row - 1;
   col_size_log2 += (col_size_log2 < 0);

   const unsigned col = (pages - prev_row_max_pages +
                        ((1 << col_size_log2) - 1)) >> col_size_log2;

   /* Calculating the index based on the row and column. */
   const unsigned index = (row * 4) + (col - 1);

   return (index < bufmgr->num_buckets) ?
          &bufmgr->cache_bucket[index] : NULL;
}
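
/* Worked example (added for illustration): a 24 KB request gives pages = 6.
 * (6 - 1) | 3 = 7, so row = 30 - clz(7) = 1 and row_max_pages = 8.
 * prev_row_max_pages = (8 / 2) & ~2 = 4, col_size_log2 = 0, and
 * col = (6 - 4) >> 0 = 2, so index = 1 * 4 + (2 - 1) = 5: the six-page
 * (24 KB) bucket, matching row 1 of the table above.
 */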
static enum brw_memory_zone
memzone_for_address(uint64_t address)
{
   const uint64_t _4GB = 1ull << 32;

   if (address >= _4GB)
      return BRW_MEMZONE_OTHER;

   return BRW_MEMZONE_LOW_4G;
}
static uint64_t
bucket_vma_alloc(struct brw_bufmgr *bufmgr,
                 struct bo_cache_bucket *bucket,
                 enum brw_memory_zone memzone)
{
   struct util_dynarray *vma_list = &bucket->vma_list[memzone];
   struct vma_bucket_node *node;

   if (vma_list->size == 0) {
      /* This bucket allocator is out of space - allocate a new block of
       * memory for 64 blocks from a larger allocator (either a larger
       * bucket or util_vma).
       *
       * We align the address to the node size (64 blocks) so that
       * bucket_vma_free can easily compute the starting address of this
       * block by rounding any address we return down to the node size.
       *
       * Set the first bit used, and return the start address.
       */
      uint64_t node_size = 64ull * bucket->size;
      node = util_dynarray_grow(vma_list, struct vma_bucket_node, 1);

      if (unlikely(!node))
         return 0ull;

      uint64_t addr = vma_alloc(bufmgr, memzone, node_size, node_size);
      node->start_address = gen_48b_address(addr);
      node->bitmap = ~1ull;
      return node->start_address;
   }

   /* Pick any bit from any node - they're all the right size and free. */
   node = util_dynarray_top_ptr(vma_list, struct vma_bucket_node);
   int bit = ffsll(node->bitmap) - 1;
   assert(bit >= 0 && bit <= 63);

   /* Reserve the memory by clearing the bit. */
   assert((node->bitmap & (1ull << bit)) != 0ull);
   node->bitmap &= ~(1ull << bit);

   uint64_t addr = node->start_address + bit * bucket->size;

   /* If this node is now completely full, remove it from the free list. */
   if (node->bitmap == 0ull) {
      (void) util_dynarray_pop(vma_list, struct vma_bucket_node);
   }

   return addr;
}
static void
bucket_vma_free(struct bo_cache_bucket *bucket, uint64_t address)
{
   enum brw_memory_zone memzone = memzone_for_address(address);
   struct util_dynarray *vma_list = &bucket->vma_list[memzone];
   const uint64_t node_bytes = 64ull * bucket->size;
   struct vma_bucket_node *node = NULL;

   /* bucket_vma_alloc allocates 64 blocks at a time, and aligns it to
    * that 64 block size.  So, we can round down to get the starting address.
    */
   uint64_t start = (address / node_bytes) * node_bytes;

   /* Dividing the offset from start by bucket size gives us the bit index. */
   int bit = (address - start) / bucket->size;

   assert(start + bit * bucket->size == address);

   util_dynarray_foreach(vma_list, struct vma_bucket_node, cur) {
      if (cur->start_address == start) {
         node = cur;
         break;
      }
   }

   if (!node) {
      /* No node - the whole group of 64 blocks must have been in-use. */
      node = util_dynarray_grow(vma_list, struct vma_bucket_node, 1);

      if (unlikely(!node))
         return; /* bogus, leaks some GPU VMA, but nothing we can do... */

      node->start_address = start;
      node->bitmap = 0ull;
   }

   /* Set the bit to return the memory. */
   assert((node->bitmap & (1ull << bit)) == 0ull);
   node->bitmap |= 1ull << bit;

   /* The block might be entirely free now, and if so, we could return it
    * to the larger allocator.  But we may as well hang on to it, in case
    * we get more allocations at this block size.
    */
}
static struct bo_cache_bucket *
get_bucket_allocator(struct brw_bufmgr *bufmgr, uint64_t size)
{
   /* Skip using the bucket allocator for very large sizes, as it allocates
    * 64 of them and this can balloon rather quickly.
    */
   if (size > 1024 * PAGE_SIZE)
      return NULL;

   struct bo_cache_bucket *bucket = bucket_for_size(bufmgr, size);

   if (bucket && bucket->size == size)
      return bucket;

   return NULL;
}
/**
 * Allocate a section of virtual memory for a buffer, assigning an address.
 *
 * This uses either the bucket allocator for the given size, or the large
 * object allocator (util_vma).
 */
static uint64_t
vma_alloc(struct brw_bufmgr *bufmgr,
          enum brw_memory_zone memzone,
          uint64_t size,
          uint64_t alignment)
{
   /* Without softpin support, we let the kernel assign addresses. */
   assert(brw_using_softpin(bufmgr));

   alignment = ALIGN(alignment, PAGE_SIZE);

   struct bo_cache_bucket *bucket = get_bucket_allocator(bufmgr, size);
   uint64_t addr;

   if (bucket) {
      addr = bucket_vma_alloc(bufmgr, bucket, memzone);
   } else {
      addr = util_vma_heap_alloc(&bufmgr->vma_allocator[memzone], size,
                                 alignment);
   }

   assert((addr >> 48ull) == 0);
   assert((addr % alignment) == 0);

   return gen_canonical_address(addr);
}
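
/* Note (added for illustration): gen_canonical_address() sign-extends
 * bit 47 into bits 63:48, as the hardware expects for 48-bit virtual
 * addresses.  For example, an allocator result of 0x0000800000000000
 * becomes 0xffff800000000000, while addresses below 2^47 pass through
 * unchanged.  vma_free() undoes this with gen_48b_address().
 */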
/**
 * Free a virtual memory area, allowing the address to be reused.
 */
static void
vma_free(struct brw_bufmgr *bufmgr,
         uint64_t address,
         uint64_t size)
{
   assert(brw_using_softpin(bufmgr));

   /* Un-canonicalize the address. */
   address = gen_48b_address(address);

   if (address == 0ull)
      return;

   struct bo_cache_bucket *bucket = get_bucket_allocator(bufmgr, size);

   if (bucket) {
      bucket_vma_free(bucket, address);
   } else {
      enum brw_memory_zone memzone = memzone_for_address(address);
      util_vma_heap_free(&bufmgr->vma_allocator[memzone], address, size);
   }
}
bool
brw_bo_busy(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };

   int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0) {
      bo->idle = !busy.busy;
      return busy.busy;
   }
   return false;
}
int
brw_bo_madvise(struct brw_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv = {
      .handle = bo->gem_handle,
      .madv = state,
   };

   drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}
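
/* Usage sketch (illustrative, not original code): the cache marks idle BOs
 * purgeable and later checks whether the kernel kept their pages before
 * reusing them:
 *
 *    brw_bo_madvise(bo, I915_MADV_DONTNEED);       // on entering the cache
 *    ...
 *    if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
 *       // pages were reclaimed under memory pressure; the BO is unusable
 *    }
 */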
/* drop the oldest entries that have been purged by the kernel */
static void
brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
                          struct bo_cache_bucket *bucket)
{
   list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
      if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      bo_free(bo);
   }
}
static struct brw_bo *
bo_calloc(void)
{
   struct brw_bo *bo = calloc(1, sizeof(*bo));
   if (!bo)
      return NULL;

   list_inithead(&bo->exports);

   return bo;
}
static struct brw_bo *
bo_alloc_internal(struct brw_bufmgr *bufmgr,
                  const char *name,
                  uint64_t size,
                  enum brw_memory_zone memzone,
                  unsigned flags,
                  uint32_t tiling_mode,
                  uint32_t stride)
{
   struct brw_bo *bo;
   int ret;
   struct bo_cache_bucket *bucket;
   bool alloc_from_cache;
   uint64_t bo_size;
   bool busy = false;
   bool zeroed = false;

   if (flags & BO_ALLOC_BUSY)
      busy = true;

   if (flags & BO_ALLOC_ZEROED)
      zeroed = true;

   /* BUSY doesn't really jibe with ZEROED as we have to wait for it to
    * be idle before we can memset.  Just disallow that combination.
    */
   assert(!(busy && zeroed));

   /* Round the allocated size up to a power of two number of pages. */
   bucket = bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      unsigned int page_size = getpagesize();
      bo_size = size == 0 ? page_size : ALIGN(size, page_size);
   } else {
      bo_size = bucket->size;
   }

   mtx_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_is_empty(&bucket->head)) {
      if (busy && !zeroed) {
         /* Allocate new render-target BOs from the tail (MRU)
          * of the list, as it will likely be hot in the GPU
          * cache and in the aperture for us.  If the caller
          * asked us to zero the buffer, we don't want this
          * because we are going to mmap it.
          */
         bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
         list_del(&bo->head);
         alloc_from_cache = true;
      } else {
         /* For non-render-target BOs (where we're probably
          * going to map it first thing in order to fill it
          * with data), check if the last BO in the cache is
          * unbusy, and only reuse in that case.  Otherwise,
          * allocating a new buffer is probably faster than
          * waiting for the GPU to finish.
          */
         bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
         if (!brw_bo_busy(bo)) {
            alloc_from_cache = true;
            list_del(&bo->head);
         }
      }

      if (alloc_from_cache) {
         assert(list_is_empty(&bo->exports));
         if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
            bo_free(bo);
            brw_bo_cache_purge_bucket(bufmgr, bucket);
            goto retry;
         }

         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
            bo_free(bo);
            goto retry;
         }

         if (zeroed) {
            void *map = brw_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
            if (!map) {
               bo_free(bo);
               goto retry;
            }
            memset(map, 0, bo_size);
         }
      }
   }

   if (alloc_from_cache) {
      /* If the cache BO isn't in the right memory zone, free the old
       * memory and assign it a new address.
       */
      if ((bo->kflags & EXEC_OBJECT_PINNED) &&
          memzone != memzone_for_address(bo->gtt_offset)) {
         vma_free(bufmgr, bo->gtt_offset, bo->size);
         bo->gtt_offset = 0ull;
      }
   } else {
      bo = bo_calloc();
      if (!bo)
         goto err;

      bo->size = bo_size;
      bo->idle = true;

      struct drm_i915_gem_create create = { .size = bo_size };

      /* All new BOs we get from the kernel are zeroed, so we don't need to
       * worry about that here.
       */
      ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;

      bo->bufmgr = bufmgr;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
      bo->stride = 0;

      if (bo_set_tiling_internal(bo, tiling_mode, stride))
         goto err_free;

      /* Calling set_domain() will allocate pages for the BO outside of the
       * struct mutex lock in the kernel, which is more efficient than waiting
       * to create them during the first execbuf that uses the BO.
       */
      struct drm_i915_gem_set_domain sd = {
         .handle = bo->gem_handle,
         .read_domains = I915_GEM_DOMAIN_CPU,
         .write_domain = 0,
      };

      if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
         goto err_free;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = true;
   bo->cache_coherent = bufmgr->has_llc;
   bo->index = -1;
   bo->kflags = bufmgr->initial_kflags;

   if ((bo->kflags & EXEC_OBJECT_PINNED) && bo->gtt_offset == 0ull) {
      bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);

      if (bo->gtt_offset == 0ull)
         goto err_free;
   }

   mtx_unlock(&bufmgr->lock);

   DBG("bo_create: buf %d (%s) %llub\n", bo->gem_handle, bo->name,
       (unsigned long long) size);

   return bo;

err_free:
   bo_free(bo);
err:
   mtx_unlock(&bufmgr->lock);
   return NULL;
}
struct brw_bo *
brw_bo_alloc(struct brw_bufmgr *bufmgr,
             const char *name, uint64_t size,
             enum brw_memory_zone memzone)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            0, I915_TILING_NONE, 0);
}
struct brw_bo *
brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
                   uint64_t size, enum brw_memory_zone memzone,
                   uint32_t tiling_mode, uint32_t pitch,
                   unsigned flags)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            flags, tiling_mode, pitch);
}
struct brw_bo *
brw_bo_alloc_tiled_2d(struct brw_bufmgr *bufmgr, const char *name,
                      int x, int y, int cpp, enum brw_memory_zone memzone,
                      uint32_t tiling, uint32_t *pitch, unsigned flags)
{
   uint64_t size;
   uint32_t stride;
   unsigned long aligned_y, height_alignment;

   /* If we're tiled, our allocations are in 8 or 32-row blocks,
    * so failure to align our height means that we won't allocate
    * enough pages.
    *
    * If we're untiled, we still have to align to 2 rows high
    * because the data port accesses 2x2 blocks even if the
    * bottom row isn't to be rendered, so failure to align means
    * we could walk off the end of the GTT and fault.  This is
    * documented on 965, and may be the case on older chipsets
    * too so we try to be careful.
    */
   aligned_y = y;
   height_alignment = 2;

   if (tiling == I915_TILING_X)
      height_alignment = 8;
   else if (tiling == I915_TILING_Y)
      height_alignment = 32;
   aligned_y = ALIGN(y, height_alignment);

   stride = x * cpp;
   stride = bo_tile_pitch(bufmgr, stride, tiling);
   size = stride * aligned_y;
   size = bo_tile_size(bufmgr, size, tiling);
   *pitch = stride;

   if (tiling == I915_TILING_NONE)
      stride = 0;

   return bo_alloc_internal(bufmgr, name, size, memzone,
                            flags, tiling, stride);
}
/**
 * Returns a brw_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct brw_bo *
brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
                            const char *name, unsigned int handle)
{
   struct brw_bo *bo;

   /* At the moment most applications only have a few named BOs.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named.  And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   mtx_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   struct drm_gem_open open_arg = { .name = handle };
   int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      assert(list_is_empty(&bo->exports));
      brw_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;
   bo->external = true;
   bo->kflags = bufmgr->initial_kflags;

   if (bo->kflags & EXEC_OBJECT_PINNED)
      bo->gtt_offset = vma_alloc(bufmgr, BRW_MEMZONE_OTHER, bo->size, 1);

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}
static void
bo_free(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (bo->map_cpu) {
      VG_NOACCESS(bo->map_cpu, bo->size);
      drm_munmap(bo->map_cpu, bo->size);
   }
   if (bo->map_wc) {
      VG_NOACCESS(bo->map_wc, bo->size);
      drm_munmap(bo->map_wc, bo->size);
   }
   if (bo->map_gtt) {
      VG_NOACCESS(bo->map_gtt, bo->size);
      drm_munmap(bo->map_gtt, bo->size);
   }

   if (bo->external) {
      struct hash_entry *entry;

      if (bo->global_name) {
         entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
         _mesa_hash_table_remove(bufmgr->name_table, entry);
      }

      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
      _mesa_hash_table_remove(bufmgr->handle_table, entry);
   } else {
      assert(list_is_empty(&bo->exports));
   }

   /* Close this object */
   struct drm_gem_close close = { .handle = bo->gem_handle };
   int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   if (bo->kflags & EXEC_OBJECT_PINNED)
      vma_free(bo->bufmgr, bo->gtt_offset, bo->size);

   free(bo);
}
/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);

         bo_free(bo);
      }
   }

   bufmgr->time = time;
}
static void
bo_unreference_final(struct brw_bo *bo, time_t time)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   list_for_each_entry_safe(struct bo_export, export, &bo->exports, link) {
      struct drm_gem_close close = { .handle = export->gem_handle };
      gen_ioctl(export->drm_fd, DRM_IOCTL_GEM_CLOSE, &close);

      list_del(&export->link);
      free(export);
   }

   bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
       brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;

      bo->name = NULL;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}
void
brw_bo_unreference(struct brw_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct brw_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      mtx_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      mtx_unlock(&bufmgr->lock);
   }
}
static void
bo_wait_with_stall_warning(struct brw_context *brw,
                           struct brw_bo *bo,
                           const char *action)
{
   bool busy = brw && brw->perf_debug && !bo->idle;
   double elapsed = unlikely(busy) ? -get_time() : 0.0;

   brw_bo_wait_rendering(bo);

   if (unlikely(busy)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */
         perf_debug("%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
   }
}
static void
print_flags(unsigned flags)
{
   if (flags & MAP_READ)
      DBG("READ ");
   if (flags & MAP_WRITE)
      DBG("WRITE ");
   if (flags & MAP_ASYNC)
      DBG("ASYNC ");
   if (flags & MAP_PERSISTENT)
      DBG("PERSISTENT ");
   if (flags & MAP_COHERENT)
      DBG("COHERENT ");
   if (flags & MAP_RAW)
      DBG("RAW ");

   DBG("\n");
}
static void *
brw_bo_gem_mmap_legacy(struct brw_context *brw, struct brw_bo *bo, bool wc)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   struct drm_i915_gem_mmap mmap_arg = {
      .handle = bo->gem_handle,
      .size = bo->size,
      .flags = wc ? I915_MMAP_WC : 0,
   };

   int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0) {
      DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
          __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
      return NULL;
   }
   void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;

   return map;
}
static void *
brw_bo_gem_mmap_offset(struct brw_context *brw, struct brw_bo *bo, bool wc)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   struct drm_i915_gem_mmap_offset mmap_arg = {
      .handle = bo->gem_handle,
      .flags = wc ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };

   /* Get the fake offset back */
   int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg);
   if (ret != 0) {
      DBG("%s:%d: Error preparing buffer %d (%s): %s .\n",
          __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
      return NULL;
   }

   /* And map it */
   void *map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        bufmgr->fd, mmap_arg.offset);
   if (map == MAP_FAILED) {
      DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
          __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
      return NULL;
   }

   return map;
}
static void *
brw_bo_gem_mmap(struct brw_context *brw, struct brw_bo *bo, bool wc)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (bufmgr->has_mmap_offset)
      return brw_bo_gem_mmap_offset(brw, bo, wc);
   else
      return brw_bo_gem_mmap_legacy(brw, bo, wc);
}
static void *
brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   /* We disallow CPU maps for writing to non-coherent buffers, as the
    * CPU map can become invalidated when a batch is flushed out, which
    * can happen at unpredictable times.  You should use WC maps instead.
    */
   assert(bo->cache_coherent || !(flags & MAP_WRITE));

   if (!bo->map_cpu) {
      DBG("brw_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);

      void *map = brw_bo_gem_mmap(brw, bo, false);
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         drm_munmap(map, bo->size);
      }
   }
   assert(bo->map_cpu);

   DBG("brw_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
       bo->map_cpu);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(brw, bo, "CPU mapping");
   }

   if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
      /* If we're reusing an existing CPU mapping, the CPU caches may
       * contain stale data from the last time we read from that mapping.
       * (With the BO cache, it might even be data from a previous buffer!)
       * Even if it's a brand new mapping, the kernel may have zeroed the
       * buffer via CPU writes.
       *
       * We need to invalidate those cachelines so that we see the latest
       * contents, and so long as we only read from the CPU mmap we do not
       * need to write those cachelines back afterwards.
       *
       * On LLC, the empirical evidence suggests that writes from the GPU
       * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
       * cachelines. (Other reads, such as the display engine, bypass the
       * LLC entirely requiring us to keep dirty pixels for the scanout
       * out of any cache.)
       */
      gen_invalidate_range(bo->map_cpu, bo->size);
   }

   return bo->map_cpu;
}
static void *
brw_bo_map_wc(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (!bufmgr->has_mmap_wc)
      return NULL;

   if (!bo->map_wc) {
      DBG("brw_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
      void *map = brw_bo_gem_mmap(brw, bo, true);
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         drm_munmap(map, bo->size);
      }
   }
   assert(bo->map_wc);

   DBG("brw_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(brw, bo, "WC mapping");
   }

   return bo->map_wc;
}
/**
 * Perform an uncached mapping via the GTT.
 *
 * Write access through the GTT is not quite fully coherent. On low power
 * systems especially, like modern Atoms, we can observe reads from RAM before
 * the write via GTT has landed. A write memory barrier that flushes the Write
 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
 * read after the write as the GTT write suffers a small delay through the GTT
 * indirection. The kernel uses an uncached mmio read to ensure the GTT write
 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
 * flushes prior to execbuf submission. However, if we are not informing the
 * kernel about our GTT writes, it will not flush before earlier access, such
 * as when using the cmdparser. Similarly, we need to be careful if we should
 * ever issue a CPU read immediately following a GTT write.
 *
 * Telling the kernel about write access also has one more important
 * side-effect. Upon receiving notification about the write, it cancels any
 * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
 * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
 * actual scanout via an mmapping, only to a backbuffer and so all the FBC/PSR
 * tracking is handled on the buffer exchange instead.
 */
static void *
brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->map_gtt == NULL) {
      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };

      /* Get the fake offset back... */
      int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* and mmap it. */
      void *map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                           MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (map == MAP_FAILED) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
       * already intercept this mmap call. However, for consistency between
       * all the mmap paths, we mark the pointer as defined now and mark it
       * as inaccessible afterwards.
       */
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         drm_munmap(map, bo->size);
      }
   }
   assert(bo->map_gtt);

   DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(brw, bo, "GTT mapping");
   }

   return bo->map_gtt;
}
static bool
can_map_cpu(struct brw_bo *bo, unsigned flags)
{
   if (bo->cache_coherent)
      return true;

   /* Even if the buffer itself is not cache-coherent (such as a scanout), on
    * an LLC platform reads always are coherent (as they are performed via the
    * central system agent). It is just the writes that we need to take special
    * care to ensure that land in main memory and not stick in the CPU cache.
    */
   if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
      return true;

   /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
    * across batch flushes where the kernel will change cache domains of the
    * bo, invalidating continued access to the CPU mmap on non-LLC device.
    *
    * Similarly, ASYNC typically means that the buffer will be accessed via
    * both the CPU and the GPU simultaneously.  Batches may be executed that
    * use the BO even while it is mapped.  While OpenGL technically disallows
    * most drawing while non-persistent mappings are active, we may still use
    * the GPU for blits or other operations, causing batches to happen at
    * inconvenient times.
    */
   if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC))
      return false;

   return !(flags & MAP_WRITE);
}
void *
brw_bo_map(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
      return brw_bo_map_gtt(brw, bo, flags);

   void *map;

   if (can_map_cpu(bo, flags))
      map = brw_bo_map_cpu(brw, bo, flags);
   else
      map = brw_bo_map_wc(brw, bo, flags);

   /* Allow the attempt to fail by falling back to the GTT where necessary.
    *
    * Not every buffer can be mmaped directly using the CPU (or WC), for
    * example buffers that wrap stolen memory or are imported from other
    * devices. For those, we have little choice but to use a GTT mmapping.
    * However, if we use a slow GTT mmapping for reads where we expected fast
    * access, that order of magnitude difference in throughput will be clearly
    * expressed by angry users.
    *
    * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
    */
   if (!map && !(flags & MAP_RAW)) {
      perf_debug("Fallback GTT mapping for %s with access flags %x\n",
                 bo->name, flags);
      map = brw_bo_map_gtt(brw, bo, flags);
   }

   return map;
}
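
/* Summary of the mapping strategy above (added as a reader aid):
 *
 *    tiled and not MAP_RAW          -> GTT map (hardware detiling)
 *    can_map_cpu()                  -> CPU map (coherent, or read-only LLC)
 *    otherwise                      -> WC map
 *    CPU/WC failed and not MAP_RAW  -> GTT fallback
 */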
int
brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
               uint64_t size, const void *data)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   struct drm_i915_gem_pwrite pwrite = {
      .handle = bo->gem_handle,
      .offset = offset,
      .size = size,
      .data_ptr = (uint64_t) (uintptr_t) data,
   };

   int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error writing data to buffer %d: "
          "(%"PRIu64" %"PRIu64") %s .\n",
          __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
   }

   return ret;
}
/** Waits for all GPU rendering with the object to have completed. */
void
brw_bo_wait_rendering(struct brw_bo *bo)
{
   /* We require a kernel recent enough for WAIT_IOCTL support.
    * See intel_init_bufmgr()
    */
   brw_bo_wait(bo, -1);
}
/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative return
 * value describes the error. Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to brw_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait). This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle. Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise; upgrade to the latest stable kernels if this is the case.
 */
int
brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   /* If we know it's idle, don't bother with the kernel round trip */
   if (bo->idle && !bo->external)
      return 0;

   struct drm_i915_gem_wait wait = {
      .bo_handle = bo->gem_handle,
      .timeout_ns = timeout_ns,
   };
   int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return -errno;

   bo->idle = true;

   return ret;
}
void
brw_bufmgr_unref(struct brw_bufmgr *bufmgr)
{
   mtx_lock(&global_bufmgr_list_mutex);
   if (p_atomic_dec_zero(&bufmgr->refcount)) {
      list_del(&bufmgr->link);
   } else {
      bufmgr = NULL;
   }
   mtx_unlock(&global_bufmgr_list_mutex);

   if (!bufmgr)
      return;

   mtx_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }

      if (brw_using_softpin(bufmgr)) {
         for (int z = 0; z < BRW_MEMZONE_COUNT; z++) {
            util_dynarray_fini(&bucket->vma_list[z]);
         }
      }
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   if (brw_using_softpin(bufmgr)) {
      for (int z = 0; z < BRW_MEMZONE_COUNT; z++) {
         util_vma_heap_finish(&bufmgr->vma_allocator[z]);
      }
   }

   close(bufmgr->fd);
   free(bufmgr);
}
static int
bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drmIoctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}
void
brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
                  uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
}
static struct brw_bo *
brw_bo_gem_create_from_prime_internal(struct brw_bufmgr *bufmgr, int prime_fd,
                                      int tiling_mode, uint32_t stride)
{
   uint32_t handle;
   struct brw_bo *bo;

   mtx_lock(&bufmgr->lock);
   int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("create_from_prime: failed to obtain handle from fd: %s\n",
          strerror(errno));
      mtx_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      assert(list_is_empty(&bo->exports));
      brw_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guessed) size. */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   bo->name = "prime";
   bo->reusable = false;
   bo->external = true;
   bo->kflags = bufmgr->initial_kflags;

   if (bo->kflags & EXEC_OBJECT_PINNED) {
      assert(bo->size > 0);
      bo->gtt_offset = vma_alloc(bufmgr, BRW_MEMZONE_OTHER, bo->size, 1);
   }

   if (tiling_mode < 0) {
      struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
      if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
         goto err;

      bo->tiling_mode = get_tiling.tiling_mode;
      bo->swizzle_mode = get_tiling.swizzle_mode;
      /* XXX stride is unknown */
   } else {
      bo_set_tiling_internal(bo, tiling_mode, stride);
   }

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}
struct brw_bo *
brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd)
{
   return brw_bo_gem_create_from_prime_internal(bufmgr, prime_fd, -1, 0);
}

struct brw_bo *
brw_bo_gem_create_from_prime_tiled(struct brw_bufmgr *bufmgr, int prime_fd,
                                   uint32_t tiling_mode, uint32_t stride)
{
   assert(tiling_mode == I915_TILING_NONE ||
          tiling_mode == I915_TILING_X ||
          tiling_mode == I915_TILING_Y);

   return brw_bo_gem_create_from_prime_internal(bufmgr, prime_fd,
                                                tiling_mode, stride);
}
static void
brw_bo_make_external(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->external) {
      mtx_lock(&bufmgr->lock);
      if (!bo->external) {
         _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
         bo->external = true;
      }
      mtx_unlock(&bufmgr->lock);
   }
}
int
brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   brw_bo_make_external(bo);

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}
uint32_t
brw_bo_export_gem_handle(struct brw_bo *bo)
{
   brw_bo_make_external(bo);

   return bo->gem_handle;
}
int
brw_bo_flink(struct brw_bo *bo, uint32_t *name)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink = { .handle = bo->gem_handle };

      if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      brw_bo_make_external(bo);
      mtx_lock(&bufmgr->lock);
      if (!bo->global_name) {
         bo->global_name = flink.name;
         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      mtx_unlock(&bufmgr->lock);

      bo->reusable = false;
   }

   *name = bo->global_name;
   return 0;
}
int
brw_bo_export_gem_handle_for_device(struct brw_bo *bo, int drm_fd,
                                    uint32_t *out_handle)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   /* Only add the new GEM handle to the list of exports if it belongs to a
    * different GEM device. Otherwise we might close the same buffer multiple
    * times.
    */
   int ret = os_same_file_description(drm_fd, bufmgr->fd);
   WARN_ONCE(ret < 0,
             "Kernel has no file descriptor comparison support: %s\n",
             strerror(errno));
   if (ret == 0) {
      *out_handle = brw_bo_export_gem_handle(bo);
      return 0;
   }

   struct bo_export *export = calloc(1, sizeof(*export));
   if (!export)
      return -ENOMEM;

   export->drm_fd = drm_fd;

   int dmabuf_fd = -1;
   int err = brw_bo_gem_export_to_prime(bo, &dmabuf_fd);
   if (err) {
      free(export);
      return err;
   }

   mtx_lock(&bufmgr->lock);
   err = drmPrimeFDToHandle(drm_fd, dmabuf_fd, &export->gem_handle);
   close(dmabuf_fd);
   if (err) {
      mtx_unlock(&bufmgr->lock);
      free(export);
      return err;
   }

   bool found = false;
   list_for_each_entry(struct bo_export, iter, &bo->exports, link) {
      if (iter->drm_fd != drm_fd)
         continue;
      /* Here we assume that for a given DRM fd, we'll always get back the
       * same GEM handle for a given buffer.
       */
      assert(iter->gem_handle == export->gem_handle);
      free(export);
      export = iter;
      found = true;
      break;
   }
   if (!found)
      list_addtail(&export->link, &bo->exports);

   mtx_unlock(&bufmgr->lock);

   *out_handle = export->gem_handle;

   return 0;
}
static void
add_bucket(struct brw_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   if (brw_using_softpin(bufmgr)) {
      for (int z = 0; z < BRW_MEMZONE_COUNT; z++)
         util_dynarray_init(&bufmgr->cache_bucket[i].vma_list[z], NULL);
   }
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;

   assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
}
static void
init_cache_buckets(struct brw_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, PAGE_SIZE);
   add_bucket(bufmgr, PAGE_SIZE * 2);
   add_bucket(bufmgr, PAGE_SIZE * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}
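
/* Resulting bucket sizes (derived from the loop above): 4, 8, and 12 KB,
 * then 16/20/24/28 KB, 32/40/48/56 KB, and so on, doubling with three
 * intermediate sizes per power of two, up to 64 MB.  These are exactly
 * the rows that bucket_for_size() indexes with its row/column arithmetic.
 */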
uint32_t
brw_create_hw_context(struct brw_bufmgr *bufmgr)
{
   struct drm_i915_gem_context_create create = { };
   int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret != 0) {
      DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
      return 0;
   }

   return create.ctx_id;
}
int
brw_hw_context_set_priority(struct brw_bufmgr *bufmgr,
                            uint32_t ctx_id,
                            int priority)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = ctx_id,
      .param = I915_CONTEXT_PARAM_PRIORITY,
      .value = priority,
   };
   int err = 0;

   if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;

   return err;
}
void
brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
{
   struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };

   if (ctx_id != 0 &&
       drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
      fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
              strerror(errno));
   }
}
*bufmgr
, uint32_t offset
, uint64_t *result
)
1786 struct drm_i915_reg_read reg_read
= { .offset
= offset
};
1787 int ret
= drmIoctl(bufmgr
->fd
, DRM_IOCTL_I915_REG_READ
, ®_read
);
1789 *result
= reg_read
.val
;
static int
gem_param(int fd, int name)
{
   int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */

   struct drm_i915_getparam gp = { .param = name, .value = &v };
   if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
      return -1;

   return v;
}
, uint32_t context
, uint64_t param
, uint64_t *value
)
1808 struct drm_i915_gem_context_param gp
= {
1813 if (drmIoctl(fd
, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM
, &gp
))
bool
brw_using_softpin(struct brw_bufmgr *bufmgr)
{
   return bufmgr->initial_kflags & EXEC_OBJECT_PINNED;
}

static struct brw_bufmgr *
brw_bufmgr_ref(struct brw_bufmgr *bufmgr)
{
   p_atomic_inc(&bufmgr->refcount);
   return bufmgr;
}
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate,
 * map, and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
static struct brw_bufmgr *
brw_bufmgr_create(struct gen_device_info *devinfo, int fd, bool bo_reuse)
{
   struct brw_bufmgr *bufmgr;

   bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel.  If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this! Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = os_dupfd_cloexec(fd);
   if (bufmgr->fd < 0) {
      free(bufmgr);
      return NULL;
   }

   p_atomic_set(&bufmgr->refcount, 1);

   if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
      close(bufmgr->fd);
      free(bufmgr);
      return NULL;
   }

   uint64_t gtt_size;
   if (gem_context_getparam(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE, &gtt_size))
      gtt_size = 0;

   bufmgr->has_llc = devinfo->has_llc;
   bufmgr->has_mmap_wc = gem_param(fd, I915_PARAM_MMAP_VERSION) > 0;
   bufmgr->bo_reuse = bo_reuse;
   bufmgr->has_mmap_offset = gem_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;

   const uint64_t _4GB = 4ull << 30;

   /* The STATE_BASE_ADDRESS size field can only hold 1 page shy of 4GB */
   const uint64_t _4GB_minus_1 = _4GB - PAGE_SIZE;

   if (devinfo->gen >= 8 && gtt_size > _4GB) {
      bufmgr->initial_kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;

      /* Allocate VMA in userspace if we have softpin and full PPGTT. */
      if (gem_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN) > 0 &&
          gem_param(fd, I915_PARAM_HAS_ALIASING_PPGTT) > 1) {
         bufmgr->initial_kflags |= EXEC_OBJECT_PINNED;

         util_vma_heap_init(&bufmgr->vma_allocator[BRW_MEMZONE_LOW_4G],
                            PAGE_SIZE, _4GB_minus_1);

         /* Leave the last 4GB out of the high vma range, so that no state
          * base address + size can overflow 48 bits.
          */
         util_vma_heap_init(&bufmgr->vma_allocator[BRW_MEMZONE_OTHER],
                            1 * _4GB, gtt_size - 2 * _4GB);
      } else if (devinfo->gen >= 10) {
         /* Softpin landed in 4.5, but GVT used an aliasing PPGTT until
          * kernel commit 6b3816d69628becb7ff35978aa0751798b4a940a in
          * 4.14.  Gen10+ GVT hasn't landed yet, so it's not actually a
          * problem - but extending this requirement back to earlier gens
          * might actually mean requiring 4.14.
          */
         fprintf(stderr, "i965 requires softpin (Kernel 4.5) on Gen10+.");
         close(bufmgr->fd);
         free(bufmgr);
         return NULL;
      }
   }

   init_cache_buckets(bufmgr);

   bufmgr->name_table =
      _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, _mesa_hash_uint, _mesa_key_uint_equal);

   return bufmgr;
}
*devinfo
, int fd
, bool bo_reuse
)
1934 struct brw_bufmgr
*bufmgr
= NULL
;
1936 mtx_lock(&global_bufmgr_list_mutex
);
1937 list_for_each_entry(struct brw_bufmgr
, iter_bufmgr
, &global_bufmgr_list
, link
) {
1938 struct stat iter_st
;
1939 if (fstat(iter_bufmgr
->fd
, &iter_st
))
1942 if (st
.st_rdev
== iter_st
.st_rdev
) {
1943 assert(iter_bufmgr
->bo_reuse
== bo_reuse
);
1944 bufmgr
= brw_bufmgr_ref(iter_bufmgr
);
1949 bufmgr
= brw_bufmgr_create(devinfo
, fd
, bo_reuse
);
1950 list_addtail(&bufmgr
->link
, &global_bufmgr_list
);
1953 mtx_unlock(&global_bufmgr_list_mutex
);
int
brw_bufmgr_get_fd(struct brw_bufmgr *bufmgr)
{
   return bufmgr->fd;
}