/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */
#include <xf86drm.h>
#include <util/u_atomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <stdbool.h>
#include <errno.h>

#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

#include "common/gen_debug.h"
#include "libdrm_macros.h"
#include "main/macros.h"
#include "util/macros.h"
#include "util/list.h"
#include "brw_bufmgr.h"
#include "intel_chipset.h"

#include "i915_drm.h"
#include "uthash.h"
#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define memclear(s) memset(&s, 0, sizeof(s))

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

#define DBG(...) do {                                 \
   if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG))       \
      fprintf(stderr, __VA_ARGS__);                   \
} while (0)
static inline int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}
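
/* Illustrative note (not part of the original file): atomic_add_unless()
 * returns true only when the add was *skipped* because the counter already
 * held "unless". drm_bacon_bo_unreference() below relies on this: it tries
 * to drop a reference without taking the bufmgr mutex, and only falls into
 * the locked path when the count is about to hit zero, e.g.:
 *
 *    if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
 *       ...take the lock, drop the final reference, free or cache the bo...
 *    }
 */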
/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))
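
/* Example (illustrative, not part of the original file): the debug code
 * below prints 64-bit GTT offsets as two 32-bit halves, so a value such as
 * 0x0000000123456789 prints as "0x00000001 23456789":
 *
 *    DBG("@0x%08x %08x\n", upper_32_bits(offset), lower_32_bits(offset));
 */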
struct _drm_bacon_context {
   unsigned int ctx_id;
   struct _drm_bacon_bufmgr *bufmgr;
};

typedef struct _drm_bacon_bo_gem drm_bacon_bo_gem;
struct drm_bacon_gem_bo_bucket {
   struct list_head head;
   unsigned long size;
};
typedef struct _drm_bacon_bufmgr {
   int fd;

   int max_relocs;

   pthread_mutex_t lock;

   struct drm_i915_gem_exec_object2 *exec2_objects;
   drm_bacon_bo **exec_bos;
   int exec_size;
   int exec_count;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct drm_bacon_gem_bo_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;

   struct list_head managers;

   drm_bacon_bo_gem *name_table;
   drm_bacon_bo_gem *handle_table;

   struct list_head vma_cache;
   int vma_count, vma_open, vma_max;

   uint64_t gtt_size;
   unsigned int has_bsd : 1;
   unsigned int has_blt : 1;
   unsigned int has_llc : 1;
   unsigned int bo_reuse : 1;
   unsigned int no_exec : 1;
   unsigned int has_vebox : 1;
   unsigned int has_exec_async : 1;

   struct {
      void *ptr;
      uint32_t handle;
   } userptr_active;
} drm_bacon_bufmgr;
typedef struct _drm_bacon_reloc_target_info {
   drm_bacon_bo *bo;
} drm_bacon_reloc_target;
struct _drm_bacon_bo_gem {
   drm_bacon_bo bo;

   int refcount;
   uint32_t gem_handle;
   const char *name;

   /**
    * Kernel-assigned global name for this object
    *
    * List contains both flink named and prime fd'd objects
    */
   unsigned int global_name;

   UT_hash_handle handle_hh;
   UT_hash_handle name_hh;

   /**
    * Index of the buffer within the validation list while preparing a
    * batchbuffer execution.
    */
   int validate_index;

   /**
    * Current tiling mode
    */
   uint32_t tiling_mode;
   uint32_t swizzle_mode;
   unsigned long stride;

   unsigned long kflags;

   time_t free_time;

   /** Array passed to the DRM containing relocation information. */
   struct drm_i915_gem_relocation_entry *relocs;
   /**
    * Array of info structs corresponding to relocs[i].target_handle etc
    */
   drm_bacon_reloc_target *reloc_target_info;
   /** Number of entries in relocs */
   int reloc_count;
   /** Array of BOs that are referenced by this buffer and will be softpinned */
   drm_bacon_bo **softpin_target;
   /** Number of softpinned BOs that are referenced by this buffer */
   int softpin_target_count;
   /** Maximum number of softpinned BOs that are referenced by this buffer */
   int softpin_target_size;

   /** Mapped address for the buffer, saved across map/unmap cycles */
   void *mem_virtual;
   /** GTT virtual address for the buffer, saved across map/unmap cycles */
   void *gtt_virtual;
   /** WC CPU address for the buffer, saved across map/unmap cycles */
   void *wc_virtual;
   /**
    * Virtual address of the buffer allocated by user, used for userptr
    * objects only.
    */
   void *user_virtual;
   int map_count;
   struct list_head vma_list;

   /** BO cache list */
   struct list_head head;

   /**
    * Boolean of whether this BO and its children have been included in
    * the current drm_bacon_bufmgr_check_aperture_space() total.
    */
   bool included_in_check_aperture;

   /**
    * Boolean of whether this buffer has been used as a relocation
    * target and had its size accounted for, and thus can't have any
    * further relocations added to it.
    */
   bool used_as_reloc_target;

   /**
    * Boolean of whether we have encountered an error whilst building the relocation tree.
    */
   bool has_error;

   /**
    * Boolean of whether this buffer can be re-used
    */
   bool reusable;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   /**
    * Boolean of whether this buffer was allocated with userptr
    */
   bool is_userptr;

   /**
    * Size in bytes of this buffer and its relocation descendents.
    *
    * Used to avoid costly tree walking in
    * drm_bacon_bufmgr_check_aperture in the common case.
    */
   int reloc_tree_size;

   /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
   bool mapped_cpu_write;
};
static unsigned int
drm_bacon_gem_estimate_batch_space(drm_bacon_bo **bo_array, int count);

static unsigned int
drm_bacon_gem_compute_batch_space(drm_bacon_bo **bo_array, int count);

static int
drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride);

static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
                                                      time_t time);

static void drm_bacon_gem_bo_free(drm_bacon_bo *bo);
static inline drm_bacon_bo_gem *to_bo_gem(drm_bacon_bo *bo)
{
   return (drm_bacon_bo_gem *)bo;
}
static unsigned long
drm_bacon_gem_bo_tile_size(drm_bacon_bufmgr *bufmgr, unsigned long size,
                           uint32_t *tiling_mode)
{
   if (*tiling_mode == I915_TILING_NONE)
      return size;

   /* 965+ just need multiples of page size for tiling */
   return ALIGN(size, 4096);
}
/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_bacon_gem_bo_tile_pitch(drm_bacon_bufmgr *bufmgr,
                            unsigned long pitch, uint32_t *tiling_mode)
{
   unsigned long tile_width;

   /* If untiled, then just align it so that we can do rendering
    * to it with the 3D engine.
    */
   if (*tiling_mode == I915_TILING_NONE)
      return ALIGN(pitch, 64);

   if (*tiling_mode == I915_TILING_X)
      tile_width = 512;
   else
      tile_width = 128;

   /* 965 is flexible */
   return ALIGN(pitch, tile_width);
}
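
/* A minimal sketch (not part of the original file) of how the two helpers
 * above combine: compute the pitch and allocation size for a hypothetical
 * 1920x1080, 4-byte-per-pixel X-tiled surface. The rounding of the height
 * to the 8-row X-tile block mirrors what drm_bacon_bo_alloc_tiled() does
 * further down.
 */
static inline unsigned long
example_xtiled_alloc_size(drm_bacon_bufmgr *bufmgr)
{
   uint32_t tiling = I915_TILING_X;
   unsigned long pitch =
      drm_bacon_gem_bo_tile_pitch(bufmgr, 1920 * 4, &tiling);
   /* 1080 rows rounded up to the 8-row block used for X tiling */
   unsigned long size = pitch * ALIGN(1080, 8);
   return drm_bacon_gem_bo_tile_size(bufmgr, size, &tiling);
}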
static struct drm_bacon_gem_bo_bucket *
drm_bacon_gem_bo_bucket_for_size(drm_bacon_bufmgr *bufmgr,
                                 unsigned long size)
{
   int i;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct drm_bacon_gem_bo_bucket *bucket =
         &bufmgr->cache_bucket[i];
      if (bucket->size >= size) {
         return bucket;
      }
   }

   return NULL;
}
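
/* Note (illustrative, not part of the original file): cache_bucket[] is kept
 * sorted by size, so the linear scan above returns the smallest bucket that
 * fits; e.g. a 5000-byte request would land in an 8192-byte bucket, assuming
 * the usual power-of-two-with-intermediate-steps bucket initialization,
 * which is not shown in this excerpt.
 */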
static void
drm_bacon_gem_dump_validation_list(drm_bacon_bufmgr *bufmgr)
{
   int i, j;

   for (i = 0; i < bufmgr->exec_count; i++) {
      drm_bacon_bo *bo = bufmgr->exec_bos[i];
      drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

      if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
         DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
             bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
             bo_gem->name);
         continue;
      }

      for (j = 0; j < bo_gem->reloc_count; j++) {
         drm_bacon_bo *target_bo = bo_gem->reloc_target_info[j].bo;
         drm_bacon_bo_gem *target_gem =
             (drm_bacon_bo_gem *) target_bo;

         DBG("%2d: %d %s(%s)@0x%08x %08x -> "
             "%d (%s)@0x%08x %08x + 0x%08x\n",
             i,
             bo_gem->gem_handle,
             bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
             bo_gem->name,
             upper_32_bits(bo_gem->relocs[j].offset),
             lower_32_bits(bo_gem->relocs[j].offset),
             target_gem->gem_handle,
             target_gem->name,
             upper_32_bits(target_bo->offset64),
             lower_32_bits(target_bo->offset64),
             bo_gem->relocs[j].delta);
      }

      for (j = 0; j < bo_gem->softpin_target_count; j++) {
         drm_bacon_bo *target_bo = bo_gem->softpin_target[j];
         drm_bacon_bo_gem *target_gem =
             (drm_bacon_bo_gem *) target_bo;
         DBG("%2d: %d %s(%s) -> "
             "%d *(%s)@0x%08x %08x\n",
             i,
             bo_gem->gem_handle,
             bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
             bo_gem->name,
             target_gem->gem_handle,
             target_gem->name,
             upper_32_bits(target_bo->offset64),
             lower_32_bits(target_bo->offset64));
      }
   }
}
void
drm_bacon_bo_reference(drm_bacon_bo *bo)
{
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

   p_atomic_inc(&bo_gem->refcount);
}
static void
drm_bacon_add_validate_buffer2(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;
   int index;

   if (bo_gem->validate_index != -1)
      return;

   /* Extend the array of validation entries as necessary. */
   if (bufmgr->exec_count == bufmgr->exec_size) {
      int new_size = bufmgr->exec_size * 2;

      if (new_size == 0)
         new_size = 5;

      bufmgr->exec2_objects =
         realloc(bufmgr->exec2_objects,
                 sizeof(*bufmgr->exec2_objects) * new_size);
      bufmgr->exec_bos =
         realloc(bufmgr->exec_bos,
                 sizeof(*bufmgr->exec_bos) * new_size);
      bufmgr->exec_size = new_size;
   }

   index = bufmgr->exec_count;
   bo_gem->validate_index = index;
   /* Fill in array entry */
   bufmgr->exec2_objects[index].handle = bo_gem->gem_handle;
   bufmgr->exec2_objects[index].relocation_count = bo_gem->reloc_count;
   bufmgr->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
   bufmgr->exec2_objects[index].alignment = bo->align;
   bufmgr->exec2_objects[index].offset = bo->offset64;
   bufmgr->exec2_objects[index].flags = bo_gem->kflags;
   bufmgr->exec2_objects[index].rsvd1 = 0;
   bufmgr->exec2_objects[index].rsvd2 = 0;
   bufmgr->exec_bos[index] = bo;
   bufmgr->exec_count++;
}
static void
drm_bacon_bo_gem_set_in_aperture_size(drm_bacon_bufmgr *bufmgr,
                                      drm_bacon_bo_gem *bo_gem,
                                      unsigned int alignment)
{
   unsigned int size;

   assert(!bo_gem->used_as_reloc_target);

   /* The older chipsets are far less flexible in terms of tiling,
    * and require tiled buffers to be size aligned in the aperture.
    * This means that in the worst possible case we will need a hole
    * twice as large as the object in order for it to fit into the
    * aperture. Optimal packing is for wimps.
    */
   size = bo_gem->bo.size;

   bo_gem->reloc_tree_size = size + alignment;
}
static int
drm_bacon_setup_reloc_list(drm_bacon_bo *bo)
{
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   unsigned int max_relocs = bufmgr->max_relocs;

   if (bo->size / 4 < max_relocs)
      max_relocs = bo->size / 4;

   bo_gem->relocs = malloc(max_relocs *
                           sizeof(struct drm_i915_gem_relocation_entry));
   bo_gem->reloc_target_info = malloc(max_relocs *
                                      sizeof(drm_bacon_reloc_target));
   if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
      bo_gem->has_error = true;

      free (bo_gem->relocs);
      bo_gem->relocs = NULL;

      free (bo_gem->reloc_target_info);
      bo_gem->reloc_target_info = NULL;

      return 1;
   }

   return 0;
}
int
drm_bacon_bo_busy(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_i915_gem_busy busy;
   int ret;

   if (bo_gem->reusable && bo_gem->idle)
      return false;

   memclear(busy);
   busy.handle = bo_gem->gem_handle;

   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0) {
      bo_gem->idle = !busy.busy;
   }

   return (ret == 0 && busy.busy);
}
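
/* Usage sketch (illustrative, not part of the original file):
 * drm_bacon_bo_busy() is a non-blocking query, so callers that need to
 * block should use drm_bacon_gem_bo_wait() or
 * drm_bacon_bo_wait_rendering() below rather than spinning:
 *
 *    if (!drm_bacon_bo_busy(bo))
 *       ...safe to reuse or CPU-map the buffer without stalling...
 */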
static int
drm_bacon_gem_bo_madvise_internal(drm_bacon_bufmgr *bufmgr,
                                  drm_bacon_bo_gem *bo_gem, int state)
{
   struct drm_i915_gem_madvise madv;

   memclear(madv);
   madv.handle = bo_gem->gem_handle;
   madv.madv = state;
   madv.retained = 1;
   drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}

int
drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv)
{
   return drm_bacon_gem_bo_madvise_internal(bo->bufmgr,
                                            (drm_bacon_bo_gem *) bo,
                                            madv);
}
/* drop the oldest entries that have been purged by the kernel */
static void
drm_bacon_gem_bo_cache_purge_bucket(drm_bacon_bufmgr *bufmgr,
                                    struct drm_bacon_gem_bo_bucket *bucket)
{
   while (!list_empty(&bucket->head)) {
      drm_bacon_bo_gem *bo_gem;

      bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                          bucket->head.next, head);
      if (drm_bacon_gem_bo_madvise_internal
          (bufmgr, bo_gem, I915_MADV_DONTNEED))
         break;

      list_del(&bo_gem->head);
      drm_bacon_gem_bo_free(&bo_gem->bo);
   }
}
static drm_bacon_bo *
drm_bacon_gem_bo_alloc_internal(drm_bacon_bufmgr *bufmgr,
                                const char *name,
                                unsigned long size,
                                unsigned long flags,
                                uint32_t tiling_mode,
                                unsigned long stride,
                                unsigned int alignment)
{
   drm_bacon_bo_gem *bo_gem;
   unsigned int page_size = getpagesize();
   int ret;
   struct drm_bacon_gem_bo_bucket *bucket;
   bool alloc_from_cache;
   unsigned long bo_size;
   bool for_render = false;

   if (flags & BO_ALLOC_FOR_RENDER)
      for_render = true;

   /* Round the allocated size up to a power of two number of pages. */
   bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      bo_size = size;
      if (bo_size < page_size)
         bo_size = page_size;
   } else {
      bo_size = bucket->size;
   }

   pthread_mutex_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      if (for_render) {
         /* Allocate new render-target BOs from the tail (MRU)
          * of the list, as it will likely be hot in the GPU
          * cache and in the aperture for us.
          */
         bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                             bucket->head.prev, head);
         list_del(&bo_gem->head);
         alloc_from_cache = true;
         bo_gem->bo.align = alignment;
      } else {
         assert(alignment == 0);
         /* For non-render-target BOs (where we're probably
          * going to map it first thing in order to fill it
          * with data), check if the last BO in the cache is
          * unbusy, and only reuse in that case. Otherwise,
          * allocating a new buffer is probably faster than
          * waiting for the GPU to finish.
          */
         bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                             bucket->head.next, head);
         if (!drm_bacon_bo_busy(&bo_gem->bo)) {
            alloc_from_cache = true;
            list_del(&bo_gem->head);
         }
      }

      if (alloc_from_cache) {
         if (!drm_bacon_gem_bo_madvise_internal
             (bufmgr, bo_gem, I915_MADV_WILLNEED)) {
            drm_bacon_gem_bo_free(&bo_gem->bo);
            drm_bacon_gem_bo_cache_purge_bucket(bufmgr,
                                                bucket);
            goto retry;
         }

         if (drm_bacon_gem_bo_set_tiling_internal(&bo_gem->bo,
                                                  tiling_mode,
                                                  stride)) {
            drm_bacon_gem_bo_free(&bo_gem->bo);
            goto retry;
         }
      }
   }

   if (!alloc_from_cache) {
      struct drm_i915_gem_create create;

      bo_gem = calloc(1, sizeof(*bo_gem));
      if (!bo_gem)
         goto err;

      /* drm_bacon_gem_bo_free calls list_del() for an uninitialized
         list (vma_list), so better set the list head here */
      list_inithead(&bo_gem->vma_list);

      bo_gem->bo.size = bo_size;

      memclear(create);
      create.size = bo_size;

      ret = drmIoctl(bufmgr->fd,
                     DRM_IOCTL_I915_GEM_CREATE,
                     &create);
      if (ret != 0) {
         free(bo_gem);
         goto err;
      }

      bo_gem->gem_handle = create.handle;
      HASH_ADD(handle_hh, bufmgr->handle_table,
               gem_handle, sizeof(bo_gem->gem_handle),
               bo_gem);

      bo_gem->bo.handle = bo_gem->gem_handle;
      bo_gem->bo.bufmgr = bufmgr;
      bo_gem->bo.align = alignment;

      bo_gem->tiling_mode = I915_TILING_NONE;
      bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
      bo_gem->stride = 0;

      if (drm_bacon_gem_bo_set_tiling_internal(&bo_gem->bo,
                                               tiling_mode,
                                               stride))
         goto err_free;
   }

   bo_gem->name = name;
   p_atomic_set(&bo_gem->refcount, 1);
   bo_gem->validate_index = -1;
   bo_gem->used_as_reloc_target = false;
   bo_gem->has_error = false;
   bo_gem->reusable = true;

   drm_bacon_bo_gem_set_in_aperture_size(bufmgr, bo_gem, alignment);
   pthread_mutex_unlock(&bufmgr->lock);

   DBG("bo_create: buf %d (%s) %ldb\n",
       bo_gem->gem_handle, bo_gem->name, size);

   return &bo_gem->bo;

err_free:
   drm_bacon_gem_bo_free(&bo_gem->bo);
err:
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}
drm_bacon_bo *
drm_bacon_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
                              const char *name,
                              unsigned long size,
                              unsigned int alignment)
{
   return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size,
                                          BO_ALLOC_FOR_RENDER,
                                          I915_TILING_NONE, 0,
                                          alignment);
}

drm_bacon_bo *
drm_bacon_bo_alloc(drm_bacon_bufmgr *bufmgr,
                   const char *name,
                   unsigned long size,
                   unsigned int alignment)
{
   return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, 0,
                                          I915_TILING_NONE, 0, 0);
}
drm_bacon_bo *
drm_bacon_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
                         int x, int y, int cpp, uint32_t *tiling_mode,
                         unsigned long *pitch, unsigned long flags)
{
   unsigned long size, stride;
   uint32_t tiling;

   do {
      unsigned long aligned_y, height_alignment;

      tiling = *tiling_mode;

      /* If we're tiled, our allocations are in 8 or 32-row blocks,
       * so failure to align our height means that we won't allocate
       * enough pages.
       *
       * If we're untiled, we still have to align to 2 rows high
       * because the data port accesses 2x2 blocks even if the
       * bottom row isn't to be rendered, so failure to align means
       * we could walk off the end of the GTT and fault.  This is
       * documented on 965, and may be the case on older chipsets
       * too so we try to be careful.
       */
      aligned_y = y;
      height_alignment = 2;

      if (tiling == I915_TILING_X)
         height_alignment = 8;
      else if (tiling == I915_TILING_Y)
         height_alignment = 32;
      aligned_y = ALIGN(y, height_alignment);

      stride = x * cpp;
      stride = drm_bacon_gem_bo_tile_pitch(bufmgr, stride, tiling_mode);
      size = stride * aligned_y;
      size = drm_bacon_gem_bo_tile_size(bufmgr, size, tiling_mode);
   } while (*tiling_mode != tiling);

   *pitch = stride;

   if (tiling == I915_TILING_NONE)
      stride = 0;

   return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, flags,
                                          tiling, stride, 0);
}
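
/* A minimal usage sketch (not part of the original file), assuming a valid
 * bufmgr: allocate a 256x64, 4-bpp X-tiled surface. The kernel may
 * downgrade the tiling mode, in which case the do/while loop above
 * recomputes the pitch and size for the mode actually granted.
 */
static inline drm_bacon_bo *
example_alloc_xtiled(drm_bacon_bufmgr *bufmgr)
{
   uint32_t tiling = I915_TILING_X;
   unsigned long pitch = 0;
   drm_bacon_bo *bo = drm_bacon_bo_alloc_tiled(bufmgr, "example surface",
                                               256, 64, 4, &tiling,
                                               &pitch, 0);
   /* On return, tiling holds the mode actually set, pitch the row stride. */
   return bo;
}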
drm_bacon_bo *
drm_bacon_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
                           const char *name,
                           void *addr,
                           uint32_t tiling_mode,
                           uint32_t stride,
                           unsigned long size,
                           unsigned long flags)
{
   drm_bacon_bo_gem *bo_gem;
   int ret;
   struct drm_i915_gem_userptr userptr;

   /* Tiling with userptr surfaces is not supported
    * on all hardware so refuse it for time being.
    */
   if (tiling_mode != I915_TILING_NONE)
      return NULL;

   bo_gem = calloc(1, sizeof(*bo_gem));
   if (!bo_gem)
      return NULL;

   p_atomic_set(&bo_gem->refcount, 1);
   list_inithead(&bo_gem->vma_list);

   bo_gem->bo.size = size;

   memclear(userptr);
   userptr.user_ptr = (__u64)((unsigned long)addr);
   userptr.user_size = size;
   userptr.flags = flags;

   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_USERPTR,
                  &userptr);
   if (ret != 0) {
      DBG("bo_create_userptr: "
          "ioctl failed with user ptr %p size 0x%lx, "
          "user flags 0x%lx\n", addr, size, flags);
      free(bo_gem);
      return NULL;
   }

   pthread_mutex_lock(&bufmgr->lock);

   bo_gem->gem_handle = userptr.handle;
   bo_gem->bo.handle = bo_gem->gem_handle;
   bo_gem->bo.bufmgr = bufmgr;
   bo_gem->is_userptr = true;
   bo_gem->bo.virtual = addr;
   /* Save the address provided by user */
   bo_gem->user_virtual = addr;
   bo_gem->tiling_mode = I915_TILING_NONE;
   bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
   bo_gem->stride = 0;

   HASH_ADD(handle_hh, bufmgr->handle_table,
            gem_handle, sizeof(bo_gem->gem_handle),
            bo_gem);

   bo_gem->name = name;
   bo_gem->validate_index = -1;
   bo_gem->used_as_reloc_target = false;
   bo_gem->has_error = false;
   bo_gem->reusable = false;

   drm_bacon_bo_gem_set_in_aperture_size(bufmgr, bo_gem, 0);
   pthread_mutex_unlock(&bufmgr->lock);

   DBG("bo_create_userptr: "
       "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
       addr, bo_gem->gem_handle, bo_gem->name,
       size, stride, tiling_mode);

   return &bo_gem->bo;
}
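
/* Usage sketch (illustrative, not part of the original file): userptr
 * objects wrap existing, page-aligned client memory, so a caller typically
 * pairs posix_memalign() with drm_bacon_bo_alloc_userptr(); tiled userptr
 * surfaces are rejected above. The memory must outlive the bo.
 *
 *    void *ptr;
 *    if (posix_memalign(&ptr, 4096, 4096) == 0) {
 *       drm_bacon_bo *bo =
 *          drm_bacon_bo_alloc_userptr(bufmgr, "wrapped", ptr,
 *                                     I915_TILING_NONE, 0, 4096, 0);
 *       ...
 *    }
 */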
static bool
drm_bacon_has_userptr(drm_bacon_bufmgr *bufmgr)
{
   int ret;
   void *ptr;
   long pgsz;
   struct drm_i915_gem_userptr userptr;

   pgsz = sysconf(_SC_PAGESIZE);
   assert(pgsz > 0);

   ret = posix_memalign(&ptr, pgsz, pgsz);
   if (ret) {
      DBG("Failed to get a page (%ld) for userptr detection!\n",
          pgsz);
      return false;
   }

   memclear(userptr);
   userptr.user_ptr = (__u64)(unsigned long)ptr;
   userptr.user_size = pgsz;

retry:
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret) {
      if (errno == ENODEV && userptr.flags == 0) {
         userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
         goto retry;
      }
      free(ptr);
      return false;
   }

   /* We don't release the userptr bo here as we want to keep the
    * kernel mm tracking alive for our lifetime. The first time we
    * create a userptr object the kernel has to install a mmu_notifier
    * which is a heavyweight operation (e.g. it requires taking all
    * mm_locks and stop_machine()).
    */
   bufmgr->userptr_active.ptr = ptr;
   bufmgr->userptr_active.handle = userptr.handle;

   return true;
}
/**
 * Returns a drm_bacon_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_bacon_bo *
drm_bacon_bo_gem_create_from_name(drm_bacon_bufmgr *bufmgr,
                                  const char *name,
                                  unsigned int handle)
{
   drm_bacon_bo_gem *bo_gem;
   int ret;
   struct drm_gem_open open_arg;
   struct drm_i915_gem_get_tiling get_tiling;

   /* At the moment most applications only have a few named bo.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named. And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   pthread_mutex_lock(&bufmgr->lock);
   HASH_FIND(name_hh, bufmgr->name_table,
             &handle, sizeof(handle), bo_gem);
   if (bo_gem) {
      drm_bacon_bo_reference(&bo_gem->bo);
      goto out;
   }

   memclear(open_arg);
   open_arg.name = handle;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_GEM_OPEN,
                  &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo_gem = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   HASH_FIND(handle_hh, bufmgr->handle_table,
             &open_arg.handle, sizeof(open_arg.handle), bo_gem);
   if (bo_gem) {
      drm_bacon_bo_reference(&bo_gem->bo);
      goto out;
   }

   bo_gem = calloc(1, sizeof(*bo_gem));
   if (!bo_gem)
      goto out;

   p_atomic_set(&bo_gem->refcount, 1);
   list_inithead(&bo_gem->vma_list);

   bo_gem->bo.size = open_arg.size;
   bo_gem->bo.offset64 = 0;
   bo_gem->bo.virtual = NULL;
   bo_gem->bo.bufmgr = bufmgr;
   bo_gem->name = name;
   bo_gem->validate_index = -1;
   bo_gem->gem_handle = open_arg.handle;
   bo_gem->bo.handle = open_arg.handle;
   bo_gem->global_name = handle;
   bo_gem->reusable = false;

   HASH_ADD(handle_hh, bufmgr->handle_table,
            gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
   HASH_ADD(name_hh, bufmgr->name_table,
            global_name, sizeof(bo_gem->global_name), bo_gem);

   memclear(get_tiling);
   get_tiling.handle = bo_gem->gem_handle;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_GET_TILING,
                  &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo_gem->tiling_mode = get_tiling.tiling_mode;
   bo_gem->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   drm_bacon_bo_gem_set_in_aperture_size(bufmgr, bo_gem, 0);
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

out:
   pthread_mutex_unlock(&bufmgr->lock);
   return &bo_gem->bo;

err_unref:
   drm_bacon_gem_bo_free(&bo_gem->bo);
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}
static void
drm_bacon_gem_bo_free(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_gem_close close;
   int ret;

   list_del(&bo_gem->vma_list);
   if (bo_gem->mem_virtual) {
      VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
      drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
      bufmgr->vma_count--;
   }
   if (bo_gem->wc_virtual) {
      VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
      drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
      bufmgr->vma_count--;
   }
   if (bo_gem->gtt_virtual) {
      drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
      bufmgr->vma_count--;
   }

   if (bo_gem->global_name)
      HASH_DELETE(name_hh, bufmgr->name_table, bo_gem);
   HASH_DELETE(handle_hh, bufmgr->handle_table, bo_gem);

   /* Close this object */
   memclear(close);
   close.handle = bo_gem->gem_handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo_gem->gem_handle, bo_gem->name, strerror(errno));
   }
   free(bo);
}
static void
drm_bacon_gem_bo_mark_mmaps_incoherent(drm_bacon_bo *bo)
{
#if HAVE_VALGRIND
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

   if (bo_gem->mem_virtual)
      VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

   if (bo_gem->wc_virtual)
      VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);

   if (bo_gem->gtt_virtual)
      VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}
/** Frees all cached buffers significantly older than @time. */
static void
drm_bacon_gem_cleanup_bo_cache(drm_bacon_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct drm_bacon_gem_bo_bucket *bucket =
          &bufmgr->cache_bucket[i];

      while (!list_empty(&bucket->head)) {
         drm_bacon_bo_gem *bo_gem;

         bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                             bucket->head.next, head);
         if (time - bo_gem->free_time <= 1)
            break;

         list_del(&bo_gem->head);

         drm_bacon_gem_bo_free(&bo_gem->bo);
      }
   }

   bufmgr->time = time;
}
static void drm_bacon_gem_bo_purge_vma_cache(drm_bacon_bufmgr *bufmgr)
{
   int limit;

   DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
       bufmgr->vma_count, bufmgr->vma_open, bufmgr->vma_max);

   if (bufmgr->vma_max < 0)
      return;

   /* We may need to evict a few entries in order to create new mmaps */
   limit = bufmgr->vma_max - 2*bufmgr->vma_open;
   if (limit < 0)
      limit = 0;

   while (bufmgr->vma_count > limit) {
      drm_bacon_bo_gem *bo_gem;

      bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                          bufmgr->vma_cache.next,
                          vma_list);
      assert(bo_gem->map_count == 0);
      list_delinit(&bo_gem->vma_list);

      if (bo_gem->mem_virtual) {
         drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
         bo_gem->mem_virtual = NULL;
         bufmgr->vma_count--;
      }
      if (bo_gem->wc_virtual) {
         drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
         bo_gem->wc_virtual = NULL;
         bufmgr->vma_count--;
      }
      if (bo_gem->gtt_virtual) {
         drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
         bo_gem->gtt_virtual = NULL;
         bufmgr->vma_count--;
      }
   }
}
static void drm_bacon_gem_bo_close_vma(drm_bacon_bufmgr *bufmgr,
                                       drm_bacon_bo_gem *bo_gem)
{
   bufmgr->vma_open--;
   list_addtail(&bo_gem->vma_list, &bufmgr->vma_cache);
   if (bo_gem->mem_virtual)
      bufmgr->vma_count++;
   if (bo_gem->wc_virtual)
      bufmgr->vma_count++;
   if (bo_gem->gtt_virtual)
      bufmgr->vma_count++;
   drm_bacon_gem_bo_purge_vma_cache(bufmgr);
}

static void drm_bacon_gem_bo_open_vma(drm_bacon_bufmgr *bufmgr,
                                      drm_bacon_bo_gem *bo_gem)
{
   bufmgr->vma_open++;
   list_del(&bo_gem->vma_list);
   if (bo_gem->mem_virtual)
      bufmgr->vma_count--;
   if (bo_gem->wc_virtual)
      bufmgr->vma_count--;
   if (bo_gem->gtt_virtual)
      bufmgr->vma_count--;
   drm_bacon_gem_bo_purge_vma_cache(bufmgr);
}
static void
drm_bacon_gem_bo_unreference_final(drm_bacon_bo *bo, time_t time)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_bacon_gem_bo_bucket *bucket;
   int i;

   /* Unreference all the target buffers */
   for (i = 0; i < bo_gem->reloc_count; i++) {
      if (bo_gem->reloc_target_info[i].bo != bo) {
         drm_bacon_gem_bo_unreference_locked_timed(bo_gem->
                                                   reloc_target_info[i].bo,
                                                   time);
      }
   }
   for (i = 0; i < bo_gem->softpin_target_count; i++)
      drm_bacon_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
                                                time);
   bo_gem->reloc_count = 0;
   bo_gem->used_as_reloc_target = false;
   bo_gem->softpin_target_count = 0;

   DBG("bo_unreference final: %d (%s)\n",
       bo_gem->gem_handle, bo_gem->name);

   /* release memory associated with this object */
   if (bo_gem->reloc_target_info) {
      free(bo_gem->reloc_target_info);
      bo_gem->reloc_target_info = NULL;
   }
   if (bo_gem->relocs) {
      free(bo_gem->relocs);
      bo_gem->relocs = NULL;
   }
   if (bo_gem->softpin_target) {
      free(bo_gem->softpin_target);
      bo_gem->softpin_target = NULL;
      bo_gem->softpin_target_size = 0;
   }

   /* Clear any left-over mappings */
   if (bo_gem->map_count) {
      DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
      bo_gem->map_count = 0;
      drm_bacon_gem_bo_close_vma(bufmgr, bo_gem);
      drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
   }

   bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bufmgr->bo_reuse && bo_gem->reusable && bucket != NULL &&
       drm_bacon_gem_bo_madvise_internal(bufmgr, bo_gem,
                                         I915_MADV_DONTNEED)) {
      bo_gem->free_time = time;

      bo_gem->name = NULL;
      bo_gem->validate_index = -1;

      list_addtail(&bo_gem->head, &bucket->head);
   } else {
      drm_bacon_gem_bo_free(bo);
   }
}
static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
                                                      time_t time)
{
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

   assert(p_atomic_read(&bo_gem->refcount) > 0);
   if (p_atomic_dec_zero(&bo_gem->refcount))
      drm_bacon_gem_bo_unreference_final(bo, time);
}
void
drm_bacon_bo_unreference(drm_bacon_bo *bo)
{
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo_gem->refcount) > 0);

   if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
      drm_bacon_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      pthread_mutex_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo_gem->refcount)) {
         drm_bacon_gem_bo_unreference_final(bo, time.tv_sec);
         drm_bacon_gem_cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      pthread_mutex_unlock(&bufmgr->lock);
   }
}
int
drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_i915_gem_set_domain set_domain;
   int ret;

   if (bo_gem->is_userptr) {
      /* Return the same user ptr */
      bo->virtual = bo_gem->user_virtual;
      return 0;
   }

   pthread_mutex_lock(&bufmgr->lock);

   if (bo_gem->map_count++ == 0)
      drm_bacon_gem_bo_open_vma(bufmgr, bo_gem);

   if (!bo_gem->mem_virtual) {
      struct drm_i915_gem_mmap mmap_arg;

      DBG("bo_map: %d (%s), map_count=%d\n",
          bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo_gem->gem_handle;
      mmap_arg.size = bo->size;
      ret = drmIoctl(bufmgr->fd,
                     DRM_IOCTL_I915_GEM_MMAP,
                     &mmap_arg);
      if (ret != 0) {
         ret = -errno;
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo_gem->gem_handle,
             bo_gem->name, strerror(errno));
         if (--bo_gem->map_count == 0)
            drm_bacon_gem_bo_close_vma(bufmgr, bo_gem);
         pthread_mutex_unlock(&bufmgr->lock);
         return ret;
      }
      VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
      bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
   }
   DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
       bo_gem->mem_virtual);
   bo->virtual = bo_gem->mem_virtual;

   memclear(set_domain);
   set_domain.handle = bo_gem->gem_handle;
   set_domain.read_domains = I915_GEM_DOMAIN_CPU;
   if (write_enable)
      set_domain.write_domain = I915_GEM_DOMAIN_CPU;
   else
      set_domain.write_domain = 0;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_SET_DOMAIN,
                  &set_domain);
   if (ret != 0) {
      DBG("%s:%d: Error setting to CPU domain %d: %s\n",
          __FILE__, __LINE__, bo_gem->gem_handle,
          strerror(errno));
   }

   if (write_enable)
      bo_gem->mapped_cpu_write = true;

   drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
   VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
   pthread_mutex_unlock(&bufmgr->lock);

   return 0;
}
static int
map_gtt(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   int ret;

   if (bo_gem->is_userptr)
      return -EINVAL;

   if (bo_gem->map_count++ == 0)
      drm_bacon_gem_bo_open_vma(bufmgr, bo_gem);

   /* Get a mapping of the buffer if we haven't before. */
   if (bo_gem->gtt_virtual == NULL) {
      struct drm_i915_gem_mmap_gtt mmap_arg;

      DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
          bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo_gem->gem_handle;

      /* Get the fake offset back... */
      ret = drmIoctl(bufmgr->fd,
                     DRM_IOCTL_I915_GEM_MMAP_GTT,
                     &mmap_arg);
      if (ret != 0) {
         ret = -errno;
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__,
             bo_gem->gem_handle, bo_gem->name,
             strerror(errno));
         if (--bo_gem->map_count == 0)
            drm_bacon_gem_bo_close_vma(bufmgr, bo_gem);
         return ret;
      }

      /* and mmap it */
      bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                     MAP_SHARED, bufmgr->fd,
                                     mmap_arg.offset);
      if (bo_gem->gtt_virtual == MAP_FAILED) {
         bo_gem->gtt_virtual = NULL;
         ret = -errno;
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__,
             bo_gem->gem_handle, bo_gem->name,
             strerror(errno));
         if (--bo_gem->map_count == 0)
            drm_bacon_gem_bo_close_vma(bufmgr, bo_gem);
         return ret;
      }
   }

   bo->virtual = bo_gem->gtt_virtual;

   DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
       bo_gem->gtt_virtual);

   return 0;
}
int
drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_i915_gem_set_domain set_domain;
   int ret;

   pthread_mutex_lock(&bufmgr->lock);

   ret = map_gtt(bo);
   if (ret) {
      pthread_mutex_unlock(&bufmgr->lock);
      return ret;
   }

   /* Now move it to the GTT domain so that the GPU and CPU
    * caches are flushed and the GPU isn't actively using the
    * buffer.
    *
    * The pagefault handler does this domain change for us when
    * it has unbound the BO from the GTT, but it's up to us to
    * tell it when we're about to use things if we had done
    * rendering and it still happens to be bound to the GTT.
    */
   memclear(set_domain);
   set_domain.handle = bo_gem->gem_handle;
   set_domain.read_domains = I915_GEM_DOMAIN_GTT;
   set_domain.write_domain = I915_GEM_DOMAIN_GTT;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_SET_DOMAIN,
                  &set_domain);
   if (ret != 0) {
      DBG("%s:%d: Error setting domain %d: %s\n",
          __FILE__, __LINE__, bo_gem->gem_handle,
          strerror(errno));
   }

   drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
   VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
   pthread_mutex_unlock(&bufmgr->lock);

   return 0;
}
/**
 * Performs a mapping of the buffer object like the normal GTT
 * mapping, but avoids waiting for the GPU to be done reading from or
 * rendering to the buffer.
 *
 * This is used in the implementation of GL_ARB_map_buffer_range: The
 * user asks to create a buffer, then does a mapping, fills some
 * space, runs a drawing command, then asks to map it again without
 * synchronizing because it guarantees that it won't write over the
 * data that the GPU is busy using (or, more specifically, that if it
 * does write over the data, it acknowledges that rendering is
 * undefined).
 */
int
drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
#ifdef HAVE_VALGRIND
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
#endif
   int ret;

   /* If the CPU cache isn't coherent with the GTT, then use a
    * regular synchronized mapping.  The problem is that we don't
    * track where the buffer was last used on the CPU side in
    * terms of drm_bacon_bo_map vs drm_bacon_gem_bo_map_gtt, so
    * we would potentially corrupt the buffer even when the user
    * does reasonable things.
    */
   if (!bufmgr->has_llc)
      return drm_bacon_gem_bo_map_gtt(bo);

   pthread_mutex_lock(&bufmgr->lock);

   ret = map_gtt(bo);
   if (ret == 0) {
      drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
      VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
   }

   pthread_mutex_unlock(&bufmgr->lock);

   return ret;
}
int
drm_bacon_bo_unmap(drm_bacon_bo *bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   int ret = 0;

   if (bo == NULL)
      return 0;

   if (bo_gem->is_userptr)
      return 0;

   pthread_mutex_lock(&bufmgr->lock);

   if (bo_gem->map_count <= 0) {
      DBG("attempted to unmap an unmapped bo\n");
      pthread_mutex_unlock(&bufmgr->lock);
      /* Preserve the old behaviour of just treating this as a
       * no-op rather than reporting the error.
       */
      return 0;
   }

   if (bo_gem->mapped_cpu_write) {
      struct drm_i915_gem_sw_finish sw_finish;

      /* Cause a flush to happen if the buffer's pinned for
       * scanout, so the results show up in a timely manner.
       * Unlike GTT set domains, this only does work if the
       * buffer should be scanout-related.
       */
      memclear(sw_finish);
      sw_finish.handle = bo_gem->gem_handle;
      ret = drmIoctl(bufmgr->fd,
                     DRM_IOCTL_I915_GEM_SW_FINISH,
                     &sw_finish);
      ret = ret == -1 ? -errno : 0;

      bo_gem->mapped_cpu_write = false;
   }

   /* We need to unmap after every invocation as we cannot track
    * an open vma for every bo as that will exhaust the system
    * limits and cause later failures.
    */
   if (--bo_gem->map_count == 0) {
      drm_bacon_gem_bo_close_vma(bufmgr, bo_gem);
      drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
      bo->virtual = NULL;
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return ret;
}
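
/* A minimal map/write/unmap sketch (not part of the original file),
 * assuming a valid bo: map with write_enable set so the SW_FINISH flush
 * above happens on unmap.
 */
static inline int
example_fill_bo(drm_bacon_bo *bo, int value)
{
   int ret = drm_bacon_bo_map(bo, 1 /* write_enable */);
   if (ret != 0)
      return ret;
   memset(bo->virtual, value, bo->size);
   return drm_bacon_bo_unmap(bo);
}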
int
drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
                     unsigned long size, const void *data)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_i915_gem_pwrite pwrite;
   int ret;

   if (bo_gem->is_userptr)
      return -EINVAL;

   memclear(pwrite);
   pwrite.handle = bo_gem->gem_handle;
   pwrite.offset = offset;
   pwrite.size = size;
   pwrite.data_ptr = (uint64_t) (uintptr_t) data;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_PWRITE,
                  &pwrite);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
          __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
          (int)size, strerror(errno));
   }

   return ret;
}
int
drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
                         unsigned long size, void *data)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_i915_gem_pread pread;
   int ret;

   if (bo_gem->is_userptr)
      return -EINVAL;

   memclear(pread);
   pread.handle = bo_gem->gem_handle;
   pread.offset = offset;
   pread.size = size;
   pread.data_ptr = (uint64_t) (uintptr_t) data;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_PREAD,
                  &pread);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
          __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
          (int)size, strerror(errno));
   }

   return ret;
}
/** Waits for all GPU rendering with the object to have completed. */
void
drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
{
   drm_bacon_gem_bo_start_gtt_access(bo, 1);
}
/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful ie. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative return
 * value describes the error. Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to drm_bacon_gem_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait). This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle. Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise, upgrade to latest stable kernels if this is the case.
 */
int
drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_i915_gem_wait wait;
   int ret;

   memclear(wait);
   wait.bo_handle = bo_gem->gem_handle;
   wait.timeout_ns = timeout_ns;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret == -1)
      return -errno;

   return ret;
}
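
/* Usage sketch (illustrative, not part of the original file): a bounded
 * wait for rendering to finish, treating a timeout (-ETIME) as "still
 * busy". Assumes a valid bo.
 */
static inline bool
example_wait_up_to_1ms(drm_bacon_bo *bo)
{
   return drm_bacon_gem_bo_wait(bo, 1000 * 1000 /* 1 ms in ns */) == 0;
}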
/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_bacon_gem_bo_map_gtt().
 *
 * In combination with drm_bacon_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_i915_gem_set_domain set_domain;
   int ret;

   memclear(set_domain);
   set_domain.handle = bo_gem->gem_handle;
   set_domain.read_domains = I915_GEM_DOMAIN_GTT;
   set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_SET_DOMAIN,
                  &set_domain);
   if (ret != 0) {
      DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
          __FILE__, __LINE__, bo_gem->gem_handle,
          set_domain.read_domains, set_domain.write_domain,
          strerror(errno));
   }
}
void
drm_bacon_bufmgr_gem_destroy(drm_bacon_bufmgr *bufmgr)
{
   struct drm_gem_close close_bo;
   int i, ret;

   free(bufmgr->exec2_objects);
   free(bufmgr->exec_bos);

   pthread_mutex_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct drm_bacon_gem_bo_bucket *bucket =
          &bufmgr->cache_bucket[i];
      drm_bacon_bo_gem *bo_gem;

      while (!list_empty(&bucket->head)) {
         bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                             bucket->head.next, head);
         list_del(&bo_gem->head);

         drm_bacon_gem_bo_free(&bo_gem->bo);
      }
   }

   /* Release userptr bo kept hanging around for optimisation. */
   if (bufmgr->userptr_active.ptr) {
      memclear(close_bo);
      close_bo.handle = bufmgr->userptr_active.handle;
      ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
      free(bufmgr->userptr_active.ptr);
      if (ret)
         fprintf(stderr,
                 "Failed to release test userptr object! (%d) "
                 "i915 kernel driver may not be sane!\n", errno);
   }

   free(bufmgr);
}
/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
                 drm_bacon_bo *target_bo, uint32_t target_offset,
                 uint32_t read_domains, uint32_t write_domain)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;

   if (bo_gem->has_error)
      return -ENOMEM;

   if (target_bo_gem->has_error) {
      bo_gem->has_error = true;
      return -ENOMEM;
   }

   /* Create a new relocation list if needed */
   if (bo_gem->relocs == NULL && drm_bacon_setup_reloc_list(bo))
      return -ENOMEM;

   /* Check overflow */
   assert(bo_gem->reloc_count < bufmgr->max_relocs);

   /* Check args */
   assert(offset <= bo->size - 4);
   assert((write_domain & (write_domain - 1)) == 0);

   /* Make sure that we're not adding a reloc to something whose size has
    * already been accounted for.
    */
   assert(!bo_gem->used_as_reloc_target);
   if (target_bo_gem != bo_gem) {
      target_bo_gem->used_as_reloc_target = true;
      bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
   }

   bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
   if (target_bo != bo)
      drm_bacon_bo_reference(target_bo);

   bo_gem->relocs[bo_gem->reloc_count].offset = offset;
   bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
   bo_gem->relocs[bo_gem->reloc_count].target_handle =
       target_bo_gem->gem_handle;
   bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
   bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
   bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
   bo_gem->reloc_count++;

   return 0;
}
static int
drm_bacon_gem_bo_add_softpin_target(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;

   if (bo_gem->has_error)
      return -ENOMEM;

   if (target_bo_gem->has_error) {
      bo_gem->has_error = true;
      return -ENOMEM;
   }

   if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
      return -EINVAL;
   if (target_bo_gem == bo_gem)
      return -EINVAL;

   if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
      int new_size = bo_gem->softpin_target_size * 2;
      if (new_size == 0)
         new_size = bufmgr->max_relocs;

      bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
                                       sizeof(drm_bacon_bo *));
      if (!bo_gem->softpin_target)
         return -ENOMEM;

      bo_gem->softpin_target_size = new_size;
   }
   bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
   drm_bacon_bo_reference(target_bo);
   bo_gem->softpin_target_count++;

   return 0;
}
int
drm_bacon_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
                        drm_bacon_bo *target_bo, uint32_t target_offset,
                        uint32_t read_domains, uint32_t write_domain)
{
   drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *)target_bo;

   if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
      return drm_bacon_gem_bo_add_softpin_target(bo, target_bo);
   else
      return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
                              read_domains, write_domain);
}
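
/* Usage sketch (illustrative, not part of the original file): record that
 * the batch references target_bo via a pointer written at byte "offset" in
 * the batch, with the presumed address already written there. Softpinned
 * targets automatically take the softpin path above, which emits no
 * relocation entry at all.
 *
 *    drm_bacon_bo_emit_reloc(batch_bo, offset,
 *                            target_bo, 0,
 *                            I915_GEM_DOMAIN_RENDER, 0);
 */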
int
drm_bacon_gem_bo_get_reloc_count(drm_bacon_bo *bo)
{
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

   return bo_gem->reloc_count;
}
/**
 * Removes existing relocation entries in the BO after "start".
 *
 * This allows a user to avoid a two-step process for state setup with
 * counting up all the buffer objects and doing a
 * drm_bacon_bufmgr_check_aperture_space() before emitting any of the
 * relocations for the state setup.  Instead, save the state of the
 * batchbuffer including drm_bacon_gem_get_reloc_count(), emit all the
 * state, and then check if it still fits in the aperture.
 *
 * Any further drm_bacon_bufmgr_check_aperture_space() queries
 * involving this buffer in the tree are undefined after this call.
 *
 * This also removes all softpinned targets being referenced by the BO.
 */
void
drm_bacon_gem_bo_clear_relocs(drm_bacon_bo *bo, int start)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   int i;
   struct timespec time;

   clock_gettime(CLOCK_MONOTONIC, &time);

   assert(bo_gem->reloc_count >= start);

   /* Unreference the cleared target buffers */
   pthread_mutex_lock(&bufmgr->lock);

   for (i = start; i < bo_gem->reloc_count; i++) {
      drm_bacon_bo_gem *target_bo_gem =
         (drm_bacon_bo_gem *) bo_gem->reloc_target_info[i].bo;
      if (&target_bo_gem->bo != bo) {
         drm_bacon_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
                                                   time.tv_sec);
      }
   }
   bo_gem->reloc_count = start;

   for (i = 0; i < bo_gem->softpin_target_count; i++) {
      drm_bacon_bo_gem *target_bo_gem =
         (drm_bacon_bo_gem *) bo_gem->softpin_target[i];
      drm_bacon_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
                                                time.tv_sec);
   }
   bo_gem->softpin_target_count = 0;

   pthread_mutex_unlock(&bufmgr->lock);
}
static void
drm_bacon_gem_bo_process_reloc2(drm_bacon_bo *bo)
{
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;
   int i;

   if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
      return;

   for (i = 0; i < bo_gem->reloc_count; i++) {
      drm_bacon_bo *target_bo = bo_gem->reloc_target_info[i].bo;

      if (target_bo == bo)
         continue;

      drm_bacon_gem_bo_mark_mmaps_incoherent(bo);

      /* Continue walking the tree depth-first. */
      drm_bacon_gem_bo_process_reloc2(target_bo);

      /* Add the target to the validate list */
      drm_bacon_add_validate_buffer2(target_bo);
   }

   for (i = 0; i < bo_gem->softpin_target_count; i++) {
      drm_bacon_bo *target_bo = bo_gem->softpin_target[i];

      if (target_bo == bo)
         continue;

      drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
      drm_bacon_gem_bo_process_reloc2(target_bo);
      drm_bacon_add_validate_buffer2(target_bo);
   }
}
static void
drm_bacon_update_buffer_offsets2 (drm_bacon_bufmgr *bufmgr)
{
   int i;

   for (i = 0; i < bufmgr->exec_count; i++) {
      drm_bacon_bo *bo = bufmgr->exec_bos[i];
      drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;

      /* Update the buffer offset */
      if (bufmgr->exec2_objects[i].offset != bo->offset64) {
         /* If we're seeing a softpinned object here it means that the kernel
          * has relocated our object... Indicating a programming error
          */
         assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
         DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
             bo_gem->gem_handle, bo_gem->name,
             upper_32_bits(bo->offset64),
             lower_32_bits(bo->offset64),
             upper_32_bits(bufmgr->exec2_objects[i].offset),
             lower_32_bits(bufmgr->exec2_objects[i].offset));
         bo->offset64 = bufmgr->exec2_objects[i].offset;
      }
   }
}
static int
do_exec2(drm_bacon_bo *bo, int used, drm_bacon_context *ctx,
         int in_fence, int *out_fence,
         unsigned int flags)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_execbuffer2 execbuf;
   int ret = 0;
   int i;

   if (to_bo_gem(bo)->has_error)
      return -ENOMEM;

   switch (flags & 0x7) {
   default:
      return -EINVAL;
   case I915_EXEC_BLT:
      if (!bufmgr->has_blt)
         return -EINVAL;
      break;
   case I915_EXEC_BSD:
      if (!bufmgr->has_bsd)
         return -EINVAL;
      break;
   case I915_EXEC_VEBOX:
      if (!bufmgr->has_vebox)
         return -EINVAL;
      break;
   case I915_EXEC_RENDER:
   case I915_EXEC_DEFAULT:
      break;
   }

   pthread_mutex_lock(&bufmgr->lock);
   /* Update indices and set up the validate list. */
   drm_bacon_gem_bo_process_reloc2(bo);

   /* Add the batch buffer to the validation list.  There are no relocations
    * pointing to it.
    */
   drm_bacon_add_validate_buffer2(bo);

   memclear(execbuf);
   execbuf.buffers_ptr = (uintptr_t)bufmgr->exec2_objects;
   execbuf.buffer_count = bufmgr->exec_count;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = used;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;
   execbuf.flags = flags;
   if (ctx == NULL)
      i915_execbuffer2_set_context_id(execbuf, 0);
   else
      i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
   execbuf.rsvd2 = 0;
   if (in_fence != -1) {
      execbuf.rsvd2 = in_fence;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }
   if (out_fence != NULL) {
      *out_fence = -1;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   if (bufmgr->no_exec)
      goto skip_execution;

   ret = drmIoctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
                  &execbuf);
   if (ret != 0) {
      ret = -errno;
      if (ret == -ENOSPC) {
         DBG("Execbuffer fails to pin. "
             "Estimate: %u. Actual: %u. Available: %u\n",
             drm_bacon_gem_estimate_batch_space(bufmgr->exec_bos,
                                                bufmgr->exec_count),
             drm_bacon_gem_compute_batch_space(bufmgr->exec_bos,
                                               bufmgr->exec_count),
             (unsigned int) bufmgr->gtt_size);
      }
   }
   drm_bacon_update_buffer_offsets2(bufmgr);

   if (ret == 0 && out_fence != NULL)
      *out_fence = execbuf.rsvd2 >> 32;

skip_execution:
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      drm_bacon_gem_dump_validation_list(bufmgr);

   for (i = 0; i < bufmgr->exec_count; i++) {
      drm_bacon_bo_gem *bo_gem = to_bo_gem(bufmgr->exec_bos[i]);

      bo_gem->idle = false;

      /* Disconnect the buffer from the validate list */
      bo_gem->validate_index = -1;
      bufmgr->exec_bos[i] = NULL;
   }
   bufmgr->exec_count = 0;
   pthread_mutex_unlock(&bufmgr->lock);

   return ret;
}
int
drm_bacon_bo_exec(drm_bacon_bo *bo, int used)
{
   return do_exec2(bo, used, NULL, -1, NULL, I915_EXEC_RENDER);
}

int
drm_bacon_bo_mrb_exec(drm_bacon_bo *bo, int used, unsigned int flags)
{
   return do_exec2(bo, used, NULL, -1, NULL, flags);
}

int
drm_bacon_gem_bo_context_exec(drm_bacon_bo *bo, drm_bacon_context *ctx,
                              int used, unsigned int flags)
{
   return do_exec2(bo, used, ctx, -1, NULL, flags);
}

int
drm_bacon_gem_bo_fence_exec(drm_bacon_bo *bo,
                            drm_bacon_context *ctx,
                            int used,
                            int in_fence,
                            int *out_fence,
                            unsigned int flags)
{
   return do_exec2(bo, used, ctx, in_fence, out_fence, flags);
}
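
/* A minimal submission sketch (not part of the original file), assuming
 * "batch" already contains "used" bytes of valid commands and all
 * relocations have been emitted against it. do_exec2() returns a negative
 * errno on failure.
 */
static inline int
example_submit_batch(drm_bacon_bo *batch, int used)
{
   int ret = drm_bacon_bo_exec(batch, used);
   if (ret != 0)
      DBG("execbuf failed: %s\n", strerror(-ret));
   return ret;
}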
static int
drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo_gem->global_name == 0 &&
       tiling_mode == bo_gem->tiling_mode &&
       stride == bo_gem->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drmIoctl.
       */
      set_tiling.handle = bo_gem->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd,
                  DRM_IOCTL_I915_GEM_SET_TILING,
                  &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo_gem->tiling_mode = set_tiling.tiling_mode;
   bo_gem->swizzle_mode = set_tiling.swizzle_mode;
   bo_gem->stride = set_tiling.stride;
   return 0;
}
int
drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
                        uint32_t stride)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
   int ret;

   /* Tiling with userptr surfaces is not supported
    * on all hardware so refuse it for time being.
    */
   if (bo_gem->is_userptr)
      return -EINVAL;

   /* Linear buffers have no stride. By ensuring that we only ever use
    * stride 0 with linear buffers, we simplify our code.
    */
   if (*tiling_mode == I915_TILING_NONE)
      stride = 0;

   ret = drm_bacon_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
   if (ret == 0)
      drm_bacon_bo_gem_set_in_aperture_size(bufmgr, bo_gem, 0);

   *tiling_mode = bo_gem->tiling_mode;
   return ret;
}
int
drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
                        uint32_t *swizzle_mode)
{
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

   *tiling_mode = bo_gem->tiling_mode;
   *swizzle_mode = bo_gem->swizzle_mode;
   return 0;
}
int
drm_bacon_bo_set_softpin_offset(drm_bacon_bo *bo, uint64_t offset)
{
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

   bo->offset64 = offset;
   bo_gem->kflags |= EXEC_OBJECT_PINNED;

   return 0;
}
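
/* Usage sketch (illustrative, not part of the original file): pin a bo at a
 * fixed GPU virtual address so that later drm_bacon_bo_emit_reloc() calls
 * against it become softpin targets and no relocation entries are written.
 * Choosing a non-conflicting address is entirely the caller's
 * responsibility; the value below is hypothetical.
 *
 *    drm_bacon_bo_set_softpin_offset(bo, 0x100000000ull);
 */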
drm_bacon_bo *
drm_bacon_bo_gem_create_from_prime(drm_bacon_bufmgr *bufmgr, int prime_fd, int size)
{
   int ret;
   uint32_t handle;
   drm_bacon_bo_gem *bo_gem;
   struct drm_i915_gem_get_tiling get_tiling;

   pthread_mutex_lock(&bufmgr->lock);
   ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
      pthread_mutex_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object
    */
   HASH_FIND(handle_hh, bufmgr->handle_table,
             &handle, sizeof(handle), bo_gem);
   if (bo_gem) {
      drm_bacon_bo_reference(&bo_gem->bo);
      goto out;
   }

   bo_gem = calloc(1, sizeof(*bo_gem));
   if (!bo_gem)
      goto out;

   p_atomic_set(&bo_gem->refcount, 1);
   list_inithead(&bo_gem->vma_list);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guess size). */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo_gem->bo.size = ret;
   else
      bo_gem->bo.size = size;

   bo_gem->bo.handle = handle;
   bo_gem->bo.bufmgr = bufmgr;

   bo_gem->gem_handle = handle;
   HASH_ADD(handle_hh, bufmgr->handle_table,
            gem_handle, sizeof(bo_gem->gem_handle), bo_gem);

   bo_gem->name = "prime";
   bo_gem->validate_index = -1;
   bo_gem->used_as_reloc_target = false;
   bo_gem->has_error = false;
   bo_gem->reusable = false;

   memclear(get_tiling);
   get_tiling.handle = bo_gem->gem_handle;
   if (drmIoctl(bufmgr->fd,
                DRM_IOCTL_I915_GEM_GET_TILING,
                &get_tiling))
      goto err;

   bo_gem->tiling_mode = get_tiling.tiling_mode;
   bo_gem->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   drm_bacon_bo_gem_set_in_aperture_size(bufmgr, bo_gem, 0);

out:
   pthread_mutex_unlock(&bufmgr->lock);
   return &bo_gem->bo;

err:
   drm_bacon_gem_bo_free(&bo_gem->bo);
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}
int
drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
{
   drm_bacon_bufmgr *bufmgr = bo->bufmgr;
   drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

   if (drmPrimeHandleToFD(bufmgr->fd, bo_gem->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo_gem->reusable = false;

   return 0;
}
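
/* A minimal dma-buf sharing sketch (not part of the original file): export
 * a bo as a prime fd and re-import it, as would happen across a device or
 * process boundary. The 4096 is a hypothetical fallback size guess for old
 * kernels whose prime fds cannot be lseek()ed.
 */
static inline drm_bacon_bo *
example_share_via_prime(drm_bacon_bufmgr *bufmgr, drm_bacon_bo *bo)
{
   int fd;
   drm_bacon_bo *imported;

   if (drm_bacon_bo_gem_export_to_prime(bo, &fd) != 0)
      return NULL;

   imported = drm_bacon_bo_gem_create_from_prime(bufmgr, fd, 4096);
   close(fd); /* the import holds its own reference via the GEM handle */
   return imported;
}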
int
drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t *name)
{
        drm_bacon_bufmgr *bufmgr = bo->bufmgr;
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

        if (!bo_gem->global_name) {
                struct drm_gem_flink flink;

                memclear(flink);
                flink.handle = bo_gem->gem_handle;
                if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
                        return -errno;

                pthread_mutex_lock(&bufmgr->lock);
                if (!bo_gem->global_name) {
                        bo_gem->global_name = flink.name;
                        bo_gem->reusable = false;

                        HASH_ADD(name_hh, bufmgr->name_table,
                                 global_name, sizeof(bo_gem->global_name),
                                 bo_gem);
                }
                pthread_mutex_unlock(&bufmgr->lock);
        }

        *name = bo_gem->global_name;
        return 0;
}
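/*
 * Sketch of legacy name sharing via flink (assuming a
 * drm_bacon_bo_gem_create_from_name() counterpart on the receiving side,
 * as in the libdrm API this code derives from). Flink names are global
 * and guessable by other clients of the same DRM node, which is why
 * prime fds are preferred:
 *
 *      uint32_t name;
 *      if (drm_bacon_bo_flink(bo, &name) == 0)
 *              send_name_to_peer(name);        // hypothetical IPC helper
 */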
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_bacon_bufmgr_gem_enable_reuse(drm_bacon_bufmgr *bufmgr)
{
        bufmgr->bo_reuse = true;
}
/**
 * Disables implicit synchronisation before executing the bo
 *
 * This will cause rendering corruption unless you correctly manage explicit
 * fences for all rendering involving this buffer - including use by others.
 * Disabling the implicit serialisation is only required if that serialisation
 * is too coarse (for example, you have split the buffer into many
 * non-overlapping regions and are sharing the whole buffer between concurrent
 * independent command streams).
 *
 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
 * which can be checked using drm_bacon_bufmgr_gem_can_disable_implicit_sync,
 * or subsequent execbufs involving the bo will generate EINVAL.
 */
void
drm_bacon_gem_bo_disable_implicit_sync(drm_bacon_bo *bo)
{
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

        bo_gem->kflags |= EXEC_OBJECT_ASYNC;
}
/**
 * Enables implicit synchronisation before executing the bo
 *
 * This is the default behaviour of the kernel, to wait upon prior writes
 * completing on the object before rendering with it, or to wait for prior
 * reads to complete before writing into the object.
 * drm_bacon_gem_bo_disable_implicit_sync() can stop this behaviour, telling
 * the kernel never to insert a stall before using the object. Then this
 * function can be used to restore the implicit sync before subsequent
 * rendering.
 */
void
drm_bacon_gem_bo_enable_implicit_sync(drm_bacon_bo *bo)
{
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

        bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
}
/**
 * Query whether the kernel supports disabling of its implicit synchronisation
 * before execbuf. See drm_bacon_gem_bo_disable_implicit_sync()
 */
int
drm_bacon_bufmgr_gem_can_disable_implicit_sync(drm_bacon_bufmgr *bufmgr)
{
        return bufmgr->has_exec_async;
}
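/*
 * Sketch of the intended pattern (the explicit fencing mechanism itself,
 * e.g. sync_file fds from execbuf, is the caller's responsibility):
 *
 *      if (drm_bacon_bufmgr_gem_can_disable_implicit_sync(bufmgr)) {
 *              drm_bacon_gem_bo_disable_implicit_sync(bo);
 *              // submit batches touching disjoint regions of bo,
 *              // ordering them with explicit fences
 *              drm_bacon_gem_bo_enable_implicit_sync(bo);  // restore default
 *      }
 */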
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_bacon_gem_bo_get_aperture_space(drm_bacon_bo *bo)
{
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
        int i;
        int total = 0;

        if (bo == NULL || bo_gem->included_in_check_aperture)
                return 0;

        total += bo->size;
        bo_gem->included_in_check_aperture = true;

        for (i = 0; i < bo_gem->reloc_count; i++)
                total += drm_bacon_gem_bo_get_aperture_space(
                        bo_gem->reloc_target_info[i].bo);

        return total;
}
/**
 * Clear the flag set by drm_bacon_gem_bo_get_aperture_space() so we're ready
 * for the next drm_bacon_bufmgr_check_aperture_space() call.
 */
static void
drm_bacon_gem_bo_clear_aperture_space_flag(drm_bacon_bo *bo)
{
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
        int i;

        if (bo == NULL || !bo_gem->included_in_check_aperture)
                return;

        bo_gem->included_in_check_aperture = false;

        for (i = 0; i < bo_gem->reloc_count; i++)
                drm_bacon_gem_bo_clear_aperture_space_flag(
                        bo_gem->reloc_target_info[i].bo);
}
/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_bacon_gem_estimate_batch_space(drm_bacon_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo_array[i];
                if (bo_gem != NULL)
                        total += bo_gem->reloc_tree_size;
        }
        return total;
}
/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_bacon_gem_compute_batch_space(drm_bacon_bo **bo_array, int count)
{
        int i;
        unsigned int total = 0;

        for (i = 0; i < count; i++) {
                total += drm_bacon_gem_bo_get_aperture_space(bo_array[i]);
                /* For the first buffer object in the array, we get an
                 * accurate count back for its reloc_tree size (since nothing
                 * had been flagged as being counted yet). We can save that
                 * value out as a more conservative reloc_tree_size that
                 * avoids double-counting target buffers. Since the first
                 * buffer happens to usually be the batch buffer in our
                 * callers, this can pull us back from doing the tree
                 * walk on every new batch emit.
                 */
                if (i == 0) {
                        drm_bacon_bo_gem *bo_gem =
                                (drm_bacon_bo_gem *) bo_array[i];
                        bo_gem->reloc_tree_size = total;
                }
        }

        for (i = 0; i < count; i++)
                drm_bacon_gem_bo_clear_aperture_space_flag(bo_array[i]);
        return total;
}
/**
 * Return -ENOSPC if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
int
drm_bacon_bufmgr_check_aperture_space(drm_bacon_bo **bo_array, int count)
{
        drm_bacon_bufmgr *bufmgr = bo_array[0]->bufmgr;
        unsigned int total = 0;
        unsigned int threshold = bufmgr->gtt_size * 3 / 4;

        total = drm_bacon_gem_estimate_batch_space(bo_array, count);

        if (total > threshold)
                total = drm_bacon_gem_compute_batch_space(bo_array, count);

        if (total > threshold) {
                DBG("check_space: overflowed available aperture, "
                    "%dkb vs %dkb\n",
                    total / 1024, (int)bufmgr->gtt_size / 1024);
                return -ENOSPC;
        } else {
                DBG("drm_check_space: total %dkb vs bufmgr %dkb\n",
                    total / 1024, (int)bufmgr->gtt_size / 1024);
                return 0;
        }
}
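/*
 * Sketch of the intended call pattern in a batchbuffer emitter, where
 * bo_array holds the batch plus every buffer it references:
 *
 *      if (drm_bacon_bufmgr_check_aperture_space(bo_array, count) != 0) {
 *              flush_batch();  // hypothetical caller helper
 *              // then re-emit state into a fresh, smaller batch
 *      }
 */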
/**
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
int
drm_bacon_bo_disable_reuse(drm_bacon_bo *bo)
{
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

        bo_gem->reusable = false;
        return 0;
}
int
drm_bacon_bo_is_reusable(drm_bacon_bo *bo)
{
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

        return bo_gem->reusable;
}
static int
_drm_bacon_gem_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
        int i;

        for (i = 0; i < bo_gem->reloc_count; i++) {
                if (bo_gem->reloc_target_info[i].bo == target_bo)
                        return 1;
                if (bo == bo_gem->reloc_target_info[i].bo)
                        continue;
                if (_drm_bacon_gem_bo_references(bo_gem->reloc_target_info[i].bo,
                                                 target_bo))
                        return 1;
        }

        for (i = 0; i < bo_gem->softpin_target_count; i++) {
                if (bo_gem->softpin_target[i] == target_bo)
                        return 1;
                if (_drm_bacon_gem_bo_references(bo_gem->softpin_target[i],
                                                 target_bo))
                        return 1;
        }

        return 0;
}
/** Return true if target_bo is referenced by bo's relocation tree. */
int
drm_bacon_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
        drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;

        if (bo == NULL || target_bo == NULL)
                return 0;
        if (target_bo_gem->used_as_reloc_target)
                return _drm_bacon_gem_bo_references(bo, target_bo);
        return 0;
}
static void
add_bucket(drm_bacon_bufmgr *bufmgr, int size)
{
        unsigned int i = bufmgr->num_buckets;

        assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

        list_inithead(&bufmgr->cache_bucket[i].head);
        bufmgr->cache_bucket[i].size = size;
        bufmgr->num_buckets++;
}
static void
init_cache_buckets(drm_bacon_bufmgr *bufmgr)
{
        unsigned long size, cache_max_size = 64 * 1024 * 1024;

        /* OK, so power of two buckets was too wasteful of memory.
         * Give 3 other sizes between each power of two, to hopefully
         * cover things accurately enough. (The alternative is
         * probably to just go for exact matching of sizes, and assume
         * that for things like composited window resize the tiled
         * width/height alignment and rounding of sizes to pages will
         * get us useful cache hit rates anyway)
         */
        add_bucket(bufmgr, 4096);
        add_bucket(bufmgr, 4096 * 2);
        add_bucket(bufmgr, 4096 * 3);

        /* Initialize the linked lists for BO reuse cache. */
        for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
                add_bucket(bufmgr, size);

                add_bucket(bufmgr, size + size * 1 / 4);
                add_bucket(bufmgr, size + size * 2 / 4);
                add_bucket(bufmgr, size + size * 3 / 4);
        }
}
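/*
 * The resulting bucket sizes, in pages: 1, 2, 3, 4, 5, 6, 7, 8, 10, 12,
 * 14, 16, 20, 24, 28, 32, and so on up to 64MB. That is, each power of
 * two plus three evenly spaced steps on the way to the next one.
 */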
void
drm_bacon_bufmgr_gem_set_vma_cache_size(drm_bacon_bufmgr *bufmgr, int limit)
{
        bufmgr->vma_max = limit;

        drm_bacon_gem_bo_purge_vma_cache(bufmgr);
}
static int
parse_devid_override(const char *devid_override)
{
        static const struct {
                const char *name;
                int pci_id;
        } name_map[] = {
                { "brw", PCI_CHIP_I965_GM },
                { "g4x", PCI_CHIP_GM45_GM },
                { "ilk", PCI_CHIP_ILD_G },
                { "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
                { "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
                { "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
                { "byt", PCI_CHIP_VALLEYVIEW_3 },
                { "bdw", 0x1620 | BDW_ULX },
                { "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
                { "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
        };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(name_map); i++) {
                if (!strcmp(name_map[i].name, devid_override))
                        return name_map[i].pci_id;
        }

        return strtod(devid_override, NULL);
}
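/*
 * Either a known codename or a raw PCI ID is accepted, e.g.
 *
 *      INTEL_DEVID_OVERRIDE=skl ./program
 *      INTEL_DEVID_OVERRIDE=0x1912 ./program
 *
 * (the numeric form relies on strtod() accepting C99 hexadecimal
 * constants).
 */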
/**
 * Get the PCI ID for the device. This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_bacon_bufmgr *bufmgr)
{
        char *devid_override;
        int devid = 0;
        int ret;
        drm_i915_getparam_t gp;

        if (geteuid() == getuid()) {
                devid_override = getenv("INTEL_DEVID_OVERRIDE");
                if (devid_override) {
                        bufmgr->no_exec = true;
                        return parse_devid_override(devid_override);
                }
        }

        memclear(gp);
        gp.param = I915_PARAM_CHIPSET_ID;
        gp.value = &devid;
        ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (ret) {
                fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
                fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
        }
        return devid;
}
int
drm_bacon_bufmgr_gem_get_devid(drm_bacon_bufmgr *bufmgr)
{
        return bufmgr->pci_device;
}
drm_bacon_context *
drm_bacon_gem_context_create(drm_bacon_bufmgr *bufmgr)
{
        struct drm_i915_gem_context_create create;
        drm_bacon_context *context = NULL;
        int ret;

        context = calloc(1, sizeof(*context));
        if (!context)
                return NULL;

        memclear(create);
        ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
        if (ret != 0) {
                DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
                    strerror(errno));
                free(context);
                return NULL;
        }

        context->ctx_id = create.ctx_id;
        context->bufmgr = bufmgr;

        return context;
}
int
drm_bacon_gem_context_get_id(drm_bacon_context *ctx, uint32_t *ctx_id)
{
        if (ctx == NULL)
                return -EINVAL;

        *ctx_id = ctx->ctx_id;

        return 0;
}
void
drm_bacon_gem_context_destroy(drm_bacon_context *ctx)
{
        struct drm_i915_gem_context_destroy destroy;
        int ret;

        if (ctx == NULL)
                return;

        memclear(destroy);

        destroy.ctx_id = ctx->ctx_id;
        ret = drmIoctl(ctx->bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
                       &destroy);
        if (ret != 0)
                fprintf(stderr,
                        "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
                        strerror(errno));

        free(ctx);
}
int
drm_bacon_get_reset_stats(drm_bacon_context *ctx,
                          uint32_t *reset_count,
                          uint32_t *active,
                          uint32_t *pending)
{
        struct drm_i915_reset_stats stats;
        int ret;

        if (ctx == NULL)
                return -EINVAL;

        memclear(stats);

        stats.ctx_id = ctx->ctx_id;
        ret = drmIoctl(ctx->bufmgr->fd,
                       DRM_IOCTL_I915_GET_RESET_STATS,
                       &stats);
        if (ret == 0) {
                if (reset_count != NULL)
                        *reset_count = stats.reset_count;

                if (active != NULL)
                        *active = stats.batch_active;

                if (pending != NULL)
                        *pending = stats.batch_pending;
        }

        return ret;
}
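/*
 * Sketch of a robustness check a caller might run per frame (mirrors
 * GL_ARB_robustness-style usage; last_observed_resets is caller state
 * and report_device_reset() is a hypothetical helper):
 *
 *      uint32_t resets, active, pending;
 *      if (drm_bacon_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
 *          resets != last_observed_resets)
 *              report_device_reset(active > 0);
 */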
int
drm_bacon_reg_read(drm_bacon_bufmgr *bufmgr,
                   uint32_t offset,
                   uint64_t *result)
{
        struct drm_i915_reg_read reg_read;
        int ret;

        memclear(reg_read);
        reg_read.offset = offset;

        ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

        *result = reg_read.val;
        return ret;
}
static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct list_head bufmgr_list = { &bufmgr_list, &bufmgr_list };
static drm_bacon_bufmgr *
drm_bacon_bufmgr_gem_find(int fd)
{
        list_for_each_entry(drm_bacon_bufmgr,
                            bufmgr, &bufmgr_list, managers) {
                if (bufmgr->fd == fd) {
                        p_atomic_inc(&bufmgr->refcount);
                        return bufmgr;
                }
        }

        return NULL;
}
void
drm_bacon_bufmgr_destroy(drm_bacon_bufmgr *bufmgr)
{
        if (atomic_add_unless(&bufmgr->refcount, -1, 1)) {
                pthread_mutex_lock(&bufmgr_list_mutex);

                if (p_atomic_dec_zero(&bufmgr->refcount)) {
                        list_del(&bufmgr->managers);
                        drm_bacon_bufmgr_gem_destroy(bufmgr);
                }

                pthread_mutex_unlock(&bufmgr_list_mutex);
        }
}
void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo)
{
        drm_bacon_bufmgr *bufmgr = bo->bufmgr;
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

        if (bo_gem->gtt_virtual)
                return bo_gem->gtt_virtual;

        if (bo_gem->is_userptr)
                return NULL;

        pthread_mutex_lock(&bufmgr->lock);
        if (bo_gem->gtt_virtual == NULL) {
                struct drm_i915_gem_mmap_gtt mmap_arg;
                void *ptr;

                DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                if (bo_gem->map_count++ == 0)
                        drm_bacon_gem_bo_open_vma(bufmgr, bo_gem);

                memclear(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;

                /* Get the fake offset back... */
                ptr = MAP_FAILED;
                if (drmIoctl(bufmgr->fd,
                             DRM_IOCTL_I915_GEM_MMAP_GTT,
                             &mmap_arg) == 0) {
                        /* and mmap it */
                        ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                       MAP_SHARED, bufmgr->fd,
                                       mmap_arg.offset);
                }
                if (ptr == MAP_FAILED) {
                        if (--bo_gem->map_count == 0)
                                drm_bacon_gem_bo_close_vma(bufmgr, bo_gem);
                        ptr = NULL;
                }

                bo_gem->gtt_virtual = ptr;
        }
        pthread_mutex_unlock(&bufmgr->lock);

        return bo_gem->gtt_virtual;
}
void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo)
{
        drm_bacon_bufmgr *bufmgr = bo->bufmgr;
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

        if (bo_gem->mem_virtual)
                return bo_gem->mem_virtual;

        if (bo_gem->is_userptr) {
                /* Return the same user ptr */
                return bo_gem->user_virtual;
        }

        pthread_mutex_lock(&bufmgr->lock);
        if (!bo_gem->mem_virtual) {
                struct drm_i915_gem_mmap mmap_arg;

                if (bo_gem->map_count++ == 0)
                        drm_bacon_gem_bo_open_vma(bufmgr, bo_gem);

                DBG("bo_map: %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                memclear(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;
                mmap_arg.size = bo->size;
                if (drmIoctl(bufmgr->fd,
                             DRM_IOCTL_I915_GEM_MMAP,
                             &mmap_arg)) {
                        DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                            __FILE__, __LINE__, bo_gem->gem_handle,
                            bo_gem->name, strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_bacon_gem_bo_close_vma(bufmgr, bo_gem);
                } else {
                        VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr,
                                                     mmap_arg.size, 0, 1));
                        bo_gem->mem_virtual =
                                (void *)(uintptr_t) mmap_arg.addr_ptr;
                }
        }
        pthread_mutex_unlock(&bufmgr->lock);

        return bo_gem->mem_virtual;
}
void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo)
{
        drm_bacon_bufmgr *bufmgr = bo->bufmgr;
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

        if (bo_gem->wc_virtual)
                return bo_gem->wc_virtual;

        if (bo_gem->is_userptr)
                return NULL;

        pthread_mutex_lock(&bufmgr->lock);
        if (!bo_gem->wc_virtual) {
                struct drm_i915_gem_mmap mmap_arg;

                if (bo_gem->map_count++ == 0)
                        drm_bacon_gem_bo_open_vma(bufmgr, bo_gem);

                DBG("bo_map: %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                memclear(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;
                mmap_arg.size = bo->size;
                mmap_arg.flags = I915_MMAP_WC;
                if (drmIoctl(bufmgr->fd,
                             DRM_IOCTL_I915_GEM_MMAP,
                             &mmap_arg)) {
                        DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                            __FILE__, __LINE__, bo_gem->gem_handle,
                            bo_gem->name, strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_bacon_gem_bo_close_vma(bufmgr, bo_gem);
                } else {
                        VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr,
                                                     mmap_arg.size, 0, 1));
                        bo_gem->wc_virtual =
                                (void *)(uintptr_t) mmap_arg.addr_ptr;
                }
        }
        pthread_mutex_unlock(&bufmgr->lock);

        return bo_gem->wc_virtual;
}
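/*
 * Sketch of choosing a mapping domain (a rough rule of thumb, not a
 * policy this file enforces): CPU maps are cached and fastest on LLC
 * platforms, WC maps suit streaming writes on non-LLC platforms, and
 * GTT maps go through the aperture and detile on the fly. These raw
 * mappings skip the set-domain step, so coherency is the caller's
 * responsibility:
 *
 *      void *ptr = bufmgr->has_llc ? drm_bacon_gem_bo_map__cpu(bo)
 *                                  : drm_bacon_gem_bo_map__wc(bo);
 *      if (!ptr)
 *              ptr = drm_bacon_gem_bo_map__gtt(bo);
 */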
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_bacon_bufmgr *
drm_bacon_bufmgr_gem_init(int fd, int batch_size)
{
        drm_bacon_bufmgr *bufmgr;
        struct drm_i915_gem_get_aperture aperture;
        drm_i915_getparam_t gp;
        int ret, tmp;

        pthread_mutex_lock(&bufmgr_list_mutex);

        bufmgr = drm_bacon_bufmgr_gem_find(fd);
        if (bufmgr)
                goto exit;

        bufmgr = calloc(1, sizeof(*bufmgr));
        if (bufmgr == NULL)
                goto exit;

        bufmgr->fd = fd;
        p_atomic_set(&bufmgr->refcount, 1);

        if (pthread_mutex_init(&bufmgr->lock, NULL) != 0) {
                free(bufmgr);
                bufmgr = NULL;
                goto exit;
        }

        memclear(aperture);
        drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
        bufmgr->gtt_size = aperture.aper_available_size;

        bufmgr->pci_device = get_pci_device_id(bufmgr);

        if (IS_GEN4(bufmgr->pci_device))
                bufmgr->gen = 4;
        else if (IS_GEN5(bufmgr->pci_device))
                bufmgr->gen = 5;
        else if (IS_GEN6(bufmgr->pci_device))
                bufmgr->gen = 6;
        else if (IS_GEN7(bufmgr->pci_device))
                bufmgr->gen = 7;
        else if (IS_GEN8(bufmgr->pci_device))
                bufmgr->gen = 8;
        else if (IS_GEN9(bufmgr->pci_device))
                bufmgr->gen = 9;
        else {
                free(bufmgr);
                bufmgr = NULL;
                goto exit;
        }

        memclear(gp);
        gp.value = &tmp;

        gp.param = I915_PARAM_HAS_BSD;
        ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        bufmgr->has_bsd = ret == 0;

        gp.param = I915_PARAM_HAS_BLT;
        ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        bufmgr->has_blt = ret == 0;

        gp.param = I915_PARAM_HAS_EXEC_ASYNC;
        ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        bufmgr->has_exec_async = ret == 0;

        gp.param = I915_PARAM_HAS_LLC;
        ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (ret != 0) {
                /* Kernel does not support the HAS_LLC query; fall back to GPU
                 * generation detection and assume that we have LLC on GEN6/7.
                 */
                bufmgr->has_llc = (IS_GEN6(bufmgr->pci_device) |
                                   IS_GEN7(bufmgr->pci_device));
        } else
                bufmgr->has_llc = *gp.value;

        gp.param = I915_PARAM_HAS_VEBOX;
        ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        bufmgr->has_vebox = (ret == 0) & (*gp.value > 0);

        /* Let's go with one relocation per every 2 dwords (but round down a bit
         * since a power of two will mean an extra page allocation for the reloc
         * buffer).
         *
         * Every 4 was too few for the blender benchmark.
         */
        bufmgr->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

        init_cache_buckets(bufmgr);

        list_inithead(&bufmgr->vma_cache);
        bufmgr->vma_max = -1; /* unlimited by default */

        list_add(&bufmgr->managers, &bufmgr_list);

exit:
        pthread_mutex_unlock(&bufmgr_list_mutex);

        return bufmgr;
}