/**************************************************************************
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */
#include <xf86drm.h>
#include <util/u_atomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>
#include <time.h>
#include <errno.h>

#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

#include "common/gen_debug.h"
#include "libdrm_macros.h"
#include "main/macros.h"
#include "util/macros.h"
#include "util/list.h"
#include "brw_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "uthash.h"
#include "i915_drm.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define memclear(s) memset(&s, 0, sizeof(s))

#define FILE_DEBUG_FLAG DEBUG_BUFMGR
static inline int
atomic_add_unless(int *v, int add, int unless)
{
    int c, old;
    c = p_atomic_read(v);
    while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
        c = old;
    return c == unless;
}
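/* Usage sketch: atomic_add_unless(&bo_gem->refcount, -1, 1) decrements the
 * refcount unless it is currently 1, returning nonzero in that "unless"
 * case so the caller can take the bufmgr lock before dropping the final
 * reference (see drm_bacon_bo_unreference() below):
 *
 *    if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
 *        pthread_mutex_lock(&bufmgr_gem->lock);
 *        if (p_atomic_dec_zero(&bo_gem->refcount))
 *            drm_bacon_gem_bo_unreference_final(bo, time.tv_sec);
 *        pthread_mutex_unlock(&bufmgr_gem->lock);
 *    }
 */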
/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32 bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))
typedef struct _drm_bacon_bo_gem drm_bacon_bo_gem;

struct drm_bacon_gem_bo_bucket {
    struct list_head head;
    unsigned long size;
};

typedef struct _drm_bacon_bufmgr_gem {
    drm_bacon_bufmgr bufmgr;

    int fd;

    int max_relocs;

    pthread_mutex_t lock;

    struct drm_i915_gem_exec_object2 *exec2_objects;
    drm_bacon_bo **exec_bos;
    int exec_size;
    int exec_count;

    /** Array of lists of cached gem objects of power-of-two sizes */
    struct drm_bacon_gem_bo_bucket cache_bucket[14 * 4];
    int num_buckets;
    time_t time;

    struct list_head managers;

    drm_bacon_bo_gem *name_table;
    drm_bacon_bo_gem *handle_table;

    struct list_head vma_cache;
    int vma_count, vma_open, vma_max;

    uint64_t gtt_size;
    unsigned int has_bsd : 1;
    unsigned int has_blt : 1;
    unsigned int has_llc : 1;
    unsigned int has_wait_timeout : 1;
    unsigned int bo_reuse : 1;
    unsigned int no_exec : 1;
    unsigned int has_vebox : 1;
    unsigned int has_exec_async : 1;

    struct {
        void *ptr;
        uint32_t handle;
    } userptr_active;
} drm_bacon_bufmgr_gem;

typedef struct _drm_bacon_reloc_target_info {
    drm_bacon_bo *bo;
} drm_bacon_reloc_target;
struct _drm_bacon_bo_gem {
    drm_bacon_bo bo;

    int refcount;
    uint32_t gem_handle;
    const char *name;

    /**
     * Kernel-assigned global name for this object
     *
     * List contains both flink named and prime fd'd objects
     */
    unsigned int global_name;

    UT_hash_handle handle_hh;
    UT_hash_handle name_hh;

    /**
     * Index of the buffer within the validation list while preparing a
     * batchbuffer execution.
     */
    int validate_index;

    /**
     * Current tiling mode
     */
    uint32_t tiling_mode;
    uint32_t swizzle_mode;
    unsigned long stride;

    unsigned long kflags;

    time_t free_time;

    /** Array passed to the DRM containing relocation information. */
    struct drm_i915_gem_relocation_entry *relocs;
    /**
     * Array of info structs corresponding to relocs[i].target_handle etc
     */
    drm_bacon_reloc_target *reloc_target_info;
    /** Number of entries in relocs */
    int reloc_count;
    /** Array of BOs that are referenced by this buffer and will be softpinned */
    drm_bacon_bo **softpin_target;
    /** Number of softpinned BOs that are referenced by this buffer */
    int softpin_target_count;
    /** Maximum amount of softpinned BOs that are referenced by this buffer */
    int softpin_target_size;

    /** Mapped address for the buffer, saved across map/unmap cycles */
    void *mem_virtual;
    /** GTT virtual address for the buffer, saved across map/unmap cycles */
    void *gtt_virtual;
    /** WC CPU address for the buffer, saved across map/unmap cycles */
    void *wc_virtual;
    /**
     * Virtual address of the buffer allocated by user, used for userptr
     * objects only.
     */
    void *user_virtual;
    int map_count;
    struct list_head vma_list;

    /** BO cache list */
    struct list_head head;

    /**
     * Boolean of whether this BO and its children have been included in
     * the current drm_bacon_bufmgr_check_aperture_space() total.
     */
    bool included_in_check_aperture;

    /**
     * Boolean of whether this buffer has been used as a relocation
     * target and had its size accounted for, and thus can't have any
     * further relocations added to it.
     */
    bool used_as_reloc_target;

    /**
     * Boolean of whether we have encountered an error whilst building the relocation tree.
     */
    bool has_error;

    /**
     * Boolean of whether this buffer can be re-used
     */
    bool reusable;

    /**
     * Boolean of whether the GPU is definitely not accessing the buffer.
     *
     * This is only valid when reusable, since non-reusable
     * buffers are those that have been shared with other
     * processes, so we don't know their state.
     */
    bool idle;

    /**
     * Boolean of whether this buffer was allocated with userptr
     */
    bool is_userptr;

    /**
     * Size in bytes of this buffer and its relocation descendents.
     *
     * Used to avoid costly tree walking in
     * drm_bacon_bufmgr_check_aperture in the common case.
     */
    unsigned long reloc_tree_size;

    /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
    bool mapped_cpu_write;
};
static int
drm_bacon_gem_estimate_batch_space(drm_bacon_bo **bo_array, int count);

static int
drm_bacon_gem_compute_batch_space(drm_bacon_bo **bo_array, int count);

static int
drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride);

static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
                                                      time_t time);

static void drm_bacon_gem_bo_free(drm_bacon_bo *bo);

static inline drm_bacon_bo_gem *to_bo_gem(drm_bacon_bo *bo)
{
    return (drm_bacon_bo_gem *)bo;
}
static unsigned long
drm_bacon_gem_bo_tile_size(drm_bacon_bufmgr_gem *bufmgr_gem, unsigned long size,
                           uint32_t *tiling_mode)
{
    if (*tiling_mode == I915_TILING_NONE)
        return size;

    /* 965+ just need multiples of page size for tiling */
    return ALIGN(size, 4096);
}
/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_bacon_gem_bo_tile_pitch(drm_bacon_bufmgr_gem *bufmgr_gem,
                            unsigned long pitch, uint32_t *tiling_mode)
{
    unsigned long tile_width;

    /* If untiled, then just align it so that we can do rendering
     * to it with the 3D engine.
     */
    if (*tiling_mode == I915_TILING_NONE)
        return ALIGN(pitch, 64);

    if (*tiling_mode == I915_TILING_X)
        tile_width = 512;
    else
        tile_width = 128;

    /* 965 is flexible */
    return ALIGN(pitch, tile_width);
}
static struct drm_bacon_gem_bo_bucket *
drm_bacon_gem_bo_bucket_for_size(drm_bacon_bufmgr_gem *bufmgr_gem,
                                 unsigned long size)
{
    int i;

    for (i = 0; i < bufmgr_gem->num_buckets; i++) {
        struct drm_bacon_gem_bo_bucket *bucket =
            &bufmgr_gem->cache_bucket[i];
        if (bucket->size >= size) {
            return bucket;
        }
    }

    return NULL;
}
static void
drm_bacon_gem_dump_validation_list(drm_bacon_bufmgr_gem *bufmgr_gem)
{
    int i, j;

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        drm_bacon_bo *bo = bufmgr_gem->exec_bos[i];
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

        if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
            DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
                bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
                bo_gem->name);
            continue;
        }

        for (j = 0; j < bo_gem->reloc_count; j++) {
            drm_bacon_bo *target_bo = bo_gem->reloc_target_info[j].bo;
            drm_bacon_bo_gem *target_gem =
                (drm_bacon_bo_gem *) target_bo;

            DBG("%2d: %d %s(%s)@0x%08x %08x -> "
                "%d (%s)@0x%08x %08x + 0x%08x\n",
                i,
                bo_gem->gem_handle,
                bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
                bo_gem->name,
                upper_32_bits(bo_gem->relocs[j].offset),
                lower_32_bits(bo_gem->relocs[j].offset),
                target_gem->gem_handle,
                target_gem->name,
                upper_32_bits(target_bo->offset64),
                lower_32_bits(target_bo->offset64),
                bo_gem->relocs[j].delta);
        }

        for (j = 0; j < bo_gem->softpin_target_count; j++) {
            drm_bacon_bo *target_bo = bo_gem->softpin_target[j];
            drm_bacon_bo_gem *target_gem =
                (drm_bacon_bo_gem *) target_bo;
            DBG("%2d: %d %s(%s) -> "
                "%d *(%s)@0x%08x %08x\n",
                i,
                bo_gem->gem_handle,
                bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
                bo_gem->name,
                target_gem->gem_handle,
                target_gem->name,
                upper_32_bits(target_bo->offset64),
                lower_32_bits(target_bo->offset64));
        }
    }
}
void
drm_bacon_bo_reference(drm_bacon_bo *bo)
{
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

    p_atomic_inc(&bo_gem->refcount);
}
static void
drm_bacon_add_validate_buffer2(drm_bacon_bo *bo)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;
    int index;

    if (bo_gem->validate_index != -1)
        return;

    /* Extend the array of validation entries as necessary. */
    if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
        int new_size = bufmgr_gem->exec_size * 2;

        if (new_size == 0)
            new_size = 5;

        bufmgr_gem->exec2_objects =
            realloc(bufmgr_gem->exec2_objects,
                    sizeof(*bufmgr_gem->exec2_objects) * new_size);
        bufmgr_gem->exec_bos =
            realloc(bufmgr_gem->exec_bos,
                    sizeof(*bufmgr_gem->exec_bos) * new_size);
        bufmgr_gem->exec_size = new_size;
    }

    index = bufmgr_gem->exec_count;
    bo_gem->validate_index = index;
    /* Fill in array entry */
    bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
    bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
    bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
    bufmgr_gem->exec2_objects[index].alignment = bo->align;
    bufmgr_gem->exec2_objects[index].offset = bo->offset64;
    bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags;
    bufmgr_gem->exec2_objects[index].rsvd1 = 0;
    bufmgr_gem->exec2_objects[index].rsvd2 = 0;
    bufmgr_gem->exec_bos[index] = bo;
    bufmgr_gem->exec_count++;
}
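/* Each validation entry is the kernel's struct drm_i915_gem_exec_object2;
 * the offset field carries the presumed address from the last execbuffer,
 * which lets the kernel skip relocation processing for buffers that have
 * not moved since then.
 */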
static void
drm_bacon_bo_gem_set_in_aperture_size(drm_bacon_bufmgr_gem *bufmgr_gem,
                                      drm_bacon_bo_gem *bo_gem,
                                      unsigned int alignment)
{
    unsigned long size;

    assert(!bo_gem->used_as_reloc_target);

    /* The older chipsets are far less flexible in terms of tiling,
     * and require tiled buffers to be size aligned in the aperture.
     * This means that in the worst possible case we will need a hole
     * twice as large as the object in order for it to fit into the
     * aperture. Optimal packing is for wimps.
     */
    size = bo_gem->bo.size;

    bo_gem->reloc_tree_size = size + alignment;
}
static int
drm_bacon_setup_reloc_list(drm_bacon_bo *bo)
{
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    unsigned int max_relocs = bufmgr_gem->max_relocs;

    if (bo->size / 4 < max_relocs)
        max_relocs = bo->size / 4;

    bo_gem->relocs = malloc(max_relocs *
                            sizeof(struct drm_i915_gem_relocation_entry));
    bo_gem->reloc_target_info = malloc(max_relocs *
                                       sizeof(drm_bacon_reloc_target));
    if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
        bo_gem->has_error = true;

        free (bo_gem->relocs);
        bo_gem->relocs = NULL;

        free (bo_gem->reloc_target_info);
        bo_gem->reloc_target_info = NULL;

        return 1;
    }

    return 0;
}
int
drm_bacon_bo_busy(drm_bacon_bo *bo)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_i915_gem_busy busy;
    int ret;

    if (bo_gem->reusable && bo_gem->idle)
        return false;

    memclear(busy);
    busy.handle = bo_gem->gem_handle;

    ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
    if (ret == 0)
        bo_gem->idle = !busy.busy;

    return (ret == 0 && busy.busy);
}
static int
drm_bacon_gem_bo_madvise_internal(drm_bacon_bufmgr_gem *bufmgr_gem,
                                  drm_bacon_bo_gem *bo_gem, int state)
{
    struct drm_i915_gem_madvise madv;

    memclear(madv);
    madv.handle = bo_gem->gem_handle;
    madv.madv = state;
    madv.retained = 1;
    drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

    return madv.retained;
}

int
drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv)
{
    return drm_bacon_gem_bo_madvise_internal
        ((drm_bacon_bufmgr_gem *) bo->bufmgr,
         (drm_bacon_bo_gem *) bo,
         madv);
}
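/* Usage note: the BO cache marks idle buffers I915_MADV_DONTNEED so the
 * kernel may reclaim their backing pages under memory pressure, and flips
 * them back to I915_MADV_WILLNEED on reuse. A return of retained == 0
 * means the pages were already purged, so the cached buffer must be freed
 * rather than handed back to the caller.
 */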
/* drop the oldest entries that have been purged by the kernel */
static void
drm_bacon_gem_bo_cache_purge_bucket(drm_bacon_bufmgr_gem *bufmgr_gem,
                                    struct drm_bacon_gem_bo_bucket *bucket)
{
    while (!list_empty(&bucket->head)) {
        drm_bacon_bo_gem *bo_gem;

        bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                            bucket->head.next, head);
        if (drm_bacon_gem_bo_madvise_internal
            (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
            break;

        list_del(&bo_gem->head);
        drm_bacon_gem_bo_free(&bo_gem->bo);
    }
}
static drm_bacon_bo *
drm_bacon_gem_bo_alloc_internal(drm_bacon_bufmgr *bufmgr,
                                const char *name,
                                unsigned long size,
                                unsigned long flags,
                                uint32_t tiling_mode,
                                unsigned long stride,
                                unsigned int alignment)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
    drm_bacon_bo_gem *bo_gem;
    unsigned int page_size = getpagesize();
    int ret;
    struct drm_bacon_gem_bo_bucket *bucket;
    bool alloc_from_cache;
    unsigned long bo_size;
    bool for_render = false;

    if (flags & BO_ALLOC_FOR_RENDER)
        for_render = true;

    /* Round the allocated size up to a power of two number of pages. */
    bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr_gem, size);

    /* If we don't have caching at this size, don't actually round the
     * allocation up.
     */
    if (bucket == NULL) {
        bo_size = size;
        if (bo_size < page_size)
            bo_size = page_size;
    } else {
        bo_size = bucket->size;
    }

    pthread_mutex_lock(&bufmgr_gem->lock);
    /* Get a buffer out of the cache if available */
retry:
    alloc_from_cache = false;
    if (bucket != NULL && !list_empty(&bucket->head)) {
        if (for_render) {
            /* Allocate new render-target BOs from the tail (MRU)
             * of the list, as it will likely be hot in the GPU
             * cache and in the aperture for us.
             */
            bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                                bucket->head.prev, head);
            list_del(&bo_gem->head);
            alloc_from_cache = true;
            bo_gem->bo.align = alignment;
        } else {
            assert(alignment == 0);
            /* For non-render-target BOs (where we're probably
             * going to map it first thing in order to fill it
             * with data), check if the last BO in the cache is
             * unbusy, and only reuse in that case. Otherwise,
             * allocating a new buffer is probably faster than
             * waiting for the GPU to finish.
             */
            bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                                bucket->head.next, head);
            if (!drm_bacon_bo_busy(&bo_gem->bo)) {
                alloc_from_cache = true;
                list_del(&bo_gem->head);
            }
        }

        if (alloc_from_cache) {
            if (!drm_bacon_gem_bo_madvise_internal
                (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
                drm_bacon_gem_bo_free(&bo_gem->bo);
                drm_bacon_gem_bo_cache_purge_bucket(bufmgr_gem,
                                                    bucket);
                goto retry;
            }

            if (drm_bacon_gem_bo_set_tiling_internal(&bo_gem->bo,
                                                     tiling_mode,
                                                     stride)) {
                drm_bacon_gem_bo_free(&bo_gem->bo);
                goto retry;
            }
        }
    }

    if (!alloc_from_cache) {
        struct drm_i915_gem_create create;

        bo_gem = calloc(1, sizeof(*bo_gem));
        if (!bo_gem)
            goto err;

        /* drm_bacon_gem_bo_free calls list_del() for an uninitialized
           list (vma_list), so better set the list head here */
        list_inithead(&bo_gem->vma_list);

        bo_gem->bo.size = bo_size;

        memclear(create);
        create.size = bo_size;

        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_CREATE,
                       &create);
        if (ret != 0) {
            free(bo_gem);
            goto err;
        }

        bo_gem->gem_handle = create.handle;
        HASH_ADD(handle_hh, bufmgr_gem->handle_table,
                 gem_handle, sizeof(bo_gem->gem_handle),
                 bo_gem);

        bo_gem->bo.handle = bo_gem->gem_handle;
        bo_gem->bo.bufmgr = bufmgr;
        bo_gem->bo.align = alignment;

        bo_gem->tiling_mode = I915_TILING_NONE;
        bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
        bo_gem->stride = 0;

        if (drm_bacon_gem_bo_set_tiling_internal(&bo_gem->bo,
                                                 tiling_mode,
                                                 stride))
            goto err_free;
    }

    bo_gem->name = name;
    p_atomic_set(&bo_gem->refcount, 1);
    bo_gem->validate_index = -1;
    bo_gem->used_as_reloc_target = false;
    bo_gem->has_error = false;
    bo_gem->reusable = true;

    drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
    pthread_mutex_unlock(&bufmgr_gem->lock);

    DBG("bo_create: buf %d (%s) %ldb\n",
        bo_gem->gem_handle, bo_gem->name, size);

    return &bo_gem->bo;

err_free:
    drm_bacon_gem_bo_free(&bo_gem->bo);
err:
    pthread_mutex_unlock(&bufmgr_gem->lock);
    return NULL;
}
drm_bacon_bo *
drm_bacon_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
                              const char *name,
                              unsigned long size,
                              unsigned int alignment)
{
    return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size,
                                           BO_ALLOC_FOR_RENDER,
                                           I915_TILING_NONE, 0,
                                           alignment);
}

drm_bacon_bo *
drm_bacon_bo_alloc(drm_bacon_bufmgr *bufmgr,
                   const char *name,
                   unsigned long size,
                   unsigned int alignment)
{
    return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, 0,
                                           I915_TILING_NONE, 0, 0);
}
drm_bacon_bo *
drm_bacon_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
                         int x, int y, int cpp, uint32_t *tiling_mode,
                         unsigned long *pitch, unsigned long flags)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
    unsigned long size, stride;
    uint32_t tiling;

    do {
        unsigned long aligned_y, height_alignment;

        tiling = *tiling_mode;

        /* If we're tiled, our allocations are in 8 or 32-row blocks,
         * so failure to align our height means that we won't allocate
         * enough pages.
         *
         * If we're untiled, we still have to align to 2 rows high
         * because the data port accesses 2x2 blocks even if the
         * bottom row isn't to be rendered, so failure to align means
         * we could walk off the end of the GTT and fault.  This is
         * documented on 965, and may be the case on older chipsets
         * too so we try to be careful.
         */
        aligned_y = y;
        height_alignment = 2;

        if (tiling == I915_TILING_X)
            height_alignment = 8;
        else if (tiling == I915_TILING_Y)
            height_alignment = 32;
        aligned_y = ALIGN(y, height_alignment);

        stride = x * cpp;
        stride = drm_bacon_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
        size = stride * aligned_y;
        size = drm_bacon_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
    } while (*tiling_mode != tiling);

    *pitch = stride;

    if (tiling == I915_TILING_NONE)
        stride = 0;

    return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, flags,
                                           tiling, stride, 0);
}
drm_bacon_bo *
drm_bacon_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
                           const char *name,
                           void *addr,
                           uint32_t tiling_mode,
                           uint32_t stride,
                           unsigned long size,
                           unsigned long flags)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
    drm_bacon_bo_gem *bo_gem;
    int ret;
    struct drm_i915_gem_userptr userptr;

    /* Tiling with userptr surfaces is not supported
     * on all hardware so refuse it for time being.
     */
    if (tiling_mode != I915_TILING_NONE)
        return NULL;

    bo_gem = calloc(1, sizeof(*bo_gem));
    if (!bo_gem)
        return NULL;

    p_atomic_set(&bo_gem->refcount, 1);
    list_inithead(&bo_gem->vma_list);

    bo_gem->bo.size = size;

    memclear(userptr);
    userptr.user_ptr = (__u64)((unsigned long)addr);
    userptr.user_size = size;
    userptr.flags = flags;

    ret = drmIoctl(bufmgr_gem->fd,
                   DRM_IOCTL_I915_GEM_USERPTR,
                   &userptr);
    if (ret != 0) {
        DBG("bo_create_userptr: "
            "ioctl failed with user ptr %p size 0x%lx, "
            "user flags 0x%lx\n", addr, size, flags);
        free(bo_gem);
        return NULL;
    }

    pthread_mutex_lock(&bufmgr_gem->lock);

    bo_gem->gem_handle = userptr.handle;
    bo_gem->bo.handle = bo_gem->gem_handle;
    bo_gem->bo.bufmgr = bufmgr;
    bo_gem->is_userptr = true;
    bo_gem->bo.virtual = addr;
    /* Save the address provided by user */
    bo_gem->user_virtual = addr;
    bo_gem->tiling_mode = I915_TILING_NONE;
    bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
    bo_gem->stride = 0;

    HASH_ADD(handle_hh, bufmgr_gem->handle_table,
             gem_handle, sizeof(bo_gem->gem_handle),
             bo_gem);

    bo_gem->name = name;
    bo_gem->validate_index = -1;
    bo_gem->used_as_reloc_target = false;
    bo_gem->has_error = false;
    bo_gem->reusable = false;

    drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
    pthread_mutex_unlock(&bufmgr_gem->lock);

    DBG("bo_create_userptr: "
        "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
        addr, bo_gem->gem_handle, bo_gem->name,
        size, stride, tiling_mode);

    return &bo_gem->bo;
}
bool
drm_bacon_has_userptr(drm_bacon_bufmgr *bufmgr)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
    int ret;
    void *ptr;
    long pgsz;
    struct drm_i915_gem_userptr userptr;

    pgsz = sysconf(_SC_PAGESIZE);
    assert(pgsz > 0);

    ret = posix_memalign(&ptr, pgsz, pgsz);
    if (ret) {
        DBG("Failed to get a page (%ld) for userptr detection!\n",
            pgsz);
        return false;
    }

    memclear(userptr);
    userptr.user_ptr = (__u64)(unsigned long)ptr;
    userptr.user_size = pgsz;

retry:
    ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
    if (ret) {
        if (errno == ENODEV && userptr.flags == 0) {
            userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
            goto retry;
        }
        free(ptr);
        return false;
    }

    /* We don't release the userptr bo here as we want to keep the
     * kernel mm tracking alive for our lifetime. The first time we
     * create a userptr object the kernel has to install a mmu_notifier
     * which is a heavyweight operation (e.g. it requires taking all
     * mm_locks and stop_machine()).
     */
    bufmgr_gem->userptr_active.ptr = ptr;
    bufmgr_gem->userptr_active.handle = userptr.handle;

    return true;
}
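/* The probe above first tries a synchronized userptr; a kernel without
 * mmu_notifier support reports ENODEV, in which case it retries once with
 * I915_USERPTR_UNSYNCHRONIZED before concluding userptr is unavailable.
 */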
/**
 * Returns a drm_bacon_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_bacon_bo *
drm_bacon_bo_gem_create_from_name(drm_bacon_bufmgr *bufmgr,
                                  const char *name,
                                  unsigned int handle)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
    drm_bacon_bo_gem *bo_gem;
    int ret;
    struct drm_gem_open open_arg;
    struct drm_i915_gem_get_tiling get_tiling;

    /* At the moment most applications only have a few named bo.
     * For instance, in a DRI client only the render buffers passed
     * between X and the client are named. And since X returns the
     * alternating names for the front/back buffer a linear search
     * provides a sufficiently fast match.
     */
    pthread_mutex_lock(&bufmgr_gem->lock);
    HASH_FIND(name_hh, bufmgr_gem->name_table,
              &handle, sizeof(handle), bo_gem);
    if (bo_gem) {
        drm_bacon_bo_reference(&bo_gem->bo);
        goto out;
    }

    memclear(open_arg);
    open_arg.name = handle;
    ret = drmIoctl(bufmgr_gem->fd,
                   DRM_IOCTL_GEM_OPEN,
                   &open_arg);
    if (ret != 0) {
        DBG("Couldn't reference %s handle 0x%08x: %s\n",
            name, handle, strerror(errno));
        bo_gem = NULL;
        goto out;
    }
    /* Now see if someone has used a prime handle to get this
     * object from the kernel before by looking through the list
     * again for a matching gem_handle
     */
    HASH_FIND(handle_hh, bufmgr_gem->handle_table,
              &open_arg.handle, sizeof(open_arg.handle), bo_gem);
    if (bo_gem) {
        drm_bacon_bo_reference(&bo_gem->bo);
        goto out;
    }

    bo_gem = calloc(1, sizeof(*bo_gem));
    if (!bo_gem)
        goto out;

    p_atomic_set(&bo_gem->refcount, 1);
    list_inithead(&bo_gem->vma_list);

    bo_gem->bo.size = open_arg.size;
    bo_gem->bo.offset = 0;
    bo_gem->bo.offset64 = 0;
    bo_gem->bo.virtual = NULL;
    bo_gem->bo.bufmgr = bufmgr;
    bo_gem->name = name;
    bo_gem->validate_index = -1;
    bo_gem->gem_handle = open_arg.handle;
    bo_gem->bo.handle = open_arg.handle;
    bo_gem->global_name = handle;
    bo_gem->reusable = false;

    HASH_ADD(handle_hh, bufmgr_gem->handle_table,
             gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
    HASH_ADD(name_hh, bufmgr_gem->name_table,
             global_name, sizeof(bo_gem->global_name), bo_gem);

    memclear(get_tiling);
    get_tiling.handle = bo_gem->gem_handle;
    ret = drmIoctl(bufmgr_gem->fd,
                   DRM_IOCTL_I915_GEM_GET_TILING,
                   &get_tiling);
    if (ret != 0)
        goto err_unref;

    bo_gem->tiling_mode = get_tiling.tiling_mode;
    bo_gem->swizzle_mode = get_tiling.swizzle_mode;
    /* XXX stride is unknown */
    drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
    DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

out:
    pthread_mutex_unlock(&bufmgr_gem->lock);
    return &bo_gem->bo;

err_unref:
    drm_bacon_gem_bo_free(&bo_gem->bo);
    pthread_mutex_unlock(&bufmgr_gem->lock);
    return NULL;
}
static void
drm_bacon_gem_bo_free(drm_bacon_bo *bo)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_gem_close close;
    int ret;

    list_del(&bo_gem->vma_list);
    if (bo_gem->mem_virtual) {
        VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
        drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
        bufmgr_gem->vma_count--;
    }
    if (bo_gem->wc_virtual) {
        VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
        drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
        bufmgr_gem->vma_count--;
    }
    if (bo_gem->gtt_virtual) {
        drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
        bufmgr_gem->vma_count--;
    }

    if (bo_gem->global_name)
        HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
    HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);

    /* Close this object */
    memclear(close);
    close.handle = bo_gem->gem_handle;
    ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
    if (ret != 0) {
        DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
            bo_gem->gem_handle, bo_gem->name, strerror(errno));
    }
    free(bo);
}
static void
drm_bacon_gem_bo_mark_mmaps_incoherent(drm_bacon_bo *bo)
{
#ifdef HAVE_VALGRIND
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

    if (bo_gem->mem_virtual)
        VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

    if (bo_gem->wc_virtual)
        VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);

    if (bo_gem->gtt_virtual)
        VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}
/** Frees all cached buffers significantly older than @time. */
static void
drm_bacon_gem_cleanup_bo_cache(drm_bacon_bufmgr_gem *bufmgr_gem, time_t time)
{
    int i;

    if (bufmgr_gem->time == time)
        return;

    for (i = 0; i < bufmgr_gem->num_buckets; i++) {
        struct drm_bacon_gem_bo_bucket *bucket =
            &bufmgr_gem->cache_bucket[i];

        while (!list_empty(&bucket->head)) {
            drm_bacon_bo_gem *bo_gem;

            bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                                bucket->head.next, head);
            if (time - bo_gem->free_time <= 1)
                break;

            list_del(&bo_gem->head);

            drm_bacon_gem_bo_free(&bo_gem->bo);
        }
    }

    bufmgr_gem->time = time;
}
static void drm_bacon_gem_bo_purge_vma_cache(drm_bacon_bufmgr_gem *bufmgr_gem)
{
    int limit;

    DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
        bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

    if (bufmgr_gem->vma_max < 0)
        return;

    /* We may need to evict a few entries in order to create new mmaps */
    limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
    if (limit < 0)
        limit = 0;

    while (bufmgr_gem->vma_count > limit) {
        drm_bacon_bo_gem *bo_gem;

        bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                            bufmgr_gem->vma_cache.next,
                            vma_list);
        assert(bo_gem->map_count == 0);
        list_delinit(&bo_gem->vma_list);

        if (bo_gem->mem_virtual) {
            drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
            bo_gem->mem_virtual = NULL;
            bufmgr_gem->vma_count--;
        }
        if (bo_gem->wc_virtual) {
            drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
            bo_gem->wc_virtual = NULL;
            bufmgr_gem->vma_count--;
        }
        if (bo_gem->gtt_virtual) {
            drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
            bo_gem->gtt_virtual = NULL;
            bufmgr_gem->vma_count--;
        }
    }
}
static void drm_bacon_gem_bo_close_vma(drm_bacon_bufmgr_gem *bufmgr_gem,
                                       drm_bacon_bo_gem *bo_gem)
{
    bufmgr_gem->vma_open--;
    list_addtail(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
    if (bo_gem->mem_virtual)
        bufmgr_gem->vma_count++;
    if (bo_gem->wc_virtual)
        bufmgr_gem->vma_count++;
    if (bo_gem->gtt_virtual)
        bufmgr_gem->vma_count++;
    drm_bacon_gem_bo_purge_vma_cache(bufmgr_gem);
}
static void drm_bacon_gem_bo_open_vma(drm_bacon_bufmgr_gem *bufmgr_gem,
                                      drm_bacon_bo_gem *bo_gem)
{
    bufmgr_gem->vma_open++;
    list_del(&bo_gem->vma_list);
    if (bo_gem->mem_virtual)
        bufmgr_gem->vma_count--;
    if (bo_gem->wc_virtual)
        bufmgr_gem->vma_count--;
    if (bo_gem->gtt_virtual)
        bufmgr_gem->vma_count--;
    drm_bacon_gem_bo_purge_vma_cache(bufmgr_gem);
}
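/* Bookkeeping note: vma_count only tracks mappings of buffers parked in
 * vma_cache; mappings of buffers currently in use are tracked via vma_open
 * instead. That is why open_vma subtracts a buffer's cached mappings when
 * it leaves the cache and close_vma adds them back before purging.
 */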
static void
drm_bacon_gem_bo_unreference_final(drm_bacon_bo *bo, time_t time)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_bacon_gem_bo_bucket *bucket;
    int i;

    /* Unreference all the target buffers */
    for (i = 0; i < bo_gem->reloc_count; i++) {
        if (bo_gem->reloc_target_info[i].bo != bo) {
            drm_bacon_gem_bo_unreference_locked_timed(bo_gem->
                                                      reloc_target_info[i].bo,
                                                      time);
        }
    }
    for (i = 0; i < bo_gem->softpin_target_count; i++)
        drm_bacon_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
                                                  time);
    bo_gem->reloc_count = 0;
    bo_gem->used_as_reloc_target = false;
    bo_gem->softpin_target_count = 0;

    DBG("bo_unreference final: %d (%s)\n",
        bo_gem->gem_handle, bo_gem->name);

    /* release memory associated with this object */
    if (bo_gem->reloc_target_info) {
        free(bo_gem->reloc_target_info);
        bo_gem->reloc_target_info = NULL;
    }
    if (bo_gem->relocs) {
        free(bo_gem->relocs);
        bo_gem->relocs = NULL;
    }
    if (bo_gem->softpin_target) {
        free(bo_gem->softpin_target);
        bo_gem->softpin_target = NULL;
        bo_gem->softpin_target_size = 0;
    }

    /* Clear any left-over mappings */
    if (bo_gem->map_count) {
        DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
        bo_gem->map_count = 0;
        drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
        drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
    }

    bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
    /* Put the buffer into our internal cache for reuse if we can. */
    if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
        drm_bacon_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
                                          I915_MADV_DONTNEED)) {
        bo_gem->free_time = time;

        bo_gem->name = NULL;
        bo_gem->validate_index = -1;

        list_addtail(&bo_gem->head, &bucket->head);
    } else {
        drm_bacon_gem_bo_free(bo);
    }
}
static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
                                                      time_t time)
{
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

    assert(p_atomic_read(&bo_gem->refcount) > 0);
    if (p_atomic_dec_zero(&bo_gem->refcount))
        drm_bacon_gem_bo_unreference_final(bo, time);
}
void
drm_bacon_bo_unreference(drm_bacon_bo *bo)
{
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

    if (bo == NULL)
        return;

    assert(p_atomic_read(&bo_gem->refcount) > 0);

    if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
        drm_bacon_bufmgr_gem *bufmgr_gem =
            (drm_bacon_bufmgr_gem *) bo->bufmgr;
        struct timespec time;

        clock_gettime(CLOCK_MONOTONIC, &time);

        pthread_mutex_lock(&bufmgr_gem->lock);

        if (p_atomic_dec_zero(&bo_gem->refcount)) {
            drm_bacon_gem_bo_unreference_final(bo, time.tv_sec);
            drm_bacon_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
        }

        pthread_mutex_unlock(&bufmgr_gem->lock);
    }
}
int
drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    if (bo_gem->is_userptr) {
        /* Return the same user ptr */
        bo->virtual = bo_gem->user_virtual;
        return 0;
    }

    pthread_mutex_lock(&bufmgr_gem->lock);

    if (bo_gem->map_count++ == 0)
        drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

    if (!bo_gem->mem_virtual) {
        struct drm_i915_gem_mmap mmap_arg;

        DBG("bo_map: %d (%s), map_count=%d\n",
            bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

        memclear(mmap_arg);
        mmap_arg.handle = bo_gem->gem_handle;
        mmap_arg.size = bo->size;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_MMAP,
                       &mmap_arg);
        if (ret != 0) {
            ret = -errno;
            DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                __FILE__, __LINE__, bo_gem->gem_handle,
                bo_gem->name, strerror(errno));
            if (--bo_gem->map_count == 0)
                drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
            pthread_mutex_unlock(&bufmgr_gem->lock);
            return ret;
        }
        VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
        bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
    }
    DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
        bo_gem->mem_virtual);
    bo->virtual = bo_gem->mem_virtual;

    memclear(set_domain);
    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_CPU;
    if (write_enable)
        set_domain.write_domain = I915_GEM_DOMAIN_CPU;
    else
        set_domain.write_domain = 0;
    ret = drmIoctl(bufmgr_gem->fd,
                   DRM_IOCTL_I915_GEM_SET_DOMAIN,
                   &set_domain);
    if (ret != 0) {
        DBG("%s:%d: Error setting to CPU domain %d: %s\n",
            __FILE__, __LINE__, bo_gem->gem_handle,
            strerror(errno));
    }

    if (write_enable)
        bo_gem->mapped_cpu_write = true;

    drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
    VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
    pthread_mutex_unlock(&bufmgr_gem->lock);

    return 0;
}
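/* Typical CPU upload path (a sketch; error handling and the data buffer
 * are elided/assumed):
 *
 *    drm_bacon_bo *bo = drm_bacon_bo_alloc(bufmgr, "vbo", 4096, 0);
 *    drm_bacon_bo_map(bo, 1);           // write_enable moves bo to CPU domain
 *    memcpy(bo->virtual, data, 4096);
 *    drm_bacon_bo_unmap(bo);            // issues SW_FINISH if it was a write map
 */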
static int
map_gtt(drm_bacon_bo *bo)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    int ret;

    if (bo_gem->is_userptr)
        return -EINVAL;

    if (bo_gem->map_count++ == 0)
        drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

    /* Get a mapping of the buffer if we haven't before. */
    if (bo_gem->gtt_virtual == NULL) {
        struct drm_i915_gem_mmap_gtt mmap_arg;

        DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
            bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

        memclear(mmap_arg);
        mmap_arg.handle = bo_gem->gem_handle;

        /* Get the fake offset back... */
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_MMAP_GTT,
                       &mmap_arg);
        if (ret != 0) {
            ret = -errno;
            DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
                __FILE__, __LINE__,
                bo_gem->gem_handle, bo_gem->name,
                strerror(errno));
            if (--bo_gem->map_count == 0)
                drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
            return ret;
        }

        /* and mmap it */
        bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                       MAP_SHARED, bufmgr_gem->fd,
                                       mmap_arg.offset);
        if (bo_gem->gtt_virtual == MAP_FAILED) {
            bo_gem->gtt_virtual = NULL;
            ret = -errno;
            DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                __FILE__, __LINE__,
                bo_gem->gem_handle, bo_gem->name,
                strerror(errno));
            if (--bo_gem->map_count == 0)
                drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
            return ret;
        }
    }

    bo->virtual = bo_gem->gtt_virtual;

    DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
        bo_gem->gtt_virtual);

    return 0;
}
int
drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    pthread_mutex_lock(&bufmgr_gem->lock);

    ret = map_gtt(bo);
    if (ret != 0) {
        pthread_mutex_unlock(&bufmgr_gem->lock);
        return ret;
    }

    /* Now move it to the GTT domain so that the GPU and CPU
     * caches are flushed and the GPU isn't actively using the
     * buffer.
     *
     * The pagefault handler does this domain change for us when
     * it has unbound the BO from the GTT, but it's up to us to
     * tell it when we're about to use things if we had done
     * rendering and it still happens to be bound to the GTT.
     */
    memclear(set_domain);
    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = I915_GEM_DOMAIN_GTT;
    ret = drmIoctl(bufmgr_gem->fd,
                   DRM_IOCTL_I915_GEM_SET_DOMAIN,
                   &set_domain);
    if (ret != 0) {
        DBG("%s:%d: Error setting domain %d: %s\n",
            __FILE__, __LINE__, bo_gem->gem_handle,
            strerror(errno));
    }

    drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
    VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
    pthread_mutex_unlock(&bufmgr_gem->lock);

    return 0;
}
/**
 * Performs a mapping of the buffer object like the normal GTT
 * mapping, but avoids waiting for the GPU to be done reading from or
 * rendering to the buffer.
 *
 * This is used in the implementation of GL_ARB_map_buffer_range: The
 * user asks to create a buffer, then does a mapping, fills some
 * space, runs a drawing command, then asks to map it again without
 * synchronizing because it guarantees that it won't write over the
 * data that the GPU is busy using (or, more specifically, that if it
 * does write over the data, it acknowledges that rendering is
 * undefined).
 */
int
drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
#ifdef HAVE_VALGRIND
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
#endif
    int ret;

    /* If the CPU cache isn't coherent with the GTT, then use a
     * regular synchronized mapping.  The problem is that we don't
     * track where the buffer was last used on the CPU side in
     * terms of drm_bacon_bo_map vs drm_bacon_gem_bo_map_gtt, so
     * we would potentially corrupt the buffer even when the user
     * does reasonable things.
     */
    if (!bufmgr_gem->has_llc)
        return drm_bacon_gem_bo_map_gtt(bo);

    pthread_mutex_lock(&bufmgr_gem->lock);

    ret = map_gtt(bo);
    if (ret == 0) {
        drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
        VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
    }

    pthread_mutex_unlock(&bufmgr_gem->lock);

    return ret;
}
int
drm_bacon_bo_unmap(drm_bacon_bo *bo)
{
    drm_bacon_bufmgr_gem *bufmgr_gem;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    int ret = 0;

    if (bo == NULL)
        return 0;

    if (bo_gem->is_userptr)
        return 0;

    bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;

    pthread_mutex_lock(&bufmgr_gem->lock);

    if (bo_gem->map_count <= 0) {
        DBG("attempted to unmap an unmapped bo\n");
        pthread_mutex_unlock(&bufmgr_gem->lock);
        /* Preserve the old behaviour of just treating this as a
         * no-op rather than reporting the error.
         */
        return 0;
    }

    if (bo_gem->mapped_cpu_write) {
        struct drm_i915_gem_sw_finish sw_finish;

        /* Cause a flush to happen if the buffer's pinned for
         * scanout, so the results show up in a timely manner.
         * Unlike GTT set domains, this only does work if the
         * buffer should be scanout-related.
         */
        memclear(sw_finish);
        sw_finish.handle = bo_gem->gem_handle;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_SW_FINISH,
                       &sw_finish);
        ret = ret == -1 ? -errno : 0;

        bo_gem->mapped_cpu_write = false;
    }

    /* We need to unmap after every invocation as we cannot track
     * an open vma for every bo as that will exhaust the system
     * limits and cause later failures.
     */
    if (--bo_gem->map_count == 0) {
        drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
        drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
        bo->virtual = NULL;
    }
    pthread_mutex_unlock(&bufmgr_gem->lock);

    return ret;
}
int
drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
                     unsigned long size, const void *data)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_i915_gem_pwrite pwrite;
    int ret;

    if (bo_gem->is_userptr)
        return -EINVAL;

    memclear(pwrite);
    pwrite.handle = bo_gem->gem_handle;
    pwrite.offset = offset;
    pwrite.size = size;
    pwrite.data_ptr = (uint64_t) (uintptr_t) data;
    ret = drmIoctl(bufmgr_gem->fd,
                   DRM_IOCTL_I915_GEM_PWRITE,
                   &pwrite);
    if (ret != 0) {
        ret = -errno;
        DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
            __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
            (int)size, strerror(errno));
    }

    return ret;
}
int
drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
                         unsigned long size, void *data)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_i915_gem_pread pread;
    int ret;

    if (bo_gem->is_userptr)
        return -EINVAL;

    memclear(pread);
    pread.handle = bo_gem->gem_handle;
    pread.offset = offset;
    pread.size = size;
    pread.data_ptr = (uint64_t) (uintptr_t) data;
    ret = drmIoctl(bufmgr_gem->fd,
                   DRM_IOCTL_I915_GEM_PREAD,
                   &pread);
    if (ret != 0) {
        ret = -errno;
        DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
            __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
            (int)size, strerror(errno));
    }

    return ret;
}
/** Waits for all GPU rendering with the object to have completed. */
void
drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
{
    drm_bacon_gem_bo_start_gtt_access(bo, 1);
}
/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative return
 * value describes the error. Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to drm_bacon_gem_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait). This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle. Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise, upgrade to latest stable kernels if this is the case.
 */
int
drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_i915_gem_wait wait;
    int ret;

    if (!bufmgr_gem->has_wait_timeout) {
        DBG("%s:%d: Timed wait is not supported. Falling back to "
            "infinite wait\n", __FILE__, __LINE__);
        if (timeout_ns) {
            drm_bacon_bo_wait_rendering(bo);
            return 0;
        } else {
            return drm_bacon_bo_busy(bo) ? -ETIME : 0;
        }
    }

    memclear(wait);
    wait.bo_handle = bo_gem->gem_handle;
    wait.timeout_ns = timeout_ns;
    ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
    if (ret == -1)
        return -errno;

    return ret;
}
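/* Example: bounded wait for the GPU to release a buffer (sketch):
 *
 *    if (drm_bacon_gem_bo_wait(bo, 1000000000ll) == -ETIME)
 *        ;  // GPU still busy after one second; fall back or retry
 */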
/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_bacon_gem_bo_map_gtt().
 *
 * In combination with drm_bacon_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    memclear(set_domain);
    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
    ret = drmIoctl(bufmgr_gem->fd,
                   DRM_IOCTL_I915_GEM_SET_DOMAIN,
                   &set_domain);
    if (ret != 0) {
        DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
            __FILE__, __LINE__, bo_gem->gem_handle,
            set_domain.read_domains, set_domain.write_domain,
            strerror(errno));
    }
}
static void
drm_bacon_bufmgr_gem_destroy(drm_bacon_bufmgr *bufmgr)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
    struct drm_gem_close close_bo;
    int i, ret;

    free(bufmgr_gem->exec2_objects);
    free(bufmgr_gem->exec_bos);

    pthread_mutex_destroy(&bufmgr_gem->lock);

    /* Free any cached buffer objects we were going to reuse */
    for (i = 0; i < bufmgr_gem->num_buckets; i++) {
        struct drm_bacon_gem_bo_bucket *bucket =
            &bufmgr_gem->cache_bucket[i];
        drm_bacon_bo_gem *bo_gem;

        while (!list_empty(&bucket->head)) {
            bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
                                bucket->head.next, head);
            list_del(&bo_gem->head);

            drm_bacon_gem_bo_free(&bo_gem->bo);
        }
    }

    /* Release userptr bo kept hanging around for optimisation. */
    if (bufmgr_gem->userptr_active.ptr) {
        memclear(close_bo);
        close_bo.handle = bufmgr_gem->userptr_active.handle;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
        free(bufmgr_gem->userptr_active.ptr);
        if (ret)
            fprintf(stderr,
                    "Failed to release test userptr object! (%d) "
                    "i915 kernel driver may not be sane!\n", errno);
    }

    free(bufmgr);
}
/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
                 drm_bacon_bo *target_bo, uint32_t target_offset,
                 uint32_t read_domains, uint32_t write_domain)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;

    if (bo_gem->has_error)
        return -ENOMEM;

    if (target_bo_gem->has_error) {
        bo_gem->has_error = true;
        return -ENOMEM;
    }

    /* Create a new relocation list if needed */
    if (bo_gem->relocs == NULL && drm_bacon_setup_reloc_list(bo))
        return -ENOMEM;

    /* Check overflow */
    assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

    assert(offset <= bo->size - 4);
    assert((write_domain & (write_domain - 1)) == 0);

    /* Make sure that we're not adding a reloc to something whose size has
     * already been accounted for.
     */
    assert(!bo_gem->used_as_reloc_target);
    if (target_bo_gem != bo_gem) {
        target_bo_gem->used_as_reloc_target = true;
        bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
    }

    bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
    if (target_bo != bo)
        drm_bacon_bo_reference(target_bo);

    bo_gem->relocs[bo_gem->reloc_count].offset = offset;
    bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
    bo_gem->relocs[bo_gem->reloc_count].target_handle =
        target_bo_gem->gem_handle;
    bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
    bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
    bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
    bo_gem->reloc_count++;

    return 0;
}
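/* Example (sketch): making a batch reference a vertex buffer records
 *
 *    drm_bacon_bo_emit_reloc(batch_bo, state_offset,
 *                            vbo, 0,
 *                            I915_GEM_DOMAIN_VERTEX, 0);
 *
 * where batch_bo/state_offset/vbo are the caller's; vbo->offset64 is stored
 * as the presumed_offset the kernel compares against at execbuffer time.
 */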
static int
drm_bacon_gem_bo_add_softpin_target(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;

    if (bo_gem->has_error)
        return -ENOMEM;

    if (target_bo_gem->has_error) {
        bo_gem->has_error = true;
        return -ENOMEM;
    }

    if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
        return -EINVAL;
    if (target_bo_gem == bo_gem)
        return -EINVAL;

    if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
        int new_size = bo_gem->softpin_target_size * 2;
        if (new_size == 0)
            new_size = bufmgr_gem->max_relocs;

        bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
                                         sizeof(drm_bacon_bo *));
        if (!bo_gem->softpin_target)
            return -ENOMEM;

        bo_gem->softpin_target_size = new_size;
    }
    bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
    drm_bacon_bo_reference(target_bo);
    bo_gem->softpin_target_count++;

    return 0;
}
int
drm_bacon_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
                        drm_bacon_bo *target_bo, uint32_t target_offset,
                        uint32_t read_domains, uint32_t write_domain)
{
    drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *)target_bo;

    if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
        return drm_bacon_gem_bo_add_softpin_target(bo, target_bo);

    return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
                            read_domains, write_domain);
}
int
drm_bacon_gem_bo_get_reloc_count(drm_bacon_bo *bo)
{
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

    return bo_gem->reloc_count;
}
/**
 * Removes existing relocation entries in the BO after "start".
 *
 * This allows a user to avoid a two-step process for state setup with
 * counting up all the buffer objects and doing a
 * drm_bacon_bufmgr_check_aperture_space() before emitting any of the
 * relocations for the state setup.  Instead, save the state of the
 * batchbuffer including drm_bacon_gem_get_reloc_count(), emit all the
 * state, and then check if it still fits in the aperture.
 *
 * Any further drm_bacon_bufmgr_check_aperture_space() queries
 * involving this buffer in the tree are undefined after this call.
 *
 * This also removes all softpinned targets being referenced by the BO.
 */
void
drm_bacon_gem_bo_clear_relocs(drm_bacon_bo *bo, int start)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    int i;
    struct timespec time;

    clock_gettime(CLOCK_MONOTONIC, &time);

    assert(bo_gem->reloc_count >= start);

    /* Unreference the cleared target buffers */
    pthread_mutex_lock(&bufmgr_gem->lock);

    for (i = start; i < bo_gem->reloc_count; i++) {
        drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) bo_gem->reloc_target_info[i].bo;
        if (&target_bo_gem->bo != bo) {
            drm_bacon_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
                                                      time.tv_sec);
        }
    }
    bo_gem->reloc_count = start;

    for (i = 0; i < bo_gem->softpin_target_count; i++) {
        drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) bo_gem->softpin_target[i];
        drm_bacon_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
    }
    bo_gem->softpin_target_count = 0;

    pthread_mutex_unlock(&bufmgr_gem->lock);
}
static void
drm_bacon_gem_bo_process_reloc2(drm_bacon_bo *bo)
{
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;
    int i;

    if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
        return;

    for (i = 0; i < bo_gem->reloc_count; i++) {
        drm_bacon_bo *target_bo = bo_gem->reloc_target_info[i].bo;

        if (target_bo == bo)
            continue;

        drm_bacon_gem_bo_mark_mmaps_incoherent(bo);

        /* Continue walking the tree depth-first. */
        drm_bacon_gem_bo_process_reloc2(target_bo);

        /* Add the target to the validate list */
        drm_bacon_add_validate_buffer2(target_bo);
    }

    for (i = 0; i < bo_gem->softpin_target_count; i++) {
        drm_bacon_bo *target_bo = bo_gem->softpin_target[i];

        if (target_bo == bo)
            continue;

        drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
        drm_bacon_gem_bo_process_reloc2(target_bo);
        drm_bacon_add_validate_buffer2(target_bo);
    }
}
static void
drm_bacon_update_buffer_offsets2 (drm_bacon_bufmgr_gem *bufmgr_gem)
{
    int i;

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        drm_bacon_bo *bo = bufmgr_gem->exec_bos[i];
        drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;

        /* Update the buffer offset */
        if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
            /* If we're seeing a softpinned object here it means that the
             * kernel has relocated our object... Indicating a programming
             * error.
             */
            assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
            DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
                bo_gem->gem_handle, bo_gem->name,
                upper_32_bits(bo->offset64),
                lower_32_bits(bo->offset64),
                upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
                lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
            bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
            bo->offset = bufmgr_gem->exec2_objects[i].offset;
        }
    }
}
static int
do_exec2(drm_bacon_bo *bo, int used, drm_bacon_context *ctx,
         int in_fence, int *out_fence,
         unsigned int flags)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bo->bufmgr;
    struct drm_i915_gem_execbuffer2 execbuf;
    int ret = 0;
    int i;

    if (to_bo_gem(bo)->has_error)
        return -ENOMEM;

    switch (flags & 0x7) {
    default:
        return -EINVAL;
    case I915_EXEC_BLT:
        if (!bufmgr_gem->has_blt)
            return -EINVAL;
        break;
    case I915_EXEC_BSD:
        if (!bufmgr_gem->has_bsd)
            return -EINVAL;
        break;
    case I915_EXEC_VEBOX:
        if (!bufmgr_gem->has_vebox)
            return -EINVAL;
        break;
    case I915_EXEC_RENDER:
    case I915_EXEC_DEFAULT:
        break;
    }

    pthread_mutex_lock(&bufmgr_gem->lock);
    /* Update indices and set up the validate list. */
    drm_bacon_gem_bo_process_reloc2(bo);

    /* Add the batch buffer to the validation list.  There are no relocations
     * pointing to it.
     */
    drm_bacon_add_validate_buffer2(bo);

    memclear(execbuf);
    execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
    execbuf.buffer_count = bufmgr_gem->exec_count;
    execbuf.batch_start_offset = 0;
    execbuf.batch_len = used;
    execbuf.cliprects_ptr = 0;
    execbuf.num_cliprects = 0;
    execbuf.DR1 = 0;
    execbuf.DR4 = 0;
    execbuf.flags = flags;
    if (ctx == NULL)
        i915_execbuffer2_set_context_id(execbuf, 0);
    else
        i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
    execbuf.rsvd2 = 0;
    if (in_fence != -1) {
        execbuf.rsvd2 = in_fence;
        execbuf.flags |= I915_EXEC_FENCE_IN;
    }
    if (out_fence != NULL) {
        *out_fence = -1;
        execbuf.flags |= I915_EXEC_FENCE_OUT;
    }

    if (bufmgr_gem->no_exec)
        goto skip_execution;

    ret = drmIoctl(bufmgr_gem->fd,
                   DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
                   &execbuf);
    if (ret != 0) {
        ret = -errno;
        if (ret == -ENOSPC) {
            DBG("Execbuffer fails to pin. "
                "Estimate: %u. Actual: %u. Available: %u\n",
                drm_bacon_gem_estimate_batch_space(bufmgr_gem->exec_bos,
                                                   bufmgr_gem->exec_count),
                drm_bacon_gem_compute_batch_space(bufmgr_gem->exec_bos,
                                                  bufmgr_gem->exec_count),
                (unsigned int) bufmgr_gem->gtt_size);
        }
    }
    drm_bacon_update_buffer_offsets2(bufmgr_gem);

    if (ret == 0 && out_fence != NULL)
        *out_fence = execbuf.rsvd2 >> 32;

skip_execution:
    if (INTEL_DEBUG & DEBUG_BUFMGR)
        drm_bacon_gem_dump_validation_list(bufmgr_gem);

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        drm_bacon_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);

        bo_gem->idle = false;

        /* Disconnect the buffer from the validate list */
        bo_gem->validate_index = -1;
        bufmgr_gem->exec_bos[i] = NULL;
    }
    bufmgr_gem->exec_count = 0;
    pthread_mutex_unlock(&bufmgr_gem->lock);

    return ret;
}
int
drm_bacon_bo_exec(drm_bacon_bo *bo, int used)
{
    return do_exec2(bo, used, NULL, -1, NULL, I915_EXEC_RENDER);
}

int
drm_bacon_bo_mrb_exec(drm_bacon_bo *bo, int used, unsigned int flags)
{
    return do_exec2(bo, used, NULL, -1, NULL, flags);
}

int
drm_bacon_gem_bo_context_exec(drm_bacon_bo *bo, drm_bacon_context *ctx,
                              int used, unsigned int flags)
{
    return do_exec2(bo, used, ctx, -1, NULL, flags);
}

int
drm_bacon_gem_bo_fence_exec(drm_bacon_bo *bo,
                            drm_bacon_context *ctx,
                            int used,
                            int in_fence,
                            int *out_fence,
                            unsigned int flags)
{
    return do_exec2(bo, used, ctx, in_fence, out_fence, flags);
}
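/* The fence variant threads a sync_file through execbuf.rsvd2: the lower
 * 32 bits carry the input fence fd when I915_EXEC_FENCE_IN is set, and with
 * I915_EXEC_FENCE_OUT the kernel returns the output fence fd in the upper
 * 32 bits, which do_exec2() extracts as execbuf.rsvd2 >> 32.
 */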
static int
drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    struct drm_i915_gem_set_tiling set_tiling;
    int ret;

    if (bo_gem->global_name == 0 &&
        tiling_mode == bo_gem->tiling_mode &&
        stride == bo_gem->stride)
        return 0;

    memset(&set_tiling, 0, sizeof(set_tiling));
    do {
        /* set_tiling is slightly broken and overwrites the
         * input on the error path, so we have to open code
         * drmIoctl.
         */
        set_tiling.handle = bo_gem->gem_handle;
        set_tiling.tiling_mode = tiling_mode;
        set_tiling.stride = stride;

        ret = ioctl(bufmgr_gem->fd,
                    DRM_IOCTL_I915_GEM_SET_TILING,
                    &set_tiling);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
    if (ret == -1)
        return -errno;

    bo_gem->tiling_mode = set_tiling.tiling_mode;
    bo_gem->swizzle_mode = set_tiling.swizzle_mode;
    bo_gem->stride = set_tiling.stride;
    return 0;
}
int
drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
                        uint32_t stride)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
    int ret;

    /* Tiling with userptr surfaces is not supported
     * on all hardware so refuse it for time being.
     */
    if (bo_gem->is_userptr)
        return -EINVAL;

    /* Linear buffers have no stride. By ensuring that we only ever use
     * stride 0 with linear buffers, we simplify our code.
     */
    if (*tiling_mode == I915_TILING_NONE)
        stride = 0;

    ret = drm_bacon_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
    if (ret == 0)
        drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

    *tiling_mode = bo_gem->tiling_mode;
    return ret;
}
int
drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
                        uint32_t *swizzle_mode)
{
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

    *tiling_mode = bo_gem->tiling_mode;
    *swizzle_mode = bo_gem->swizzle_mode;
    return 0;
}
int
drm_bacon_bo_set_softpin_offset(drm_bacon_bo *bo, uint64_t offset)
{
    drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

    bo->offset64 = offset;
    bo->offset = offset;
    bo_gem->kflags |= EXEC_OBJECT_PINNED;

    return 0;
}
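/* Once EXEC_OBJECT_PINNED is set here, drm_bacon_bo_emit_reloc() routes
 * references to this buffer through the softpin path instead of emitting
 * kernel relocations, and drm_bacon_update_buffer_offsets2() asserts that
 * the kernel never moves it from the chosen address.
 */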
drm_bacon_bo *
drm_bacon_bo_gem_create_from_prime(drm_bacon_bufmgr *bufmgr, int prime_fd, int size)
{
    drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
    int ret;
    uint32_t handle;
    drm_bacon_bo_gem *bo_gem;
    struct drm_i915_gem_get_tiling get_tiling;

    pthread_mutex_lock(&bufmgr_gem->lock);
    ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
    if (ret) {
        DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
        pthread_mutex_unlock(&bufmgr_gem->lock);
        return NULL;
    }

    /*
     * See if the kernel has already returned this buffer to us. Just as
     * for named buffers, we must not create two bo's pointing at the same
     * kernel object
     */
    HASH_FIND(handle_hh, bufmgr_gem->handle_table,
              &handle, sizeof(handle), bo_gem);
    if (bo_gem) {
        drm_bacon_bo_reference(&bo_gem->bo);
        goto out;
    }

    bo_gem = calloc(1, sizeof(*bo_gem));
    if (!bo_gem)
        goto out;

    p_atomic_set(&bo_gem->refcount, 1);
    list_inithead(&bo_gem->vma_list);

    /* Determine size of bo.  The fd-to-handle ioctl really should
     * return the size, but it doesn't.  If we have kernel 3.12 or
     * later, we can lseek on the prime fd to get the size.  Older
     * kernels will just fail, in which case we fall back to the
     * provided (estimated or guess size). */
    ret = lseek(prime_fd, 0, SEEK_END);
    if (ret != -1)
        bo_gem->bo.size = ret;
    else
        bo_gem->bo.size = size;

    bo_gem->bo.handle = handle;
    bo_gem->bo.bufmgr = bufmgr;

    bo_gem->gem_handle = handle;
    HASH_ADD(handle_hh, bufmgr_gem->handle_table,
             gem_handle, sizeof(bo_gem->gem_handle), bo_gem);

    bo_gem->name = "prime";
    bo_gem->validate_index = -1;
    bo_gem->used_as_reloc_target = false;
    bo_gem->has_error = false;
    bo_gem->reusable = false;

    memclear(get_tiling);
    get_tiling.handle = bo_gem->gem_handle;
    if (drmIoctl(bufmgr_gem->fd,
                 DRM_IOCTL_I915_GEM_GET_TILING,
                 &get_tiling))
        goto err;

    bo_gem->tiling_mode = get_tiling.tiling_mode;
    bo_gem->swizzle_mode = get_tiling.swizzle_mode;
    /* XXX stride is unknown */
    drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

out:
    pthread_mutex_unlock(&bufmgr_gem->lock);
    return &bo_gem->bo;

err:
    drm_bacon_gem_bo_free(&bo_gem->bo);
    pthread_mutex_unlock(&bufmgr_gem->lock);
    return NULL;
}
int
drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
			       DRM_CLOEXEC, prime_fd) != 0)
		return -errno;

	bo_gem->reusable = false;

	return 0;
}

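/*
 * Usage sketch (hypothetical): sharing a buffer with another process or
 * driver via a dma-buf fd, and importing one back. The transport helper
 * send_fd_over_socket() is hypothetical.
 *
 *   int fd;
 *   if (drm_bacon_bo_gem_export_to_prime(bo, &fd) == 0) {
 *           send_fd_over_socket(sock, fd);
 *           close(fd);
 *   }
 *   // On import, the size argument is only a fallback for kernels where
 *   // lseek() on the prime fd fails.
 *   drm_bacon_bo *imported =
 *           drm_bacon_bo_gem_create_from_prime(bufmgr, recv_fd, 4096);
 */
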
int
drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t *name)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (!bo_gem->global_name) {
		struct drm_gem_flink flink;

		memclear(flink);
		flink.handle = bo_gem->gem_handle;
		if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
			return -errno;

		pthread_mutex_lock(&bufmgr_gem->lock);
		if (!bo_gem->global_name) {
			bo_gem->global_name = flink.name;
			bo_gem->reusable = false;

			HASH_ADD(name_hh, bufmgr_gem->name_table,
				 global_name, sizeof(bo_gem->global_name),
				 bo_gem);
		}
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}

	*name = bo_gem->global_name;
	return 0;
}

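/*
 * Usage sketch (hypothetical): publishing a global (flink) name, e.g. for
 * legacy GEM sharing. Flinked buffers are removed from the reuse cache.
 * tell_compositor() is a hypothetical IPC helper.
 *
 *   uint32_t name;
 *   if (drm_bacon_bo_flink(bo, &name) == 0)
 *           tell_compositor(name);
 */
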
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_bacon_bufmgr_gem_enable_reuse(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}

/**
 * Disables implicit synchronisation before executing the bo.
 *
 * This will cause rendering corruption unless you correctly manage explicit
 * fences for all rendering involving this buffer - including use by others.
 * Disabling the implicit serialisation is only required if that serialisation
 * is too coarse (for example, you have split the buffer into many
 * non-overlapping regions and are sharing the whole buffer between concurrent
 * independent command streams).
 *
 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
 * which can be checked using drm_bacon_bufmgr_gem_can_disable_implicit_sync(),
 * or subsequent execbufs involving the bo will generate EINVAL.
 */
void
drm_bacon_gem_bo_disable_implicit_sync(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	bo_gem->kflags |= EXEC_OBJECT_ASYNC;
}

/**
 * Enables implicit synchronisation before executing the bo.
 *
 * This is the default behaviour of the kernel, to wait upon prior writes
 * completing on the object before rendering with it, or to wait for prior
 * reads to complete before writing into the object.
 * drm_bacon_gem_bo_disable_implicit_sync() can stop this behaviour, telling
 * the kernel never to insert a stall before using the object. Then this
 * function can be used to restore the implicit sync before subsequent
 * rendering.
 */
void
drm_bacon_gem_bo_enable_implicit_sync(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
}

/**
 * Query whether the kernel supports disabling of its implicit synchronisation
 * before execbuf. See drm_bacon_gem_bo_disable_implicit_sync().
 */
int
drm_bacon_bufmgr_gem_can_disable_implicit_sync(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;

	return bufmgr_gem->has_exec_async;
}

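/*
 * Usage sketch (hypothetical): opting a shared buffer out of the kernel's
 * implicit serialisation, guarded by the capability query above.
 *
 *   if (drm_bacon_bufmgr_gem_can_disable_implicit_sync(bufmgr)) {
 *           drm_bacon_gem_bo_disable_implicit_sync(bo);
 *           // ... caller now orders access with explicit fences ...
 *           drm_bacon_gem_bo_enable_implicit_sync(bo);  // restore default
 *   }
 */
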
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_bacon_gem_bo_get_aperture_space(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_bacon_gem_bo_get_aperture_space(bo_gem->
							reloc_target_info[i].bo);

	return total;
}

/**
 * Clear the flag set by drm_bacon_gem_bo_get_aperture_space() so we're ready
 * for the next drm_bacon_bufmgr_check_aperture_space() call.
 */
static void
drm_bacon_gem_bo_clear_aperture_space_flag(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_bacon_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_bacon_gem_estimate_batch_space(drm_bacon_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo_array[i];

		total += bo_gem->reloc_tree_size;
	}

	return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_bacon_gem_compute_batch_space(drm_bacon_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_bacon_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet).  We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers.  Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_bacon_bo_gem *bo_gem =
			    (drm_bacon_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_bacon_gem_bo_clear_aperture_space_flag(bo_array[i]);

	return total;
}

/**
 * Return nonzero (-ENOSPC) if the batchbuffer should be flushed before
 * attempting to emit rendering referencing the buffers pointed to by
 * bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
int
drm_bacon_bufmgr_check_aperture_space(drm_bacon_bo **bo_array, int count)
{
	drm_bacon_bufmgr_gem *bufmgr_gem =
	    (drm_bacon_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;

	total = drm_bacon_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_bacon_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}

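/*
 * Usage sketch (hypothetical): flushing the batch early when the buffers
 * referenced by the next draw would overflow the aperture threshold.
 * flush_batch() is a hypothetical caller helper.
 *
 *   drm_bacon_bo *check[] = { batch_bo, texture_bo, render_target_bo };
 *   if (drm_bacon_bufmgr_check_aperture_space(check, 3) != 0)
 *           flush_batch(ctx);
 */
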
/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers.
 */
int
drm_bacon_bo_disable_reuse(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}

int
drm_bacon_bo_is_reusable(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	return bo_gem->reusable;
}

static int
_drm_bacon_gem_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_bacon_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						 target_bo))
			return 1;
	}

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		if (bo_gem->softpin_target[i] == target_bo)
			return 1;
		if (_drm_bacon_gem_bo_references(bo_gem->softpin_target[i], target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
int
drm_bacon_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
	drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_bacon_gem_bo_references(bo, target_bo);
	return 0;
}

static void
add_bucket(drm_bacon_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	list_inithead(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}

static void
init_cache_buckets(drm_bacon_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway.)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}

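/*
 * Worked example of the bucket sizes produced above: for the power-of-two
 * size 16384, the loop adds buckets of 16384, 20480, 24576 and 28672 bytes
 * (1x, 1.25x, 1.5x and 1.75x), then doubles to 32768 and repeats, up to
 * cache_max_size (64 MiB).
 */
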
void
drm_bacon_bufmgr_gem_set_vma_cache_size(drm_bacon_bufmgr *bufmgr, int limit)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_bacon_gem_bo_purge_vma_cache(bufmgr_gem);
}

static int
parse_devid_override(const char *devid_override)
{
	static const struct {
		const char *name;
		int pci_id;
	} name_map[] = {
		{ "brw", PCI_CHIP_I965_GM },
		{ "g4x", PCI_CHIP_GM45_GM },
		{ "ilk", PCI_CHIP_ILD_G },
		{ "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
		{ "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
		{ "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
		{ "byt", PCI_CHIP_VALLEYVIEW_3 },
		{ "bdw", 0x1620 | BDW_ULX },
		{ "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
		{ "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(name_map); i++) {
		if (!strcmp(name_map[i].name, devid_override))
			return name_map[i].pci_id;
	}

	/* Not a known alias: parse a raw PCI ID, decimal or 0x-prefixed hex
	 * (strtol with base 0, rather than strtod, so the value is parsed
	 * as an integer).
	 */
	return strtol(devid_override, NULL, 0);
}

/**
 * Get the PCI ID for the device.  This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_bacon_bufmgr_gem *bufmgr_gem)
{
	char *devid_override;
	int devid = 0;
	int ret;
	drm_i915_getparam_t gp;

	if (geteuid() == getuid()) {
		devid_override = getenv("INTEL_DEVID_OVERRIDE");
		if (devid_override) {
			bufmgr_gem->no_exec = true;
			return parse_devid_override(devid_override);
		}
	}

	memclear(gp);
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}
	return devid;
}

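/*
 * Usage sketch: overriding the PCI ID from the environment, e.g. when
 * inspecting compiler output for another platform:
 *
 *   $ INTEL_DEVID_OVERRIDE=skl ./my_tool      # alias from name_map above
 *   $ INTEL_DEVID_OVERRIDE=0x1912 ./my_tool   # raw PCI ID also accepted
 *
 * Note the override also sets bufmgr_gem->no_exec, so batches are never
 * actually submitted to the kernel.
 */
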
int
drm_bacon_bufmgr_gem_get_devid(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;

	return bufmgr_gem->pci_device;
}

drm_bacon_context *
drm_bacon_gem_context_create(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
	struct drm_i915_gem_context_create create;
	drm_bacon_context *context = NULL;
	int ret;

	context = calloc(1, sizeof(*context));
	if (!context)
		return NULL;

	memclear(create);
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
	if (ret != 0) {
		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
		    strerror(errno));
		free(context);
		return NULL;
	}

	context->ctx_id = create.ctx_id;
	context->bufmgr = bufmgr;

	return context;
}

int
drm_bacon_gem_context_get_id(drm_bacon_context *ctx, uint32_t *ctx_id)
{
	if (ctx == NULL)
		return -EINVAL;

	*ctx_id = ctx->ctx_id;

	return 0;
}

void
drm_bacon_gem_context_destroy(drm_bacon_context *ctx)
{
	drm_bacon_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_context_destroy destroy;
	int ret;

	if (ctx == NULL)
		return;

	memclear(destroy);

	bufmgr_gem = (drm_bacon_bufmgr_gem *)ctx->bufmgr;
	destroy.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
		       &destroy);
	if (ret != 0)
		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
			strerror(errno));

	free(ctx);
}

int
drm_bacon_get_reset_stats(drm_bacon_context *ctx,
			  uint32_t *reset_count,
			  uint32_t *active,
			  uint32_t *pending)
{
	drm_bacon_bufmgr_gem *bufmgr_gem;
	struct drm_i915_reset_stats stats;
	int ret;

	if (ctx == NULL)
		return -EINVAL;

	memclear(stats);

	bufmgr_gem = (drm_bacon_bufmgr_gem *)ctx->bufmgr;
	stats.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_RESET_STATS,
		       &stats);
	if (ret == 0) {
		if (reset_count != NULL)
			*reset_count = stats.reset_count;

		if (active != NULL)
			*active = stats.batch_active;

		if (pending != NULL)
			*pending = stats.batch_pending;
	}

	return ret;
}

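/*
 * Usage sketch (hypothetical): polling for GPU hangs attributed to this
 * context, e.g. to implement a robustness reset notification.
 * report_context_lost() is a hypothetical callback.
 *
 *   uint32_t resets, active, pending;
 *   if (drm_bacon_get_reset_stats(ctx, &resets, &active, &pending) == 0 &&
 *       active > 0)
 *           report_context_lost();
 */
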
int
drm_bacon_reg_read(drm_bacon_bufmgr *bufmgr,
		   uint32_t offset,
		   uint64_t *result)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
	struct drm_i915_reg_read reg_read;
	int ret;

	memclear(reg_read);
	reg_read.offset = offset;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

	*result = reg_read.val;
	return ret;
}

static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct list_head bufmgr_list = { &bufmgr_list, &bufmgr_list };

static drm_bacon_bufmgr_gem *
drm_bacon_bufmgr_gem_find(int fd)
{
	list_for_each_entry(drm_bacon_bufmgr_gem,
			    bufmgr_gem, &bufmgr_list, managers) {
		if (bufmgr_gem->fd == fd) {
			p_atomic_inc(&bufmgr_gem->refcount);
			return bufmgr_gem;
		}
	}

	return NULL;
}

void
drm_bacon_bufmgr_destroy(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;

	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
		pthread_mutex_lock(&bufmgr_list_mutex);

		if (p_atomic_dec_zero(&bufmgr_gem->refcount)) {
			list_del(&bufmgr_gem->managers);
			drm_bacon_bufmgr_gem_destroy(bufmgr);
		}

		pthread_mutex_unlock(&bufmgr_list_mutex);
	}
}

void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (bo_gem->gtt_virtual)
		return bo_gem->gtt_virtual;

	if (bo_gem->is_userptr)
		return NULL;

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;
		void *ptr;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		if (bo_gem->map_count++ == 0)
			drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ptr = MAP_FAILED;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP_GTT,
			     &mmap_arg) == 0) {
			ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				       MAP_SHARED, bufmgr_gem->fd,
				       mmap_arg.offset);
		}
		if (ptr == MAP_FAILED) {
			if (--bo_gem->map_count == 0)
				drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
			ptr = NULL;
		}

		bo_gem->gtt_virtual = ptr;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->gtt_virtual;
}

void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		return bo_gem->mem_virtual;

	if (bo_gem->is_userptr) {
		/* Return the same user ptr */
		return bo_gem->user_virtual;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		if (bo_gem->map_count++ == 0)
			drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP,
			     &mmap_arg)) {
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
		} else {
			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
			bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->mem_virtual;
}

void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (bo_gem->wc_virtual)
		return bo_gem->wc_virtual;

	if (bo_gem->is_userptr)
		return NULL;

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (!bo_gem->wc_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		if (bo_gem->map_count++ == 0)
			drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		mmap_arg.flags = I915_MMAP_WC;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP,
			     &mmap_arg)) {
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
		} else {
			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
			bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->wc_virtual;
}

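/*
 * Usage sketch (hypothetical): choosing a mapping mode. CPU maps are
 * cacheable (fast reads, but need clflushing on non-LLC parts), WC maps
 * are write-combined (good for streaming writes), and GTT maps go through
 * the aperture with tiling fixups applied by hardware. have_llc is a
 * hypothetical caller flag.
 *
 *   void *p = have_llc ? drm_bacon_gem_bo_map__cpu(bo)
 *                      : drm_bacon_gem_bo_map__wc(bo);
 *   if (p)
 *           memcpy(p, data, size);
 */
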
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_bacon_bufmgr *
drm_bacon_bufmgr_gem_init(int fd, int batch_size)
{
	drm_bacon_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;

	pthread_mutex_lock(&bufmgr_list_mutex);

	bufmgr_gem = drm_bacon_bufmgr_gem_find(fd);
	if (bufmgr_gem)
		goto exit;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		goto exit;

	bufmgr_gem->fd = fd;
	p_atomic_set(&bufmgr_gem->refcount, 1);

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	memclear(aperture);
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else if (IS_GEN8(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 8;
	else if (IS_GEN9(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 9;
	else {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	memclear(gp);
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_EXEC_ASYNC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_exec_async = ret == 0;

	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_wait_timeout = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* Kernel does not support the HAS_LLC query; fall back to
		 * GPU generation detection and assume that we have LLC on
		 * GEN6/7.
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
				       IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = *gp.value;

	gp.param = I915_PARAM_HAS_VEBOX;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0);

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	init_cache_buckets(bufmgr_gem);

	list_inithead(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	list_add(&bufmgr_gem->managers, &bufmgr_list);

exit:
	pthread_mutex_unlock(&bufmgr_list_mutex);

	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}

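/*
 * Usage sketch (hypothetical): typical bring-up from a winsys layer.
 * Repeated calls with the same fd return the same refcounted manager, so
 * each drm_bacon_bufmgr_gem_init() should be paired with a
 * drm_bacon_bufmgr_destroy(). The device path and batch size are
 * illustrative.
 *
 *   int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *   drm_bacon_bufmgr *bufmgr = drm_bacon_bufmgr_gem_init(fd, 4096 * 4);
 *   if (bufmgr)
 *           drm_bacon_bufmgr_gem_enable_reuse(bufmgr);
 */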