/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */
#include <xf86drm.h>
#include <util/u_atomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <time.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>

#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

#include "libdrm_macros.h"
#include "main/macros.h"
#include "util/macros.h"
#include "util/list.h"
#include "brw_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "i915_drm.h"
#include "uthash.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define memclear(s) memset(&s, 0, sizeof(s))
#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)
static inline int
atomic_add_unless(int *v, int add, int unless)
{
	int c, old;
	c = p_atomic_read(v);
	while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
		c = old;
	return c == unless;
}
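
/* Illustrative sketch (not from the original source): atomic_add_unless()
 * performs the add only while the current value differs from "unless", and
 * returns non-zero when the value already equals "unless".  This is what lets
 * drm_bacon_gem_bo_unreference() below detect "about to drop the last
 * reference" and take the mutex only for that final decrement. */
static inline bool
example_try_drop_last_reference(int *refcount)
{
	/* Decrement unless the count is already 1; a true return means the
	 * caller must lock and perform the final teardown itself. */
	return atomic_add_unless(refcount, -1, 1);
}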
/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))
typedef struct _drm_bacon_bo_gem drm_bacon_bo_gem;

struct drm_bacon_gem_bo_bucket {
	struct list_head head;
	unsigned long size;
};

typedef struct _drm_bacon_bufmgr_gem {
	drm_bacon_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_bacon_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_bacon_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	struct list_head managers;

	drm_bacon_bo_gem *name_table;
	drm_bacon_bo_gem *handle_table;

	struct list_head vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_llc : 1;
	unsigned int has_wait_timeout : 1;
	unsigned int bo_reuse : 1;
	unsigned int no_exec : 1;
	unsigned int has_vebox : 1;
	unsigned int has_exec_async : 1;

	struct {
		void *ptr;
		uint32_t handle;
	} userptr_active;
} drm_bacon_bufmgr_gem;

typedef struct _drm_bacon_reloc_target_info {
	drm_bacon_bo *bo;
} drm_bacon_reloc_target;
struct _drm_bacon_bo_gem {
	drm_bacon_bo bo;

	int refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 *
	 * List contains both flink named and prime fd'd objects
	 */
	unsigned int global_name;

	UT_hash_handle handle_hh;
	UT_hash_handle name_hh;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	unsigned long kflags;

	/** Timestamp of the last time this buffer entered the cache. */
	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_bacon_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Array of BOs that are referenced by this buffer and will be softpinned */
	drm_bacon_bo **softpin_target;
	/** Number softpinned BOs that are referenced by this buffer */
	int softpin_target_count;
	/** Maximum amount of softpinned BOs that are referenced by this buffer */
	int softpin_target_size;

	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	/** WC CPU address for the buffer, saved across map/unmap cycles */
	void *wc_virtual;
	/**
	 * Virtual address of the buffer allocated by user, used for userptr
	 * objects only.
	 */
	void *user_virtual;
	int map_count;
	struct list_head vma_list;

	/** BO cache list */
	struct list_head head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_bacon_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building the
	 * relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Boolean of whether the GPU is definitely not accessing the buffer.
	 *
	 * This is only valid when reusable, since non-reusable
	 * buffers are those that have been shared with other
	 * processes, so we don't know their state.
	 */
	bool idle;

	/**
	 * Boolean of whether this buffer was allocated with userptr
	 */
	bool is_userptr;

	/**
	 * Size in bytes of this buffer and its relocation descendents.
	 *
	 * Used to avoid costly tree walking in
	 * drm_bacon_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/** Flags that we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};
static unsigned int
drm_bacon_gem_estimate_batch_space(drm_bacon_bo **bo_array, int count);

static unsigned int
drm_bacon_gem_compute_batch_space(drm_bacon_bo **bo_array, int count);

static int
drm_bacon_gem_bo_get_tiling(drm_bacon_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode);

static int
drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
						      time_t time);

static void drm_bacon_gem_bo_unreference(drm_bacon_bo *bo);

static void drm_bacon_gem_bo_free(drm_bacon_bo *bo);

static inline drm_bacon_bo_gem *to_bo_gem(drm_bacon_bo *bo)
{
	return (drm_bacon_bo_gem *)bo;
}
static unsigned long
drm_bacon_gem_bo_tile_size(drm_bacon_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	return ALIGN(size, 4096);
}
/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * switch.
 */
static unsigned long
drm_bacon_gem_bo_tile_pitch(drm_bacon_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X)
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	return ALIGN(pitch, tile_width);
}
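
/* Illustrative sketch (not from the original source): how the two helpers
 * above combine for an X-tiled surface.  A 1000-byte pitch rounds up to 1024
 * (a multiple of the 512-byte X-tile width) and the total size rounds up to
 * a whole 4096-byte page. */
static unsigned long
example_x_tiled_size(drm_bacon_bufmgr_gem *bufmgr_gem,
		     unsigned long pitch, unsigned long height)
{
	uint32_t tiling = I915_TILING_X;

	pitch = drm_bacon_gem_bo_tile_pitch(bufmgr_gem, pitch, &tiling);
	return drm_bacon_gem_bo_tile_size(bufmgr_gem, pitch * height, &tiling);
}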
static struct drm_bacon_gem_bo_bucket *
drm_bacon_gem_bo_bucket_for_size(drm_bacon_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_bacon_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}
static void
drm_bacon_gem_dump_validation_list(drm_bacon_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_bacon_bo *bo = bufmgr_gem->exec_bos[i];
		drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

		if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
			DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_bacon_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_bacon_bo_gem *target_gem =
			    (drm_bacon_bo_gem *) target_bo;

			DBG("%2d: %d %s(%s)@0x%08x %08x -> "
			    "%d (%s)@0x%08x %08x + 0x%08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    upper_32_bits(bo_gem->relocs[j].offset),
			    lower_32_bits(bo_gem->relocs[j].offset),
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64),
			    bo_gem->relocs[j].delta);
		}

		for (j = 0; j < bo_gem->softpin_target_count; j++) {
			drm_bacon_bo *target_bo = bo_gem->softpin_target[j];
			drm_bacon_bo_gem *target_gem =
			    (drm_bacon_bo_gem *) target_bo;
			DBG("%2d: %d %s(%s) -> "
			    "%d *(%s)@0x%08x %08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64));
		}
	}
}
static void
drm_bacon_gem_bo_reference(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	p_atomic_inc(&bo_gem->refcount);
}
static void
drm_bacon_add_validate_buffer2(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = bo->align;
	bufmgr_gem->exec2_objects[index].offset = bo->offset64;
	bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}
static void
drm_bacon_bo_gem_set_in_aperture_size(drm_bacon_bufmgr_gem *bufmgr_gem,
				      drm_bacon_bo_gem *bo_gem,
				      unsigned int alignment)
{
	unsigned int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture.  Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;

	bo_gem->reloc_tree_size = size + alignment;
}
static int
drm_bacon_setup_reloc_list(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_bacon_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free (bo_gem->relocs);
		bo_gem->relocs = NULL;

		free (bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}
static int
drm_bacon_gem_bo_busy(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	if (bo_gem->reusable && bo_gem->idle)
		return false;

	memclear(busy);
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	if (ret == 0)
		bo_gem->idle = !busy.busy;

	return (ret == 0 && busy.busy);
}
static int
drm_bacon_gem_bo_madvise_internal(drm_bacon_bufmgr_gem *bufmgr_gem,
				  drm_bacon_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	memclear(madv);
	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_bacon_gem_bo_madvise(drm_bacon_bo *bo, int madv)
{
	return drm_bacon_gem_bo_madvise_internal
		((drm_bacon_bufmgr_gem *) bo->bufmgr,
		 (drm_bacon_bo_gem *) bo,
		 madv);
}
/* drop the oldest entries that have been purged by the kernel */
static void
drm_bacon_gem_bo_cache_purge_bucket(drm_bacon_bufmgr_gem *bufmgr_gem,
				    struct drm_bacon_gem_bo_bucket *bucket)
{
	while (!list_empty(&bucket->head)) {
		drm_bacon_bo_gem *bo_gem;

		bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
				    bucket->head.next, head);
		if (drm_bacon_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		list_del(&bo_gem->head);
		drm_bacon_gem_bo_free(&bo_gem->bo);
	}
}
static drm_bacon_bo *
drm_bacon_gem_bo_alloc_internal(drm_bacon_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride,
				unsigned int alignment)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
	drm_bacon_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_bacon_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !list_empty(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
					    bucket->head.prev, head);
			list_del(&bo_gem->head);
			alloc_from_cache = true;
			bo_gem->bo.align = alignment;
		} else {
			assert(alignment == 0);
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case.  Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
					    bucket->head.next, head);
			if (!drm_bacon_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				list_del(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_bacon_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_bacon_gem_bo_free(&bo_gem->bo);
				drm_bacon_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_bacon_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_bacon_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			goto err;

		/* drm_bacon_gem_bo_free calls list_del() for an uninitialized
		   list (vma_list), so better set the list head here */
		list_inithead(&bo_gem->vma_list);

		bo_gem->bo.size = bo_size;

		memclear(create);
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		if (ret != 0) {
			free(bo_gem);
			goto err;
		}

		bo_gem->gem_handle = create.handle;
		HASH_ADD(handle_hh, bufmgr_gem->handle_table,
			 gem_handle, sizeof(bo_gem->gem_handle),
			 bo_gem);

		bo_gem->bo.handle = bo_gem->gem_handle;
		bo_gem->bo.bufmgr = bufmgr;
		bo_gem->bo.align = alignment;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_bacon_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride))
			goto err_free;
	}

	bo_gem->name = name;
	p_atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;

	drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;

err_free:
	drm_bacon_gem_bo_free(&bo_gem->bo);
err:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}
static drm_bacon_bo *
drm_bacon_gem_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0,
					       alignment);
}

static drm_bacon_bo *
drm_bacon_gem_bo_alloc(drm_bacon_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0, 0);
}
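
/* Usage sketch (illustrative, not from the original source): allocating a
 * linear scratch BO through the functions above.  The "scratch" string is
 * only a debug label; 64 KiB is rounded up to the nearest cache bucket. */
static drm_bacon_bo *
example_alloc_scratch(drm_bacon_bufmgr *bufmgr)
{
	return drm_bacon_gem_bo_alloc(bufmgr, "scratch", 64 * 1024, 0);
}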
static drm_bacon_bo *
drm_bacon_gem_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		height_alignment = 2;

		if (tiling == I915_TILING_X)
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_bacon_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_bacon_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);

	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride, 0);
}
static drm_bacon_bo *
drm_bacon_gem_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
			       const char *name,
			       void *addr,
			       uint32_t tiling_mode,
			       uint32_t stride,
			       unsigned long size,
			       unsigned long flags)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
	drm_bacon_bo_gem *bo_gem;
	int ret;
	struct drm_i915_gem_userptr userptr;

	/* Tiling with userptr surfaces is not supported
	 * on all hardware so refuse it for time being.
	 */
	if (tiling_mode != I915_TILING_NONE)
		return NULL;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	p_atomic_set(&bo_gem->refcount, 1);
	list_inithead(&bo_gem->vma_list);

	bo_gem->bo.size = size;

	memclear(userptr);
	userptr.user_ptr = (__u64)((unsigned long)addr);
	userptr.user_size = size;
	userptr.flags = flags;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_USERPTR,
		       &userptr);
	if (ret != 0) {
		DBG("bo_create_userptr: "
		    "ioctl failed with user ptr %p size 0x%lx, "
		    "user flags 0x%lx\n", addr, size, flags);
		free(bo_gem);
		return NULL;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);

	bo_gem->gem_handle = userptr.handle;
	bo_gem->bo.handle = bo_gem->gem_handle;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->is_userptr = true;
	bo_gem->bo.virtual = addr;
	/* Save the address provided by user */
	bo_gem->user_virtual = addr;
	bo_gem->tiling_mode = I915_TILING_NONE;
	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	bo_gem->stride = 0;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle),
		 bo_gem);

	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;

	drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create_userptr: "
	    "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
	    addr, bo_gem->gem_handle, bo_gem->name,
	    size, stride, tiling_mode);

	return &bo_gem->bo;
}
static bool
has_userptr(drm_bacon_bufmgr_gem *bufmgr_gem)
{
	int ret;
	void *ptr;
	long pgsz;
	struct drm_i915_gem_userptr userptr;

	pgsz = sysconf(_SC_PAGESIZE);
	assert(pgsz > 0);

	ret = posix_memalign(&ptr, pgsz, pgsz);
	if (ret) {
		DBG("Failed to get a page (%ld) for userptr detection!\n",
		    pgsz);
		return false;
	}

	memclear(userptr);
	userptr.user_ptr = (__u64)(unsigned long)ptr;
	userptr.user_size = pgsz;

retry:
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
	if (ret) {
		if (errno == ENODEV && userptr.flags == 0) {
			userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
			goto retry;
		}
		free(ptr);
		return false;
	}

	/* We don't release the userptr bo here as we want to keep the
	 * kernel mm tracking alive for our lifetime.  The first time we
	 * create a userptr object the kernel has to install a mmu_notifier
	 * which is a heavyweight operation (e.g. it requires taking all
	 * mm_locks and stop_machine()).
	 */
	bufmgr_gem->userptr_active.ptr = ptr;
	bufmgr_gem->userptr_active.handle = userptr.handle;

	return true;
}
static drm_bacon_bo *
check_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
		       const char *name,
		       void *addr,
		       uint32_t tiling_mode,
		       uint32_t stride,
		       unsigned long size,
		       unsigned long flags)
{
	if (has_userptr((drm_bacon_bufmgr_gem *)bufmgr))
		bufmgr->bo_alloc_userptr = drm_bacon_gem_bo_alloc_userptr;
	else
		bufmgr->bo_alloc_userptr = NULL;

	return drm_bacon_bo_alloc_userptr(bufmgr, name, addr,
					  tiling_mode, stride, size, flags);
}
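
/* Usage sketch (illustrative, not from the original source): wrapping an
 * existing page-aligned allocation as a GEM object.  Tiling must be
 * I915_TILING_NONE, as enforced by drm_bacon_gem_bo_alloc_userptr() above. */
static drm_bacon_bo *
example_wrap_userptr(drm_bacon_bufmgr *bufmgr, void *page_aligned_ptr,
		     unsigned long size)
{
	return check_bo_alloc_userptr(bufmgr, "wrapped", page_aligned_ptr,
				      I915_TILING_NONE, 0, size, 0);
}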
/**
 * Returns a drm_bacon_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_bacon_bo *
drm_bacon_bo_gem_create_from_name(drm_bacon_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
	drm_bacon_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	/* At the moment most applications only have a few named bo.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named.  And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	pthread_mutex_lock(&bufmgr_gem->lock);
	HASH_FIND(name_hh, bufmgr_gem->name_table,
		  &handle, sizeof(handle), bo_gem);
	if (bo_gem) {
		drm_bacon_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	memclear(open_arg);
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		bo_gem = NULL;
		goto out;
	}
	/* Now see if someone has used a prime handle to get this
	 * object from the kernel before by looking through the list
	 * again for a matching gem_handle
	 */
	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
		  &open_arg.handle, sizeof(open_arg.handle), bo_gem);
	if (bo_gem) {
		drm_bacon_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		goto out;

	p_atomic_set(&bo_gem->refcount, 1);
	list_inithead(&bo_gem->vma_list);

	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.offset64 = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
	HASH_ADD(name_hh, bufmgr_gem->name_table,
		 global_name, sizeof(bo_gem->global_name), bo_gem);

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0)
		goto err_unref;

	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

out:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return &bo_gem->bo;

err_unref:
	drm_bacon_gem_bo_free(&bo_gem->bo);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}
static void
drm_bacon_gem_bo_free(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	list_del(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->wc_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	if (bo_gem->global_name)
		HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
	HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);

	/* Close this object */
	memclear(close);
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}
static void
drm_bacon_gem_bo_mark_mmaps_incoherent(drm_bacon_bo *bo)
{
#ifdef HAVE_VALGRIND
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

	if (bo_gem->wc_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);

	if (bo_gem->gtt_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}
/** Frees all cached buffers significantly older than @time. */
static void
drm_bacon_gem_cleanup_bo_cache(drm_bacon_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_bacon_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!list_empty(&bucket->head)) {
			drm_bacon_bo_gem *bo_gem;

			bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
					    bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			list_del(&bo_gem->head);

			drm_bacon_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}
static void drm_bacon_gem_bo_purge_vma_cache(drm_bacon_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_bacon_bo_gem *bo_gem;

		bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
				    bufmgr_gem->vma_cache.next,
				    vma_list);
		assert(bo_gem->map_count == 0);
		list_delinit(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->wc_virtual) {
			drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
			bo_gem->wc_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}
static void drm_bacon_gem_bo_close_vma(drm_bacon_bufmgr_gem *bufmgr_gem,
				       drm_bacon_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	list_addtail(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_bacon_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_bacon_gem_bo_open_vma(drm_bacon_bufmgr_gem *bufmgr_gem,
				      drm_bacon_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	list_del(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_bacon_gem_bo_purge_vma_cache(bufmgr_gem);
}
static void
drm_bacon_gem_bo_unreference_final(drm_bacon_bo *bo, time_t time)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_bacon_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_bacon_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	for (i = 0; i < bo_gem->softpin_target_count; i++)
		drm_bacon_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
							  time);
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->softpin_target_count = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}
	if (bo_gem->softpin_target) {
		free(bo_gem->softpin_target);
		bo_gem->softpin_target = NULL;
		bo_gem->softpin_target_size = 0;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
	}

	bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_bacon_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		list_addtail(&bo_gem->head, &bucket->head);
	} else {
		drm_bacon_gem_bo_free(bo);
	}
}
static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
						      time_t time)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	assert(p_atomic_read(&bo_gem->refcount) > 0);
	if (p_atomic_dec_zero(&bo_gem->refcount))
		drm_bacon_gem_bo_unreference_final(bo, time);
}

static void drm_bacon_gem_bo_unreference(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	assert(p_atomic_read(&bo_gem->refcount) > 0);

	if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
		drm_bacon_bufmgr_gem *bufmgr_gem =
		    (drm_bacon_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);

		if (p_atomic_dec_zero(&bo_gem->refcount)) {
			drm_bacon_gem_bo_unreference_final(bo, time.tv_sec);
			drm_bacon_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		}

		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}
static int drm_bacon_gem_bo_map(drm_bacon_bo *bo, int write_enable)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	if (bo_gem->is_userptr) {
		/* Return the same user ptr */
		bo->virtual = bo_gem->user_virtual;
		return 0;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	if (write_enable)
		bo_gem->mapped_cpu_write = true;

	drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
static int
map_gtt(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	if (bo_gem->map_count++ == 0)
		drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP_GTT,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
			return ret;
		}

		/* and mmap it */
		bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
					       MAP_SHARED, bufmgr_gem->fd,
					       mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			bo_gem->gtt_virtual = NULL;
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	return 0;
}
int
drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	ret = map_gtt(bo);
	if (ret) {
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return ret;
	}

	/* Now move it to the GTT domain so that the GPU and CPU
	 * caches are flushed and the GPU isn't actively using the
	 * buffer.
	 *
	 * The pagefault handler does this domain change for us when
	 * it has unbound the BO from the GTT, but it's up to us to
	 * tell it when we're about to use things if we had done
	 * rendering and it still happens to be bound to the GTT.
	 */
	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
/**
 * Performs a mapping of the buffer object like the normal GTT
 * mapping, but avoids waiting for the GPU to be done reading from or
 * rendering to the buffer.
 *
 * This is used in the implementation of GL_ARB_map_buffer_range: The
 * user asks to create a buffer, then does a mapping, fills some
 * space, runs a drawing command, then asks to map it again without
 * synchronizing because it guarantees that it won't write over the
 * data that the GPU is busy using (or, more specifically, that if it
 * does write over the data, it acknowledges that rendering is
 * undefined).
 */
int
drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
#ifdef HAVE_VALGRIND
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
#endif
	int ret;

	/* If the CPU cache isn't coherent with the GTT, then use a
	 * regular synchronized mapping.  The problem is that we don't
	 * track where the buffer was last used on the CPU side in
	 * terms of drm_bacon_bo_map vs drm_bacon_gem_bo_map_gtt, so
	 * we would potentially corrupt the buffer even when the user
	 * does reasonable things.
	 */
	if (!bufmgr_gem->has_llc)
		return drm_bacon_gem_bo_map_gtt(bo);

	pthread_mutex_lock(&bufmgr_gem->lock);

	ret = map_gtt(bo);
	if (ret == 0) {
		drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
		VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
static int drm_bacon_gem_bo_unmap(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int ret = 0;

	if (bo == NULL)
		return 0;

	if (bo_gem->is_userptr)
		return 0;

	bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count <= 0) {
		DBG("attempted to unmap an unmapped bo\n");
		pthread_mutex_unlock(&bufmgr_gem->lock);
		/* Preserve the old behaviour of just treating this as a
		 * no-op rather than reporting the error.
		 */
		return 0;
	}

	if (bo_gem->mapped_cpu_write) {
		struct drm_i915_gem_sw_finish sw_finish;

		/* Cause a flush to happen if the buffer's pinned for
		 * scanout, so the results show up in a timely manner.
		 * Unlike GTT set domains, this only does work if the
		 * buffer should be scanout-related.
		 */
		memclear(sw_finish);
		sw_finish.handle = bo_gem->gem_handle;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_SW_FINISH,
			       &sw_finish);
		ret = ret == -1 ? -errno : 0;

		bo_gem->mapped_cpu_write = false;
	}

	/* We need to unmap after every invocation as we cannot track
	 * an open vma for every bo as that will exhaust the system
	 * limits and cause later failures.
	 */
	if (--bo_gem->map_count == 0) {
		drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
		bo->virtual = NULL;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
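
/* Usage sketch (illustrative, not from the original source): a complete CPU
 * upload cycle through the cached-mmap path above.  Mapping with write
 * enabled moves the BO to the CPU write domain; unmapping issues SW_FINISH
 * when needed and returns the vma to the cache. */
static int
example_cpu_upload(drm_bacon_bo *bo, const void *src, size_t len)
{
	int ret = drm_bacon_gem_bo_map(bo, 1 /* write_enable */);

	if (ret != 0)
		return ret;

	memcpy(bo->virtual, src, len);
	return drm_bacon_gem_bo_unmap(bo);
}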
static int
drm_bacon_gem_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	memclear(pwrite);
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PWRITE,
		       &pwrite);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}
static int
drm_bacon_gem_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	memclear(pread);
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PREAD,
		       &pread);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}
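
/* Usage sketch (illustrative, not from the original source): the pwrite/
 * pread pair suits small transfers where a full mmap would be overkill. */
static int
example_copy_through_bo(drm_bacon_bo *bo, const uint32_t in[4], uint32_t out[4])
{
	int ret = drm_bacon_gem_bo_subdata(bo, 0, 4 * sizeof(uint32_t), in);

	if (ret != 0)
		return ret;

	return drm_bacon_gem_bo_get_subdata(bo, 0, 4 * sizeof(uint32_t), out);
}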
/** Waits for all GPU rendering with the object to have completed. */
static void
drm_bacon_gem_bo_wait_rendering(drm_bacon_bo *bo)
{
	drm_bacon_gem_bo_start_gtt_access(bo, 1);
}
/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time.  Otherwise some negative
 * return value describes the error.  Of particular interest is -ETIME when
 * the wait has failed to yield the desired result.
 *
 * Similar to drm_bacon_gem_bo_wait_rendering except a timeout parameter
 * allows the operation to give up after a certain amount of time.  Another
 * subtle difference is that the internal locking semantics differ (this
 * variant does not hold the lock for the duration of the wait).  This makes
 * the wait subject to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call.  The wait will
 * not guarantee that the buffer is re-issued via another thread, or a
 * flinked handle.  Userspace must make sure this race does not occur if
 * such precision is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise; upgrade to latest stable kernels if this is the case.
 */
int
drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_i915_gem_wait wait;
	int ret;

	if (!bufmgr_gem->has_wait_timeout) {
		DBG("%s:%d: Timed wait is not supported. Falling back to "
		    "infinite wait\n", __FILE__, __LINE__);
		if (timeout_ns) {
			drm_bacon_gem_bo_wait_rendering(bo);
			return 0;
		} else {
			return drm_bacon_gem_bo_busy(bo) ? -ETIME : 0;
		}
	}

	memclear(wait);
	wait.bo_handle = bo_gem->gem_handle;
	wait.timeout_ns = timeout_ns;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	if (ret == -1)
		return -errno;

	return ret;
}
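
/* Usage sketch (illustrative, not from the original source): a bounded wait;
 * -ETIME means the BO was still busy after one millisecond had elapsed. */
static int
example_wait_one_ms(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	int ret = drm_bacon_gem_bo_wait(bo, 1000000 /* 1 ms in ns */);

	if (ret == -ETIME)
		DBG("timed out waiting on bo %d\n", to_bo_gem(bo)->gem_handle);

	return ret;
}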
/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_bacon_gem_bo_map_gtt().
 *
 * In combination with drm_bacon_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    set_domain.read_domains, set_domain.write_domain,
		    strerror(errno));
	}
}
static void
drm_bacon_bufmgr_gem_destroy(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
	struct drm_gem_close close_bo;
	int i, ret;

	free(bufmgr_gem->exec2_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_bacon_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_bacon_bo_gem *bo_gem;

		while (!list_empty(&bucket->head)) {
			bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
					    bucket->head.next, head);
			list_del(&bo_gem->head);

			drm_bacon_gem_bo_free(&bo_gem->bo);
		}
	}

	/* Release userptr bo kept hanging around for optimisation. */
	if (bufmgr_gem->userptr_active.ptr) {
		memclear(close_bo);
		close_bo.handle = bufmgr_gem->userptr_active.handle;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
		free(bufmgr_gem->userptr_active.ptr);
		if (ret)
			fprintf(stderr,
				"Failed to release test userptr object! (%d) "
				"i915 kernel driver may not be sane!\n", errno);
	}

	free(bufmgr);
}
/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
		 drm_bacon_bo *target_bo, uint32_t target_offset,
		 uint32_t read_domains, uint32_t write_domain)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL && drm_bacon_setup_reloc_list(bo))
		return -ENOMEM;

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	/* Check args */
	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	if (target_bo_gem != bo_gem) {
		target_bo_gem->used_as_reloc_target = true;
		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
	}

	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
	if (target_bo != bo)
		drm_bacon_gem_bo_reference(target_bo);

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
	bo_gem->reloc_count++;

	return 0;
}
static int
drm_bacon_gem_bo_add_softpin_target(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;
	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
		return -EINVAL;
	if (target_bo_gem == bo_gem)
		return -EINVAL;

	if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
		int new_size = bo_gem->softpin_target_size * 2;
		if (new_size == 0)
			new_size = bufmgr_gem->max_relocs;

		bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
						 sizeof(drm_bacon_bo *));
		if (!bo_gem->softpin_target)
			return -ENOMEM;

		bo_gem->softpin_target_size = new_size;
	}
	bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
	drm_bacon_gem_bo_reference(target_bo);
	bo_gem->softpin_target_count++;

	return 0;
}
static int
drm_bacon_gem_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
			    drm_bacon_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *)target_bo;

	if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
		return drm_bacon_gem_bo_add_softpin_target(bo, target_bo);
	else
		return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
					read_domains, write_domain);
}
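
/* Usage sketch (illustrative, not from the original source): pointing a
 * batch at a vertex buffer.  "batch_offset" is where the address is written
 * inside the batch, and the precomputed value must already be stored there
 * (see do_bo_emit_reloc() above). */
static int
example_emit_vertex_reloc(drm_bacon_bo *batch, uint32_t batch_offset,
			  drm_bacon_bo *vbo)
{
	return drm_bacon_gem_bo_emit_reloc(batch, batch_offset,
					   vbo, 0 /* target_offset */,
					   I915_GEM_DOMAIN_VERTEX, 0);
}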
int
drm_bacon_gem_bo_get_reloc_count(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	return bo_gem->reloc_count;
}
/**
 * Removes existing relocation entries in the BO after "start".
 *
 * This allows a user to avoid a two-step process for state setup with
 * counting up all the buffer objects and doing a
 * drm_bacon_bufmgr_check_aperture_space() before emitting any of the
 * relocations for the state setup.  Instead, save the state of the
 * batchbuffer including drm_bacon_gem_get_reloc_count(), emit all the
 * state, and then check if it still fits in the aperture.
 *
 * Any further drm_bacon_bufmgr_check_aperture_space() queries
 * involving this buffer in the tree are undefined after this call.
 *
 * This also removes all softpinned targets being referenced by the BO.
 */
void
drm_bacon_gem_bo_clear_relocs(drm_bacon_bo *bo, int start)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int i;
	struct timespec time;

	clock_gettime(CLOCK_MONOTONIC, &time);

	assert(bo_gem->reloc_count >= start);

	/* Unreference the cleared target buffers */
	pthread_mutex_lock(&bufmgr_gem->lock);

	for (i = start; i < bo_gem->reloc_count; i++) {
		drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) bo_gem->reloc_target_info[i].bo;
		if (&target_bo_gem->bo != bo) {
			drm_bacon_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
								  time.tv_sec);
		}
	}
	bo_gem->reloc_count = start;

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) bo_gem->softpin_target[i];
		drm_bacon_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
	}
	bo_gem->softpin_target_count = 0;

	pthread_mutex_unlock(&bufmgr_gem->lock);
}
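
/* Usage sketch (illustrative, not from the original source): the
 * snapshot/emit/check pattern the comment above describes.
 * drm_bacon_bufmgr_check_aperture_space() is assumed here to be declared in
 * the bufmgr headers with a (bo_array, count) signature. */
static bool
example_emit_state_if_it_fits(drm_bacon_bo *batch, drm_bacon_bo *state_bo,
			      uint32_t offset)
{
	int snapshot = drm_bacon_gem_bo_get_reloc_count(batch);

	if (drm_bacon_gem_bo_emit_reloc(batch, offset, state_bo, 0,
					I915_GEM_DOMAIN_INSTRUCTION, 0) != 0 ||
	    drm_bacon_bufmgr_check_aperture_space(&batch, 1) != 0) {
		/* Roll back every relocation emitted since the snapshot. */
		drm_bacon_gem_bo_clear_relocs(batch, snapshot);
		return false;
	}

	return true;
}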
static void
drm_bacon_gem_bo_process_reloc2(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;
	int i;

	if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_bacon_bo *target_bo = bo_gem->reloc_target_info[i].bo;

		if (target_bo == bo)
			continue;

		drm_bacon_gem_bo_mark_mmaps_incoherent(bo);

		/* Continue walking the tree depth-first. */
		drm_bacon_gem_bo_process_reloc2(target_bo);

		/* Add the target to the validate list */
		drm_bacon_add_validate_buffer2(target_bo);
	}

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		drm_bacon_bo *target_bo = bo_gem->softpin_target[i];

		if (target_bo == bo)
			continue;

		drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
		drm_bacon_gem_bo_process_reloc2(target_bo);
		drm_bacon_add_validate_buffer2(target_bo);
	}
}
static void
drm_bacon_update_buffer_offsets2(drm_bacon_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_bacon_bo *bo = bufmgr_gem->exec_bos[i];
		drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
			/* If we're seeing a softpinned object here it means
			 * that the kernel has relocated our object,
			 * indicating a programming error.
			 */
			assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
			    bo_gem->gem_handle, bo_gem->name,
			    upper_32_bits(bo->offset64),
			    lower_32_bits(bo->offset64),
			    upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
			    lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
			bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
			bo->offset = bufmgr_gem->exec2_objects[i].offset;
		}
	}
}
static int
do_exec2(drm_bacon_bo *bo, int used, drm_bacon_context *ctx,
	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
	 int in_fence, int *out_fence,
	 unsigned int flags)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bo->bufmgr;
	struct drm_i915_gem_execbuffer2 execbuf;
	int ret = 0;
	int i;

	if (to_bo_gem(bo)->has_error)
		return -ENOMEM;

	switch (flags & 0x7) {
	default:
		return -EINVAL;
	case I915_EXEC_BLT:
		if (!bufmgr_gem->has_blt)
			return -EINVAL;
		break;
	case I915_EXEC_BSD:
		if (!bufmgr_gem->has_bsd)
			return -EINVAL;
		break;
	case I915_EXEC_VEBOX:
		if (!bufmgr_gem->has_vebox)
			return -EINVAL;
		break;
	case I915_EXEC_RENDER:
	case I915_EXEC_DEFAULT:
		break;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_bacon_gem_bo_process_reloc2(bo);

	/* Add the batch buffer to the validation list.  There are no relocations
	 * pointing to it.
	 */
	drm_bacon_add_validate_buffer2(bo);

	memclear(execbuf);
	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t)cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;
	execbuf.flags = flags;
	if (ctx == NULL)
		i915_execbuffer2_set_context_id(execbuf, 0);
	else
		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
	execbuf.rsvd2 = 0;
	if (in_fence != -1) {
		execbuf.rsvd2 = in_fence;
		execbuf.flags |= I915_EXEC_FENCE_IN;
	}
	if (out_fence != NULL) {
		*out_fence = -1;
		execbuf.flags |= I915_EXEC_FENCE_OUT;
	}

	if (bufmgr_gem->no_exec)
		goto skip_execution;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (ret == -ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_bacon_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_bacon_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int) bufmgr_gem->gtt_size);
		}
	}
	drm_bacon_update_buffer_offsets2(bufmgr_gem);

	if (ret == 0 && out_fence != NULL)
		*out_fence = execbuf.rsvd2 >> 32;

skip_execution:
	if (bufmgr_gem->bufmgr.debug)
		drm_bacon_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_bacon_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);

		bo_gem->idle = false;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
static int
drm_bacon_gem_bo_exec2(drm_bacon_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects,
		       int DR4)
{
	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
			-1, NULL, I915_EXEC_RENDER);
}

static int
drm_bacon_gem_bo_mrb_exec2(drm_bacon_bo *bo, int used,
			   drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
			   unsigned int flags)
{
	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
			-1, NULL, flags);
}

int
drm_bacon_gem_bo_context_exec(drm_bacon_bo *bo, drm_bacon_context *ctx,
			      int used, unsigned int flags)
{
	return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
}

int
drm_bacon_gem_bo_fence_exec(drm_bacon_bo *bo,
			    drm_bacon_context *ctx,
			    int used,
			    int in_fence,
			    int *out_fence,
			    unsigned int flags)
{
	return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
}
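
/* Usage sketch (illustrative, not from the original source): submitting a
 * batch that waits on an incoming sync-file fd and hands back one for this
 * submission; "used" is the number of bytes actually written to the batch. */
static int
example_exec_with_fences(drm_bacon_bo *batch, drm_bacon_context *ctx,
			 int used, int in_fence_fd, int *out_fence_fd)
{
	return drm_bacon_gem_bo_fence_exec(batch, ctx, used,
					   in_fence_fd, out_fence_fd,
					   I915_EXEC_RENDER);
}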
static int
drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 &&
	    tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)
		return 0;

	memset(&set_tiling, 0, sizeof(set_tiling));
	do {
		/* set_tiling is slightly broken and overwrites the
		 * input on the error path, so we have to open code
		 * drmIoctl.
		 */
		set_tiling.handle = bo_gem->gem_handle;
		set_tiling.tiling_mode = tiling_mode;
		set_tiling.stride = stride;

		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret == -1)
		return -errno;

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = set_tiling.stride;
	return 0;
}
static int
drm_bacon_gem_bo_set_tiling(drm_bacon_bo *bo, uint32_t *tiling_mode,
			    uint32_t stride)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int ret;

	/* Tiling with userptr surfaces is not supported
	 * on all hardware so refuse it for time being.
	 */
	if (bo_gem->is_userptr)
		return -EINVAL;

	/* Linear buffers have no stride. By ensuring that we only ever use
	 * stride 0 with linear buffers, we simplify our code.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;

	ret = drm_bacon_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
	if (ret == 0)
		drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

	*tiling_mode = bo_gem->tiling_mode;
	return ret;
}
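/*
 * Usage sketch (illustrative, with assumed caller variables): requesting
 * X-tiling on a freshly allocated BO.  *tiling_mode is in/out - the kernel
 * may downgrade the request, so the caller re-reads the mode that actually
 * took effect.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	ret = drm_bacon_gem_bo_set_tiling(bo, &tiling, pitch_bytes);
 *	if (ret == 0 && tiling == I915_TILING_NONE)
 *		;	// kernel refused tiling; treat the BO as linear
 */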
static int
drm_bacon_gem_bo_get_tiling(drm_bacon_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}
static int
drm_bacon_gem_bo_set_softpin_offset(drm_bacon_bo *bo, uint64_t offset)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	bo->offset64 = offset;
	bo->offset = offset;
	bo_gem->kflags |= EXEC_OBJECT_PINNED;

	return 0;
}
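/*
 * Note: once a BO carries EXEC_OBJECT_PINNED, the kernel places it at
 * exactly bo->offset64 on every execbuf instead of relocating it, so the
 * caller owns the address-space layout.  A sketch, assuming the bufmgr
 * advertised softpin support (see the HAS_EXEC_SOFTPIN check in
 * drm_bacon_bufmgr_gem_init below):
 *
 *	if (bufmgr->bo_set_softpin_offset)
 *		bufmgr->bo_set_softpin_offset(bo, chosen_gtt_address);
 */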
drm_bacon_bo *
drm_bacon_bo_gem_create_from_prime(drm_bacon_bufmgr *bufmgr, int prime_fd, int size)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
	int ret;
	uint32_t handle;
	drm_bacon_bo_gem *bo_gem;
	struct drm_i915_gem_get_tiling get_tiling;

	pthread_mutex_lock(&bufmgr_gem->lock);
	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
	if (ret) {
		DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return NULL;
	}

	/*
	 * See if the kernel has already returned this buffer to us. Just as
	 * for named buffers, we must not create two bo's pointing at the same
	 * kernel object.
	 */
	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
		  &handle, sizeof(handle), bo_gem);
	if (bo_gem) {
		drm_bacon_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		goto out;

	p_atomic_set(&bo_gem->refcount, 1);
	list_inithead(&bo_gem->vma_list);

	/* Determine size of bo. The fd-to-handle ioctl really should
	 * return the size, but it doesn't. If we have kernel 3.12 or
	 * later, we can lseek on the prime fd to get the size. Older
	 * kernels will just fail, in which case we fall back to the
	 * provided (estimated or guess size). */
	ret = lseek(prime_fd, 0, SEEK_END);
	if (ret != -1)
		bo_gem->bo.size = ret;
	else
		bo_gem->bo.size = size;

	bo_gem->bo.handle = handle;
	bo_gem->bo.bufmgr = bufmgr;

	bo_gem->gem_handle = handle;
	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);

	bo_gem->name = "prime";
	bo_gem->validate_index = -1;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	if (drmIoctl(bufmgr_gem->fd,
		     DRM_IOCTL_I915_GEM_GET_TILING,
		     &get_tiling))
		goto err;

	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

out:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return &bo_gem->bo;

err:
	drm_bacon_gem_bo_free(&bo_gem->bo);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}
int
drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
			       DRM_CLOEXEC, prime_fd) != 0)
		return -errno;

	bo_gem->reusable = false;

	return 0;
}
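/*
 * Usage sketch (illustrative): sharing a BO across processes or APIs
 * through a dma-buf file descriptor.  The exporter owns closing the fd
 * once the importer has it.
 *
 *	int fd = -1;
 *	if (drm_bacon_bo_gem_export_to_prime(bo, &fd) == 0)
 *		;	// send fd over a socket, then close(fd)
 *
 * and on the importing side:
 *
 *	bo = drm_bacon_bo_gem_create_from_prime(bufmgr, fd, estimated_size);
 */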
static int
drm_bacon_gem_bo_flink(drm_bacon_bo *bo, uint32_t *name)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (!bo_gem->global_name) {
		struct drm_gem_flink flink;

		memclear(flink);
		flink.handle = bo_gem->gem_handle;
		if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
			return -errno;

		pthread_mutex_lock(&bufmgr_gem->lock);
		if (!bo_gem->global_name) {
			bo_gem->global_name = flink.name;
			bo_gem->reusable = false;

			HASH_ADD(name_hh, bufmgr_gem->name_table,
				 global_name, sizeof(bo_gem->global_name),
				 bo_gem);
		}
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}

	*name = bo_gem->global_name;
	return 0;
}
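/*
 * Note: flink names, unlike prime fds, are global and guessable by any
 * client of the same DRM device, which is why new code generally prefers
 * the prime path above.  Sketch (illustrative):
 *
 *	uint32_t name;
 *	if (drm_bacon_gem_bo_flink(bo, &name) == 0)
 *		;	// peer opens the name via DRM_IOCTL_GEM_OPEN
 */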
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_bacon_bufmgr_gem_enable_reuse(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}
/**
 * Disables implicit synchronisation before executing the bo
 *
 * This will cause rendering corruption unless you correctly manage explicit
 * fences for all rendering involving this buffer - including use by others.
 * Disabling the implicit serialisation is only required if that serialisation
 * is too coarse (for example, you have split the buffer into many
 * non-overlapping regions and are sharing the whole buffer between concurrent
 * independent command streams).
 *
 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
 * which can be checked using drm_bacon_bufmgr_can_disable_implicit_sync,
 * or subsequent execbufs involving the bo will generate EINVAL.
 */
void
drm_bacon_gem_bo_disable_implicit_sync(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	bo_gem->kflags |= EXEC_OBJECT_ASYNC;
}
/**
 * Enables implicit synchronisation before executing the bo
 *
 * This is the default behaviour of the kernel, to wait upon prior writes
 * completing on the object before rendering with it, or to wait for prior
 * reads to complete before writing into the object.
 * drm_bacon_gem_bo_disable_implicit_sync() can stop this behaviour, telling
 * the kernel never to insert a stall before using the object. Then this
 * function can be used to restore the implicit sync before subsequent
 * rendering.
 */
void
drm_bacon_gem_bo_enable_implicit_sync(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
}
/**
 * Query whether the kernel supports disabling of its implicit synchronisation
 * before execbuf. See drm_bacon_gem_bo_disable_implicit_sync()
 */
int
drm_bacon_bufmgr_gem_can_disable_implicit_sync(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;

	return bufmgr_gem->has_exec_async;
}
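/*
 * Usage sketch (illustrative): opting a buffer out of implicit sync only
 * when the kernel supports it, as the comments above require.
 *
 *	if (drm_bacon_bufmgr_gem_can_disable_implicit_sync(bufmgr))
 *		drm_bacon_gem_bo_disable_implicit_sync(bo);
 */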
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_bacon_gem_bo_get_aperture_space(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total += drm_bacon_gem_bo_get_aperture_space(bo_gem->reloc_target_info[i].bo);

	return total;
}
/**
 * Clear the flag set by drm_bacon_gem_bo_get_aperture_space() so we're ready
 * for the next drm_bacon_bufmgr_check_aperture_space() call.
 */
static void
drm_bacon_gem_bo_clear_aperture_space_flag(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_bacon_gem_bo_clear_aperture_space_flag(bo_gem->reloc_target_info[i].bo);
}
/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_bacon_gem_estimate_batch_space(drm_bacon_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo_array[i];

		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}
/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_bacon_gem_compute_batch_space(drm_bacon_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_bacon_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet). We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers. Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_bacon_bo_gem *bo_gem =
			    (drm_bacon_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_bacon_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}
/**
 * Return -ENOSPC if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_bacon_gem_check_aperture_space(drm_bacon_bo **bo_array, int count)
{
	drm_bacon_bufmgr_gem *bufmgr_gem =
	    (drm_bacon_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;

	total = drm_bacon_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_bacon_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}
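/*
 * Usage sketch (illustrative): callers are expected to flush the current
 * batch and retry when the conservative estimate overflows.
 *
 *	if (drm_bacon_gem_check_aperture_space(bos, n) != 0) {
 *		;	// flush the batch, then re-emit the rendering
 *	}
 */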
/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
static int
drm_bacon_gem_bo_disable_reuse(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}
static int
drm_bacon_gem_bo_is_reusable(drm_bacon_bo *bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	return bo_gem->reusable;
}
static int
_drm_bacon_gem_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_bacon_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						 target_bo))
			return 1;
	}

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		if (bo_gem->softpin_target[i] == target_bo)
			return 1;
		if (_drm_bacon_gem_bo_references(bo_gem->softpin_target[i], target_bo))
			return 1;
	}

	return 0;
}
/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_bacon_gem_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
	drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_bacon_gem_bo_references(bo, target_bo);
	return 0;
}
static void
add_bucket(drm_bacon_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	list_inithead(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}
static void
init_cache_buckets(drm_bacon_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough. (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}
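/*
 * For illustration: the buckets generated above are 4KB, 8KB and 12KB,
 * then for each power of two from 16KB up to 64MB the size itself plus
 * its 1.25x, 1.5x and 1.75x steps, i.e. 16KB, 20KB, 24KB, 28KB, 32KB,
 * 40KB, 48KB, 56KB, 64KB, ...
 */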
void
drm_bacon_bufmgr_gem_set_vma_cache_size(drm_bacon_bufmgr *bufmgr, int limit)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_bacon_gem_bo_purge_vma_cache(bufmgr_gem);
}
static int
parse_devid_override(const char *devid_override)
{
	static const struct {
		const char *name;
		int pci_id;
	} name_map[] = {
		{ "brw", PCI_CHIP_I965_GM },
		{ "g4x", PCI_CHIP_GM45_GM },
		{ "ilk", PCI_CHIP_ILD_G },
		{ "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
		{ "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
		{ "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
		{ "byt", PCI_CHIP_VALLEYVIEW_3 },
		{ "bdw", 0x1620 | BDW_ULX },
		{ "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
		{ "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(name_map); i++) {
		if (!strcmp(name_map[i].name, devid_override))
			return name_map[i].pci_id;
	}

	return strtod(devid_override, NULL);
}
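/*
 * Usage sketch (illustrative): the override accepts either a codename from
 * the table above or a literal PCI ID; execbuf is disabled for the process
 * when an override is in effect (see no_exec below).
 *
 *	$ INTEL_DEVID_OVERRIDE=skl program
 *	$ INTEL_DEVID_OVERRIDE=0x1912 program
 */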
/**
 * Get the PCI ID for the device. This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_bacon_bufmgr_gem *bufmgr_gem)
{
	char *devid_override;
	int devid = 0;
	int ret;
	drm_i915_getparam_t gp;

	if (geteuid() == getuid()) {
		devid_override = getenv("INTEL_DEVID_OVERRIDE");
		if (devid_override) {
			bufmgr_gem->no_exec = true;
			return parse_devid_override(devid_override);
		}
	}

	memclear(gp);
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}
	return devid;
}
int
drm_bacon_bufmgr_gem_get_devid(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;

	return bufmgr_gem->pci_device;
}
drm_bacon_context *
drm_bacon_gem_context_create(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
	struct drm_i915_gem_context_create create;
	drm_bacon_context *context = NULL;
	int ret;

	context = calloc(1, sizeof(*context));
	if (!context)
		return NULL;

	memclear(create);
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
	if (ret != 0) {
		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
		    strerror(errno));
		free(context);
		return NULL;
	}

	context->ctx_id = create.ctx_id;
	context->bufmgr = bufmgr;

	return context;
}
int
drm_bacon_gem_context_get_id(drm_bacon_context *ctx, uint32_t *ctx_id)
{
	if (ctx == NULL)
		return -EINVAL;

	*ctx_id = ctx->ctx_id;

	return 0;
}
void
drm_bacon_gem_context_destroy(drm_bacon_context *ctx)
{
	drm_bacon_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_context_destroy destroy;
	int ret;

	if (ctx == NULL)
		return;

	memclear(destroy);

	bufmgr_gem = (drm_bacon_bufmgr_gem *)ctx->bufmgr;
	destroy.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
		       &destroy);
	if (ret != 0)
		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
			strerror(errno));

	free(ctx);
}
int
drm_bacon_get_reset_stats(drm_bacon_context *ctx,
			  uint32_t *reset_count,
			  uint32_t *active,
			  uint32_t *pending)
{
	drm_bacon_bufmgr_gem *bufmgr_gem;
	struct drm_i915_reset_stats stats;
	int ret;

	if (ctx == NULL)
		return -EINVAL;

	memclear(stats);

	bufmgr_gem = (drm_bacon_bufmgr_gem *)ctx->bufmgr;
	stats.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_RESET_STATS,
		       &stats);
	if (ret == 0) {
		if (reset_count != NULL)
			*reset_count = stats.reset_count;

		if (active != NULL)
			*active = stats.batch_active;

		if (pending != NULL)
			*pending = stats.batch_pending;
	}

	return ret;
}
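/*
 * Usage sketch (illustrative): detecting whether this context lost work to
 * a GPU hang, e.g. to back GL robustness queries.
 *
 *	uint32_t resets, active, pending;
 *	if (drm_bacon_get_reset_stats(ctx, &resets, &active, &pending) == 0
 *	    && (active || pending))
 *		;	// report a reset for this context
 */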
int
drm_bacon_reg_read(drm_bacon_bufmgr *bufmgr,
		   uint32_t offset,
		   uint64_t *result)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
	struct drm_i915_reg_read reg_read;
	int ret;

	memclear(reg_read);
	reg_read.offset = offset;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

	*result = reg_read.val;
	return ret;
}
static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct list_head bufmgr_list = { &bufmgr_list, &bufmgr_list };
static drm_bacon_bufmgr_gem *
drm_bacon_bufmgr_gem_find(int fd)
{
	list_for_each_entry(drm_bacon_bufmgr_gem,
			    bufmgr_gem, &bufmgr_list, managers) {
		if (bufmgr_gem->fd == fd) {
			p_atomic_inc(&bufmgr_gem->refcount);
			return bufmgr_gem;
		}
	}

	return NULL;
}
void
drm_bacon_bufmgr_gem_unref(drm_bacon_bufmgr *bufmgr)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;

	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
		pthread_mutex_lock(&bufmgr_list_mutex);

		if (p_atomic_dec_zero(&bufmgr_gem->refcount)) {
			list_del(&bufmgr_gem->managers);
			drm_bacon_bufmgr_gem_destroy(bufmgr);
		}

		pthread_mutex_unlock(&bufmgr_list_mutex);
	}
}
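/*
 * Note: atomic_add_unless() returns true only when the count was already
 * at the "unless" value (1 here), i.e. when this may be the last
 * reference.  Ordinary unrefs therefore decrement without ever taking
 * bufmgr_list_mutex; only the final unref takes the lock and re-checks the
 * count, so a concurrent drm_bacon_bufmgr_gem_find() cannot race with
 * destruction.
 */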
void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (bo_gem->gtt_virtual)
		return bo_gem->gtt_virtual;

	if (bo_gem->is_userptr)
		return NULL;

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;
		void *ptr;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		if (bo_gem->map_count++ == 0)
			drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ptr = MAP_FAILED;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP_GTT,
			     &mmap_arg) == 0) {
			/* and mmap it */
			ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				       MAP_SHARED, bufmgr_gem->fd,
				       mmap_arg.offset);
		}
		if (ptr == MAP_FAILED) {
			if (--bo_gem->map_count == 0)
				drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
			ptr = NULL;
		}

		bo_gem->gtt_virtual = ptr;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->gtt_virtual;
}
void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		return bo_gem->mem_virtual;

	if (bo_gem->is_userptr) {
		/* Return the same user ptr */
		return bo_gem->user_virtual;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		if (bo_gem->map_count++ == 0)
			drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP,
			     &mmap_arg)) {
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return NULL;
		}

		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->mem_virtual;
}
void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo)
{
	drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
	drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;

	if (bo_gem->wc_virtual)
		return bo_gem->wc_virtual;

	if (bo_gem->is_userptr)
		return NULL;

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (!bo_gem->wc_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		if (bo_gem->map_count++ == 0)
			drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		mmap_arg.flags = I915_MMAP_WC;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP,
			     &mmap_arg)) {
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return NULL;
		}

		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
		bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->wc_virtual;
}
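/*
 * Sketch (illustrative): how a caller might choose between the three
 * mapping paths above, assuming it knows whether the access pattern needs
 * detiling and whether the CPU cache is coherent with the GPU (LLC):
 *
 *	void *ptr;
 *	if (tiled_and_needs_detiling)
 *		ptr = drm_bacon_gem_bo_map__gtt(bo);
 *	else if (bufmgr_has_llc)
 *		ptr = drm_bacon_gem_bo_map__cpu(bo);
 *	else
 *		ptr = drm_bacon_gem_bo_map__wc(bo);
 */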
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_bacon_bufmgr *
drm_bacon_bufmgr_gem_init(int fd, int batch_size)
{
	drm_bacon_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;

	pthread_mutex_lock(&bufmgr_list_mutex);

	bufmgr_gem = drm_bacon_bufmgr_gem_find(fd);
	if (bufmgr_gem)
		goto exit;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		goto exit;

	bufmgr_gem->fd = fd;
	p_atomic_set(&bufmgr_gem->refcount, 1);

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	memclear(aperture);
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else if (IS_GEN8(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 8;
	else if (IS_GEN9(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 9;
	else {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	memclear(gp);
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_EXEC_ASYNC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_exec_async = ret == 0;

	bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;

	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_wait_timeout = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* Kernel does not support the HAS_LLC query; fall back to GPU
		 * generation detection and assume that we have LLC on GEN6/7.
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
				       IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = *gp.value;

	gp.param = I915_PARAM_HAS_VEBOX;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0);

	gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret == 0 && *gp.value > 0)
		bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_bacon_gem_bo_set_softpin_offset;

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_bacon_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_bacon_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_bacon_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_bacon_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_bacon_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_bacon_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_bacon_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_bacon_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_bacon_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_bacon_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_bacon_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_bacon_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_bacon_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_bacon_gem_bo_flink;
	bufmgr_gem->bufmgr.bo_exec = drm_bacon_gem_bo_exec2;
	bufmgr_gem->bufmgr.bo_mrb_exec = drm_bacon_gem_bo_mrb_exec2;
	bufmgr_gem->bufmgr.bo_busy = drm_bacon_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_bacon_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_bacon_bufmgr_gem_unref;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_bacon_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_bacon_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_bacon_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.bo_references = drm_bacon_gem_bo_references;

	init_cache_buckets(bufmgr_gem);

	list_inithead(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	list_add(&bufmgr_gem->managers, &bufmgr_list);

exit:
	pthread_mutex_unlock(&bufmgr_list_mutex);

	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}
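/*
 * Usage sketch (illustrative, with an assumed render-node path and batch
 * size): managers are shared per fd, so a second init on the same fd
 * returns the existing, reference-counted bufmgr.
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	drm_bacon_bufmgr *bufmgr = drm_bacon_bufmgr_gem_init(fd, 4096);
 *	...
 *	drm_bacon_bufmgr_gem_unref(bufmgr);
 */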