/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <errno.h>
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"

#include "i915_drm.h"
#include "uthash.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif
#define memclear(s) memset(&s, 0, sizeof(s))

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define MAX2(A, B) ((A) > (B) ? (A) : (B))

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32 bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))
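
/*
 * Illustrative sketch (not compiled as part of this file): how the two
 * helpers above split a 64-bit GTT offset into the 32-bit halves used by
 * the DBG() format strings below.  The function name and offset value are
 * made up for the example.
 */
#if 0
static void example_print_offset(uint64_t offset64)
{
	/* Prints "0x00000001 23400000" for offset64 = 0x123400000 */
	fprintf(stderr, "0x%08x %08x\n",
		upper_32_bits(offset64), lower_32_bits(offset64));
}
#endif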
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead managers;

	drm_intel_bo_gem *name_table;
	drm_intel_bo_gem *handle_table;

	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int has_llc : 1;
	unsigned int has_wait_timeout : 1;
	unsigned int bo_reuse : 1;
	unsigned int no_exec : 1;
	unsigned int has_vebox : 1;
	unsigned int has_exec_async : 1;
	bool fenced_relocs;

	struct {
		void *ptr;
		uint32_t handle;
	} userptr_active;

} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;
struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 *
	 * List contains both flink named and prime fd'd objects
	 */
	unsigned int global_name;

	UT_hash_handle handle_hh;
	UT_hash_handle name_hh;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	unsigned long kflags;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Array of BOs that are referenced by this buffer and will be softpinned */
	drm_intel_bo **softpin_target;
	/** Number of softpinned BOs that are referenced by this buffer */
	int softpin_target_count;
	/** Maximum number of softpinned BOs that are referenced by this buffer */
	int softpin_target_size;

	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	/** WC CPU address for the buffer, saved across map/unmap cycles */
	void *wc_virtual;
	/**
	 * Virtual address of the buffer allocated by user, used for userptr
	 * objects only.
	 */
	void *user_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Boolean of whether the GPU is definitely not accessing the buffer.
	 *
	 * This is only valid when reusable, since non-reusable
	 * buffers are those that have been shared with other
	 * processes, so we don't know their state.
	 */
	bool idle;

	/**
	 * Boolean of whether this buffer was allocated with userptr
	 */
	bool is_userptr;

	/**
	 * Size in bytes of this buffer and its relocation descendents.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Flags that we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
{
	return (drm_intel_bo_gem *)bo;
}
static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}
/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fallback to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}
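
/*
 * Illustrative sketch (not compiled as part of this file): the bucketing
 * contract this helper relies on.  Bucket sizes are populated at bufmgr
 * init (not shown in this excerpt); the lookup returns the first bucket
 * large enough for the request, or NULL when the size exceeds all buckets.
 * The function name here is hypothetical.
 */
#if 0
static unsigned long example_cached_alloc_size(drm_intel_bufmgr_gem *bufmgr_gem,
					       unsigned long request)
{
	struct drm_intel_gem_bo_bucket *bucket =
	    drm_intel_gem_bo_bucket_for_size(bufmgr_gem, request);

	/* e.g. a 5000-byte request lands in the 8192-byte bucket */
	return bucket ? bucket->size : request;
}
#endif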
static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
			DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d %s(%s)@0x%08x %08x -> "
			    "%d (%s)@0x%08x %08x + 0x%08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    upper_32_bits(bo_gem->relocs[j].offset),
			    lower_32_bits(bo_gem->relocs[j].offset),
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64),
			    bo_gem->relocs[j].delta);
		}

		for (j = 0; j < bo_gem->softpin_target_count; j++) {
			drm_intel_bo *target_bo = bo_gem->softpin_target[j];
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;
			DBG("%2d: %d %s(%s) -> "
			    "%d *(%s)@0x%08x %08x\n",
			    i,
			    bo_gem->gem_handle,
			    bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
			    bo_gem->name,
			    target_gem->gem_handle,
			    target_gem->name,
			    upper_32_bits(target_bo->offset64),
			    lower_32_bits(target_bo->offset64));
		}
	}
}
static void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}
/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = bo->align;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}
static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;
	unsigned long flags;

	flags = 0;
	if (need_fence)
		flags |= EXEC_OBJECT_NEEDS_FENCE;

	if (bo_gem->validate_index != -1) {
		bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = bo->align;
	bufmgr_gem->exec2_objects[index].offset = bo->offset64;
	bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags | flags;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))
static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem,
				      unsigned int alignment)
{
	unsigned int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far-less flexible in terms of tiling,
	 * and require tiled buffer to be size aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		unsigned int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		alignment = MAX2(alignment, min_size);
	}

	bo_gem->reloc_tree_size = size + alignment;
}
static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free (bo_gem->relocs);
		bo_gem->relocs = NULL;

		free (bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}
static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	if (bo_gem->reusable && bo_gem->idle)
		return false;

	memclear(busy);
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	if (ret == 0) {
		bo_gem->idle = !busy.busy;
	} else {
		busy.busy = false;
	}

	return (ret == 0 && busy.busy);
}
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	memclear(madv);
	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}
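
/*
 * Illustrative sketch (not compiled as part of this file): the intended
 * madvise round trip through the public entry point this function backs.
 * A client marks an idle buffer purgeable and later asks for it back; a
 * zero return from the WILLNEED call means the kernel discarded the pages
 * and the contents must be regenerated.  The function name is hypothetical.
 */
#if 0
static bool example_reclaim_bo(drm_intel_bo *bo)
{
	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
	/* ... memory pressure may purge the pages here ... */
	return drm_intel_bo_madvise(bo, I915_MADV_WILLNEED) != 0;
}
#endif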
/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride,
				unsigned int alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
			bo_gem->bo.align = alignment;
		} else {
			assert(alignment == 0);
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			goto err;

		/* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
		   list (vma_list), so better set the list head here */
		DRMINITLISTHEAD(&bo_gem->vma_list);

		bo_gem->bo.size = bo_size;

		memclear(create);
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		if (ret != 0) {
			free(bo_gem);
			goto err;
		}

		bo_gem->gem_handle = create.handle;
		HASH_ADD(handle_hh, bufmgr_gem->handle_table,
			 gem_handle, sizeof(bo_gem->gem_handle),
			 bo_gem);

		bo_gem->bo.handle = bo_gem->gem_handle;
		bo_gem->bo.bufmgr = bufmgr;
		bo_gem->bo.align = alignment;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride))
			goto err_free;
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;

err_free:
	drm_intel_gem_bo_free(&bo_gem->bo);
err:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}
static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0,
					       alignment);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0, 0);
}
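
/*
 * Illustrative sketch (not compiled as part of this file): a typical
 * allocation through the public entry point these helpers back.  The
 * bufmgr is assumed to come from drm_intel_bufmgr_gem_init() on an open
 * DRM fd; the function name is hypothetical.
 */
#if 0
static drm_intel_bo *example_alloc(drm_intel_bufmgr *bufmgr)
{
	/* 64 KiB linear buffer; reused from the cache buckets when possible */
	return drm_intel_bo_alloc(bufmgr, "example", 64 * 1024, 4096);
}
#endif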
static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			|| (IS_915(bufmgr_gem->pci_device)
			    && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride, 0);
}
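
/*
 * Illustrative sketch (not compiled as part of this file): allocating a
 * tiled render target through the public entry point this function backs.
 * The tiling mode and pitch are in/out parameters; the caller must use
 * the values the allocator settled on.  The function name is hypothetical.
 */
#if 0
static drm_intel_bo *example_alloc_tiled(drm_intel_bufmgr *bufmgr)
{
	uint32_t tiling = I915_TILING_X;
	unsigned long pitch;

	/* 1024x768, 4 bytes per pixel; tiling may fall back to NONE */
	return drm_intel_bo_alloc_tiled(bufmgr, "example surface",
					1024, 768, 4, &tiling, &pitch, 0);
}
#endif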
static drm_intel_bo *
drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
			       const char *name,
			       void *addr,
			       uint32_t tiling_mode,
			       uint32_t stride,
			       unsigned long size,
			       unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_i915_gem_userptr userptr;

	/* Tiling with userptr surfaces is not supported
	 * on all hardware so refuse it for time being.
	 */
	if (tiling_mode != I915_TILING_NONE)
		return NULL;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->bo.size = size;

	memclear(userptr);
	userptr.user_ptr = (__u64)((unsigned long)addr);
	userptr.user_size = size;
	userptr.flags = flags;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_USERPTR,
		       &userptr);
	if (ret != 0) {
		DBG("bo_create_userptr: "
		    "ioctl failed with user ptr %p size 0x%lx, "
		    "user flags 0x%lx\n", addr, size, flags);
		free(bo_gem);
		return NULL;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);

	bo_gem->gem_handle = userptr.handle;
	bo_gem->bo.handle = bo_gem->gem_handle;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->is_userptr = true;
	bo_gem->bo.virtual = addr;
	/* Save the address provided by user */
	bo_gem->user_virtual = addr;
	bo_gem->tiling_mode = I915_TILING_NONE;
	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	bo_gem->stride = 0;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle),
		 bo_gem);

	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	pthread_mutex_unlock(&bufmgr_gem->lock);

	DBG("bo_create_userptr: "
	    "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
	    addr, bo_gem->gem_handle, bo_gem->name,
	    size, stride, tiling_mode);

	return &bo_gem->bo;
}
static bool
has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int ret;
	void *ptr;
	long pgsz;
	struct drm_i915_gem_userptr userptr;

	pgsz = sysconf(_SC_PAGESIZE);
	assert(pgsz > 0);

	ret = posix_memalign(&ptr, pgsz, pgsz);
	if (ret) {
		DBG("Failed to get a page (%ld) for userptr detection!\n",
			pgsz);
		return false;
	}

	memclear(userptr);
	userptr.user_ptr = (__u64)(unsigned long)ptr;
	userptr.user_size = pgsz;

retry:
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
	if (ret) {
		if (errno == ENODEV && userptr.flags == 0) {
			userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
			goto retry;
		}
		free(ptr);
		return false;
	}

	/* We don't release the userptr bo here as we want to keep the
	 * kernel mm tracking alive for our lifetime. The first time we
	 * create a userptr object the kernel has to install a mmu_notifier
	 * which is a heavyweight operation (e.g. it requires taking all
	 * mm_locks and stop_machine()).
	 */
	bufmgr_gem->userptr_active.ptr = ptr;
	bufmgr_gem->userptr_active.handle = userptr.handle;

	return true;
}
static drm_intel_bo *
check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       void *addr,
		       uint32_t tiling_mode,
		       uint32_t stride,
		       unsigned long size,
		       unsigned long flags)
{
	if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
		bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
	else
		bufmgr->bo_alloc_userptr = NULL;

	return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
					  tiling_mode, stride, size, flags);
}
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	/* At the moment most applications only have a few named bo.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	pthread_mutex_lock(&bufmgr_gem->lock);
	HASH_FIND(name_hh, bufmgr_gem->name_table,
		  &handle, sizeof(handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	memclear(open_arg);
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		bo_gem = NULL;
		goto out;
	}
	/* Now see if someone has used a prime handle to get this
	 * object from the kernel before by looking through the list
	 * again for a matching gem_handle
	 */
	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
		  &open_arg.handle, sizeof(open_arg.handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		goto out;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.offset64 = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
	HASH_ADD(name_hh, bufmgr_gem->name_table,
		 global_name, sizeof(bo_gem->global_name), bo_gem);

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0)
		goto err_unref;

	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

out:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return &bo_gem->bo;

err_unref:
	drm_intel_gem_bo_free(&bo_gem->bo);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}
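
/*
 * Illustrative sketch (not compiled as part of this file): the flink
 * producer/consumer pattern this function serves.  One process publishes
 * a global name, another wraps it; both ends are assumed to have a bufmgr
 * on the same device.  The function name is hypothetical.
 */
#if 0
static drm_intel_bo *example_import(drm_intel_bufmgr *bufmgr,
				    drm_intel_bo *shared)
{
	uint32_t name;

	/* Producer side: make (or fetch) the global flink name. */
	if (drm_intel_bo_flink(shared, &name) != 0)
		return NULL;

	/* Consumer side: wrap the name in a local bo reference. */
	return drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
}
#endif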
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
		drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->wc_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
		drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	if (bo_gem->global_name)
		HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
	HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);

	/* Close this object */
	memclear(close);
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}
static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#ifdef HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

	if (bo_gem->wc_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);

	if (bo_gem->gtt_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}
/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}
static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->wc_virtual) {
			drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
			bo_gem->wc_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}
static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->wc_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	for (i = 0; i < bo_gem->softpin_target_count; i++)
		drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
							  time);
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->softpin_target_count = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}
	if (bo_gem->softpin_target) {
		free(bo_gem->softpin_target);
		bo_gem->softpin_target = NULL;
		bo_gem->softpin_target_size = 0;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	}

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);

	if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);

		if (atomic_dec_and_test(&bo_gem->refcount)) {
			drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
			drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		}

		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	if (bo_gem->is_userptr) {
		/* Return the same user ptr */
		bo->virtual = bo_gem->user_virtual;
		return 0;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	if (write_enable)
		bo_gem->mapped_cpu_write = true;

	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
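
/*
 * Illustrative sketch (not compiled as part of this file): the CPU upload
 * pattern served by the map path above, via the public entry points.
 * Mapping for write moves the bo to the CPU write domain; unmapping
 * triggers SW_FINISH when needed.  The function name is hypothetical.
 */
#if 0
static int example_fill(drm_intel_bo *bo, const void *data, size_t len)
{
	int ret = drm_intel_bo_map(bo, 1 /* write_enable */);
	if (ret)
		return ret;

	memcpy(bo->virtual, data, len);
	return drm_intel_bo_unmap(bo);
}
#endif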
static int
map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP_GTT,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			return ret;
		}

		/* and mmap it */
		bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
					       MAP_SHARED, bufmgr_gem->fd,
					       mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			bo_gem->gtt_virtual = NULL;
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	return 0;
}
int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	ret = map_gtt(bo);
	if (ret) {
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return ret;
	}

	/* Now move it to the GTT domain so that the GPU and CPU
	 * caches are flushed and the GPU isn't actively using the
	 * buffer.
	 *
	 * The pagefault handler does this domain change for us when
	 * it has unbound the BO from the GTT, but it's up to us to
	 * tell it when we're about to use things if we had done
	 * rendering and it still happens to be bound to the GTT.
	 */
	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
/**
 * Performs a mapping of the buffer object like the normal GTT
 * mapping, but avoids waiting for the GPU to be done reading from or
 * rendering to the buffer.
 *
 * This is used in the implementation of GL_ARB_map_buffer_range: The
 * user asks to create a buffer, then does a mapping, fills some
 * space, runs a drawing command, then asks to map it again without
 * synchronizing because it guarantees that it won't write over the
 * data that the GPU is busy using (or, more specifically, that if it
 * does write over the data, it acknowledges that rendering is
 * undefined).
 */
int
drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
#ifdef HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
#endif
	int ret;

	/* If the CPU cache isn't coherent with the GTT, then use a
	 * regular synchronized mapping.  The problem is that we don't
	 * track where the buffer was last used on the CPU side in
	 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
	 * we would potentially corrupt the buffer even when the user
	 * does reasonable things.
	 */
	if (!bufmgr_gem->has_llc)
		return drm_intel_gem_bo_map_gtt(bo);

	pthread_mutex_lock(&bufmgr_gem->lock);

	ret = map_gtt(bo);
	if (ret == 0) {
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
		VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
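
/*
 * Illustrative sketch (not compiled as part of this file): the
 * GL_ARB_map_buffer_range style streaming pattern described above.  The
 * caller promises not to touch the region the GPU is still reading, so
 * no stall is taken on remap.  The function name is hypothetical.
 */
#if 0
static void *example_stream_chunk(drm_intel_bo *bo, size_t used)
{
	if (drm_intel_gem_bo_map_unsynchronized(bo) != 0)
		return NULL;

	/* Append new vertex data past the region in flight. */
	return (char *)bo->virtual + used;
}
#endif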
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret = 0;

	if (bo == NULL)
		return 0;

	if (bo_gem->is_userptr)
		return 0;

	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count <= 0) {
		DBG("attempted to unmap an unmapped bo\n");
		pthread_mutex_unlock(&bufmgr_gem->lock);
		/* Preserve the old behaviour of just treating this as a
		 * no-op rather than reporting the error.
		 */
		return 0;
	}

	if (bo_gem->mapped_cpu_write) {
		struct drm_i915_gem_sw_finish sw_finish;

		/* Cause a flush to happen if the buffer's pinned for
		 * scanout, so the results show up in a timely manner.
		 * Unlike GTT set domains, this only does work if the
		 * buffer should be scanout-related.
		 */
		memclear(sw_finish);
		sw_finish.handle = bo_gem->gem_handle;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_SW_FINISH,
			       &sw_finish);
		ret = ret == -1 ? -errno : 0;

		bo_gem->mapped_cpu_write = false;
	}

	/* We need to unmap after every invocation as we cannot track
	 * an open vma for every bo as that will exhaust the system
	 * limits and cause later failures.
	 */
	if (--bo_gem->map_count == 0) {
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
		bo->virtual = NULL;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

int
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	return drm_intel_gem_bo_unmap(bo);
}
static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	memclear(pwrite);
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PWRITE,
		       &pwrite);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}
static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	memclear(get_pipe_from_crtc_id);
	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		       &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}
static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	memclear(pread);
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PREAD,
		       &pread);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}
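
/*
 * Illustrative sketch (not compiled as part of this file): a pwrite/pread
 * round trip through the public subdata entry points these functions
 * back, avoiding an explicit map of the buffer.  The function name is
 * hypothetical.
 */
#if 0
static int example_copy_through_bo(drm_intel_bo *bo)
{
	uint32_t in = 0xdeadbeef, out = 0;
	int ret;

	ret = drm_intel_bo_subdata(bo, 0, sizeof(in), &in);
	if (ret)
		return ret;
	ret = drm_intel_bo_get_subdata(bo, 0, sizeof(out), &out);
	return ret ? ret : (out == in ? 0 : -EIO);
}
#endif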
/** Waits for all GPU rendering with the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 1);
}
/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative return
 * value describes the error. Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait). This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is not re-issued via another thread, or a
 * flinked handle. Userspace must make sure this race does not occur if such
 * precision is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise; upgrade to latest stable kernels if this is the case.
 */
int
drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_wait wait;
	int ret;

	if (!bufmgr_gem->has_wait_timeout) {
		DBG("%s:%d: Timed wait is not supported. Falling back to "
		    "infinite wait\n", __FILE__, __LINE__);
		if (timeout_ns) {
			drm_intel_gem_bo_wait_rendering(bo);
			return 0;
		} else {
			return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
		}
	}

	memclear(wait);
	wait.bo_handle = bo_gem->gem_handle;
	wait.timeout_ns = timeout_ns;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	if (ret == -1)
		return -errno;

	return ret;
}
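
/*
 * Illustrative sketch (not compiled as part of this file): a bounded wait
 * using the -ETIME contract documented above, falling back to a blocking
 * wait on timeout.  The function name is hypothetical.
 */
#if 0
static void example_wait_bounded(drm_intel_bo *bo)
{
	/* Give the GPU 10 ms; on timeout, block until rendering is done. */
	if (drm_intel_gem_bo_wait(bo, 10 * 1000 * 1000) == -ETIME)
		drm_intel_bo_wait_rendering(bo);
}
#endif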
/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    set_domain.read_domains, set_domain.write_domain,
		    strerror(errno));
	}
}
static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_gem_close close_bo;
	int i, ret;

	free(bufmgr_gem->exec2_objects);
	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	/* Release userptr bo kept hanging around for optimisation. */
	if (bufmgr_gem->userptr_active.ptr) {
		memclear(close_bo);
		close_bo.handle = bufmgr_gem->userptr_active.handle;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
		free(bufmgr_gem->userptr_active.ptr);
		if (ret)
			fprintf(stderr,
				"Failed to release test userptr object! (%d) "
				"i915 kernel driver may not be sane!\n", errno);
	}

	free(bufmgr);
}
/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
		 drm_intel_bo *target_bo, uint32_t target_offset,
		 uint32_t read_domains, uint32_t write_domain,
		 bool need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
	bool fenced_command;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	/* We never use HW fences for rendering on 965+ */
	if (bufmgr_gem->gen >= 4)
		need_fence = false;

	fenced_command = need_fence;
	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
		need_fence = false;

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
		return -ENOMEM;

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	/* Check args */
	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* An object needing a fence is a tiled buffer, so it won't have
	 * relocs to other buffers.
	 */
	if (need_fence) {
		assert(target_bo_gem->reloc_count == 0);
		target_bo_gem->reloc_tree_fences = 1;
	}

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	if (target_bo_gem != bo_gem) {
		target_bo_gem->used_as_reloc_target = true;
		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
		bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
	}

	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
	if (target_bo != bo)
		drm_intel_gem_bo_reference(target_bo);
	if (fenced_command)
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
			DRM_INTEL_RELOC_FENCE;
	else
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
	bo_gem->reloc_count++;

	return 0;
}
static void
drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (enable)
		bo_gem->kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
	else
		bo_gem->kflags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
}
static int
drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
		return -EINVAL;
	if (target_bo_gem == bo_gem)
		return -EINVAL;

	if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
		int new_size = bo_gem->softpin_target_size * 2;
		if (new_size == 0)
			new_size = bufmgr_gem->max_relocs;

		bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
				sizeof(drm_intel_bo *));
		if (!bo_gem->softpin_target)
			return -ENOMEM;

		bo_gem->softpin_target_size = new_size;
	}
	bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
	drm_intel_gem_bo_reference(target_bo);
	bo_gem->softpin_target_count++;

	return 0;
}
static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;

	if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
		return drm_intel_gem_bo_add_softpin_target(bo, target_bo);

	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain,
				!bufmgr_gem->fenced_relocs);
}

static int
drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
				  drm_intel_bo *target_bo,
				  uint32_t target_offset,
				  uint32_t read_domains, uint32_t write_domain)
{
	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain, true);
}
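
/*
 * Illustrative sketch (not compiled as part of this file): how a driver
 * emits a relocation while building a batch.  The presumed offset is
 * written into the batch first, then the reloc records it for the kernel
 * to fix up if the target moved.  Names here are hypothetical.
 */
#if 0
static void example_emit_pointer(drm_intel_bo *batch, uint32_t *batch_map,
				 uint32_t dword_index, drm_intel_bo *target)
{
	/* Precomputed value the kernel may leave untouched. */
	batch_map[dword_index] = lower_32_bits(target->offset64);

	drm_intel_bo_emit_reloc(batch, dword_index * 4,
				target, 0,
				I915_GEM_DOMAIN_RENDER,
				I915_GEM_DOMAIN_RENDER);
}
#endif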
int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reloc_count;
}
/**
 * Removes existing relocation entries in the BO after "start".
 *
 * This allows a user to avoid a two-step process for state setup with
 * counting up all the buffer objects and doing a
 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
 * relocations for the state setup.  Instead, save the state of the
 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
 * state, and then check if it still fits in the aperture.
 *
 * Any further drm_intel_bufmgr_check_aperture_space() queries
 * involving this buffer in the tree are undefined after this call.
 *
 * This also removes all softpinned targets being referenced by the BO.
 */
void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	struct timespec time;

	clock_gettime(CLOCK_MONOTONIC, &time);

	assert(bo_gem->reloc_count >= start);

	/* Unreference the cleared target buffers */
	pthread_mutex_lock(&bufmgr_gem->lock);

	for (i = start; i < bo_gem->reloc_count; i++) {
		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
		if (&target_bo_gem->bo != bo) {
			bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
			drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
								  time.tv_sec);
		}
	}
	bo_gem->reloc_count = start;

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
		drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
	}
	bo_gem->softpin_target_count = 0;

	pthread_mutex_unlock(&bufmgr_gem->lock);
}
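
/*
 * Illustrative sketch (not compiled as part of this file): the
 * save/emit/check/rollback pattern this function enables, as described
 * in the comment above.  The function name is hypothetical.
 */
#if 0
static bool example_try_emit_state(drm_intel_bo *batch, drm_intel_bo **bos,
				   int count)
{
	int saved = drm_intel_gem_bo_get_reloc_count(batch);

	/* ... emit state and relocations into the batch here ... */

	if (drm_intel_bufmgr_check_aperture_space(bos, count) != 0) {
		/* Doesn't fit: drop the relocations we just added. */
		drm_intel_gem_bo_clear_relocs(batch, saved);
		return false;
	}
	return true;
}
#endif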
/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;

		if (target_bo == bo)
			continue;

		drm_intel_gem_bo_mark_mmaps_incoherent(bo);

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);
	}
}
2195 drm_intel_gem_bo_process_reloc2(drm_intel_bo
*bo
)
2197 drm_intel_bo_gem
*bo_gem
= (drm_intel_bo_gem
*)bo
;
2200 if (bo_gem
->relocs
== NULL
&& bo_gem
->softpin_target
== NULL
)
2203 for (i
= 0; i
< bo_gem
->reloc_count
; i
++) {
2204 drm_intel_bo
*target_bo
= bo_gem
->reloc_target_info
[i
].bo
;
2207 if (target_bo
== bo
)
2210 drm_intel_gem_bo_mark_mmaps_incoherent(bo
);
2212 /* Continue walking the tree depth-first. */
2213 drm_intel_gem_bo_process_reloc2(target_bo
);
2215 need_fence
= (bo_gem
->reloc_target_info
[i
].flags
&
2216 DRM_INTEL_RELOC_FENCE
);
2218 /* Add the target to the validate list */
2219 drm_intel_add_validate_buffer2(target_bo
, need_fence
);
2222 for (i
= 0; i
< bo_gem
->softpin_target_count
; i
++) {
2223 drm_intel_bo
*target_bo
= bo_gem
->softpin_target
[i
];
2225 if (target_bo
== bo
)
2228 drm_intel_gem_bo_mark_mmaps_incoherent(bo
);
2229 drm_intel_gem_bo_process_reloc2(target_bo
);
2230 drm_intel_add_validate_buffer2(target_bo
, false);
static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
			    bo_gem->gem_handle, bo_gem->name,
			    upper_32_bits(bo->offset64),
			    lower_32_bits(bo->offset64),
			    upper_32_bits(bufmgr_gem->exec_objects[i].offset),
			    lower_32_bits(bufmgr_gem->exec_objects[i].offset));
			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}
}

static void
drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
			/* If we're seeing a softpinned object here, it means
			 * the kernel has relocated our object... indicating a
			 * programming error.
			 */
			assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
			    bo_gem->gem_handle, bo_gem->name,
			    upper_32_bits(bo->offset64),
			    lower_32_bits(bo->offset64),
			    upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
			    lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
			bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
			bo->offset = bufmgr_gem->exec2_objects[i].offset;
		}
	}
}
void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
			      int x1, int y1, int width, int height,
			      enum aub_dump_bmp_format format,
			      int pitch, int offset)
{
	/* AUB dumping was removed from libdrm (see
	 * drm_intel_bufmgr_gem_set_aub_dump() below); this entry point is
	 * kept only for ABI compatibility and intentionally does nothing. */
}
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	struct drm_i915_gem_execbuffer execbuf;
	int ret, i;

	if (to_bo_gem(bo)->has_error)
		return -ENOMEM;

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	memclear(execbuf);
	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (errno == ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int)bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);

		bo_gem->idle = false;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
static int
do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
	 int in_fence, int *out_fence,
	 unsigned int flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	struct drm_i915_gem_execbuffer2 execbuf;
	int ret = 0;
	int i;

	if (to_bo_gem(bo)->has_error)
		return -ENOMEM;

	switch (flags & 0x7) {
	default:
		return -EINVAL;
	case I915_EXEC_BLT:
		if (!bufmgr_gem->has_blt)
			return -EINVAL;
		break;
	case I915_EXEC_BSD:
		if (!bufmgr_gem->has_bsd)
			return -EINVAL;
		break;
	case I915_EXEC_VEBOX:
		if (!bufmgr_gem->has_vebox)
			return -EINVAL;
		break;
	case I915_EXEC_RENDER:
	case I915_EXEC_DEFAULT:
		break;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc2(bo);

	/* Add the batch buffer to the validation list.  There are no relocations
	 * pointing to it.
	 */
	drm_intel_add_validate_buffer2(bo, 0);

	memclear(execbuf);
	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t)cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;
	execbuf.flags = flags;
	if (ctx == NULL)
		i915_execbuffer2_set_context_id(execbuf, 0);
	else
		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
	execbuf.rsvd2 = 0;
	if (in_fence != -1) {
		execbuf.rsvd2 = in_fence;
		execbuf.flags |= I915_EXEC_FENCE_IN;
	}
	if (out_fence != NULL) {
		*out_fence = -1;
		execbuf.flags |= I915_EXEC_FENCE_OUT;
	}

	if (bufmgr_gem->no_exec)
		goto skip_execution;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (ret == -ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int) bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets2(bufmgr_gem);

	if (ret == 0 && out_fence != NULL)
		*out_fence = execbuf.rsvd2 >> 32;

skip_execution:
	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);

		bo_gem->idle = false;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects,
		       int DR4)
{
	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
			-1, NULL, I915_EXEC_RENDER);
}

static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
			   drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
			   unsigned int flags)
{
	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
			-1, NULL, flags);
}

int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
			      int used, unsigned int flags)
{
	return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
}

int
drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
			    drm_intel_context *ctx,
			    int used,
			    int in_fence,
			    int *out_fence,
			    unsigned int flags)
{
	return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
}
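/*
 * Illustrative sketch (not part of this file): submitting with explicit
 * fencing.  "batch" and "used" are hypothetical caller state; in_fence = -1
 * means "do not wait", and on success out_fence receives a sync_file fd the
 * caller owns and must eventually close().
 *
 *	int out_fence = -1;
 *	ret = drm_intel_gem_bo_fence_exec(batch, ctx, used,
 *					  -1, &out_fence, I915_EXEC_RENDER);
 *	if (ret == 0) {
 *		... hand out_fence to another API or process ...
 *		close(out_fence);
 *	}
 */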
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;
	int ret;

	memclear(pin);
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PIN,
		       &pin);
	if (ret != 0)
		return -errno;

	bo->offset64 = pin.offset;
	bo->offset = pin.offset;
	return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;
	int ret;

	memclear(unpin);
	unpin.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
	if (ret != 0)
		return -errno;

	return 0;
}
static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 &&
	    tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)
		return 0;

	memset(&set_tiling, 0, sizeof(set_tiling));
	do {
		/* set_tiling is slightly broken and overwrites the
		 * input on the error path, so we have to open code
		 * drmIoctl.
		 */
		set_tiling.handle = bo_gem->gem_handle;
		set_tiling.tiling_mode = tiling_mode;
		set_tiling.stride = stride;

		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret == -1)
		return -errno;

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = set_tiling.stride;
	return 0;
}

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	/* Tiling with userptr surfaces is not supported
	 * on all hardware, so refuse it for the time being.
	 */
	if (bo_gem->is_userptr)
		return -EINVAL;

	/* Linear buffers have no stride. By ensuring that we only ever use
	 * stride 0 with linear buffers, we simplify our code.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;

	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
	if (ret == 0)
		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

	*tiling_mode = bo_gem->tiling_mode;
	return ret;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}
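/*
 * Illustrative sketch (not part of this file): requesting X tiling through
 * the public wrapper.  The kernel may downgrade the request, so the caller
 * must read the mode back rather than assume it was honoured; "pitch" is a
 * hypothetical caller value.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	drm_intel_bo_set_tiling(bo, &tiling, pitch);
 *	if (tiling != I915_TILING_X)
 *		... fall back to a linear layout ...
 */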
static int
drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo->offset64 = offset;
	bo->offset = offset;
	bo_gem->kflags |= EXEC_OBJECT_PINNED;

	return 0;
}
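/*
 * Illustrative sketch (not part of this file): pinning a bo at a fixed GPU
 * address so no relocations are needed for it.  The address below is a
 * hypothetical example; when softpinning, the caller is responsible for
 * managing its own GPU virtual address space.
 *
 *	drm_intel_bo_set_softpin_offset(bo, 0x100000000ull);
 */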
drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int ret;
	uint32_t handle;
	drm_intel_bo_gem *bo_gem;
	struct drm_i915_gem_get_tiling get_tiling;

	pthread_mutex_lock(&bufmgr_gem->lock);
	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
	if (ret) {
		DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return NULL;
	}

	/*
	 * See if the kernel has already returned this buffer to us. Just as
	 * for named buffers, we must not create two bo's pointing at the same
	 * kernel object
	 */
	HASH_FIND(handle_hh, bufmgr_gem->handle_table,
		  &handle, sizeof(handle), bo_gem);
	if (bo_gem) {
		drm_intel_gem_bo_reference(&bo_gem->bo);
		goto out;
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		goto out;

	atomic_set(&bo_gem->refcount, 1);
	DRMINITLISTHEAD(&bo_gem->vma_list);

	/* Determine size of bo.  The fd-to-handle ioctl really should
	 * return the size, but it doesn't.  If we have kernel 3.12 or
	 * later, we can lseek on the prime fd to get the size.  Older
	 * kernels will just fail, in which case we fall back to the
	 * provided (estimated or guess size). */
	ret = lseek(prime_fd, 0, SEEK_END);
	if (ret != -1)
		bo_gem->bo.size = ret;
	else
		bo_gem->bo.size = size;

	bo_gem->bo.handle = handle;
	bo_gem->bo.bufmgr = bufmgr;

	bo_gem->gem_handle = handle;
	HASH_ADD(handle_hh, bufmgr_gem->handle_table,
		 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);

	bo_gem->name = "prime";
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	if (drmIoctl(bufmgr_gem->fd,
		     DRM_IOCTL_I915_GEM_GET_TILING,
		     &get_tiling))
		goto err;

	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

out:
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return &bo_gem->bo;

err:
	drm_intel_gem_bo_free(&bo_gem->bo);
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return NULL;
}
int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
			       DRM_CLOEXEC, prime_fd) != 0)
		return -errno;

	bo_gem->reusable = false;

	return 0;
}
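/*
 * Illustrative sketch (not part of this file): sharing a bo between two
 * buffer managers (e.g. two processes) via PRIME.  Both sides own their
 * fd/bo references; "bufmgr2" is hypothetical, and the importer passes its
 * best size estimate in case the kernel is too old for lseek(SEEK_END) on
 * dma-buf fds.
 *
 *	int fd = -1;
 *	drm_intel_bo_gem_export_to_prime(bo, &fd);
 *	... send fd over a unix socket ...
 *	drm_intel_bo *imported =
 *		drm_intel_bo_gem_create_from_prime(bufmgr2, fd, bo->size);
 */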
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (!bo_gem->global_name) {
		struct drm_gem_flink flink;

		memclear(flink);
		flink.handle = bo_gem->gem_handle;
		if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
			return -errno;

		pthread_mutex_lock(&bufmgr_gem->lock);
		if (!bo_gem->global_name) {
			bo_gem->global_name = flink.name;
			bo_gem->reusable = false;

			HASH_ADD(name_hh, bufmgr_gem->name_table,
				 global_name, sizeof(bo_gem->global_name),
				 bo_gem);
		}
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}

	*name = bo_gem->global_name;
	return 0;
}
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}
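/*
 * Illustrative sketch (not part of this file): reuse is opt-in and is
 * normally enabled right after creating the buffer manager.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */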
/**
 * Disables implicit synchronisation before executing the bo
 *
 * This will cause rendering corruption unless you correctly manage explicit
 * fences for all rendering involving this buffer - including use by others.
 * Disabling the implicit serialisation is only required if that serialisation
 * is too coarse (for example, you have split the buffer into many
 * non-overlapping regions and are sharing the whole buffer between concurrent
 * independent command streams).
 *
 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
 * which can be checked using drm_intel_bufmgr_can_disable_implicit_sync,
 * or subsequent execbufs involving the bo will generate EINVAL.
 */
void
drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->kflags |= EXEC_OBJECT_ASYNC;
}

/**
 * Enables implicit synchronisation before executing the bo
 *
 * This is the default behaviour of the kernel, to wait upon prior writes
 * completing on the object before rendering with it, or to wait for prior
 * reads to complete before writing into the object.
 * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
 * the kernel never to insert a stall before using the object. Then this
 * function can be used to restore the implicit sync before subsequent
 * rendering.
 */
void
drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
}

/**
 * Query whether the kernel supports disabling of its implicit synchronisation
 * before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
 */
int
drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	return bufmgr_gem->has_exec_async;
}
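/*
 * Illustrative sketch (not part of this file): only disable implicit sync
 * when the kernel advertises EXEC_ASYNC support, per the note above.
 *
 *	if (drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr))
 *		drm_intel_gem_bo_disable_implicit_sync(bo);
 */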
/**
 * Enable use of fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation.  If this option is not enabled, all relocs will have fence
 * register allocated.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = true;
}
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_info[i].bo);

	return total;
}
/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}
/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);
}
/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}
/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet).  We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers.  Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}
/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -ENOSPC;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}
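/*
 * Illustrative sketch (not part of this file): callers typically test the
 * whole working set before emitting and flush early on failure.  "bos",
 * "n_bos" and "flush_batch()" are hypothetical caller state.
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bos, n_bos) != 0) {
 *		flush_batch();          // retire the previous rendering
 *		... re-emit against an empty batch ...
 *	}
 */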
/**
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}

int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;
}
static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						 target_bo))
			return 1;
	}

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		if (bo_gem->softpin_target[i] == target_bo)
			return 1;
		if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}
static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}

static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}
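/*
 * For reference, the loop above yields 4KB/8KB/12KB, then each power of two
 * from 16KB to 64MB plus the three quarter-steps above it (16/20/24/28KB,
 * 32/40/48/56KB, ...): 3 + 13 * 4 = 55 buckets in all, which fits the
 * 14 * 4 entry cache_bucket array.
 */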
void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
static int
parse_devid_override(const char *devid_override)
{
	static const struct {
		const char *name;
		int pci_id;
	} name_map[] = {
		{ "brw", PCI_CHIP_I965_GM },
		{ "g4x", PCI_CHIP_GM45_GM },
		{ "ilk", PCI_CHIP_ILD_G },
		{ "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
		{ "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
		{ "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
		{ "byt", PCI_CHIP_VALLEYVIEW_3 },
		{ "bdw", 0x1620 | BDW_ULX },
		{ "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
		{ "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(name_map); i++) {
		if (!strcmp(name_map[i].name, devid_override))
			return name_map[i].pci_id;
	}

	return strtod(devid_override, NULL);
}

/**
 * Get the PCI ID for the device.  This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
	char *devid_override;
	int devid = 0;
	int ret;
	drm_i915_getparam_t gp;

	if (geteuid() == getuid()) {
		devid_override = getenv("INTEL_DEVID_OVERRIDE");
		if (devid_override) {
			bufmgr_gem->no_exec = true;
			return parse_devid_override(devid_override);
		}
	}

	memclear(gp);
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}
	return devid;
}
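/*
 * Illustrative sketch (not part of this file): the override is intended for
 * testing code paths for another chipset, e.g.
 *
 *	$ INTEL_DEVID_OVERRIDE=skl ./my_test
 *
 * Setting it also sets no_exec, so batches are built but never submitted.
 */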
int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	return bufmgr_gem->pci_device;
}
/**
 * Sets the AUB filename.
 *
 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
 * for it to have any effect.
 */
void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
				      const char *filename)
{
}

/**
 * Sets up AUB dumping.
 *
 * This is a trace file format that can be used with the simulator.
 * Packets are emitted in a format somewhat like GPU command packets.
 * You can set up a GTT and upload your objects into the referenced
 * space, then send off batchbuffers and get BMPs out the other end.
 */
void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
	fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
		"Use intel_aubdump from intel-gpu-tools instead.  Install intel-gpu-tools,\n"
		"then run (for example)\n\n"
		"\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
		"See the intel_aubdump man page for more details.\n");
}
drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	struct drm_i915_gem_context_create create;
	drm_intel_context *context = NULL;
	int ret;

	context = calloc(1, sizeof(*context));
	if (!context)
		return NULL;

	memclear(create);
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
	if (ret != 0) {
		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
		    strerror(errno));
		free(context);
		return NULL;
	}

	context->ctx_id = create.ctx_id;
	context->bufmgr = bufmgr;

	return context;
}

int
drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
{
	if (ctx == NULL)
		return -EINVAL;

	*ctx_id = ctx->ctx_id;

	return 0;
}

void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_context_destroy destroy;
	int ret;

	if (ctx == NULL)
		return;

	memclear(destroy);

	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
	destroy.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
		       &destroy);
	if (ret != 0)
		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
			strerror(errno));

	free(ctx);
}

int
drm_intel_get_reset_stats(drm_intel_context *ctx,
			  uint32_t *reset_count,
			  uint32_t *active,
			  uint32_t *pending)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_reset_stats stats;
	int ret;

	if (ctx == NULL)
		return -EINVAL;

	memclear(stats);

	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
	stats.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_RESET_STATS,
		       &stats);
	if (ret == 0) {
		if (reset_count != NULL)
			*reset_count = stats.reset_count;

		if (active != NULL)
			*active = stats.batch_active;

		if (pending != NULL)
			*pending = stats.batch_pending;
	}

	return ret;
}
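/*
 * Illustrative sketch (not part of this file): typical context lifecycle
 * around a hardware-context-aware submission.  "batch" and "used" are
 * hypothetical caller state.
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *	drm_intel_gem_bo_context_exec(batch, ctx, used, I915_EXEC_RENDER);
 *	drm_intel_gem_context_destroy(ctx);
 */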
int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
		   uint32_t offset,
		   uint64_t *result)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	struct drm_i915_reg_read reg_read;
	int ret;

	memclear(reg_read);
	reg_read.offset = offset;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

	*result = reg_read.val;
	return ret;
}
int
drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
{
	drm_i915_getparam_t gp;
	int ret;

	memclear(gp);
	gp.value = (int*)subslice_total;
	gp.param = I915_PARAM_SUBSLICE_TOTAL;
	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret)
		return -errno;

	return 0;
}

int
drm_intel_get_eu_total(int fd, unsigned int *eu_total)
{
	drm_i915_getparam_t gp;
	int ret;

	memclear(gp);
	gp.value = (int*)eu_total;
	gp.param = I915_PARAM_EU_TOTAL;
	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret)
		return -errno;

	return 0;
}

int
drm_intel_get_pooled_eu(int fd)
{
	drm_i915_getparam_t gp;
	int ret = -1;

	memclear(gp);
	gp.param = I915_PARAM_HAS_POOLED_EU;
	gp.value = &ret;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;

	return ret;
}

int
drm_intel_get_min_eu_in_pool(int fd)
{
	drm_i915_getparam_t gp;
	int ret = -1;

	memclear(gp);
	gp.param = I915_PARAM_MIN_EU_IN_POOL;
	gp.value = &ret;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;

	return ret;
}
/**
 * Annotate the given bo for use in aub dumping.
 *
 * \param annotations is an array of drm_intel_aub_annotation objects
 * describing the type of data in various sections of the bo.  Each
 * element of the array specifies the type and subtype of a section of
 * the bo, and the past-the-end offset of that section.  The elements
 * of \c annotations must be sorted so that ending_offset is
 * increasing.
 *
 * \param count is the number of elements in the \c annotations array.
 * If \c count is zero, then \c annotations will not be dereferenced.
 *
 * Annotations are copied into a private data structure, so caller may
 * re-use the memory pointed to by \c annotations after the call
 * returns.
 *
 * Annotations are stored for the lifetime of the bo; to reset to the
 * default state (no annotations), call this function with a \c count
 * of zero.
 */
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
					 drm_intel_aub_annotation *annotations,
					 unsigned count)
{
}
static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };

static drm_intel_bufmgr_gem *
drm_intel_bufmgr_gem_find(int fd)
{
	drm_intel_bufmgr_gem *bufmgr_gem;

	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
		if (bufmgr_gem->fd == fd) {
			atomic_inc(&bufmgr_gem->refcount);
			return bufmgr_gem;
		}
	}

	return NULL;
}

void
drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
		pthread_mutex_lock(&bufmgr_list_mutex);

		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
			DRMLISTDEL(&bufmgr_gem->managers);
			drm_intel_bufmgr_gem_destroy(bufmgr);
		}

		pthread_mutex_unlock(&bufmgr_list_mutex);
	}
}
void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->gtt_virtual)
		return bo_gem->gtt_virtual;

	if (bo_gem->is_userptr)
		return NULL;

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;
		void *ptr;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		if (bo_gem->map_count++ == 0)
			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ptr = MAP_FAILED;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP_GTT,
			     &mmap_arg) == 0) {
			/* and mmap it */
			ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				       MAP_SHARED, bufmgr_gem->fd,
				       mmap_arg.offset);
		}
		if (ptr == MAP_FAILED) {
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			ptr = NULL;
		}

		bo_gem->gtt_virtual = ptr;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->gtt_virtual;
}

void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		return bo_gem->mem_virtual;

	if (bo_gem->is_userptr) {
		/* Return the same user ptr */
		return bo_gem->user_virtual;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		if (bo_gem->map_count++ == 0)
			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP,
			     &mmap_arg)) {
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		} else {
			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
			bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->mem_virtual;
}

void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->wc_virtual)
		return bo_gem->wc_virtual;

	if (bo_gem->is_userptr)
		return NULL;

	pthread_mutex_lock(&bufmgr_gem->lock);
	if (!bo_gem->wc_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		if (bo_gem->map_count++ == 0)
			drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memclear(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		mmap_arg.flags = I915_MMAP_WC;
		if (drmIoctl(bufmgr_gem->fd,
			     DRM_IOCTL_I915_GEM_MMAP,
			     &mmap_arg)) {
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		} else {
			VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
			bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return bo_gem->wc_virtual;
}
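/*
 * Illustrative sketch (not part of this file): the three map flavours trade
 * coherency against bandwidth.  One common heuristic is CPU maps on LLC
 * platforms and write-combining maps elsewhere; "use_llc" is a hypothetical
 * caller flag.
 *
 *	void *ptr = use_llc ? drm_intel_gem_bo_map__cpu(bo)
 *			    : drm_intel_gem_bo_map__wc(bo);
 */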
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate,
 * map, and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

	pthread_mutex_lock(&bufmgr_list_mutex);

	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
	if (bufmgr_gem)
		goto exit;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		goto exit;

	bufmgr_gem->fd = fd;
	atomic_set(&bufmgr_gem->refcount, 1);

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	memclear(aperture);
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else if (IS_GEN8(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 8;
	else if (IS_GEN9(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 9;
	else {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
		 * be used for tiled blits. To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
		 * gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}

	memclear(gp);
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	gp.param = I915_PARAM_HAS_EXEC_ASYNC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_exec_async = ret == 0;

	bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;

	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_wait_timeout = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* Kernel does not support the HAS_LLC query; fall back to GPU
		 * generation detection and assume that we have LLC on GEN6/7
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
				IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = *gp.value;

	gp.param = I915_PARAM_HAS_VEBOX;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0);

	gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret == 0 && *gp.value > 0)
		bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;

	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers. Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}

	if (bufmgr_gem->gen >= 8) {
		gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret == 0 && *gp.value == 3)
			bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
	}

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);

exit:
	pthread_mutex_unlock(&bufmgr_list_mutex);

	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}
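/*
 * Illustrative sketch (not part of this file): minimal bring-up of the GEM
 * buffer manager.  The 4096 batch size is a hypothetical example value.
 *
 *	int fd = drmOpen("i915", NULL);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	...
 *	drm_intel_bufmgr_destroy(bufmgr);
 */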