/*
 * Copyright 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <xf86drm.h>
#include "drm-uapi/panfrost_drm.h"

#include "pan_bo.h"
#include "pan_screen.h"
#include "pan_util.h"
#include "pandecode/decode.h"

#include "os/os_mman.h"

#include "util/u_inlines.h"
#include "util/u_math.h"
/* This file implements a userspace BO cache. Allocating and freeing
 * GPU-visible buffers is very expensive, and even the extra kernel roundtrips
 * add more work than we would like at this point. So caching BOs in userspace
 * solves both of these problems and does not require kernel updates.
 *
 * Cached BOs are sorted into a bucket based on rounding their size down to the
 * nearest power-of-two. Each bucket contains a linked list of free panfrost_bo
 * objects. Putting a BO into the cache is accomplished by adding it to the
 * corresponding bucket. Getting a BO from the cache consists of finding the
 * appropriate bucket and searching it for a suitable, idle BO. A cache
 * eviction is a kernel-level free of a BO and removing it from the bucket. We
 * special case evicting all BOs from the cache, since that's what's helpful in
 * practice and avoids extra logic around the linked list.
 */
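
/* To illustrate the intended lifecycle, a minimal usage sketch (hypothetical
 * caller, error handling omitted; panfrost_bo_create()/_unreference() are
 * defined later in this file):
 *
 *    struct panfrost_bo *bo = panfrost_bo_create(screen, 4096, 0);
 *    memcpy(bo->cpu, data, 4096);     // CPU-visible mapping
 *    // ... submit GPU jobs referencing bo->gpu ...
 *    panfrost_bo_unreference(bo);     // last ref: BO parked in its bucket
 *
 *    // A later allocation of similar size and flags is serviced from the
 *    // bucket without any CREATE_BO ioctl:
 *    bo = panfrost_bo_create(screen, 4096, 0);
 */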
/* Allocates a fresh BO from the kernel via the CREATE_BO ioctl */

static struct panfrost_bo *
panfrost_bo_alloc(struct panfrost_screen *screen, size_t size,
                  uint32_t flags)
{
        struct drm_panfrost_create_bo create_bo = { .size = size };
        struct panfrost_bo *bo;
        int ret;

        /* The HEAP and NOEXEC flags are only supported by kernel
         * driver versions >= 1.1 */
        if (screen->kernel_version->version_major > 1 ||
            screen->kernel_version->version_minor >= 1) {
                if (flags & PAN_BO_GROWABLE)
                        create_bo.flags |= PANFROST_BO_HEAP;
                if (!(flags & PAN_BO_EXECUTE))
                        create_bo.flags |= PANFROST_BO_NOEXEC;
        }

        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_CREATE_BO, &create_bo);
        if (ret) {
                fprintf(stderr, "DRM_IOCTL_PANFROST_CREATE_BO failed: %m\n");
                return NULL;
        }

        bo = rzalloc(screen, struct panfrost_bo);
        assert(bo);
        bo->size = create_bo.size;
        bo->gpu = create_bo.offset;
        bo->gem_handle = create_bo.handle;
        bo->flags = flags;
        bo->screen = screen;

        return bo;
}
static void
panfrost_bo_free(struct panfrost_bo *bo)
{
        struct drm_gem_close gem_close = { .handle = bo->gem_handle };
        int ret;

        ret = drmIoctl(bo->screen->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
        if (ret) {
                fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %m\n");
                assert(0);
        }

        ralloc_free(bo);
}
/* Returns true if the BO is ready, false otherwise.
 * access_type encodes the type of access one wants to ensure is done.
 * Say you want to make sure all writers are done writing: you should pass
 * PAN_BO_ACCESS_WRITE.
 * If you want to wait for all users, you should pass PAN_BO_ACCESS_RW.
 * PAN_BO_ACCESS_READ would work too, as waiting for readers implies waiting
 * for writers as well, but we want to make things explicit, and waiting only
 * for readers is impossible.
 */
bool
panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns,
                 uint32_t access_type)
{
        struct drm_panfrost_wait_bo req = {
                .handle = bo->gem_handle,
                .timeout_ns = timeout_ns,
        };
        int ret;

        assert(access_type == PAN_BO_ACCESS_WRITE ||
               access_type == PAN_BO_ACCESS_RW);

        /* If the BO has been exported or imported we can't rely on the cached
         * state, we need to call the WAIT_BO ioctl.
         */
        if (!(bo->flags & (PAN_BO_IMPORTED | PAN_BO_EXPORTED))) {
                /* If ->gpu_access is 0, the BO is idle, no need to wait. */
                if (!bo->gpu_access)
                        return true;

                /* If the caller only wants to wait for writers and no
                 * writes are pending, we don't have to wait.
                 */
                if (access_type == PAN_BO_ACCESS_WRITE &&
                    !(bo->gpu_access & PAN_BO_ACCESS_WRITE))
                        return true;
        }

        /* The ioctl returns a value >= 0 when the BO we are waiting for is
         * ready, -1 otherwise.
         */
        ret = drmIoctl(bo->screen->fd, DRM_IOCTL_PANFROST_WAIT_BO, &req);
        if (ret != -1) {
                /* Set gpu_access to 0 so that the next call to bo_wait()
                 * doesn't have to call the WAIT_BO ioctl.
                 */
                bo->gpu_access = 0;
                return true;
        }

        /* If errno is not ETIMEDOUT or EBUSY, the handle we passed is
         * invalid, which shouldn't happen here.
         */
        assert(errno == ETIMEDOUT || errno == EBUSY);
        return false;
}
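
/* For example (hypothetical callers, not part of this file): a CPU read-back
 * only needs the writers to be done, while a CPU write must wait for readers
 * too, so the two cases would be expressed as:
 *
 *    panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_WRITE); // before read-back
 *    panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_RW);    // before CPU write
 */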
/* Helper to calculate the bucket index of a BO */

static unsigned
pan_bucket_index(unsigned size)
{
        /* Round down to POT to compute a bucket index */
        unsigned bucket_index = util_logbase2(size);

        /* Clamp the bucket index; all huge allocations will be
         * sorted into the largest bucket */
        bucket_index = MIN2(bucket_index, MAX_BO_CACHE_BUCKET);

        /* The minimum bucket size must equal the minimum allocation
         * size; the maximum we clamped */
        assert(bucket_index >= MIN_BO_CACHE_BUCKET);
        assert(bucket_index <= MAX_BO_CACHE_BUCKET);

        /* Reindex from 0 */
        return (bucket_index - MIN_BO_CACHE_BUCKET);
}
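
/* Worked example, assuming MIN_BO_CACHE_BUCKET = 12 (4 KiB) and
 * MAX_BO_CACHE_BUCKET = 24 (16 MiB) -- the authoritative values live in
 * pan_screen.h:
 *
 *    size = 150000  -> util_logbase2() = 17 -> bucket index 17 - 12 = 5
 *    size = 64 MiB  -> util_logbase2() = 26 -> clamped to 24  -> index 12
 */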
static struct list_head *
pan_bucket(struct panfrost_screen *screen, unsigned size)
{
        return &screen->bo_cache.buckets[pan_bucket_index(size)];
}
/* Tries to fetch a BO of sufficient size with the appropriate flags from the
 * BO cache. If it succeeds, it returns that BO and removes the BO from the
 * cache. If it fails, it returns NULL signaling the caller to allocate a new
 * BO.
 */

static struct panfrost_bo *
panfrost_bo_cache_fetch(struct panfrost_screen *screen,
                        size_t size, uint32_t flags, bool dontwait)
{
        pthread_mutex_lock(&screen->bo_cache.lock);
        struct list_head *bucket = pan_bucket(screen, size);
        struct panfrost_bo *bo = NULL;

        /* Iterate the bucket looking for something suitable */
        list_for_each_entry_safe(struct panfrost_bo, entry, bucket,
                                 bucket_link) {
                if (entry->size < size || entry->flags != flags)
                        continue;

                /* If the entry is still busy on the GPU, either wait for it
                 * to go idle or, in dontwait mode, skip it. */
                if (!panfrost_bo_wait(entry, dontwait ? 0 : INT64_MAX,
                                      PAN_BO_ACCESS_RW))
                        continue;

                struct drm_panfrost_madvise madv = {
                        .handle = entry->gem_handle,
                        .madv = PANFROST_MADV_WILLNEED,
                };
                int ret;

                /* This one works, splice it out of the cache */
                list_del(&entry->bucket_link);
                list_del(&entry->lru_link);

                ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MADVISE, &madv);
                if (!ret && !madv.retained) {
                        /* The kernel reclaimed the backing pages while the BO
                         * sat in the cache; free the handle and keep looking. */
                        panfrost_bo_free(entry);
                        continue;
                }

                /* Let's go! */
                bo = entry;
                break;
        }
        pthread_mutex_unlock(&screen->bo_cache.lock);

        return bo;
}
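
/* The MADVISE dance above pairs with panfrost_bo_cache_put() below: BOs
 * entering the cache are marked DONTNEED, allowing the kernel to reclaim
 * their pages under memory pressure, and WILLNEED on fetch reports through
 * madv.retained whether the pages survived. A rough sketch of the contract:
 *
 *    put:   madv = PANFROST_MADV_DONTNEED  -> kernel may purge the pages
 *    fetch: madv = PANFROST_MADV_WILLNEED  -> retained == 1: reusable as-is
 *                                             retained == 0: purged, free it
 */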
static void
panfrost_bo_cache_evict_stale_bos(struct panfrost_screen *screen)
{
        struct timespec time;

        clock_gettime(CLOCK_MONOTONIC, &time);
        list_for_each_entry_safe(struct panfrost_bo, entry,
                                 &screen->bo_cache.lru, lru_link) {
                /* We want all entries that have been used more than 1 sec
                 * ago to be dropped, others can be kept.
                 * Note the <= 2 check and not <= 1. It's here to account for
                 * the fact that we're only testing ->tv_sec, not ->tv_nsec.
                 * That means we might keep entries that are between 1 and 2
                 * seconds old, but we don't really care, as long as unused BOs
                 * are dropped at some point.
                 */
                if (time.tv_sec - entry->last_used <= 2)
                        break;

                list_del(&entry->bucket_link);
                list_del(&entry->lru_link);
                panfrost_bo_free(entry);
        }
}
/* Tries to add a BO to the cache. Returns true if it was successful,
 * false otherwise. */

static bool
panfrost_bo_cache_put(struct panfrost_bo *bo)
{
        struct panfrost_screen *screen = bo->screen;

        if (bo->flags & PAN_BO_DONT_REUSE)
                return false;

        pthread_mutex_lock(&screen->bo_cache.lock);
        struct list_head *bucket = pan_bucket(screen, bo->size);
        struct drm_panfrost_madvise madv;
        struct timespec time;

        madv.handle = bo->gem_handle;
        madv.madv = PANFROST_MADV_DONTNEED;
        madv.retained = 0;

        drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MADVISE, &madv);

        /* Add us to the bucket */
        list_addtail(&bo->bucket_link, bucket);

        /* Add us to the LRU list and update the last_used field. */
        list_addtail(&bo->lru_link, &screen->bo_cache.lru);
        clock_gettime(CLOCK_MONOTONIC, &time);
        bo->last_used = time.tv_sec;

        /* Let's do some cleanup in the BO cache while we hold the
         * lock.
         */
        panfrost_bo_cache_evict_stale_bos(screen);
        pthread_mutex_unlock(&screen->bo_cache.lock);

        return true;
}
/* Evicts all BOs from the cache. Called during context
 * destroy or during low-memory situations (to free up
 * memory that may be unused by us just sitting in our
 * cache, but still reserved from the perspective of the
 * OS) */

void
panfrost_bo_cache_evict_all(
                struct panfrost_screen *screen)
{
        pthread_mutex_lock(&screen->bo_cache.lock);
        for (unsigned i = 0; i < ARRAY_SIZE(screen->bo_cache.buckets); ++i) {
                struct list_head *bucket = &screen->bo_cache.buckets[i];

                list_for_each_entry_safe(struct panfrost_bo, entry, bucket,
                                         bucket_link) {
                        list_del(&entry->bucket_link);
                        list_del(&entry->lru_link);
                        panfrost_bo_free(entry);
                }
        }
        pthread_mutex_unlock(&screen->bo_cache.lock);
}
void
panfrost_bo_mmap(struct panfrost_bo *bo)
{
        struct drm_panfrost_mmap_bo mmap_bo = { .handle = bo->gem_handle };
        int ret;

        if (bo->cpu)
                return;

        ret = drmIoctl(bo->screen->fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
        if (ret) {
                fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %m\n");
                assert(0);
        }

        bo->cpu = os_mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                          bo->screen->fd, mmap_bo.offset);
        if (bo->cpu == MAP_FAILED) {
                fprintf(stderr, "mmap failed: %p %m\n", bo->cpu);
                assert(0);
        }

        /* Record the mmap if we're tracing */
        if (pan_debug & PAN_DBG_TRACE)
                pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
}
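
/* The two-step mapping above follows the usual DRM convention: the
 * driver-specific ioctl hands back a fake offset (not a GPU address), which
 * is then passed to mmap() on the DRM fd to obtain a CPU pointer:
 *
 *    DRM_IOCTL_PANFROST_MMAP_BO(handle)  -> mmap_bo.offset (fake offset)
 *    os_mmap(..., fd, mmap_bo.offset)    -> bo->cpu
 */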
static void
panfrost_bo_munmap(struct panfrost_bo *bo)
{
        if (!bo->cpu)
                return;

        if (os_munmap((void *) (uintptr_t)bo->cpu, bo->size)) {
                perror("munmap");
                abort();
        }

        bo->cpu = NULL;
}
struct panfrost_bo *
panfrost_bo_create(struct panfrost_screen *screen, size_t size,
                   uint32_t flags)
{
        struct panfrost_bo *bo;

        /* Kernel will fail (confusingly) with EPERM otherwise */
        assert(size > 0);

        /* To maximize BO cache usage, don't allocate tiny BOs */
        size = MAX2(size, 4096);

        /* GROWABLE BOs cannot be mmapped */
        if (flags & PAN_BO_GROWABLE)
                assert(flags & PAN_BO_INVISIBLE);

        /* Before creating a BO, we first want to check the cache but without
         * waiting for BO readiness (BOs in the cache can still be referenced
         * by jobs that are not finished yet).
         * If the cached allocation fails we fall back on fresh BO allocation,
         * and if that fails too, we try one more time to allocate from the
         * cache, but this time we are willing to wait.
         */
        bo = panfrost_bo_cache_fetch(screen, size, flags, true);
        if (!bo)
                bo = panfrost_bo_alloc(screen, size, flags);
        if (!bo)
                bo = panfrost_bo_cache_fetch(screen, size, flags, false);

        if (!bo)
                fprintf(stderr, "BO creation failed\n");

        assert(bo);

        /* Only mmap now if we know we need to. For CPU-invisible buffers, we
         * never map since we don't care about their contents; they're purely
         * for GPU-internal use. But we do trace them anyway. */
        if (!(flags & (PAN_BO_INVISIBLE | PAN_BO_DELAY_MMAP)))
                panfrost_bo_mmap(bo);
        else if (flags & PAN_BO_INVISIBLE) {
                if (pan_debug & PAN_DBG_TRACE)
                        pandecode_inject_mmap(bo->gpu, NULL, bo->size, NULL);
        }

        pipe_reference_init(&bo->reference, 1);

        pthread_mutex_lock(&screen->active_bos_lock);
        _mesa_set_add(bo->screen->active_bos, bo);
        pthread_mutex_unlock(&screen->active_bos_lock);

        return bo;
}
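
/* Typical flag combinations, as a rough sketch (the PAN_BO_* flags are
 * declared alongside struct panfrost_bo):
 *
 *    panfrost_bo_create(screen, size, 0);                 // mapped up-front
 *    panfrost_bo_create(screen, size, PAN_BO_INVISIBLE);  // GPU-only, no map
 *    panfrost_bo_create(screen, size, PAN_BO_GROWABLE |
 *                                     PAN_BO_INVISIBLE);  // on-demand heap
 */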
void
panfrost_bo_reference(struct panfrost_bo *bo)
{
        if (bo)
                pipe_reference(NULL, &bo->reference);
}
void
panfrost_bo_unreference(struct panfrost_bo *bo)
{
        if (!bo)
                return;

        /* Don't return to the cache if there are still references */
        if (!pipe_reference(&bo->reference, NULL))
                return;

        struct panfrost_screen *screen = bo->screen;

        pthread_mutex_lock(&screen->active_bos_lock);
        /* Someone might have imported this BO while we were waiting for the
         * lock, let's make sure it's still not referenced before freeing it.
         */
        if (!pipe_is_referenced(&bo->reference)) {
                _mesa_set_remove_key(bo->screen->active_bos, bo);

                /* When the reference count goes to zero, we need to cleanup */
                panfrost_bo_munmap(bo);

                /* Rather than freeing the BO now, we'll cache the BO for later
                 * allocations if we're allowed to.
                 */
                if (!panfrost_bo_cache_put(bo))
                        panfrost_bo_free(bo);
        }
        pthread_mutex_unlock(&screen->active_bos_lock);
}
struct panfrost_bo *
panfrost_bo_import(struct panfrost_screen *screen, int fd)
{
        struct panfrost_bo *bo, *newbo = rzalloc(screen, struct panfrost_bo);
        struct drm_panfrost_get_bo_offset get_bo_offset = { 0 };
        struct set_entry *entry;
        unsigned gem_handle;
        int ret;

        newbo->screen = screen;

        ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
        assert(!ret);

        newbo->gem_handle = gem_handle;

        pthread_mutex_lock(&screen->active_bos_lock);
        entry = _mesa_set_search_or_add(screen->active_bos, newbo);
        assert(entry);
        bo = (struct panfrost_bo *)entry->key;
        if (newbo == bo) {
                /* First time we see this handle: query its GPU address and
                 * size, and map it. */
                get_bo_offset.handle = gem_handle;
                ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET,
                               &get_bo_offset);
                assert(!ret);

                newbo->gpu = (mali_ptr) get_bo_offset.offset;
                newbo->size = lseek(fd, 0, SEEK_END);
                newbo->flags |= PAN_BO_DONT_REUSE | PAN_BO_IMPORTED;
                assert(newbo->size > 0);
                pipe_reference_init(&newbo->reference, 1);
                // TODO map and unmap on demand?
                panfrost_bo_mmap(newbo);
        } else {
                /* We already track this handle; drop the duplicate and take a
                 * reference on the existing BO instead. */
                ralloc_free(newbo);

                /* !pipe_is_referenced(&bo->reference) can happen if the BO
                 * was being released but panfrost_bo_import() acquired the
                 * lock before panfrost_bo_unreference(). In that case, refcnt
                 * is 0 and we can't use panfrost_bo_reference() directly, we
                 * have to re-initialize it with pipe_reference_init().
                 * Note that panfrost_bo_unreference() checks
                 * pipe_is_referenced() value just after acquiring the lock to
                 * make sure the object is not freed if panfrost_bo_import()
                 * acquired it in the meantime.
                 */
                if (!pipe_is_referenced(&bo->reference))
                        pipe_reference_init(&bo->reference, 1);
                else
                        panfrost_bo_reference(bo);
        }
        pthread_mutex_unlock(&screen->active_bos_lock);

        return bo;
}
int
panfrost_bo_export(struct panfrost_bo *bo)
{
        struct drm_prime_handle args = {
                .handle = bo->gem_handle,
                .flags = DRM_CLOEXEC,
        };

        int ret = drmIoctl(bo->screen->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
        if (ret == -1)
                return -1;

        /* Exported BOs must not be returned to the cache: another process
         * may still be using them after we drop our last reference. */
        bo->flags |= PAN_BO_DONT_REUSE | PAN_BO_EXPORTED;
        return args.fd;
}
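
/* Cross-device/process sharing roundtrip, as a minimal sketch (in practice
 * the dma-buf fd travels over IPC rather than staying in-process):
 *
 *    int fd = panfrost_bo_export(bo);  // dma-buf fd (CLOEXEC)
 *    struct panfrost_bo *shared = panfrost_bo_import(other_screen, fd);
 *    close(fd);                        // the import keeps its own reference
 *    ...
 *    panfrost_bo_unreference(shared);  // DONT_REUSE: freed, never cached
 */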