2 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
24 * Rob Clark <robclark@freedesktop.org>
27 #include "os/os_mman.h"
29 #include "freedreno_drmif.h"
30 #include "freedreno_priv.h"
32 pthread_mutex_t table_lock
= PTHREAD_MUTEX_INITIALIZER
;
33 void bo_del(struct fd_bo
*bo
);
35 /* set buffer name, and add to table, call w/ table_lock held: */
36 static void set_name(struct fd_bo
*bo
, uint32_t name
)
39 /* add ourself into the handle table: */
40 _mesa_hash_table_insert(bo
->dev
->name_table
, &bo
->name
, bo
);
43 /* lookup a buffer, call w/ table_lock held: */
44 static struct fd_bo
* lookup_bo(struct hash_table
*tbl
, uint32_t key
)
46 struct fd_bo
*bo
= NULL
;
47 struct hash_entry
*entry
= _mesa_hash_table_search(tbl
, &key
);
49 /* found, incr refcnt and return: */
50 bo
= fd_bo_ref(entry
->data
);
52 /* don't break the bucket if this bo was found in one */
53 list_delinit(&bo
->list
);
58 /* allocate a new buffer object, call w/ table_lock held */
59 static struct fd_bo
* bo_from_handle(struct fd_device
*dev
,
60 uint32_t size
, uint32_t handle
)
64 bo
= dev
->funcs
->bo_from_handle(dev
, size
, handle
);
66 struct drm_gem_close req
= {
69 drmIoctl(dev
->fd
, DRM_IOCTL_GEM_CLOSE
, &req
);
72 bo
->dev
= fd_device_ref(dev
);
75 bo
->iova
= bo
->funcs
->iova(bo
);
76 bo
->flags
= FD_RELOC_FLAGS_INIT
;
78 p_atomic_set(&bo
->refcnt
, 1);
79 list_inithead(&bo
->list
);
80 /* add ourself into the handle table: */
81 _mesa_hash_table_insert(dev
->handle_table
, &bo
->handle
, bo
);
86 bo_new(struct fd_device
*dev
, uint32_t size
, uint32_t flags
,
87 struct fd_bo_cache
*cache
)
89 struct fd_bo
*bo
= NULL
;
93 bo
= fd_bo_cache_alloc(cache
, &size
, flags
);
97 ret
= dev
->funcs
->bo_new_handle(dev
, size
, flags
, &handle
);
101 pthread_mutex_lock(&table_lock
);
102 bo
= bo_from_handle(dev
, size
, handle
);
103 pthread_mutex_unlock(&table_lock
);
111 _fd_bo_new(struct fd_device
*dev
, uint32_t size
, uint32_t flags
)
113 struct fd_bo
*bo
= bo_new(dev
, size
, flags
, &dev
->bo_cache
);
115 bo
->bo_reuse
= BO_CACHE
;
120 _fd_bo_set_name(struct fd_bo
*bo
, const char *fmt
, va_list ap
)
122 bo
->funcs
->set_name(bo
, fmt
, ap
);
125 /* internal function to allocate bo's that use the ringbuffer cache
126 * instead of the normal bo_cache. The purpose is, because cmdstream
127 * bo's get vmap'd on the kernel side, and that is expensive, we want
128 * to re-use cmdstream bo's for cmdstream and not unrelated purposes.
131 fd_bo_new_ring(struct fd_device
*dev
, uint32_t size
)
133 uint32_t flags
= DRM_FREEDRENO_GEM_GPUREADONLY
;
134 struct fd_bo
*bo
= bo_new(dev
, size
, flags
, &dev
->ring_cache
);
136 bo
->bo_reuse
= RING_CACHE
;
137 fd_bo_set_name(bo
, "cmdstream");
142 fd_bo_from_handle(struct fd_device
*dev
, uint32_t handle
, uint32_t size
)
144 struct fd_bo
*bo
= NULL
;
146 pthread_mutex_lock(&table_lock
);
148 bo
= lookup_bo(dev
->handle_table
, handle
);
152 bo
= bo_from_handle(dev
, size
, handle
);
157 pthread_mutex_unlock(&table_lock
);
163 fd_bo_from_dmabuf(struct fd_device
*dev
, int fd
)
169 pthread_mutex_lock(&table_lock
);
170 ret
= drmPrimeFDToHandle(dev
->fd
, fd
, &handle
);
172 pthread_mutex_unlock(&table_lock
);
176 bo
= lookup_bo(dev
->handle_table
, handle
);
180 /* lseek() to get bo size */
181 size
= lseek(fd
, 0, SEEK_END
);
182 lseek(fd
, 0, SEEK_CUR
);
184 bo
= bo_from_handle(dev
, size
, handle
);
189 pthread_mutex_unlock(&table_lock
);
194 struct fd_bo
* fd_bo_from_name(struct fd_device
*dev
, uint32_t name
)
196 struct drm_gem_open req
= {
201 pthread_mutex_lock(&table_lock
);
203 /* check name table first, to see if bo is already open: */
204 bo
= lookup_bo(dev
->name_table
, name
);
208 if (drmIoctl(dev
->fd
, DRM_IOCTL_GEM_OPEN
, &req
)) {
209 ERROR_MSG("gem-open failed: %s", strerror(errno
));
213 bo
= lookup_bo(dev
->handle_table
, req
.handle
);
217 bo
= bo_from_handle(dev
, req
.size
, req
.handle
);
224 pthread_mutex_unlock(&table_lock
);
230 fd_bo_mark_for_dump(struct fd_bo
*bo
)
232 bo
->flags
|= FD_RELOC_DUMP
;
235 uint64_t fd_bo_get_iova(struct fd_bo
*bo
)
240 struct fd_bo
* fd_bo_ref(struct fd_bo
*bo
)
242 p_atomic_inc(&bo
->refcnt
);
246 void fd_bo_del(struct fd_bo
*bo
)
248 struct fd_device
*dev
= bo
->dev
;
250 if (!atomic_dec_and_test(&bo
->refcnt
))
253 pthread_mutex_lock(&table_lock
);
255 if ((bo
->bo_reuse
== BO_CACHE
) && (fd_bo_cache_free(&dev
->bo_cache
, bo
) == 0))
257 if ((bo
->bo_reuse
== RING_CACHE
) && (fd_bo_cache_free(&dev
->ring_cache
, bo
) == 0))
261 fd_device_del_locked(dev
);
263 pthread_mutex_unlock(&table_lock
);
266 /* Called under table_lock */
267 void bo_del(struct fd_bo
*bo
)
272 os_munmap(bo
->map
, bo
->size
);
274 /* TODO probably bo's in bucket list get removed from
279 struct drm_gem_close req
= {
280 .handle
= bo
->handle
,
282 _mesa_hash_table_remove_key(bo
->dev
->handle_table
, &bo
->handle
);
284 _mesa_hash_table_remove_key(bo
->dev
->name_table
, &bo
->name
);
285 drmIoctl(bo
->dev
->fd
, DRM_IOCTL_GEM_CLOSE
, &req
);
288 bo
->funcs
->destroy(bo
);
291 int fd_bo_get_name(struct fd_bo
*bo
, uint32_t *name
)
294 struct drm_gem_flink req
= {
295 .handle
= bo
->handle
,
299 ret
= drmIoctl(bo
->dev
->fd
, DRM_IOCTL_GEM_FLINK
, &req
);
304 pthread_mutex_lock(&table_lock
);
305 set_name(bo
, req
.name
);
306 pthread_mutex_unlock(&table_lock
);
307 bo
->bo_reuse
= NO_CACHE
;
315 uint32_t fd_bo_handle(struct fd_bo
*bo
)
320 int fd_bo_dmabuf(struct fd_bo
*bo
)
324 ret
= drmPrimeHandleToFD(bo
->dev
->fd
, bo
->handle
, DRM_CLOEXEC
,
327 ERROR_MSG("failed to get dmabuf fd: %d", ret
);
331 bo
->bo_reuse
= NO_CACHE
;
336 uint32_t fd_bo_size(struct fd_bo
*bo
)
341 void * fd_bo_map(struct fd_bo
*bo
)
347 ret
= bo
->funcs
->offset(bo
, &offset
);
352 bo
->map
= os_mmap(0, bo
->size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
353 bo
->dev
->fd
, offset
);
354 if (bo
->map
== MAP_FAILED
) {
355 ERROR_MSG("mmap failed: %s", strerror(errno
));
362 /* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
363 int fd_bo_cpu_prep(struct fd_bo
*bo
, struct fd_pipe
*pipe
, uint32_t op
)
365 return bo
->funcs
->cpu_prep(bo
, pipe
, op
);
368 void fd_bo_cpu_fini(struct fd_bo
*bo
)
370 bo
->funcs
->cpu_fini(bo
);