/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
27 #include "os/os_mman.h"
29 #include "freedreno_drmif.h"
30 #include "freedreno_priv.h"
/* Protects the device's handle/name hash tables and bo refcount
 * transitions; every table access in this file happens with this
 * lock held.
 */
pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Forward declaration: frees a bo, call w/ table_lock held. */
void bo_del(struct fd_bo *bo);
35 /* set buffer name, and add to table, call w/ table_lock held: */
36 static void set_name(struct fd_bo
*bo
, uint32_t name
)
39 /* add ourself into the handle table: */
40 _mesa_hash_table_insert(bo
->dev
->name_table
, &bo
->name
, bo
);
43 /* lookup a buffer, call w/ table_lock held: */
44 static struct fd_bo
* lookup_bo(struct hash_table
*tbl
, uint32_t key
)
46 struct fd_bo
*bo
= NULL
;
47 struct hash_entry
*entry
= _mesa_hash_table_search(tbl
, &key
);
49 /* found, incr refcnt and return: */
50 bo
= fd_bo_ref(entry
->data
);
52 /* don't break the bucket if this bo was found in one */
53 list_delinit(&bo
->list
);
58 /* allocate a new buffer object, call w/ table_lock held */
59 static struct fd_bo
* bo_from_handle(struct fd_device
*dev
,
60 uint32_t size
, uint32_t handle
)
64 bo
= dev
->funcs
->bo_from_handle(dev
, size
, handle
);
66 struct drm_gem_close req
= {
69 drmIoctl(dev
->fd
, DRM_IOCTL_GEM_CLOSE
, &req
);
72 bo
->dev
= fd_device_ref(dev
);
75 p_atomic_set(&bo
->refcnt
, 1);
76 list_inithead(&bo
->list
);
77 /* add ourself into the handle table: */
78 _mesa_hash_table_insert(dev
->handle_table
, &bo
->handle
, bo
);
83 bo_new(struct fd_device
*dev
, uint32_t size
, uint32_t flags
,
84 struct fd_bo_cache
*cache
)
86 struct fd_bo
*bo
= NULL
;
90 bo
= fd_bo_cache_alloc(cache
, &size
, flags
);
94 ret
= dev
->funcs
->bo_new_handle(dev
, size
, flags
, &handle
);
98 pthread_mutex_lock(&table_lock
);
99 bo
= bo_from_handle(dev
, size
, handle
);
100 pthread_mutex_unlock(&table_lock
);
108 fd_bo_new(struct fd_device
*dev
, uint32_t size
, uint32_t flags
)
110 struct fd_bo
*bo
= bo_new(dev
, size
, flags
, &dev
->bo_cache
);
112 bo
->bo_reuse
= BO_CACHE
;
116 /* internal function to allocate bo's that use the ringbuffer cache
117 * instead of the normal bo_cache. The purpose is, because cmdstream
118 * bo's get vmap'd on the kernel side, and that is expensive, we want
119 * to re-use cmdstream bo's for cmdstream and not unrelated purposes.
122 fd_bo_new_ring(struct fd_device
*dev
, uint32_t size
, uint32_t flags
)
124 struct fd_bo
*bo
= bo_new(dev
, size
, flags
, &dev
->ring_cache
);
126 bo
->bo_reuse
= RING_CACHE
;
131 fd_bo_from_handle(struct fd_device
*dev
, uint32_t handle
, uint32_t size
)
133 struct fd_bo
*bo
= NULL
;
135 pthread_mutex_lock(&table_lock
);
137 bo
= lookup_bo(dev
->handle_table
, handle
);
141 bo
= bo_from_handle(dev
, size
, handle
);
146 pthread_mutex_unlock(&table_lock
);
152 fd_bo_from_dmabuf(struct fd_device
*dev
, int fd
)
158 pthread_mutex_lock(&table_lock
);
159 ret
= drmPrimeFDToHandle(dev
->fd
, fd
, &handle
);
161 pthread_mutex_unlock(&table_lock
);
165 bo
= lookup_bo(dev
->handle_table
, handle
);
169 /* lseek() to get bo size */
170 size
= lseek(fd
, 0, SEEK_END
);
171 lseek(fd
, 0, SEEK_CUR
);
173 bo
= bo_from_handle(dev
, size
, handle
);
178 pthread_mutex_unlock(&table_lock
);
183 struct fd_bo
* fd_bo_from_name(struct fd_device
*dev
, uint32_t name
)
185 struct drm_gem_open req
= {
190 pthread_mutex_lock(&table_lock
);
192 /* check name table first, to see if bo is already open: */
193 bo
= lookup_bo(dev
->name_table
, name
);
197 if (drmIoctl(dev
->fd
, DRM_IOCTL_GEM_OPEN
, &req
)) {
198 ERROR_MSG("gem-open failed: %s", strerror(errno
));
202 bo
= lookup_bo(dev
->handle_table
, req
.handle
);
206 bo
= bo_from_handle(dev
, req
.size
, req
.handle
);
213 pthread_mutex_unlock(&table_lock
);
218 uint64_t fd_bo_get_iova(struct fd_bo
*bo
)
221 bo
->iova
= bo
->funcs
->iova(bo
);
/* Release an iova obtained with fd_bo_get_iova(). */
void fd_bo_put_iova(struct fd_bo *bo)
{
	/* currently a no-op */
}
230 struct fd_bo
* fd_bo_ref(struct fd_bo
*bo
)
232 p_atomic_inc(&bo
->refcnt
);
236 void fd_bo_del(struct fd_bo
*bo
)
238 struct fd_device
*dev
= bo
->dev
;
240 if (!atomic_dec_and_test(&bo
->refcnt
))
243 pthread_mutex_lock(&table_lock
);
245 if ((bo
->bo_reuse
== BO_CACHE
) && (fd_bo_cache_free(&dev
->bo_cache
, bo
) == 0))
247 if ((bo
->bo_reuse
== RING_CACHE
) && (fd_bo_cache_free(&dev
->ring_cache
, bo
) == 0))
251 fd_device_del_locked(dev
);
253 pthread_mutex_unlock(&table_lock
);
256 /* Called under table_lock */
257 void bo_del(struct fd_bo
*bo
)
262 os_munmap(bo
->map
, bo
->size
);
264 /* TODO probably bo's in bucket list get removed from
269 struct drm_gem_close req
= {
270 .handle
= bo
->handle
,
272 _mesa_hash_table_remove_key(bo
->dev
->handle_table
, &bo
->handle
);
274 _mesa_hash_table_remove_key(bo
->dev
->name_table
, &bo
->name
);
275 drmIoctl(bo
->dev
->fd
, DRM_IOCTL_GEM_CLOSE
, &req
);
278 bo
->funcs
->destroy(bo
);
281 int fd_bo_get_name(struct fd_bo
*bo
, uint32_t *name
)
284 struct drm_gem_flink req
= {
285 .handle
= bo
->handle
,
289 ret
= drmIoctl(bo
->dev
->fd
, DRM_IOCTL_GEM_FLINK
, &req
);
294 pthread_mutex_lock(&table_lock
);
295 set_name(bo
, req
.name
);
296 pthread_mutex_unlock(&table_lock
);
297 bo
->bo_reuse
= NO_CACHE
;
305 uint32_t fd_bo_handle(struct fd_bo
*bo
)
310 int fd_bo_dmabuf(struct fd_bo
*bo
)
314 ret
= drmPrimeHandleToFD(bo
->dev
->fd
, bo
->handle
, DRM_CLOEXEC
,
317 ERROR_MSG("failed to get dmabuf fd: %d", ret
);
321 bo
->bo_reuse
= NO_CACHE
;
326 uint32_t fd_bo_size(struct fd_bo
*bo
)
331 void * fd_bo_map(struct fd_bo
*bo
)
337 ret
= bo
->funcs
->offset(bo
, &offset
);
342 bo
->map
= os_mmap(0, bo
->size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
343 bo
->dev
->fd
, offset
);
344 if (bo
->map
== MAP_FAILED
) {
345 ERROR_MSG("mmap failed: %s", strerror(errno
));
352 /* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
353 int fd_bo_cpu_prep(struct fd_bo
*bo
, struct fd_pipe
*pipe
, uint32_t op
)
355 return bo
->funcs
->cpu_prep(bo
, pipe
, op
);
358 void fd_bo_cpu_fini(struct fd_bo
*bo
)
360 bo
->funcs
->cpu_fini(bo
);