2 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * Rob Clark <robclark@freedesktop.org>
27 #include "os/os_mman.h"
29 #include "freedreno_drmif.h"
30 #include "freedreno_priv.h"
/* Global mutex guarding the device's handle_table and name_table and
 * the bo refcount/teardown paths that touch them.  Any function noted
 * "call w/ table_lock held" expects the caller to own this lock. */
32 pthread_mutex_t table_lock
= PTHREAD_MUTEX_INITIALIZER
;
/* Forward declaration: final bo teardown; per the comment at its
 * definition below, must be called with table_lock held. */
33 void bo_del(struct fd_bo
*bo
);
35 /* set buffer name, and add to table, call w/ table_lock held: */
36 static void set_name(struct fd_bo
*bo
, uint32_t name
)
/* NOTE(review): the store of 'name' into bo->name is not visible in
 * this view; presumably it happens before the insert below -- confirm. */
39 /* add ourself into the name table: */
40 _mesa_hash_table_insert(bo
->dev
->name_table
, &bo
->name
, bo
);
43 /* lookup a buffer, call w/ table_lock held: */
/* Search 'tbl' (a device handle_table or name_table, keyed by a
 * uint32) for an existing bo; on a hit a reference is taken for the
 * caller.  Returns NULL when nothing is found. */
44 static struct fd_bo
* lookup_bo(struct hash_table
*tbl
, uint32_t key
)
46 struct fd_bo
*bo
= NULL
;
47 struct hash_entry
*entry
= _mesa_hash_table_search(tbl
, &key
);
/* NOTE(review): the guard testing 'entry' for NULL is not visible in
 * this view; the dereference below is only safe under such a guard. */
49 /* found, incr refcnt and return: */
50 bo
= fd_bo_ref(entry
->data
);
52 /* don't break the bucket if this bo was found in one */
/* If the bo was parked on a bo-cache bucket list, unlink it so the
 * cache no longer treats it as free. */
53 list_delinit(&bo
->list
);
58 /* allocate a new buffer object, call w/ table_lock held */
/* Wrap an existing GEM handle in a struct fd_bo via the backend's
 * bo_from_handle hook, take a device reference, initialize refcount
 * and list state, and register the bo in dev->handle_table. */
59 static struct fd_bo
* bo_from_handle(struct fd_device
*dev
,
60 uint32_t size
, uint32_t handle
)
64 bo
= dev
->funcs
->bo_from_handle(dev
, size
, handle
);
/* Error path (backend failed to wrap the handle): close the GEM
 * handle so the kernel object is not leaked.  NOTE(review): the
 * surrounding 'if' and initializer body are not visible here. */
66 struct drm_gem_close req
= {
69 drmIoctl(dev
->fd
, DRM_IOCTL_GEM_CLOSE
, &req
);
72 bo
->dev
= fd_device_ref(dev
);
/* Cache the GPU iova up front so later queries are cheap. */
75 bo
->iova
= bo
->funcs
->iova(bo
);
/* Fresh bo starts with a single reference owned by the caller. */
77 p_atomic_set(&bo
->refcnt
, 1);
78 list_inithead(&bo
->list
);
79 /* add ourself into the handle table: */
80 _mesa_hash_table_insert(dev
->handle_table
, &bo
->handle
, bo
);
/* Common allocation path: try the given bo cache first; on a miss,
 * ask the backend for a fresh GEM handle and wrap it with
 * bo_from_handle() under table_lock (it touches the handle table). */
85 bo_new(struct fd_device
*dev
, uint32_t size
, uint32_t flags
,
86 struct fd_bo_cache
*cache
)
88 struct fd_bo
*bo
= NULL
;
/* Cache hit returns a recycled bo; size is passed by pointer --
 * presumably the cache may round it up to a bucket size (confirm). */
92 bo
= fd_bo_cache_alloc(cache
, &size
, flags
);
96 ret
= dev
->funcs
->bo_new_handle(dev
, size
, flags
, &handle
);
100 pthread_mutex_lock(&table_lock
);
101 bo
= bo_from_handle(dev
, size
, handle
);
102 pthread_mutex_unlock(&table_lock
);
/* Allocate a bo from the device's normal bo_cache and tag it so that
 * fd_bo_del() will return it to that cache instead of freeing it. */
110 _fd_bo_new(struct fd_device
*dev
, uint32_t size
, uint32_t flags
)
112 struct fd_bo
*bo
= bo_new(dev
, size
, flags
, &dev
->bo_cache
);
114 bo
->bo_reuse
= BO_CACHE
;
/* Forward a printf-style debug name (va_list variant) to the
 * backend's set_name hook. */
119 _fd_bo_set_name(struct fd_bo
*bo
, const char *fmt
, va_list ap
)
121 bo
->funcs
->set_name(bo
, fmt
, ap
);
124 /* internal function to allocate bo's that use the ringbuffer cache
125 * instead of the normal bo_cache. The purpose is, because cmdstream
126 * bo's get vmap'd on the kernel side, and that is expensive, we want
127 * to re-use cmdstream bo's for cmdstream and not unrelated purposes.
130 fd_bo_new_ring(struct fd_device
*dev
, uint32_t size
)
/* Ring/cmdstream buffers are only ever read by the GPU. */
132 uint32_t flags
= DRM_FREEDRENO_GEM_GPUREADONLY
;
133 struct fd_bo
*bo
= bo_new(dev
, size
, flags
, &dev
->ring_cache
);
/* Return to the ring cache (not bo_cache) on final unref. */
135 bo
->bo_reuse
= RING_CACHE
;
136 fd_bo_set_name(bo
, "cmdstream");
/* Wrap an existing GEM handle: under table_lock, first check whether
 * the handle is already tracked (returning a new reference to the
 * existing bo), otherwise create a fresh wrapper via bo_from_handle(). */
141 fd_bo_from_handle(struct fd_device
*dev
, uint32_t handle
, uint32_t size
)
143 struct fd_bo
*bo
= NULL
;
145 pthread_mutex_lock(&table_lock
);
147 bo
= lookup_bo(dev
->handle_table
, handle
);
/* NOTE(review): presumably only reached on lookup miss; the guard is
 * not visible in this view. */
151 bo
= bo_from_handle(dev
, size
, handle
);
156 pthread_mutex_unlock(&table_lock
);
/* Import a dma-buf fd: convert it to a GEM handle, then either reuse
 * an already-tracked bo for that handle or wrap it in a new one.
 * NOTE(review): two unlock sites are visible -- the first presumably
 * belongs to an early-return path when drmPrimeFDToHandle fails. */
162 fd_bo_from_dmabuf(struct fd_device
*dev
, int fd
)
168 pthread_mutex_lock(&table_lock
);
169 ret
= drmPrimeFDToHandle(dev
->fd
, fd
, &handle
);
171 pthread_mutex_unlock(&table_lock
);
175 bo
= lookup_bo(dev
->handle_table
, handle
);
179 /* lseek() to get bo size */
180 size
= lseek(fd
, 0, SEEK_END
);
/* NOTE(review): SEEK_CUR with offset 0 does not restore the position
 * changed by SEEK_END above -- looks intentional (dma-buf offsets are
 * not used for I/O) but worth confirming. */
181 lseek(fd
, 0, SEEK_CUR
);
183 bo
= bo_from_handle(dev
, size
, handle
);
188 pthread_mutex_unlock(&table_lock
);
/* Open a bo by its global flink name.  Checks the per-device name
 * table first; otherwise performs DRM_IOCTL_GEM_OPEN and then either
 * reuses a bo already known by the resulting handle or wraps it. */
193 struct fd_bo
* fd_bo_from_name(struct fd_device
*dev
, uint32_t name
)
195 struct drm_gem_open req
= {
200 pthread_mutex_lock(&table_lock
);
202 /* check name table first, to see if bo is already open: */
203 bo
= lookup_bo(dev
->name_table
, name
);
207 if (drmIoctl(dev
->fd
, DRM_IOCTL_GEM_OPEN
, &req
)) {
208 ERROR_MSG("gem-open failed: %s", strerror(errno
));
/* Flink name was new to us, but the handle may already be tracked
 * (same object imported by another path): */
212 bo
= lookup_bo(dev
->handle_table
, req
.handle
);
216 bo
= bo_from_handle(dev
, req
.size
, req
.handle
);
223 pthread_mutex_unlock(&table_lock
);
/* Return the bo's GPU virtual address.  Body not visible in this
 * view; presumably returns the bo->iova cached by bo_from_handle(). */
228 uint64_t fd_bo_get_iova(struct fd_bo
*bo
)
/* Atomically take an additional reference on the bo; by convention
 * returns the same bo for caller convenience. */
233 struct fd_bo
* fd_bo_ref(struct fd_bo
*bo
)
235 p_atomic_inc(&bo
->refcnt
);
/* Drop a reference.  On the last reference: under table_lock, try to
 * park the bo in its originating cache (bo_cache or ring_cache); if
 * the cache declines, destroy it via bo_del(), then release the
 * device reference taken in bo_from_handle(). */
239 void fd_bo_del(struct fd_bo
*bo
)
241 struct fd_device
*dev
= bo
->dev
;
/* Not the last reference -- nothing more to do. */
243 if (!atomic_dec_and_test(&bo
->refcnt
))
246 pthread_mutex_lock(&table_lock
);
/* fd_bo_cache_free() returning 0 means the cache took ownership. */
248 if ((bo
->bo_reuse
== BO_CACHE
) && (fd_bo_cache_free(&dev
->bo_cache
, bo
) == 0))
250 if ((bo
->bo_reuse
== RING_CACHE
) && (fd_bo_cache_free(&dev
->ring_cache
, bo
) == 0))
/* Locked variant: table_lock is already held here. */
254 fd_device_del_locked(dev
);
256 pthread_mutex_unlock(&table_lock
);
259 /* Called under table_lock */
/* Final teardown: unmap any CPU mapping, drop the bo from both device
 * hash tables, close the kernel GEM handle, then let the backend free
 * its wrapper state. */
260 void bo_del(struct fd_bo
*bo
)
/* NOTE(review): presumably guarded by 'if (bo->map)'; guard line is
 * not visible in this view. */
265 os_munmap(bo
->map
, bo
->size
);
267 /* TODO probably bo's in bucket list get removed from
272 struct drm_gem_close req
= {
273 .handle
= bo
->handle
,
275 _mesa_hash_table_remove_key(bo
->dev
->handle_table
, &bo
->handle
);
/* Name-table entry only exists if the bo was ever flink'd; removal of
 * an absent key is presumably a no-op -- confirm hash table semantics. */
277 _mesa_hash_table_remove_key(bo
->dev
->name_table
, &bo
->name
);
278 drmIoctl(bo
->dev
->fd
, DRM_IOCTL_GEM_CLOSE
, &req
);
281 bo
->funcs
->destroy(bo
);
/* Export a global flink name for the bo (DRM_IOCTL_GEM_FLINK) and
 * return it through *name.  The bo is registered in the name table
 * and excluded from cache reuse, since another process may now hold
 * a reference to the underlying GEM object. */
284 int fd_bo_get_name(struct fd_bo
*bo
, uint32_t *name
)
287 struct drm_gem_flink req
= {
288 .handle
= bo
->handle
,
292 ret
= drmIoctl(bo
->dev
->fd
, DRM_IOCTL_GEM_FLINK
, &req
);
297 pthread_mutex_lock(&table_lock
);
298 set_name(bo
, req
.name
);
299 pthread_mutex_unlock(&table_lock
);
/* Shared bo's must not be recycled through the caches. */
300 bo
->bo_reuse
= NO_CACHE
;
/* Return the bo's GEM handle.  Body not visible in this view;
 * presumably returns bo->handle. */
308 uint32_t fd_bo_handle(struct fd_bo
*bo
)
/* Export the bo as a dma-buf fd (close-on-exec).  On success the bo
 * is marked NO_CACHE, since the fd may outlive our reference and the
 * buffer must not be recycled through the caches. */
313 int fd_bo_dmabuf(struct fd_bo
*bo
)
317 ret
= drmPrimeHandleToFD(bo
->dev
->fd
, bo
->handle
, DRM_CLOEXEC
,
320 ERROR_MSG("failed to get dmabuf fd: %d", ret
);
324 bo
->bo_reuse
= NO_CACHE
;
/* Return the bo's size in bytes.  Body not visible in this view;
 * presumably returns bo->size. */
329 uint32_t fd_bo_size(struct fd_bo
*bo
)
/* Lazily create (and cache in bo->map) a read/write CPU mapping of
 * the bo, using the backend-provided mmap offset on the device fd.
 * NOTE(review): the 'already mapped' fast path and the return
 * statement are not visible in this view. */
334 void * fd_bo_map(struct fd_bo
*bo
)
340 ret
= bo
->funcs
->offset(bo
, &offset
);
345 bo
->map
= os_mmap(0, bo
->size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
346 bo
->dev
->fd
, offset
);
347 if (bo
->map
== MAP_FAILED
) {
348 ERROR_MSG("mmap failed: %s", strerror(errno
));
355 /* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
/* Prepare the bo for CPU access ('op' selects read/write/nosync
 * semantics); simply delegates to the backend hook. */
356 int fd_bo_cpu_prep(struct fd_bo
*bo
, struct fd_pipe
*pipe
, uint32_t op
)
358 return bo
->funcs
->cpu_prep(bo
, pipe
, op
);
/* End a CPU-access window opened by fd_bo_cpu_prep(); delegates to
 * the backend hook. */
361 void fd_bo_cpu_fini(struct fd_bo
*bo
)
363 bo
->funcs
->cpu_fini(bo
);