Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] / src / etnaviv / drm / etnaviv_bo.c
/*
 * Copyright (C) 2014 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "os/os_mman.h"
#include "util/hash_table.h"

#include "etnaviv_priv.h"
#include "etnaviv_drmif.h"

pthread_mutex_t etna_drm_table_lock = PTHREAD_MUTEX_INITIALIZER;
void _etna_bo_del(struct etna_bo *bo);

/* set buffer name, and add to table, call w/ etna_drm_table_lock held: */
static void set_name(struct etna_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	_mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}

/* Called under etna_drm_table_lock */
void _etna_bo_del(struct etna_bo *bo)
{
	VG_BO_FREE(bo);

	if (bo->va)
		util_vma_heap_free(&bo->dev->address_space, bo->va, bo->size);

	if (bo->map)
		os_munmap(bo->map, bo->size);

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		if (bo->name)
			_mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);

		_mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);
}

/* lookup a buffer from its handle, call w/ etna_drm_table_lock held: */
static struct etna_bo *lookup_bo(void *tbl, uint32_t handle)
{
	struct etna_bo *bo = NULL;
	struct hash_entry *entry = _mesa_hash_table_search(tbl, &handle);

	if (entry) {
		/* found, incr refcnt and return: */
		bo = etna_bo_ref(entry->data);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}

	return bo;
}

/* allocate a new buffer object, call w/ etna_drm_table_lock held */
static struct etna_bo *bo_from_handle(struct etna_device *dev,
		uint32_t size, uint32_t handle, uint32_t flags)
{
	struct etna_bo *bo = calloc(1, sizeof(*bo));

	if (!bo) {
		struct drm_gem_close req = {
			.handle = handle,
		};

		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);

		return NULL;
	}

	bo->dev = etna_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	bo->flags = flags;
	p_atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourselves to the handle table: */
	_mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);

	if (dev->use_softpin)
		bo->va = util_vma_heap_alloc(&dev->address_space, bo->size, 4096);

	return bo;
}

/* allocate a new (un-tiled) buffer object */
struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
		uint32_t flags)
{
	struct etna_bo *bo;
	int ret;
	struct drm_etnaviv_gem_new req = {
		.flags = flags,
	};

	bo = etna_bo_cache_alloc(&dev->bo_cache, &size, flags);
	if (bo)
		return bo;

	req.size = size;
	ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_GEM_NEW,
			&req, sizeof(req));
	if (ret)
		return NULL;

	pthread_mutex_lock(&etna_drm_table_lock);
	bo = bo_from_handle(dev, size, req.handle, flags);
	if (bo)
		bo->reuse = 1;
	pthread_mutex_unlock(&etna_drm_table_lock);

	VG_BO_ALLOC(bo);

	return bo;
}
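
/* Illustrative usage sketch (not upstream code): allocate a BO, take an
 * extra reference, then drop both references.  Assumes a struct etna_device
 * set up elsewhere (e.g. via etna_device_new()) and the ETNA_BO_WC cache
 * flag from etnaviv_drmif.h.
 *
 *	struct etna_bo *bo = etna_bo_new(dev, 4096, ETNA_BO_WC);
 *	if (bo) {
 *		struct etna_bo *extra = etna_bo_ref(bo);
 *
 *		etna_bo_del(extra);	// drops the extra reference
 *		etna_bo_del(bo);	// last ref: BO is cached or freed
 *	}
 */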

struct etna_bo *etna_bo_ref(struct etna_bo *bo)
{
	p_atomic_inc(&bo->refcnt);

	return bo;
}

/* get buffer info */
static int get_buffer_info(struct etna_bo *bo)
{
	int ret;
	struct drm_etnaviv_gem_info req = {
		.handle = bo->handle,
	};

	ret = drmCommandWriteRead(bo->dev->fd, DRM_ETNAVIV_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;

	return 0;
}

/* import a buffer object from DRI2 name */
struct etna_bo *etna_bo_from_name(struct etna_device *dev,
		uint32_t name)
{
	struct etna_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	pthread_mutex_lock(&etna_drm_table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle, 0);
	if (bo) {
		set_name(bo, name);
		VG_BO_ALLOC(bo);
	}

out_unlock:
	pthread_mutex_unlock(&etna_drm_table_lock);

	return bo;
}

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct etna_bo *')
 */
struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
{
	struct etna_bo *bo;
	int ret, size;
	uint32_t handle;

	/* take the lock before calling drmPrimeFDToHandle to avoid
	 * racing against etna_bo_del, which might invalidate the
	 * returned handle.
	 */
	pthread_mutex_lock(&etna_drm_table_lock);

	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&etna_drm_table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle, 0);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&etna_drm_table_lock);

	return bo;
}
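
/* Illustrative usage sketch for the import path above (not upstream code):
 * the dmabuf fd stays owned by the caller, so it is the caller's job to
 * close() it, even while the returned BO is still in use.
 *
 *	struct etna_bo *bo = etna_bo_from_dmabuf(dev, dmabuf_fd);
 *
 *	close(dmabuf_fd);	// safe: import did not take ownership of the fd
 *	if (bo) {
 *		// ... use the BO ...
 *		etna_bo_del(bo);
 *	}
 */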

/* destroy a buffer object */
void etna_bo_del(struct etna_bo *bo)
{
	if (!bo)
		return;

	struct etna_device *dev = bo->dev;

	if (!p_atomic_dec_zero(&bo->refcnt))
		return;

	pthread_mutex_lock(&etna_drm_table_lock);

	if (bo->reuse && (etna_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;

	_etna_bo_del(bo);
	etna_device_del_locked(dev);
out:
	pthread_mutex_unlock(&etna_drm_table_lock);
}

/* get the global flink/DRI2 buffer name */
int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&etna_drm_table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&etna_drm_table_lock);
		bo->reuse = 0;
	}

	*name = bo->name;

	return 0;
}
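
/* Illustrative usage sketch for flink name sharing (not upstream code; flink
 * names are a legacy global namespace, so dmabuf fds are usually preferred):
 *
 *	uint32_t name;
 *
 *	if (!etna_bo_get_name(exported_bo, &name)) {
 *		// pass 'name' to another process over some IPC channel, then:
 *		struct etna_bo *imported = etna_bo_from_name(dev, name);
 *		// ... use and eventually etna_bo_del(imported) ...
 *	}
 */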

uint32_t etna_bo_handle(struct etna_bo *bo)
{
	return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
int etna_bo_dmabuf(struct etna_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
			&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	bo->reuse = 0;

	return prime_fd;
}
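
/* Illustrative usage sketch for the export path above (not upstream code):
 * the returned prime fd is owned by the caller and must be closed when it is
 * no longer needed.
 *
 *	int fd = etna_bo_dmabuf(bo);
 *
 *	if (fd >= 0) {
 *		// hand the fd to another process or device ...
 *		close(fd);
 *	}
 */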

uint32_t etna_bo_size(struct etna_bo *bo)
{
	return bo->size;
}

uint32_t etna_bo_gpu_va(struct etna_bo *bo)
{
	return bo->va;
}

void *etna_bo_map(struct etna_bo *bo)
{
	if (!bo->map) {
		/* the mmap offset is queried lazily from the kernel */
		if (!bo->offset && get_buffer_info(bo))
			return NULL;

		bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}

	return bo->map;
}

int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
{
	struct drm_etnaviv_gem_cpu_prep req = {
		.handle = bo->handle,
		.op = op,
	};

	get_abs_timeout(&req.timeout, 5000000000);

	return drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_PREP,
			&req, sizeof(req));
}

void etna_bo_cpu_fini(struct etna_bo *bo)
{
	struct drm_etnaviv_gem_cpu_fini req = {
		.handle = bo->handle,
	};

	drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_FINI,
			&req, sizeof(req));
}
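
/* Illustrative usage sketch for CPU access (not upstream code; assumes the
 * ETNA_PREP_* op flags from etnaviv_drmif.h): bracket CPU reads/writes of a
 * mapped BO with cpu_prep/cpu_fini so the kernel can synchronize against
 * pending GPU work.
 *
 *	void *ptr = etna_bo_map(bo);
 *
 *	if (ptr && !etna_bo_cpu_prep(bo, ETNA_PREP_WRITE)) {
 *		memset(ptr, 0, etna_bo_size(bo));
 *		etna_bo_cpu_fini(bo);
 *	}
 */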