Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "os/os_mman.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "util/u_pointer.h"
#include "frontend/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"

#include <xf86drm.h>
#include <libsync.h>
#include "drm-uapi/virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"


#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)

/* Gets a pointer to the virgl_hw_res containing the pointed to cache entry. */
#define cache_entry_container_res(ptr) \
    (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))
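
/* Illustrative sketch (not code that runs on its own): the resource cache
 * stores bare virgl_resource_cache_entry nodes, and callers recover the
 * owning resource with the macro above, e.g.
 *
 *    struct virgl_resource_cache_entry *entry = ...;
 *    struct virgl_hw_res *res = cache_entry_container_res(entry);
 *
 * This is the usual container_of pattern; it is only valid because
 * cache_entry is embedded by value in struct virgl_hw_res.
 */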

static inline boolean can_cache_resource_with_bind(uint32_t bind)
{
   return bind == VIRGL_BIND_CONSTANT_BUFFER ||
          bind == VIRGL_BIND_INDEX_BUFFER ||
          bind == VIRGL_BIND_VERTEX_BUFFER ||
          bind == VIRGL_BIND_CUSTOM ||
          bind == VIRGL_BIND_STAGING;
}

static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   mtx_lock(&qdws->bo_handles_mutex);
   _mesa_hash_table_remove_key(qdws->bo_handles,
                               (void *)(uintptr_t)res->bo_handle);
   if (res->flink_name)
      _mesa_hash_table_remove_key(qdws->bo_names,
                                  (void *)(uintptr_t)res->flink_name);
   mtx_unlock(&qdws->bo_handles_mutex);
   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}
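
/* Non-blocking busy query.  maybe_busy and external are winsys-side hints:
 * a resource we have never submitted and never shared can be reported idle
 * without a syscall.  Everything else is checked with VIRTGPU_WAIT_NOWAIT,
 * which fails with EBUSY while the host still uses the resource.
 */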
static boolean virgl_drm_resource_is_busy(struct virgl_winsys *vws,
                                          struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return false;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return TRUE;

   p_atomic_set(&res->maybe_busy, false);

   return FALSE;
}

static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_resource_cache_flush(&qdws->cache);

   _mesa_hash_table_destroy(qdws->bo_handles, NULL);
   _mesa_hash_table_destroy(qdws->bo_names, NULL);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}

static void virgl_drm_resource_reference(struct virgl_winsys *qws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource_with_bind(old->bind) ||
          p_atomic_read(&old->external)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size,
                                 bool for_fencing)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = pipe_to_virgl_format(format);
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);

   /* A newly created resource is considered busy by the kernel until the
    * command is retired.  But for our purposes, we can consider it idle
    * unless it is used for fencing.
    */
   p_atomic_set(&res->maybe_busy, for_fencing);

   virgl_resource_cache_entry_init(&res->cache_entry, size, bind, format);

   return res;
}
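
/* Guest<->host transfer helpers.  The kernel consumes the box, offset and
 * mip level; the stride/layer_stride parameters are part of the winsys
 * interface but are not forwarded by this ioctl path (see the commented-out
 * assignments below).
 */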
static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;
   // tohostcmd.stride = stride;
   // tohostcmd.layer_stride = layer_stride;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}

static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   // fromhostcmd.stride = stride;
   // fromhostcmd.layer_stride = layer_stride;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}
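
/* Allocation entry point used by the pipe driver: for cacheable bind flags,
 * try to reuse a compatible idle resource from the cache before asking the
 * kernel for a new one.
 */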
static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       uint32_t format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res;
   struct virgl_resource_cache_entry *entry;

   if (!can_cache_resource_with_bind(bind))
      goto alloc;

   mtx_lock(&qdws->mutex);

   entry = virgl_resource_cache_remove_compatible(&qdws->cache, size,
                                                  bind, format);
   if (entry) {
      res = cache_entry_container_res(entry);
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   res = virgl_drm_winsys_resource_create(qws, target, format, bind,
                                          width, height, depth, array_size,
                                          last_level, nr_samples, size, false);
   return res;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle,
                                        uint32_t *plane,
                                        uint32_t *stride,
                                        uint32_t *plane_offset,
                                        uint64_t *modifier)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res = NULL;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0 && whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      _debug_printf("attempt to import unsupported winsys offset %u\n",
                    whandle->offset);
      return NULL;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      *plane = whandle->plane;
      *stride = whandle->stride;
      *plane_offset = whandle->offset;
      *modifier = whandle->modifier;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle.  If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r)
         goto done;
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto done;
   }

   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(&qdws->base, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
      res->flink_name = whandle->handle;
   }

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, true);
   res->num_cs_references = 0;

   if (res->flink_name)
      _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
   _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}

static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flink_name) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flink_name = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   p_atomic_set(&res->external, true);

   whandle->stride = stride;
   return TRUE;
}

static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret)
      _debug_printf("waiting got error - %d, slow gpu or hang?\n", errno);

   p_atomic_set(&res->maybe_busy, false);
}

static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
                                     int initial_size)
{
   cbuf->nres = initial_size;
   cbuf->cres = 0;

   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo)
      return false;

   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      return false;
   }

   return true;
}

static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }
   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
}
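
/* The command buffer keeps a small hash of relocation indices, keyed on the
 * low bits of the resource handle (this relies on sizeof(is_handle_added)
 * being a power of two, so the mask below is cheap).  A hash hit is only a
 * hint; on collision we fall back to a linear scan of the BO list.
 */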
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}

static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         _debug_printf("failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         _debug_printf("failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

/* This is called after the cbuf is submitted. */
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      /* mark all BOs busy after submission */
      p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);

      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }

   cbuf->cres = 0;

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
}

static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}

static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct virgl_hw_res *res)
{
   if (!p_atomic_read(&res->num_cs_references))
      return FALSE;

   return TRUE;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
                                                      uint32_t size)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   if (!virgl_drm_alloc_res_list(cbuf, 512)) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->buf = CALLOC(size, sizeof(uint32_t));
   if (!cbuf->buf) {
      FREE(cbuf->res_hlist);
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->in_fence_fd = -1;
   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   virgl_drm_free_res_list(cbuf);

   FREE(cbuf->buf);
   FREE(cbuf);
}
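
/* Fence handling, fence-fd path: a fence wraps a sync-file fd.  For fds
 * imported from an external producer we duplicate the fd so the caller keeps
 * ownership of its copy; on the submit path the fd returned by the
 * execbuffer ioctl is adopted directly.
 */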
static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = os_dupfd_cloexec(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
{
   struct virgl_drm_fence *fence;

   assert(!vws->supports_fences);

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence)
      return NULL;
   fence->fd = -1;

   /* Resources for fences should not be from the cache, since we are basing
    * the fence status on the resource creation busy status.
    */
   fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
         PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
   if (!fence->hw_res) {
      FREE(fence);
      return NULL;
   }

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   eb.fence_fd = -1;
   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   virgl_drm_clear_res_list(cbuf);

   return ret;
}

static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (vdws->has_capset_query_fix) {
      /* if we have the query fix - try and get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to v1 */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}

static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
   if (!vws->supports_fences)
      return NULL;

   return virgl_drm_fence_create(vws, fd, true);
}
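
/* Two wait strategies: with kernel fence-fd support, wait on the sync file,
 * converting the nanosecond timeout to the millisecond granularity that
 * sync_wait() expects; without it, poll the dummy fence resource's busy
 * status until it goes idle or the timeout expires.
 */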
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;

      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vws, fence->hw_res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return TRUE;
}

static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
         virgl_hw_res_destroy(vdws, dfence->hw_res);
      }
      FREE(dfence);
   }

   *dst = src;
}

static void virgl_fence_server_sync(struct virgl_winsys *vws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct pipe_fence_handle *_fence)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return;

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->external)
      return;

   sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
}

static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *_fence)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return -1;

   return os_dupfd_cloexec(fence->fd);
}
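
/* Returns the virtgpu DRM version packed via VIRGL_DRM_VERSION(), or a
 * negative errno.  The major version must be 0; fence-fd support is implied
 * by minor >= 1 (VIRGL_DRM_VERSION_FENCE_FD).
 */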
static int virgl_drm_get_version(int fd)
{
   int ret;
   drmVersionPtr version;

   version = drmGetVersion(fd);

   if (!version)
      ret = -EFAULT;
   else if (version->version_major != 0)
      ret = -EINVAL;
   else
      ret = VIRGL_DRM_VERSION(0, version->version_minor);

   drmFreeVersion(version);

   return ret;
}

static bool
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   return virgl_drm_resource_is_busy(&qdws->base, res);
}

static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}
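
/* Builds the winsys for one virtgpu fd: bail out unless the device exposes
 * 3D support (VIRTGPU_PARAM_3D_FEATURES), then set up the resource cache,
 * the BO handle/name tables and the winsys vtable.
 */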
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   static const unsigned CACHE_TIMEOUT_USEC = 1000000;
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;
   int gl = 0;
   struct drm_virtgpu_getparam getparam = {0};

   getparam.param = VIRTGPU_PARAM_3D_FEATURES;
   getparam.value = (uint64_t)(uintptr_t)&gl;
   ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret < 0 || !gl)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
                             virgl_drm_resource_cache_entry_is_busy,
                             virgl_drm_resource_cache_entry_release,
                             qdws);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   qdws->bo_handles = util_hash_table_create_ptr_keys();
   qdws->bo_names = util_hash_table_create_ptr_keys();
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_reference = virgl_drm_resource_reference;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.get_caps = virgl_drm_get_caps;

   uint32_t value = 0;
   getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
   getparam.value = (uint64_t)(uintptr_t)&value;
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret == 0) {
      if (value == 1)
         qdws->has_capset_query_fix = true;
   }

   return &qdws->base;
}

static struct hash_table *fd_tab = NULL;
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      _mesa_hash_table_remove_key(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}
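
/* Screens are shared per device: fd_tab maps an fd (keyed on the underlying
 * file, so two fds for the same device hit the same entry) to its existing
 * virgl_screen, and virgl_drm_screen_destroy drops the mapping when the
 * refcount reaches zero.
 */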
struct pipe_screen *
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct pipe_screen *pscreen = NULL;

   mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create_fd_keys();
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = os_dupfd_cloexec(fd);

      vws = virgl_drm_winsys_create(dup_fd);
      if (!vws) {
         close(dup_fd);
         goto unlock;
      }

      pscreen = virgl_create_screen(vws, config);
      if (pscreen) {
         _mesa_hash_table_insert(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe drivers screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}