virgl: reuse screen when fd is already open
[mesa.git] / src / gallium / winsys / virgl / drm / virgl_drm_winsys.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "os/os_mman.h"
#include "os/os_time.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "state_tracker/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"

#include <xf86drm.h>
#include "virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"
45
46 static inline boolean can_cache_resource(struct virgl_hw_res *res)
47 {
48 return res->cacheable == TRUE;
49 }
50
/* Free a hardware resource: drop it from the flink-name table, unmap any
 * CPU mapping, close the kernel GEM handle and release the struct.
 */
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   if (res->name) {
      /* Resource was shared by name; remove the name -> res mapping so
       * later lookups do not return a dangling pointer.
       */
      pipe_mutex_lock(qdws->bo_handles_mutex);
      util_hash_table_remove(qdws->bo_handles,
                             (void *)(uintptr_t)res->name);
      pipe_mutex_unlock(qdws->bo_handles_mutex);
   }

   if (res->ptr)
      os_munmap(res->ptr, res->size);

   /* Drop the kernel-side reference to the buffer object. */
   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}
71
/* Non-blocking busy query: returns TRUE while the host still uses the
 * resource.  Uses the WAIT ioctl with NOWAIT, which fails with EBUSY
 * when the resource is busy.
 */
static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws,
                                          struct virgl_hw_res *res)
{
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return TRUE;
   return FALSE;
}
87
/* Destroy every resource currently parked on the delayed-free cache. */
static void
virgl_cache_flush(struct virgl_drm_winsys *qdws)
{
   struct list_head *curr, *next;
   struct virgl_hw_res *res;

   pipe_mutex_lock(qdws->mutex);
   curr = qdws->delayed.next;
   next = curr->next;

   /* fetch the successor before destroying the node it is embedded in */
   while (curr != &qdws->delayed) {
      res = LIST_ENTRY(struct virgl_hw_res, curr, head);
      LIST_DEL(&res->head);
      virgl_hw_res_destroy(qdws, res);
      curr = next;
      next = curr->next;
   }
   pipe_mutex_unlock(qdws->mutex);
}
/* virgl_winsys::destroy - flush the resource cache and tear down winsys
 * state.  Note: does not close qdws->fd; the creator owns the fd.
 */
static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_cache_flush(qdws);

   util_hash_table_destroy(qdws->bo_handles);
   pipe_mutex_destroy(qdws->bo_handles_mutex);
   pipe_mutex_destroy(qdws->mutex);

   FREE(qdws);
}
120
/* Reap delayed-free resources whose linger period has expired.
 * Caller must hold qdws->mutex.  Entries are in insertion (time) order,
 * so the scan stops at the first unexpired one.
 */
static void
virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
{
   struct list_head *curr, *next;
   struct virgl_hw_res *res;
   int64_t now;

   now = os_time_get();
   curr = qdws->delayed.next;
   next = curr->next;
   while (curr != &qdws->delayed) {
      res = LIST_ENTRY(struct virgl_hw_res, curr, head);
      if (!os_time_timeout(res->start, res->end, now))
         break;

      LIST_DEL(&res->head);
      virgl_hw_res_destroy(qdws, res);
      curr = next;
      next = curr->next;
   }
}
142
/* Point *dres at sres, releasing the old reference.  When the old
 * resource's refcount drops to zero it is destroyed immediately, unless
 * it is cacheable, in which case it is parked on the delayed-free list
 * for qdws->usecs so it can be recycled.
 */
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         pipe_mutex_lock(qdws->mutex);
         /* opportunistically expire stale cached entries */
         virgl_cache_list_check_free(qdws);

         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         pipe_mutex_unlock(qdws->mutex);
      }
   }
   *dres = sres;
}
165
/* Allocate a fresh host resource via the RESOURCE_CREATE ioctl and wrap
 * it in a refcounted virgl_hw_res.  Returns NULL on allocation or ioctl
 * failure.  The stride is derived from width and the format block size.
 */
static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = format;
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   /* remember bind/format so the cache can match compatible requests */
   res->bind = bind;
   res->format = format;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->stride = stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;
   return res;
}
219
/* Check whether a cached resource can stand in for a new allocation.
 * Returns 1 if reusable, 0 if incompatible (bind/format mismatch or bad
 * size), -1 if it matches but the host is still using it.  A cached
 * buffer up to twice the requested size is accepted to limit waste.
 */
static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
                                      struct virgl_hw_res *res,
                                      uint32_t size, uint32_t bind,
                                      uint32_t format)
{
   if (res->bind != bind)
      return 0;
   if (res->format != format)
      return 0;
   if (res->size < size)
      return 0;
   if (res->size > size * 2)
      return 0;

   if (virgl_drm_resource_is_busy(qdws, res)) {
      return -1;
   }

   return 1;
}
240
/* Upload a sub-box of guest memory to the host-side resource at the
 * given mip level and buffer offset.
 */
static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   /* NOTE(review): assumes pipe_box and drm_virtgpu_3d_box share layout
    * -- confirm against virtgpu_drm.h before changing either struct. */
   tohostcmd.box = *(struct drm_virtgpu_3d_box *)box;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;
   /* stride/layer_stride deliberately left zero; presumably the host
    * derives them from the resource -- verify before enabling. */
// tohostcmd.stride = stride;
// tohostcmd.layer_stride = stride;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}
260
/* Read back a sub-box of the host-side resource into guest memory at the
 * given mip level and buffer offset.
 */
static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   /* stride/layer_stride deliberately left zero, mirroring transfer_put */
// fromhostcmd.stride = stride;
// fromhostcmd.layer_stride = layer_stride;
   fromhostcmd.box = *(struct drm_virtgpu_3d_box *)box;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}
280
281 static struct virgl_hw_res *
282 virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
283 enum pipe_texture_target target,
284 uint32_t format,
285 uint32_t bind,
286 uint32_t width,
287 uint32_t height,
288 uint32_t depth,
289 uint32_t array_size,
290 uint32_t last_level,
291 uint32_t nr_samples,
292 uint32_t size)
293 {
294 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
295 struct virgl_hw_res *res, *curr_res;
296 struct list_head *curr, *next;
297 int64_t now;
298 int ret;
299
300 /* only store binds for vertex/index/const buffers */
301 if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
302 bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
303 goto alloc;
304
305 pipe_mutex_lock(qdws->mutex);
306
307 res = NULL;
308 curr = qdws->delayed.next;
309 next = curr->next;
310
311 now = os_time_get();
312 while (curr != &qdws->delayed) {
313 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
314
315 if (!res && ((ret = virgl_is_res_compat(qdws, curr_res, size, bind, format)) > 0))
316 res = curr_res;
317 else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
318 LIST_DEL(&curr_res->head);
319 virgl_hw_res_destroy(qdws, curr_res);
320 } else
321 break;
322
323 if (ret == -1)
324 break;
325
326 curr = next;
327 next = curr->next;
328 }
329
330 if (!res && ret != -1) {
331 while (curr != &qdws->delayed) {
332 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
333 ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
334 if (ret > 0) {
335 res = curr_res;
336 break;
337 }
338 if (ret == -1)
339 break;
340 curr = next;
341 next = curr->next;
342 }
343 }
344
345 if (res) {
346 LIST_DEL(&res->head);
347 --qdws->num_delayed;
348 pipe_mutex_unlock(qdws->mutex);
349 pipe_reference_init(&res->reference, 1);
350 return res;
351 }
352
353 pipe_mutex_unlock(qdws->mutex);
354
355 alloc:
356 res = virgl_drm_winsys_resource_create(qws, target, format, bind,
357 width, height, depth, array_size,
358 last_level, nr_samples, size);
359 if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
360 bind == VIRGL_BIND_VERTEX_BUFFER)
361 res->cacheable = TRUE;
362 return res;
363 }
364
/* Import a resource from an external handle (flink name, prime fd or KMS
 * handle).  Shared (flink) handles are deduplicated through the
 * bo_handles table.  Returns NULL on failure.
 */
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;

   pipe_mutex_lock(qdws->bo_handles_mutex);

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      /* already imported once: just take another reference */
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)whandle->handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      /* dma-buf fd -> GEM handle */
      int r;
      uint32_t handle;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = handle;
   } else {
      /* flink name -> GEM handle */
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   res->name = whandle->handle;

   /* query host resource id, size and stride for the imported bo */
   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      /* NOTE(review): the GEM handle acquired above is not closed on this
       * error path -- verify whether that leaks a kernel reference. */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)whandle->handle, res);

done:
   pipe_mutex_unlock(qdws->bo_handles_mutex);
   return res;
}
434
/* Export a resource as a shareable handle: flink name (SHARED), raw GEM
 * handle (KMS) or dma-buf fd (FD).  Flinked resources are registered in
 * the bo_handles table so re-imports find the same virgl_hw_res.
 */
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      if (!res->flinked) {
         /* flink once and cache the name on the resource */
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flinked = TRUE;
         res->flink = flink.name;

         pipe_mutex_lock(qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->flink, res);
         pipe_mutex_unlock(qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
   }
   whandle->stride = stride;
   return TRUE;
}
471
/* Drop the caller's reference to hres (may destroy or cache it). */
static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
                                            struct virgl_hw_res *hres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_drm_resource_reference(qdws, &hres, NULL);
}
479
/* Map the resource into the CPU address space, caching the mapping on the
 * resource (unmapped only when the resource is destroyed).  Returns NULL
 * on failure.
 */
static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   /* reuse an existing mapping */
   if (res->ptr)
      return res->ptr;

   /* ask the kernel for the fake mmap offset of this bo */
   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;

}
504
/* Block until the host is done with the resource.
 * NOTE(review): drmIoctl() returns -1 with errno set rather than a
 * negative errno, so the `ret == -EAGAIN` retry likely never fires;
 * drmIoctl also restarts EINTR/EAGAIN itself -- confirm before changing.
 */
static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
again:
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret == -EAGAIN)
      goto again;
}
519
/* Create a command buffer with room for 512 relocation entries (bo
 * pointers plus their kernel handles).  Returns NULL on allocation
 * failure, cleaning up any partial state.
 */
static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   /* fixed-size relocation arrays: bo pointers + handle list for execbuf */
   cbuf->nres = 512;
   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo) {
      FREE(cbuf);
      return NULL;
   }
   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}
546
547 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
548 {
549 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
550
551 FREE(cbuf->res_hlist);
552 FREE(cbuf->res_bo);
553 FREE(cbuf);
554
555 }
556
/* Check whether res is already in cbuf's relocation list.  A small hash
 * of the resource handle is used as a first-chance cache; on a hash hit
 * with the wrong index, fall back to a linear scan and refresh the cache.
 * NOTE(review): the mask `sizeof(is_handle_added)-1` assumes that array's
 * size is a power of two -- verify against the struct definition.
 */
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}
577
578 static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
579 struct virgl_drm_cmd_buf *cbuf,
580 struct virgl_hw_res *res)
581 {
582 unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
583
584 if (cbuf->cres > cbuf->nres) {
585 fprintf(stderr,"failure to add relocation\n");
586 return;
587 }
588
589 cbuf->res_bo[cbuf->cres] = NULL;
590 virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
591 cbuf->res_hlist[cbuf->cres] = res->bo_handle;
592 cbuf->is_handle_added[hash] = TRUE;
593
594 cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
595 p_atomic_inc(&res->num_cs_references);
596 cbuf->cres++;
597 }
598
599 static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
600 struct virgl_drm_cmd_buf *cbuf)
601 {
602 int i;
603
604 for (i = 0; i < cbuf->cres; i++) {
605 p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
606 virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
607 }
608 cbuf->cres = 0;
609 }
610
611 static void virgl_drm_emit_res(struct virgl_winsys *qws,
612 struct virgl_cmd_buf *_cbuf,
613 struct virgl_hw_res *res, boolean write_buf)
614 {
615 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
616 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
617 boolean already_in_list = virgl_drm_lookup_res(cbuf, res);
618
619 if (write_buf)
620 cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;
621
622 if (!already_in_list)
623 virgl_drm_add_res(qdws, cbuf, res);
624 }
625
626 static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
627 struct virgl_cmd_buf *_cbuf,
628 struct virgl_hw_res *res)
629 {
630 if (!res->num_cs_references)
631 return FALSE;
632
633 return TRUE;
634 }
635
/* Submit the accumulated command stream to the kernel via EXECBUFFER,
 * then reset the buffer and release all relocation references.  Returns
 * the ioctl result (0 on success, or when the buffer was empty).
 */
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   /* nothing recorded: nothing to submit */
   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;   /* dwords -> bytes */
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   virgl_drm_release_all_res(qdws, cbuf);

   /* clear the relocation hash fast-path for the next submission */
   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
   return ret;
}
663
/* Fill caps->caps with capability set 1 queried from the virtio-gpu
 * kernel driver.  Returns the ioctl result.
 */
static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;

   memset(&args, 0, sizeof(args));

   args.cap_set_id = 1;
   args.addr = (unsigned long)&caps->caps;
   args.size = sizeof(union virgl_caps);
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
}
677
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

/* Hash callback for the bo_handles table: the key IS the handle value. */
static unsigned handle_hash(void *key)
{
   return PTR_TO_UINT(key);
}

/* Comparison callback for the bo_handles table: 0 means equal. */
static int handle_compare(void *key1, void *key2)
{
   return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
689
/* Create a fence object.  A fence is just a tiny dummy buffer resource;
 * its busy/idle state on the host stands in for fence signaling.
 */
static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws)
{
   struct virgl_hw_res *res;

   res = virgl_drm_winsys_resource_cache_create(vws,
                                                PIPE_BUFFER,
                                                PIPE_FORMAT_R8_UNORM,
                                                VIRGL_BIND_CUSTOM,
                                                8, 1, 1, 0, 0, 0, 8);

   return (struct pipe_fence_handle *)res;
}
703
/* Wait for a fence.  timeout == 0 polls once; PIPE_TIMEOUT_INFINITE
 * blocks in the kernel; otherwise poll with 10us sleeps until the
 * deadline.  Returns TRUE when the fence signaled in time.
 * NOTE(review): timeout is divided by 1000 and compared against
 * os_time_get() deltas -- presumably a ns -> us conversion; confirm the
 * caller's timeout unit.
 */
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *fence,
                             uint64_t timeout)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct virgl_hw_res *res = virgl_hw_res(fence);

   if (timeout == 0)
      return virgl_drm_resource_is_busy(vdws, res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vdws, res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   virgl_drm_resource_wait(vws, res);
   return TRUE;
}
727
/* Fence refcounting: fences are resources, so delegate to the resource
 * reference helper.
 */
static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   virgl_drm_resource_reference(vdws, (struct virgl_hw_res **)dst,
                                virgl_hw_res(src));
}
736
737
738 struct virgl_winsys *
739 virgl_drm_winsys_create(int drmFD)
740 {
741 struct virgl_drm_winsys *qdws;
742
743 qdws = CALLOC_STRUCT(virgl_drm_winsys);
744 if (!qdws)
745 return NULL;
746
747 qdws->fd = drmFD;
748 qdws->num_delayed = 0;
749 qdws->usecs = 1000000;
750 LIST_INITHEAD(&qdws->delayed);
751 pipe_mutex_init(qdws->mutex);
752 pipe_mutex_init(qdws->bo_handles_mutex);
753 qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
754 qdws->base.destroy = virgl_drm_winsys_destroy;
755
756 qdws->base.transfer_put = virgl_bo_transfer_put;
757 qdws->base.transfer_get = virgl_bo_transfer_get;
758 qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
759 qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
760 qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
761 qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
762 qdws->base.resource_map = virgl_drm_resource_map;
763 qdws->base.resource_wait = virgl_drm_resource_wait;
764 qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
765 qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
766 qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
767 qdws->base.emit_res = virgl_drm_emit_res;
768 qdws->base.res_is_referenced = virgl_drm_res_is_ref;
769
770 qdws->base.cs_create_fence = virgl_cs_create_fence;
771 qdws->base.fence_wait = virgl_fence_wait;
772 qdws->base.fence_reference = virgl_fence_reference;
773
774 qdws->base.get_caps = virgl_drm_get_caps;
775 return &qdws->base;
776
777 }
778
/* fd -> pipe_screen table so one screen is shared per device fd. */
static struct util_hash_table *fd_tab = NULL;
/* Guards fd_tab and each screen's refcnt. */
pipe_static_mutex(virgl_screen_mutex);
781
/* Screen destroy hook installed over the pipe driver's own destroy.
 * Drops one refcount; on the last reference, unregisters the screen from
 * fd_tab and chains to the driver's original destroy (stashed in
 * winsys_priv by virgl_drm_screen_create).
 */
static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   pipe_mutex_lock(virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
   }
   pipe_mutex_unlock(virgl_screen_mutex);

   if (destroy) {
      /* restore and invoke the driver's real destroy */
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}
801
/* Hash callback for fd_tab: hash the underlying device identity so two
 * different fds for the same device collide (compare_fd then matches).
 *
 * Fix vs. original: the fstat() result was ignored, so a failed call left
 * the stat buffer uninitialized and the hash read indeterminate memory.
 * Also renamed the local away from `stat`, which shadowed the struct tag
 * and the libc function.
 */
static unsigned hash_fd(void *key)
{
   int fd = pointer_to_intptr(key);
   struct stat st;

   if (fstat(fd, &st) != 0)
      return 0;   /* constant fallback; compare_fd still disambiguates */

   return st.st_dev ^ st.st_ino ^ st.st_rdev;
}
810
/* Comparison callback for fd_tab: two fds are "equal" (returns 0) when
 * they refer to the same underlying device node.
 *
 * Fix vs. original: fstat() failures were ignored, so unstattable fds
 * were compared using uninitialized stack data.  Treat them as unequal.
 */
static int compare_fd(void *key1, void *key2)
{
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);
   struct stat stat1, stat2;

   if (fstat(fd1, &stat1) != 0 || fstat(fd2, &stat2) != 0)
      return 1;

   return stat1.st_dev != stat2.st_dev ||
          stat1.st_ino != stat2.st_ino ||
          stat1.st_rdev != stat2.st_rdev;
}
823
824 struct pipe_screen *
825 virgl_drm_screen_create(int fd)
826 {
827 struct pipe_screen *pscreen = NULL;
828
829 pipe_mutex_lock(virgl_screen_mutex);
830 if (!fd_tab) {
831 fd_tab = util_hash_table_create(hash_fd, compare_fd);
832 if (!fd_tab)
833 goto unlock;
834 }
835
836 pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
837 if (pscreen) {
838 virgl_screen(pscreen)->refcnt++;
839 } else {
840 struct virgl_winsys *vws;
841 int dup_fd = dup(fd);
842
843 vws = virgl_drm_winsys_create(dup_fd);
844
845 pscreen = virgl_create_screen(vws);
846 if (pscreen) {
847 util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);
848
849 /* Bit of a hack, to avoid circular linkage dependency,
850 * ie. pipe driver having to call in to winsys, we
851 * override the pipe drivers screen->destroy():
852 */
853 virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
854 pscreen->destroy = virgl_drm_screen_destroy;
855 }
856 }
857
858 unlock:
859 pipe_mutex_unlock(virgl_screen_mutex);
860 return pscreen;
861 }