virgl: add driver for virtio-gpu 3D (v2)
[mesa.git] / src / gallium / winsys / virgl / drm / virgl_drm_winsys.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 /* TODO - remove this */
24 #define _FILE_OFFSET_BITS 64
25
26 #include "virgl_drm_winsys.h"
27 #include "virgl_drm_public.h"
28 #include "util/u_memory.h"
29 #include "util/u_format.h"
30 #include "state_tracker/drm_driver.h"
31
32 #include "os/os_mman.h"
33 #include "os/os_time.h"
34 #include <sys/ioctl.h>
35 #include <errno.h>
36 #include <xf86drm.h>
37 #include <fcntl.h>
38 #include <stdio.h>
39 #include "virtgpu_drm.h"
40
41 static inline boolean can_cache_resource(struct virgl_hw_res *res)
42 {
43 return res->cacheable == TRUE;
44 }
45
46 static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
47 struct virgl_hw_res *res)
48 {
49 struct drm_gem_close args;
50
51 if (res->name) {
52 pipe_mutex_lock(qdws->bo_handles_mutex);
53 util_hash_table_remove(qdws->bo_handles,
54 (void *)(uintptr_t)res->name);
55 pipe_mutex_unlock(qdws->bo_handles_mutex);
56 }
57
58 if (res->ptr)
59 os_munmap(res->ptr, res->size);
60
61 args.handle = res->bo_handle;
62 drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
63 FREE(res);
64 }
65
66 static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws, struct virgl_hw_res *res)
67 {
68 struct drm_virtgpu_3d_wait waitcmd;
69 int ret;
70
71 waitcmd.handle = res->bo_handle;
72 waitcmd.flags = VIRTGPU_WAIT_NOWAIT;
73
74 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
75 if (ret && errno == EBUSY)
76 return TRUE;
77 return FALSE;
78 }
79
80 static void
81 virgl_cache_flush(struct virgl_drm_winsys *qdws)
82 {
83 struct list_head *curr, *next;
84 struct virgl_hw_res *res;
85
86 pipe_mutex_lock(qdws->mutex);
87 curr = qdws->delayed.next;
88 next = curr->next;
89
90 while (curr != &qdws->delayed) {
91 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
92 LIST_DEL(&res->head);
93 virgl_hw_res_destroy(qdws, res);
94 curr = next;
95 next = curr->next;
96 }
97 pipe_mutex_unlock(qdws->mutex);
98 }
99 static void
100 virgl_drm_winsys_destroy(struct virgl_winsys *qws)
101 {
102 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
103
104 virgl_cache_flush(qdws);
105
106 util_hash_table_destroy(qdws->bo_handles);
107 pipe_mutex_destroy(qdws->bo_handles_mutex);
108 pipe_mutex_destroy(qdws->mutex);
109
110 FREE(qdws);
111 }
112
113 static void
114 virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
115 {
116 struct list_head *curr, *next;
117 struct virgl_hw_res *res;
118 int64_t now;
119
120 now = os_time_get();
121 curr = qdws->delayed.next;
122 next = curr->next;
123 while (curr != &qdws->delayed) {
124 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
125 if (!os_time_timeout(res->start, res->end, now))
126 break;
127
128 LIST_DEL(&res->head);
129 virgl_hw_res_destroy(qdws, res);
130 curr = next;
131 next = curr->next;
132 }
133 }
134
/* Replace *dres with sres, adjusting reference counts.
 *
 * When the old resource's refcount drops to zero it is destroyed
 * immediately unless it was flagged cacheable, in which case it is
 * parked on the delayed-free list with an expiry of qdws->usecs from
 * now so virgl_drm_winsys_resource_cache_create() can reuse it.
 */
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   /* pipe_reference() returns true when the old count reached zero. */
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         pipe_mutex_lock(qdws->mutex);
         /* Opportunistically reap already-expired cache entries. */
         virgl_cache_list_check_free(qdws);

         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         pipe_mutex_unlock(qdws->mutex);
      }
   }
   *dres = sres;
}
157
158 static struct virgl_hw_res *virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
159 enum pipe_texture_target target,
160 uint32_t format,
161 uint32_t bind,
162 uint32_t width,
163 uint32_t height,
164 uint32_t depth,
165 uint32_t array_size,
166 uint32_t last_level,
167 uint32_t nr_samples,
168 uint32_t size)
169 {
170 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
171 struct drm_virtgpu_resource_create createcmd;
172 int ret;
173 struct virgl_hw_res *res;
174 uint32_t stride = width * util_format_get_blocksize(format);
175
176 res = CALLOC_STRUCT(virgl_hw_res);
177 if (!res)
178 return NULL;
179
180 createcmd.target = target;
181 createcmd.format = format;
182 createcmd.bind = bind;
183 createcmd.width = width;
184 createcmd.height = height;
185 createcmd.depth = depth;
186 createcmd.array_size = array_size;
187 createcmd.last_level = last_level;
188 createcmd.nr_samples = nr_samples;
189 createcmd.res_handle = 0;
190 createcmd.stride = stride;
191 createcmd.size = size;
192 createcmd.flags = 0;
193
194 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
195 if (ret != 0) {
196 FREE(res);
197 return NULL;
198 }
199
200 res->bind = bind;
201 res->format = format;
202
203 res->res_handle = createcmd.res_handle;
204 res->bo_handle = createcmd.bo_handle;
205 res->size = size;
206 res->stride = stride;
207 pipe_reference_init(&res->reference, 1);
208 res->num_cs_references = 0;
209 return res;
210 }
211
212 static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
213 struct virgl_hw_res *res,
214 uint32_t size, uint32_t bind, uint32_t format)
215 {
216 if (res->bind != bind)
217 return 0;
218 if (res->format != format)
219 return 0;
220 if (res->size < size)
221 return 0;
222 if (res->size > size * 2)
223 return 0;
224
225 if (virgl_drm_resource_is_busy(qdws, res)) {
226 return -1;
227 }
228
229 return 1;
230 }
231
232 static int
233 virgl_bo_transfer_put(struct virgl_winsys *vws,
234 struct virgl_hw_res *res,
235 const struct pipe_box *box,
236 uint32_t stride, uint32_t layer_stride,
237 uint32_t buf_offset, uint32_t level)
238 {
239 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
240 struct drm_virtgpu_3d_transfer_to_host tohostcmd;
241 int ret;
242
243 tohostcmd.bo_handle = res->bo_handle;
244 tohostcmd.box = *(struct drm_virtgpu_3d_box *)box;
245 tohostcmd.offset = buf_offset;
246 tohostcmd.level = level;
247 // tohostcmd.stride = stride;
248 // tohostcmd.layer_stride = stride;
249 ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
250 return ret;
251 }
252
253 static int
254 virgl_bo_transfer_get(struct virgl_winsys *vws,
255 struct virgl_hw_res *res,
256 const struct pipe_box *box,
257 uint32_t stride, uint32_t layer_stride,
258 uint32_t buf_offset, uint32_t level)
259 {
260 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
261 struct drm_virtgpu_3d_transfer_from_host fromhostcmd;
262 int ret;
263
264 fromhostcmd.bo_handle = res->bo_handle;
265 fromhostcmd.level = level;
266 fromhostcmd.offset = buf_offset;
267 // fromhostcmd.stride = stride;
268 // fromhostcmd.layer_stride = layer_stride;
269 fromhostcmd.box = *(struct drm_virtgpu_3d_box *)box;
270 ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
271 return ret;
272 }
273
274 static struct virgl_hw_res *virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
275 enum pipe_texture_target target,
276 uint32_t format,
277 uint32_t bind,
278 uint32_t width,
279 uint32_t height,
280 uint32_t depth,
281 uint32_t array_size,
282 uint32_t last_level,
283 uint32_t nr_samples,
284 uint32_t size)
285 {
286 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
287 struct virgl_hw_res *res, *curr_res;
288 struct list_head *curr, *next;
289 int64_t now;
290 int ret;
291
292 /* only store binds for vertex/index/const buffers */
293 if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
294 bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
295 goto alloc;
296
297 pipe_mutex_lock(qdws->mutex);
298
299 res = NULL;
300 curr = qdws->delayed.next;
301 next = curr->next;
302
303 now = os_time_get();
304 while (curr != &qdws->delayed) {
305 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
306
307 if (!res && (ret = virgl_is_res_compat(qdws, curr_res, size, bind, format) > 0))
308 res = curr_res;
309 else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
310 LIST_DEL(&curr_res->head);
311 virgl_hw_res_destroy(qdws, curr_res);
312 } else
313 break;
314
315 if (ret == -1)
316 break;
317
318 curr = next;
319 next = curr->next;
320 }
321
322 if (!res && ret != -1) {
323 while (curr != &qdws->delayed) {
324 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
325 ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
326 if (ret > 0) {
327 res = curr_res;
328 break;
329 }
330 if (ret == -1)
331 break;
332 curr = next;
333 next = curr->next;
334 }
335 }
336
337 if (res) {
338 LIST_DEL(&res->head);
339 --qdws->num_delayed;
340 pipe_mutex_unlock(qdws->mutex);
341 pipe_reference_init(&res->reference, 1);
342 return res;
343 }
344
345 pipe_mutex_unlock(qdws->mutex);
346
347 alloc:
348 res = virgl_drm_winsys_resource_create(qws, target, format, bind,
349 width, height, depth, array_size,
350 last_level, nr_samples, size);
351 if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
352 bind == VIRGL_BIND_VERTEX_BUFFER)
353 res->cacheable = TRUE;
354 return res;
355 }
356
/*
 * Import a resource from an external winsys handle: a flink name
 * (SHARED), a dma-buf fd (FD) or a GEM name opened via DRM_IOCTL_GEM_OPEN.
 *
 * Flink-named buffers are looked up in (and on success inserted into)
 * the bo_handles table, so importing the same name twice yields the
 * same virgl_hw_res with an extra reference.
 *
 * Returns NULL on failure, otherwise a resource holding one reference.
 */
static struct virgl_hw_res *virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;

   /* The handle table lock is held for the whole import so concurrent
    * imports of the same name cannot race. */
   pipe_mutex_lock(qdws->bo_handles_mutex);

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      /* Already imported?  Just take another reference. */
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)whandle->handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      /* dma-buf fd -> GEM handle. */
      int r;
      uint32_t handle;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = handle;
   } else {
      /* flink name -> GEM handle. */
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   res->name = whandle->handle;

   /* Query the host-side resource handle, size and stride. */
   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)whandle->handle, res);

done:
   pipe_mutex_unlock(qdws->bo_handles_mutex);
   return res;
}
425
/*
 * Export a resource as a winsys handle: a flink name (SHARED), the raw
 * GEM handle (KMS) or a dma-buf fd (FD).
 *
 * Flink names are created lazily, cached on the resource, and recorded
 * in bo_handles so later imports of the name resolve to this resource.
 *
 * Returns FALSE for a NULL resource or on ioctl failure.
 */
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;
   memset(&flink, 0, sizeof(flink));

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      if (!res->flinked) {
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flinked = TRUE;
         res->flink = flink.name;

         /* Register the name so create_handle() can find this res. */
         pipe_mutex_lock(qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->flink, res);
         pipe_mutex_unlock(qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      /* Export as dma-buf; the fd is returned through whandle->handle. */
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
   }
   whandle->stride = stride;
   return TRUE;
}
462
463 static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
464 struct virgl_hw_res *hres)
465 {
466 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
467
468 virgl_drm_resource_reference(qdws, &hres, NULL);
469 }
470
471 static void *virgl_drm_resource_map(struct virgl_winsys *qws, struct virgl_hw_res *res)
472 {
473 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
474 struct drm_virtgpu_map mmap_arg;
475 void *ptr;
476
477 if (res->ptr)
478 return res->ptr;
479
480 mmap_arg.handle = res->bo_handle;
481 if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
482 return NULL;
483
484 ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
485 qdws->fd, mmap_arg.offset);
486 if (ptr == MAP_FAILED)
487 return NULL;
488
489 res->ptr = ptr;
490 return ptr;
491
492 }
493
494 static void virgl_drm_resource_wait(struct virgl_winsys *qws, struct virgl_hw_res *res)
495 {
496 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
497 struct drm_virtgpu_3d_wait waitcmd;
498 int ret;
499
500 waitcmd.handle = res->bo_handle;
501 waitcmd.flags = 0;
502 again:
503 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
504 if (ret == -EAGAIN)
505 goto again;
506 }
507
508 static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws)
509 {
510 struct virgl_drm_cmd_buf *cbuf;
511
512 cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
513 if (!cbuf)
514 return NULL;
515
516 cbuf->ws = qws;
517
518 cbuf->nres = 512;
519 cbuf->res_bo = (struct virgl_hw_res **)
520 CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
521 if (!cbuf->res_bo) {
522 FREE(cbuf);
523 return NULL;
524 }
525 cbuf->res_hlist = (uint32_t *)malloc(cbuf->nres * sizeof(uint32_t));
526 if (!cbuf->res_hlist) {
527 FREE(cbuf->res_bo);
528 FREE(cbuf);
529 return NULL;
530 }
531
532 cbuf->base.buf = cbuf->buf;
533 return &cbuf->base;
534 }
535
536 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
537 {
538 struct virgl_drm_cmd_buf *cbuf = (struct virgl_drm_cmd_buf *)_cbuf;
539
540 FREE(cbuf->res_hlist);
541 FREE(cbuf->res_bo);
542 FREE(cbuf);
543
544 }
545
546 static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
547 struct virgl_hw_res *res)
548 {
549 unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
550 int i;
551
552 if (cbuf->is_handle_added[hash]) {
553 i = cbuf->reloc_indices_hashlist[hash];
554 if (cbuf->res_bo[i] == res)
555 return true;
556
557 for (i = 0; i < cbuf->cres; i++) {
558 if (cbuf->res_bo[i] == res) {
559 cbuf->reloc_indices_hashlist[hash] = i;
560 return true;
561 }
562 }
563 }
564 return false;
565 }
566
567 static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
568 struct virgl_drm_cmd_buf *cbuf, struct virgl_hw_res *res)
569 {
570 unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
571
572 if (cbuf->cres > cbuf->nres) {
573 fprintf(stderr,"failure to add relocation\n");
574 return;
575 }
576
577 cbuf->res_bo[cbuf->cres] = NULL;
578 virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
579 cbuf->res_hlist[cbuf->cres] = res->bo_handle;
580 cbuf->is_handle_added[hash] = TRUE;
581
582 cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
583 p_atomic_inc(&res->num_cs_references);
584 cbuf->cres++;
585 }
586
587 static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
588 struct virgl_drm_cmd_buf *cbuf)
589 {
590 int i;
591
592 for (i = 0; i < cbuf->cres; i++) {
593 p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
594 virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
595 }
596 cbuf->cres = 0;
597 }
598
599 static void virgl_drm_emit_res(struct virgl_winsys *qws,
600 struct virgl_cmd_buf *_cbuf, struct virgl_hw_res *res, boolean write_buf)
601 {
602 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
603 struct virgl_drm_cmd_buf *cbuf = (struct virgl_drm_cmd_buf *)_cbuf;
604 boolean already_in_list = virgl_drm_lookup_res(cbuf, res);
605
606 if (write_buf)
607 cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;
608
609 if (!already_in_list)
610 virgl_drm_add_res(qdws, cbuf, res);
611 }
612
613 static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
614 struct virgl_cmd_buf *_cbuf,
615 struct virgl_hw_res *res)
616 {
617 if (!res->num_cs_references)
618 return FALSE;
619
620 return TRUE;
621 }
622
/*
 * Flush the recorded command stream to the kernel via EXECBUFFER.
 *
 * Side effects: resets cbuf->base.cdw, drops every resource reference
 * attached to the command buffer and clears the relocation hash — even
 * when the ioctl fails.  Returns the ioctl result (0 when nothing was
 * recorded).
 */
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws, struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = (struct virgl_drm_cmd_buf *)_cbuf;
   struct drm_virtgpu_execbuffer eb;
   int ret;

   /* Empty command buffer: nothing to submit. */
   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;   /* cdw counts dwords; size is bytes */
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   virgl_drm_release_all_res(qdws, cbuf);

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
   return ret;
}
649
650 static int virgl_drm_get_caps(struct virgl_winsys *vws, struct virgl_drm_caps *caps)
651 {
652 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
653 struct drm_virtgpu_get_caps args;
654 int ret;
655
656 memset(&args, 0, sizeof(args));
657
658 args.cap_set_id = 1;
659 args.addr = (unsigned long)&caps->caps;
660 args.size = sizeof(union virgl_caps);
661 ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
662 return ret;
663 }
664
665 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
666
/* Hash callback for the bo_handles table: the key is the handle value
 * itself, stored as a pointer. */
static unsigned handle_hash(void *key)
{
   return (unsigned)((intptr_t)key);
}
671
/* Compare callback for the bo_handles table: non-zero when the two
 * handle keys differ. */
static int handle_compare(void *key1, void *key2)
{
   return (unsigned)((intptr_t)key1) != (unsigned)((intptr_t)key2);
}
676
677 static struct pipe_fence_handle *
678 virgl_cs_create_fence(struct virgl_winsys *vws)
679 {
680 struct virgl_hw_res *res;
681
682 res = virgl_drm_winsys_resource_cache_create(vws,
683 PIPE_BUFFER,
684 PIPE_FORMAT_R8_UNORM,
685 VIRGL_BIND_CUSTOM,
686 8, 1, 1, 0, 0, 0, 8);
687
688 return (struct pipe_fence_handle *)res;
689 }
690
691 static bool virgl_fence_wait(struct virgl_winsys *vws,
692 struct pipe_fence_handle *fence,
693 uint64_t timeout)
694 {
695 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
696 struct virgl_hw_res *res = (struct virgl_hw_res *)fence;
697
698 if (timeout == 0)
699 return virgl_drm_resource_is_busy(vdws, res);
700
701 if (timeout != PIPE_TIMEOUT_INFINITE) {
702 int64_t start_time = os_time_get();
703 timeout /= 1000;
704 while (virgl_drm_resource_is_busy(vdws, res)) {
705 if (os_time_get() - start_time >= timeout)
706 return FALSE;
707 os_time_sleep(10);
708 }
709 return TRUE;
710 }
711 virgl_drm_resource_wait(vws, res);
712 return TRUE;
713 }
714
/* Fences are plain hardware resources, so fence refcounting reuses the
 * resource reference machinery. */
static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   virgl_drm_resource_reference(virgl_drm_winsys(vws),
                                (struct virgl_hw_res **)dst,
                                (struct virgl_hw_res *)src);
}
723
724
725 struct virgl_winsys *
726 virgl_drm_winsys_create(int drmFD)
727 {
728 struct virgl_drm_winsys *qdws;
729
730 qdws = CALLOC_STRUCT(virgl_drm_winsys);
731 if (!qdws)
732 return NULL;
733
734 qdws->fd = drmFD;
735 qdws->num_delayed = 0;
736 qdws->usecs = 1000000;
737 LIST_INITHEAD(&qdws->delayed);
738 pipe_mutex_init(qdws->mutex);
739 pipe_mutex_init(qdws->bo_handles_mutex);
740 qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
741 qdws->base.destroy = virgl_drm_winsys_destroy;
742
743 qdws->base.transfer_put = virgl_bo_transfer_put;
744 qdws->base.transfer_get = virgl_bo_transfer_get;
745 qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
746 qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
747 qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
748 qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
749 qdws->base.resource_map = virgl_drm_resource_map;
750 qdws->base.resource_wait = virgl_drm_resource_wait;
751 qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
752 qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
753 qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
754 qdws->base.emit_res = virgl_drm_emit_res;
755 qdws->base.res_is_referenced = virgl_drm_res_is_ref;
756
757 qdws->base.cs_create_fence = virgl_cs_create_fence;
758 qdws->base.fence_wait = virgl_fence_wait;
759 qdws->base.fence_reference = virgl_fence_reference;
760
761 qdws->base.get_caps = virgl_drm_get_caps;
762 return &qdws->base;
763
764 }