0616de31309e59eb6fa10e6d5dabf768a43f5d1b
[mesa.git] / src / gallium / winsys / virgl / drm / virgl_drm_winsys.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "virgl_drm_winsys.h"
24 #include "virgl_drm_public.h"
25 #include "util/u_memory.h"
26 #include "util/u_format.h"
27 #include "state_tracker/drm_driver.h"
28
29 #include "os/os_mman.h"
30 #include "os/os_time.h"
31 #include <sys/ioctl.h>
32 #include <errno.h>
33 #include <xf86drm.h>
34 #include <fcntl.h>
35 #include <stdio.h>
36 #include "virtgpu_drm.h"
37
38 static inline boolean can_cache_resource(struct virgl_hw_res *res)
39 {
40 return res->cacheable == TRUE;
41 }
42
43 static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
44 struct virgl_hw_res *res)
45 {
46 struct drm_gem_close args;
47
48 if (res->name) {
49 pipe_mutex_lock(qdws->bo_handles_mutex);
50 util_hash_table_remove(qdws->bo_handles,
51 (void *)(uintptr_t)res->name);
52 pipe_mutex_unlock(qdws->bo_handles_mutex);
53 }
54
55 if (res->ptr)
56 os_munmap(res->ptr, res->size);
57
58 memset(&args, 0, sizeof(args));
59 args.handle = res->bo_handle;
60 drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
61 FREE(res);
62 }
63
64 static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws, struct virgl_hw_res *res)
65 {
66 struct drm_virtgpu_3d_wait waitcmd;
67 int ret;
68
69 memset(&waitcmd, 0, sizeof(waitcmd));
70 waitcmd.handle = res->bo_handle;
71 waitcmd.flags = VIRTGPU_WAIT_NOWAIT;
72
73 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
74 if (ret && errno == EBUSY)
75 return TRUE;
76 return FALSE;
77 }
78
79 static void
80 virgl_cache_flush(struct virgl_drm_winsys *qdws)
81 {
82 struct list_head *curr, *next;
83 struct virgl_hw_res *res;
84
85 pipe_mutex_lock(qdws->mutex);
86 curr = qdws->delayed.next;
87 next = curr->next;
88
89 while (curr != &qdws->delayed) {
90 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
91 LIST_DEL(&res->head);
92 virgl_hw_res_destroy(qdws, res);
93 curr = next;
94 next = curr->next;
95 }
96 pipe_mutex_unlock(qdws->mutex);
97 }
98 static void
99 virgl_drm_winsys_destroy(struct virgl_winsys *qws)
100 {
101 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
102
103 virgl_cache_flush(qdws);
104
105 util_hash_table_destroy(qdws->bo_handles);
106 pipe_mutex_destroy(qdws->bo_handles_mutex);
107 pipe_mutex_destroy(qdws->mutex);
108
109 FREE(qdws);
110 }
111
112 static void
113 virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
114 {
115 struct list_head *curr, *next;
116 struct virgl_hw_res *res;
117 int64_t now;
118
119 now = os_time_get();
120 curr = qdws->delayed.next;
121 next = curr->next;
122 while (curr != &qdws->delayed) {
123 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
124 if (!os_time_timeout(res->start, res->end, now))
125 break;
126
127 LIST_DEL(&res->head);
128 virgl_hw_res_destroy(qdws, res);
129 curr = next;
130 next = curr->next;
131 }
132 }
133
/*
 * Make *dres reference sres instead, adjusting reference counts.
 *
 * When the previously referenced resource drops to zero references it is
 * destroyed immediately unless it is cacheable, in which case it is
 * timestamped and appended to the delayed-free list so that
 * virgl_drm_winsys_resource_cache_create() can recycle it for up to
 * qdws->usecs microseconds.
 *
 * NOTE(review): call sites pass NULL for either side (see
 * virgl_drm_winsys_resource_unref); this relies on pipe_reference()
 * tolerating the resulting &NULL->reference pointers - confirm against
 * the util/u_inlines implementation.
 */
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   /* pipe_reference() returns TRUE when the old object's count hit zero. */
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         pipe_mutex_lock(qdws->mutex);
         /* Opportunistically reap cache entries that already expired. */
         virgl_cache_list_check_free(qdws);

         /* Park the resource until start + usecs before really freeing it. */
         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         pipe_mutex_unlock(qdws->mutex);
      }
   }
   *dres = sres;
}
156
157 static struct virgl_hw_res *virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
158 enum pipe_texture_target target,
159 uint32_t format,
160 uint32_t bind,
161 uint32_t width,
162 uint32_t height,
163 uint32_t depth,
164 uint32_t array_size,
165 uint32_t last_level,
166 uint32_t nr_samples,
167 uint32_t size)
168 {
169 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
170 struct drm_virtgpu_resource_create createcmd;
171 int ret;
172 struct virgl_hw_res *res;
173 uint32_t stride = width * util_format_get_blocksize(format);
174
175 res = CALLOC_STRUCT(virgl_hw_res);
176 if (!res)
177 return NULL;
178
179 memset(&createcmd, 0, sizeof(createcmd));
180 createcmd.target = target;
181 createcmd.format = format;
182 createcmd.bind = bind;
183 createcmd.width = width;
184 createcmd.height = height;
185 createcmd.depth = depth;
186 createcmd.array_size = array_size;
187 createcmd.last_level = last_level;
188 createcmd.nr_samples = nr_samples;
189 createcmd.stride = stride;
190 createcmd.size = size;
191
192 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
193 if (ret != 0) {
194 FREE(res);
195 return NULL;
196 }
197
198 res->bind = bind;
199 res->format = format;
200
201 res->res_handle = createcmd.res_handle;
202 res->bo_handle = createcmd.bo_handle;
203 res->size = size;
204 res->stride = stride;
205 pipe_reference_init(&res->reference, 1);
206 res->num_cs_references = 0;
207 return res;
208 }
209
210 static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
211 struct virgl_hw_res *res,
212 uint32_t size, uint32_t bind, uint32_t format)
213 {
214 if (res->bind != bind)
215 return 0;
216 if (res->format != format)
217 return 0;
218 if (res->size < size)
219 return 0;
220 if (res->size > size * 2)
221 return 0;
222
223 if (virgl_drm_resource_is_busy(qdws, res)) {
224 return -1;
225 }
226
227 return 1;
228 }
229
/*
 * Queue a guest -> host (TRANSFER_TO_HOST) copy of a sub-box of @res.
 * Returns the drmIoctl() result: 0 on success, -1 with errno set on error.
 */
static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   /* NOTE(review): reinterprets pipe_box as drm_virtgpu_3d_box; this
    * assumes the two structs have identical member layout - confirm. */
   tohostcmd.box = *(struct drm_virtgpu_3d_box *)box;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;
   /* stride/layer_stride are currently not forwarded to the kernel. */
// tohostcmd.stride = stride;
// tohostcmd.layer_stride = stride;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}
249
/*
 * Queue a host -> guest (TRANSFER_FROM_HOST) copy of a sub-box of @res.
 * Returns the drmIoctl() result: 0 on success, -1 with errno set on error.
 */
static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   /* stride/layer_stride are currently not forwarded to the kernel. */
// fromhostcmd.stride = stride;
// fromhostcmd.layer_stride = layer_stride;
   /* NOTE(review): reinterprets pipe_box as drm_virtgpu_3d_box; this
    * assumes the two structs have identical member layout - confirm. */
   fromhostcmd.box = *(struct drm_virtgpu_3d_box *)box;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}
269
270 static struct virgl_hw_res *virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
271 enum pipe_texture_target target,
272 uint32_t format,
273 uint32_t bind,
274 uint32_t width,
275 uint32_t height,
276 uint32_t depth,
277 uint32_t array_size,
278 uint32_t last_level,
279 uint32_t nr_samples,
280 uint32_t size)
281 {
282 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
283 struct virgl_hw_res *res, *curr_res;
284 struct list_head *curr, *next;
285 int64_t now;
286 int ret;
287
288 /* only store binds for vertex/index/const buffers */
289 if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
290 bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
291 goto alloc;
292
293 pipe_mutex_lock(qdws->mutex);
294
295 res = NULL;
296 curr = qdws->delayed.next;
297 next = curr->next;
298
299 now = os_time_get();
300 while (curr != &qdws->delayed) {
301 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
302
303 if (!res && (ret = virgl_is_res_compat(qdws, curr_res, size, bind, format) > 0))
304 res = curr_res;
305 else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
306 LIST_DEL(&curr_res->head);
307 virgl_hw_res_destroy(qdws, curr_res);
308 } else
309 break;
310
311 if (ret == -1)
312 break;
313
314 curr = next;
315 next = curr->next;
316 }
317
318 if (!res && ret != -1) {
319 while (curr != &qdws->delayed) {
320 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
321 ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
322 if (ret > 0) {
323 res = curr_res;
324 break;
325 }
326 if (ret == -1)
327 break;
328 curr = next;
329 next = curr->next;
330 }
331 }
332
333 if (res) {
334 LIST_DEL(&res->head);
335 --qdws->num_delayed;
336 pipe_mutex_unlock(qdws->mutex);
337 pipe_reference_init(&res->reference, 1);
338 return res;
339 }
340
341 pipe_mutex_unlock(qdws->mutex);
342
343 alloc:
344 res = virgl_drm_winsys_resource_create(qws, target, format, bind,
345 width, height, depth, array_size,
346 last_level, nr_samples, size);
347 if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
348 bind == VIRGL_BIND_VERTEX_BUFFER)
349 res->cacheable = TRUE;
350 return res;
351 }
352
353 static struct virgl_hw_res *virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
354 struct winsys_handle *whandle)
355 {
356 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
357 struct drm_gem_open open_arg = {};
358 struct drm_virtgpu_resource_info info_arg = {};
359 struct virgl_hw_res *res;
360
361 pipe_mutex_lock(qdws->bo_handles_mutex);
362
363 if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
364 res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)whandle->handle);
365 if (res) {
366 struct virgl_hw_res *r = NULL;
367 virgl_drm_resource_reference(qdws, &r, res);
368 goto done;
369 }
370 }
371
372 res = CALLOC_STRUCT(virgl_hw_res);
373 if (!res)
374 goto done;
375
376 if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
377 int r;
378 uint32_t handle;
379 r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
380 if (r) {
381 FREE(res);
382 res = NULL;
383 goto done;
384 }
385 res->bo_handle = handle;
386 } else {
387 memset(&open_arg, 0, sizeof(open_arg));
388 open_arg.name = whandle->handle;
389 if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
390 FREE(res);
391 res = NULL;
392 goto done;
393 }
394 res->bo_handle = open_arg.handle;
395 }
396 res->name = whandle->handle;
397
398 memset(&info_arg, 0, sizeof(info_arg));
399 info_arg.bo_handle = res->bo_handle;
400
401 if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
402 /* close */
403 FREE(res);
404 res = NULL;
405 goto done;
406 }
407
408 res->res_handle = info_arg.res_handle;
409
410 res->size = info_arg.size;
411 res->stride = info_arg.stride;
412 pipe_reference_init(&res->reference, 1);
413 res->num_cs_references = 0;
414
415 util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)whandle->handle, res);
416
417 done:
418 pipe_mutex_unlock(qdws->bo_handles_mutex);
419 return res;
420 }
421
/*
 * Export @res as a winsys handle of the requested type.
 *
 * SHARED: flink the BO on first use and cache the name in bo_handles.
 * KMS:    return the raw GEM handle.
 * FD:     export a prime fd (the caller owns the new fd).
 *
 * Returns TRUE on success and fills whandle->handle/stride.
 *
 * NOTE(review): an unrecognized whandle->type falls through and returns
 * TRUE without setting whandle->handle - confirm callers only ever pass
 * the three types handled here.
 */
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      if (!res->flinked) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flinked = TRUE;
         res->flink = flink.name;

         /* Register the flink name so create_from_handle can find us. */
         pipe_mutex_lock(qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->flink, res);
         pipe_mutex_unlock(qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
   }
   whandle->stride = stride;
   return TRUE;
}
458
459 static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
460 struct virgl_hw_res *hres)
461 {
462 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
463
464 virgl_drm_resource_reference(qdws, &hres, NULL);
465 }
466
467 static void *virgl_drm_resource_map(struct virgl_winsys *qws, struct virgl_hw_res *res)
468 {
469 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
470 struct drm_virtgpu_map mmap_arg;
471 void *ptr;
472
473 if (res->ptr)
474 return res->ptr;
475
476 memset(&mmap_arg, 0, sizeof(mmap_arg));
477 mmap_arg.handle = res->bo_handle;
478 if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
479 return NULL;
480
481 ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
482 qdws->fd, mmap_arg.offset);
483 if (ptr == MAP_FAILED)
484 return NULL;
485
486 res->ptr = ptr;
487 return ptr;
488
489 }
490
/*
 * Block until the host has finished using @res (WAIT ioctl without the
 * NOWAIT flag).
 *
 * NOTE(review): libdrm's drmIoctl() returns -1 with errno set and
 * already restarts on EINTR/EAGAIN internally, so the "ret == -EAGAIN"
 * comparison looks like it can never be true - confirm against the
 * drmIoctl implementation before relying on the retry loop.
 */
static void virgl_drm_resource_wait(struct virgl_winsys *qws, struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
again:
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret == -EAGAIN)
      goto again;
}
504
505 static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws)
506 {
507 struct virgl_drm_cmd_buf *cbuf;
508
509 cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
510 if (!cbuf)
511 return NULL;
512
513 cbuf->ws = qws;
514
515 cbuf->nres = 512;
516 cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
517 if (!cbuf->res_bo) {
518 FREE(cbuf);
519 return NULL;
520 }
521 cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
522 if (!cbuf->res_hlist) {
523 FREE(cbuf->res_bo);
524 FREE(cbuf);
525 return NULL;
526 }
527
528 cbuf->base.buf = cbuf->buf;
529 return &cbuf->base;
530 }
531
532 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
533 {
534 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
535
536 FREE(cbuf->res_hlist);
537 FREE(cbuf->res_bo);
538 FREE(cbuf);
539
540 }
541
/*
 * Check whether @res is already on the command buffer's relocation list.
 *
 * A small direct-mapped cache (reloc_indices_hashlist) avoids a linear
 * scan in the common case; on a scan hit the cache entry is refreshed.
 *
 * NOTE(review): the mask "sizeof(cbuf->is_handle_added) - 1" assumes
 * is_handle_added is a byte array whose size is a power of two - verify
 * against the struct declaration in virgl_drm_winsys.h.
 */
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      /* Fast path: cached index for this hash bucket. */
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      /* Slow path: scan the whole list; cache the index on a hit. */
      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}
562
563 static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
564 struct virgl_drm_cmd_buf *cbuf, struct virgl_hw_res *res)
565 {
566 unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
567
568 if (cbuf->cres > cbuf->nres) {
569 fprintf(stderr,"failure to add relocation\n");
570 return;
571 }
572
573 cbuf->res_bo[cbuf->cres] = NULL;
574 virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
575 cbuf->res_hlist[cbuf->cres] = res->bo_handle;
576 cbuf->is_handle_added[hash] = TRUE;
577
578 cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
579 p_atomic_inc(&res->num_cs_references);
580 cbuf->cres++;
581 }
582
583 static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
584 struct virgl_drm_cmd_buf *cbuf)
585 {
586 int i;
587
588 for (i = 0; i < cbuf->cres; i++) {
589 p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
590 virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
591 }
592 cbuf->cres = 0;
593 }
594
595 static void virgl_drm_emit_res(struct virgl_winsys *qws,
596 struct virgl_cmd_buf *_cbuf, struct virgl_hw_res *res, boolean write_buf)
597 {
598 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
599 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
600 boolean already_in_list = virgl_drm_lookup_res(cbuf, res);
601
602 if (write_buf)
603 cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;
604
605 if (!already_in_list)
606 virgl_drm_add_res(qdws, cbuf, res);
607 }
608
609 static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
610 struct virgl_cmd_buf *_cbuf,
611 struct virgl_hw_res *res)
612 {
613 if (!res->num_cs_references)
614 return FALSE;
615
616 return TRUE;
617 }
618
/*
 * Flush the accumulated command dwords to the kernel via the EXECBUFFER
 * ioctl, then reset the buffer and drop all relocation references.
 * Returns 0 when there is nothing to submit, otherwise the ioctl result.
 */
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws, struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   /* Nothing recorded - nothing to submit. */
   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;   /* dwords -> bytes */
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   /* The kernel holds its own references now; drop ours. */
   virgl_drm_release_all_res(qdws, cbuf);

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
   return ret;
}
645
646 static int virgl_drm_get_caps(struct virgl_winsys *vws, struct virgl_drm_caps *caps)
647 {
648 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
649 struct drm_virtgpu_get_caps args;
650
651 memset(&args, 0, sizeof(args));
652
653 args.cap_set_id = 1;
654 args.addr = (unsigned long)&caps->caps;
655 args.size = sizeof(union virgl_caps);
656 return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
657 }
658
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

/* Hash callback for the bo_handles table: the key *is* the handle. */
static unsigned handle_hash(void *key)
{
   return PTR_TO_UINT(key);
}

/* Key-comparison callback: zero when the two keys are equal. */
static int handle_compare(void *key1, void *key2)
{
   return PTR_TO_UINT(key1) == PTR_TO_UINT(key2) ? 0 : 1;
}
670
671 static struct pipe_fence_handle *
672 virgl_cs_create_fence(struct virgl_winsys *vws)
673 {
674 struct virgl_hw_res *res;
675
676 res = virgl_drm_winsys_resource_cache_create(vws,
677 PIPE_BUFFER,
678 PIPE_FORMAT_R8_UNORM,
679 VIRGL_BIND_CUSTOM,
680 8, 1, 1, 0, 0, 0, 8);
681
682 return (struct pipe_fence_handle *)res;
683 }
684
685 static bool virgl_fence_wait(struct virgl_winsys *vws,
686 struct pipe_fence_handle *fence,
687 uint64_t timeout)
688 {
689 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
690 struct virgl_hw_res *res = virgl_hw_res(fence);
691
692 if (timeout == 0)
693 return virgl_drm_resource_is_busy(vdws, res);
694
695 if (timeout != PIPE_TIMEOUT_INFINITE) {
696 int64_t start_time = os_time_get();
697 timeout /= 1000;
698 while (virgl_drm_resource_is_busy(vdws, res)) {
699 if (os_time_get() - start_time >= timeout)
700 return FALSE;
701 os_time_sleep(10);
702 }
703 return TRUE;
704 }
705 virgl_drm_resource_wait(vws, res);
706 return TRUE;
707 }
708
/* Reference-counting helper for fence handles; fences are plain
 * virgl_hw_res objects underneath, so delegate to the resource helper. */
static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   virgl_drm_resource_reference(virgl_drm_winsys(vws),
                                (struct virgl_hw_res **)dst,
                                virgl_hw_res(src));
}
717
718
719 struct virgl_winsys *
720 virgl_drm_winsys_create(int drmFD)
721 {
722 struct virgl_drm_winsys *qdws;
723
724 qdws = CALLOC_STRUCT(virgl_drm_winsys);
725 if (!qdws)
726 return NULL;
727
728 qdws->fd = drmFD;
729 qdws->num_delayed = 0;
730 qdws->usecs = 1000000;
731 LIST_INITHEAD(&qdws->delayed);
732 pipe_mutex_init(qdws->mutex);
733 pipe_mutex_init(qdws->bo_handles_mutex);
734 qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
735 qdws->base.destroy = virgl_drm_winsys_destroy;
736
737 qdws->base.transfer_put = virgl_bo_transfer_put;
738 qdws->base.transfer_get = virgl_bo_transfer_get;
739 qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
740 qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
741 qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
742 qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
743 qdws->base.resource_map = virgl_drm_resource_map;
744 qdws->base.resource_wait = virgl_drm_resource_wait;
745 qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
746 qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
747 qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
748 qdws->base.emit_res = virgl_drm_emit_res;
749 qdws->base.res_is_referenced = virgl_drm_res_is_ref;
750
751 qdws->base.cs_create_fence = virgl_cs_create_fence;
752 qdws->base.fence_wait = virgl_fence_wait;
753 qdws->base.fence_reference = virgl_fence_reference;
754
755 qdws->base.get_caps = virgl_drm_get_caps;
756 return &qdws->base;
757
758 }