virgl: remove the _FILE_OFFSET_BITS defines
[mesa.git] / src / gallium / winsys / virgl / drm / virgl_drm_winsys.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "virgl_drm_winsys.h"
24 #include "virgl_drm_public.h"
25 #include "util/u_memory.h"
26 #include "util/u_format.h"
27 #include "state_tracker/drm_driver.h"
28
29 #include "os/os_mman.h"
30 #include "os/os_time.h"
31 #include <sys/ioctl.h>
32 #include <errno.h>
33 #include <xf86drm.h>
34 #include <fcntl.h>
35 #include <stdio.h>
36 #include "virtgpu_drm.h"
37
38 static inline boolean can_cache_resource(struct virgl_hw_res *res)
39 {
40 return res->cacheable == TRUE;
41 }
42
43 static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
44 struct virgl_hw_res *res)
45 {
46 struct drm_gem_close args;
47
48 if (res->name) {
49 pipe_mutex_lock(qdws->bo_handles_mutex);
50 util_hash_table_remove(qdws->bo_handles,
51 (void *)(uintptr_t)res->name);
52 pipe_mutex_unlock(qdws->bo_handles_mutex);
53 }
54
55 if (res->ptr)
56 os_munmap(res->ptr, res->size);
57
58 args.handle = res->bo_handle;
59 drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
60 FREE(res);
61 }
62
63 static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws, struct virgl_hw_res *res)
64 {
65 struct drm_virtgpu_3d_wait waitcmd;
66 int ret;
67
68 waitcmd.handle = res->bo_handle;
69 waitcmd.flags = VIRTGPU_WAIT_NOWAIT;
70
71 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
72 if (ret && errno == EBUSY)
73 return TRUE;
74 return FALSE;
75 }
76
77 static void
78 virgl_cache_flush(struct virgl_drm_winsys *qdws)
79 {
80 struct list_head *curr, *next;
81 struct virgl_hw_res *res;
82
83 pipe_mutex_lock(qdws->mutex);
84 curr = qdws->delayed.next;
85 next = curr->next;
86
87 while (curr != &qdws->delayed) {
88 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
89 LIST_DEL(&res->head);
90 virgl_hw_res_destroy(qdws, res);
91 curr = next;
92 next = curr->next;
93 }
94 pipe_mutex_unlock(qdws->mutex);
95 }
96 static void
97 virgl_drm_winsys_destroy(struct virgl_winsys *qws)
98 {
99 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
100
101 virgl_cache_flush(qdws);
102
103 util_hash_table_destroy(qdws->bo_handles);
104 pipe_mutex_destroy(qdws->bo_handles_mutex);
105 pipe_mutex_destroy(qdws->mutex);
106
107 FREE(qdws);
108 }
109
110 static void
111 virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
112 {
113 struct list_head *curr, *next;
114 struct virgl_hw_res *res;
115 int64_t now;
116
117 now = os_time_get();
118 curr = qdws->delayed.next;
119 next = curr->next;
120 while (curr != &qdws->delayed) {
121 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
122 if (!os_time_timeout(res->start, res->end, now))
123 break;
124
125 LIST_DEL(&res->head);
126 virgl_hw_res_destroy(qdws, res);
127 curr = next;
128 next = curr->next;
129 }
130 }
131
/*
 * Make *dres point at sres, releasing the old reference.  When the last
 * reference to the old resource drops, it is destroyed immediately unless
 * it is cacheable, in which case it is parked on the delayed list for
 * reuse by virgl_drm_winsys_resource_cache_create.
 */
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   /* pipe_reference returns true when the old object's refcount reached
    * zero and it must be disposed of. */
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         pipe_mutex_lock(qdws->mutex);
         /* Opportunistically reap already-expired cache entries. */
         virgl_cache_list_check_free(qdws);

         /* Park on the cache with a qdws->usecs grace period. */
         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         pipe_mutex_unlock(qdws->mutex);
      }
   }
   *dres = sres;
}
154
155 static struct virgl_hw_res *virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
156 enum pipe_texture_target target,
157 uint32_t format,
158 uint32_t bind,
159 uint32_t width,
160 uint32_t height,
161 uint32_t depth,
162 uint32_t array_size,
163 uint32_t last_level,
164 uint32_t nr_samples,
165 uint32_t size)
166 {
167 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
168 struct drm_virtgpu_resource_create createcmd;
169 int ret;
170 struct virgl_hw_res *res;
171 uint32_t stride = width * util_format_get_blocksize(format);
172
173 res = CALLOC_STRUCT(virgl_hw_res);
174 if (!res)
175 return NULL;
176
177 createcmd.target = target;
178 createcmd.format = format;
179 createcmd.bind = bind;
180 createcmd.width = width;
181 createcmd.height = height;
182 createcmd.depth = depth;
183 createcmd.array_size = array_size;
184 createcmd.last_level = last_level;
185 createcmd.nr_samples = nr_samples;
186 createcmd.res_handle = 0;
187 createcmd.stride = stride;
188 createcmd.size = size;
189 createcmd.flags = 0;
190
191 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
192 if (ret != 0) {
193 FREE(res);
194 return NULL;
195 }
196
197 res->bind = bind;
198 res->format = format;
199
200 res->res_handle = createcmd.res_handle;
201 res->bo_handle = createcmd.bo_handle;
202 res->size = size;
203 res->stride = stride;
204 pipe_reference_init(&res->reference, 1);
205 res->num_cs_references = 0;
206 return res;
207 }
208
209 static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
210 struct virgl_hw_res *res,
211 uint32_t size, uint32_t bind, uint32_t format)
212 {
213 if (res->bind != bind)
214 return 0;
215 if (res->format != format)
216 return 0;
217 if (res->size < size)
218 return 0;
219 if (res->size > size * 2)
220 return 0;
221
222 if (virgl_drm_resource_is_busy(qdws, res)) {
223 return -1;
224 }
225
226 return 1;
227 }
228
229 static int
230 virgl_bo_transfer_put(struct virgl_winsys *vws,
231 struct virgl_hw_res *res,
232 const struct pipe_box *box,
233 uint32_t stride, uint32_t layer_stride,
234 uint32_t buf_offset, uint32_t level)
235 {
236 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
237 struct drm_virtgpu_3d_transfer_to_host tohostcmd;
238 int ret;
239
240 tohostcmd.bo_handle = res->bo_handle;
241 tohostcmd.box = *(struct drm_virtgpu_3d_box *)box;
242 tohostcmd.offset = buf_offset;
243 tohostcmd.level = level;
244 // tohostcmd.stride = stride;
245 // tohostcmd.layer_stride = stride;
246 ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
247 return ret;
248 }
249
250 static int
251 virgl_bo_transfer_get(struct virgl_winsys *vws,
252 struct virgl_hw_res *res,
253 const struct pipe_box *box,
254 uint32_t stride, uint32_t layer_stride,
255 uint32_t buf_offset, uint32_t level)
256 {
257 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
258 struct drm_virtgpu_3d_transfer_from_host fromhostcmd;
259 int ret;
260
261 fromhostcmd.bo_handle = res->bo_handle;
262 fromhostcmd.level = level;
263 fromhostcmd.offset = buf_offset;
264 // fromhostcmd.stride = stride;
265 // fromhostcmd.layer_stride = layer_stride;
266 fromhostcmd.box = *(struct drm_virtgpu_3d_box *)box;
267 ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
268 return ret;
269 }
270
271 static struct virgl_hw_res *virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
272 enum pipe_texture_target target,
273 uint32_t format,
274 uint32_t bind,
275 uint32_t width,
276 uint32_t height,
277 uint32_t depth,
278 uint32_t array_size,
279 uint32_t last_level,
280 uint32_t nr_samples,
281 uint32_t size)
282 {
283 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
284 struct virgl_hw_res *res, *curr_res;
285 struct list_head *curr, *next;
286 int64_t now;
287 int ret;
288
289 /* only store binds for vertex/index/const buffers */
290 if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
291 bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
292 goto alloc;
293
294 pipe_mutex_lock(qdws->mutex);
295
296 res = NULL;
297 curr = qdws->delayed.next;
298 next = curr->next;
299
300 now = os_time_get();
301 while (curr != &qdws->delayed) {
302 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
303
304 if (!res && (ret = virgl_is_res_compat(qdws, curr_res, size, bind, format) > 0))
305 res = curr_res;
306 else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
307 LIST_DEL(&curr_res->head);
308 virgl_hw_res_destroy(qdws, curr_res);
309 } else
310 break;
311
312 if (ret == -1)
313 break;
314
315 curr = next;
316 next = curr->next;
317 }
318
319 if (!res && ret != -1) {
320 while (curr != &qdws->delayed) {
321 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
322 ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
323 if (ret > 0) {
324 res = curr_res;
325 break;
326 }
327 if (ret == -1)
328 break;
329 curr = next;
330 next = curr->next;
331 }
332 }
333
334 if (res) {
335 LIST_DEL(&res->head);
336 --qdws->num_delayed;
337 pipe_mutex_unlock(qdws->mutex);
338 pipe_reference_init(&res->reference, 1);
339 return res;
340 }
341
342 pipe_mutex_unlock(qdws->mutex);
343
344 alloc:
345 res = virgl_drm_winsys_resource_create(qws, target, format, bind,
346 width, height, depth, array_size,
347 last_level, nr_samples, size);
348 if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
349 bind == VIRGL_BIND_VERTEX_BUFFER)
350 res->cacheable = TRUE;
351 return res;
352 }
353
/*
 * Import a resource from a winsys handle: a flink name (SHARED) or a
 * dma-buf fd (FD).  Flink imports are deduplicated through the
 * bo_handles table so the same name maps to one virgl_hw_res; a table
 * hit just takes another reference.  Returns NULL on failure.
 */
static struct virgl_hw_res *virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;

   /* Lookup and insert must be atomic w.r.t. concurrent imports. */
   pipe_mutex_lock(qdws->bo_handles_mutex);

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)whandle->handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      int r;
      uint32_t handle;
      /* dma-buf import: turn the fd into a GEM handle. */
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = handle;
   } else {
      /* flink import via GEM_OPEN. */
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   /* NOTE(review): for FD imports whandle->handle is the fd, so 'name'
    * and the table key below hold an fd, not a flink name — confirm
    * this is intended. */
   res->name = whandle->handle;

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   /* Ask the kernel for the virgl resource id, size and stride. */
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      /* NOTE(review): the GEM handle acquired above is leaked on this
       * error path (the comment suggests a close was intended) — verify
       * and fix with DRM_IOCTL_GEM_CLOSE. */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)whandle->handle, res);

done:
   pipe_mutex_unlock(qdws->bo_handles_mutex);
   return res;
}
422
/*
 * Export @res as a winsys handle of the requested type: flink name
 * (SHARED), raw GEM handle (KMS) or dma-buf fd (FD).  The caller's
 * stride is echoed back through @whandle.  Returns FALSE on failure.
 */
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;
   memset(&flink, 0, sizeof(flink));

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      /* Flink at most once and cache the name; register the resource in
       * bo_handles so a later import of this name finds the same object. */
      if (!res->flinked) {
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flinked = TRUE;
         res->flink = flink.name;

         pipe_mutex_lock(qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->flink, res);
         pipe_mutex_unlock(qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      /* dma-buf export; CLOEXEC keeps the fd from leaking across exec. */
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
   }
   whandle->stride = stride;
   return TRUE;
}
459
460 static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
461 struct virgl_hw_res *hres)
462 {
463 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
464
465 virgl_drm_resource_reference(qdws, &hres, NULL);
466 }
467
468 static void *virgl_drm_resource_map(struct virgl_winsys *qws, struct virgl_hw_res *res)
469 {
470 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
471 struct drm_virtgpu_map mmap_arg;
472 void *ptr;
473
474 if (res->ptr)
475 return res->ptr;
476
477 mmap_arg.handle = res->bo_handle;
478 if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
479 return NULL;
480
481 ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
482 qdws->fd, mmap_arg.offset);
483 if (ptr == MAP_FAILED)
484 return NULL;
485
486 res->ptr = ptr;
487 return ptr;
488
489 }
490
/* Block until the host is finished with @res (flags == 0 means a real
 * blocking wait, unlike the NOWAIT poll in virgl_drm_resource_is_busy). */
static void virgl_drm_resource_wait(struct virgl_winsys *qws, struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   waitcmd.handle = res->bo_handle;
   waitcmd.flags = 0;
again:
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   /* NOTE(review): drmIoctl returns -1 with errno set and already retries
    * EINTR/EAGAIN internally, so ret == -EAGAIN looks unreachable —
    * confirm against libdrm; the intent is likely (ret == -1 &&
    * errno == EAGAIN). */
   if (ret == -EAGAIN)
      goto again;
}
504
505 static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws)
506 {
507 struct virgl_drm_cmd_buf *cbuf;
508
509 cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
510 if (!cbuf)
511 return NULL;
512
513 cbuf->ws = qws;
514
515 cbuf->nres = 512;
516 cbuf->res_bo = (struct virgl_hw_res **)
517 CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
518 if (!cbuf->res_bo) {
519 FREE(cbuf);
520 return NULL;
521 }
522 cbuf->res_hlist = (uint32_t *)malloc(cbuf->nres * sizeof(uint32_t));
523 if (!cbuf->res_hlist) {
524 FREE(cbuf->res_bo);
525 FREE(cbuf);
526 return NULL;
527 }
528
529 cbuf->base.buf = cbuf->buf;
530 return &cbuf->base;
531 }
532
533 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
534 {
535 struct virgl_drm_cmd_buf *cbuf = (struct virgl_drm_cmd_buf *)_cbuf;
536
537 FREE(cbuf->res_hlist);
538 FREE(cbuf->res_bo);
539 FREE(cbuf);
540
541 }
542
/*
 * Check whether @res is already tracked by @cbuf.  A small hash over the
 * resource handle gives a likely index; on a miss there, fall back to a
 * linear scan of all tracked resources and refresh the hash slot.
 * NOTE(review): the mask assumes sizeof(is_handle_added) is a power of
 * two — confirm against the struct definition in the header.
 */
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      /* Fast path: the cached index for this hash slot still matches. */
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      /* Slow path: scan everything; remember the index if found. */
      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}
563
564 static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
565 struct virgl_drm_cmd_buf *cbuf, struct virgl_hw_res *res)
566 {
567 unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
568
569 if (cbuf->cres > cbuf->nres) {
570 fprintf(stderr,"failure to add relocation\n");
571 return;
572 }
573
574 cbuf->res_bo[cbuf->cres] = NULL;
575 virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
576 cbuf->res_hlist[cbuf->cres] = res->bo_handle;
577 cbuf->is_handle_added[hash] = TRUE;
578
579 cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
580 p_atomic_inc(&res->num_cs_references);
581 cbuf->cres++;
582 }
583
584 static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
585 struct virgl_drm_cmd_buf *cbuf)
586 {
587 int i;
588
589 for (i = 0; i < cbuf->cres; i++) {
590 p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
591 virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
592 }
593 cbuf->cres = 0;
594 }
595
596 static void virgl_drm_emit_res(struct virgl_winsys *qws,
597 struct virgl_cmd_buf *_cbuf, struct virgl_hw_res *res, boolean write_buf)
598 {
599 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
600 struct virgl_drm_cmd_buf *cbuf = (struct virgl_drm_cmd_buf *)_cbuf;
601 boolean already_in_list = virgl_drm_lookup_res(cbuf, res);
602
603 if (write_buf)
604 cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;
605
606 if (!already_in_list)
607 virgl_drm_add_res(qdws, cbuf, res);
608 }
609
610 static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
611 struct virgl_cmd_buf *_cbuf,
612 struct virgl_hw_res *res)
613 {
614 if (!res->num_cs_references)
615 return FALSE;
616
617 return TRUE;
618 }
619
/*
 * Flush the accumulated command stream to the kernel via EXECBUFFER,
 * then reset the buffer and release all resource references it held.
 * An empty buffer is a no-op.  Returns the ioctl result.
 */
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws, struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = (struct virgl_drm_cmd_buf *)_cbuf;
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;  /* cdw counts dwords; size is bytes */
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   /* The buffer is reset even on failure; drop our references now that
    * the handle list has been handed to the kernel. */
   virgl_drm_release_all_res(qdws, cbuf);

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
   return ret;
}
646
647 static int virgl_drm_get_caps(struct virgl_winsys *vws, struct virgl_drm_caps *caps)
648 {
649 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
650 struct drm_virtgpu_get_caps args;
651 int ret;
652
653 memset(&args, 0, sizeof(args));
654
655 args.cap_set_id = 1;
656 args.addr = (unsigned long)&caps->caps;
657 args.size = sizeof(union virgl_caps);
658 ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
659 return ret;
660 }
661
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

/* Hash callback for the bo_handles table: keys are handle/name integers
 * stuffed into pointers, so the key value itself is the hash. */
static unsigned handle_hash(void *key)
{
   unsigned h = PTR_TO_UINT(key);
   return h;
}
668
/* Equality callback for the bo_handles table: 0 when the two
 * pointer-encoded keys hold the same value, non-zero otherwise. */
static int handle_compare(void *key1, void *key2)
{
   return (unsigned)(intptr_t)key1 != (unsigned)(intptr_t)key2;
}
673
674 static struct pipe_fence_handle *
675 virgl_cs_create_fence(struct virgl_winsys *vws)
676 {
677 struct virgl_hw_res *res;
678
679 res = virgl_drm_winsys_resource_cache_create(vws,
680 PIPE_BUFFER,
681 PIPE_FORMAT_R8_UNORM,
682 VIRGL_BIND_CUSTOM,
683 8, 1, 1, 0, 0, 0, 8);
684
685 return (struct pipe_fence_handle *)res;
686 }
687
/*
 * Wait for a fence (a dummy resource) with an optional timeout.
 * timeout == 0 polls once; PIPE_TIMEOUT_INFINITE blocks in the kernel;
 * any other value busy-polls with short sleeps until the deadline.
 * NOTE(review): the timeout == 0 path returns the *busy* status directly
 * (TRUE while still busy); most fence_wait contracts return TRUE when
 * the fence has signalled — confirm the expected polarity with callers.
 */
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *fence,
                             uint64_t timeout)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct virgl_hw_res *res = (struct virgl_hw_res *)fence;

   if (timeout == 0)
      return virgl_drm_resource_is_busy(vdws, res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      /* Convert the timeout to the os_time_get() tick unit — presumably
       * ns -> us; verify against PIPE_TIMEOUT units. */
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vdws, res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   virgl_drm_resource_wait(vws, res);
   return TRUE;
}
711
/* Fences are plain resources underneath, so fence refcounting simply
 * delegates to the resource reference helper. */
static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);

   virgl_drm_resource_reference(vdws, (struct virgl_hw_res **)dst,
                                (struct virgl_hw_res *)src);
}
720
721
722 struct virgl_winsys *
723 virgl_drm_winsys_create(int drmFD)
724 {
725 struct virgl_drm_winsys *qdws;
726
727 qdws = CALLOC_STRUCT(virgl_drm_winsys);
728 if (!qdws)
729 return NULL;
730
731 qdws->fd = drmFD;
732 qdws->num_delayed = 0;
733 qdws->usecs = 1000000;
734 LIST_INITHEAD(&qdws->delayed);
735 pipe_mutex_init(qdws->mutex);
736 pipe_mutex_init(qdws->bo_handles_mutex);
737 qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
738 qdws->base.destroy = virgl_drm_winsys_destroy;
739
740 qdws->base.transfer_put = virgl_bo_transfer_put;
741 qdws->base.transfer_get = virgl_bo_transfer_get;
742 qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
743 qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
744 qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
745 qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
746 qdws->base.resource_map = virgl_drm_resource_map;
747 qdws->base.resource_wait = virgl_drm_resource_wait;
748 qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
749 qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
750 qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
751 qdws->base.emit_res = virgl_drm_emit_res;
752 qdws->base.res_is_referenced = virgl_drm_res_is_ref;
753
754 qdws->base.cs_create_fence = virgl_cs_create_fence;
755 qdws->base.fence_wait = virgl_fence_wait;
756 qdws->base.fence_reference = virgl_fence_reference;
757
758 qdws->base.get_caps = virgl_drm_get_caps;
759 return &qdws->base;
760
761 }