4cefbe92077aa7feed3aff52d065fbff26181767
[mesa.git] / src / gallium / winsys / virgl / drm / virgl_drm_winsys.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <errno.h>
25 #include <fcntl.h>
26 #include <limits.h>
27 #include <stdio.h>
28 #include <sys/ioctl.h>
29 #include <sys/stat.h>
30
31 #include "os/os_mman.h"
32 #include "util/os_time.h"
33 #include "util/u_memory.h"
34 #include "util/u_format.h"
35 #include "util/u_hash_table.h"
36 #include "util/u_inlines.h"
37 #include "state_tracker/drm_driver.h"
38 #include "virgl/virgl_screen.h"
39 #include "virgl/virgl_public.h"
40
41 #include <xf86drm.h>
42 #include <libsync.h>
43 #include "virtgpu_drm.h"
44
45 #include "virgl_drm_winsys.h"
46 #include "virgl_drm_public.h"
47
48
49 #define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
50 #define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)
51
52
53 static inline boolean can_cache_resource(struct virgl_hw_res *res)
54 {
55 return res->cacheable == TRUE;
56 }
57
/* Final teardown of a hardware resource: remove any flink-name and GEM
 * handle table entries, unmap the cached CPU mapping, close the GEM
 * handle and free the wrapper.  Takes bo_handles_mutex internally, so
 * the caller must not hold it. */
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   /* Drop the flink-name table entry so a later import cannot resolve
    * to a freed resource. */
   if (res->flinked) {
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_remove(qdws->bo_names,
                             (void *)(uintptr_t)res->flink);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   /* Same for the GEM-handle table used by dma-buf imports/exports. */
   if (res->bo_handle) {
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_remove(qdws->bo_handles,
                             (void *)(uintptr_t)res->bo_handle);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   if (res->ptr)
      os_munmap(res->ptr, res->size);

   /* Release the kernel-side GEM object. */
   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}
85
86 static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws,
87 struct virgl_hw_res *res)
88 {
89 struct drm_virtgpu_3d_wait waitcmd;
90 int ret;
91
92 memset(&waitcmd, 0, sizeof(waitcmd));
93 waitcmd.handle = res->bo_handle;
94 waitcmd.flags = VIRTGPU_WAIT_NOWAIT;
95
96 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
97 if (ret && errno == EBUSY)
98 return TRUE;
99 return FALSE;
100 }
101
102 static void
103 virgl_cache_flush(struct virgl_drm_winsys *qdws)
104 {
105 struct list_head *curr, *next;
106 struct virgl_hw_res *res;
107
108 mtx_lock(&qdws->mutex);
109 curr = qdws->delayed.next;
110 next = curr->next;
111
112 while (curr != &qdws->delayed) {
113 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
114 LIST_DEL(&res->head);
115 virgl_hw_res_destroy(qdws, res);
116 curr = next;
117 next = curr->next;
118 }
119 mtx_unlock(&qdws->mutex);
120 }
/* virgl_winsys::destroy hook: drain the delayed-free cache first (it may
 * still hold live kernel objects), then tear down the lookup tables,
 * mutexes and the winsys object itself. */
static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_cache_flush(qdws);

   util_hash_table_destroy(qdws->bo_handles);
   util_hash_table_destroy(qdws->bo_names);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   /* NOTE(review): qdws->fd is not closed here — the caller that dup'd
    * it (see virgl_drm_screen_destroy) is responsible for that. */
   FREE(qdws);
}
135
136 static void
137 virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
138 {
139 struct list_head *curr, *next;
140 struct virgl_hw_res *res;
141 int64_t now;
142
143 now = os_time_get();
144 curr = qdws->delayed.next;
145 next = curr->next;
146 while (curr != &qdws->delayed) {
147 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
148 if (!os_time_timeout(res->start, res->end, now))
149 break;
150
151 LIST_DEL(&res->head);
152 virgl_hw_res_destroy(qdws, res);
153 curr = next;
154 next = curr->next;
155 }
156 }
157
/* Make *dres reference sres, releasing the old reference held in *dres.
 * When the old resource's refcount drops to zero it is either destroyed
 * immediately or, for cacheable buffers, parked on the delayed-free list
 * with a deadline of qdws->usecs microseconds so it can be recycled. */
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   /* pipe_reference() returns TRUE when the old object must be freed. */
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         /* Opportunistically expire anything already past its deadline. */
         virgl_cache_list_check_free(qdws);

         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}
180
/* Allocate a fresh guest/host resource pair via
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE.  'size' is the total backing-store
 * size in bytes; the row stride is derived from 'width' and the format
 * block size.  Returns a resource with refcount 1, or NULL on CALLOC or
 * ioctl failure. */
static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = format;
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;
   res->format = format;

   /* The kernel hands back both the virgl resource id and the GEM
    * handle backing it. */
   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->stride = stride;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->num_cs_references, 0);
   return res;
}
234
235 static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
236 struct virgl_hw_res *res,
237 uint32_t size, uint32_t bind,
238 uint32_t format)
239 {
240 if (res->bind != bind)
241 return 0;
242 if (res->format != format)
243 return 0;
244 if (res->size < size)
245 return 0;
246 if (res->size > size * 2)
247 return 0;
248
249 if (virgl_drm_resource_is_busy(qdws, res)) {
250 return -1;
251 }
252
253 return 1;
254 }
255
/* Queue a guest->host (upload) transfer of 'box' within mip 'level'.
 * NOTE(review): stride and layer_stride are accepted but never forwarded
 * to the kernel (the assignments are commented out) — presumably the
 * host derives them from the resource; confirm before relying on
 * non-default strides. */
static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;
  // tohostcmd.stride = stride;
  // tohostcmd.layer_stride = stride;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}
280
/* Queue a host->guest (readback) transfer of 'box' within mip 'level'.
 * NOTE(review): as with the put path, stride/layer_stride are not passed
 * down to the kernel (assignments commented out) — confirm the host-side
 * assumption before using custom strides. */
static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   // fromhostcmd.stride = stride;
   // fromhostcmd.layer_stride = layer_stride;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}
305
306 static struct virgl_hw_res *
307 virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
308 enum pipe_texture_target target,
309 uint32_t format,
310 uint32_t bind,
311 uint32_t width,
312 uint32_t height,
313 uint32_t depth,
314 uint32_t array_size,
315 uint32_t last_level,
316 uint32_t nr_samples,
317 uint32_t size)
318 {
319 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
320 struct virgl_hw_res *res, *curr_res;
321 struct list_head *curr, *next;
322 int64_t now;
323 int ret = 0;
324
325 /* only store binds for vertex/index/const buffers */
326 if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
327 bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
328 goto alloc;
329
330 mtx_lock(&qdws->mutex);
331
332 res = NULL;
333 curr = qdws->delayed.next;
334 next = curr->next;
335
336 now = os_time_get();
337 while (curr != &qdws->delayed) {
338 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
339
340 if (!res && ((ret = virgl_is_res_compat(qdws, curr_res, size, bind, format)) > 0))
341 res = curr_res;
342 else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
343 LIST_DEL(&curr_res->head);
344 virgl_hw_res_destroy(qdws, curr_res);
345 } else
346 break;
347
348 if (ret == -1)
349 break;
350
351 curr = next;
352 next = curr->next;
353 }
354
355 if (!res && ret != -1) {
356 while (curr != &qdws->delayed) {
357 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
358 ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
359 if (ret > 0) {
360 res = curr_res;
361 break;
362 }
363 if (ret == -1)
364 break;
365 curr = next;
366 next = curr->next;
367 }
368 }
369
370 if (res) {
371 LIST_DEL(&res->head);
372 --qdws->num_delayed;
373 mtx_unlock(&qdws->mutex);
374 pipe_reference_init(&res->reference, 1);
375 return res;
376 }
377
378 mtx_unlock(&qdws->mutex);
379
380 alloc:
381 res = virgl_drm_winsys_resource_create(qws, target, format, bind,
382 width, height, depth, array_size,
383 last_level, nr_samples, size);
384 if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
385 bind == VIRGL_BIND_VERTEX_BUFFER)
386 res->cacheable = TRUE;
387 return res;
388 }
389
/* Import a resource from a winsys handle (flink name, KMS handle or
 * dma-buf fd).  Existing imports are deduplicated through the
 * bo_names/bo_handles tables so one underlying BO always maps to a
 * single virgl_hw_res, whose refcount is bumped instead of creating a
 * duplicate wrapper.  Returns NULL on failure. */
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0) {
      fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* Fast path: this flink name was imported before. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   /* Turn a dma-buf fd into a GEM handle for the lookup below. */
   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         res = NULL;
         goto done;
      }
   }

   /* Fast path: the GEM handle is already wrapped. */
   res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(qdws, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      /* Flink name: open it to obtain a GEM handle of our own. */
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   res->name = handle;

   /* Ask the kernel for the virgl resource id, size and stride. */
   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}
474
/* Export a resource as a winsys handle: flink name (SHARED), raw GEM
 * handle (KMS) or dma-buf fd (FD).  Exported names/handles are also
 * registered in the winsys tables so a later import resolves back to
 * this same virgl_hw_res.  Returns FALSE on failure. */
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      /* Flink the BO on first export and cache the name on the resource. */
      if (!res->flinked) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flinked = TRUE;
         res->flink = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      /* The caller owns the returned dma-buf fd. */
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }
   whandle->stride = stride;
   return TRUE;
}
514
515 static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
516 struct virgl_hw_res *hres)
517 {
518 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
519
520 virgl_drm_resource_reference(qdws, &hres, NULL);
521 }
522
523 static void *virgl_drm_resource_map(struct virgl_winsys *qws,
524 struct virgl_hw_res *res)
525 {
526 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
527 struct drm_virtgpu_map mmap_arg;
528 void *ptr;
529
530 if (res->ptr)
531 return res->ptr;
532
533 memset(&mmap_arg, 0, sizeof(mmap_arg));
534 mmap_arg.handle = res->bo_handle;
535 if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
536 return NULL;
537
538 ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
539 qdws->fd, mmap_arg.offset);
540 if (ptr == MAP_FAILED)
541 return NULL;
542
543 res->ptr = ptr;
544 return ptr;
545
546 }
547
548 static void virgl_drm_resource_wait(struct virgl_winsys *qws,
549 struct virgl_hw_res *res)
550 {
551 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
552 struct drm_virtgpu_3d_wait waitcmd;
553 int ret;
554
555 memset(&waitcmd, 0, sizeof(waitcmd));
556 waitcmd.handle = res->bo_handle;
557 again:
558 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
559 if (ret == -EAGAIN)
560 goto again;
561 }
562
/* Check whether 'res' is already in the command buffer's relocation
 * list.  A small direct-mapped hash over the resource handle gives an
 * O(1) fast path; on a stale slot, fall back to a linear scan and
 * refresh the cached index on a hit. */
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   /* NOTE(review): the mask is sizeof(is_handle_added)-1, i.e. the
    * array's size in BYTES — this only behaves as a mask if that size
    * is a power of two and matches the element count; confirm against
    * the array's declaration. */
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      /* Fast path: cached index still points at this resource. */
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      /* Slot was reused by another resource: scan and re-cache. */
      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}
583
/* Append 'res' to the command buffer's relocation list, growing the
 * backing arrays in chunks of 256 when full, and take a CS reference on
 * the resource.  On allocation failure the relocation is dropped with an
 * error message (no status is returned to the caller). */
static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         fprintf(stderr,"failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      /* NOTE(review): if this second REALLOC fails, res_bo has already
       * grown but nres stays unchanged — harmless (extra capacity is
       * simply unused) but worth knowing. */
      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         fprintf(stderr,"failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   /* Store the reference and mirror the BO handle for the execbuffer
    * handle array; remember the slot in the dedup hash. */
   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}
621
622 static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
623 struct virgl_drm_cmd_buf *cbuf)
624 {
625 int i;
626
627 for (i = 0; i < cbuf->cres; i++) {
628 p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
629 virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
630 }
631 cbuf->cres = 0;
632 }
633
634 static void virgl_drm_emit_res(struct virgl_winsys *qws,
635 struct virgl_cmd_buf *_cbuf,
636 struct virgl_hw_res *res, boolean write_buf)
637 {
638 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
639 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
640 boolean already_in_list = virgl_drm_lookup_res(cbuf, res);
641
642 if (write_buf)
643 cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;
644
645 if (!already_in_list)
646 virgl_drm_add_res(qdws, cbuf, res);
647 }
648
649 static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
650 struct virgl_cmd_buf *_cbuf,
651 struct virgl_hw_res *res)
652 {
653 if (!p_atomic_read(&res->num_cs_references))
654 return FALSE;
655
656 return TRUE;
657 }
658
659 static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
660 uint32_t size)
661 {
662 struct virgl_drm_cmd_buf *cbuf;
663
664 cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
665 if (!cbuf)
666 return NULL;
667
668 cbuf->ws = qws;
669
670 cbuf->nres = 512;
671 cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
672 if (!cbuf->res_bo) {
673 FREE(cbuf);
674 return NULL;
675 }
676 cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
677 if (!cbuf->res_hlist) {
678 FREE(cbuf->res_bo);
679 FREE(cbuf);
680 return NULL;
681 }
682
683 cbuf->buf = CALLOC(size, sizeof(uint32_t));
684 if (!cbuf->buf) {
685 FREE(cbuf->res_hlist);
686 FREE(cbuf->res_bo);
687 FREE(cbuf);
688 return NULL;
689 }
690
691 cbuf->in_fence_fd = -1;
692 cbuf->base.buf = cbuf->buf;
693 return &cbuf->base;
694 }
695
696 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
697 {
698 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
699
700 virgl_drm_release_all_res(virgl_drm_winsys(cbuf->ws), cbuf);
701 FREE(cbuf->res_hlist);
702 FREE(cbuf->res_bo);
703 FREE(cbuf->buf);
704 FREE(cbuf);
705
706 }
707
/* Wrap a sync-file fd in a pipe_fence_handle.  For 'external' fds
 * (imported from outside) the fd is dup'd so the caller keeps its own
 * copy; otherwise the fence takes ownership of 'fd'.  On allocation
 * failure the (possibly dup'd) fd is closed and NULL returned. */
static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = dup(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}
734
735 static struct pipe_fence_handle *
736 virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
737 {
738 struct virgl_drm_fence *fence;
739
740 assert(!vws->supports_fences);
741
742 fence = CALLOC_STRUCT(virgl_drm_fence);
743 if (!fence)
744 return NULL;
745 fence->fd = -1;
746
747 fence->hw_res = virgl_drm_winsys_resource_cache_create(vws, PIPE_BUFFER,
748 PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8);
749 if (!fence->hw_res) {
750 FREE(fence);
751 return NULL;
752 }
753
754 pipe_reference_init(&fence->reference, 1);
755
756 return (struct pipe_fence_handle *)fence;
757 }
758
/* Flush the command buffer to the kernel via EXECBUFFER.  Optionally
 * chains an in-fence (sync-file fd accumulated on the cbuf) and, when
 * the caller passed a fence out-pointer, produces an out-fence — a
 * sync-file fence on capable kernels, a legacy dummy-resource fence
 * otherwise.  Returns the drmIoctl() result (0 on success). */
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   /* Nothing recorded: nothing to submit. */
   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;   /* dwords -> bytes */
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   eb.fence_fd = -1;
   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      /* The in-fence has been consumed by the kernel; drop our copy. */
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      /* Ownership of the returned eb.fence_fd moves into the new fence
       * (external == false, so it is not dup'd). */
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   /* Submission done: release relocations and reset the dedup hash so
    * the cbuf can be reused. */
   virgl_drm_release_all_res(qdws, cbuf);

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
   return ret;
}
813
/* Fill 'caps' from the kernel.  Kernels advertising the capset-query
 * fix are asked for cap set 2 (the larger virgl_caps union) first, with
 * a fallback to cap set 1 (v1) when the kernel rejects it with EINVAL.
 * Returns the last drmIoctl() result. */
static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   /* Start from sane defaults in case the kernel fills in less data. */
   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (vdws->has_capset_query_fix) {
      /* if we have the query fix - try and get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to v1 */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}
845
846 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
847
/* Identity hash for integer keys (GEM handles / flink names) stored
 * directly in the pointer value. */
static unsigned handle_hash(void *key)
{
   return (unsigned)(intptr_t)key;
}
852
/* Hash-table key comparison for pointer-encoded integer keys: returns 0
 * when the keys match, non-zero otherwise. */
static int handle_compare(void *key1, void *key2)
{
   return (unsigned)(intptr_t)key1 != (unsigned)(intptr_t)key2;
}
857
858 static struct pipe_fence_handle *
859 virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
860 {
861 if (!vws->supports_fences)
862 return NULL;
863
864 return virgl_drm_fence_create(vws, fd, true);
865 }
866
/* Wait for a fence with a timeout in nanoseconds (0 = poll only,
 * PIPE_TIMEOUT_INFINITE = block).  Returns true when the fence signalled
 * within the timeout.  Fence-fd kernels use sync_wait(); legacy fences
 * poll the busy state of the fence's dummy resource. */
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;

      /* sync_wait() takes milliseconds; convert from nanoseconds. */
      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      /* A -1 timeout means "wait forever" for sync_wait(). */
      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   /* Legacy path: the fence is signalled once the dummy BO is idle. */
   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vdws, fence->hw_res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      /* os_time_get() works in microseconds; convert from nanoseconds. */
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vdws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return TRUE;
}
908
/* Reference-counted fence assignment: *dst takes over src.  When the
 * old fence in *dst drops its last reference, its backing sync-file fd
 * (fence-fd mode) or dummy resource (legacy mode) is released. */
static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   /* pipe_reference() returns TRUE when the old fence must be freed. */
   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
         virgl_hw_res_destroy(vdws, dfence->hw_res);
      }
      FREE(dfence);
   }

   *dst = src;
}
928
929 static void virgl_fence_server_sync(struct virgl_winsys *vws,
930 struct virgl_cmd_buf *_cbuf,
931 struct pipe_fence_handle *_fence)
932 {
933 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
934 struct virgl_drm_fence *fence = virgl_drm_fence(_fence);
935
936 if (!vws->supports_fences)
937 return;
938
939 /* if not an external fence, then nothing more to do without preemption: */
940 if (!fence->external)
941 return;
942
943 sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
944 }
945
946 static int virgl_fence_get_fd(struct virgl_winsys *vws,
947 struct pipe_fence_handle *_fence)
948 {
949 struct virgl_drm_fence *fence = virgl_drm_fence(_fence);
950
951 if (!vws->supports_fences)
952 return -1;
953
954 return dup(fence->fd);
955 }
956
957 static int virgl_drm_get_version(int fd)
958 {
959 int ret;
960 drmVersionPtr version;
961
962 version = drmGetVersion(fd);
963
964 if (!version)
965 ret = -EFAULT;
966 else if (version->version_major != 0)
967 ret = -EINVAL;
968 else
969 ret = VIRGL_DRM_VERSION(0, version->version_minor);
970
971 drmFreeVersion(version);
972
973 return ret;
974 }
975
/* Build a virgl_winsys on the given DRM fd: verify the device offers 3D
 * (virgl) support and an acceptable driver version, then wire up the
 * winsys vtable, caches and lookup tables.  Returns NULL on failure;
 * the caller retains ownership of drmFD in that case (and must close
 * it via virgl_drm_screen_destroy's path on success). */
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;
   int gl = 0;
   struct drm_virtgpu_getparam getparam = {0};

   /* Refuse devices without virgl 3D support. */
   getparam.param = VIRTGPU_PARAM_3D_FEATURES;
   getparam.value = (uint64_t)(uintptr_t)&gl;
   ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret < 0 || !gl)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   qdws->num_delayed = 0;
   qdws->usecs = 1000000;   /* 1 second grace period for cached resources */
   LIST_INITHEAD(&qdws->delayed);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
   qdws->bo_names = util_hash_table_create(handle_hash, handle_compare);
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   /* Sync-file fence fds need driver version >= 0.1. */
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.get_caps = virgl_drm_get_caps;

   /* Probe for the capset-query fix so get_caps can request v2 caps. */
   uint32_t value = 0;
   getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
   getparam.value = (uint64_t)(uintptr_t)&value;
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret == 0) {
      if (value == 1)
         qdws->has_capset_query_fix = true;
   }

   return &qdws->base;

}
1045
1046 static struct util_hash_table *fd_tab = NULL;
1047 static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;
1048
/* pipe_screen::destroy override installed by virgl_drm_screen_create():
 * drops the per-fd refcount and, on the last reference, removes the
 * screen from fd_tab, closes the dup'd fd and chains to the original
 * destroy callback stashed in winsys_priv. */
static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   mtx_unlock(&virgl_screen_mutex);

   /* Outside the lock: invoke the real destroy saved at creation time. */
   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}
1069
/* Hash an fd key by the identity of the file it refers to, so different
 * descriptors for the same device node hash alike. */
static unsigned hash_fd(void *key)
{
   int fd = pointer_to_intptr(key);
   struct stat stat;

   /* Zero the buffer so a failing fstat() yields a stable hash instead
    * of reading uninitialized stack memory. */
   memset(&stat, 0, sizeof(stat));
   fstat(fd, &stat);

   return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
}
1078
/* Compare two fd keys by the identity (dev/ino/rdev) of the files they
 * refer to; returns 0 when they denote the same file. */
static int compare_fd(void *key1, void *key2)
{
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);
   struct stat stat1, stat2;

   /* Zero both buffers so a failing fstat() compares deterministically
    * rather than on uninitialized stack data. */
   memset(&stat1, 0, sizeof(stat1));
   memset(&stat2, 0, sizeof(stat2));
   fstat(fd1, &stat1);
   fstat(fd2, &stat2);

   return stat1.st_dev != stat2.st_dev ||
          stat1.st_ino != stat2.st_ino ||
          stat1.st_rdev != stat2.st_rdev;
}
1091
1092 struct pipe_screen *
1093 virgl_drm_screen_create(int fd)
1094 {
1095 struct pipe_screen *pscreen = NULL;
1096
1097 mtx_lock(&virgl_screen_mutex);
1098 if (!fd_tab) {
1099 fd_tab = util_hash_table_create(hash_fd, compare_fd);
1100 if (!fd_tab)
1101 goto unlock;
1102 }
1103
1104 pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
1105 if (pscreen) {
1106 virgl_screen(pscreen)->refcnt++;
1107 } else {
1108 struct virgl_winsys *vws;
1109 int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1110
1111 vws = virgl_drm_winsys_create(dup_fd);
1112 if (!vws) {
1113 close(dup_fd);
1114 goto unlock;
1115 }
1116
1117 pscreen = virgl_create_screen(vws);
1118 if (pscreen) {
1119 util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);
1120
1121 /* Bit of a hack, to avoid circular linkage dependency,
1122 * ie. pipe driver having to call in to winsys, we
1123 * override the pipe drivers screen->destroy():
1124 */
1125 virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
1126 pscreen->destroy = virgl_drm_screen_destroy;
1127 }
1128 }
1129
1130 unlock:
1131 mtx_unlock(&virgl_screen_mutex);
1132 return pscreen;
1133 }