2cf8b4ba076d00637f10df8e7c3fbedabe7fd760
[mesa.git] / src / gallium / winsys / virgl / drm / virgl_drm_winsys.c
1 /*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <errno.h>
25 #include <fcntl.h>
26 #include <stdio.h>
27 #include <sys/ioctl.h>
28 #include <sys/stat.h>
29
30 #include "os/os_mman.h"
31 #include "util/os_time.h"
32 #include "util/u_memory.h"
33 #include "util/u_format.h"
34 #include "util/u_hash_table.h"
35 #include "util/u_inlines.h"
36 #include "state_tracker/drm_driver.h"
37 #include "virgl/virgl_screen.h"
38 #include "virgl/virgl_public.h"
39
40 #include <xf86drm.h>
41 #include <libsync.h>
42 #include "virtgpu_drm.h"
43
44 #include "virgl_drm_winsys.h"
45 #include "virgl_drm_public.h"
46
47
48 #define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
49 #define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(1, 0)
50
51
52 static inline boolean can_cache_resource(struct virgl_hw_res *res)
53 {
54 return res->cacheable == TRUE;
55 }
56
/* Free a hardware resource: drop it from the winsys name/handle lookup
 * tables, unmap any CPU mapping, close the attached fence fd and finally
 * release the GEM handle.  Caller must hold the last reference.
 */
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   /* If the buffer was flinked, remove the global name -> res mapping. */
   if (res->flinked) {
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_remove(qdws->bo_names,
                             (void *)(uintptr_t)res->flink);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   /* Remove the GEM handle -> res mapping used for handle/prime imports. */
   if (res->bo_handle) {
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_remove(qdws->bo_handles,
                             (void *)(uintptr_t)res->bo_handle);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   if (res->ptr)
      os_munmap(res->ptr, res->size);

   /* -1 means "no sync file attached" throughout this file. */
   if (res->fence_fd != -1)
      close(res->fence_fd);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}
87
88 static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws,
89 struct virgl_hw_res *res)
90 {
91 struct drm_virtgpu_3d_wait waitcmd;
92 int ret;
93
94 memset(&waitcmd, 0, sizeof(waitcmd));
95 waitcmd.handle = res->bo_handle;
96 waitcmd.flags = VIRTGPU_WAIT_NOWAIT;
97
98 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
99 if (ret && errno == EBUSY)
100 return TRUE;
101 return FALSE;
102 }
103
104 static void
105 virgl_cache_flush(struct virgl_drm_winsys *qdws)
106 {
107 struct list_head *curr, *next;
108 struct virgl_hw_res *res;
109
110 mtx_lock(&qdws->mutex);
111 curr = qdws->delayed.next;
112 next = curr->next;
113
114 while (curr != &qdws->delayed) {
115 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
116 LIST_DEL(&res->head);
117 virgl_hw_res_destroy(qdws, res);
118 curr = next;
119 next = curr->next;
120 }
121 mtx_unlock(&qdws->mutex);
122 }
/* Tear down the winsys: drain the resource reuse cache, then free the
 * lookup tables, mutexes and the winsys object itself.
 */
static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   /* Destroy cached resources before the tables they are indexed in. */
   virgl_cache_flush(qdws);

   util_hash_table_destroy(qdws->bo_handles);
   util_hash_table_destroy(qdws->bo_names);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}
137
138 static void
139 virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
140 {
141 struct list_head *curr, *next;
142 struct virgl_hw_res *res;
143 int64_t now;
144
145 now = os_time_get();
146 curr = qdws->delayed.next;
147 next = curr->next;
148 while (curr != &qdws->delayed) {
149 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
150 if (!os_time_timeout(res->start, res->end, now))
151 break;
152
153 LIST_DEL(&res->head);
154 virgl_hw_res_destroy(qdws, res);
155 curr = next;
156 next = curr->next;
157 }
158 }
159
/* Release the reference held in *dres and repoint it at sres (which may be
 * NULL to simply unref).  When the old resource's refcount drops to zero it
 * is either destroyed immediately or, for cacheable buffers, parked on the
 * delayed list for reuse, expiring qdws->usecs after insertion.
 */
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         /* Opportunistically evict expired entries while holding the lock. */
         virgl_cache_list_check_free(qdws);

         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}
182
/* Allocate a fresh host resource via DRM_IOCTL_VIRTGPU_RESOURCE_CREATE.
 * Returns a new virgl_hw_res holding one reference, or NULL on failure.
 * The stride passed to the kernel is a tightly packed row of format blocks.
 */
static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = format;
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;
   res->format = format;

   /* The kernel returns both a host-side resource id and a GEM handle. */
   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->stride = stride;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->num_cs_references, 0);
   res->fence_fd = -1;   /* no sync file attached yet */
   return res;
}
237
238 static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
239 struct virgl_hw_res *res,
240 uint32_t size, uint32_t bind,
241 uint32_t format)
242 {
243 if (res->bind != bind)
244 return 0;
245 if (res->format != format)
246 return 0;
247 if (res->size < size)
248 return 0;
249 if (res->size > size * 2)
250 return 0;
251
252 if (virgl_drm_resource_is_busy(qdws, res)) {
253 return -1;
254 }
255
256 return 1;
257 }
258
259 static int
260 virgl_bo_transfer_put(struct virgl_winsys *vws,
261 struct virgl_hw_res *res,
262 const struct pipe_box *box,
263 uint32_t stride, uint32_t layer_stride,
264 uint32_t buf_offset, uint32_t level)
265 {
266 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
267 struct drm_virtgpu_3d_transfer_to_host tohostcmd;
268
269 memset(&tohostcmd, 0, sizeof(tohostcmd));
270 tohostcmd.bo_handle = res->bo_handle;
271 tohostcmd.box.x = box->x;
272 tohostcmd.box.y = box->y;
273 tohostcmd.box.z = box->z;
274 tohostcmd.box.w = box->width;
275 tohostcmd.box.h = box->height;
276 tohostcmd.box.d = box->depth;
277 tohostcmd.offset = buf_offset;
278 tohostcmd.level = level;
279 // tohostcmd.stride = stride;
280 // tohostcmd.layer_stride = stride;
281 return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
282 }
283
284 static int
285 virgl_bo_transfer_get(struct virgl_winsys *vws,
286 struct virgl_hw_res *res,
287 const struct pipe_box *box,
288 uint32_t stride, uint32_t layer_stride,
289 uint32_t buf_offset, uint32_t level)
290 {
291 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
292 struct drm_virtgpu_3d_transfer_from_host fromhostcmd;
293
294 memset(&fromhostcmd, 0, sizeof(fromhostcmd));
295 fromhostcmd.bo_handle = res->bo_handle;
296 fromhostcmd.level = level;
297 fromhostcmd.offset = buf_offset;
298 // fromhostcmd.stride = stride;
299 // fromhostcmd.layer_stride = layer_stride;
300 fromhostcmd.box.x = box->x;
301 fromhostcmd.box.y = box->y;
302 fromhostcmd.box.z = box->z;
303 fromhostcmd.box.w = box->width;
304 fromhostcmd.box.h = box->height;
305 fromhostcmd.box.d = box->depth;
306 return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
307 }
308
309 static struct virgl_hw_res *
310 virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
311 enum pipe_texture_target target,
312 uint32_t format,
313 uint32_t bind,
314 uint32_t width,
315 uint32_t height,
316 uint32_t depth,
317 uint32_t array_size,
318 uint32_t last_level,
319 uint32_t nr_samples,
320 uint32_t size)
321 {
322 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
323 struct virgl_hw_res *res, *curr_res;
324 struct list_head *curr, *next;
325 int64_t now;
326 int ret = 0;
327
328 /* only store binds for vertex/index/const buffers */
329 if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
330 bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
331 goto alloc;
332
333 mtx_lock(&qdws->mutex);
334
335 res = NULL;
336 curr = qdws->delayed.next;
337 next = curr->next;
338
339 now = os_time_get();
340 while (curr != &qdws->delayed) {
341 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
342
343 if (!res && ((ret = virgl_is_res_compat(qdws, curr_res, size, bind, format)) > 0))
344 res = curr_res;
345 else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
346 LIST_DEL(&curr_res->head);
347 virgl_hw_res_destroy(qdws, curr_res);
348 } else
349 break;
350
351 if (ret == -1)
352 break;
353
354 curr = next;
355 next = curr->next;
356 }
357
358 if (!res && ret != -1) {
359 while (curr != &qdws->delayed) {
360 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
361 ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
362 if (ret > 0) {
363 res = curr_res;
364 break;
365 }
366 if (ret == -1)
367 break;
368 curr = next;
369 next = curr->next;
370 }
371 }
372
373 if (res) {
374 LIST_DEL(&res->head);
375 --qdws->num_delayed;
376 mtx_unlock(&qdws->mutex);
377 pipe_reference_init(&res->reference, 1);
378 return res;
379 }
380
381 mtx_unlock(&qdws->mutex);
382
383 alloc:
384 res = virgl_drm_winsys_resource_create(qws, target, format, bind,
385 width, height, depth, array_size,
386 last_level, nr_samples, size);
387 if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
388 bind == VIRGL_BIND_VERTEX_BUFFER)
389 res->cacheable = TRUE;
390 return res;
391 }
392
/* Import a resource from a winsys handle (flink name, KMS handle or prime
 * fd).  Existing imports are resolved through the bo_names/bo_handles tables
 * so one underlying buffer maps to a single virgl_hw_res; otherwise a new
 * wrapper is created and its host resource id queried from the kernel.
 * Returns a referenced resource, or NULL on failure.
 */
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;
   uint32_t handle = whandle->handle;

   /* Sub-allocated buffers (nonzero offset) cannot be represented here. */
   if (whandle->offset != 0) {
      fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* Fast path: this flink name was imported before. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   /* Convert a prime fd into a GEM handle for the lookup below. */
   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         res = NULL;
         goto done;
      }
   }

   /* Fast path: this GEM handle is already wrapped. */
   res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(qdws, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      /* Flink name: open it to obtain a GEM handle. */
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   res->name = handle;

   /* Ask the kernel for the host-side resource id, size and stride. */
   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      /* NOTE(review): the GEM handle acquired above is not closed on this
       * error path, so it appears to leak -- confirm and fix separately. */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;
   res->fence_fd = -1;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}
478
/* Export a resource as a winsys handle.  SHARED handles flink the buffer
 * (once) and register it in bo_names; FD handles create a prime fd and
 * register the GEM handle in bo_handles so a later re-import resolves to the
 * same resource; KMS handles hand out the raw GEM handle.  Returns FALSE on
 * failure.
 */
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flinked) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flinked = TRUE;
         res->flink = flink.name;

         /* Register so create_handle() can find this res by name. */
         mtx_lock(&qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }
   whandle->stride = stride;
   return TRUE;
}
518
/* Public unref entry point: drops one reference on hres, destroying or
 * caching the resource when it was the last one.
 */
static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
                                            struct virgl_hw_res *hres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_drm_resource_reference(qdws, &hres, NULL);
}
526
527 static void *virgl_drm_resource_map(struct virgl_winsys *qws,
528 struct virgl_hw_res *res)
529 {
530 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
531 struct drm_virtgpu_map mmap_arg;
532 void *ptr;
533
534 if (res->ptr)
535 return res->ptr;
536
537 memset(&mmap_arg, 0, sizeof(mmap_arg));
538 mmap_arg.handle = res->bo_handle;
539 if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
540 return NULL;
541
542 ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
543 qdws->fd, mmap_arg.offset);
544 if (ptr == MAP_FAILED)
545 return NULL;
546
547 res->ptr = ptr;
548 return ptr;
549
550 }
551
/* Block until the host is done with the resource. */
static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
again:
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   /* NOTE(review): drmIoctl() returns -1 with errno set (and restarts on
    * EINTR/EAGAIN internally), so this -EAGAIN comparison likely never
    * matches; verify the intended retry condition before relying on it. */
   if (ret == -EAGAIN)
      goto again;
}
566
/* Check whether a resource is already recorded in the command buffer's
 * relocation list, using a small direct-mapped hash over res_handle.  On a
 * hash slot hit that doesn't match, fall back to a linear scan and refresh
 * the cached index for next time.
 */
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   /* sizeof(is_handle_added) serves as the slot count; assumes it is a
    * power-of-two-sized byte array -- TODO confirm in virgl_drm_winsys.h. */
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      /* Slot collision: scan the full relocation list. */
      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}
587
/* Append a resource to the command buffer's relocation list, growing the
 * backing arrays in chunks of 256 when full.  Takes a reference on the
 * resource, bumps its CS-reference count and refreshes the lookup hash used
 * by virgl_drm_lookup_res().
 */
static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         /* On allocation failure the resource is silently dropped from the
          * relocation list; the subsequent submit may misrender. */
         fprintf(stderr,"failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         /* res_bo was already grown but nres is unchanged; this is safe,
          * the array is merely over-allocated. */
         fprintf(stderr,"failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   /* Reference the resource into the new slot and record its GEM handle
    * for the execbuffer ioctl. */
   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}
625
626 static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
627 struct virgl_drm_cmd_buf *cbuf)
628 {
629 int i;
630
631 for (i = 0; i < cbuf->cres; i++) {
632 p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
633 virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
634 }
635 cbuf->cres = 0;
636 }
637
638 static void virgl_drm_emit_res(struct virgl_winsys *qws,
639 struct virgl_cmd_buf *_cbuf,
640 struct virgl_hw_res *res, boolean write_buf)
641 {
642 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
643 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
644 boolean already_in_list = virgl_drm_lookup_res(cbuf, res);
645
646 if (write_buf)
647 cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;
648
649 if (!already_in_list)
650 virgl_drm_add_res(qdws, cbuf, res);
651 }
652
653 static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
654 struct virgl_cmd_buf *_cbuf,
655 struct virgl_hw_res *res)
656 {
657 if (!p_atomic_read(&res->num_cs_references))
658 return FALSE;
659
660 return TRUE;
661 }
662
663 static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
664 uint32_t size)
665 {
666 struct virgl_drm_cmd_buf *cbuf;
667
668 cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
669 if (!cbuf)
670 return NULL;
671
672 cbuf->ws = qws;
673
674 cbuf->nres = 512;
675 cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
676 if (!cbuf->res_bo) {
677 FREE(cbuf);
678 return NULL;
679 }
680 cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
681 if (!cbuf->res_hlist) {
682 FREE(cbuf->res_bo);
683 FREE(cbuf);
684 return NULL;
685 }
686
687 cbuf->buf = CALLOC(size, sizeof(uint32_t));
688 if (!cbuf->buf) {
689 FREE(cbuf->res_hlist);
690 FREE(cbuf->res_bo);
691 FREE(cbuf);
692 return NULL;
693 }
694
695 cbuf->base.buf = cbuf->buf;
696 cbuf->base.in_fence_fd = -1;
697 return &cbuf->base;
698 }
699
700 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
701 {
702 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
703
704 virgl_drm_release_all_res(virgl_drm_winsys(cbuf->ws), cbuf);
705 FREE(cbuf->res_hlist);
706 FREE(cbuf->res_bo);
707 FREE(cbuf->buf);
708 FREE(cbuf);
709
710 }
711
/* Submit the accumulated command dwords to the kernel.  Honours an optional
 * in-fence fd (wait before execution) and an optional out-fence request.
 * The command buffer is reset and all recorded resource references released
 * regardless of ioctl success.  Returns the drmIoctl result (0 on success).
 */
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       int in_fence_fd, int *out_fence_fd)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   /* Nothing recorded: trivially succeed. */
   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;   /* dwords -> bytes */
   /* Hand the kernel the GEM handles referenced by the commands. */
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;
   eb.fence_fd = -1;

   if (in_fence_fd != -1) {
      eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
      eb.fence_fd = in_fence_fd;
   }

   if (out_fence_fd != NULL)
      eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   /* On success with FENCE_FD_OUT the kernel wrote the new fence fd back
    * into eb.fence_fd. */
   if (out_fence_fd != NULL)
      *out_fence_fd = eb.fence_fd;

   virgl_drm_release_all_res(qdws, cbuf);

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
   return ret;
}
752
/* Query host renderer capabilities.  Starts from safe defaults, then asks
 * the kernel: cap set 2 (full union virgl_caps) when the capset-query fix is
 * advertised, otherwise the legacy v1 set; retries with v1 if the kernel
 * rejects set 2 with EINVAL.
 */
static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (vdws->has_capset_query_fix) {
      /* if we have the query fix - try and get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to v1 */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}
784
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

/* Hash callback: keys are small integers (GEM handles / flink names)
 * stuffed into pointers, so the value itself serves as the hash. */
static unsigned handle_hash(void *key)
{
   return PTR_TO_UINT(key);
}

/* Comparison callback: zero when the two keys are equal. */
static int handle_compare(void *key1, void *key2)
{
   return PTR_TO_UINT(key1) == PTR_TO_UINT(key2) ? 0 : 1;
}
796
797 static struct pipe_fence_handle *
798 virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
799 {
800 struct virgl_hw_res *res;
801
802 res = virgl_drm_winsys_resource_cache_create(vws,
803 PIPE_BUFFER,
804 PIPE_FORMAT_R8_UNORM,
805 VIRGL_BIND_CUSTOM,
806 8, 1, 1, 0, 0, 0, 8);
807
808 res->fence_fd = fd;
809 return (struct pipe_fence_handle *)res;
810 }
811
/* Wait for a fence.  timeout is in nanoseconds (PIPE convention), with 0
 * meaning "just poll" and PIPE_TIMEOUT_INFINITE meaning block forever.
 * Returns true when the fence signalled within the timeout.
 */
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *fence,
                             uint64_t timeout)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct virgl_hw_res *res = virgl_hw_res(fence);

   /* Pure busy query, no waiting. */
   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vdws, res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      /* os_time_get() is in microseconds; convert the ns timeout. */
      timeout /= 1000;
      /* Poll-with-sleep loop until idle or the deadline passes. */
      while (virgl_drm_resource_is_busy(vdws, res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   /* Infinite wait: block in the kernel, then on the sync file if any. */
   virgl_drm_resource_wait(vws, res);

   if (res->fence_fd != -1) {
      /* NOTE(review): timeout is PIPE_TIMEOUT_INFINITE here, so the ms
       * value handed to sync_wait() is enormous -- presumably intended as
       * "effectively forever"; confirm sync_wait() handles it. */
      int ret = sync_wait(res->fence_fd, timeout / 1000000);
      return ret == 0;
   }

   return TRUE;
}
841
/* Fence reference management: fences are plain resources underneath, so
 * delegate to the resource reference helper.
 */
static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(vws);

   virgl_drm_resource_reference(qdws, (struct virgl_hw_res **)dst,
                                virgl_hw_res(src));
}
850
851 static void virgl_fence_server_sync(struct virgl_winsys *vws,
852 struct virgl_cmd_buf *cbuf,
853 struct pipe_fence_handle *fence)
854 {
855 struct virgl_hw_res *hw_res = virgl_hw_res(fence);
856
857 /* if not an external fence, then nothing more to do without preemption: */
858 if (hw_res->fence_fd == -1)
859 return;
860
861 sync_accumulate("virgl", &cbuf->in_fence_fd, hw_res->fence_fd);
862 }
863
/* Export the fence's sync file as a new fd owned by the caller.
 * NOTE(review): when no fd is attached (fence_fd == -1), dup() fails with
 * EBADF and -1 is returned; callers should only use this path when the
 * winsys advertises supports_fences -- confirm.
 */
static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *fence)
{
   struct virgl_hw_res *hw_res = virgl_hw_res(fence);

   return dup(hw_res->fence_fd);
}
871
872 static int virgl_drm_get_version(int fd)
873 {
874 int ret;
875 drmVersionPtr version;
876
877 version = drmGetVersion(fd);
878
879 if (!version)
880 ret = -EFAULT;
881 else if (version->version_major != 0)
882 ret = -EINVAL;
883 else
884 ret = version->version_minor;
885
886 drmFreeVersion(version);
887
888 return ret;
889 }
890
/* Build a virgl winsys on top of a virtio-gpu DRM fd.  Fails (returns NULL)
 * when the device lacks 3D support or the driver version cannot be read.
 * Takes ownership conceptually of the fd for the lifetime of the winsys.
 */
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;
   int gl = 0;
   struct drm_virtgpu_getparam getparam = {0};

   /* Require host 3D acceleration; a plain 2D virtio-gpu is useless here. */
   getparam.param = VIRTGPU_PARAM_3D_FEATURES;
   getparam.value = (uint64_t)(uintptr_t)&gl;
   ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret < 0 || !gl)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   qdws->num_delayed = 0;
   /* Cached resources are kept around for one second before eviction. */
   qdws->usecs = 1000000;
   LIST_INITHEAD(&qdws->delayed);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
   qdws->bo_names = util_hash_table_create(handle_hash, handle_compare);
   qdws->base.destroy = virgl_drm_winsys_destroy;

   /* Wire up the virgl_winsys vtable. */
   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   /* Fence fds need kernel >= VIRGL_DRM_VERSION_FENCE_FD (1.0). */
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.get_caps = virgl_drm_get_caps;

   /* Probe whether the kernel has the capset-query fix (enables capset 2
    * queries in virgl_drm_get_caps). */
   uint32_t value = 0;
   getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
   getparam.value = (uint64_t)(uintptr_t)&value;
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret == 0) {
      if (value == 1)
         qdws->has_capset_query_fix = true;
   }

   return &qdws->base;

}
960
/* Map of dup'ed DRM fd -> pipe_screen, so one screen is shared per device. */
static struct util_hash_table *fd_tab = NULL;
/* Guards fd_tab and each screen's refcnt. */
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;
963
/* pipe_screen::destroy override installed by virgl_drm_screen_create().
 * Screens are shared per-device and refcounted; the pipe driver's original
 * destroy callback (stashed in winsys_priv) runs only when the last
 * reference goes away.
 */
static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   mtx_unlock(&virgl_screen_mutex);

   /* Run the real destroy outside the lock. */
   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}
984
/* Hash an fd by the identity of the file it refers to (dev/inode/rdev),
 * so two fds for the same device node hash alike.
 * NOTE(review): the fstat() return value is unchecked; an invalid fd would
 * hash an uninitialized stat buffer -- callers must pass valid fds.
 */
static unsigned hash_fd(void *key)
{
   int fd = pointer_to_intptr(key);
   struct stat stat;
   fstat(fd, &stat);

   return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
}
993
/* Two fds compare equal when they refer to the same underlying file,
 * judged by (st_dev, st_ino, st_rdev).  Returns nonzero when different.
 */
static int compare_fd(void *key1, void *key2)
{
   struct stat s1, s2;

   fstat(pointer_to_intptr(key1), &s1);
   fstat(pointer_to_intptr(key2), &s2);

   return s1.st_dev != s2.st_dev ||
          s1.st_ino != s2.st_ino ||
          s1.st_rdev != s2.st_rdev;
}
1006
1007 struct pipe_screen *
1008 virgl_drm_screen_create(int fd)
1009 {
1010 struct pipe_screen *pscreen = NULL;
1011
1012 mtx_lock(&virgl_screen_mutex);
1013 if (!fd_tab) {
1014 fd_tab = util_hash_table_create(hash_fd, compare_fd);
1015 if (!fd_tab)
1016 goto unlock;
1017 }
1018
1019 pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
1020 if (pscreen) {
1021 virgl_screen(pscreen)->refcnt++;
1022 } else {
1023 struct virgl_winsys *vws;
1024 int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
1025
1026 vws = virgl_drm_winsys_create(dup_fd);
1027 if (!vws) {
1028 close(dup_fd);
1029 goto unlock;
1030 }
1031
1032 pscreen = virgl_create_screen(vws);
1033 if (pscreen) {
1034 util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);
1035
1036 /* Bit of a hack, to avoid circular linkage dependency,
1037 * ie. pipe driver having to call in to winsys, we
1038 * override the pipe drivers screen->destroy():
1039 */
1040 virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
1041 pscreen->destroy = virgl_drm_screen_destroy;
1042 }
1043 }
1044
1045 unlock:
1046 mtx_unlock(&virgl_screen_mutex);
1047 return pscreen;
1048 }