725d1f7cbf3f62cfa9d933d48a832c8c2dab064f
[mesa.git] / src / gallium / winsys / virgl / drm / virgl_drm_winsys.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "os/os_mman.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "state_tracker/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"

#include <xf86drm.h>
#include <libsync.h>
#include "virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"

#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)

/* Gets a pointer to the virgl_hw_res containing the pointed-to cache entry. */
#define cache_entry_container_res(ptr) \
   (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))

static inline boolean can_cache_resource_with_bind(uint32_t bind)
{
   return bind == VIRGL_BIND_CONSTANT_BUFFER ||
          bind == VIRGL_BIND_INDEX_BUFFER ||
          bind == VIRGL_BIND_VERTEX_BUFFER ||
          bind == VIRGL_BIND_CUSTOM ||
          bind == VIRGL_BIND_STAGING;
}

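/* Drop all winsys-side state for a resource: unlink it from the bo-handle
 * (and, if flinked, bo-name) tables, unmap any CPU mapping, and close the
 * GEM handle so the kernel can release the backing object.
 */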
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   mtx_lock(&qdws->bo_handles_mutex);
   util_hash_table_remove(qdws->bo_handles,
                          (void *)(uintptr_t)res->bo_handle);
   if (res->flink_name)
      util_hash_table_remove(qdws->bo_names,
                             (void *)(uintptr_t)res->flink_name);
   mtx_unlock(&qdws->bo_handles_mutex);
   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}

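/* Ask the kernel, without blocking (VIRTGPU_WAIT_NOWAIT), whether the host
 * may still be using the resource. Resources never marked maybe-busy and
 * never shared externally are reported idle without issuing the ioctl.
 */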
static boolean virgl_drm_resource_is_busy(struct virgl_winsys *vws,
                                          struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return FALSE;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return TRUE;

   p_atomic_set(&res->maybe_busy, false);

   return FALSE;
}

static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_resource_cache_flush(&qdws->cache);

   util_hash_table_destroy(qdws->bo_handles);
   util_hash_table_destroy(qdws->bo_names);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}

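/* Standard reference-counting helper. When the last reference to the old
 * resource goes away, it is either returned to the resource cache (for
 * cacheable, non-external bind types) or destroyed outright.
 */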
static void virgl_drm_resource_reference(struct virgl_winsys *qws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource_with_bind(old->bind) ||
          p_atomic_read(&old->external)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size,
                                 bool for_fencing)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = format;
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->stride = stride;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);

   /* A newly created resource is considered busy by the kernel until the
    * command is retired. But for our purposes, we can consider it idle
    * unless it is used for fencing.
    */
   p_atomic_set(&res->maybe_busy, for_fencing);

   virgl_resource_cache_entry_init(&res->cache_entry, size, bind, format);

   return res;
}

static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;
   // tohostcmd.stride = stride;
   // tohostcmd.layer_stride = stride;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}

static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   // fromhostcmd.stride = stride;
   // fromhostcmd.layer_stride = layer_stride;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}

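/* Cache-aware resource allocation: for cacheable bind types, first try to
 * reuse a compatible idle entry from the resource cache before falling
 * back to a fresh allocation via DRM_IOCTL_VIRTGPU_RESOURCE_CREATE.
 */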
static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       uint32_t format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res;
   struct virgl_resource_cache_entry *entry;

   if (!can_cache_resource_with_bind(bind))
      goto alloc;

   mtx_lock(&qdws->mutex);

   entry = virgl_resource_cache_remove_compatible(&qdws->cache, size,
                                                  bind, format);
   if (entry) {
      res = cache_entry_container_res(entry);
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   res = virgl_drm_winsys_resource_create(qws, target, format, bind,
                                          width, height, depth, array_size,
                                          last_level, nr_samples, size, false);
   return res;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res = NULL;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0) {
      fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r)
         goto done;
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto done;
   }

   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(&qdws->base, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
      res->flink_name = whandle->handle;
   }

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, true);
   res->num_cs_references = 0;

   if (res->flink_name)
      util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}

static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flink_name) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flink_name = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   p_atomic_set(&res->external, true);

   whandle->stride = stride;
   return TRUE;
}

static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
again:
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret == -EAGAIN)
      goto again;

   p_atomic_set(&res->maybe_busy, false);
}

static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
                                     int initial_size)
{
   cbuf->nres = initial_size;
   cbuf->cres = 0;

   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo)
      return false;

   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      return false;
   }

   return true;
}

static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }
   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
}

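/* Check whether a resource is already in the cbuf's relocation list.
 * A small direct-mapped hash hint (res_handle masked to the byte size of
 * is_handle_added, which is presumably a power of two) makes the common
 * hit cheap; on a collision we fall back to a linear scan of res_bo and
 * refresh the hint.
 */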
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}

static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         fprintf(stderr, "failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         fprintf(stderr, "failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

/* This is called after the cbuf is submitted. */
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      /* mark all BOs busy after submission */
      p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);

      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }

   cbuf->cres = 0;

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
}

static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}

static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct virgl_hw_res *res)
{
   if (!p_atomic_read(&res->num_cs_references))
      return FALSE;

   return TRUE;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
                                                      uint32_t size)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   if (!virgl_drm_alloc_res_list(cbuf, 512)) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->buf = CALLOC(size, sizeof(uint32_t));
   if (!cbuf->buf) {
      FREE(cbuf->res_hlist);
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->in_fence_fd = -1;
   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   virgl_drm_free_res_list(cbuf);

   FREE(cbuf->buf);
   FREE(cbuf);
}

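/* Fences come in two flavors. With kernel fence-fd support
 * (VIRGL_DRM_VERSION_FENCE_FD) a fence wraps a sync-file fd; external fds
 * are duplicated so the caller keeps ownership of its copy. Without that
 * support, a legacy fence is a dummy resource whose busy status stands in
 * for fence completion.
 */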
static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = dup(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
{
   struct virgl_drm_fence *fence;

   assert(!vws->supports_fences);

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence)
      return NULL;
   fence->fd = -1;

   /* Resources for fences should not be from the cache, since we are basing
    * the fence status on the resource creation busy status.
    */
   fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
         PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
   if (!fence->hw_res) {
      FREE(fence);
      return NULL;
   }

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

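/* Flush the command stream: hand the dword buffer and the bo handle list
 * to the EXECBUFFER ioctl, wiring up an in-fence (if one was accumulated
 * on the cbuf) and requesting an out-fence when the caller asked for one.
 */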
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   eb.fence_fd = -1;
   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      fprintf(stderr, "got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   virgl_drm_clear_res_list(cbuf);

   return ret;
}

static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (vdws->has_capset_query_fix) {
      /* if we have the query fix, try to get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to v1 */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
   return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
   return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
   if (!vws->supports_fences)
      return NULL;

   return virgl_drm_fence_create(vws, fd, true);
}

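/* Wait for a fence with a timeout given in nanoseconds. For sync-file
 * fences this is converted (rounding up) to the millisecond granularity
 * that sync_wait() expects; timeouts too large for an int poll forever.
 * Legacy fences are polled via the busy status of their dummy resource.
 */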
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;

      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vws, fence->hw_res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return TRUE;
}

static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
         virgl_hw_res_destroy(vdws, dfence->hw_res);
      }
      FREE(dfence);
   }

   *dst = src;
}

static void virgl_fence_server_sync(struct virgl_winsys *vws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct pipe_fence_handle *_fence)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return;

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->external)
      return;

   sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
}

static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *_fence)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return -1;

   return dup(fence->fd);
}

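/* Pack the virtgpu DRM version as (major << 16) | minor so it can be
 * compared against VIRGL_DRM_VERSION_FENCE_FD. Only major version 0 is
 * recognized; anything else is rejected with a negative errno.
 */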
static int virgl_drm_get_version(int fd)
{
   int ret;
   drmVersionPtr version;

   version = drmGetVersion(fd);

   if (!version)
      ret = -EFAULT;
   else if (version->version_major != 0)
      ret = -EINVAL;
   else
      ret = VIRGL_DRM_VERSION(0, version->version_minor);

   drmFreeVersion(version);

   return ret;
}

static bool
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   return virgl_drm_resource_is_busy(&qdws->base, res);
}

static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}

static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   static const unsigned CACHE_TIMEOUT_USEC = 1000000;
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;
   int gl = 0;
   struct drm_virtgpu_getparam getparam = {0};

   getparam.param = VIRTGPU_PARAM_3D_FEATURES;
   getparam.value = (uint64_t)(uintptr_t)&gl;
   ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret < 0 || !gl)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
                             virgl_drm_resource_cache_entry_is_busy,
                             virgl_drm_resource_cache_entry_release,
                             qdws);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
   qdws->bo_names = util_hash_table_create(handle_hash, handle_compare);
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_reference = virgl_drm_resource_reference;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.get_caps = virgl_drm_get_caps;

   uint32_t value = 0;
   getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
   getparam.value = (uint64_t)(uintptr_t)&value;
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret == 0) {
      if (value == 1)
         qdws->has_capset_query_fix = true;
   }

   return &qdws->base;
}

static struct util_hash_table *fd_tab = NULL;
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}

static unsigned hash_fd(void *key)
{
   int fd = pointer_to_intptr(key);
   struct stat stat;
   fstat(fd, &stat);

   return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
}

static int compare_fd(void *key1, void *key2)
{
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);
   struct stat stat1, stat2;
   fstat(fd1, &stat1);
   fstat(fd2, &stat2);

   return stat1.st_dev != stat2.st_dev ||
          stat1.st_ino != stat2.st_ino ||
          stat1.st_rdev != stat2.st_rdev;
}

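/* Screens are deduplicated per device: fd_tab maps an fd's underlying
 * device (compared via fstat in hash_fd/compare_fd, since dup'ed fds
 * differ numerically) to an existing screen, which is reference-counted
 * instead of being recreated.
 */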
struct pipe_screen *
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct pipe_screen *pscreen = NULL;

   mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create(hash_fd, compare_fd);
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);

      vws = virgl_drm_winsys_create(dup_fd);
      if (!vws) {
         close(dup_fd);
         goto unlock;
      }

      pscreen = virgl_create_screen(vws, config);
      if (pscreen) {
         util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe driver's screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}