util: don't include p_defines.h and u_pointer.h from gallium
[mesa.git] src/gallium/winsys/virgl/drm/virgl_drm_winsys.c
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "os/os_mman.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "util/u_pointer.h"
#include "state_tracker/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"

#include <xf86drm.h>
#include <libsync.h>
#include "drm-uapi/virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"

#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)
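
/* Fence FDs (sync files) require virtio-gpu DRM minor version >= 1;
 * virgl_drm_winsys_create() checks this before setting supports_fences.
 */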

/* Gets a pointer to the virgl_hw_res containing the pointed to cache entry. */
#define cache_entry_container_res(ptr) \
   (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))

static inline boolean can_cache_resource_with_bind(uint32_t bind)
{
   return bind == VIRGL_BIND_CONSTANT_BUFFER ||
          bind == VIRGL_BIND_INDEX_BUFFER ||
          bind == VIRGL_BIND_VERTEX_BUFFER ||
          bind == VIRGL_BIND_CUSTOM ||
          bind == VIRGL_BIND_STAGING;
}

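/* Free a hardware resource outright: drop it from the handle/name hash
 * tables, tear down any CPU mapping, and close the GEM handle.
 */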
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   mtx_lock(&qdws->bo_handles_mutex);
   _mesa_hash_table_remove_key(qdws->bo_handles,
                               (void *)(uintptr_t)res->bo_handle);
   if (res->flink_name)
      _mesa_hash_table_remove_key(qdws->bo_names,
                                  (void *)(uintptr_t)res->flink_name);
   mtx_unlock(&qdws->bo_handles_mutex);
   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}

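/* The maybe_busy flag lets us skip the wait ioctl entirely for resources
 * that were never submitted; external (imported/exported) resources may be
 * in use by another process, so they are always checked with the kernel.
 */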
static boolean virgl_drm_resource_is_busy(struct virgl_winsys *vws,
                                          struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return FALSE;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return TRUE;

   p_atomic_set(&res->maybe_busy, false);

   return FALSE;
}

static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_resource_cache_flush(&qdws->cache);

   _mesa_hash_table_destroy(qdws->bo_handles, NULL);
   _mesa_hash_table_destroy(qdws->bo_names, NULL);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}

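/* Drop a reference; on the last unref the BO is either recycled through the
 * resource cache or, for uncacheable bind types and externally shared BOs,
 * destroyed immediately.
 */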
static void virgl_drm_resource_reference(struct virgl_winsys *qws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource_with_bind(old->bind) ||
          p_atomic_read(&old->external)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size,
                                 bool for_fencing)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = pipe_to_virgl_format(format);
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);

   /* A newly created resource is considered busy by the kernel until the
    * command is retired. But for our purposes, we can consider it idle
    * unless it is used for fencing.
    */
   p_atomic_set(&res->maybe_busy, for_fencing);

   virgl_resource_cache_entry_init(&res->cache_entry, size, bind, format);

   return res;
}

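/* Host transfers: the commented-out stride/layer_stride assignments below
 * are kept as a reminder; presumably the virtgpu uapi in use here does not
 * honor those fields, so the host derives the layout itself.
 */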
static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;
   // tohostcmd.stride = stride;
   // tohostcmd.layer_stride = layer_stride;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}

static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   // fromhostcmd.stride = stride;
   // fromhostcmd.layer_stride = layer_stride;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}

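/* Allocation entry point used by the driver: try to reuse an idle,
 * compatible BO from the cache before asking the kernel for a new one.
 */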
static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       uint32_t format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res;
   struct virgl_resource_cache_entry *entry;

   if (!can_cache_resource_with_bind(bind))
      goto alloc;

   mtx_lock(&qdws->mutex);

   entry = virgl_resource_cache_remove_compatible(&qdws->cache, size,
                                                  bind, format);
   if (entry) {
      res = cache_entry_container_res(entry);
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   res = virgl_drm_winsys_resource_create(qws, target, format, bind,
                                          width, height, depth, array_size,
                                          last_level, nr_samples, size, false);
   return res;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle,
                                        uint32_t *plane,
                                        uint32_t *stride,
                                        uint32_t *plane_offset,
                                        uint64_t *modifier)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res = NULL;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0 && whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      _debug_printf("attempt to import unsupported winsys offset %u\n",
                    whandle->offset);
      return NULL;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      *plane = whandle->plane;
      *stride = whandle->stride;
      *plane_offset = whandle->offset;
      *modifier = whandle->modifier;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r)
         goto done;
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto done;
   }

   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(&qdws->base, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
      res->flink_name = whandle->handle;
   }

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* TODO: close the GEM handle as well */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, true);
   res->num_cs_references = 0;

   if (res->flink_name)
      _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
   _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}

static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flink_name) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flink_name = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   p_atomic_set(&res->external, true);

   whandle->stride = stride;
   return TRUE;
}

static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret)
      _debug_printf("wait ioctl failed (errno %d) - slow GPU or hang?\n", errno);

   p_atomic_set(&res->maybe_busy, false);
}

static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
                                     int initial_size)
{
   cbuf->nres = initial_size;
   cbuf->cres = 0;

   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo)
      return false;

   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      return false;
   }

   return true;
}

static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }
   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
}

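/* is_handle_added acts as a small direct-mapped table indexed by the low
 * bits of the resource handle (the mask assumes its entries are one byte
 * each, so sizeof() yields the entry count).  reloc_indices_hashlist caches
 * the last known position; on a collision we fall back to a linear scan.
 */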
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}

static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         _debug_printf("failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         _debug_printf("failure to add hlist relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

/* This is called after the cbuf is submitted. */
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      /* mark all BOs busy after submission */
      p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);

      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }

   cbuf->cres = 0;

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
}

static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}

static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct virgl_hw_res *res)
{
   if (!p_atomic_read(&res->num_cs_references))
      return FALSE;

   return TRUE;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
                                                      uint32_t size)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   if (!virgl_drm_alloc_res_list(cbuf, 512)) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->buf = CALLOC(size, sizeof(uint32_t));
   if (!cbuf->buf) {
      FREE(cbuf->res_hlist);
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->in_fence_fd = -1;
   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   virgl_drm_free_res_list(cbuf);

   FREE(cbuf->buf);
   FREE(cbuf);
}

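/* Two fence flavors exist: sync-file FD fences on kernels that support
 * VIRGL_DRM_VERSION_FENCE_FD, and a legacy scheme that submits a dummy
 * resource and polls its busy state (virgl_drm_fence_create_legacy).
 */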
static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = dup(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
{
   struct virgl_drm_fence *fence;

   assert(!vws->supports_fences);

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence)
      return NULL;
   fence->fd = -1;

   /* Resources for fences should not be from the cache, since we are basing
    * the fence status on the resource creation busy status.
    */
   fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
         PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
   if (!fence->hw_res) {
      FREE(fence);
      return NULL;
   }

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

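/* Submit the accumulated command dwords together with the list of BO handles
 * the stream references; optionally waits on an in-fence FD and requests an
 * out-fence FD when the caller asked for a fence back.
 */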
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   eb.fence_fd = -1;
   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   virgl_drm_clear_res_list(cbuf);

   return ret;
}

static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (vdws->has_capset_query_fix) {
      /* if we have the query fix - try and get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to v1 */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}

static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
   if (!vws->supports_fences)
      return NULL;

   return virgl_drm_fence_create(vws, fd, true);
}

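/* Timeouts arrive in nanoseconds.  For sync-file fences they are rounded up
 * to milliseconds for sync_wait(); for legacy fences they are converted to
 * microseconds to match os_time_get() and the fence resource is polled.
 */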
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;

      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vws, fence->hw_res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return false;
         os_time_sleep(10);
      }
      return true;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return true;
}

static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
         virgl_hw_res_destroy(vdws, dfence->hw_res);
      }
      FREE(dfence);
   }

   *dst = src;
}

static void virgl_fence_server_sync(struct virgl_winsys *vws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct pipe_fence_handle *_fence)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return;

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->external)
      return;

   sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
}

static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *_fence)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return -1;

   return dup(fence->fd);
}

static int virgl_drm_get_version(int fd)
{
   int ret;
   drmVersionPtr version;

   version = drmGetVersion(fd);

   if (!version)
      ret = -EFAULT;
   else if (version->version_major != 0)
      ret = -EINVAL;
   else
      ret = VIRGL_DRM_VERSION(0, version->version_minor);

   drmFreeVersion(version);

   return ret;
}

static bool
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   return virgl_drm_resource_is_busy(&qdws->base, res);
}

static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}

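/* Probe the kernel before committing to a winsys: the device must expose 3D
 * (virgl) support, and the DRM version decides whether fence FDs can be
 * used.
 */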
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   static const unsigned CACHE_TIMEOUT_USEC = 1000000;
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;
   int gl = 0;
   struct drm_virtgpu_getparam getparam = {0};

   getparam.param = VIRTGPU_PARAM_3D_FEATURES;
   getparam.value = (uint64_t)(uintptr_t)&gl;
   ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret < 0 || !gl)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
                             virgl_drm_resource_cache_entry_is_busy,
                             virgl_drm_resource_cache_entry_release,
                             qdws);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   qdws->bo_handles = util_hash_table_create_ptr_keys();
   qdws->bo_names = util_hash_table_create_ptr_keys();
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_reference = virgl_drm_resource_reference;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.get_caps = virgl_drm_get_caps;

   uint32_t value = 0;
   getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
   getparam.value = (uint64_t)(uintptr_t)&value;
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret == 0) {
      if (value == 1)
         qdws->has_capset_query_fix = true;
   }

   return &qdws->base;
}

static struct hash_table *fd_tab = NULL;
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      _mesa_hash_table_remove_key(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}

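/* Screens are deduplicated per device: fd_tab maps a dup'ed fd to its
 * pipe_screen, so repeated opens of the same device share one screen, with
 * refcnt tracking the users.
 */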
struct pipe_screen *
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct pipe_screen *pscreen = NULL;

   mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create_fd_keys();
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);

      vws = virgl_drm_winsys_create(dup_fd);
      if (!vws) {
         close(dup_fd);
         goto unlock;
      }

      pscreen = virgl_create_screen(vws, config);
      if (pscreen) {
         _mesa_hash_table_insert(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe drivers screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}