2 * Copyright 2014, 2015 Red Hat.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
28 #include <sys/ioctl.h>
31 #include "os/os_mman.h"
32 #include "util/os_time.h"
33 #include "util/u_memory.h"
34 #include "util/format/u_format.h"
35 #include "util/u_hash_table.h"
36 #include "util/u_inlines.h"
37 #include "util/u_pointer.h"
38 #include "frontend/drm_driver.h"
39 #include "virgl/virgl_screen.h"
40 #include "virgl/virgl_public.h"
44 #include "drm-uapi/virtgpu_drm.h"
46 #include "virgl_drm_winsys.h"
47 #include "virgl_drm_public.h"
/* Pack a DRM driver version as major<<16 | minor for easy comparison. */
#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
/* First virtgpu DRM version that supports fence FDs (0.1). */
#define VIRGL_DRM_VERSION_FENCE_FD      VIRGL_DRM_VERSION(0, 1)

/* Gets a pointer to the virgl_hw_res containing the pointed to cache entry. */
#define cache_entry_container_res(ptr) \
    (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))
57 static inline boolean
can_cache_resource_with_bind(uint32_t bind
)
59 return bind
== VIRGL_BIND_CONSTANT_BUFFER
||
60 bind
== VIRGL_BIND_INDEX_BUFFER
||
61 bind
== VIRGL_BIND_VERTEX_BUFFER
||
62 bind
== VIRGL_BIND_CUSTOM
||
63 bind
== VIRGL_BIND_STAGING
;
66 static void virgl_hw_res_destroy(struct virgl_drm_winsys
*qdws
,
67 struct virgl_hw_res
*res
)
69 struct drm_gem_close args
;
71 mtx_lock(&qdws
->bo_handles_mutex
);
72 _mesa_hash_table_remove_key(qdws
->bo_handles
,
73 (void *)(uintptr_t)res
->bo_handle
);
75 _mesa_hash_table_remove_key(qdws
->bo_names
,
76 (void *)(uintptr_t)res
->flink_name
);
77 mtx_unlock(&qdws
->bo_handles_mutex
);
79 os_munmap(res
->ptr
, res
->size
);
81 memset(&args
, 0, sizeof(args
));
82 args
.handle
= res
->bo_handle
;
83 drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_CLOSE
, &args
);
87 static boolean
virgl_drm_resource_is_busy(struct virgl_winsys
*vws
,
88 struct virgl_hw_res
*res
)
90 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
91 struct drm_virtgpu_3d_wait waitcmd
;
94 if (!p_atomic_read(&res
->maybe_busy
) && !p_atomic_read(&res
->external
))
97 memset(&waitcmd
, 0, sizeof(waitcmd
));
98 waitcmd
.handle
= res
->bo_handle
;
99 waitcmd
.flags
= VIRTGPU_WAIT_NOWAIT
;
101 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_WAIT
, &waitcmd
);
102 if (ret
&& errno
== EBUSY
)
105 p_atomic_set(&res
->maybe_busy
, false);
111 virgl_drm_winsys_destroy(struct virgl_winsys
*qws
)
113 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
115 virgl_resource_cache_flush(&qdws
->cache
);
117 _mesa_hash_table_destroy(qdws
->bo_handles
, NULL
);
118 _mesa_hash_table_destroy(qdws
->bo_names
, NULL
);
119 mtx_destroy(&qdws
->bo_handles_mutex
);
120 mtx_destroy(&qdws
->mutex
);
125 static void virgl_drm_resource_reference(struct virgl_winsys
*qws
,
126 struct virgl_hw_res
**dres
,
127 struct virgl_hw_res
*sres
)
129 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
130 struct virgl_hw_res
*old
= *dres
;
132 if (pipe_reference(&(*dres
)->reference
, &sres
->reference
)) {
134 if (!can_cache_resource_with_bind(old
->bind
) ||
135 p_atomic_read(&old
->external
)) {
136 virgl_hw_res_destroy(qdws
, old
);
138 mtx_lock(&qdws
->mutex
);
139 virgl_resource_cache_add(&qdws
->cache
, &old
->cache_entry
);
140 mtx_unlock(&qdws
->mutex
);
146 static struct virgl_hw_res
*
147 virgl_drm_winsys_resource_create(struct virgl_winsys
*qws
,
148 enum pipe_texture_target target
,
160 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
161 struct drm_virtgpu_resource_create createcmd
;
163 struct virgl_hw_res
*res
;
164 uint32_t stride
= width
* util_format_get_blocksize(format
);
166 res
= CALLOC_STRUCT(virgl_hw_res
);
170 memset(&createcmd
, 0, sizeof(createcmd
));
171 createcmd
.target
= target
;
172 createcmd
.format
= pipe_to_virgl_format(format
);
173 createcmd
.bind
= bind
;
174 createcmd
.width
= width
;
175 createcmd
.height
= height
;
176 createcmd
.depth
= depth
;
177 createcmd
.array_size
= array_size
;
178 createcmd
.last_level
= last_level
;
179 createcmd
.nr_samples
= nr_samples
;
180 createcmd
.stride
= stride
;
181 createcmd
.size
= size
;
183 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE
, &createcmd
);
191 res
->res_handle
= createcmd
.res_handle
;
192 res
->bo_handle
= createcmd
.bo_handle
;
194 pipe_reference_init(&res
->reference
, 1);
195 p_atomic_set(&res
->external
, false);
196 p_atomic_set(&res
->num_cs_references
, 0);
198 /* A newly created resource is considered busy by the kernel until the
199 * command is retired. But for our purposes, we can consider it idle
200 * unless it is used for fencing.
202 p_atomic_set(&res
->maybe_busy
, for_fencing
);
204 virgl_resource_cache_entry_init(&res
->cache_entry
, size
, bind
, format
);
210 virgl_bo_transfer_put(struct virgl_winsys
*vws
,
211 struct virgl_hw_res
*res
,
212 const struct pipe_box
*box
,
213 uint32_t stride
, uint32_t layer_stride
,
214 uint32_t buf_offset
, uint32_t level
)
216 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
217 struct drm_virtgpu_3d_transfer_to_host tohostcmd
;
219 p_atomic_set(&res
->maybe_busy
, true);
221 memset(&tohostcmd
, 0, sizeof(tohostcmd
));
222 tohostcmd
.bo_handle
= res
->bo_handle
;
223 tohostcmd
.box
.x
= box
->x
;
224 tohostcmd
.box
.y
= box
->y
;
225 tohostcmd
.box
.z
= box
->z
;
226 tohostcmd
.box
.w
= box
->width
;
227 tohostcmd
.box
.h
= box
->height
;
228 tohostcmd
.box
.d
= box
->depth
;
229 tohostcmd
.offset
= buf_offset
;
230 tohostcmd
.level
= level
;
231 // tohostcmd.stride = stride;
232 // tohostcmd.layer_stride = stride;
233 return drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST
, &tohostcmd
);
237 virgl_bo_transfer_get(struct virgl_winsys
*vws
,
238 struct virgl_hw_res
*res
,
239 const struct pipe_box
*box
,
240 uint32_t stride
, uint32_t layer_stride
,
241 uint32_t buf_offset
, uint32_t level
)
243 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
244 struct drm_virtgpu_3d_transfer_from_host fromhostcmd
;
246 p_atomic_set(&res
->maybe_busy
, true);
248 memset(&fromhostcmd
, 0, sizeof(fromhostcmd
));
249 fromhostcmd
.bo_handle
= res
->bo_handle
;
250 fromhostcmd
.level
= level
;
251 fromhostcmd
.offset
= buf_offset
;
252 // fromhostcmd.stride = stride;
253 // fromhostcmd.layer_stride = layer_stride;
254 fromhostcmd
.box
.x
= box
->x
;
255 fromhostcmd
.box
.y
= box
->y
;
256 fromhostcmd
.box
.z
= box
->z
;
257 fromhostcmd
.box
.w
= box
->width
;
258 fromhostcmd
.box
.h
= box
->height
;
259 fromhostcmd
.box
.d
= box
->depth
;
260 return drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST
, &fromhostcmd
);
263 static struct virgl_hw_res
*
264 virgl_drm_winsys_resource_cache_create(struct virgl_winsys
*qws
,
265 enum pipe_texture_target target
,
276 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
277 struct virgl_hw_res
*res
;
278 struct virgl_resource_cache_entry
*entry
;
280 if (!can_cache_resource_with_bind(bind
))
283 mtx_lock(&qdws
->mutex
);
285 entry
= virgl_resource_cache_remove_compatible(&qdws
->cache
, size
,
288 res
= cache_entry_container_res(entry
);
289 mtx_unlock(&qdws
->mutex
);
290 pipe_reference_init(&res
->reference
, 1);
294 mtx_unlock(&qdws
->mutex
);
297 res
= virgl_drm_winsys_resource_create(qws
, target
, format
, bind
,
298 width
, height
, depth
, array_size
,
299 last_level
, nr_samples
, size
, false);
303 static struct virgl_hw_res
*
304 virgl_drm_winsys_resource_create_handle(struct virgl_winsys
*qws
,
305 struct winsys_handle
*whandle
,
308 uint32_t *plane_offset
,
311 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
312 struct drm_gem_open open_arg
= {};
313 struct drm_virtgpu_resource_info info_arg
= {};
314 struct virgl_hw_res
*res
= NULL
;
315 uint32_t handle
= whandle
->handle
;
317 if (whandle
->offset
!= 0 && whandle
->type
== WINSYS_HANDLE_TYPE_SHARED
) {
318 _debug_printf("attempt to import unsupported winsys offset %u\n",
321 } else if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
322 *plane
= whandle
->plane
;
323 *stride
= whandle
->stride
;
324 *plane_offset
= whandle
->offset
;
325 *modifier
= whandle
->modifier
;
328 mtx_lock(&qdws
->bo_handles_mutex
);
330 /* We must maintain a list of pairs <handle, bo>, so that we always return
331 * the same BO for one particular handle. If we didn't do that and created
332 * more than one BO for the same handle and then relocated them in a CS,
333 * we would hit a deadlock in the kernel.
335 * The list of pairs is guarded by a mutex, of course. */
336 if (whandle
->type
== WINSYS_HANDLE_TYPE_SHARED
) {
337 res
= util_hash_table_get(qdws
->bo_names
, (void*)(uintptr_t)handle
);
338 } else if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
340 r
= drmPrimeFDToHandle(qdws
->fd
, whandle
->handle
, &handle
);
343 res
= util_hash_table_get(qdws
->bo_handles
, (void*)(uintptr_t)handle
);
345 /* Unknown handle type */
350 struct virgl_hw_res
*r
= NULL
;
351 virgl_drm_resource_reference(&qdws
->base
, &r
, res
);
355 res
= CALLOC_STRUCT(virgl_hw_res
);
359 if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
360 res
->bo_handle
= handle
;
362 memset(&open_arg
, 0, sizeof(open_arg
));
363 open_arg
.name
= whandle
->handle
;
364 if (drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_OPEN
, &open_arg
)) {
369 res
->bo_handle
= open_arg
.handle
;
370 res
->flink_name
= whandle
->handle
;
373 memset(&info_arg
, 0, sizeof(info_arg
));
374 info_arg
.bo_handle
= res
->bo_handle
;
376 if (drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_RESOURCE_INFO
, &info_arg
)) {
383 res
->res_handle
= info_arg
.res_handle
;
385 res
->size
= info_arg
.size
;
386 pipe_reference_init(&res
->reference
, 1);
387 p_atomic_set(&res
->external
, true);
388 res
->num_cs_references
= 0;
391 _mesa_hash_table_insert(qdws
->bo_names
, (void *)(uintptr_t)res
->flink_name
, res
);
392 _mesa_hash_table_insert(qdws
->bo_handles
, (void *)(uintptr_t)res
->bo_handle
, res
);
395 mtx_unlock(&qdws
->bo_handles_mutex
);
399 static boolean
virgl_drm_winsys_resource_get_handle(struct virgl_winsys
*qws
,
400 struct virgl_hw_res
*res
,
402 struct winsys_handle
*whandle
)
404 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
405 struct drm_gem_flink flink
;
410 if (whandle
->type
== WINSYS_HANDLE_TYPE_SHARED
) {
411 if (!res
->flink_name
) {
412 memset(&flink
, 0, sizeof(flink
));
413 flink
.handle
= res
->bo_handle
;
415 if (drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_FLINK
, &flink
)) {
418 res
->flink_name
= flink
.name
;
420 mtx_lock(&qdws
->bo_handles_mutex
);
421 _mesa_hash_table_insert(qdws
->bo_names
, (void *)(uintptr_t)res
->flink_name
, res
);
422 mtx_unlock(&qdws
->bo_handles_mutex
);
424 whandle
->handle
= res
->flink_name
;
425 } else if (whandle
->type
== WINSYS_HANDLE_TYPE_KMS
) {
426 whandle
->handle
= res
->bo_handle
;
427 } else if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
428 if (drmPrimeHandleToFD(qdws
->fd
, res
->bo_handle
, DRM_CLOEXEC
, (int*)&whandle
->handle
))
430 mtx_lock(&qdws
->bo_handles_mutex
);
431 _mesa_hash_table_insert(qdws
->bo_handles
, (void *)(uintptr_t)res
->bo_handle
, res
);
432 mtx_unlock(&qdws
->bo_handles_mutex
);
435 p_atomic_set(&res
->external
, true);
437 whandle
->stride
= stride
;
441 static void *virgl_drm_resource_map(struct virgl_winsys
*qws
,
442 struct virgl_hw_res
*res
)
444 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
445 struct drm_virtgpu_map mmap_arg
;
451 memset(&mmap_arg
, 0, sizeof(mmap_arg
));
452 mmap_arg
.handle
= res
->bo_handle
;
453 if (drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_MAP
, &mmap_arg
))
456 ptr
= os_mmap(0, res
->size
, PROT_READ
|PROT_WRITE
, MAP_SHARED
,
457 qdws
->fd
, mmap_arg
.offset
);
458 if (ptr
== MAP_FAILED
)
466 static void virgl_drm_resource_wait(struct virgl_winsys
*qws
,
467 struct virgl_hw_res
*res
)
469 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
470 struct drm_virtgpu_3d_wait waitcmd
;
473 if (!p_atomic_read(&res
->maybe_busy
) && !p_atomic_read(&res
->external
))
476 memset(&waitcmd
, 0, sizeof(waitcmd
));
477 waitcmd
.handle
= res
->bo_handle
;
479 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_WAIT
, &waitcmd
);
481 _debug_printf("waiting got error - %d, slow gpu or hang?\n", errno
);
483 p_atomic_set(&res
->maybe_busy
, false);
486 static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf
*cbuf
,
489 cbuf
->nres
= initial_size
;
492 cbuf
->res_bo
= CALLOC(cbuf
->nres
, sizeof(struct virgl_hw_buf
*));
496 cbuf
->res_hlist
= MALLOC(cbuf
->nres
* sizeof(uint32_t));
497 if (!cbuf
->res_hlist
) {
505 static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf
*cbuf
)
509 for (i
= 0; i
< cbuf
->cres
; i
++) {
510 p_atomic_dec(&cbuf
->res_bo
[i
]->num_cs_references
);
511 virgl_drm_resource_reference(cbuf
->ws
, &cbuf
->res_bo
[i
], NULL
);
513 FREE(cbuf
->res_hlist
);
517 static boolean
virgl_drm_lookup_res(struct virgl_drm_cmd_buf
*cbuf
,
518 struct virgl_hw_res
*res
)
520 unsigned hash
= res
->res_handle
& (sizeof(cbuf
->is_handle_added
)-1);
523 if (cbuf
->is_handle_added
[hash
]) {
524 i
= cbuf
->reloc_indices_hashlist
[hash
];
525 if (cbuf
->res_bo
[i
] == res
)
528 for (i
= 0; i
< cbuf
->cres
; i
++) {
529 if (cbuf
->res_bo
[i
] == res
) {
530 cbuf
->reloc_indices_hashlist
[hash
] = i
;
538 static void virgl_drm_add_res(struct virgl_drm_winsys
*qdws
,
539 struct virgl_drm_cmd_buf
*cbuf
,
540 struct virgl_hw_res
*res
)
542 unsigned hash
= res
->res_handle
& (sizeof(cbuf
->is_handle_added
)-1);
544 if (cbuf
->cres
>= cbuf
->nres
) {
545 unsigned new_nres
= cbuf
->nres
+ 256;
546 void *new_ptr
= REALLOC(cbuf
->res_bo
,
547 cbuf
->nres
* sizeof(struct virgl_hw_buf
*),
548 new_nres
* sizeof(struct virgl_hw_buf
*));
550 _debug_printf("failure to add relocation %d, %d\n", cbuf
->cres
, new_nres
);
553 cbuf
->res_bo
= new_ptr
;
555 new_ptr
= REALLOC(cbuf
->res_hlist
,
556 cbuf
->nres
* sizeof(uint32_t),
557 new_nres
* sizeof(uint32_t));
559 _debug_printf("failure to add hlist relocation %d, %d\n", cbuf
->cres
, cbuf
->nres
);
562 cbuf
->res_hlist
= new_ptr
;
563 cbuf
->nres
= new_nres
;
566 cbuf
->res_bo
[cbuf
->cres
] = NULL
;
567 virgl_drm_resource_reference(&qdws
->base
, &cbuf
->res_bo
[cbuf
->cres
], res
);
568 cbuf
->res_hlist
[cbuf
->cres
] = res
->bo_handle
;
569 cbuf
->is_handle_added
[hash
] = TRUE
;
571 cbuf
->reloc_indices_hashlist
[hash
] = cbuf
->cres
;
572 p_atomic_inc(&res
->num_cs_references
);
576 /* This is called after the cbuf is submitted. */
577 static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf
*cbuf
)
581 for (i
= 0; i
< cbuf
->cres
; i
++) {
582 /* mark all BOs busy after submission */
583 p_atomic_set(&cbuf
->res_bo
[i
]->maybe_busy
, true);
585 p_atomic_dec(&cbuf
->res_bo
[i
]->num_cs_references
);
586 virgl_drm_resource_reference(cbuf
->ws
, &cbuf
->res_bo
[i
], NULL
);
591 memset(cbuf
->is_handle_added
, 0, sizeof(cbuf
->is_handle_added
));
594 static void virgl_drm_emit_res(struct virgl_winsys
*qws
,
595 struct virgl_cmd_buf
*_cbuf
,
596 struct virgl_hw_res
*res
, boolean write_buf
)
598 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
599 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
600 boolean already_in_list
= virgl_drm_lookup_res(cbuf
, res
);
603 cbuf
->base
.buf
[cbuf
->base
.cdw
++] = res
->res_handle
;
605 if (!already_in_list
)
606 virgl_drm_add_res(qdws
, cbuf
, res
);
609 static boolean
virgl_drm_res_is_ref(struct virgl_winsys
*qws
,
610 struct virgl_cmd_buf
*_cbuf
,
611 struct virgl_hw_res
*res
)
613 if (!p_atomic_read(&res
->num_cs_references
))
619 static struct virgl_cmd_buf
*virgl_drm_cmd_buf_create(struct virgl_winsys
*qws
,
622 struct virgl_drm_cmd_buf
*cbuf
;
624 cbuf
= CALLOC_STRUCT(virgl_drm_cmd_buf
);
630 if (!virgl_drm_alloc_res_list(cbuf
, 512)) {
635 cbuf
->buf
= CALLOC(size
, sizeof(uint32_t));
637 FREE(cbuf
->res_hlist
);
643 cbuf
->in_fence_fd
= -1;
644 cbuf
->base
.buf
= cbuf
->buf
;
648 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf
*_cbuf
)
650 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
652 virgl_drm_free_res_list(cbuf
);
658 static struct pipe_fence_handle
*
659 virgl_drm_fence_create(struct virgl_winsys
*vws
, int fd
, bool external
)
661 struct virgl_drm_fence
*fence
;
663 assert(vws
->supports_fences
);
671 fence
= CALLOC_STRUCT(virgl_drm_fence
);
678 fence
->external
= external
;
680 pipe_reference_init(&fence
->reference
, 1);
682 return (struct pipe_fence_handle
*)fence
;
685 static struct pipe_fence_handle
*
686 virgl_drm_fence_create_legacy(struct virgl_winsys
*vws
)
688 struct virgl_drm_fence
*fence
;
690 assert(!vws
->supports_fences
);
692 fence
= CALLOC_STRUCT(virgl_drm_fence
);
697 /* Resources for fences should not be from the cache, since we are basing
698 * the fence status on the resource creation busy status.
700 fence
->hw_res
= virgl_drm_winsys_resource_create(vws
, PIPE_BUFFER
,
701 PIPE_FORMAT_R8_UNORM
, VIRGL_BIND_CUSTOM
, 8, 1, 1, 0, 0, 0, 8, true);
702 if (!fence
->hw_res
) {
707 pipe_reference_init(&fence
->reference
, 1);
709 return (struct pipe_fence_handle
*)fence
;
712 static int virgl_drm_winsys_submit_cmd(struct virgl_winsys
*qws
,
713 struct virgl_cmd_buf
*_cbuf
,
714 struct pipe_fence_handle
**fence
)
716 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
717 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
718 struct drm_virtgpu_execbuffer eb
;
721 if (cbuf
->base
.cdw
== 0)
724 memset(&eb
, 0, sizeof(struct drm_virtgpu_execbuffer
));
725 eb
.command
= (unsigned long)(void*)cbuf
->buf
;
726 eb
.size
= cbuf
->base
.cdw
* 4;
727 eb
.num_bo_handles
= cbuf
->cres
;
728 eb
.bo_handles
= (unsigned long)(void *)cbuf
->res_hlist
;
731 if (qws
->supports_fences
) {
732 if (cbuf
->in_fence_fd
>= 0) {
733 eb
.flags
|= VIRTGPU_EXECBUF_FENCE_FD_IN
;
734 eb
.fence_fd
= cbuf
->in_fence_fd
;
738 eb
.flags
|= VIRTGPU_EXECBUF_FENCE_FD_OUT
;
740 assert(cbuf
->in_fence_fd
< 0);
743 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_EXECBUFFER
, &eb
);
745 _debug_printf("got error from kernel - expect bad rendering %d\n", errno
);
748 if (qws
->supports_fences
) {
749 if (cbuf
->in_fence_fd
>= 0) {
750 close(cbuf
->in_fence_fd
);
751 cbuf
->in_fence_fd
= -1;
754 if (fence
!= NULL
&& ret
== 0)
755 *fence
= virgl_drm_fence_create(qws
, eb
.fence_fd
, false);
757 if (fence
!= NULL
&& ret
== 0)
758 *fence
= virgl_drm_fence_create_legacy(qws
);
761 virgl_drm_clear_res_list(cbuf
);
766 static int virgl_drm_get_caps(struct virgl_winsys
*vws
,
767 struct virgl_drm_caps
*caps
)
769 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
770 struct drm_virtgpu_get_caps args
;
773 virgl_ws_fill_new_caps_defaults(caps
);
775 memset(&args
, 0, sizeof(args
));
776 if (vdws
->has_capset_query_fix
) {
777 /* if we have the query fix - try and get cap set id 2 first */
779 args
.size
= sizeof(union virgl_caps
);
782 args
.size
= sizeof(struct virgl_caps_v1
);
784 args
.addr
= (unsigned long)&caps
->caps
;
786 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_GET_CAPS
, &args
);
787 if (ret
== -1 && errno
== EINVAL
) {
790 args
.size
= sizeof(struct virgl_caps_v1
);
791 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_GET_CAPS
, &args
);
798 static struct pipe_fence_handle
*
799 virgl_cs_create_fence(struct virgl_winsys
*vws
, int fd
)
801 if (!vws
->supports_fences
)
804 return virgl_drm_fence_create(vws
, fd
, true);
807 static bool virgl_fence_wait(struct virgl_winsys
*vws
,
808 struct pipe_fence_handle
*_fence
,
811 struct virgl_drm_fence
*fence
= virgl_drm_fence(_fence
);
813 if (vws
->supports_fences
) {
818 return sync_wait(fence
->fd
, 0) == 0;
820 timeout_ms
= timeout
/ 1000000;
822 if (timeout_ms
* 1000000 < timeout
)
825 timeout_poll
= timeout_ms
<= INT_MAX
? (int) timeout_ms
: -1;
827 return sync_wait(fence
->fd
, timeout_poll
) == 0;
831 return !virgl_drm_resource_is_busy(vws
, fence
->hw_res
);
833 if (timeout
!= PIPE_TIMEOUT_INFINITE
) {
834 int64_t start_time
= os_time_get();
836 while (virgl_drm_resource_is_busy(vws
, fence
->hw_res
)) {
837 if (os_time_get() - start_time
>= timeout
)
843 virgl_drm_resource_wait(vws
, fence
->hw_res
);
848 static void virgl_fence_reference(struct virgl_winsys
*vws
,
849 struct pipe_fence_handle
**dst
,
850 struct pipe_fence_handle
*src
)
852 struct virgl_drm_fence
*dfence
= virgl_drm_fence(*dst
);
853 struct virgl_drm_fence
*sfence
= virgl_drm_fence(src
);
855 if (pipe_reference(&dfence
->reference
, &sfence
->reference
)) {
856 if (vws
->supports_fences
) {
859 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
860 virgl_hw_res_destroy(vdws
, dfence
->hw_res
);
868 static void virgl_fence_server_sync(struct virgl_winsys
*vws
,
869 struct virgl_cmd_buf
*_cbuf
,
870 struct pipe_fence_handle
*_fence
)
872 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
873 struct virgl_drm_fence
*fence
= virgl_drm_fence(_fence
);
875 if (!vws
->supports_fences
)
878 /* if not an external fence, then nothing more to do without preemption: */
879 if (!fence
->external
)
882 sync_accumulate("virgl", &cbuf
->in_fence_fd
, fence
->fd
);
885 static int virgl_fence_get_fd(struct virgl_winsys
*vws
,
886 struct pipe_fence_handle
*_fence
)
888 struct virgl_drm_fence
*fence
= virgl_drm_fence(_fence
);
890 if (!vws
->supports_fences
)
893 return dup(fence
->fd
);
896 static int virgl_drm_get_version(int fd
)
899 drmVersionPtr version
;
901 version
= drmGetVersion(fd
);
905 else if (version
->version_major
!= 0)
908 ret
= VIRGL_DRM_VERSION(0, version
->version_minor
);
910 drmFreeVersion(version
);
916 virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry
*entry
,
919 struct virgl_drm_winsys
*qdws
= user_data
;
920 struct virgl_hw_res
*res
= cache_entry_container_res(entry
);
922 return virgl_drm_resource_is_busy(&qdws
->base
, res
);
/* Resource-cache callback: destroys the resource owning the given cache
 * entry when it is evicted.  user_data is the owning virgl_drm_winsys.
 */
static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}
935 static struct virgl_winsys
*
936 virgl_drm_winsys_create(int drmFD
)
938 static const unsigned CACHE_TIMEOUT_USEC
= 1000000;
939 struct virgl_drm_winsys
*qdws
;
943 struct drm_virtgpu_getparam getparam
= {0};
945 getparam
.param
= VIRTGPU_PARAM_3D_FEATURES
;
946 getparam
.value
= (uint64_t)(uintptr_t)&gl
;
947 ret
= drmIoctl(drmFD
, DRM_IOCTL_VIRTGPU_GETPARAM
, &getparam
);
951 drm_version
= virgl_drm_get_version(drmFD
);
955 qdws
= CALLOC_STRUCT(virgl_drm_winsys
);
960 virgl_resource_cache_init(&qdws
->cache
, CACHE_TIMEOUT_USEC
,
961 virgl_drm_resource_cache_entry_is_busy
,
962 virgl_drm_resource_cache_entry_release
,
964 (void) mtx_init(&qdws
->mutex
, mtx_plain
);
965 (void) mtx_init(&qdws
->bo_handles_mutex
, mtx_plain
);
966 qdws
->bo_handles
= util_hash_table_create_ptr_keys();
967 qdws
->bo_names
= util_hash_table_create_ptr_keys();
968 qdws
->base
.destroy
= virgl_drm_winsys_destroy
;
970 qdws
->base
.transfer_put
= virgl_bo_transfer_put
;
971 qdws
->base
.transfer_get
= virgl_bo_transfer_get
;
972 qdws
->base
.resource_create
= virgl_drm_winsys_resource_cache_create
;
973 qdws
->base
.resource_reference
= virgl_drm_resource_reference
;
974 qdws
->base
.resource_create_from_handle
= virgl_drm_winsys_resource_create_handle
;
975 qdws
->base
.resource_get_handle
= virgl_drm_winsys_resource_get_handle
;
976 qdws
->base
.resource_map
= virgl_drm_resource_map
;
977 qdws
->base
.resource_wait
= virgl_drm_resource_wait
;
978 qdws
->base
.resource_is_busy
= virgl_drm_resource_is_busy
;
979 qdws
->base
.cmd_buf_create
= virgl_drm_cmd_buf_create
;
980 qdws
->base
.cmd_buf_destroy
= virgl_drm_cmd_buf_destroy
;
981 qdws
->base
.submit_cmd
= virgl_drm_winsys_submit_cmd
;
982 qdws
->base
.emit_res
= virgl_drm_emit_res
;
983 qdws
->base
.res_is_referenced
= virgl_drm_res_is_ref
;
985 qdws
->base
.cs_create_fence
= virgl_cs_create_fence
;
986 qdws
->base
.fence_wait
= virgl_fence_wait
;
987 qdws
->base
.fence_reference
= virgl_fence_reference
;
988 qdws
->base
.fence_server_sync
= virgl_fence_server_sync
;
989 qdws
->base
.fence_get_fd
= virgl_fence_get_fd
;
990 qdws
->base
.supports_fences
= drm_version
>= VIRGL_DRM_VERSION_FENCE_FD
;
991 qdws
->base
.supports_encoded_transfers
= 1;
993 qdws
->base
.get_caps
= virgl_drm_get_caps
;
996 getparam
.param
= VIRTGPU_PARAM_CAPSET_QUERY_FIX
;
997 getparam
.value
= (uint64_t)(uintptr_t)&value
;
998 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_GETPARAM
, &getparam
);
1001 qdws
->has_capset_query_fix
= true;
1008 static struct hash_table
*fd_tab
= NULL
;
1009 static mtx_t virgl_screen_mutex
= _MTX_INITIALIZER_NP
;
1012 virgl_drm_screen_destroy(struct pipe_screen
*pscreen
)
1014 struct virgl_screen
*screen
= virgl_screen(pscreen
);
1017 mtx_lock(&virgl_screen_mutex
);
1018 destroy
= --screen
->refcnt
== 0;
1020 int fd
= virgl_drm_winsys(screen
->vws
)->fd
;
1021 _mesa_hash_table_remove_key(fd_tab
, intptr_to_pointer(fd
));
1024 mtx_unlock(&virgl_screen_mutex
);
1027 pscreen
->destroy
= screen
->winsys_priv
;
1028 pscreen
->destroy(pscreen
);
1032 struct pipe_screen
*
1033 virgl_drm_screen_create(int fd
, const struct pipe_screen_config
*config
)
1035 struct pipe_screen
*pscreen
= NULL
;
1037 mtx_lock(&virgl_screen_mutex
);
1039 fd_tab
= util_hash_table_create_fd_keys();
1044 pscreen
= util_hash_table_get(fd_tab
, intptr_to_pointer(fd
));
1046 virgl_screen(pscreen
)->refcnt
++;
1048 struct virgl_winsys
*vws
;
1049 int dup_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1051 vws
= virgl_drm_winsys_create(dup_fd
);
1057 pscreen
= virgl_create_screen(vws
, config
);
1059 _mesa_hash_table_insert(fd_tab
, intptr_to_pointer(dup_fd
), pscreen
);
1061 /* Bit of a hack, to avoid circular linkage dependency,
1062 * ie. pipe driver having to call in to winsys, we
1063 * override the pipe drivers screen->destroy():
1065 virgl_screen(pscreen
)->winsys_priv
= pscreen
->destroy
;
1066 pscreen
->destroy
= virgl_drm_screen_destroy
;
1071 mtx_unlock(&virgl_screen_mutex
);