/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "os/os_mman.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "util/u_pointer.h"
#include "frontend/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"

#include <xf86drm.h>
#include <libsync.h>
#include "drm-uapi/virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"

#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
#define VIRGL_DRM_VERSION_FENCE_FD      VIRGL_DRM_VERSION(0, 1)

/* Gets a pointer to the virgl_hw_res containing the pointed to cache entry. */
#define cache_entry_container_res(ptr) \
    (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))
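/* Note: cache_entry_container_res() is the classic container-of idiom; the
 * resource cache links plain virgl_resource_cache_entry nodes, and the macro
 * recovers the owning virgl_hw_res from an embedded entry. */
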
static inline boolean can_cache_resource_with_bind(uint32_t bind)
{
   return bind == VIRGL_BIND_CONSTANT_BUFFER ||
          bind == VIRGL_BIND_INDEX_BUFFER ||
          bind == VIRGL_BIND_VERTEX_BUFFER ||
          bind == VIRGL_BIND_CUSTOM ||
          bind == VIRGL_BIND_STAGING;
}
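/* Destroy a host resource for good: unlink it from both lookup tables, drop
 * any CPU mapping, then close the GEM handle. Only called once the last
 * reference is gone or when a cache entry is evicted. */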
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   mtx_lock(&qdws->bo_handles_mutex);
   _mesa_hash_table_remove_key(qdws->bo_handles,
                               (void *)(uintptr_t)res->bo_handle);
   if (res->flink_name)
      _mesa_hash_table_remove_key(qdws->bo_names,
                                  (void *)(uintptr_t)res->flink_name);
   mtx_unlock(&qdws->bo_handles_mutex);
   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}

static boolean virgl_drm_resource_is_busy(struct virgl_winsys *vws,
                                          struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return false;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return true;

   p_atomic_set(&res->maybe_busy, false);

   return false;
}

static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_resource_cache_flush(&qdws->cache);

   _mesa_hash_table_destroy(qdws->bo_handles, NULL);
   _mesa_hash_table_destroy(qdws->bo_names, NULL);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}

static void virgl_drm_resource_reference(struct virgl_winsys *qws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {
      /* Last reference dropped: uncacheable and externally shared
       * resources die immediately, everything else is recycled. */
      if (!can_cache_resource_with_bind(old->bind) ||
          p_atomic_read(&old->external)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}
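/* Create a fresh resource on the host via DRM_IOCTL_VIRTGPU_RESOURCE_CREATE.
 * The result carries one reference and is only marked potentially busy when
 * it is created to back a fence. */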
static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 enum pipe_format format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size,
                                 bool for_fencing)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = pipe_to_virgl_format(format);
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);

   /* A newly created resource is considered busy by the kernel until the
    * command is retired. But for our purposes, we can consider it idle
    * unless it is used for fencing.
    */
   p_atomic_set(&res->maybe_busy, for_fencing);

   virgl_resource_cache_entry_init(&res->cache_entry, size, bind, format);

   return res;
}
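/* Upload path: ask the host to pull a box of data from the guest backing
 * store into the host-side resource. The resource is flagged potentially
 * busy first, since the transfer executes asynchronously. */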
static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;
// tohostcmd.stride = stride;
// tohostcmd.layer_stride = stride;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}
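/* Download path: the inverse of virgl_bo_transfer_put, copying a box of the
 * host resource back into the guest-visible backing store. */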
static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
// fromhostcmd.stride = stride;
// fromhostcmd.layer_stride = layer_stride;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       enum pipe_format format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res;
   struct virgl_resource_cache_entry *entry;

   if (!can_cache_resource_with_bind(bind))
      goto alloc;

   mtx_lock(&qdws->mutex);

   entry = virgl_resource_cache_remove_compatible(&qdws->cache, size,
                                                  bind, format);
   if (entry) {
      res = cache_entry_container_res(entry);
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   res = virgl_drm_winsys_resource_create(qws, target, format, bind,
                                          width, height, depth, array_size,
                                          last_level, nr_samples, size, false);
   return res;
}
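/* Import a resource from an external winsys handle (flink name, KMS handle,
 * or prime fd). Repeated imports of the same underlying BO must hand back
 * the same virgl_hw_res, which is what the bo_names/bo_handles tables are
 * for; see the comment on the pair list inside. */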
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle,
                                        uint32_t *plane,
                                        uint32_t *stride,
                                        uint32_t *plane_offset,
                                        uint64_t *modifier)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res = NULL;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0 && whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      _debug_printf("attempt to import unsupported winsys offset %u\n",
                    whandle->offset);
      return NULL;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      *plane = whandle->plane;
      *stride = whandle->stride;
      *plane_offset = whandle->offset;
      *modifier = whandle->modifier;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r)
         goto done;
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto done;
   }

   if (res) {
      /* Already imported: just take another reference. */
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(&qdws->base, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
      res->flink_name = whandle->handle;
   }

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, true);
   res->num_cs_references = 0;

   if (res->flink_name)
      _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
   _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}
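/* Export a resource as a winsys handle. Exported resources are marked
 * external, which keeps them out of the resource cache and forces busy
 * queries to go to the kernel. */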
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flink_name) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flink_name = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         _mesa_hash_table_insert(qdws->bo_names,
                                 (void *)(uintptr_t)res->flink_name, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC,
                             (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      _mesa_hash_table_insert(qdws->bo_handles,
                              (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   p_atomic_set(&res->external, true);

   whandle->stride = stride;
   return TRUE;
}
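/* CPU access: the mapping is created lazily on first use and then cached in
 * res->ptr for the lifetime of the resource. */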
static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret)
      _debug_printf("waiting got error - %d, slow gpu or hang?\n", errno);

   p_atomic_set(&res->maybe_busy, false);
}
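/* Command buffers track referenced resources in two parallel arrays: res_bo
 * holds counted pointers for lifetime management, res_hlist holds the raw BO
 * handles in the flat layout the execbuffer ioctl expects. */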
static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
                                     int initial_size)
{
   cbuf->nres = initial_size;
   cbuf->cres = 0;

   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo)
      return false;

   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      return false;
   }

   return true;
}

static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }
   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
}

static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}
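/* Append a resource to the reloc list, growing both arrays in lockstep
 * (256 entries at a time) and taking a command-stream reference so the
 * resource stays alive until the buffer is submitted or destroyed. */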
static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         _debug_printf("failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         _debug_printf("failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

/* This is called after the cbuf is submitted. */
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      /* mark all BOs busy after submission */
      p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);

      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }

   cbuf->cres = 0;

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
}
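/* Write a resource handle into the command stream (when the command actually
 * encodes the handle) and make sure the resource appears on the reloc list
 * exactly once. */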
static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}

static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct virgl_hw_res *res)
{
   if (!p_atomic_read(&res->num_cs_references))
      return FALSE;

   return TRUE;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
                                                      uint32_t size)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   if (!virgl_drm_alloc_res_list(cbuf, 512)) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->buf = CALLOC(size, sizeof(uint32_t));
   if (!cbuf->buf) {
      FREE(cbuf->res_hlist);
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->in_fence_fd = -1;
   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   virgl_drm_free_res_list(cbuf);

   FREE(cbuf->buf);
   FREE(cbuf);
}

static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = os_dupfd_cloexec(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
{
   struct virgl_drm_fence *fence;

   assert(!vws->supports_fences);

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence)
      return NULL;
   fence->fd = -1;

   /* Resources for fences should not be from the cache, since we are basing
    * the fence status on the resource creation busy status.
    */
   fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
         PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
   if (!fence->hw_res) {
      FREE(fence);
      return NULL;
   }

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}
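/* Flush the accumulated command stream to the kernel. With fence-fd support
 * the submission can wait on an imported in-fence and hand back an out-fence;
 * without it, fence completion is inferred from the busy state of a dummy
 * resource created alongside the submission. */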
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   virgl_drm_clear_res_list(cbuf);

   return ret;
}

static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (vdws->has_capset_query_fix) {
      /* if we have the query fix - try and get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to the v1 capset. */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   }
   return ret;
}

static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
   if (!vws->supports_fences)
      return NULL;

   return virgl_drm_fence_create(vws, fd, true);
}

static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;

      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vws, fence->hw_res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      /* os_time_get() is in microseconds, timeout in nanoseconds. */
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return TRUE;
}
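/* Fence teardown on the last unref: a sync-fd fence just closes its fd, while
 * a legacy fence owns a dummy hw resource that is destroyed directly since it
 * was deliberately created outside the cache. */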
static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
         virgl_hw_res_destroy(vdws, dfence->hw_res);
      }
      FREE(dfence);
   }

   *dst = src;
}

static void virgl_fence_server_sync(struct virgl_winsys *vws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct pipe_fence_handle *_fence)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return;

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->external)
      return;

   sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
}

static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *_fence)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return -1;

   return os_dupfd_cloexec(fence->fd);
}

static int virgl_drm_get_version(int fd)
{
   int ret;
   drmVersionPtr version;

   version = drmGetVersion(fd);

   if (!version)
      ret = -EFAULT;
   else if (version->version_major != 0)
      ret = -EINVAL;
   else
      ret = VIRGL_DRM_VERSION(0, version->version_minor);

   drmFreeVersion(version);

   return ret;
}

static bool
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   return virgl_drm_resource_is_busy(&qdws->base, res);
}

static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}

static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   static const unsigned CACHE_TIMEOUT_USEC = 1000000;
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;
   int gl = 0;
   uint32_t value;
   struct drm_virtgpu_getparam getparam = {0};

   getparam.param = VIRTGPU_PARAM_3D_FEATURES;
   getparam.value = (uint64_t)(uintptr_t)&gl;
   ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret < 0 || !gl)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
                             virgl_drm_resource_cache_entry_is_busy,
                             virgl_drm_resource_cache_entry_release,
                             qdws);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   qdws->bo_handles = util_hash_table_create_ptr_keys();
   qdws->bo_names = util_hash_table_create_ptr_keys();
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_reference = virgl_drm_resource_reference;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.get_caps = virgl_drm_get_caps;

   getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
   getparam.value = (uint64_t)(uintptr_t)&value;
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret == 0) {
      if (value == 1)
         qdws->has_capset_query_fix = true;
   }

   return &qdws->base;
}
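/* Screen-level bookkeeping: screens are deduplicated per device fd, so
 * creating a second screen for an already-open device just takes another
 * reference on the existing one. */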
static struct hash_table *fd_tab = NULL;
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      _mesa_hash_table_remove_key(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}

struct pipe_screen *
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct pipe_screen *pscreen = NULL;

   mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create_fd_keys();
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = os_dupfd_cloexec(fd);

      vws = virgl_drm_winsys_create(dup_fd);
      if (!vws) {
         close(dup_fd);
         goto unlock;
      }

      pscreen = virgl_create_screen(vws, config);
      if (pscreen) {
         _mesa_hash_table_insert(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe drivers screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}
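
/* A minimal usage sketch (not part of this file; the device path is an
 * assumption for illustration):
 *
 *    int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *    struct pipe_screen *screen = virgl_drm_screen_create(fd, config);
 *    ...
 *    screen->destroy(screen);
 *    close(fd);
 *
 * The winsys duplicates the fd internally, so the caller retains ownership
 * of the fd it passed in. */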