2 * Copyright 2014, 2015 Red Hat.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
28 #include <sys/ioctl.h>
31 #include "os/os_mman.h"
32 #include "util/os_time.h"
33 #include "util/u_memory.h"
34 #include "util/u_format.h"
35 #include "util/u_hash_table.h"
36 #include "util/u_inlines.h"
37 #include "state_tracker/drm_driver.h"
38 #include "virgl/virgl_screen.h"
39 #include "virgl/virgl_public.h"
43 #include "virtgpu_drm.h"
45 #include "virgl_drm_winsys.h"
46 #include "virgl_drm_public.h"
49 #define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
50 #define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)
53 static inline boolean
can_cache_resource(struct virgl_hw_res
*res
)
55 return res
->cacheable
== TRUE
;
58 static void virgl_hw_res_destroy(struct virgl_drm_winsys
*qdws
,
59 struct virgl_hw_res
*res
)
61 struct drm_gem_close args
;
64 mtx_lock(&qdws
->bo_handles_mutex
);
65 util_hash_table_remove(qdws
->bo_names
,
66 (void *)(uintptr_t)res
->flink
);
67 mtx_unlock(&qdws
->bo_handles_mutex
);
71 mtx_lock(&qdws
->bo_handles_mutex
);
72 util_hash_table_remove(qdws
->bo_handles
,
73 (void *)(uintptr_t)res
->bo_handle
);
74 mtx_unlock(&qdws
->bo_handles_mutex
);
78 os_munmap(res
->ptr
, res
->size
);
80 memset(&args
, 0, sizeof(args
));
81 args
.handle
= res
->bo_handle
;
82 drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_CLOSE
, &args
);
86 static boolean
virgl_drm_resource_is_busy(struct virgl_drm_winsys
*qdws
,
87 struct virgl_hw_res
*res
)
89 struct drm_virtgpu_3d_wait waitcmd
;
92 memset(&waitcmd
, 0, sizeof(waitcmd
));
93 waitcmd
.handle
= res
->bo_handle
;
94 waitcmd
.flags
= VIRTGPU_WAIT_NOWAIT
;
96 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_WAIT
, &waitcmd
);
97 if (ret
&& errno
== EBUSY
)
103 virgl_cache_flush(struct virgl_drm_winsys
*qdws
)
105 struct list_head
*curr
, *next
;
106 struct virgl_hw_res
*res
;
108 mtx_lock(&qdws
->mutex
);
109 curr
= qdws
->delayed
.next
;
112 while (curr
!= &qdws
->delayed
) {
113 res
= LIST_ENTRY(struct virgl_hw_res
, curr
, head
);
114 LIST_DEL(&res
->head
);
115 virgl_hw_res_destroy(qdws
, res
);
119 mtx_unlock(&qdws
->mutex
);
122 virgl_drm_winsys_destroy(struct virgl_winsys
*qws
)
124 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
126 virgl_cache_flush(qdws
);
128 util_hash_table_destroy(qdws
->bo_handles
);
129 util_hash_table_destroy(qdws
->bo_names
);
130 mtx_destroy(&qdws
->bo_handles_mutex
);
131 mtx_destroy(&qdws
->mutex
);
137 virgl_cache_list_check_free(struct virgl_drm_winsys
*qdws
)
139 struct list_head
*curr
, *next
;
140 struct virgl_hw_res
*res
;
144 curr
= qdws
->delayed
.next
;
146 while (curr
!= &qdws
->delayed
) {
147 res
= LIST_ENTRY(struct virgl_hw_res
, curr
, head
);
148 if (!os_time_timeout(res
->start
, res
->end
, now
))
151 LIST_DEL(&res
->head
);
152 virgl_hw_res_destroy(qdws
, res
);
158 static void virgl_drm_resource_reference(struct virgl_drm_winsys
*qdws
,
159 struct virgl_hw_res
**dres
,
160 struct virgl_hw_res
*sres
)
162 struct virgl_hw_res
*old
= *dres
;
163 if (pipe_reference(&(*dres
)->reference
, &sres
->reference
)) {
165 if (!can_cache_resource(old
)) {
166 virgl_hw_res_destroy(qdws
, old
);
168 mtx_lock(&qdws
->mutex
);
169 virgl_cache_list_check_free(qdws
);
171 old
->start
= os_time_get();
172 old
->end
= old
->start
+ qdws
->usecs
;
173 LIST_ADDTAIL(&old
->head
, &qdws
->delayed
);
175 mtx_unlock(&qdws
->mutex
);
181 static struct virgl_hw_res
*
182 virgl_drm_winsys_resource_create(struct virgl_winsys
*qws
,
183 enum pipe_texture_target target
,
194 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
195 struct drm_virtgpu_resource_create createcmd
;
197 struct virgl_hw_res
*res
;
198 uint32_t stride
= width
* util_format_get_blocksize(format
);
200 res
= CALLOC_STRUCT(virgl_hw_res
);
204 memset(&createcmd
, 0, sizeof(createcmd
));
205 createcmd
.target
= target
;
206 createcmd
.format
= format
;
207 createcmd
.bind
= bind
;
208 createcmd
.width
= width
;
209 createcmd
.height
= height
;
210 createcmd
.depth
= depth
;
211 createcmd
.array_size
= array_size
;
212 createcmd
.last_level
= last_level
;
213 createcmd
.nr_samples
= nr_samples
;
214 createcmd
.stride
= stride
;
215 createcmd
.size
= size
;
217 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE
, &createcmd
);
224 res
->format
= format
;
226 res
->res_handle
= createcmd
.res_handle
;
227 res
->bo_handle
= createcmd
.bo_handle
;
229 res
->stride
= stride
;
230 pipe_reference_init(&res
->reference
, 1);
231 p_atomic_set(&res
->num_cs_references
, 0);
235 static inline int virgl_is_res_compat(struct virgl_drm_winsys
*qdws
,
236 struct virgl_hw_res
*res
,
237 uint32_t size
, uint32_t bind
,
240 if (res
->bind
!= bind
)
242 if (res
->format
!= format
)
244 if (res
->size
< size
)
246 if (res
->size
> size
* 2)
249 if (virgl_drm_resource_is_busy(qdws
, res
)) {
257 virgl_bo_transfer_put(struct virgl_winsys
*vws
,
258 struct virgl_hw_res
*res
,
259 const struct pipe_box
*box
,
260 uint32_t stride
, uint32_t layer_stride
,
261 uint32_t buf_offset
, uint32_t level
)
263 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
264 struct drm_virtgpu_3d_transfer_to_host tohostcmd
;
266 memset(&tohostcmd
, 0, sizeof(tohostcmd
));
267 tohostcmd
.bo_handle
= res
->bo_handle
;
268 tohostcmd
.box
.x
= box
->x
;
269 tohostcmd
.box
.y
= box
->y
;
270 tohostcmd
.box
.z
= box
->z
;
271 tohostcmd
.box
.w
= box
->width
;
272 tohostcmd
.box
.h
= box
->height
;
273 tohostcmd
.box
.d
= box
->depth
;
274 tohostcmd
.offset
= buf_offset
;
275 tohostcmd
.level
= level
;
276 // tohostcmd.stride = stride;
277 // tohostcmd.layer_stride = stride;
278 return drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST
, &tohostcmd
);
282 virgl_bo_transfer_get(struct virgl_winsys
*vws
,
283 struct virgl_hw_res
*res
,
284 const struct pipe_box
*box
,
285 uint32_t stride
, uint32_t layer_stride
,
286 uint32_t buf_offset
, uint32_t level
)
288 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
289 struct drm_virtgpu_3d_transfer_from_host fromhostcmd
;
291 memset(&fromhostcmd
, 0, sizeof(fromhostcmd
));
292 fromhostcmd
.bo_handle
= res
->bo_handle
;
293 fromhostcmd
.level
= level
;
294 fromhostcmd
.offset
= buf_offset
;
295 // fromhostcmd.stride = stride;
296 // fromhostcmd.layer_stride = layer_stride;
297 fromhostcmd
.box
.x
= box
->x
;
298 fromhostcmd
.box
.y
= box
->y
;
299 fromhostcmd
.box
.z
= box
->z
;
300 fromhostcmd
.box
.w
= box
->width
;
301 fromhostcmd
.box
.h
= box
->height
;
302 fromhostcmd
.box
.d
= box
->depth
;
303 return drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST
, &fromhostcmd
);
306 static struct virgl_hw_res
*
307 virgl_drm_winsys_resource_cache_create(struct virgl_winsys
*qws
,
308 enum pipe_texture_target target
,
319 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
320 struct virgl_hw_res
*res
, *curr_res
;
321 struct list_head
*curr
, *next
;
325 /* only store binds for vertex/index/const buffers */
326 if (bind
!= VIRGL_BIND_CONSTANT_BUFFER
&& bind
!= VIRGL_BIND_INDEX_BUFFER
&&
327 bind
!= VIRGL_BIND_VERTEX_BUFFER
&& bind
!= VIRGL_BIND_CUSTOM
)
330 mtx_lock(&qdws
->mutex
);
333 curr
= qdws
->delayed
.next
;
337 while (curr
!= &qdws
->delayed
) {
338 curr_res
= LIST_ENTRY(struct virgl_hw_res
, curr
, head
);
340 if (!res
&& ((ret
= virgl_is_res_compat(qdws
, curr_res
, size
, bind
, format
)) > 0))
342 else if (os_time_timeout(curr_res
->start
, curr_res
->end
, now
)) {
343 LIST_DEL(&curr_res
->head
);
344 virgl_hw_res_destroy(qdws
, curr_res
);
355 if (!res
&& ret
!= -1) {
356 while (curr
!= &qdws
->delayed
) {
357 curr_res
= LIST_ENTRY(struct virgl_hw_res
, curr
, head
);
358 ret
= virgl_is_res_compat(qdws
, curr_res
, size
, bind
, format
);
371 LIST_DEL(&res
->head
);
373 mtx_unlock(&qdws
->mutex
);
374 pipe_reference_init(&res
->reference
, 1);
378 mtx_unlock(&qdws
->mutex
);
381 res
= virgl_drm_winsys_resource_create(qws
, target
, format
, bind
,
382 width
, height
, depth
, array_size
,
383 last_level
, nr_samples
, size
);
384 if (bind
== VIRGL_BIND_CONSTANT_BUFFER
|| bind
== VIRGL_BIND_INDEX_BUFFER
||
385 bind
== VIRGL_BIND_VERTEX_BUFFER
)
386 res
->cacheable
= TRUE
;
390 static struct virgl_hw_res
*
391 virgl_drm_winsys_resource_create_handle(struct virgl_winsys
*qws
,
392 struct winsys_handle
*whandle
)
394 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
395 struct drm_gem_open open_arg
= {};
396 struct drm_virtgpu_resource_info info_arg
= {};
397 struct virgl_hw_res
*res
;
398 uint32_t handle
= whandle
->handle
;
400 if (whandle
->offset
!= 0) {
401 fprintf(stderr
, "attempt to import unsupported winsys offset %u\n",
406 mtx_lock(&qdws
->bo_handles_mutex
);
408 if (whandle
->type
== WINSYS_HANDLE_TYPE_SHARED
) {
409 res
= util_hash_table_get(qdws
->bo_names
, (void*)(uintptr_t)handle
);
411 struct virgl_hw_res
*r
= NULL
;
412 virgl_drm_resource_reference(qdws
, &r
, res
);
417 if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
419 r
= drmPrimeFDToHandle(qdws
->fd
, whandle
->handle
, &handle
);
426 res
= util_hash_table_get(qdws
->bo_handles
, (void*)(uintptr_t)handle
);
428 struct virgl_hw_res
*r
= NULL
;
429 virgl_drm_resource_reference(qdws
, &r
, res
);
433 res
= CALLOC_STRUCT(virgl_hw_res
);
437 if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
438 res
->bo_handle
= handle
;
440 memset(&open_arg
, 0, sizeof(open_arg
));
441 open_arg
.name
= whandle
->handle
;
442 if (drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_OPEN
, &open_arg
)) {
447 res
->bo_handle
= open_arg
.handle
;
451 memset(&info_arg
, 0, sizeof(info_arg
));
452 info_arg
.bo_handle
= res
->bo_handle
;
454 if (drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_RESOURCE_INFO
, &info_arg
)) {
461 res
->res_handle
= info_arg
.res_handle
;
463 res
->size
= info_arg
.size
;
464 res
->stride
= info_arg
.stride
;
465 pipe_reference_init(&res
->reference
, 1);
466 res
->num_cs_references
= 0;
468 util_hash_table_set(qdws
->bo_handles
, (void *)(uintptr_t)handle
, res
);
471 mtx_unlock(&qdws
->bo_handles_mutex
);
475 static boolean
virgl_drm_winsys_resource_get_handle(struct virgl_winsys
*qws
,
476 struct virgl_hw_res
*res
,
478 struct winsys_handle
*whandle
)
480 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
481 struct drm_gem_flink flink
;
486 if (whandle
->type
== WINSYS_HANDLE_TYPE_SHARED
) {
488 memset(&flink
, 0, sizeof(flink
));
489 flink
.handle
= res
->bo_handle
;
491 if (drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_FLINK
, &flink
)) {
495 res
->flink
= flink
.name
;
497 mtx_lock(&qdws
->bo_handles_mutex
);
498 util_hash_table_set(qdws
->bo_names
, (void *)(uintptr_t)res
->flink
, res
);
499 mtx_unlock(&qdws
->bo_handles_mutex
);
501 whandle
->handle
= res
->flink
;
502 } else if (whandle
->type
== WINSYS_HANDLE_TYPE_KMS
) {
503 whandle
->handle
= res
->bo_handle
;
504 } else if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
505 if (drmPrimeHandleToFD(qdws
->fd
, res
->bo_handle
, DRM_CLOEXEC
, (int*)&whandle
->handle
))
507 mtx_lock(&qdws
->bo_handles_mutex
);
508 util_hash_table_set(qdws
->bo_handles
, (void *)(uintptr_t)res
->bo_handle
, res
);
509 mtx_unlock(&qdws
->bo_handles_mutex
);
511 whandle
->stride
= stride
;
515 static void virgl_drm_winsys_resource_unref(struct virgl_winsys
*qws
,
516 struct virgl_hw_res
*hres
)
518 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
520 virgl_drm_resource_reference(qdws
, &hres
, NULL
);
523 static void *virgl_drm_resource_map(struct virgl_winsys
*qws
,
524 struct virgl_hw_res
*res
)
526 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
527 struct drm_virtgpu_map mmap_arg
;
533 memset(&mmap_arg
, 0, sizeof(mmap_arg
));
534 mmap_arg
.handle
= res
->bo_handle
;
535 if (drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_MAP
, &mmap_arg
))
538 ptr
= os_mmap(0, res
->size
, PROT_READ
|PROT_WRITE
, MAP_SHARED
,
539 qdws
->fd
, mmap_arg
.offset
);
540 if (ptr
== MAP_FAILED
)
548 static void virgl_drm_resource_wait(struct virgl_winsys
*qws
,
549 struct virgl_hw_res
*res
)
551 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
552 struct drm_virtgpu_3d_wait waitcmd
;
555 memset(&waitcmd
, 0, sizeof(waitcmd
));
556 waitcmd
.handle
= res
->bo_handle
;
558 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_WAIT
, &waitcmd
);
563 static boolean
virgl_drm_lookup_res(struct virgl_drm_cmd_buf
*cbuf
,
564 struct virgl_hw_res
*res
)
566 unsigned hash
= res
->res_handle
& (sizeof(cbuf
->is_handle_added
)-1);
569 if (cbuf
->is_handle_added
[hash
]) {
570 i
= cbuf
->reloc_indices_hashlist
[hash
];
571 if (cbuf
->res_bo
[i
] == res
)
574 for (i
= 0; i
< cbuf
->cres
; i
++) {
575 if (cbuf
->res_bo
[i
] == res
) {
576 cbuf
->reloc_indices_hashlist
[hash
] = i
;
584 static void virgl_drm_add_res(struct virgl_drm_winsys
*qdws
,
585 struct virgl_drm_cmd_buf
*cbuf
,
586 struct virgl_hw_res
*res
)
588 unsigned hash
= res
->res_handle
& (sizeof(cbuf
->is_handle_added
)-1);
590 if (cbuf
->cres
>= cbuf
->nres
) {
591 unsigned new_nres
= cbuf
->nres
+ 256;
592 void *new_ptr
= REALLOC(cbuf
->res_bo
,
593 cbuf
->nres
* sizeof(struct virgl_hw_buf
*),
594 new_nres
* sizeof(struct virgl_hw_buf
*));
596 fprintf(stderr
,"failure to add relocation %d, %d\n", cbuf
->cres
, new_nres
);
599 cbuf
->res_bo
= new_ptr
;
601 new_ptr
= REALLOC(cbuf
->res_hlist
,
602 cbuf
->nres
* sizeof(uint32_t),
603 new_nres
* sizeof(uint32_t));
605 fprintf(stderr
,"failure to add hlist relocation %d, %d\n", cbuf
->cres
, cbuf
->nres
);
608 cbuf
->res_hlist
= new_ptr
;
609 cbuf
->nres
= new_nres
;
612 cbuf
->res_bo
[cbuf
->cres
] = NULL
;
613 virgl_drm_resource_reference(qdws
, &cbuf
->res_bo
[cbuf
->cres
], res
);
614 cbuf
->res_hlist
[cbuf
->cres
] = res
->bo_handle
;
615 cbuf
->is_handle_added
[hash
] = TRUE
;
617 cbuf
->reloc_indices_hashlist
[hash
] = cbuf
->cres
;
618 p_atomic_inc(&res
->num_cs_references
);
622 static void virgl_drm_release_all_res(struct virgl_drm_winsys
*qdws
,
623 struct virgl_drm_cmd_buf
*cbuf
)
627 for (i
= 0; i
< cbuf
->cres
; i
++) {
628 p_atomic_dec(&cbuf
->res_bo
[i
]->num_cs_references
);
629 virgl_drm_resource_reference(qdws
, &cbuf
->res_bo
[i
], NULL
);
634 static void virgl_drm_emit_res(struct virgl_winsys
*qws
,
635 struct virgl_cmd_buf
*_cbuf
,
636 struct virgl_hw_res
*res
, boolean write_buf
)
638 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
639 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
640 boolean already_in_list
= virgl_drm_lookup_res(cbuf
, res
);
643 cbuf
->base
.buf
[cbuf
->base
.cdw
++] = res
->res_handle
;
645 if (!already_in_list
)
646 virgl_drm_add_res(qdws
, cbuf
, res
);
649 static boolean
virgl_drm_res_is_ref(struct virgl_winsys
*qws
,
650 struct virgl_cmd_buf
*_cbuf
,
651 struct virgl_hw_res
*res
)
653 if (!p_atomic_read(&res
->num_cs_references
))
659 static struct virgl_cmd_buf
*virgl_drm_cmd_buf_create(struct virgl_winsys
*qws
,
662 struct virgl_drm_cmd_buf
*cbuf
;
664 cbuf
= CALLOC_STRUCT(virgl_drm_cmd_buf
);
671 cbuf
->res_bo
= CALLOC(cbuf
->nres
, sizeof(struct virgl_hw_buf
*));
676 cbuf
->res_hlist
= MALLOC(cbuf
->nres
* sizeof(uint32_t));
677 if (!cbuf
->res_hlist
) {
683 cbuf
->buf
= CALLOC(size
, sizeof(uint32_t));
685 FREE(cbuf
->res_hlist
);
691 cbuf
->in_fence_fd
= -1;
692 cbuf
->base
.buf
= cbuf
->buf
;
696 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf
*_cbuf
)
698 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
700 virgl_drm_release_all_res(virgl_drm_winsys(cbuf
->ws
), cbuf
);
701 FREE(cbuf
->res_hlist
);
708 static struct pipe_fence_handle
*
709 virgl_drm_fence_create(struct virgl_winsys
*vws
, int fd
, bool external
)
711 struct virgl_drm_fence
*fence
;
713 assert(vws
->supports_fences
);
721 fence
= CALLOC_STRUCT(virgl_drm_fence
);
728 fence
->external
= external
;
730 pipe_reference_init(&fence
->reference
, 1);
732 return (struct pipe_fence_handle
*)fence
;
735 static struct pipe_fence_handle
*
736 virgl_drm_fence_create_legacy(struct virgl_winsys
*vws
)
738 struct virgl_drm_fence
*fence
;
740 assert(!vws
->supports_fences
);
742 fence
= CALLOC_STRUCT(virgl_drm_fence
);
747 fence
->hw_res
= virgl_drm_winsys_resource_cache_create(vws
, PIPE_BUFFER
,
748 PIPE_FORMAT_R8_UNORM
, VIRGL_BIND_CUSTOM
, 8, 1, 1, 0, 0, 0, 8);
749 if (!fence
->hw_res
) {
754 pipe_reference_init(&fence
->reference
, 1);
756 return (struct pipe_fence_handle
*)fence
;
759 static int virgl_drm_winsys_submit_cmd(struct virgl_winsys
*qws
,
760 struct virgl_cmd_buf
*_cbuf
,
761 struct pipe_fence_handle
**fence
)
763 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
764 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
765 struct drm_virtgpu_execbuffer eb
;
768 if (cbuf
->base
.cdw
== 0)
771 memset(&eb
, 0, sizeof(struct drm_virtgpu_execbuffer
));
772 eb
.command
= (unsigned long)(void*)cbuf
->buf
;
773 eb
.size
= cbuf
->base
.cdw
* 4;
774 eb
.num_bo_handles
= cbuf
->cres
;
775 eb
.bo_handles
= (unsigned long)(void *)cbuf
->res_hlist
;
778 if (qws
->supports_fences
) {
779 if (cbuf
->in_fence_fd
>= 0) {
780 eb
.flags
|= VIRTGPU_EXECBUF_FENCE_FD_IN
;
781 eb
.fence_fd
= cbuf
->in_fence_fd
;
785 eb
.flags
|= VIRTGPU_EXECBUF_FENCE_FD_OUT
;
787 assert(cbuf
->in_fence_fd
< 0);
790 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_EXECBUFFER
, &eb
);
792 fprintf(stderr
,"got error from kernel - expect bad rendering %d\n", errno
);
795 if (qws
->supports_fences
) {
796 if (cbuf
->in_fence_fd
>= 0) {
797 close(cbuf
->in_fence_fd
);
798 cbuf
->in_fence_fd
= -1;
801 if (fence
!= NULL
&& ret
== 0)
802 *fence
= virgl_drm_fence_create(qws
, eb
.fence_fd
, false);
804 if (fence
!= NULL
&& ret
== 0)
805 *fence
= virgl_drm_fence_create_legacy(qws
);
808 virgl_drm_release_all_res(qdws
, cbuf
);
810 memset(cbuf
->is_handle_added
, 0, sizeof(cbuf
->is_handle_added
));
814 static int virgl_drm_get_caps(struct virgl_winsys
*vws
,
815 struct virgl_drm_caps
*caps
)
817 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
818 struct drm_virtgpu_get_caps args
;
821 virgl_ws_fill_new_caps_defaults(caps
);
823 memset(&args
, 0, sizeof(args
));
824 if (vdws
->has_capset_query_fix
) {
825 /* if we have the query fix - try and get cap set id 2 first */
827 args
.size
= sizeof(union virgl_caps
);
830 args
.size
= sizeof(struct virgl_caps_v1
);
832 args
.addr
= (unsigned long)&caps
->caps
;
834 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_GET_CAPS
, &args
);
835 if (ret
== -1 && errno
== EINVAL
) {
838 args
.size
= sizeof(struct virgl_caps_v1
);
839 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_GET_CAPS
, &args
);
846 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
/* Hash callback for the handle/name tables: the key is the handle itself. */
static unsigned handle_hash(void *key)
{
   return PTR_TO_UINT(key);
}
/* Comparison callback: 0 when equal, non-zero otherwise (qsort-style). */
static int handle_compare(void *key1, void *key2)
{
   return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
858 static struct pipe_fence_handle
*
859 virgl_cs_create_fence(struct virgl_winsys
*vws
, int fd
)
861 if (!vws
->supports_fences
)
864 return virgl_drm_fence_create(vws
, fd
, true);
867 static bool virgl_fence_wait(struct virgl_winsys
*vws
,
868 struct pipe_fence_handle
*_fence
,
871 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
872 struct virgl_drm_fence
*fence
= virgl_drm_fence(_fence
);
874 if (vws
->supports_fences
) {
879 return sync_wait(fence
->fd
, 0) == 0;
881 timeout_ms
= timeout
/ 1000000;
883 if (timeout_ms
* 1000000 < timeout
)
886 timeout_poll
= timeout_ms
<= INT_MAX
? (int) timeout_ms
: -1;
888 return sync_wait(fence
->fd
, timeout_poll
) == 0;
892 return !virgl_drm_resource_is_busy(vdws
, fence
->hw_res
);
894 if (timeout
!= PIPE_TIMEOUT_INFINITE
) {
895 int64_t start_time
= os_time_get();
897 while (virgl_drm_resource_is_busy(vdws
, fence
->hw_res
)) {
898 if (os_time_get() - start_time
>= timeout
)
904 virgl_drm_resource_wait(vws
, fence
->hw_res
);
909 static void virgl_fence_reference(struct virgl_winsys
*vws
,
910 struct pipe_fence_handle
**dst
,
911 struct pipe_fence_handle
*src
)
913 struct virgl_drm_fence
*dfence
= virgl_drm_fence(*dst
);
914 struct virgl_drm_fence
*sfence
= virgl_drm_fence(src
);
916 if (pipe_reference(&dfence
->reference
, &sfence
->reference
)) {
917 if (vws
->supports_fences
) {
920 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
921 virgl_hw_res_destroy(vdws
, dfence
->hw_res
);
929 static void virgl_fence_server_sync(struct virgl_winsys
*vws
,
930 struct virgl_cmd_buf
*_cbuf
,
931 struct pipe_fence_handle
*_fence
)
933 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
934 struct virgl_drm_fence
*fence
= virgl_drm_fence(_fence
);
936 if (!vws
->supports_fences
)
939 /* if not an external fence, then nothing more to do without preemption: */
940 if (!fence
->external
)
943 sync_accumulate("virgl", &cbuf
->in_fence_fd
, fence
->fd
);
946 static int virgl_fence_get_fd(struct virgl_winsys
*vws
,
947 struct pipe_fence_handle
*_fence
)
949 struct virgl_drm_fence
*fence
= virgl_drm_fence(_fence
);
951 if (!vws
->supports_fences
)
954 return dup(fence
->fd
);
957 static int virgl_drm_get_version(int fd
)
960 drmVersionPtr version
;
962 version
= drmGetVersion(fd
);
966 else if (version
->version_major
!= 0)
969 ret
= VIRGL_DRM_VERSION(0, version
->version_minor
);
971 drmFreeVersion(version
);
976 static struct virgl_winsys
*
977 virgl_drm_winsys_create(int drmFD
)
979 struct virgl_drm_winsys
*qdws
;
983 struct drm_virtgpu_getparam getparam
= {0};
985 getparam
.param
= VIRTGPU_PARAM_3D_FEATURES
;
986 getparam
.value
= (uint64_t)(uintptr_t)&gl
;
987 ret
= drmIoctl(drmFD
, DRM_IOCTL_VIRTGPU_GETPARAM
, &getparam
);
991 drm_version
= virgl_drm_get_version(drmFD
);
995 qdws
= CALLOC_STRUCT(virgl_drm_winsys
);
1000 qdws
->num_delayed
= 0;
1001 qdws
->usecs
= 1000000;
1002 LIST_INITHEAD(&qdws
->delayed
);
1003 (void) mtx_init(&qdws
->mutex
, mtx_plain
);
1004 (void) mtx_init(&qdws
->bo_handles_mutex
, mtx_plain
);
1005 qdws
->bo_handles
= util_hash_table_create(handle_hash
, handle_compare
);
1006 qdws
->bo_names
= util_hash_table_create(handle_hash
, handle_compare
);
1007 qdws
->base
.destroy
= virgl_drm_winsys_destroy
;
1009 qdws
->base
.transfer_put
= virgl_bo_transfer_put
;
1010 qdws
->base
.transfer_get
= virgl_bo_transfer_get
;
1011 qdws
->base
.resource_create
= virgl_drm_winsys_resource_cache_create
;
1012 qdws
->base
.resource_unref
= virgl_drm_winsys_resource_unref
;
1013 qdws
->base
.resource_create_from_handle
= virgl_drm_winsys_resource_create_handle
;
1014 qdws
->base
.resource_get_handle
= virgl_drm_winsys_resource_get_handle
;
1015 qdws
->base
.resource_map
= virgl_drm_resource_map
;
1016 qdws
->base
.resource_wait
= virgl_drm_resource_wait
;
1017 qdws
->base
.cmd_buf_create
= virgl_drm_cmd_buf_create
;
1018 qdws
->base
.cmd_buf_destroy
= virgl_drm_cmd_buf_destroy
;
1019 qdws
->base
.submit_cmd
= virgl_drm_winsys_submit_cmd
;
1020 qdws
->base
.emit_res
= virgl_drm_emit_res
;
1021 qdws
->base
.res_is_referenced
= virgl_drm_res_is_ref
;
1023 qdws
->base
.cs_create_fence
= virgl_cs_create_fence
;
1024 qdws
->base
.fence_wait
= virgl_fence_wait
;
1025 qdws
->base
.fence_reference
= virgl_fence_reference
;
1026 qdws
->base
.fence_server_sync
= virgl_fence_server_sync
;
1027 qdws
->base
.fence_get_fd
= virgl_fence_get_fd
;
1028 qdws
->base
.supports_fences
= drm_version
>= VIRGL_DRM_VERSION_FENCE_FD
;
1029 qdws
->base
.supports_encoded_transfers
= 1;
1031 qdws
->base
.get_caps
= virgl_drm_get_caps
;
1034 getparam
.param
= VIRTGPU_PARAM_CAPSET_QUERY_FIX
;
1035 getparam
.value
= (uint64_t)(uintptr_t)&value
;
1036 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_GETPARAM
, &getparam
);
1039 qdws
->has_capset_query_fix
= true;
1046 static struct util_hash_table
*fd_tab
= NULL
;
1047 static mtx_t virgl_screen_mutex
= _MTX_INITIALIZER_NP
;
1050 virgl_drm_screen_destroy(struct pipe_screen
*pscreen
)
1052 struct virgl_screen
*screen
= virgl_screen(pscreen
);
1055 mtx_lock(&virgl_screen_mutex
);
1056 destroy
= --screen
->refcnt
== 0;
1058 int fd
= virgl_drm_winsys(screen
->vws
)->fd
;
1059 util_hash_table_remove(fd_tab
, intptr_to_pointer(fd
));
1062 mtx_unlock(&virgl_screen_mutex
);
1065 pscreen
->destroy
= screen
->winsys_priv
;
1066 pscreen
->destroy(pscreen
);
/* Hash an fd by the identity of the underlying file (device/inode/rdev),
 * so dup'ed fds of the same DRM device hash equal. */
static unsigned hash_fd(void *key)
{
   int fd = pointer_to_intptr(key);
   struct stat stat;

   fstat(fd, &stat);
   return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
}
/* Two fds compare equal (return 0) when they refer to the same underlying
 * file: same device, inode, and rdev. */
static int compare_fd(void *key1, void *key2)
{
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);
   struct stat stat1, stat2;

   fstat(fd1, &stat1);
   fstat(fd2, &stat2);

   return stat1.st_dev != stat2.st_dev ||
          stat1.st_ino != stat2.st_ino ||
          stat1.st_rdev != stat2.st_rdev;
}
1092 struct pipe_screen
*
1093 virgl_drm_screen_create(int fd
)
1095 struct pipe_screen
*pscreen
= NULL
;
1097 mtx_lock(&virgl_screen_mutex
);
1099 fd_tab
= util_hash_table_create(hash_fd
, compare_fd
);
1104 pscreen
= util_hash_table_get(fd_tab
, intptr_to_pointer(fd
));
1106 virgl_screen(pscreen
)->refcnt
++;
1108 struct virgl_winsys
*vws
;
1109 int dup_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1111 vws
= virgl_drm_winsys_create(dup_fd
);
1117 pscreen
= virgl_create_screen(vws
);
1119 util_hash_table_set(fd_tab
, intptr_to_pointer(dup_fd
), pscreen
);
1121 /* Bit of a hack, to avoid circular linkage dependency,
1122 * ie. pipe driver having to call in to winsys, we
1123 * override the pipe drivers screen->destroy():
1125 virgl_screen(pscreen
)->winsys_priv
= pscreen
->destroy
;
1126 pscreen
->destroy
= virgl_drm_screen_destroy
;
1131 mtx_unlock(&virgl_screen_mutex
);