/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
/* TODO - remove this */
#define _FILE_OFFSET_BITS 64

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "state_tracker/drm_driver.h"

#include "os/os_mman.h"
#include "os/os_time.h"

#include <sys/ioctl.h>
#include <errno.h>
#include <xf86drm.h>

#include "virtgpu_drm.h"
static inline boolean can_cache_resource(struct virgl_hw_res *res)
{
   return res->cacheable == TRUE;
}
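
/*
 * Free a hardware resource: drop it from the flink-name table, unmap any
 * CPU mapping, and release the GEM handle back to the kernel.
 */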
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   if (res->name) {
      pipe_mutex_lock(qdws->bo_handles_mutex);
      util_hash_table_remove(qdws->bo_handles,
                             (void *)(uintptr_t)res->name);
      pipe_mutex_unlock(qdws->bo_handles_mutex);
   }

   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}
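
/*
 * Poll whether the host is still using a resource: issue the wait ioctl
 * with VIRTGPU_WAIT_NOWAIT so the call returns immediately instead of
 * blocking, and treat EBUSY as "still in use".
 */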
static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws,
                                          struct virgl_hw_res *res)
{
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return TRUE;
   return FALSE;
}
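
/*
 * Empty the delayed-destroy cache, destroying every entry regardless of
 * its deadline. Used when the winsys itself is torn down.
 */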
static void virgl_cache_flush(struct virgl_drm_winsys *qdws)
{
   struct list_head *curr, *next;
   struct virgl_hw_res *res;

   pipe_mutex_lock(qdws->mutex);
   curr = qdws->delayed.next;
   next = curr->next;

   while (curr != &qdws->delayed) {
      res = LIST_ENTRY(struct virgl_hw_res, curr, head);
      LIST_DEL(&res->head);
      virgl_hw_res_destroy(qdws, res);
      curr = next;
      next = curr->next;
   }
   pipe_mutex_unlock(qdws->mutex);
}
static void virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_cache_flush(qdws);

   util_hash_table_destroy(qdws->bo_handles);
   pipe_mutex_destroy(qdws->bo_handles_mutex);
   pipe_mutex_destroy(qdws->mutex);
   FREE(qdws);
}
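
/*
 * Walk the delayed-destroy list (oldest entries first) and free every
 * cached resource whose eviction deadline has passed. The caller must
 * hold qdws->mutex.
 */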
static void virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
{
   struct list_head *curr, *next;
   struct virgl_hw_res *res;
   int64_t now;

   now = os_time_get();
   curr = qdws->delayed.next;
   next = curr->next;
   while (curr != &qdws->delayed) {
      res = LIST_ENTRY(struct virgl_hw_res, curr, head);
      if (!os_time_timeout(res->start, res->end, now))
         break;

      LIST_DEL(&res->head);
      virgl_hw_res_destroy(qdws, res);

      curr = next;
      next = curr->next;
   }
}
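
/*
 * Reference-counting helper: when the resource in *dres loses its last
 * reference it is either destroyed immediately or, if cacheable, parked
 * on the delayed list with a deadline qdws->usecs in the future so that
 * resource_cache_create() can recycle it.
 */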
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         pipe_mutex_lock(qdws->mutex);
         virgl_cache_list_check_free(qdws);

         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         pipe_mutex_unlock(qdws->mutex);
      }
   }
   *dres = sres;
}
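
/*
 * Allocate a new hardware resource. A single RESOURCE_CREATE ioctl creates
 * both the host-side renderer resource and the guest-side GEM buffer that
 * backs it; the kernel fills in a handle for each.
 */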
static struct virgl_hw_res *virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                                             enum pipe_texture_target target,
                                                             uint32_t format,
                                                             uint32_t bind,
                                                             uint32_t width,
                                                             uint32_t height,
                                                             uint32_t depth,
                                                             uint32_t array_size,
                                                             uint32_t last_level,
                                                             uint32_t nr_samples,
                                                             uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = format;
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.res_handle = 0;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;
   res->format = format;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->stride = stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;
   return res;
}
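
/*
 * Decide whether a cached resource can stand in for a new allocation.
 * Returns 1 on a usable match, 0 on a mismatch, and -1 if the candidate
 * matches but is still busy on the host; callers use -1 as a signal to
 * stop scanning the cache.
 */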
static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
                                      struct virgl_hw_res *res,
                                      uint32_t size, uint32_t bind, uint32_t format)
{
   if (res->bind != bind)
      return 0;
   if (res->format != format)
      return 0;
   if (res->size < size)
      return 0;
   if (res->size > size * 2)
      return 0;

   if (virgl_drm_resource_is_busy(qdws, res)) {
      return -1;
   }

   return 1;
}
static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;
   int ret;

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box = *(struct drm_virtgpu_3d_box *)box;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;
   // tohostcmd.stride = stride;
   // tohostcmd.layer_stride = layer_stride;
   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
   return ret;
}
static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;
   int ret;

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   // fromhostcmd.stride = stride;
   // fromhostcmd.layer_stride = layer_stride;
   fromhostcmd.box = *(struct drm_virtgpu_3d_box *)box;
   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
   return ret;
}
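
/*
 * Cached allocation path. For buffer-like binds, first try to recycle a
 * compatible resource from the delayed-destroy list, freeing expired
 * entries found along the way. On a miss, fall through to a fresh
 * allocation and mark it cacheable so it can be recycled in turn.
 */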
static struct virgl_hw_res *virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                                                   enum pipe_texture_target target,
                                                                   uint32_t format,
                                                                   uint32_t bind,
                                                                   uint32_t width,
                                                                   uint32_t height,
                                                                   uint32_t depth,
                                                                   uint32_t array_size,
                                                                   uint32_t last_level,
                                                                   uint32_t nr_samples,
                                                                   uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res, *curr_res;
   struct list_head *curr, *next;
   int64_t now;
   int ret = 0;

   /* only store binds for vertex/index/const buffers */
   if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
       bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
      goto alloc;

   pipe_mutex_lock(qdws->mutex);

   res = NULL;
   curr = qdws->delayed.next;
   next = curr->next;

   now = os_time_get();
   while (curr != &qdws->delayed) {
      curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);

      if (!res && (ret = virgl_is_res_compat(qdws, curr_res, size, bind, format)) > 0)
         res = curr_res;
      else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
         LIST_DEL(&curr_res->head);
         virgl_hw_res_destroy(qdws, curr_res);
      } else
         break;

      if (ret == -1)
         break;

      curr = next;
      next = curr->next;
   }

   if (!res && ret != -1) {
      while (curr != &qdws->delayed) {
         curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
         ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
         if (ret > 0) {
            res = curr_res;
            break;
         }
         if (ret == -1)
            break;
         curr = next;
         next = curr->next;
      }
   }

   if (res) {
      LIST_DEL(&res->head);
      --qdws->num_delayed;
      pipe_mutex_unlock(qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   pipe_mutex_unlock(qdws->mutex);

alloc:
   res = virgl_drm_winsys_resource_create(qws, target, format, bind,
                                          width, height, depth, array_size,
                                          last_level, nr_samples, size);
   if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
       bind == VIRGL_BIND_VERTEX_BUFFER)
      res->cacheable = TRUE;
   return res;
}
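
/*
 * Import a resource shared by another process, either from a flink name
 * (SHARED) or a dma-buf fd (FD). Flink-name imports are deduplicated
 * through the bo_handles table so the same name maps to one virgl_hw_res.
 */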
static struct virgl_hw_res *virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;

   pipe_mutex_lock(qdws->bo_handles_mutex);

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)whandle->handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      int r;
      uint32_t handle;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   res->name = whandle->handle;

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)whandle->handle, res);

done:
   pipe_mutex_unlock(qdws->bo_handles_mutex);
   return res;
}
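
/*
 * Export a resource to another process: hand out a flink name (created
 * lazily and cached in bo_handles), the raw GEM handle for KMS, or a
 * dma-buf fd.
 */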
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   memset(&flink, 0, sizeof(flink));

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      if (!res->flink) {
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flink = flink.name;

         pipe_mutex_lock(qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->flink, res);
         pipe_mutex_unlock(qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
   }
   whandle->stride = stride;
   return TRUE;
}
static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
                                            struct virgl_hw_res *hres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_drm_resource_reference(qdws, &hres, NULL);
}
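
/*
 * CPU-map a resource. Mapping is two-step: the VIRTGPU_MAP ioctl hands
 * back a fake mmap offset, which is then passed to mmap() on the DRM fd.
 * The mapping is cached on the resource and reused by later calls.
 */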
static void *virgl_drm_resource_map(struct virgl_winsys *qws, struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}
static void virgl_drm_resource_wait(struct virgl_winsys *qws, struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
 again:
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret == -EAGAIN)
      goto again;
}
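
/*
 * Command buffer handling. A virgl_drm_cmd_buf owns the dword stream plus
 * a fixed-size (nres) array of referenced resources and a parallel array
 * of their GEM handles, which is what the execbuffer ioctl consumes.
 */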
static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   cbuf->nres = 512;
   cbuf->res_bo = (struct virgl_hw_res **)
      CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->res_hlist = (uint32_t *)malloc(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}
static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = (struct virgl_drm_cmd_buf *)_cbuf;

   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
   FREE(cbuf);
}
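
/*
 * Relocation tracking. A small hash over the low bits of the resource
 * handle remembers the last slot a resource was stored in, so repeated
 * emits of the same resource avoid a linear scan of res_bo[] in the
 * common case.
 */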
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return TRUE;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return TRUE;
         }
      }
   }
   return FALSE;
}
static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf, struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      fprintf(stderr,"failure to add relocation\n");
      return;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}
static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
                                      struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
   }
   cbuf->cres = 0;
}
static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf, struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = (struct virgl_drm_cmd_buf *)_cbuf;
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}
static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct virgl_hw_res *res)
{
   if (!res->num_cs_references)
      return FALSE;

   return TRUE;
}
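
/*
 * Flush the accumulated command stream to the kernel. The execbuffer
 * ioctl takes the dword stream by pointer plus the list of GEM handles
 * the commands reference; afterwards the buffer is reset for reuse.
 */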
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws, struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = (struct virgl_drm_cmd_buf *)_cbuf;
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   virgl_drm_release_all_res(qdws, cbuf);

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
   return ret;
}
static int virgl_drm_get_caps(struct virgl_winsys *vws, struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   memset(&args, 0, sizeof(args));

   args.addr = (unsigned long)&caps->caps;
   args.size = sizeof(union virgl_caps);
   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   return ret;
}
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
   return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
   return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
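
/*
 * Fences are implemented as tiny dummy resources: the context inserts the
 * fence resource into the command stream, so it stays busy until the host
 * has processed everything before it, and waiting on the fence reduces to
 * waiting on that resource's busy state.
 */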
static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws)
{
   struct virgl_hw_res *res;

   res = virgl_drm_winsys_resource_cache_create(vws,
                                                PIPE_BUFFER,
                                                PIPE_FORMAT_R8_UNORM,
                                                VIRGL_BIND_CUSTOM,
                                                8, 1, 1, 0, 0, 0, 8);

   return (struct pipe_fence_handle *)res;
}
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *fence,
                             uint64_t timeout)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct virgl_hw_res *res = (struct virgl_hw_res *)fence;

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vdws, res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      /* timeout is in ns, os_time_get() in us */
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vdws, res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   virgl_drm_resource_wait(vws, res);
   return TRUE;
}
static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);

   virgl_drm_resource_reference(vdws, (struct virgl_hw_res **)dst,
                                (struct virgl_hw_res *)src);
}
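
/*
 * Entry point: wrap a virtgpu DRM fd in a virgl_winsys and wire up the
 * vtable. Cached resources linger for up to one second (usecs = 1000000)
 * before being reclaimed.
 */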
struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   struct virgl_drm_winsys *qdws;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   qdws->num_delayed = 0;
   qdws->usecs = 1000000;
   LIST_INITHEAD(&qdws->delayed);
   pipe_mutex_init(qdws->mutex);
   pipe_mutex_init(qdws->bo_handles_mutex);
   qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;

   qdws->base.get_caps = virgl_drm_get_caps;

   return &qdws->base;
}