/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>

#include "os/os_mman.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "state_tracker/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"

#include <xf86drm.h>
#include <libsync.h>
#include "virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"
/* Pack a DRM driver (major, minor) version pair into one comparable int. */
#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
/* First virtio-gpu DRM version that supports fence fds (0.1). */
#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)

/* Gets a pointer to the virgl_hw_res containing the pointed to cache entry. */
#define cache_entry_container_res(ptr) \
   (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))
56 static inline boolean
can_cache_resource_with_bind(uint32_t bind
)
58 return bind
== VIRGL_BIND_CONSTANT_BUFFER
||
59 bind
== VIRGL_BIND_INDEX_BUFFER
||
60 bind
== VIRGL_BIND_VERTEX_BUFFER
||
61 bind
== VIRGL_BIND_CUSTOM
||
62 bind
== VIRGL_BIND_STAGING
;
65 static void virgl_hw_res_destroy(struct virgl_drm_winsys
*qdws
,
66 struct virgl_hw_res
*res
)
68 struct drm_gem_close args
;
70 mtx_lock(&qdws
->bo_handles_mutex
);
71 util_hash_table_remove(qdws
->bo_handles
,
72 (void *)(uintptr_t)res
->bo_handle
);
74 util_hash_table_remove(qdws
->bo_names
,
75 (void *)(uintptr_t)res
->flink_name
);
76 mtx_unlock(&qdws
->bo_handles_mutex
);
78 os_munmap(res
->ptr
, res
->size
);
80 memset(&args
, 0, sizeof(args
));
81 args
.handle
= res
->bo_handle
;
82 drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_CLOSE
, &args
);
86 static boolean
virgl_drm_resource_is_busy(struct virgl_winsys
*vws
,
87 struct virgl_hw_res
*res
)
89 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
90 struct drm_virtgpu_3d_wait waitcmd
;
93 if (!p_atomic_read(&res
->maybe_busy
) && !p_atomic_read(&res
->external
))
96 memset(&waitcmd
, 0, sizeof(waitcmd
));
97 waitcmd
.handle
= res
->bo_handle
;
98 waitcmd
.flags
= VIRTGPU_WAIT_NOWAIT
;
100 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_WAIT
, &waitcmd
);
101 if (ret
&& errno
== EBUSY
)
104 p_atomic_set(&res
->maybe_busy
, false);
110 virgl_drm_winsys_destroy(struct virgl_winsys
*qws
)
112 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
114 virgl_resource_cache_flush(&qdws
->cache
);
116 util_hash_table_destroy(qdws
->bo_handles
);
117 util_hash_table_destroy(qdws
->bo_names
);
118 mtx_destroy(&qdws
->bo_handles_mutex
);
119 mtx_destroy(&qdws
->mutex
);
124 static void virgl_drm_resource_reference(struct virgl_winsys
*qws
,
125 struct virgl_hw_res
**dres
,
126 struct virgl_hw_res
*sres
)
128 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
129 struct virgl_hw_res
*old
= *dres
;
131 if (pipe_reference(&(*dres
)->reference
, &sres
->reference
)) {
133 if (!can_cache_resource_with_bind(old
->bind
) ||
134 p_atomic_read(&old
->external
)) {
135 virgl_hw_res_destroy(qdws
, old
);
137 mtx_lock(&qdws
->mutex
);
138 virgl_resource_cache_add(&qdws
->cache
, &old
->cache_entry
);
139 mtx_unlock(&qdws
->mutex
);
145 static struct virgl_hw_res
*
146 virgl_drm_winsys_resource_create(struct virgl_winsys
*qws
,
147 enum pipe_texture_target target
,
159 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
160 struct drm_virtgpu_resource_create createcmd
;
162 struct virgl_hw_res
*res
;
163 uint32_t stride
= width
* util_format_get_blocksize(format
);
165 res
= CALLOC_STRUCT(virgl_hw_res
);
169 memset(&createcmd
, 0, sizeof(createcmd
));
170 createcmd
.target
= target
;
171 createcmd
.format
= format
;
172 createcmd
.bind
= bind
;
173 createcmd
.width
= width
;
174 createcmd
.height
= height
;
175 createcmd
.depth
= depth
;
176 createcmd
.array_size
= array_size
;
177 createcmd
.last_level
= last_level
;
178 createcmd
.nr_samples
= nr_samples
;
179 createcmd
.stride
= stride
;
180 createcmd
.size
= size
;
182 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE
, &createcmd
);
190 res
->res_handle
= createcmd
.res_handle
;
191 res
->bo_handle
= createcmd
.bo_handle
;
193 res
->stride
= stride
;
194 pipe_reference_init(&res
->reference
, 1);
195 p_atomic_set(&res
->external
, false);
196 p_atomic_set(&res
->num_cs_references
, 0);
198 /* A newly created resource is considered busy by the kernel until the
199 * command is retired. But for our purposes, we can consider it idle
200 * unless it is used for fencing.
202 p_atomic_set(&res
->maybe_busy
, for_fencing
);
204 virgl_resource_cache_entry_init(&res
->cache_entry
, size
, bind
, format
);
210 virgl_bo_transfer_put(struct virgl_winsys
*vws
,
211 struct virgl_hw_res
*res
,
212 const struct pipe_box
*box
,
213 uint32_t stride
, uint32_t layer_stride
,
214 uint32_t buf_offset
, uint32_t level
)
216 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
217 struct drm_virtgpu_3d_transfer_to_host tohostcmd
;
219 p_atomic_set(&res
->maybe_busy
, true);
221 memset(&tohostcmd
, 0, sizeof(tohostcmd
));
222 tohostcmd
.bo_handle
= res
->bo_handle
;
223 tohostcmd
.box
.x
= box
->x
;
224 tohostcmd
.box
.y
= box
->y
;
225 tohostcmd
.box
.z
= box
->z
;
226 tohostcmd
.box
.w
= box
->width
;
227 tohostcmd
.box
.h
= box
->height
;
228 tohostcmd
.box
.d
= box
->depth
;
229 tohostcmd
.offset
= buf_offset
;
230 tohostcmd
.level
= level
;
231 // tohostcmd.stride = stride;
232 // tohostcmd.layer_stride = stride;
233 return drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST
, &tohostcmd
);
237 virgl_bo_transfer_get(struct virgl_winsys
*vws
,
238 struct virgl_hw_res
*res
,
239 const struct pipe_box
*box
,
240 uint32_t stride
, uint32_t layer_stride
,
241 uint32_t buf_offset
, uint32_t level
)
243 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
244 struct drm_virtgpu_3d_transfer_from_host fromhostcmd
;
246 p_atomic_set(&res
->maybe_busy
, true);
248 memset(&fromhostcmd
, 0, sizeof(fromhostcmd
));
249 fromhostcmd
.bo_handle
= res
->bo_handle
;
250 fromhostcmd
.level
= level
;
251 fromhostcmd
.offset
= buf_offset
;
252 // fromhostcmd.stride = stride;
253 // fromhostcmd.layer_stride = layer_stride;
254 fromhostcmd
.box
.x
= box
->x
;
255 fromhostcmd
.box
.y
= box
->y
;
256 fromhostcmd
.box
.z
= box
->z
;
257 fromhostcmd
.box
.w
= box
->width
;
258 fromhostcmd
.box
.h
= box
->height
;
259 fromhostcmd
.box
.d
= box
->depth
;
260 return drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST
, &fromhostcmd
);
263 static struct virgl_hw_res
*
264 virgl_drm_winsys_resource_cache_create(struct virgl_winsys
*qws
,
265 enum pipe_texture_target target
,
276 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
277 struct virgl_hw_res
*res
;
278 struct virgl_resource_cache_entry
*entry
;
280 if (!can_cache_resource_with_bind(bind
))
283 mtx_lock(&qdws
->mutex
);
285 entry
= virgl_resource_cache_remove_compatible(&qdws
->cache
, size
,
288 res
= cache_entry_container_res(entry
);
289 mtx_unlock(&qdws
->mutex
);
290 pipe_reference_init(&res
->reference
, 1);
294 mtx_unlock(&qdws
->mutex
);
297 res
= virgl_drm_winsys_resource_create(qws
, target
, format
, bind
,
298 width
, height
, depth
, array_size
,
299 last_level
, nr_samples
, size
, false);
303 static struct virgl_hw_res
*
304 virgl_drm_winsys_resource_create_handle(struct virgl_winsys
*qws
,
305 struct winsys_handle
*whandle
)
307 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
308 struct drm_gem_open open_arg
= {};
309 struct drm_virtgpu_resource_info info_arg
= {};
310 struct virgl_hw_res
*res
= NULL
;
311 uint32_t handle
= whandle
->handle
;
313 if (whandle
->offset
!= 0) {
314 fprintf(stderr
, "attempt to import unsupported winsys offset %u\n",
319 mtx_lock(&qdws
->bo_handles_mutex
);
321 /* We must maintain a list of pairs <handle, bo>, so that we always return
322 * the same BO for one particular handle. If we didn't do that and created
323 * more than one BO for the same handle and then relocated them in a CS,
324 * we would hit a deadlock in the kernel.
326 * The list of pairs is guarded by a mutex, of course. */
327 if (whandle
->type
== WINSYS_HANDLE_TYPE_SHARED
) {
328 res
= util_hash_table_get(qdws
->bo_names
, (void*)(uintptr_t)handle
);
329 } else if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
331 r
= drmPrimeFDToHandle(qdws
->fd
, whandle
->handle
, &handle
);
334 res
= util_hash_table_get(qdws
->bo_handles
, (void*)(uintptr_t)handle
);
336 /* Unknown handle type */
341 struct virgl_hw_res
*r
= NULL
;
342 virgl_drm_resource_reference(&qdws
->base
, &r
, res
);
346 res
= CALLOC_STRUCT(virgl_hw_res
);
350 if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
351 res
->bo_handle
= handle
;
353 memset(&open_arg
, 0, sizeof(open_arg
));
354 open_arg
.name
= whandle
->handle
;
355 if (drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_OPEN
, &open_arg
)) {
360 res
->bo_handle
= open_arg
.handle
;
361 res
->flink_name
= whandle
->handle
;
364 memset(&info_arg
, 0, sizeof(info_arg
));
365 info_arg
.bo_handle
= res
->bo_handle
;
367 if (drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_RESOURCE_INFO
, &info_arg
)) {
374 res
->res_handle
= info_arg
.res_handle
;
376 res
->size
= info_arg
.size
;
377 res
->stride
= info_arg
.stride
;
378 pipe_reference_init(&res
->reference
, 1);
379 p_atomic_set(&res
->external
, true);
380 res
->num_cs_references
= 0;
383 util_hash_table_set(qdws
->bo_names
, (void *)(uintptr_t)res
->flink_name
, res
);
384 util_hash_table_set(qdws
->bo_handles
, (void *)(uintptr_t)res
->bo_handle
, res
);
387 mtx_unlock(&qdws
->bo_handles_mutex
);
391 static boolean
virgl_drm_winsys_resource_get_handle(struct virgl_winsys
*qws
,
392 struct virgl_hw_res
*res
,
394 struct winsys_handle
*whandle
)
396 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
397 struct drm_gem_flink flink
;
402 if (whandle
->type
== WINSYS_HANDLE_TYPE_SHARED
) {
403 if (!res
->flink_name
) {
404 memset(&flink
, 0, sizeof(flink
));
405 flink
.handle
= res
->bo_handle
;
407 if (drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_FLINK
, &flink
)) {
410 res
->flink_name
= flink
.name
;
412 mtx_lock(&qdws
->bo_handles_mutex
);
413 util_hash_table_set(qdws
->bo_names
, (void *)(uintptr_t)res
->flink_name
, res
);
414 mtx_unlock(&qdws
->bo_handles_mutex
);
416 whandle
->handle
= res
->flink_name
;
417 } else if (whandle
->type
== WINSYS_HANDLE_TYPE_KMS
) {
418 whandle
->handle
= res
->bo_handle
;
419 } else if (whandle
->type
== WINSYS_HANDLE_TYPE_FD
) {
420 if (drmPrimeHandleToFD(qdws
->fd
, res
->bo_handle
, DRM_CLOEXEC
, (int*)&whandle
->handle
))
422 mtx_lock(&qdws
->bo_handles_mutex
);
423 util_hash_table_set(qdws
->bo_handles
, (void *)(uintptr_t)res
->bo_handle
, res
);
424 mtx_unlock(&qdws
->bo_handles_mutex
);
427 p_atomic_set(&res
->external
, true);
429 whandle
->stride
= stride
;
433 static void *virgl_drm_resource_map(struct virgl_winsys
*qws
,
434 struct virgl_hw_res
*res
)
436 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
437 struct drm_virtgpu_map mmap_arg
;
443 memset(&mmap_arg
, 0, sizeof(mmap_arg
));
444 mmap_arg
.handle
= res
->bo_handle
;
445 if (drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_MAP
, &mmap_arg
))
448 ptr
= os_mmap(0, res
->size
, PROT_READ
|PROT_WRITE
, MAP_SHARED
,
449 qdws
->fd
, mmap_arg
.offset
);
450 if (ptr
== MAP_FAILED
)
458 static void virgl_drm_resource_wait(struct virgl_winsys
*qws
,
459 struct virgl_hw_res
*res
)
461 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
462 struct drm_virtgpu_3d_wait waitcmd
;
465 if (!p_atomic_read(&res
->maybe_busy
) && !p_atomic_read(&res
->external
))
468 memset(&waitcmd
, 0, sizeof(waitcmd
));
469 waitcmd
.handle
= res
->bo_handle
;
471 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_WAIT
, &waitcmd
);
475 p_atomic_set(&res
->maybe_busy
, false);
478 static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf
*cbuf
,
481 cbuf
->nres
= initial_size
;
484 cbuf
->res_bo
= CALLOC(cbuf
->nres
, sizeof(struct virgl_hw_buf
*));
488 cbuf
->res_hlist
= MALLOC(cbuf
->nres
* sizeof(uint32_t));
489 if (!cbuf
->res_hlist
) {
497 static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf
*cbuf
)
501 for (i
= 0; i
< cbuf
->cres
; i
++) {
502 p_atomic_dec(&cbuf
->res_bo
[i
]->num_cs_references
);
503 virgl_drm_resource_reference(cbuf
->ws
, &cbuf
->res_bo
[i
], NULL
);
505 FREE(cbuf
->res_hlist
);
509 static boolean
virgl_drm_lookup_res(struct virgl_drm_cmd_buf
*cbuf
,
510 struct virgl_hw_res
*res
)
512 unsigned hash
= res
->res_handle
& (sizeof(cbuf
->is_handle_added
)-1);
515 if (cbuf
->is_handle_added
[hash
]) {
516 i
= cbuf
->reloc_indices_hashlist
[hash
];
517 if (cbuf
->res_bo
[i
] == res
)
520 for (i
= 0; i
< cbuf
->cres
; i
++) {
521 if (cbuf
->res_bo
[i
] == res
) {
522 cbuf
->reloc_indices_hashlist
[hash
] = i
;
530 static void virgl_drm_add_res(struct virgl_drm_winsys
*qdws
,
531 struct virgl_drm_cmd_buf
*cbuf
,
532 struct virgl_hw_res
*res
)
534 unsigned hash
= res
->res_handle
& (sizeof(cbuf
->is_handle_added
)-1);
536 if (cbuf
->cres
>= cbuf
->nres
) {
537 unsigned new_nres
= cbuf
->nres
+ 256;
538 void *new_ptr
= REALLOC(cbuf
->res_bo
,
539 cbuf
->nres
* sizeof(struct virgl_hw_buf
*),
540 new_nres
* sizeof(struct virgl_hw_buf
*));
542 fprintf(stderr
,"failure to add relocation %d, %d\n", cbuf
->cres
, new_nres
);
545 cbuf
->res_bo
= new_ptr
;
547 new_ptr
= REALLOC(cbuf
->res_hlist
,
548 cbuf
->nres
* sizeof(uint32_t),
549 new_nres
* sizeof(uint32_t));
551 fprintf(stderr
,"failure to add hlist relocation %d, %d\n", cbuf
->cres
, cbuf
->nres
);
554 cbuf
->res_hlist
= new_ptr
;
555 cbuf
->nres
= new_nres
;
558 cbuf
->res_bo
[cbuf
->cres
] = NULL
;
559 virgl_drm_resource_reference(&qdws
->base
, &cbuf
->res_bo
[cbuf
->cres
], res
);
560 cbuf
->res_hlist
[cbuf
->cres
] = res
->bo_handle
;
561 cbuf
->is_handle_added
[hash
] = TRUE
;
563 cbuf
->reloc_indices_hashlist
[hash
] = cbuf
->cres
;
564 p_atomic_inc(&res
->num_cs_references
);
568 /* This is called after the cbuf is submitted. */
569 static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf
*cbuf
)
573 for (i
= 0; i
< cbuf
->cres
; i
++) {
574 /* mark all BOs busy after submission */
575 p_atomic_set(&cbuf
->res_bo
[i
]->maybe_busy
, true);
577 p_atomic_dec(&cbuf
->res_bo
[i
]->num_cs_references
);
578 virgl_drm_resource_reference(cbuf
->ws
, &cbuf
->res_bo
[i
], NULL
);
583 memset(cbuf
->is_handle_added
, 0, sizeof(cbuf
->is_handle_added
));
586 static void virgl_drm_emit_res(struct virgl_winsys
*qws
,
587 struct virgl_cmd_buf
*_cbuf
,
588 struct virgl_hw_res
*res
, boolean write_buf
)
590 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
591 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
592 boolean already_in_list
= virgl_drm_lookup_res(cbuf
, res
);
595 cbuf
->base
.buf
[cbuf
->base
.cdw
++] = res
->res_handle
;
597 if (!already_in_list
)
598 virgl_drm_add_res(qdws
, cbuf
, res
);
601 static boolean
virgl_drm_res_is_ref(struct virgl_winsys
*qws
,
602 struct virgl_cmd_buf
*_cbuf
,
603 struct virgl_hw_res
*res
)
605 if (!p_atomic_read(&res
->num_cs_references
))
611 static struct virgl_cmd_buf
*virgl_drm_cmd_buf_create(struct virgl_winsys
*qws
,
614 struct virgl_drm_cmd_buf
*cbuf
;
616 cbuf
= CALLOC_STRUCT(virgl_drm_cmd_buf
);
622 if (!virgl_drm_alloc_res_list(cbuf
, 512)) {
627 cbuf
->buf
= CALLOC(size
, sizeof(uint32_t));
629 FREE(cbuf
->res_hlist
);
635 cbuf
->in_fence_fd
= -1;
636 cbuf
->base
.buf
= cbuf
->buf
;
640 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf
*_cbuf
)
642 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
644 virgl_drm_free_res_list(cbuf
);
650 static struct pipe_fence_handle
*
651 virgl_drm_fence_create(struct virgl_winsys
*vws
, int fd
, bool external
)
653 struct virgl_drm_fence
*fence
;
655 assert(vws
->supports_fences
);
663 fence
= CALLOC_STRUCT(virgl_drm_fence
);
670 fence
->external
= external
;
672 pipe_reference_init(&fence
->reference
, 1);
674 return (struct pipe_fence_handle
*)fence
;
677 static struct pipe_fence_handle
*
678 virgl_drm_fence_create_legacy(struct virgl_winsys
*vws
)
680 struct virgl_drm_fence
*fence
;
682 assert(!vws
->supports_fences
);
684 fence
= CALLOC_STRUCT(virgl_drm_fence
);
689 /* Resources for fences should not be from the cache, since we are basing
690 * the fence status on the resource creation busy status.
692 fence
->hw_res
= virgl_drm_winsys_resource_create(vws
, PIPE_BUFFER
,
693 PIPE_FORMAT_R8_UNORM
, VIRGL_BIND_CUSTOM
, 8, 1, 1, 0, 0, 0, 8, true);
694 if (!fence
->hw_res
) {
699 pipe_reference_init(&fence
->reference
, 1);
701 return (struct pipe_fence_handle
*)fence
;
704 static int virgl_drm_winsys_submit_cmd(struct virgl_winsys
*qws
,
705 struct virgl_cmd_buf
*_cbuf
,
706 struct pipe_fence_handle
**fence
)
708 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
709 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
710 struct drm_virtgpu_execbuffer eb
;
713 if (cbuf
->base
.cdw
== 0)
716 memset(&eb
, 0, sizeof(struct drm_virtgpu_execbuffer
));
717 eb
.command
= (unsigned long)(void*)cbuf
->buf
;
718 eb
.size
= cbuf
->base
.cdw
* 4;
719 eb
.num_bo_handles
= cbuf
->cres
;
720 eb
.bo_handles
= (unsigned long)(void *)cbuf
->res_hlist
;
723 if (qws
->supports_fences
) {
724 if (cbuf
->in_fence_fd
>= 0) {
725 eb
.flags
|= VIRTGPU_EXECBUF_FENCE_FD_IN
;
726 eb
.fence_fd
= cbuf
->in_fence_fd
;
730 eb
.flags
|= VIRTGPU_EXECBUF_FENCE_FD_OUT
;
732 assert(cbuf
->in_fence_fd
< 0);
735 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_EXECBUFFER
, &eb
);
737 fprintf(stderr
,"got error from kernel - expect bad rendering %d\n", errno
);
740 if (qws
->supports_fences
) {
741 if (cbuf
->in_fence_fd
>= 0) {
742 close(cbuf
->in_fence_fd
);
743 cbuf
->in_fence_fd
= -1;
746 if (fence
!= NULL
&& ret
== 0)
747 *fence
= virgl_drm_fence_create(qws
, eb
.fence_fd
, false);
749 if (fence
!= NULL
&& ret
== 0)
750 *fence
= virgl_drm_fence_create_legacy(qws
);
753 virgl_drm_clear_res_list(cbuf
);
758 static int virgl_drm_get_caps(struct virgl_winsys
*vws
,
759 struct virgl_drm_caps
*caps
)
761 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
762 struct drm_virtgpu_get_caps args
;
765 virgl_ws_fill_new_caps_defaults(caps
);
767 memset(&args
, 0, sizeof(args
));
768 if (vdws
->has_capset_query_fix
) {
769 /* if we have the query fix - try and get cap set id 2 first */
771 args
.size
= sizeof(union virgl_caps
);
774 args
.size
= sizeof(struct virgl_caps_v1
);
776 args
.addr
= (unsigned long)&caps
->caps
;
778 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_GET_CAPS
, &args
);
779 if (ret
== -1 && errno
== EINVAL
) {
782 args
.size
= sizeof(struct virgl_caps_v1
);
783 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_GET_CAPS
, &args
);
/* Truncate a pointer-sized integer key to unsigned for hashing. */
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

/* Hash callback for the bo_handles/bo_names tables: keys are small
 * integer handles stored in the pointer, so the value itself hashes. */
static unsigned handle_hash(void *key)
{
   return PTR_TO_UINT(key);
}
/* Comparison callback for the handle tables: 0 when the two integer keys
 * are equal, non-zero otherwise (util_hash_table convention).  Spelled
 * with direct casts so the function is self-contained. */
static int handle_compare(void *key1, void *key2)
{
   return (unsigned)(intptr_t)key1 != (unsigned)(intptr_t)key2;
}
802 static struct pipe_fence_handle
*
803 virgl_cs_create_fence(struct virgl_winsys
*vws
, int fd
)
805 if (!vws
->supports_fences
)
808 return virgl_drm_fence_create(vws
, fd
, true);
811 static bool virgl_fence_wait(struct virgl_winsys
*vws
,
812 struct pipe_fence_handle
*_fence
,
815 struct virgl_drm_fence
*fence
= virgl_drm_fence(_fence
);
817 if (vws
->supports_fences
) {
822 return sync_wait(fence
->fd
, 0) == 0;
824 timeout_ms
= timeout
/ 1000000;
826 if (timeout_ms
* 1000000 < timeout
)
829 timeout_poll
= timeout_ms
<= INT_MAX
? (int) timeout_ms
: -1;
831 return sync_wait(fence
->fd
, timeout_poll
) == 0;
835 return !virgl_drm_resource_is_busy(vws
, fence
->hw_res
);
837 if (timeout
!= PIPE_TIMEOUT_INFINITE
) {
838 int64_t start_time
= os_time_get();
840 while (virgl_drm_resource_is_busy(vws
, fence
->hw_res
)) {
841 if (os_time_get() - start_time
>= timeout
)
847 virgl_drm_resource_wait(vws
, fence
->hw_res
);
852 static void virgl_fence_reference(struct virgl_winsys
*vws
,
853 struct pipe_fence_handle
**dst
,
854 struct pipe_fence_handle
*src
)
856 struct virgl_drm_fence
*dfence
= virgl_drm_fence(*dst
);
857 struct virgl_drm_fence
*sfence
= virgl_drm_fence(src
);
859 if (pipe_reference(&dfence
->reference
, &sfence
->reference
)) {
860 if (vws
->supports_fences
) {
863 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
864 virgl_hw_res_destroy(vdws
, dfence
->hw_res
);
872 static void virgl_fence_server_sync(struct virgl_winsys
*vws
,
873 struct virgl_cmd_buf
*_cbuf
,
874 struct pipe_fence_handle
*_fence
)
876 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
877 struct virgl_drm_fence
*fence
= virgl_drm_fence(_fence
);
879 if (!vws
->supports_fences
)
882 /* if not an external fence, then nothing more to do without preemption: */
883 if (!fence
->external
)
886 sync_accumulate("virgl", &cbuf
->in_fence_fd
, fence
->fd
);
889 static int virgl_fence_get_fd(struct virgl_winsys
*vws
,
890 struct pipe_fence_handle
*_fence
)
892 struct virgl_drm_fence
*fence
= virgl_drm_fence(_fence
);
894 if (!vws
->supports_fences
)
897 return dup(fence
->fd
);
900 static int virgl_drm_get_version(int fd
)
903 drmVersionPtr version
;
905 version
= drmGetVersion(fd
);
909 else if (version
->version_major
!= 0)
912 ret
= VIRGL_DRM_VERSION(0, version
->version_minor
);
914 drmFreeVersion(version
);
920 virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry
*entry
,
923 struct virgl_drm_winsys
*qdws
= user_data
;
924 struct virgl_hw_res
*res
= cache_entry_container_res(entry
);
926 return virgl_drm_resource_is_busy(&qdws
->base
, res
);
/* Resource-cache callback: destroy the cached entry's backing resource
 * when it is evicted.  user_data is the winsys. */
static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}
939 static struct virgl_winsys
*
940 virgl_drm_winsys_create(int drmFD
)
942 static const unsigned CACHE_TIMEOUT_USEC
= 1000000;
943 struct virgl_drm_winsys
*qdws
;
947 struct drm_virtgpu_getparam getparam
= {0};
949 getparam
.param
= VIRTGPU_PARAM_3D_FEATURES
;
950 getparam
.value
= (uint64_t)(uintptr_t)&gl
;
951 ret
= drmIoctl(drmFD
, DRM_IOCTL_VIRTGPU_GETPARAM
, &getparam
);
955 drm_version
= virgl_drm_get_version(drmFD
);
959 qdws
= CALLOC_STRUCT(virgl_drm_winsys
);
964 virgl_resource_cache_init(&qdws
->cache
, CACHE_TIMEOUT_USEC
,
965 virgl_drm_resource_cache_entry_is_busy
,
966 virgl_drm_resource_cache_entry_release
,
968 (void) mtx_init(&qdws
->mutex
, mtx_plain
);
969 (void) mtx_init(&qdws
->bo_handles_mutex
, mtx_plain
);
970 qdws
->bo_handles
= util_hash_table_create(handle_hash
, handle_compare
);
971 qdws
->bo_names
= util_hash_table_create(handle_hash
, handle_compare
);
972 qdws
->base
.destroy
= virgl_drm_winsys_destroy
;
974 qdws
->base
.transfer_put
= virgl_bo_transfer_put
;
975 qdws
->base
.transfer_get
= virgl_bo_transfer_get
;
976 qdws
->base
.resource_create
= virgl_drm_winsys_resource_cache_create
;
977 qdws
->base
.resource_reference
= virgl_drm_resource_reference
;
978 qdws
->base
.resource_create_from_handle
= virgl_drm_winsys_resource_create_handle
;
979 qdws
->base
.resource_get_handle
= virgl_drm_winsys_resource_get_handle
;
980 qdws
->base
.resource_map
= virgl_drm_resource_map
;
981 qdws
->base
.resource_wait
= virgl_drm_resource_wait
;
982 qdws
->base
.resource_is_busy
= virgl_drm_resource_is_busy
;
983 qdws
->base
.cmd_buf_create
= virgl_drm_cmd_buf_create
;
984 qdws
->base
.cmd_buf_destroy
= virgl_drm_cmd_buf_destroy
;
985 qdws
->base
.submit_cmd
= virgl_drm_winsys_submit_cmd
;
986 qdws
->base
.emit_res
= virgl_drm_emit_res
;
987 qdws
->base
.res_is_referenced
= virgl_drm_res_is_ref
;
989 qdws
->base
.cs_create_fence
= virgl_cs_create_fence
;
990 qdws
->base
.fence_wait
= virgl_fence_wait
;
991 qdws
->base
.fence_reference
= virgl_fence_reference
;
992 qdws
->base
.fence_server_sync
= virgl_fence_server_sync
;
993 qdws
->base
.fence_get_fd
= virgl_fence_get_fd
;
994 qdws
->base
.supports_fences
= drm_version
>= VIRGL_DRM_VERSION_FENCE_FD
;
995 qdws
->base
.supports_encoded_transfers
= 1;
997 qdws
->base
.get_caps
= virgl_drm_get_caps
;
1000 getparam
.param
= VIRTGPU_PARAM_CAPSET_QUERY_FIX
;
1001 getparam
.value
= (uint64_t)(uintptr_t)&value
;
1002 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_GETPARAM
, &getparam
);
1005 qdws
->has_capset_query_fix
= true;
1012 static struct util_hash_table
*fd_tab
= NULL
;
1013 static mtx_t virgl_screen_mutex
= _MTX_INITIALIZER_NP
;
1016 virgl_drm_screen_destroy(struct pipe_screen
*pscreen
)
1018 struct virgl_screen
*screen
= virgl_screen(pscreen
);
1021 mtx_lock(&virgl_screen_mutex
);
1022 destroy
= --screen
->refcnt
== 0;
1024 int fd
= virgl_drm_winsys(screen
->vws
)->fd
;
1025 util_hash_table_remove(fd_tab
, intptr_to_pointer(fd
));
1028 mtx_unlock(&virgl_screen_mutex
);
1031 pscreen
->destroy
= screen
->winsys_priv
;
1032 pscreen
->destroy(pscreen
);
/* Hash an fd by the identity of the file it refers to (device/inode/rdev),
 * so dup'd fds of the same device hash equal. */
static unsigned hash_fd(void *key)
{
   int fd = pointer_to_intptr(key);
   struct stat stat;

   fstat(fd, &stat);

   return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
}
/* Compare two fds by the identity of the files they refer to; 0 when both
 * name the same device node (util_hash_table convention). */
static int compare_fd(void *key1, void *key2)
{
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);
   struct stat stat1, stat2;

   fstat(fd1, &stat1);
   fstat(fd2, &stat2);

   return stat1.st_dev != stat2.st_dev ||
          stat1.st_ino != stat2.st_ino ||
          stat1.st_rdev != stat2.st_rdev;
}
1058 struct pipe_screen
*
1059 virgl_drm_screen_create(int fd
, const struct pipe_screen_config
*config
)
1061 struct pipe_screen
*pscreen
= NULL
;
1063 mtx_lock(&virgl_screen_mutex
);
1065 fd_tab
= util_hash_table_create(hash_fd
, compare_fd
);
1070 pscreen
= util_hash_table_get(fd_tab
, intptr_to_pointer(fd
));
1072 virgl_screen(pscreen
)->refcnt
++;
1074 struct virgl_winsys
*vws
;
1075 int dup_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
1077 vws
= virgl_drm_winsys_create(dup_fd
);
1083 pscreen
= virgl_create_screen(vws
, config
);
1085 util_hash_table_set(fd_tab
, intptr_to_pointer(dup_fd
), pscreen
);
1087 /* Bit of a hack, to avoid circular linkage dependency,
1088 * ie. pipe driver having to call in to winsys, we
1089 * override the pipe drivers screen->destroy():
1091 virgl_screen(pscreen
)->winsys_priv
= pscreen
->destroy
;
1092 pscreen
->destroy
= virgl_drm_screen_destroy
;
1097 mtx_unlock(&virgl_screen_mutex
);