2 * Copyright 2014, 2015 Red Hat.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 #include <sys/ioctl.h>
30 #include "os/os_mman.h"
31 #include "util/os_time.h"
32 #include "util/u_memory.h"
33 #include "util/u_format.h"
34 #include "util/u_hash_table.h"
35 #include "util/u_inlines.h"
36 #include "state_tracker/drm_driver.h"
37 #include "virgl/virgl_screen.h"
38 #include "virgl/virgl_public.h"
41 #include "virtgpu_drm.h"
43 #include "virgl_drm_winsys.h"
44 #include "virgl_drm_public.h"
46 static inline boolean
can_cache_resource(struct virgl_hw_res
*res
)
48 return res
->cacheable
== TRUE
;
51 static void virgl_hw_res_destroy(struct virgl_drm_winsys
*qdws
,
52 struct virgl_hw_res
*res
)
54 struct drm_gem_close args
;
57 mtx_lock(&qdws
->bo_handles_mutex
);
58 util_hash_table_remove(qdws
->bo_names
,
59 (void *)(uintptr_t)res
->flink
);
60 mtx_unlock(&qdws
->bo_handles_mutex
);
64 mtx_lock(&qdws
->bo_handles_mutex
);
65 util_hash_table_remove(qdws
->bo_handles
,
66 (void *)(uintptr_t)res
->bo_handle
);
67 mtx_unlock(&qdws
->bo_handles_mutex
);
71 os_munmap(res
->ptr
, res
->size
);
73 memset(&args
, 0, sizeof(args
));
74 args
.handle
= res
->bo_handle
;
75 drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_CLOSE
, &args
);
79 static boolean
virgl_drm_resource_is_busy(struct virgl_drm_winsys
*qdws
,
80 struct virgl_hw_res
*res
)
82 struct drm_virtgpu_3d_wait waitcmd
;
85 memset(&waitcmd
, 0, sizeof(waitcmd
));
86 waitcmd
.handle
= res
->bo_handle
;
87 waitcmd
.flags
= VIRTGPU_WAIT_NOWAIT
;
89 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_WAIT
, &waitcmd
);
90 if (ret
&& errno
== EBUSY
)
96 virgl_cache_flush(struct virgl_drm_winsys
*qdws
)
98 struct list_head
*curr
, *next
;
99 struct virgl_hw_res
*res
;
101 mtx_lock(&qdws
->mutex
);
102 curr
= qdws
->delayed
.next
;
105 while (curr
!= &qdws
->delayed
) {
106 res
= LIST_ENTRY(struct virgl_hw_res
, curr
, head
);
107 LIST_DEL(&res
->head
);
108 virgl_hw_res_destroy(qdws
, res
);
112 mtx_unlock(&qdws
->mutex
);
115 virgl_drm_winsys_destroy(struct virgl_winsys
*qws
)
117 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
119 virgl_cache_flush(qdws
);
121 util_hash_table_destroy(qdws
->bo_handles
);
122 util_hash_table_destroy(qdws
->bo_names
);
123 mtx_destroy(&qdws
->bo_handles_mutex
);
124 mtx_destroy(&qdws
->mutex
);
130 virgl_cache_list_check_free(struct virgl_drm_winsys
*qdws
)
132 struct list_head
*curr
, *next
;
133 struct virgl_hw_res
*res
;
137 curr
= qdws
->delayed
.next
;
139 while (curr
!= &qdws
->delayed
) {
140 res
= LIST_ENTRY(struct virgl_hw_res
, curr
, head
);
141 if (!os_time_timeout(res
->start
, res
->end
, now
))
144 LIST_DEL(&res
->head
);
145 virgl_hw_res_destroy(qdws
, res
);
151 static void virgl_drm_resource_reference(struct virgl_drm_winsys
*qdws
,
152 struct virgl_hw_res
**dres
,
153 struct virgl_hw_res
*sres
)
155 struct virgl_hw_res
*old
= *dres
;
156 if (pipe_reference(&(*dres
)->reference
, &sres
->reference
)) {
158 if (!can_cache_resource(old
)) {
159 virgl_hw_res_destroy(qdws
, old
);
161 mtx_lock(&qdws
->mutex
);
162 virgl_cache_list_check_free(qdws
);
164 old
->start
= os_time_get();
165 old
->end
= old
->start
+ qdws
->usecs
;
166 LIST_ADDTAIL(&old
->head
, &qdws
->delayed
);
168 mtx_unlock(&qdws
->mutex
);
174 static struct virgl_hw_res
*
175 virgl_drm_winsys_resource_create(struct virgl_winsys
*qws
,
176 enum pipe_texture_target target
,
187 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
188 struct drm_virtgpu_resource_create createcmd
;
190 struct virgl_hw_res
*res
;
191 uint32_t stride
= width
* util_format_get_blocksize(format
);
193 res
= CALLOC_STRUCT(virgl_hw_res
);
197 memset(&createcmd
, 0, sizeof(createcmd
));
198 createcmd
.target
= target
;
199 createcmd
.format
= format
;
200 createcmd
.bind
= bind
;
201 createcmd
.width
= width
;
202 createcmd
.height
= height
;
203 createcmd
.depth
= depth
;
204 createcmd
.array_size
= array_size
;
205 createcmd
.last_level
= last_level
;
206 createcmd
.nr_samples
= nr_samples
;
207 createcmd
.stride
= stride
;
208 createcmd
.size
= size
;
210 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE
, &createcmd
);
217 res
->format
= format
;
219 res
->res_handle
= createcmd
.res_handle
;
220 res
->bo_handle
= createcmd
.bo_handle
;
222 res
->stride
= stride
;
223 pipe_reference_init(&res
->reference
, 1);
224 res
->num_cs_references
= 0;
228 static inline int virgl_is_res_compat(struct virgl_drm_winsys
*qdws
,
229 struct virgl_hw_res
*res
,
230 uint32_t size
, uint32_t bind
,
233 if (res
->bind
!= bind
)
235 if (res
->format
!= format
)
237 if (res
->size
< size
)
239 if (res
->size
> size
* 2)
242 if (virgl_drm_resource_is_busy(qdws
, res
)) {
250 virgl_bo_transfer_put(struct virgl_winsys
*vws
,
251 struct virgl_hw_res
*res
,
252 const struct pipe_box
*box
,
253 uint32_t stride
, uint32_t layer_stride
,
254 uint32_t buf_offset
, uint32_t level
)
256 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
257 struct drm_virtgpu_3d_transfer_to_host tohostcmd
;
259 memset(&tohostcmd
, 0, sizeof(tohostcmd
));
260 tohostcmd
.bo_handle
= res
->bo_handle
;
261 tohostcmd
.box
.x
= box
->x
;
262 tohostcmd
.box
.y
= box
->y
;
263 tohostcmd
.box
.z
= box
->z
;
264 tohostcmd
.box
.w
= box
->width
;
265 tohostcmd
.box
.h
= box
->height
;
266 tohostcmd
.box
.d
= box
->depth
;
267 tohostcmd
.offset
= buf_offset
;
268 tohostcmd
.level
= level
;
269 // tohostcmd.stride = stride;
270 // tohostcmd.layer_stride = stride;
271 return drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST
, &tohostcmd
);
275 virgl_bo_transfer_get(struct virgl_winsys
*vws
,
276 struct virgl_hw_res
*res
,
277 const struct pipe_box
*box
,
278 uint32_t stride
, uint32_t layer_stride
,
279 uint32_t buf_offset
, uint32_t level
)
281 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
282 struct drm_virtgpu_3d_transfer_from_host fromhostcmd
;
284 memset(&fromhostcmd
, 0, sizeof(fromhostcmd
));
285 fromhostcmd
.bo_handle
= res
->bo_handle
;
286 fromhostcmd
.level
= level
;
287 fromhostcmd
.offset
= buf_offset
;
288 // fromhostcmd.stride = stride;
289 // fromhostcmd.layer_stride = layer_stride;
290 fromhostcmd
.box
.x
= box
->x
;
291 fromhostcmd
.box
.y
= box
->y
;
292 fromhostcmd
.box
.z
= box
->z
;
293 fromhostcmd
.box
.w
= box
->width
;
294 fromhostcmd
.box
.h
= box
->height
;
295 fromhostcmd
.box
.d
= box
->depth
;
296 return drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST
, &fromhostcmd
);
299 static struct virgl_hw_res
*
300 virgl_drm_winsys_resource_cache_create(struct virgl_winsys
*qws
,
301 enum pipe_texture_target target
,
312 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
313 struct virgl_hw_res
*res
, *curr_res
;
314 struct list_head
*curr
, *next
;
318 /* only store binds for vertex/index/const buffers */
319 if (bind
!= VIRGL_BIND_CONSTANT_BUFFER
&& bind
!= VIRGL_BIND_INDEX_BUFFER
&&
320 bind
!= VIRGL_BIND_VERTEX_BUFFER
&& bind
!= VIRGL_BIND_CUSTOM
)
323 mtx_lock(&qdws
->mutex
);
326 curr
= qdws
->delayed
.next
;
330 while (curr
!= &qdws
->delayed
) {
331 curr_res
= LIST_ENTRY(struct virgl_hw_res
, curr
, head
);
333 if (!res
&& ((ret
= virgl_is_res_compat(qdws
, curr_res
, size
, bind
, format
)) > 0))
335 else if (os_time_timeout(curr_res
->start
, curr_res
->end
, now
)) {
336 LIST_DEL(&curr_res
->head
);
337 virgl_hw_res_destroy(qdws
, curr_res
);
348 if (!res
&& ret
!= -1) {
349 while (curr
!= &qdws
->delayed
) {
350 curr_res
= LIST_ENTRY(struct virgl_hw_res
, curr
, head
);
351 ret
= virgl_is_res_compat(qdws
, curr_res
, size
, bind
, format
);
364 LIST_DEL(&res
->head
);
366 mtx_unlock(&qdws
->mutex
);
367 pipe_reference_init(&res
->reference
, 1);
371 mtx_unlock(&qdws
->mutex
);
374 res
= virgl_drm_winsys_resource_create(qws
, target
, format
, bind
,
375 width
, height
, depth
, array_size
,
376 last_level
, nr_samples
, size
);
377 if (bind
== VIRGL_BIND_CONSTANT_BUFFER
|| bind
== VIRGL_BIND_INDEX_BUFFER
||
378 bind
== VIRGL_BIND_VERTEX_BUFFER
)
379 res
->cacheable
= TRUE
;
383 static struct virgl_hw_res
*
384 virgl_drm_winsys_resource_create_handle(struct virgl_winsys
*qws
,
385 struct winsys_handle
*whandle
)
387 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
388 struct drm_gem_open open_arg
= {};
389 struct drm_virtgpu_resource_info info_arg
= {};
390 struct virgl_hw_res
*res
;
391 uint32_t handle
= whandle
->handle
;
393 if (whandle
->offset
!= 0) {
394 fprintf(stderr
, "attempt to import unsupported winsys offset %u\n",
399 mtx_lock(&qdws
->bo_handles_mutex
);
401 if (whandle
->type
== DRM_API_HANDLE_TYPE_SHARED
) {
402 res
= util_hash_table_get(qdws
->bo_names
, (void*)(uintptr_t)handle
);
404 struct virgl_hw_res
*r
= NULL
;
405 virgl_drm_resource_reference(qdws
, &r
, res
);
410 if (whandle
->type
== DRM_API_HANDLE_TYPE_FD
) {
412 r
= drmPrimeFDToHandle(qdws
->fd
, whandle
->handle
, &handle
);
419 res
= util_hash_table_get(qdws
->bo_handles
, (void*)(uintptr_t)handle
);
420 fprintf(stderr
, "resource %p for handle %d, pfd=%d\n", res
, handle
, whandle
->handle
);
422 struct virgl_hw_res
*r
= NULL
;
423 virgl_drm_resource_reference(qdws
, &r
, res
);
427 res
= CALLOC_STRUCT(virgl_hw_res
);
431 if (whandle
->type
== DRM_API_HANDLE_TYPE_FD
) {
432 res
->bo_handle
= handle
;
434 fprintf(stderr
, "gem open handle %d\n", handle
);
435 memset(&open_arg
, 0, sizeof(open_arg
));
436 open_arg
.name
= whandle
->handle
;
437 if (drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_OPEN
, &open_arg
)) {
442 res
->bo_handle
= open_arg
.handle
;
446 memset(&info_arg
, 0, sizeof(info_arg
));
447 info_arg
.bo_handle
= res
->bo_handle
;
449 if (drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_RESOURCE_INFO
, &info_arg
)) {
456 res
->res_handle
= info_arg
.res_handle
;
458 res
->size
= info_arg
.size
;
459 res
->stride
= info_arg
.stride
;
460 pipe_reference_init(&res
->reference
, 1);
461 res
->num_cs_references
= 0;
463 util_hash_table_set(qdws
->bo_handles
, (void *)(uintptr_t)handle
, res
);
466 mtx_unlock(&qdws
->bo_handles_mutex
);
470 static boolean
virgl_drm_winsys_resource_get_handle(struct virgl_winsys
*qws
,
471 struct virgl_hw_res
*res
,
473 struct winsys_handle
*whandle
)
475 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
476 struct drm_gem_flink flink
;
481 if (whandle
->type
== DRM_API_HANDLE_TYPE_SHARED
) {
483 memset(&flink
, 0, sizeof(flink
));
484 flink
.handle
= res
->bo_handle
;
486 if (drmIoctl(qdws
->fd
, DRM_IOCTL_GEM_FLINK
, &flink
)) {
490 res
->flink
= flink
.name
;
492 mtx_lock(&qdws
->bo_handles_mutex
);
493 util_hash_table_set(qdws
->bo_names
, (void *)(uintptr_t)res
->flink
, res
);
494 mtx_unlock(&qdws
->bo_handles_mutex
);
496 whandle
->handle
= res
->flink
;
497 } else if (whandle
->type
== DRM_API_HANDLE_TYPE_KMS
) {
498 whandle
->handle
= res
->bo_handle
;
499 } else if (whandle
->type
== DRM_API_HANDLE_TYPE_FD
) {
500 if (drmPrimeHandleToFD(qdws
->fd
, res
->bo_handle
, DRM_CLOEXEC
, (int*)&whandle
->handle
))
502 mtx_lock(&qdws
->bo_handles_mutex
);
503 util_hash_table_set(qdws
->bo_handles
, (void *)(uintptr_t)res
->bo_handle
, res
);
504 mtx_unlock(&qdws
->bo_handles_mutex
);
506 whandle
->stride
= stride
;
510 static void virgl_drm_winsys_resource_unref(struct virgl_winsys
*qws
,
511 struct virgl_hw_res
*hres
)
513 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
515 virgl_drm_resource_reference(qdws
, &hres
, NULL
);
518 static void *virgl_drm_resource_map(struct virgl_winsys
*qws
,
519 struct virgl_hw_res
*res
)
521 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
522 struct drm_virtgpu_map mmap_arg
;
528 memset(&mmap_arg
, 0, sizeof(mmap_arg
));
529 mmap_arg
.handle
= res
->bo_handle
;
530 if (drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_MAP
, &mmap_arg
))
533 ptr
= os_mmap(0, res
->size
, PROT_READ
|PROT_WRITE
, MAP_SHARED
,
534 qdws
->fd
, mmap_arg
.offset
);
535 if (ptr
== MAP_FAILED
)
543 static void virgl_drm_resource_wait(struct virgl_winsys
*qws
,
544 struct virgl_hw_res
*res
)
546 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
547 struct drm_virtgpu_3d_wait waitcmd
;
550 memset(&waitcmd
, 0, sizeof(waitcmd
));
551 waitcmd
.handle
= res
->bo_handle
;
553 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_WAIT
, &waitcmd
);
558 static struct virgl_cmd_buf
*virgl_drm_cmd_buf_create(struct virgl_winsys
*qws
)
560 struct virgl_drm_cmd_buf
*cbuf
;
562 cbuf
= CALLOC_STRUCT(virgl_drm_cmd_buf
);
569 cbuf
->res_bo
= CALLOC(cbuf
->nres
, sizeof(struct virgl_hw_buf
*));
574 cbuf
->res_hlist
= MALLOC(cbuf
->nres
* sizeof(uint32_t));
575 if (!cbuf
->res_hlist
) {
581 cbuf
->base
.buf
= cbuf
->buf
;
585 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf
*_cbuf
)
587 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
589 FREE(cbuf
->res_hlist
);
595 static boolean
virgl_drm_lookup_res(struct virgl_drm_cmd_buf
*cbuf
,
596 struct virgl_hw_res
*res
)
598 unsigned hash
= res
->res_handle
& (sizeof(cbuf
->is_handle_added
)-1);
601 if (cbuf
->is_handle_added
[hash
]) {
602 i
= cbuf
->reloc_indices_hashlist
[hash
];
603 if (cbuf
->res_bo
[i
] == res
)
606 for (i
= 0; i
< cbuf
->cres
; i
++) {
607 if (cbuf
->res_bo
[i
] == res
) {
608 cbuf
->reloc_indices_hashlist
[hash
] = i
;
616 static void virgl_drm_add_res(struct virgl_drm_winsys
*qdws
,
617 struct virgl_drm_cmd_buf
*cbuf
,
618 struct virgl_hw_res
*res
)
620 unsigned hash
= res
->res_handle
& (sizeof(cbuf
->is_handle_added
)-1);
622 if (cbuf
->cres
> cbuf
->nres
) {
623 fprintf(stderr
,"failure to add relocation\n");
627 cbuf
->res_bo
[cbuf
->cres
] = NULL
;
628 virgl_drm_resource_reference(qdws
, &cbuf
->res_bo
[cbuf
->cres
], res
);
629 cbuf
->res_hlist
[cbuf
->cres
] = res
->bo_handle
;
630 cbuf
->is_handle_added
[hash
] = TRUE
;
632 cbuf
->reloc_indices_hashlist
[hash
] = cbuf
->cres
;
633 p_atomic_inc(&res
->num_cs_references
);
637 static void virgl_drm_release_all_res(struct virgl_drm_winsys
*qdws
,
638 struct virgl_drm_cmd_buf
*cbuf
)
642 for (i
= 0; i
< cbuf
->cres
; i
++) {
643 p_atomic_dec(&cbuf
->res_bo
[i
]->num_cs_references
);
644 virgl_drm_resource_reference(qdws
, &cbuf
->res_bo
[i
], NULL
);
649 static void virgl_drm_emit_res(struct virgl_winsys
*qws
,
650 struct virgl_cmd_buf
*_cbuf
,
651 struct virgl_hw_res
*res
, boolean write_buf
)
653 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
654 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
655 boolean already_in_list
= virgl_drm_lookup_res(cbuf
, res
);
658 cbuf
->base
.buf
[cbuf
->base
.cdw
++] = res
->res_handle
;
660 if (!already_in_list
)
661 virgl_drm_add_res(qdws
, cbuf
, res
);
664 static boolean
virgl_drm_res_is_ref(struct virgl_winsys
*qws
,
665 struct virgl_cmd_buf
*_cbuf
,
666 struct virgl_hw_res
*res
)
668 if (!res
->num_cs_references
)
674 static int virgl_drm_winsys_submit_cmd(struct virgl_winsys
*qws
,
675 struct virgl_cmd_buf
*_cbuf
)
677 struct virgl_drm_winsys
*qdws
= virgl_drm_winsys(qws
);
678 struct virgl_drm_cmd_buf
*cbuf
= virgl_drm_cmd_buf(_cbuf
);
679 struct drm_virtgpu_execbuffer eb
;
682 if (cbuf
->base
.cdw
== 0)
685 memset(&eb
, 0, sizeof(struct drm_virtgpu_execbuffer
));
686 eb
.command
= (unsigned long)(void*)cbuf
->buf
;
687 eb
.size
= cbuf
->base
.cdw
* 4;
688 eb
.num_bo_handles
= cbuf
->cres
;
689 eb
.bo_handles
= (unsigned long)(void *)cbuf
->res_hlist
;
691 ret
= drmIoctl(qdws
->fd
, DRM_IOCTL_VIRTGPU_EXECBUFFER
, &eb
);
693 fprintf(stderr
,"got error from kernel - expect bad rendering %d\n", errno
);
696 virgl_drm_release_all_res(qdws
, cbuf
);
698 memset(cbuf
->is_handle_added
, 0, sizeof(cbuf
->is_handle_added
));
702 static int virgl_drm_get_caps(struct virgl_winsys
*vws
,
703 struct virgl_drm_caps
*caps
)
705 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
706 struct drm_virtgpu_get_caps args
;
708 bool fill_v2
= false;
710 memset(&args
, 0, sizeof(args
));
713 args
.addr
= (unsigned long)&caps
->caps
;
714 args
.size
= sizeof(union virgl_caps
);
716 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_GET_CAPS
, &args
);
718 if (ret
== -1 && errno
== EINVAL
) {
720 args
.size
= sizeof(struct virgl_caps_v1
);
721 ret
= drmIoctl(vdws
->fd
, DRM_IOCTL_VIRTGPU_GET_CAPS
, &args
);
726 if (caps
->caps
.max_version
== 1)
730 caps
->caps
.v2
.min_aliased_point_size
= 0.f
;
731 caps
->caps
.v2
.max_aliased_point_size
= 255.f
;
732 caps
->caps
.v2
.min_smooth_point_size
= 0.f
;
733 caps
->caps
.v2
.max_smooth_point_size
= 255.f
;
734 caps
->caps
.v2
.min_aliased_line_width
= 0.f
;
735 caps
->caps
.v2
.max_aliased_line_width
= 255.f
;
736 caps
->caps
.v2
.min_smooth_line_width
= 0.f
;
737 caps
->caps
.v2
.max_smooth_line_width
= 255.f
;
738 caps
->caps
.v2
.max_texture_lod_bias
= 16.0f
;
739 caps
->caps
.v2
.max_geom_output_vertices
= 256;
740 caps
->caps
.v2
.max_geom_total_output_components
= 16384;
741 caps
->caps
.v2
.max_vertex_outputs
= 32;
742 caps
->caps
.v2
.max_vertex_attribs
= 16;
743 caps
->caps
.v2
.max_shader_patch_varyings
= 0;
744 caps
->caps
.v2
.min_texel_offset
= -8;
745 caps
->caps
.v2
.max_texel_offset
= 7;
746 caps
->caps
.v2
.min_texture_gather_offset
= -8;
747 caps
->caps
.v2
.max_texture_gather_offset
= 7;
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

/* Hash for the bo_handles/bo_names tables: the key IS the handle value
 * stored in the pointer, so just truncate it.
 */
static unsigned handle_hash(void *key)
{
   return PTR_TO_UINT(key);
}

/* Equality callback: 0 when both keys hold the same handle value. */
static int handle_compare(void *key1, void *key2)
{
   return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
764 static struct pipe_fence_handle
*
765 virgl_cs_create_fence(struct virgl_winsys
*vws
)
767 struct virgl_hw_res
*res
;
769 res
= virgl_drm_winsys_resource_cache_create(vws
,
771 PIPE_FORMAT_R8_UNORM
,
773 8, 1, 1, 0, 0, 0, 8);
775 return (struct pipe_fence_handle
*)res
;
778 static bool virgl_fence_wait(struct virgl_winsys
*vws
,
779 struct pipe_fence_handle
*fence
,
782 struct virgl_drm_winsys
*vdws
= virgl_drm_winsys(vws
);
783 struct virgl_hw_res
*res
= virgl_hw_res(fence
);
786 return !virgl_drm_resource_is_busy(vdws
, res
);
788 if (timeout
!= PIPE_TIMEOUT_INFINITE
) {
789 int64_t start_time
= os_time_get();
791 while (virgl_drm_resource_is_busy(vdws
, res
)) {
792 if (os_time_get() - start_time
>= timeout
)
798 virgl_drm_resource_wait(vws
, res
);
/* Fence refcounting delegates to the underlying resource reference. */
static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);

   virgl_drm_resource_reference(vdws, (struct virgl_hw_res **)dst,
                                virgl_hw_res(src));
}
812 static struct virgl_winsys
*
813 virgl_drm_winsys_create(int drmFD
)
815 struct virgl_drm_winsys
*qdws
;
817 qdws
= CALLOC_STRUCT(virgl_drm_winsys
);
822 qdws
->num_delayed
= 0;
823 qdws
->usecs
= 1000000;
824 LIST_INITHEAD(&qdws
->delayed
);
825 (void) mtx_init(&qdws
->mutex
, mtx_plain
);
826 (void) mtx_init(&qdws
->bo_handles_mutex
, mtx_plain
);
827 qdws
->bo_handles
= util_hash_table_create(handle_hash
, handle_compare
);
828 qdws
->bo_names
= util_hash_table_create(handle_hash
, handle_compare
);
829 qdws
->base
.destroy
= virgl_drm_winsys_destroy
;
831 qdws
->base
.transfer_put
= virgl_bo_transfer_put
;
832 qdws
->base
.transfer_get
= virgl_bo_transfer_get
;
833 qdws
->base
.resource_create
= virgl_drm_winsys_resource_cache_create
;
834 qdws
->base
.resource_unref
= virgl_drm_winsys_resource_unref
;
835 qdws
->base
.resource_create_from_handle
= virgl_drm_winsys_resource_create_handle
;
836 qdws
->base
.resource_get_handle
= virgl_drm_winsys_resource_get_handle
;
837 qdws
->base
.resource_map
= virgl_drm_resource_map
;
838 qdws
->base
.resource_wait
= virgl_drm_resource_wait
;
839 qdws
->base
.cmd_buf_create
= virgl_drm_cmd_buf_create
;
840 qdws
->base
.cmd_buf_destroy
= virgl_drm_cmd_buf_destroy
;
841 qdws
->base
.submit_cmd
= virgl_drm_winsys_submit_cmd
;
842 qdws
->base
.emit_res
= virgl_drm_emit_res
;
843 qdws
->base
.res_is_referenced
= virgl_drm_res_is_ref
;
845 qdws
->base
.cs_create_fence
= virgl_cs_create_fence
;
846 qdws
->base
.fence_wait
= virgl_fence_wait
;
847 qdws
->base
.fence_reference
= virgl_fence_reference
;
849 qdws
->base
.get_caps
= virgl_drm_get_caps
;
854 static struct util_hash_table
*fd_tab
= NULL
;
855 static mtx_t virgl_screen_mutex
= _MTX_INITIALIZER_NP
;
858 virgl_drm_screen_destroy(struct pipe_screen
*pscreen
)
860 struct virgl_screen
*screen
= virgl_screen(pscreen
);
863 mtx_lock(&virgl_screen_mutex
);
864 destroy
= --screen
->refcnt
== 0;
866 int fd
= virgl_drm_winsys(screen
->vws
)->fd
;
867 util_hash_table_remove(fd_tab
, intptr_to_pointer(fd
));
869 mtx_unlock(&virgl_screen_mutex
);
872 pscreen
->destroy
= screen
->winsys_priv
;
873 pscreen
->destroy(pscreen
);
877 static unsigned hash_fd(void *key
)
879 int fd
= pointer_to_intptr(key
);
883 return stat
.st_dev
^ stat
.st_ino
^ stat
.st_rdev
;
886 static int compare_fd(void *key1
, void *key2
)
888 int fd1
= pointer_to_intptr(key1
);
889 int fd2
= pointer_to_intptr(key2
);
890 struct stat stat1
, stat2
;
894 return stat1
.st_dev
!= stat2
.st_dev
||
895 stat1
.st_ino
!= stat2
.st_ino
||
896 stat1
.st_rdev
!= stat2
.st_rdev
;
900 virgl_drm_screen_create(int fd
)
902 struct pipe_screen
*pscreen
= NULL
;
904 mtx_lock(&virgl_screen_mutex
);
906 fd_tab
= util_hash_table_create(hash_fd
, compare_fd
);
911 pscreen
= util_hash_table_get(fd_tab
, intptr_to_pointer(fd
));
913 virgl_screen(pscreen
)->refcnt
++;
915 struct virgl_winsys
*vws
;
916 int dup_fd
= fcntl(fd
, F_DUPFD_CLOEXEC
, 3);
918 vws
= virgl_drm_winsys_create(dup_fd
);
920 pscreen
= virgl_create_screen(vws
);
922 util_hash_table_set(fd_tab
, intptr_to_pointer(dup_fd
), pscreen
);
924 /* Bit of a hack, to avoid circular linkage dependency,
925 * ie. pipe driver having to call in to winsys, we
926 * override the pipe drivers screen->destroy():
928 virgl_screen(pscreen
)->winsys_priv
= pscreen
->destroy
;
929 pscreen
->destroy
= virgl_drm_screen_destroy
;
934 mtx_unlock(&virgl_screen_mutex
);