1 /**********************************************************
2 * Copyright 2009-2015 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **********************************************************/
29 * Wrappers for DRM ioctl functionality used by the rest of the vmw
32 * Based on svgaicd_escape.c
37 #include "util/u_memory.h"
38 #include "util/u_math.h"
39 #include "svgadump/svga_dump.h"
40 #include "state_tracker/drm_driver.h"
41 #include "vmw_screen.h"
42 #include "vmw_context.h"
43 #include "vmw_fence.h"
45 #include "vmwgfx_drm.h"
46 #include "svga3d_caps.h"
47 #include "svga3d_reg.h"
49 #include "os/os_mman.h"
/* Fallback maximum texture size (128 MB), used when the kernel does not
 * report a usable DRM_VMW_PARAM_MAX_MOB_SIZE — see vmw_ioctl_init below. */
54 #define VMW_MAX_DEFAULT_TEXTURE_SIZE (128 * 1024 * 1024)
/* Upper bound for fence waits, in seconds (converted to microseconds at
 * the DRM_VMW_FENCE_WAIT call site). */
55 #define VMW_FENCE_TIMEOUT_SECONDS 60
/* Helpers to combine/split the 64-bit SVGA3D surface flags across the two
 * 32-bit fields used by the extended gb-surface DRM interface. */
57 #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
58 #define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
59 #define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
60 (svga3d_flags & ((uint64_t)UINT32_MAX))
/* Return the size of a vmw_region buffer object.
 * NOTE(review): the function body is not visible in this chunk — confirm it
 * simply reads region->size. */
73 vmw_region_size(struct vmw_region
*region
)
/* The BSDs do not define ERESTART; map it to EINTR so the ioctl retry
 * loops below compile portably. */
78 #if defined(__DragonFly__) || defined(__FreeBSD__) || \
79 defined(__NetBSD__) || defined(__OpenBSD__)
80 #define ERESTART EINTR
/* Create a legacy (non-DX) rendering context via DRM_VMW_CREATE_CONTEXT.
 * NOTE(review): error handling and the return statement are not visible in
 * this chunk; presumably c_arg.cid is returned — confirm. */
84 vmw_ioctl_context_create(struct vmw_winsys_screen
*vws
)
86 struct drm_vmw_context_arg c_arg
;
/* The kernel fills in c_arg; this ioctl is read-only from userspace. */
91 ret
= drmCommandRead(vws
->ioctl
.drm_fd
, DRM_VMW_CREATE_CONTEXT
,
92 &c_arg
, sizeof(c_arg
));
97 vmw_printf("Context id is %d\n", c_arg
.cid
);
/* Create a rendering context through the extended-context ioctl, selecting
 * a DX (vgpu10) or legacy context from the vgpu10 flag. Returns the new
 * context id from the kernel reply.
 * NOTE(review): the error-check between the ioctl and the return is not
 * visible in this chunk. */
102 vmw_ioctl_extended_context_create(struct vmw_winsys_screen
*vws
,
105 union drm_vmw_extended_context_arg c_arg
;
109 memset(&c_arg
, 0, sizeof(c_arg
));
/* Request kind: DX context when vgpu10 is set, legacy otherwise. */
110 c_arg
.req
= (vgpu10
? drm_vmw_context_dx
: drm_vmw_context_legacy
);
111 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
,
112 DRM_VMW_CREATE_EXTENDED_CONTEXT
,
113 &c_arg
, sizeof(c_arg
));
118 vmw_printf("Context id is %d\n", c_arg
.cid
);
119 return c_arg
.rep
.cid
;
/* Destroy (unreference) the kernel context identified by cid.
 * The ioctl result is deliberately ignored — destruction is best-effort.
 * NOTE(review): the assignment of cid into c_arg is not visible in this
 * chunk; confirm c_arg.cid = cid happens between the memset and the ioctl. */
123 vmw_ioctl_context_destroy(struct vmw_winsys_screen
*vws
, uint32 cid
)
125 struct drm_vmw_context_arg c_arg
;
129 memset(&c_arg
, 0, sizeof(c_arg
));
132 (void)drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_UNREF_CONTEXT
,
133 &c_arg
, sizeof(c_arg
));
138 vmw_ioctl_surface_create(struct vmw_winsys_screen
*vws
,
139 SVGA3dSurface1Flags flags
,
140 SVGA3dSurfaceFormat format
,
143 uint32_t numFaces
, uint32_t numMipLevels
,
144 unsigned sampleCount
)
146 union drm_vmw_surface_create_arg s_arg
;
147 struct drm_vmw_surface_create_req
*req
= &s_arg
.req
;
148 struct drm_vmw_surface_arg
*rep
= &s_arg
.rep
;
149 struct drm_vmw_size sizes
[DRM_VMW_MAX_SURFACE_FACES
*
150 DRM_VMW_MAX_MIP_LEVELS
];
151 struct drm_vmw_size
*cur_size
;
156 vmw_printf("%s flags %d format %d\n", __FUNCTION__
, flags
, format
);
158 memset(&s_arg
, 0, sizeof(s_arg
));
159 req
->flags
= (uint32_t) flags
;
160 req
->scanout
= !!(usage
& SVGA_SURFACE_USAGE_SCANOUT
);
161 req
->format
= (uint32_t) format
;
162 req
->shareable
= !!(usage
& SVGA_SURFACE_USAGE_SHARED
);
164 assert(numFaces
* numMipLevels
< DRM_VMW_MAX_SURFACE_FACES
*
165 DRM_VMW_MAX_MIP_LEVELS
);
167 for (iFace
= 0; iFace
< numFaces
; ++iFace
) {
168 SVGA3dSize mipSize
= size
;
170 req
->mip_levels
[iFace
] = numMipLevels
;
171 for (iMipLevel
= 0; iMipLevel
< numMipLevels
; ++iMipLevel
) {
172 cur_size
->width
= mipSize
.width
;
173 cur_size
->height
= mipSize
.height
;
174 cur_size
->depth
= mipSize
.depth
;
175 mipSize
.width
= MAX2(mipSize
.width
>> 1, 1);
176 mipSize
.height
= MAX2(mipSize
.height
>> 1, 1);
177 mipSize
.depth
= MAX2(mipSize
.depth
>> 1, 1);
181 for (iFace
= numFaces
; iFace
< SVGA3D_MAX_SURFACE_FACES
; ++iFace
) {
182 req
->mip_levels
[iFace
] = 0;
185 req
->size_addr
= (unsigned long)&sizes
;
187 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_CREATE_SURFACE
,
188 &s_arg
, sizeof(s_arg
));
193 vmw_printf("Surface id is %d\n", rep
->sid
);
200 vmw_ioctl_gb_surface_create(struct vmw_winsys_screen
*vws
,
201 SVGA3dSurfaceAllFlags flags
,
202 SVGA3dSurfaceFormat format
,
206 uint32_t numMipLevels
,
207 unsigned sampleCount
,
208 uint32_t buffer_handle
,
209 SVGA3dMSPattern multisamplePattern
,
210 SVGA3dMSQualityLevel qualityLevel
,
211 struct vmw_region
**p_region
)
213 struct drm_vmw_gb_surface_create_rep
*rep
;
214 struct vmw_region
*region
= NULL
;
217 vmw_printf("%s flags %d format %d\n", __FUNCTION__
, flags
, format
);
220 region
= CALLOC_STRUCT(vmw_region
);
222 return SVGA3D_INVALID_ID
;
225 if (vws
->ioctl
.have_drm_2_15
) {
226 union drm_vmw_gb_surface_create_ext_arg s_arg
;
227 struct drm_vmw_gb_surface_create_ext_req
*req
= &s_arg
.req
;
230 memset(&s_arg
, 0, sizeof(s_arg
));
232 req
->version
= drm_vmw_gb_surface_v1
;
233 req
->multisample_pattern
= multisamplePattern
;
234 req
->quality_level
= qualityLevel
;
235 req
->must_be_zero
= 0;
236 req
->base
.svga3d_flags
= SVGA3D_FLAGS_LOWER_32(flags
);
237 req
->svga3d_flags_upper_32_bits
= SVGA3D_FLAGS_UPPER_32(flags
);
238 req
->base
.format
= (uint32_t) format
;
240 if (usage
& SVGA_SURFACE_USAGE_SCANOUT
)
241 req
->base
.drm_surface_flags
|= drm_vmw_surface_flag_scanout
;
243 if (usage
& SVGA_SURFACE_USAGE_SHARED
)
244 req
->base
.drm_surface_flags
|= drm_vmw_surface_flag_shareable
;
246 req
->base
.drm_surface_flags
|= drm_vmw_surface_flag_create_buffer
;
247 req
->base
.base_size
.width
= size
.width
;
248 req
->base
.base_size
.height
= size
.height
;
249 req
->base
.base_size
.depth
= size
.depth
;
250 req
->base
.mip_levels
= numMipLevels
;
251 req
->base
.multisample_count
= 0;
252 req
->base
.autogen_filter
= SVGA3D_TEX_FILTER_NONE
;
254 if (vws
->base
.have_vgpu10
) {
255 req
->base
.array_size
= numFaces
;
256 req
->base
.multisample_count
= sampleCount
;
258 assert(numFaces
* numMipLevels
< DRM_VMW_MAX_SURFACE_FACES
*
259 DRM_VMW_MAX_MIP_LEVELS
);
260 req
->base
.array_size
= 0;
263 req
->base
.buffer_handle
= buffer_handle
?
264 buffer_handle
: SVGA3D_INVALID_ID
;
266 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
,
267 DRM_VMW_GB_SURFACE_CREATE_EXT
, &s_arg
,
271 goto out_fail_create
;
273 union drm_vmw_gb_surface_create_arg s_arg
;
274 struct drm_vmw_gb_surface_create_req
*req
= &s_arg
.req
;
277 memset(&s_arg
, 0, sizeof(s_arg
));
278 req
->svga3d_flags
= (uint32_t) flags
;
279 req
->format
= (uint32_t) format
;
281 if (usage
& SVGA_SURFACE_USAGE_SCANOUT
)
282 req
->drm_surface_flags
|= drm_vmw_surface_flag_scanout
;
284 if (usage
& SVGA_SURFACE_USAGE_SHARED
)
285 req
->drm_surface_flags
|= drm_vmw_surface_flag_shareable
;
287 req
->drm_surface_flags
|= drm_vmw_surface_flag_create_buffer
;
288 req
->base_size
.width
= size
.width
;
289 req
->base_size
.height
= size
.height
;
290 req
->base_size
.depth
= size
.depth
;
291 req
->mip_levels
= numMipLevels
;
292 req
->multisample_count
= 0;
293 req
->autogen_filter
= SVGA3D_TEX_FILTER_NONE
;
295 if (vws
->base
.have_vgpu10
) {
296 req
->array_size
= numFaces
;
297 req
->multisample_count
= sampleCount
;
299 assert(numFaces
* numMipLevels
< DRM_VMW_MAX_SURFACE_FACES
*
300 DRM_VMW_MAX_MIP_LEVELS
);
304 req
->buffer_handle
= buffer_handle
?
305 buffer_handle
: SVGA3D_INVALID_ID
;
307 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GB_SURFACE_CREATE
,
308 &s_arg
, sizeof(s_arg
));
311 goto out_fail_create
;
315 region
->handle
= rep
->buffer_handle
;
316 region
->map_handle
= rep
->buffer_map_handle
;
317 region
->drm_fd
= vws
->ioctl
.drm_fd
;
318 region
->size
= rep
->backup_size
;
322 vmw_printf("Surface id is %d\n", rep
->sid
);
327 return SVGA3D_INVALID_ID
;
331 * vmw_ioctl_surface_req - Fill in a struct surface_req
333 * @vws: Winsys screen
334 * @whandle: Surface handle
335 * @req: The struct surface req to fill in
336 * @needs_unref: This call takes a kernel surface reference that needs to
339 * Returns 0 on success, negative error type otherwise.
340 * Fills in the surface_req structure according to handle type and kernel
/* Fill in a struct drm_vmw_surface_arg from a winsys handle.
 *
 * SHARED/KMS handles are used directly as legacy surface ids (no kernel
 * reference taken, so *needs_unref = FALSE). FD handles are either
 * converted with drmPrimeFDToHandle (pre-2.6 kernels) or passed through
 * as prime handles. Unsupported handle types are reported via vmw_error.
 * NOTE(review): break/return statements between cases are not visible in
 * this chunk, and the path that sets *needs_unref = TRUE (if any) is
 * missing — confirm against the full source. */
344 vmw_ioctl_surface_req(const struct vmw_winsys_screen
*vws
,
345 const struct winsys_handle
*whandle
,
346 struct drm_vmw_surface_arg
*req
,
347 boolean
*needs_unref
)
351 switch(whandle
->type
) {
352 case WINSYS_HANDLE_TYPE_SHARED
:
353 case WINSYS_HANDLE_TYPE_KMS
:
/* Legacy id handles: use the handle as the surface id directly. */
354 *needs_unref
= FALSE
;
355 req
->handle_type
= DRM_VMW_HANDLE_LEGACY
;
356 req
->sid
= whandle
->handle
;
358 case WINSYS_HANDLE_TYPE_FD
:
/* Pre-2.6 kernels lack prime handle support in the surface ioctls,
 * so convert the dma-buf fd to a GEM handle first. */
359 if (!vws
->ioctl
.have_drm_2_6
) {
362 ret
= drmPrimeFDToHandle(vws
->ioctl
.drm_fd
, whandle
->handle
, &handle
);
364 vmw_error("Failed to get handle from prime fd %d.\n",
365 (int) whandle
->handle
);
370 req
->handle_type
= DRM_VMW_HANDLE_LEGACY
;
/* 2.6+ kernels accept the prime fd directly as a prime handle. */
373 *needs_unref
= FALSE
;
374 req
->handle_type
= DRM_VMW_HANDLE_PRIME
;
375 req
->sid
= whandle
->handle
;
379 vmw_error("Attempt to import unsupported handle type %d.\n",
388 * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
389 * get surface information
391 * @vws: Screen to register the reference on
392 * @handle: Kernel handle of the guest-backed surface
393 * @flags: flags used when the surface was created
394 * @format: Format used when the surface was created
395 * @numMipLevels: Number of mipmap levels of the surface
396 * @p_region: On successful return points to a newly allocated
397 * struct vmw_region holding a reference to the surface backup buffer.
399 * Returns 0 on success, a system error on failure.
402 vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen
*vws
,
403 const struct winsys_handle
*whandle
,
404 SVGA3dSurfaceAllFlags
*flags
,
405 SVGA3dSurfaceFormat
*format
,
406 uint32_t *numMipLevels
,
408 struct vmw_region
**p_region
)
410 struct vmw_region
*region
= NULL
;
411 boolean needs_unref
= FALSE
;
414 assert(p_region
!= NULL
);
415 region
= CALLOC_STRUCT(vmw_region
);
419 if (vws
->ioctl
.have_drm_2_15
) {
420 union drm_vmw_gb_surface_reference_ext_arg s_arg
;
421 struct drm_vmw_surface_arg
*req
= &s_arg
.req
;
422 struct drm_vmw_gb_surface_ref_ext_rep
*rep
= &s_arg
.rep
;
424 memset(&s_arg
, 0, sizeof(s_arg
));
425 ret
= vmw_ioctl_surface_req(vws
, whandle
, req
, &needs_unref
);
430 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GB_SURFACE_REF_EXT
,
431 &s_arg
, sizeof(s_arg
));
436 region
->handle
= rep
->crep
.buffer_handle
;
437 region
->map_handle
= rep
->crep
.buffer_map_handle
;
438 region
->drm_fd
= vws
->ioctl
.drm_fd
;
439 region
->size
= rep
->crep
.backup_size
;
442 *handle
= rep
->crep
.handle
;
443 *flags
= SVGA3D_FLAGS_64(rep
->creq
.svga3d_flags_upper_32_bits
,
444 rep
->creq
.base
.svga3d_flags
);
445 *format
= rep
->creq
.base
.format
;
446 *numMipLevels
= rep
->creq
.base
.mip_levels
;
448 union drm_vmw_gb_surface_reference_arg s_arg
;
449 struct drm_vmw_surface_arg
*req
= &s_arg
.req
;
450 struct drm_vmw_gb_surface_ref_rep
*rep
= &s_arg
.rep
;
452 memset(&s_arg
, 0, sizeof(s_arg
));
453 ret
= vmw_ioctl_surface_req(vws
, whandle
, req
, &needs_unref
);
458 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GB_SURFACE_REF
,
459 &s_arg
, sizeof(s_arg
));
464 region
->handle
= rep
->crep
.buffer_handle
;
465 region
->map_handle
= rep
->crep
.buffer_map_handle
;
466 region
->drm_fd
= vws
->ioctl
.drm_fd
;
467 region
->size
= rep
->crep
.backup_size
;
470 *handle
= rep
->crep
.handle
;
471 *flags
= rep
->creq
.svga3d_flags
;
472 *format
= rep
->creq
.format
;
473 *numMipLevels
= rep
->creq
.mip_levels
;
476 vmw_printf("%s flags %d format %d\n", __FUNCTION__
, *flags
, *format
);
479 vmw_ioctl_surface_destroy(vws
, *handle
);
484 vmw_ioctl_surface_destroy(vws
, *handle
);
/* Unreference the kernel surface identified by sid. Best-effort: the
 * ioctl result is intentionally discarded.
 * NOTE(review): the assignment of sid into s_arg is not visible in this
 * chunk; confirm s_arg.sid = sid precedes the ioctl. */
491 vmw_ioctl_surface_destroy(struct vmw_winsys_screen
*vws
, uint32 sid
)
493 struct drm_vmw_surface_arg s_arg
;
497 memset(&s_arg
, 0, sizeof(s_arg
));
500 (void)drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_UNREF_SURFACE
,
501 &s_arg
, sizeof(s_arg
));
505 vmw_ioctl_command(struct vmw_winsys_screen
*vws
, int32_t cid
,
506 uint32_t throttle_us
, void *commands
, uint32_t size
,
507 struct pipe_fence_handle
**pfence
, int32_t imported_fence_fd
,
510 struct drm_vmw_execbuf_arg arg
;
511 struct drm_vmw_fence_rep rep
;
517 static boolean firsttime
= TRUE
;
518 static boolean debug
= FALSE
;
519 static boolean skip
= FALSE
;
521 debug
= debug_get_bool_option("SVGA_DUMP_CMD", FALSE
);
522 skip
= debug_get_bool_option("SVGA_SKIP_CMD", FALSE
);
526 svga_dump_commands(commands
, size
);
535 memset(&arg
, 0, sizeof(arg
));
536 memset(&rep
, 0, sizeof(rep
));
538 if (flags
& SVGA_HINT_FLAG_EXPORT_FENCE_FD
) {
539 arg
.flags
|= DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD
;
542 if (imported_fence_fd
!= -1) {
543 arg
.flags
|= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD
;
548 arg
.fence_rep
= (unsigned long)&rep
;
549 arg
.commands
= (unsigned long)commands
;
550 arg
.command_size
= size
;
551 arg
.throttle_us
= throttle_us
;
552 arg
.version
= vws
->ioctl
.drm_execbuf_version
;
553 arg
.context_handle
= (vws
->base
.have_vgpu10
? cid
: SVGA3D_INVALID_ID
);
555 /* Older DRM module requires this to be zero */
556 if (vws
->base
.have_fence_fd
)
557 arg
.imported_fence_fd
= imported_fence_fd
;
559 /* In DRM_VMW_EXECBUF_VERSION 1, the drm_vmw_execbuf_arg structure ends with
560 * the flags field. The structure size sent to drmCommandWrite must match
561 * the drm_execbuf_version. Otherwise, an invalid value will be returned.
563 argsize
= vws
->ioctl
.drm_execbuf_version
> 1 ? sizeof(arg
) :
564 offsetof(struct drm_vmw_execbuf_arg
, context_handle
);
566 ret
= drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_EXECBUF
, &arg
, argsize
);
569 } while(ret
== -ERESTART
|| ret
== -EBUSY
);
571 vmw_error("%s error %s.\n", __FUNCTION__
, strerror(-ret
));
578 * Kernel has already synced, or caller requested no fence.
584 vmw_fences_signal(vws
->fence_ops
, rep
.passed_seqno
, rep
.seqno
,
587 /* Older DRM module will set this to zero, but -1 is the proper FD
588 * to use for no Fence FD support */
589 if (!vws
->base
.have_fence_fd
)
592 *pfence
= vmw_fence_create(vws
->fence_ops
, rep
.handle
,
593 rep
.seqno
, rep
.mask
, rep
.fd
);
594 if (*pfence
== NULL
) {
596 * Fence creation failed. Need to sync.
598 (void) vmw_ioctl_fence_finish(vws
, rep
.handle
, rep
.mask
);
599 vmw_ioctl_fence_unref(vws
, rep
.handle
);
/* Allocate a GMR buffer (dma-buf) of the given size through
 * DRM_VMW_ALLOC_DMABUF and wrap it in a newly CALLOC'd struct vmw_region.
 * The ioctl is retried while it returns -ERESTART (interrupted).
 * NOTE(review): the NULL check after CALLOC_STRUCT, the req->size
 * assignment, the do-loop head, and the final return are not visible in
 * this chunk — caller is presumed to own and later destroy the region. */
607 vmw_ioctl_region_create(struct vmw_winsys_screen
*vws
, uint32_t size
)
609 struct vmw_region
*region
;
610 union drm_vmw_alloc_dmabuf_arg arg
;
611 struct drm_vmw_alloc_dmabuf_req
*req
= &arg
.req
;
612 struct drm_vmw_dmabuf_rep
*rep
= &arg
.rep
;
615 vmw_printf("%s: size = %u\n", __FUNCTION__
, size
);
617 region
= CALLOC_STRUCT(vmw_region
);
621 memset(&arg
, 0, sizeof(arg
));
/* Retry on -ERESTART: the wait inside the kernel can be interrupted. */
624 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_ALLOC_DMABUF
, &arg
,
626 } while (ret
== -ERESTART
);
629 vmw_error("IOCTL failed %d: %s\n", ret
, strerror(-ret
));
/* Record the kernel handles needed for later mapping/destruction. */
634 region
->handle
= rep
->handle
;
635 region
->map_handle
= rep
->map_handle
;
636 region
->map_count
= 0;
638 region
->drm_fd
= vws
->ioctl
.drm_fd
;
640 vmw_printf(" gmrId = %u, offset = %u\n",
641 region
->ptr
.gmrId
, region
->ptr
.offset
);
/* Destroy a GMR buffer region: unmap any CPU mapping, then drop the
 * kernel reference via DRM_VMW_UNREF_DMABUF. The region struct itself is
 * presumably freed afterwards (the FREE call is not visible in this
 * chunk — confirm). */
651 vmw_ioctl_region_destroy(struct vmw_region
*region
)
653 struct drm_vmw_unref_dmabuf_arg arg
;
655 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__
,
656 region
->ptr
.gmrId
, region
->ptr
.offset
);
/* Tear down the CPU mapping established by vmw_ioctl_region_map. */
659 os_munmap(region
->data
, region
->size
);
663 memset(&arg
, 0, sizeof(arg
));
664 arg
.handle
= region
->handle
;
665 drmCommandWrite(region
->drm_fd
, DRM_VMW_UNREF_DMABUF
, &arg
, sizeof(arg
));
/* Build an SVGAGuestPtr for the region: gmrId is the kernel buffer
 * handle, offset 0. NOTE(review): the return statement is not visible in
 * this chunk; presumably ptr is returned by value. */
671 vmw_ioctl_region_ptr(struct vmw_region
*region
)
673 SVGAGuestPtr ptr
= {region
->handle
, 0};
/* Map the region into CPU-visible memory (read/write, shared) on first
 * use, via os_mmap on the DRM fd at the kernel-provided map_handle.
 * NOTE(review): the caching of map into region->data, the map_count
 * bookkeeping, and the return value are not visible in this chunk. */
678 vmw_ioctl_region_map(struct vmw_region
*region
)
682 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__
,
683 region
->ptr
.gmrId
, region
->ptr
.offset
);
/* Only create the mapping once; subsequent calls reuse region->data. */
685 if (region
->data
== NULL
) {
686 map
= os_mmap(NULL
, region
->size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
687 region
->drm_fd
, region
->map_handle
);
688 if (map
== MAP_FAILED
) {
689 vmw_error("%s: Map failed.\n", __FUNCTION__
);
/* Note that a mapping is no longer in use. NOTE(review): the visible body
 * only logs; the map_count decrement (if any) is not visible in this
 * chunk — the actual munmap is deferred to vmw_ioctl_region_destroy. */
702 vmw_ioctl_region_unmap(struct vmw_region
*region
)
704 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__
,
705 region
->ptr
.gmrId
, region
->ptr
.offset
);
710 * vmw_ioctl_syncforcpu - Synchronize a buffer object for CPU usage
712 * @region: Pointer to a struct vmw_region representing the buffer object.
713 * @dont_block: Dont wait for GPU idle, but rather return -EBUSY if the
714 * GPU is busy with the buffer object.
715 * @readonly: Hint that the CPU access is read-only.
716 * @allow_cs: Allow concurrent command submission while the buffer is
717 * synchronized for CPU. If FALSE command submissions referencing the
718 * buffer will block until a corresponding call to vmw_ioctl_releasefromcpu.
720 * This function idles any GPU activities touching the buffer and blocks
721 * command submission of commands referencing the buffer, even from
/* Grab a CPU-sync on the buffer object (DRM_VMW_SYNCCPU, op = grab).
 * Flags are built up from the function's boolean parameters: always
 * readable; write / dontblock / allow_cs added conditionally (the guarding
 * if-conditions are not visible in this chunk — presumably !readonly,
 * dont_block and allow_cs respectively; confirm).
 * Returns the drmCommandWrite result (0 on success, negative errno). */
725 vmw_ioctl_syncforcpu(struct vmw_region
*region
,
730 struct drm_vmw_synccpu_arg arg
;
732 memset(&arg
, 0, sizeof(arg
));
733 arg
.op
= drm_vmw_synccpu_grab
;
734 arg
.handle
= region
->handle
;
735 arg
.flags
= drm_vmw_synccpu_read
;
737 arg
.flags
|= drm_vmw_synccpu_write
;
739 arg
.flags
|= drm_vmw_synccpu_dontblock
;
741 arg
.flags
|= drm_vmw_synccpu_allow_cs
;
743 return drmCommandWrite(region
->drm_fd
, DRM_VMW_SYNCCPU
, &arg
, sizeof(arg
));
747 * vmw_ioctl_releasefromcpu - Undo a previous syncforcpu.
749 * @region: Pointer to a struct vmw_region representing the buffer object.
750 * @readonly: Should hold the same value as the matching syncforcpu call.
751 * @allow_cs: Should hold the same value as the matching syncforcpu call.
/* Release a CPU-sync previously taken with vmw_ioctl_syncforcpu
 * (DRM_VMW_SYNCCPU, op = release). The flags must mirror the grab call,
 * so the same conditional flag construction is repeated here. The ioctl
 * result is deliberately ignored. */
754 vmw_ioctl_releasefromcpu(struct vmw_region
*region
,
758 struct drm_vmw_synccpu_arg arg
;
760 memset(&arg
, 0, sizeof(arg
));
761 arg
.op
= drm_vmw_synccpu_release
;
762 arg
.handle
= region
->handle
;
763 arg
.flags
= drm_vmw_synccpu_read
;
765 arg
.flags
|= drm_vmw_synccpu_write
;
767 arg
.flags
|= drm_vmw_synccpu_allow_cs
;
769 (void) drmCommandWrite(region
->drm_fd
, DRM_VMW_SYNCCPU
, &arg
, sizeof(arg
));
/* Drop the kernel reference on a fence object via DRM_VMW_FENCE_UNREF.
 * Failure is only logged, never propagated.
 * NOTE(review): the assignment of the fence handle into arg is not
 * visible in this chunk — confirm arg.handle is set before the ioctl. */
773 vmw_ioctl_fence_unref(struct vmw_winsys_screen
*vws
,
776 struct drm_vmw_fence_arg arg
;
779 memset(&arg
, 0, sizeof(arg
));
782 ret
= drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_FENCE_UNREF
,
785 vmw_error("%s Failed\n", __FUNCTION__
);
/* Translate SVGA fence flags (SVGA_FENCE_FLAG_*) into the corresponding
 * DRM fence flags (DRM_VMW_FENCE_FLAG_*).
 * NOTE(review): the dflags initialization and return are not visible in
 * this chunk; presumably dflags starts at 0 and is returned. */
788 static inline uint32_t
789 vmw_drm_fence_flags(uint32_t flags
)
793 if (flags
& SVGA_FENCE_FLAG_EXEC
)
794 dflags
|= DRM_VMW_FENCE_FLAG_EXEC
;
795 if (flags
& SVGA_FENCE_FLAG_QUERY
)
796 dflags
|= DRM_VMW_FENCE_FLAG_QUERY
;
/* Non-blocking query of whether a fence has signalled, via
 * DRM_VMW_FENCE_SIGNALED. On success, propagates the kernel's
 * passed_seqno to the fence bookkeeping and returns 0 if signalled,
 * -1 otherwise.
 * NOTE(review): the assignment of handle/vflags into arg and the ioctl
 * error check are not visible in this chunk. */
803 vmw_ioctl_fence_signalled(struct vmw_winsys_screen
*vws
,
807 struct drm_vmw_fence_signaled_arg arg
;
808 uint32_t vflags
= vmw_drm_fence_flags(flags
);
811 memset(&arg
, 0, sizeof(arg
));
815 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_FENCE_SIGNALED
,
/* Let other outstanding fences learn about the last passed seqno. */
821 vmw_fences_signal(vws
->fence_ops
, arg
.passed_seqno
, 0, FALSE
);
823 return (arg
.signaled
) ? 0 : -1;
/* Block until the fence signals or the VMW_FENCE_TIMEOUT_SECONDS timeout
 * (converted here to microseconds) elapses, via DRM_VMW_FENCE_WAIT.
 * Failure is logged with vmw_error.
 * NOTE(review): the assignment of handle/flags into arg and the return
 * value are not visible in this chunk. */
829 vmw_ioctl_fence_finish(struct vmw_winsys_screen
*vws
,
833 struct drm_vmw_fence_wait_arg arg
;
834 uint32_t vflags
= vmw_drm_fence_flags(flags
);
837 memset(&arg
, 0, sizeof(arg
));
/* Kernel expects the timeout in microseconds. */
840 arg
.timeout_us
= VMW_FENCE_TIMEOUT_SECONDS
*1000000;
844 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_FENCE_WAIT
,
848 vmw_error("%s Failed\n", __FUNCTION__
);
/* Create a kernel shader object (DRM_VMW_CREATE_SHADER) of the given
 * legacy type (VS or PS only — other types hit the assert) and size
 * code_len. Returns the new shader handle, or SVGA3D_INVALID_ID on ioctl
 * failure.
 * NOTE(review): the switch head, break statements, and the copy of the
 * shader bytecode into a buffer are not visible in this chunk. */
854 vmw_ioctl_shader_create(struct vmw_winsys_screen
*vws
,
855 SVGA3dShaderType type
,
858 struct drm_vmw_shader_create_arg sh_arg
;
863 memset(&sh_arg
, 0, sizeof(sh_arg
));
865 sh_arg
.size
= code_len
;
/* No backing buffer supplied up front; handles start out invalid. */
866 sh_arg
.buffer_handle
= SVGA3D_INVALID_ID
;
867 sh_arg
.shader_handle
= SVGA3D_INVALID_ID
;
869 case SVGA3D_SHADERTYPE_VS
:
870 sh_arg
.shader_type
= drm_vmw_shader_type_vs
;
872 case SVGA3D_SHADERTYPE_PS
:
873 sh_arg
.shader_type
= drm_vmw_shader_type_ps
;
/* Only the legacy VS/PS shader types are supported by this path. */
876 assert(!"Invalid shader type.");
880 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_CREATE_SHADER
,
881 &sh_arg
, sizeof(sh_arg
));
884 return SVGA3D_INVALID_ID
;
886 return sh_arg
.shader_handle
;
/* Unreference the kernel shader object identified by shid. Best-effort:
 * the ioctl result is intentionally discarded. */
890 vmw_ioctl_shader_destroy(struct vmw_winsys_screen
*vws
, uint32 shid
)
892 struct drm_vmw_shader_arg sh_arg
;
896 memset(&sh_arg
, 0, sizeof(sh_arg
));
897 sh_arg
.handle
= shid
;
899 (void)drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_UNREF_SHADER
,
900 &sh_arg
, sizeof(sh_arg
));
905 vmw_ioctl_parse_caps(struct vmw_winsys_screen
*vws
,
906 const uint32_t *cap_buffer
)
910 if (vws
->base
.have_gb_objects
) {
911 for (i
= 0; i
< vws
->ioctl
.num_cap_3d
; ++i
) {
912 vws
->ioctl
.cap_3d
[i
].has_cap
= TRUE
;
913 vws
->ioctl
.cap_3d
[i
].result
.u
= cap_buffer
[i
];
917 const uint32
*capsBlock
;
918 const SVGA3dCapsRecord
*capsRecord
= NULL
;
920 const SVGA3dCapPair
*capArray
;
924 * Search linearly through the caps block records for the specified type.
926 capsBlock
= cap_buffer
;
927 for (offset
= 0; capsBlock
[offset
] != 0; offset
+= capsBlock
[offset
]) {
928 const SVGA3dCapsRecord
*record
;
929 assert(offset
< SVGA_FIFO_3D_CAPS_SIZE
);
930 record
= (const SVGA3dCapsRecord
*) (capsBlock
+ offset
);
931 if ((record
->header
.type
>= SVGA3DCAPS_RECORD_DEVCAPS_MIN
) &&
932 (record
->header
.type
<= SVGA3DCAPS_RECORD_DEVCAPS_MAX
) &&
933 (!capsRecord
|| (record
->header
.type
> capsRecord
->header
.type
))) {
942 * Calculate the number of caps from the size of the record.
944 capArray
= (const SVGA3dCapPair
*) capsRecord
->data
;
945 numCaps
= (int) ((capsRecord
->header
.length
* sizeof(uint32
) -
946 sizeof capsRecord
->header
) / (2 * sizeof(uint32
)));
948 for (i
= 0; i
< numCaps
; i
++) {
949 index
= capArray
[i
][0];
950 if (index
< vws
->ioctl
.num_cap_3d
) {
951 vws
->ioctl
.cap_3d
[index
].has_cap
= TRUE
;
952 vws
->ioctl
.cap_3d
[index
].result
.u
= capArray
[i
][1];
954 debug_printf("Unknown devcaps seen: %d\n", index
);
962 vmw_ioctl_init(struct vmw_winsys_screen
*vws
)
964 struct drm_vmw_getparam_arg gp_arg
;
965 struct drm_vmw_get_3d_cap_arg cap_arg
;
968 uint32_t *cap_buffer
;
969 drmVersionPtr version
;
970 boolean drm_gb_capable
;
971 boolean have_drm_2_5
;
972 const char *getenv_val
;
976 version
= drmGetVersion(vws
->ioctl
.drm_fd
);
980 have_drm_2_5
= version
->version_major
> 2 ||
981 (version
->version_major
== 2 && version
->version_minor
> 4);
982 vws
->ioctl
.have_drm_2_6
= version
->version_major
> 2 ||
983 (version
->version_major
== 2 && version
->version_minor
> 5);
984 vws
->ioctl
.have_drm_2_9
= version
->version_major
> 2 ||
985 (version
->version_major
== 2 && version
->version_minor
> 8);
986 vws
->ioctl
.have_drm_2_15
= version
->version_major
> 2 ||
987 (version
->version_major
== 2 && version
->version_minor
> 14);
989 vws
->ioctl
.drm_execbuf_version
= vws
->ioctl
.have_drm_2_9
? 2 : 1;
991 drm_gb_capable
= have_drm_2_5
;
993 memset(&gp_arg
, 0, sizeof(gp_arg
));
994 gp_arg
.param
= DRM_VMW_PARAM_3D
;
995 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
996 &gp_arg
, sizeof(gp_arg
));
997 if (ret
|| gp_arg
.value
== 0) {
998 vmw_error("No 3D enabled (%i, %s).\n", ret
, strerror(-ret
));
1002 memset(&gp_arg
, 0, sizeof(gp_arg
));
1003 gp_arg
.param
= DRM_VMW_PARAM_FIFO_HW_VERSION
;
1004 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1005 &gp_arg
, sizeof(gp_arg
));
1007 vmw_error("Failed to get fifo hw version (%i, %s).\n",
1008 ret
, strerror(-ret
));
1011 vws
->ioctl
.hwversion
= gp_arg
.value
;
1012 getenv_val
= getenv("SVGA_FORCE_HOST_BACKED");
1013 if (!getenv_val
|| strcmp(getenv_val
, "0") == 0) {
1014 memset(&gp_arg
, 0, sizeof(gp_arg
));
1015 gp_arg
.param
= DRM_VMW_PARAM_HW_CAPS
;
1016 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1017 &gp_arg
, sizeof(gp_arg
));
1022 vws
->base
.have_gb_objects
= FALSE
;
1024 vws
->base
.have_gb_objects
=
1025 !!(gp_arg
.value
& (uint64_t) SVGA_CAP_GBOBJECTS
);
1027 if (vws
->base
.have_gb_objects
&& !drm_gb_capable
)
1030 vws
->base
.have_vgpu10
= FALSE
;
1031 vws
->base
.have_sm4_1
= FALSE
;
1032 vws
->base
.have_intra_surface_copy
= FALSE
;
1034 if (vws
->base
.have_gb_objects
) {
1035 memset(&gp_arg
, 0, sizeof(gp_arg
));
1036 gp_arg
.param
= DRM_VMW_PARAM_MAX_MOB_MEMORY
;
1037 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1038 &gp_arg
, sizeof(gp_arg
));
1040 /* Just guess a large enough value. */
1041 vws
->ioctl
.max_mob_memory
= 256*1024*1024;
1043 vws
->ioctl
.max_mob_memory
= gp_arg
.value
;
1046 memset(&gp_arg
, 0, sizeof(gp_arg
));
1047 gp_arg
.param
= DRM_VMW_PARAM_MAX_MOB_SIZE
;
1048 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1049 &gp_arg
, sizeof(gp_arg
));
1051 if (ret
|| gp_arg
.value
== 0) {
1052 vws
->ioctl
.max_texture_size
= VMW_MAX_DEFAULT_TEXTURE_SIZE
;
1054 vws
->ioctl
.max_texture_size
= gp_arg
.value
;
1057 /* Never early flush surfaces, mobs do accounting. */
1058 vws
->ioctl
.max_surface_memory
= -1;
1060 if (vws
->ioctl
.have_drm_2_9
) {
1061 memset(&gp_arg
, 0, sizeof(gp_arg
));
1062 gp_arg
.param
= DRM_VMW_PARAM_DX
;
1063 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1064 &gp_arg
, sizeof(gp_arg
));
1065 if (ret
== 0 && gp_arg
.value
!= 0) {
1066 const char *vgpu10_val
;
1068 debug_printf("Have VGPU10 interface and hardware.\n");
1069 vws
->base
.have_vgpu10
= TRUE
;
1070 vgpu10_val
= getenv("SVGA_VGPU10");
1071 if (vgpu10_val
&& strcmp(vgpu10_val
, "0") == 0) {
1072 debug_printf("Disabling VGPU10 interface.\n");
1073 vws
->base
.have_vgpu10
= FALSE
;
1075 debug_printf("Enabling VGPU10 interface.\n");
1080 if (vws
->ioctl
.have_drm_2_15
&& vws
->base
.have_vgpu10
) {
1081 memset(&gp_arg
, 0, sizeof(gp_arg
));
1082 gp_arg
.param
= DRM_VMW_PARAM_HW_CAPS2
;
1083 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1084 &gp_arg
, sizeof(gp_arg
));
1085 if (ret
== 0 && gp_arg
.value
!= 0) {
1086 vws
->base
.have_intra_surface_copy
= TRUE
;
1089 memset(&gp_arg
, 0, sizeof(gp_arg
));
1090 gp_arg
.param
= DRM_VMW_PARAM_SM4_1
;
1091 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1092 &gp_arg
, sizeof(gp_arg
));
1093 if (ret
== 0 && gp_arg
.value
!= 0) {
1094 vws
->base
.have_sm4_1
= TRUE
;
1098 memset(&gp_arg
, 0, sizeof(gp_arg
));
1099 gp_arg
.param
= DRM_VMW_PARAM_3D_CAPS_SIZE
;
1100 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1101 &gp_arg
, sizeof(gp_arg
));
1103 size
= SVGA_FIFO_3D_CAPS_SIZE
* sizeof(uint32_t);
1105 size
= gp_arg
.value
;
1107 if (vws
->base
.have_gb_objects
)
1108 vws
->ioctl
.num_cap_3d
= size
/ sizeof(uint32_t);
1110 vws
->ioctl
.num_cap_3d
= SVGA3D_DEVCAP_MAX
;
1112 vws
->ioctl
.num_cap_3d
= SVGA3D_DEVCAP_MAX
;
1114 memset(&gp_arg
, 0, sizeof(gp_arg
));
1115 gp_arg
.param
= DRM_VMW_PARAM_MAX_SURF_MEMORY
;
1117 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1118 &gp_arg
, sizeof(gp_arg
));
1119 if (!have_drm_2_5
|| ret
) {
1120 /* Just guess a large enough value, around 800mb. */
1121 vws
->ioctl
.max_surface_memory
= 0x30000000;
1123 vws
->ioctl
.max_surface_memory
= gp_arg
.value
;
1126 vws
->ioctl
.max_texture_size
= VMW_MAX_DEFAULT_TEXTURE_SIZE
;
1128 size
= SVGA_FIFO_3D_CAPS_SIZE
* sizeof(uint32_t);
1131 debug_printf("VGPU10 interface is %s.\n",
1132 vws
->base
.have_vgpu10
? "on" : "off");
1134 cap_buffer
= calloc(1, size
);
1136 debug_printf("Failed alloc fifo 3D caps buffer.\n");
1140 vws
->ioctl
.cap_3d
= calloc(vws
->ioctl
.num_cap_3d
,
1141 sizeof(*vws
->ioctl
.cap_3d
));
1142 if (!vws
->ioctl
.cap_3d
) {
1143 debug_printf("Failed alloc fifo 3D caps buffer.\n");
1144 goto out_no_caparray
;
1147 memset(&cap_arg
, 0, sizeof(cap_arg
));
1148 cap_arg
.buffer
= (uint64_t) (unsigned long) (cap_buffer
);
1149 cap_arg
.max_size
= size
;
1152 * This call must always be after DRM_VMW_PARAM_MAX_MOB_MEMORY and
1153 * DRM_VMW_PARAM_SM4_1. This is because, based on these calls, kernel
1154 * driver sends the supported cap.
1156 ret
= drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_GET_3D_CAP
,
1157 &cap_arg
, sizeof(cap_arg
));
1160 debug_printf("Failed to get 3D capabilities"
1161 " (%i, %s).\n", ret
, strerror(-ret
));
1165 ret
= vmw_ioctl_parse_caps(vws
, cap_buffer
);
1167 debug_printf("Failed to parse 3D capabilities"
1168 " (%i, %s).\n", ret
, strerror(-ret
));
1172 if (((version
->version_major
== 2 && version
->version_minor
>= 10)
1173 || version
->version_major
> 2) && vws
->base
.have_vgpu10
) {
1175 /* support for these commands didn't make it into vmwgfx kernel
1176 * modules before 2.10.
1178 vws
->base
.have_generate_mipmap_cmd
= TRUE
;
1179 vws
->base
.have_set_predication_cmd
= TRUE
;
1182 if (version
->version_major
== 2 && version
->version_minor
>= 14) {
1183 vws
->base
.have_fence_fd
= TRUE
;
1187 drmFreeVersion(version
);
1188 vmw_printf("%s OK\n", __FUNCTION__
);
1191 free(vws
->ioctl
.cap_3d
);
1195 drmFreeVersion(version
);
1197 vws
->ioctl
.num_cap_3d
= 0;
1198 debug_printf("%s Failed\n", __FUNCTION__
);
/* Release resources allocated by vmw_ioctl_init — currently the 3D caps
 * array (free(NULL) is a safe no-op if init failed early). */
1205 vmw_ioctl_cleanup(struct vmw_winsys_screen
*vws
)
1209 free(vws
->ioctl
.cap_3d
);