1 /**********************************************************
2 * Copyright 2009-2015 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **********************************************************/
29 * Wrappers for DRM ioctl functionality used by the rest of the vmw
32 * Based on svgaicd_escape.c
37 #include "util/u_memory.h"
38 #include "util/u_math.h"
39 #include "svgadump/svga_dump.h"
40 #include "state_tracker/drm_driver.h"
41 #include "vmw_screen.h"
42 #include "vmw_context.h"
43 #include "vmw_fence.h"
45 #include "vmwgfx_drm.h"
46 #include "svga3d_caps.h"
47 #include "svga3d_reg.h"
49 #include "os/os_mman.h"
54 #define VMW_MAX_DEFAULT_TEXTURE_SIZE (128 * 1024 * 1024)
55 #define VMW_FENCE_TIMEOUT_SECONDS 60
68 vmw_region_size(struct vmw_region
*region
)
73 #if defined(__DragonFly__) || defined(__FreeBSD__) || \
74 defined(__NetBSD__) || defined(__OpenBSD__)
75 #define ERESTART EINTR
/*
 * vmw_ioctl_context_create - Create a (legacy) SVGA hardware context.
 *
 * Issues the DRM_VMW_CREATE_CONTEXT ioctl; on success the kernel fills
 * in c_arg.cid with the new context id (printed below).
 * NOTE(review): interior lines of this function are missing from this
 * extraction (gaps in embedded line numbers) — comments only, no code edits.
 */
79 vmw_ioctl_context_create(struct vmw_winsys_screen
*vws
)
81 struct drm_vmw_context_arg c_arg
;
/* Read-only command: kernel returns the context id in c_arg. */
86 ret
= drmCommandRead(vws
->ioctl
.drm_fd
, DRM_VMW_CREATE_CONTEXT
,
87 &c_arg
, sizeof(c_arg
));
92 vmw_printf("Context id is %d\n", c_arg
.cid
);
/*
 * vmw_ioctl_extended_context_create - Create a context, selecting the
 * VGPU10 or legacy variant via c_arg.req, through
 * DRM_VMW_CREATE_EXTENDED_CONTEXT. Returns the new id (c_arg.rep.cid).
 * NOTE(review): interior lines missing from this extraction — comments only.
 */
97 vmw_ioctl_extended_context_create(struct vmw_winsys_screen
*vws
,
100 union drm_vmw_extended_context_arg c_arg
;
104 memset(&c_arg
, 0, sizeof(c_arg
));
/* Request the context flavor matching the vgpu10 argument. */
105 c_arg
.req
= (vgpu10
? drm_vmw_context_vgpu10
: drm_vmw_context_legacy
);
106 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
,
107 DRM_VMW_CREATE_EXTENDED_CONTEXT
,
108 &c_arg
, sizeof(c_arg
));
113 vmw_printf("Context id is %d\n", c_arg
.cid
);
114 return c_arg
.rep
.cid
;
/*
 * vmw_ioctl_context_destroy - Release a context id with
 * DRM_VMW_UNREF_CONTEXT. The ioctl result is intentionally ignored
 * (best-effort teardown).
 * NOTE(review): interior lines (e.g. c_arg.cid assignment) are missing
 * from this extraction — comments only.
 */
118 vmw_ioctl_context_destroy(struct vmw_winsys_screen
*vws
, uint32 cid
)
120 struct drm_vmw_context_arg c_arg
;
124 memset(&c_arg
, 0, sizeof(c_arg
));
127 (void)drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_UNREF_CONTEXT
,
128 &c_arg
, sizeof(c_arg
));
/*
 * vmw_ioctl_surface_create - Create a non-guest-backed surface via
 * DRM_VMW_CREATE_SURFACE.
 *
 * Builds a per-face, per-mip array of drm_vmw_size entries (halving
 * width/height/depth per level, clamped to 1 with MAX2) and passes its
 * address in req->size_addr. The scanout/shareable request bits are
 * derived from the usage flags. On success the kernel returns the
 * surface id in rep->sid.
 * NOTE(review): interior lines are missing from this extraction
 * (e.g. cur_size initialization/advance, error handling, return) —
 * comments only, no code edits.
 */
133 vmw_ioctl_surface_create(struct vmw_winsys_screen
*vws
,
134 SVGA3dSurface1Flags flags
,
135 SVGA3dSurfaceFormat format
,
138 uint32_t numFaces
, uint32_t numMipLevels
,
139 unsigned sampleCount
)
141 union drm_vmw_surface_create_arg s_arg
;
142 struct drm_vmw_surface_create_req
*req
= &s_arg
.req
;
143 struct drm_vmw_surface_arg
*rep
= &s_arg
.rep
;
144 struct drm_vmw_size sizes
[DRM_VMW_MAX_SURFACE_FACES
*
145 DRM_VMW_MAX_MIP_LEVELS
];
146 struct drm_vmw_size
*cur_size
;
151 vmw_printf("%s flags %d format %d\n", __FUNCTION__
, flags
, format
);
153 memset(&s_arg
, 0, sizeof(s_arg
));
154 req
->flags
= (uint32_t) flags
;
/* Map winsys usage bits onto the kernel request booleans. */
155 req
->scanout
= !!(usage
& SVGA_SURFACE_USAGE_SCANOUT
);
156 req
->format
= (uint32_t) format
;
157 req
->shareable
= !!(usage
& SVGA_SURFACE_USAGE_SHARED
);
159 assert(numFaces
* numMipLevels
< DRM_VMW_MAX_SURFACE_FACES
*
160 DRM_VMW_MAX_MIP_LEVELS
);
/* Fill in one drm_vmw_size per mip level of each face, shrinking the
 * base size by half per level (never below 1). */
162 for (iFace
= 0; iFace
< numFaces
; ++iFace
) {
163 SVGA3dSize mipSize
= size
;
165 req
->mip_levels
[iFace
] = numMipLevels
;
166 for (iMipLevel
= 0; iMipLevel
< numMipLevels
; ++iMipLevel
) {
167 cur_size
->width
= mipSize
.width
;
168 cur_size
->height
= mipSize
.height
;
169 cur_size
->depth
= mipSize
.depth
;
170 mipSize
.width
= MAX2(mipSize
.width
>> 1, 1);
171 mipSize
.height
= MAX2(mipSize
.height
>> 1, 1);
172 mipSize
.depth
= MAX2(mipSize
.depth
>> 1, 1);
/* Unused faces get an explicit zero mip count. */
176 for (iFace
= numFaces
; iFace
< SVGA3D_MAX_SURFACE_FACES
; ++iFace
) {
177 req
->mip_levels
[iFace
] = 0;
180 req
->size_addr
= (unsigned long)&sizes
;
182 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_CREATE_SURFACE
,
183 &s_arg
, sizeof(s_arg
));
188 vmw_printf("Surface id is %d\n", rep
->sid
);
/*
 * vmw_ioctl_gb_surface_create - Create a guest-backed surface via
 * DRM_VMW_GB_SURFACE_CREATE.
 *
 * Allocates a vmw_region wrapper for the surface's backup buffer and,
 * on success, fills it from the kernel reply (handle, map_handle,
 * backup_size). Scanout/shareable flags are derived from usage; a
 * backing buffer is always requested (drm_vmw_surface_flag_create_buffer).
 * array_size/multisample_count are only meaningful on VGPU10 devices.
 * Returns SVGA3D_INVALID_ID on failure paths visible here.
 * NOTE(review): interior lines are missing from this extraction
 * (allocation-failure check, out_fail_create label, p_region store,
 * success return) — comments only, no code edits.
 */
195 vmw_ioctl_gb_surface_create(struct vmw_winsys_screen
*vws
,
196 SVGA3dSurface1Flags flags
,
197 SVGA3dSurfaceFormat format
,
201 uint32_t numMipLevels
,
202 unsigned sampleCount
,
203 uint32_t buffer_handle
,
204 struct vmw_region
**p_region
)
206 union drm_vmw_gb_surface_create_arg s_arg
;
207 struct drm_vmw_gb_surface_create_req
*req
= &s_arg
.req
;
208 struct drm_vmw_gb_surface_create_rep
*rep
= &s_arg
.rep
;
209 struct vmw_region
*region
= NULL
;
212 vmw_printf("%s flags %d format %d\n", __FUNCTION__
, flags
, format
);
/* Region wrapper for the kernel-created backup buffer. */
215 region
= CALLOC_STRUCT(vmw_region
);
217 return SVGA3D_INVALID_ID
;
220 memset(&s_arg
, 0, sizeof(s_arg
));
221 req
->svga3d_flags
= (uint32_t) flags
;
222 if (usage
& SVGA_SURFACE_USAGE_SCANOUT
)
223 req
->drm_surface_flags
|= drm_vmw_surface_flag_scanout
;
224 req
->format
= (uint32_t) format
;
225 if (usage
& SVGA_SURFACE_USAGE_SHARED
)
226 req
->drm_surface_flags
|= drm_vmw_surface_flag_shareable
;
227 req
->drm_surface_flags
|= drm_vmw_surface_flag_create_buffer
;
228 req
->base_size
.width
= size
.width
;
229 req
->base_size
.height
= size
.height
;
230 req
->base_size
.depth
= size
.depth
;
231 req
->mip_levels
= numMipLevels
;
232 req
->multisample_count
= 0;
233 req
->autogen_filter
= SVGA3D_TEX_FILTER_NONE
;
/* VGPU10 devices understand array surfaces and real MSAA counts. */
235 if (vws
->base
.have_vgpu10
) {
236 req
->array_size
= numFaces
;
237 req
->multisample_count
= sampleCount
;
239 assert(numFaces
* numMipLevels
< DRM_VMW_MAX_SURFACE_FACES
*
240 DRM_VMW_MAX_MIP_LEVELS
);
/* Reuse an existing backing buffer when the caller supplies one. */
245 req
->buffer_handle
= buffer_handle
;
247 req
->buffer_handle
= SVGA3D_INVALID_ID
;
249 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GB_SURFACE_CREATE
,
250 &s_arg
, sizeof(s_arg
));
253 goto out_fail_create
;
/* Success: capture the backup-buffer identity in the region wrapper. */
256 region
->handle
= rep
->buffer_handle
;
257 region
->map_handle
= rep
->buffer_map_handle
;
258 region
->drm_fd
= vws
->ioctl
.drm_fd
;
259 region
->size
= rep
->backup_size
;
263 vmw_printf("Surface id is %d\n", rep
->sid
);
268 return SVGA3D_INVALID_ID
;
272 * vmw_ioctl_surface_req - Fill in a struct surface_req
274 * @vws: Winsys screen
275 * @whandle: Surface handle
276 * @req: The struct surface req to fill in
277 * @needs_unref: This call takes a kernel surface reference that needs to
280 * Returns 0 on success, negative error type otherwise.
281 * Fills in the surface_req structure according to handle type and kernel
/*
 * Dispatch on the winsys handle type: SHARED/KMS handles are used
 * directly as legacy sids; FD handles are prime fds that must be
 * converted (drmPrimeFDToHandle) on pre-2.6 kernels, or referenced as
 * DRM_VMW_HANDLE_PRIME on newer ones.
 * NOTE(review): interior lines are missing from this extraction
 * (break statements, return paths, the handle variable declaration) —
 * comments only, no code edits.
 */
285 vmw_ioctl_surface_req(const struct vmw_winsys_screen
*vws
,
286 const struct winsys_handle
*whandle
,
287 struct drm_vmw_surface_arg
*req
,
288 boolean
*needs_unref
)
292 switch(whandle
->type
) {
293 case WINSYS_HANDLE_TYPE_SHARED
:
294 case WINSYS_HANDLE_TYPE_KMS
:
/* Legacy handles carry the sid directly; no extra kernel ref taken. */
295 *needs_unref
= FALSE
;
296 req
->handle_type
= DRM_VMW_HANDLE_LEGACY
;
297 req
->sid
= whandle
->handle
;
299 case WINSYS_HANDLE_TYPE_FD
:
/* Pre-2.6 kernels: translate the prime fd to a gem handle first. */
300 if (!vws
->ioctl
.have_drm_2_6
) {
303 ret
= drmPrimeFDToHandle(vws
->ioctl
.drm_fd
, whandle
->handle
, &handle
);
305 vmw_error("Failed to get handle from prime fd %d.\n",
306 (int) whandle
->handle
);
311 req
->handle_type
= DRM_VMW_HANDLE_LEGACY
;
/* 2.6+ kernels accept the prime fd as a handle directly. */
314 *needs_unref
= FALSE
;
315 req
->handle_type
= DRM_VMW_HANDLE_PRIME
;
316 req
->sid
= whandle
->handle
;
320 vmw_error("Attempt to import unsupported handle type %d.\n",
329 * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
330 * get surface information
332 * @vws: Screen to register the reference on
333 * @handle: Kernel handle of the guest-backed surface
334 * @flags: flags used when the surface was created
335 * @format: Format used when the surface was created
336 * @numMipLevels: Number of mipmap levels of the surface
337 * @p_region: On successful return points to a newly allocated
338 * struct vmw_region holding a reference to the surface backup buffer.
340 * Returns 0 on success, a system error on failure.
/*
 * Reference an existing guest-backed surface (DRM_VMW_GB_SURFACE_REF)
 * and report its creation parameters back to the caller through the
 * out pointers (*handle, *flags, *format, *numMipLevels), plus a newly
 * allocated vmw_region describing its backup buffer.
 * NOTE(review): interior lines are missing from this extraction
 * (allocation/error checks, the *p_region store, cleanup labels) —
 * comments only, no code edits. The two surface_destroy calls at the
 * bottom appear to belong to separate error-unwind paths.
 */
343 vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen
*vws
,
344 const struct winsys_handle
*whandle
,
345 SVGA3dSurface1Flags
*flags
,
346 SVGA3dSurfaceFormat
*format
,
347 uint32_t *numMipLevels
,
349 struct vmw_region
**p_region
)
351 union drm_vmw_gb_surface_reference_arg s_arg
;
352 struct drm_vmw_surface_arg
*req
= &s_arg
.req
;
353 struct drm_vmw_gb_surface_ref_rep
*rep
= &s_arg
.rep
;
354 struct vmw_region
*region
= NULL
;
355 boolean needs_unref
= FALSE
;
358 vmw_printf("%s flags %d format %d\n", __FUNCTION__
, flags
, format
);
360 assert(p_region
!= NULL
);
361 region
= CALLOC_STRUCT(vmw_region
);
365 memset(&s_arg
, 0, sizeof(s_arg
));
/* Translate the winsys handle into the kernel request format. */
366 ret
= vmw_ioctl_surface_req(vws
, whandle
, req
, &needs_unref
);
371 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GB_SURFACE_REF
,
372 &s_arg
, sizeof(s_arg
));
/* Describe the surface's backup buffer in the region wrapper. */
377 region
->handle
= rep
->crep
.buffer_handle
;
378 region
->map_handle
= rep
->crep
.buffer_map_handle
;
379 region
->drm_fd
= vws
->ioctl
.drm_fd
;
380 region
->size
= rep
->crep
.backup_size
;
/* Echo the original creation parameters back to the caller. */
383 *handle
= rep
->crep
.handle
;
384 *flags
= rep
->creq
.svga3d_flags
;
385 *format
= rep
->creq
.format
;
386 *numMipLevels
= rep
->creq
.mip_levels
;
389 vmw_ioctl_surface_destroy(vws
, *handle
);
394 vmw_ioctl_surface_destroy(vws
, *handle
);
/*
 * vmw_ioctl_surface_destroy - Drop a surface reference with
 * DRM_VMW_UNREF_SURFACE; the ioctl result is deliberately ignored.
 * NOTE(review): the line assigning s_arg.sid is missing from this
 * extraction — comments only, no code edits.
 */
401 vmw_ioctl_surface_destroy(struct vmw_winsys_screen
*vws
, uint32 sid
)
403 struct drm_vmw_surface_arg s_arg
;
407 memset(&s_arg
, 0, sizeof(s_arg
));
410 (void)drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_UNREF_SURFACE
,
411 &s_arg
, sizeof(s_arg
));
/*
 * vmw_ioctl_command - Submit a command buffer via DRM_VMW_EXECBUF and
 * optionally produce a fence (*pfence).
 *
 * Optional debug hooks: SVGA_DUMP_CMD dumps the command stream,
 * SVGA_SKIP_CMD (read once via the static firsttime/debug/skip flags)
 * presumably skips submission — TODO confirm against the missing lines.
 * Fence fd import/export is requested through arg.flags; the struct
 * size handed to the kernel is trimmed for execbuf version 1 (see the
 * offsetof computation). The submit loop retries on -ERESTART.
 * NOTE(review): interior lines are missing from this extraction
 * (the do { opener, error returns, fence-fd close path) — comments
 * only, no code edits.
 */
415 vmw_ioctl_command(struct vmw_winsys_screen
*vws
, int32_t cid
,
416 uint32_t throttle_us
, void *commands
, uint32_t size
,
417 struct pipe_fence_handle
**pfence
, int32_t imported_fence_fd
,
420 struct drm_vmw_execbuf_arg arg
;
421 struct drm_vmw_fence_rep rep
;
427 static boolean firsttime
= TRUE
;
428 static boolean debug
= FALSE
;
429 static boolean skip
= FALSE
;
431 debug
= debug_get_bool_option("SVGA_DUMP_CMD", FALSE
);
432 skip
= debug_get_bool_option("SVGA_SKIP_CMD", FALSE
);
436 svga_dump_commands(commands
, size
);
445 memset(&arg
, 0, sizeof(arg
));
446 memset(&rep
, 0, sizeof(rep
));
448 if (flags
& SVGA_HINT_FLAG_EXPORT_FENCE_FD
) {
449 arg
.flags
|= DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD
;
452 if (imported_fence_fd
!= -1) {
453 arg
.flags
|= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD
;
458 arg
.fence_rep
= (unsigned long)&rep
;
459 arg
.commands
= (unsigned long)commands
;
460 arg
.command_size
= size
;
461 arg
.throttle_us
= throttle_us
;
462 arg
.version
= vws
->ioctl
.drm_execbuf_version
;
/* Only VGPU10 submissions carry an explicit context handle. */
463 arg
.context_handle
= (vws
->base
.have_vgpu10
? cid
: SVGA3D_INVALID_ID
);
465 /* Older DRM module requires this to be zero */
466 if (vws
->base
.have_fence_fd
)
467 arg
.imported_fence_fd
= imported_fence_fd
;
469 /* In DRM_VMW_EXECBUF_VERSION 1, the drm_vmw_execbuf_arg structure ends with
470 * the flags field. The structure size sent to drmCommandWrite must match
471 * the drm_execbuf_version. Otherwise, an invalid value will be returned.
473 argsize
= vws
->ioctl
.drm_execbuf_version
> 1 ? sizeof(arg
) :
474 offsetof(struct drm_vmw_execbuf_arg
, context_handle
);
/* Retry the submission when interrupted by a signal (-ERESTART). */
476 ret
= drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_EXECBUF
, &arg
, argsize
);
477 } while(ret
== -ERESTART
);
479 vmw_error("%s error %s.\n", __FUNCTION__
, strerror(-ret
));
486 * Kernel has already synced, or caller requested no fence.
/* Let the fence bookkeeping know which seqnos have passed. */
492 vmw_fences_signal(vws
->fence_ops
, rep
.passed_seqno
, rep
.seqno
,
495 /* Older DRM module will set this to zero, but -1 is the proper FD
496 * to use for no Fence FD support */
497 if (!vws
->base
.have_fence_fd
)
500 *pfence
= vmw_fence_create(vws
->fence_ops
, rep
.handle
,
501 rep
.seqno
, rep
.mask
, rep
.fd
);
502 if (*pfence
== NULL
) {
504 * Fence creation failed. Need to sync.
506 (void) vmw_ioctl_fence_finish(vws
, rep
.handle
, rep
.mask
);
507 vmw_ioctl_fence_unref(vws
, rep
.handle
);
/*
 * vmw_ioctl_region_create - Allocate a GMR/dma buffer of `size` bytes
 * via DRM_VMW_ALLOC_DMABUF, retrying on -ERESTART, and wrap the result
 * in a freshly CALLOC'ed vmw_region (handle, map_handle, drm_fd,
 * map_count = 0).
 * NOTE(review): interior lines are missing from this extraction
 * (the do { opener, req->size assignment, failure cleanup, return) —
 * comments only, no code edits.
 */
515 vmw_ioctl_region_create(struct vmw_winsys_screen
*vws
, uint32_t size
)
517 struct vmw_region
*region
;
518 union drm_vmw_alloc_dmabuf_arg arg
;
519 struct drm_vmw_alloc_dmabuf_req
*req
= &arg
.req
;
520 struct drm_vmw_dmabuf_rep
*rep
= &arg
.rep
;
523 vmw_printf("%s: size = %u\n", __FUNCTION__
, size
);
525 region
= CALLOC_STRUCT(vmw_region
);
529 memset(&arg
, 0, sizeof(arg
));
/* Allocation may be interrupted; the enclosing loop retries on -ERESTART. */
532 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_ALLOC_DMABUF
, &arg
,
534 } while (ret
== -ERESTART
);
537 vmw_error("IOCTL failed %d: %s\n", ret
, strerror(-ret
));
542 region
->handle
= rep
->handle
;
543 region
->map_handle
= rep
->map_handle
;
544 region
->map_count
= 0;
546 region
->drm_fd
= vws
->ioctl
.drm_fd
;
548 vmw_printf(" gmrId = %u, offset = %u\n",
549 region
->ptr
.gmrId
, region
->ptr
.offset
);
/*
 * vmw_ioctl_region_destroy - Tear down a region: unmap any CPU mapping
 * (os_munmap of region->data) and release the kernel buffer with
 * DRM_VMW_UNREF_DMABUF. The unref result is ignored.
 * NOTE(review): interior lines are missing from this extraction
 * (the region->data NULL check before munmap, FREE of the wrapper) —
 * comments only, no code edits.
 */
559 vmw_ioctl_region_destroy(struct vmw_region
*region
)
561 struct drm_vmw_unref_dmabuf_arg arg
;
563 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__
,
564 region
->ptr
.gmrId
, region
->ptr
.offset
);
567 os_munmap(region
->data
, region
->size
);
571 memset(&arg
, 0, sizeof(arg
));
572 arg
.handle
= region
->handle
;
573 drmCommandWrite(region
->drm_fd
, DRM_VMW_UNREF_DMABUF
, &arg
, sizeof(arg
));
/*
 * vmw_ioctl_region_ptr - Build an SVGAGuestPtr for the region: gmrId is
 * the buffer handle, offset is 0.
 * NOTE(review): the return statement is missing from this extraction —
 * comments only, no code edits.
 */
579 vmw_ioctl_region_ptr(struct vmw_region
*region
)
581 SVGAGuestPtr ptr
= {region
->handle
, 0};
/*
 * vmw_ioctl_region_map - Lazily mmap the region's buffer for CPU access.
 *
 * Only maps when region->data is still NULL, using os_mmap on the DRM fd
 * at region->map_handle with read/write shared semantics. Logs and
 * (presumably) fails when os_mmap returns MAP_FAILED.
 * NOTE(review): interior lines are missing from this extraction
 * (storing map into region->data, map_count increment, return value) —
 * comments only, no code edits.
 */
586 vmw_ioctl_region_map(struct vmw_region
*region
)
590 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__
,
591 region
->ptr
.gmrId
, region
->ptr
.offset
);
/* First mapping only; subsequent calls reuse the cached mapping. */
593 if (region
->data
== NULL
) {
594 map
= os_mmap(NULL
, region
->size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
595 region
->drm_fd
, region
->map_handle
);
596 if (map
== MAP_FAILED
) {
597 vmw_error("%s: Map failed.\n", __FUNCTION__
);
/*
 * vmw_ioctl_region_unmap - Counterpart of vmw_ioctl_region_map.
 * NOTE(review): the body beyond the trace printout (presumably a
 * map_count decrement) is missing from this extraction — comments
 * only, no code edits.
 */
610 vmw_ioctl_region_unmap(struct vmw_region
*region
)
612 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__
,
613 region
->ptr
.gmrId
, region
->ptr
.offset
);
618 * vmw_ioctl_syncforcpu - Synchronize a buffer object for CPU usage
620 * @region: Pointer to a struct vmw_region representing the buffer object.
621 * @dont_block: Dont wait for GPU idle, but rather return -EBUSY if the
622 * GPU is busy with the buffer object.
623 * @readonly: Hint that the CPU access is read-only.
624 * @allow_cs: Allow concurrent command submission while the buffer is
625 * synchronized for CPU. If FALSE command submissions referencing the
626 * buffer will block until a corresponding call to vmw_ioctl_releasefromcpu.
628 * This function idles any GPU activities touching the buffer and blocks
629 * command submission of commands referencing the buffer, even from
/*
 * Grab the buffer for CPU access via DRM_VMW_SYNCCPU
 * (op = drm_vmw_synccpu_grab). Flags start as read and are widened to
 * write / dontblock / allow_cs by the (missing) conditionals on the
 * readonly / dont_block / allow_cs parameters — see the doc comment above.
 * Returns the ioctl result directly.
 * NOTE(review): the conditional lines guarding each |= are missing from
 * this extraction — comments only, no code edits.
 */
633 vmw_ioctl_syncforcpu(struct vmw_region
*region
,
638 struct drm_vmw_synccpu_arg arg
;
640 memset(&arg
, 0, sizeof(arg
));
641 arg
.op
= drm_vmw_synccpu_grab
;
642 arg
.handle
= region
->handle
;
643 arg
.flags
= drm_vmw_synccpu_read
;
645 arg
.flags
|= drm_vmw_synccpu_write
;
647 arg
.flags
|= drm_vmw_synccpu_dontblock
;
649 arg
.flags
|= drm_vmw_synccpu_allow_cs
;
651 return drmCommandWrite(region
->drm_fd
, DRM_VMW_SYNCCPU
, &arg
, sizeof(arg
));
655 * vmw_ioctl_releasefromcpu - Undo a previous syncforcpu.
657 * @region: Pointer to a struct vmw_region representing the buffer object.
658 * @readonly: Should hold the same value as the matching syncforcpu call.
659 * @allow_cs: Should hold the same value as the matching syncforcpu call.
/*
 * Release a previous syncforcpu grab via DRM_VMW_SYNCCPU
 * (op = drm_vmw_synccpu_release). The flags must mirror those of the
 * matching grab (see doc comment above); result is deliberately ignored.
 * NOTE(review): the conditional lines guarding each |= are missing from
 * this extraction — comments only, no code edits.
 */
662 vmw_ioctl_releasefromcpu(struct vmw_region
*region
,
666 struct drm_vmw_synccpu_arg arg
;
668 memset(&arg
, 0, sizeof(arg
));
669 arg
.op
= drm_vmw_synccpu_release
;
670 arg
.handle
= region
->handle
;
671 arg
.flags
= drm_vmw_synccpu_read
;
673 arg
.flags
|= drm_vmw_synccpu_write
;
675 arg
.flags
|= drm_vmw_synccpu_allow_cs
;
677 (void) drmCommandWrite(region
->drm_fd
, DRM_VMW_SYNCCPU
, &arg
, sizeof(arg
));
/*
 * vmw_ioctl_fence_unref - Drop a fence object reference with
 * DRM_VMW_FENCE_UNREF; failures are only logged.
 * NOTE(review): the arg.handle assignment and the if (ret) guard are
 * missing from this extraction — comments only, no code edits.
 */
681 vmw_ioctl_fence_unref(struct vmw_winsys_screen
*vws
,
684 struct drm_vmw_fence_arg arg
;
687 memset(&arg
, 0, sizeof(arg
));
690 ret
= drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_FENCE_UNREF
,
693 vmw_error("%s Failed\n", __FUNCTION__
);
/*
 * vmw_drm_fence_flags - Translate winsys SVGA_FENCE_FLAG_* bits into
 * the kernel's DRM_VMW_FENCE_FLAG_* equivalents.
 * NOTE(review): the dflags declaration/initialization and the return
 * are missing from this extraction — comments only, no code edits.
 */
696 static inline uint32_t
697 vmw_drm_fence_flags(uint32_t flags
)
701 if (flags
& SVGA_FENCE_FLAG_EXEC
)
702 dflags
|= DRM_VMW_FENCE_FLAG_EXEC
;
703 if (flags
& SVGA_FENCE_FLAG_QUERY
)
704 dflags
|= DRM_VMW_FENCE_FLAG_QUERY
;
/*
 * vmw_ioctl_fence_signalled - Poll fence state via
 * DRM_VMW_FENCE_SIGNALED. Propagates the kernel-reported passed_seqno
 * to the fence bookkeeping and returns 0 when signaled, -1 otherwise.
 * NOTE(review): arg.handle/arg.flags setup and the ioctl-error return
 * are missing from this extraction — comments only, no code edits.
 */
711 vmw_ioctl_fence_signalled(struct vmw_winsys_screen
*vws
,
715 struct drm_vmw_fence_signaled_arg arg
;
716 uint32_t vflags
= vmw_drm_fence_flags(flags
);
719 memset(&arg
, 0, sizeof(arg
));
723 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_FENCE_SIGNALED
,
/* Tell the fence machinery how far the device has progressed. */
729 vmw_fences_signal(vws
->fence_ops
, arg
.passed_seqno
, 0, FALSE
);
731 return (arg
.signaled
) ? 0 : -1;
/*
 * vmw_ioctl_fence_finish - Block on a fence via DRM_VMW_FENCE_WAIT with
 * a VMW_FENCE_TIMEOUT_SECONDS timeout (converted to microseconds).
 * Failures are logged.
 * NOTE(review): arg.handle/arg.flags setup and the return path are
 * missing from this extraction — comments only, no code edits.
 */
737 vmw_ioctl_fence_finish(struct vmw_winsys_screen
*vws
,
741 struct drm_vmw_fence_wait_arg arg
;
742 uint32_t vflags
= vmw_drm_fence_flags(flags
);
745 memset(&arg
, 0, sizeof(arg
));
748 arg
.timeout_us
= VMW_FENCE_TIMEOUT_SECONDS
*1000000;
752 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_FENCE_WAIT
,
756 vmw_error("%s Failed\n", __FUNCTION__
);
/*
 * vmw_ioctl_shader_create - Create a guest-backed shader object via
 * DRM_VMW_CREATE_SHADER. Only VS and PS types are accepted (the switch
 * asserts on anything else). Returns the new shader handle, or
 * SVGA3D_INVALID_ID on ioctl failure.
 * NOTE(review): interior lines are missing from this extraction
 * (switch opener, break statements, sh_arg.bytecode setup) — comments
 * only, no code edits.
 */
762 vmw_ioctl_shader_create(struct vmw_winsys_screen
*vws
,
763 SVGA3dShaderType type
,
766 struct drm_vmw_shader_create_arg sh_arg
;
771 memset(&sh_arg
, 0, sizeof(sh_arg
));
773 sh_arg
.size
= code_len
;
774 sh_arg
.buffer_handle
= SVGA3D_INVALID_ID
;
775 sh_arg
.shader_handle
= SVGA3D_INVALID_ID
;
/* Map the SVGA shader type onto the kernel enum; VS/PS only. */
777 case SVGA3D_SHADERTYPE_VS
:
778 sh_arg
.shader_type
= drm_vmw_shader_type_vs
;
780 case SVGA3D_SHADERTYPE_PS
:
781 sh_arg
.shader_type
= drm_vmw_shader_type_ps
;
784 assert(!"Invalid shader type.");
788 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_CREATE_SHADER
,
789 &sh_arg
, sizeof(sh_arg
));
792 return SVGA3D_INVALID_ID
;
794 return sh_arg
.shader_handle
;
/*
 * vmw_ioctl_shader_destroy - Release a shader handle with
 * DRM_VMW_UNREF_SHADER; the ioctl result is deliberately ignored.
 * NOTE(review): some interior lines are missing from this extraction —
 * comments only, no code edits.
 */
798 vmw_ioctl_shader_destroy(struct vmw_winsys_screen
*vws
, uint32 shid
)
800 struct drm_vmw_shader_arg sh_arg
;
804 memset(&sh_arg
, 0, sizeof(sh_arg
));
805 sh_arg
.handle
= shid
;
807 (void)drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_UNREF_SHADER
,
808 &sh_arg
, sizeof(sh_arg
));
/*
 * vmw_ioctl_parse_caps - Decode the 3D capability buffer returned by
 * the kernel into vws->ioctl.cap_3d[].
 *
 * Guest-backed devices return a flat array of num_cap_3d values that is
 * copied verbatim. Legacy devices return SVGA3dCapsRecord blocks; the
 * loop scans the records (terminated by a zero length word), keeps the
 * highest-typed DEVCAPS record, then unpacks its (index, value) pairs,
 * warning about indices beyond num_cap_3d.
 * NOTE(review): interior lines are missing from this extraction
 * (local declarations for i/offset/numCaps/index, record assignment,
 * return) — comments only, no code edits.
 */
813 vmw_ioctl_parse_caps(struct vmw_winsys_screen
*vws
,
814 const uint32_t *cap_buffer
)
/* GB path: the buffer is already a flat devcap array. */
818 if (vws
->base
.have_gb_objects
) {
819 for (i
= 0; i
< vws
->ioctl
.num_cap_3d
; ++i
) {
820 vws
->ioctl
.cap_3d
[i
].has_cap
= TRUE
;
821 vws
->ioctl
.cap_3d
[i
].result
.u
= cap_buffer
[i
];
825 const uint32
*capsBlock
;
826 const SVGA3dCapsRecord
*capsRecord
= NULL
;
828 const SVGA3dCapPair
*capArray
;
832 * Search linearly through the caps block records for the specified type.
834 capsBlock
= cap_buffer
;
/* Records are length-prefixed; a zero word terminates the block. */
835 for (offset
= 0; capsBlock
[offset
] != 0; offset
+= capsBlock
[offset
]) {
836 const SVGA3dCapsRecord
*record
;
837 assert(offset
< SVGA_FIFO_3D_CAPS_SIZE
);
838 record
= (const SVGA3dCapsRecord
*) (capsBlock
+ offset
);
/* Keep the DEVCAPS record with the highest type seen so far. */
839 if ((record
->header
.type
>= SVGA3DCAPS_RECORD_DEVCAPS_MIN
) &&
840 (record
->header
.type
<= SVGA3DCAPS_RECORD_DEVCAPS_MAX
) &&
841 (!capsRecord
|| (record
->header
.type
> capsRecord
->header
.type
))) {
850 * Calculate the number of caps from the size of the record.
852 capArray
= (const SVGA3dCapPair
*) capsRecord
->data
;
853 numCaps
= (int) ((capsRecord
->header
.length
* sizeof(uint32
) -
854 sizeof capsRecord
->header
) / (2 * sizeof(uint32
)));
/* Unpack the (index, value) pairs into the cap table. */
856 for (i
= 0; i
< numCaps
; i
++) {
857 index
= capArray
[i
][0];
858 if (index
< vws
->ioctl
.num_cap_3d
) {
859 vws
->ioctl
.cap_3d
[index
].has_cap
= TRUE
;
860 vws
->ioctl
.cap_3d
[index
].result
.u
= capArray
[i
][1];
862 debug_printf("Unknown devcaps seen: %d\n", index
);
870 vmw_ioctl_init(struct vmw_winsys_screen
*vws
)
872 struct drm_vmw_getparam_arg gp_arg
;
873 struct drm_vmw_get_3d_cap_arg cap_arg
;
876 uint32_t *cap_buffer
;
877 drmVersionPtr version
;
878 boolean drm_gb_capable
;
879 boolean have_drm_2_5
;
883 version
= drmGetVersion(vws
->ioctl
.drm_fd
);
887 have_drm_2_5
= version
->version_major
> 2 ||
888 (version
->version_major
== 2 && version
->version_minor
> 4);
889 vws
->ioctl
.have_drm_2_6
= version
->version_major
> 2 ||
890 (version
->version_major
== 2 && version
->version_minor
> 5);
891 vws
->ioctl
.have_drm_2_9
= version
->version_major
> 2 ||
892 (version
->version_major
== 2 && version
->version_minor
> 8);
894 vws
->ioctl
.drm_execbuf_version
= vws
->ioctl
.have_drm_2_9
? 2 : 1;
896 drm_gb_capable
= have_drm_2_5
;
898 memset(&gp_arg
, 0, sizeof(gp_arg
));
899 gp_arg
.param
= DRM_VMW_PARAM_3D
;
900 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
901 &gp_arg
, sizeof(gp_arg
));
902 if (ret
|| gp_arg
.value
== 0) {
903 vmw_error("No 3D enabled (%i, %s).\n", ret
, strerror(-ret
));
907 memset(&gp_arg
, 0, sizeof(gp_arg
));
908 gp_arg
.param
= DRM_VMW_PARAM_FIFO_HW_VERSION
;
909 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
910 &gp_arg
, sizeof(gp_arg
));
912 vmw_error("Failed to get fifo hw version (%i, %s).\n",
913 ret
, strerror(-ret
));
916 vws
->ioctl
.hwversion
= gp_arg
.value
;
918 memset(&gp_arg
, 0, sizeof(gp_arg
));
919 gp_arg
.param
= DRM_VMW_PARAM_HW_CAPS
;
920 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
921 &gp_arg
, sizeof(gp_arg
));
923 vws
->base
.have_gb_objects
= FALSE
;
925 vws
->base
.have_gb_objects
=
926 !!(gp_arg
.value
& (uint64_t) SVGA_CAP_GBOBJECTS
);
928 if (vws
->base
.have_gb_objects
&& !drm_gb_capable
)
932 * CAP2 functionality is not yet in vmwgfx. Till then, avoiding
935 vws
->base
.have_intra_surface_copy
= FALSE
;
937 vws
->base
.have_vgpu10
= FALSE
;
938 if (vws
->base
.have_gb_objects
) {
939 memset(&gp_arg
, 0, sizeof(gp_arg
));
940 gp_arg
.param
= DRM_VMW_PARAM_3D_CAPS_SIZE
;
941 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
942 &gp_arg
, sizeof(gp_arg
));
944 size
= SVGA_FIFO_3D_CAPS_SIZE
* sizeof(uint32_t);
948 if (vws
->base
.have_gb_objects
)
949 vws
->ioctl
.num_cap_3d
= size
/ sizeof(uint32_t);
951 vws
->ioctl
.num_cap_3d
= SVGA3D_DEVCAP_MAX
;
954 memset(&gp_arg
, 0, sizeof(gp_arg
));
955 gp_arg
.param
= DRM_VMW_PARAM_MAX_MOB_MEMORY
;
956 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
957 &gp_arg
, sizeof(gp_arg
));
959 /* Just guess a large enough value. */
960 vws
->ioctl
.max_mob_memory
= 256*1024*1024;
962 vws
->ioctl
.max_mob_memory
= gp_arg
.value
;
965 memset(&gp_arg
, 0, sizeof(gp_arg
));
966 gp_arg
.param
= DRM_VMW_PARAM_MAX_MOB_SIZE
;
967 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
968 &gp_arg
, sizeof(gp_arg
));
970 if (ret
|| gp_arg
.value
== 0) {
971 vws
->ioctl
.max_texture_size
= VMW_MAX_DEFAULT_TEXTURE_SIZE
;
973 vws
->ioctl
.max_texture_size
= gp_arg
.value
;
976 /* Never early flush surfaces, mobs do accounting. */
977 vws
->ioctl
.max_surface_memory
= -1;
979 if (vws
->ioctl
.have_drm_2_9
) {
981 memset(&gp_arg
, 0, sizeof(gp_arg
));
982 gp_arg
.param
= DRM_VMW_PARAM_VGPU10
;
983 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
984 &gp_arg
, sizeof(gp_arg
));
985 if (ret
== 0 && gp_arg
.value
!= 0) {
986 const char *vgpu10_val
;
988 debug_printf("Have VGPU10 interface and hardware.\n");
989 vws
->base
.have_vgpu10
= TRUE
;
990 vgpu10_val
= getenv("SVGA_VGPU10");
991 if (vgpu10_val
&& strcmp(vgpu10_val
, "0") == 0) {
992 debug_printf("Disabling VGPU10 interface.\n");
993 vws
->base
.have_vgpu10
= FALSE
;
995 debug_printf("Enabling VGPU10 interface.\n");
1000 vws
->ioctl
.num_cap_3d
= SVGA3D_DEVCAP_MAX
;
1002 memset(&gp_arg
, 0, sizeof(gp_arg
));
1003 gp_arg
.param
= DRM_VMW_PARAM_MAX_SURF_MEMORY
;
1005 ret
= drmCommandWriteRead(vws
->ioctl
.drm_fd
, DRM_VMW_GET_PARAM
,
1006 &gp_arg
, sizeof(gp_arg
));
1007 if (!have_drm_2_5
|| ret
) {
1008 /* Just guess a large enough value, around 800mb. */
1009 vws
->ioctl
.max_surface_memory
= 0x30000000;
1011 vws
->ioctl
.max_surface_memory
= gp_arg
.value
;
1014 vws
->ioctl
.max_texture_size
= VMW_MAX_DEFAULT_TEXTURE_SIZE
;
1016 size
= SVGA_FIFO_3D_CAPS_SIZE
* sizeof(uint32_t);
1019 debug_printf("VGPU10 interface is %s.\n",
1020 vws
->base
.have_vgpu10
? "on" : "off");
1022 cap_buffer
= calloc(1, size
);
1024 debug_printf("Failed alloc fifo 3D caps buffer.\n");
1028 vws
->ioctl
.cap_3d
= calloc(vws
->ioctl
.num_cap_3d
,
1029 sizeof(*vws
->ioctl
.cap_3d
));
1030 if (!vws
->ioctl
.cap_3d
) {
1031 debug_printf("Failed alloc fifo 3D caps buffer.\n");
1032 goto out_no_caparray
;
1035 memset(&cap_arg
, 0, sizeof(cap_arg
));
1036 cap_arg
.buffer
= (uint64_t) (unsigned long) (cap_buffer
);
1037 cap_arg
.max_size
= size
;
1039 ret
= drmCommandWrite(vws
->ioctl
.drm_fd
, DRM_VMW_GET_3D_CAP
,
1040 &cap_arg
, sizeof(cap_arg
));
1043 debug_printf("Failed to get 3D capabilities"
1044 " (%i, %s).\n", ret
, strerror(-ret
));
1048 ret
= vmw_ioctl_parse_caps(vws
, cap_buffer
);
1050 debug_printf("Failed to parse 3D capabilities"
1051 " (%i, %s).\n", ret
, strerror(-ret
));
1055 if (((version
->version_major
== 2 && version
->version_minor
>= 10)
1056 || version
->version_major
> 2) && vws
->base
.have_vgpu10
) {
1058 /* support for these commands didn't make it into vmwgfx kernel
1059 * modules before 2.10.
1061 vws
->base
.have_generate_mipmap_cmd
= TRUE
;
1062 vws
->base
.have_set_predication_cmd
= TRUE
;
1065 if (version
->version_major
== 2 && version
->version_minor
>= 14) {
1066 vws
->base
.have_fence_fd
= TRUE
;
1070 drmFreeVersion(version
);
1071 vmw_printf("%s OK\n", __FUNCTION__
);
1074 free(vws
->ioctl
.cap_3d
);
1078 drmFreeVersion(version
);
1080 vws
->ioctl
.num_cap_3d
= 0;
1081 debug_printf("%s Failed\n", __FUNCTION__
);
1088 vmw_ioctl_cleanup(struct vmw_winsys_screen
*vws
)