1 /**************************************************************************
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #ifndef _VMWGFX_DRM_H_
29 #define _VMWGFX_DRM_H_
31 #define DRM_VMW_MAX_SURFACE_FACES 6
32 #define DRM_VMW_MAX_MIP_LEVELS 24
34 #define DRM_VMW_EXT_NAME_LEN 128
36 #define DRM_VMW_GET_PARAM 1
37 #define DRM_VMW_EXTENSION 2
38 #define DRM_VMW_CREATE_CONTEXT 3
39 #define DRM_VMW_UNREF_CONTEXT 4
40 #define DRM_VMW_CREATE_SURFACE 5
41 #define DRM_VMW_UNREF_SURFACE 6
42 #define DRM_VMW_REF_SURFACE 7
43 #define DRM_VMW_EXECBUF 8
44 #define DRM_VMW_ALLOC_DMABUF 9
45 #define DRM_VMW_UNREF_DMABUF 10
46 #define DRM_VMW_FIFO_DEBUG 11
47 #define DRM_VMW_FENCE_WAIT 12
50 /*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * Currently we support only one parameter:
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 */
61 #define DRM_VMW_PARAM_FIFO_OFFSET 0
64 * struct drm_vmw_getparam_arg
66 * @value: Returned value. //Out
67 * @param: Parameter to query. //In.
69 * Argument to the DRM_VMW_GET_PARAM Ioctl.
72 struct drm_vmw_getparam_arg
{
78 /*************************************************************************/
/**
 * DRM_VMW_EXTENSION - Query device extensions.
 */
84 * struct drm_vmw_extension_rep
86 * @exists: The queried extension exists.
87 * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
88 * @driver_sarea_offset: Offset to any space in the DRI SAREA
89 * used by the extension.
90 * @major: Major version number of the extension.
91 * @minor: Minor version number of the extension.
92 * @pl: Patch level version number of the extension.
94 * Output argument to the DRM_VMW_EXTENSION Ioctl.
97 struct drm_vmw_extension_rep
{
99 uint32_t driver_ioctl_offset
;
100 uint32_t driver_sarea_offset
;
108 * union drm_vmw_extension_arg
110 * @extension - Ascii name of the extension to be queried. //In
111 * @rep - Reply as defined above. //Out
113 * Argument to the DRM_VMW_EXTENSION Ioctl.
116 union drm_vmw_extension_arg
{
117 char extension
[DRM_VMW_EXT_NAME_LEN
];
118 struct drm_vmw_extension_rep rep
;
121 /*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
130 * struct drm_vmw_context_arg
132 * @cid: Device unique context ID.
134 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
135 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
138 struct drm_vmw_context_arg
{
143 /*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */
152 /*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */
163 * struct drm_wmv_surface_create_req
165 * @flags: Surface flags as understood by the host.
166 * @format: Surface format as understood by the host.
167 * @mip_levels: Number of mip levels for each face.
168 * An unused face should have 0 encoded.
169 * @size_addr: Address of a user-space array of sruct drm_vmw_size
170 * cast to an uint64_t for 32-64 bit compatibility.
171 * The size of the array should equal the total number of mipmap levels.
172 * @shareable: Boolean whether other clients (as identified by file descriptors)
173 * may reference this surface.
175 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
176 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
179 struct drm_vmw_surface_create_req
{
182 uint32_t mip_levels
[DRM_VMW_MAX_SURFACE_FACES
];
189 * struct drm_wmv_surface_arg
191 * @sid: Surface id of created surface or surface to destroy or reference.
193 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
194 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
195 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
198 struct drm_vmw_surface_arg
{
204 * struct drm_vmw_size ioctl.
206 * @width - mip level width
207 * @height - mip level height
208 * @depth - mip level depth
210 * Description of a mip level.
211 * Input data to the DRM_WMW_CREATE_SURFACE Ioctl.
214 struct drm_vmw_size
{
222 * union drm_vmw_surface_create_arg
224 * @rep: Output data as described above.
225 * @req: Input data as described above.
227 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
230 union drm_vmw_surface_create_arg
{
231 struct drm_vmw_surface_arg rep
;
232 struct drm_vmw_surface_create_req req
;
235 /*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */
250 * union drm_vmw_surface_reference_arg
252 * @rep: Output data as described above.
253 * @req: Input data as described above.
255 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
258 union drm_vmw_surface_reference_arg
{
259 struct drm_vmw_surface_create_req rep
;
260 struct drm_vmw_surface_arg req
;
263 /*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation,
 * a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */
274 /*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence sequence that when signaled, indicates that the command buffer has
 * executed.
 */
284 * struct drm_vmw_execbuf_arg
286 * @commands: User-space address of a command buffer cast to an uint64_t.
287 * @command-size: Size in bytes of the command buffer.
288 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
291 * Argument to the DRM_VMW_EXECBUF Ioctl.
294 struct drm_vmw_execbuf_arg
{
296 uint32_t command_size
;
302 * struct drm_vmw_fence_rep
304 * @fence_seq: Fence sequence associated with a command submission.
305 * @error: This member should've been set to -EFAULT on submission.
306 * The following actions should be take on completion:
307 * error == -EFAULT: Fence communication failed. The host is synchronized.
308 * Use the last fence id read from the FIFO fence register.
309 * error != 0 && error != -EFAULT:
310 * Fence submission failed. The host is synchronized. Use the fence_seq member.
311 * error == 0: All is OK, The host may not be synchronized.
312 * Use the fence_seq member.
314 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
317 struct drm_vmw_fence_rep
{
323 /*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is
 * identified by a handle and an offset, which are private to the guest, but
 * useable in the command stream. The guest kernel may translate these
 * and patch up the command stream accordingly. In the future, the offset may
 * be zero at all times, or it may disappear from the interface before it is
 * fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */
342 * struct drm_vmw_alloc_dmabuf_req
344 * @size: Required minimum size of the buffer.
346 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
349 struct drm_vmw_alloc_dmabuf_req
{
355 * struct drm_vmw_dmabuf_rep
357 * @map_handle: Offset to use in the mmap() call used to map the buffer.
358 * @handle: Handle unique to this buffer. Used for unreferencing.
359 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
360 * referenced. See not above.
361 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
362 * referenced. See note above.
364 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
367 struct drm_vmw_dmabuf_rep
{
371 uint32_t cur_gmr_offset
;
376 * union drm_vmw_dmabuf_arg
378 * @req: Input data as described above.
379 * @rep: Output data as described above.
381 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
384 union drm_vmw_alloc_dmabuf_arg
{
385 struct drm_vmw_alloc_dmabuf_req req
;
386 struct drm_vmw_dmabuf_rep rep
;
389 /*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 */
396 * struct drm_vmw_unref_dmabuf_arg
398 * @handle: Handle indicating what buffer to free. Obtained from the
399 * DRM_VMW_ALLOC_DMABUF Ioctl.
401 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
404 struct drm_vmw_unref_dmabuf_arg
{
409 /*************************************************************************/
/**
 * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
 *
 * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
 */
417 * struct drm_vmw_fifo_debug_arg
419 * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In
420 * @debug_buffer_size: Size in bytes of debug buffer //In
421 * @used_size: Number of bytes copied to the buffer // Out
422 * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
424 * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
427 struct drm_vmw_fifo_debug_arg
{
428 uint64_t debug_buffer
;
429 uint32_t debug_buffer_size
;
435 struct drm_vmw_fence_wait_arg
{
437 uint64_t kernel_cookie
;
438 int32_t cookie_valid
;