svga: Remove the surface_invalidate winsys function
src/gallium/winsys/svga/drm/vmwgfx_drm.h (mesa.git)
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM 4
#define DRM_VMW_CLAIM_STREAM 5
#define DRM_VMW_UNREF_STREAM 6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT 7
#define DRM_VMW_UNREF_CONTEXT 8
#define DRM_VMW_CREATE_SURFACE 9
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_GET_3D_CAP 13
#define DRM_VMW_FENCE_WAIT 14
#define DRM_VMW_FENCE_SIGNALED 15
#define DRM_VMW_FENCE_UNREF 16
#define DRM_VMW_FENCE_EVENT 17
#define DRM_VMW_PRESENT 18
#define DRM_VMW_PRESENT_READBACK 19
#define DRM_VMW_UPDATE_LAYOUT 20
#define DRM_VMW_CREATE_SHADER 21
#define DRM_VMW_UNREF_SHADER 22
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The FIFO is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1:
 * SM4_1 support is enabled.
 */

#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_HW_CAPS 3
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 */
enum drm_vmw_handle_type {
        DRM_VMW_HANDLE_LEGACY = 0,
        DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
        __u64 value;
        __u32 param;
        __u32 pad64;
};

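/*
 * Example (illustrative sketch, not part of this header's ABI): querying
 * whether the device supports 3D. The ioctl is reached through libdrm's
 * drmCommandWriteRead() helper, which builds the full request number from
 * the driver-private command index above. Error handling is abbreviated
 * and the helper name is hypothetical.
 *
 *     #include <xf86drm.h>
 *
 *     static int vmw_has_3d(int fd)
 *     {
 *             struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *             if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)))
 *                     return 0; // treat ioctl failure as "no 3D"
 *             return arg.value != 0;
 *     }
 */
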
/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
        __s32 cid;
        __u32 pad64;
};

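/*
 * Example (illustrative sketch, not part of this header's ABI): creating a
 * host context and destroying it again. The returned cid can be used
 * directly in the command stream. Assumes libdrm's drmCommandRead() and
 * drmCommandWrite() helpers; error handling is omitted.
 *
 *     struct drm_vmw_context_arg c_arg = { 0 };
 *
 *     drmCommandRead(fd, DRM_VMW_CREATE_CONTEXT, &c_arg, sizeof(c_arg));
 *     // ... submit command buffers referencing c_arg.cid ...
 *     drmCommandWrite(fd, DRM_VMW_UNREF_CONTEXT, &c_arg, sizeof(c_arg));
 */
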
/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
        __u32 flags;
        __u32 format;
        __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        __u64 size_addr;
        __s32 shareable;
        __s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
        __s32 sid;
        enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
        __u32 width;
        __u32 height;
        __u32 depth;
        __u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
        struct drm_vmw_surface_arg rep;
        struct drm_vmw_surface_create_req req;
};

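/*
 * Example (illustrative sketch, not part of this header's ABI): creating a
 * single-face, single-mip legacy surface. The drm_vmw_size array is passed
 * by pointer cast to __u64. Assumes drmCommandWriteRead(); `format` stands
 * in for a device-specific SVGA3D format code, and error handling is
 * omitted.
 *
 *     struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *     union drm_vmw_surface_create_arg arg = { 0 };
 *
 *     arg.req.format = format;        // an SVGA3D format code
 *     arg.req.mip_levels[0] = 1;      // one mip level on face 0 only
 *     arg.req.size_addr = (__u64)(unsigned long)&size;
 *
 *     drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg, sizeof(arg));
 *     // arg.rep.sid now holds the surface id
 */
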
/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
        struct drm_vmw_surface_create_req rep;
        struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @imported_fence_fd: FD for a fence imported from another device
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
        __u64 commands;
        __u32 command_size;
        __u32 throttle_us;
        __u64 fence_rep;
        __u32 version;
        __u32 flags;
        __u32 context_handle;
        __s32 imported_fence_fd;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32 bits.
 * @passed_seqno: The highest seqno processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the @seqno member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the @seqno member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
        __u32 handle;
        __u32 mask;
        __u32 seqno;
        __u32 passed_seqno;
        __s32 fd;
        __s32 error;
};

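/*
 * Example (illustrative sketch, not part of this header's ABI): submitting
 * a command buffer and retrieving the resulting fence information. Assumes
 * drmCommandWrite(); `cmds` and `cmd_len` stand for a buffer of
 * device-specific SVGA commands, and error handling is omitted.
 *
 *     struct drm_vmw_fence_rep fence_rep = { 0 };
 *     struct drm_vmw_execbuf_arg arg = { 0 };
 *
 *     arg.commands = (__u64)(unsigned long)cmds;
 *     arg.command_size = cmd_len;
 *     arg.fence_rep = (__u64)(unsigned long)&fence_rep;
 *     arg.version = DRM_VMW_EXECBUF_VERSION;
 *
 *     drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *     // on success, fence_rep.handle / fence_rep.seqno identify the fence
 */
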
/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest
 * kernel may translate these and patch up the command stream accordingly.
 * In the future, the offset may be zero at all times, or it may disappear
 * from the interface before it is fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
        __u32 size;
        __u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
        __u64 map_handle;
        __u32 handle;
        __u32 cur_gmr_id;
        __u32 cur_gmr_offset;
        __u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
        struct drm_vmw_alloc_bo_req req;
        struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg

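/*
 * Example (illustrative sketch, not part of this header's ABI): allocating
 * a buffer object and mapping it into user space, using the returned
 * @map_handle as the mmap() file offset. Assumes drmCommandWriteRead();
 * error handling is omitted.
 *
 *     #include <sys/mman.h>
 *
 *     union drm_vmw_alloc_bo_arg arg = { 0 };
 *     void *map;
 *
 *     arg.req.size = 65536;
 *     drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO, &arg, sizeof(arg));
 *
 *     map = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                fd, arg.rep.map_handle);
 *     // arg.rep.handle is later used for synccpu and handle close;
 *     // arg.rep.cur_gmr_id / cur_gmr_offset go into the command stream
 */
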
/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
        __s32 x;
        __s32 y;
        __u32 w;
        __u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
        __u32 stream_id;
        __u32 enabled;

        __u32 flags;
        __u32 color_key;

        __u32 handle;
        __u32 offset;
        __s32 format;
        __u32 size;
        __u32 width;
        __u32 height;
        __u32 pitch[3];

        __u32 pad64;
        struct drm_vmw_rect src;
        struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
        __u32 flags;
        __u32 crtc_id;
        __s32 xpos;
        __s32 ypos;
        __s32 xhot;
        __s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
        __u32 stream_id;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 *
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Max size to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
        __u64 buffer;
        __u32 max_size;
        __u32 pad64;
};

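/*
 * Example (illustrative sketch, not part of this header's ABI): reading
 * the 3D capability block, sizing the destination buffer first via
 * DRM_VMW_PARAM_3D_CAPS_SIZE. Assumes drmCommandWriteRead() and
 * drmCommandWrite(); error handling is omitted.
 *
 *     #include <stdlib.h>
 *
 *     struct drm_vmw_getparam_arg gp = { .param = DRM_VMW_PARAM_3D_CAPS_SIZE };
 *     struct drm_vmw_get_3d_cap_arg cap = { 0 };
 *     void *buf;
 *
 *     drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp, sizeof(gp));
 *     buf = calloc(1, gp.value);
 *     cap.buffer = (__u64)(unsigned long)buf;
 *     cap.max_size = gp.value;
 *     drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap, sizeof(cap));
 */
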
/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the wait. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
        __u32 handle;
        __s32 cookie_valid;
        __u64 kernel_cookie;
        __u64 timeout_us;
        __s32 lazy;
        __s32 flags;
        __s32 wait_options;
        __s32 pad64;
};

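/*
 * Example (illustrative sketch, not part of this header's ABI): waiting
 * for a fence obtained through struct drm_vmw_fence_rep to signal EXEC,
 * and letting the kernel drop the fence reference on success via
 * DRM_VMW_WAIT_OPTION_UNREF. Assumes drmCommandWriteRead(); error
 * handling is omitted.
 *
 *     struct drm_vmw_fence_wait_arg arg = { 0 };
 *
 *     arg.handle = fence_rep.handle;   // from a previous DRM_VMW_EXECBUF
 *     arg.timeout_us = 10 * 1000000;   // 10 second timeout
 *     arg.lazy = 1;                    // timing not critical
 *     arg.flags = DRM_VMW_FENCE_FLAG_EXEC;
 *     arg.wait_options = DRM_VMW_WAIT_OPTION_UNREF;
 *     arg.cookie_valid = 0;            // must be 0 on the first call
 *
 *     drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &arg, sizeof(arg));
 */
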
/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Whether the fence object is signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
 */

struct drm_vmw_fence_signaled_arg {
        __u32 handle;
        __u32 flags;
        __s32 signaled;
        __u32 passed_seqno;
        __u32 signaled_flags;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
        __u32 handle;
        __u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
        struct drm_event base;
        __u64 user_data;
        __u32 tv_sec;
        __u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: User-space data to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
        __u64 fence_rep;
        __u64 user_data;
        __u32 handle;
        __u32 flags;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the framebuffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
        __u32 fb_id;
        __u32 sid;
        __s32 dest_x;
        __s32 dest_y;
        __u64 clips_ptr;
        __u32 num_clips;
        __u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is 0, the ioctl will not return a fence.
 */

struct drm_vmw_present_readback_arg {
        __u32 fb_id;
        __u32 num_clips;
        __u64 clips_ptr;
        __u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
        __u32 num_outputs;
        __u32 pad64;
        __u64 rects;
};


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
        drm_vmw_shader_type_vs = 0,
        drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code.
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
        enum drm_vmw_shader_type shader_type;
        __u32 size;
        __u32 buffer_handle;
        __u32 shader_handle;
        __u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Drops a user-space reference to a shader, destroying the shader itself
 * when the last reference goes away.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
        __u32 handle;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
        drm_vmw_surface_flag_shareable = (1 << 0),
        drm_vmw_surface_flag_scanout = (1 << 1),
        drm_vmw_surface_flag_create_buffer = (1 << 2),
        drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
        __u32 svga3d_flags;
        __u32 format;
        __u32 mip_levels;
        enum drm_vmw_surface_flags drm_surface_flags;
        __u32 multisample_count;
        __u32 autogen_filter;
        __u32 buffer_handle;
        __u32 array_size;
        struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
        __u32 handle;
        __u32 backup_size;
        __u32 buffer_handle;
        __u32 buffer_size;
        __u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_req req;
};

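/*
 * Example (illustrative sketch, not part of this header's ABI): creating
 * a guest-backed surface and letting the kernel allocate the backup
 * buffer. Assumes drmCommandWriteRead(); `svga3d_flags` and
 * `svga3d_format` stand for device-specific values, and SVGA3D_INVALID_ID
 * comes from the SVGA3D headers. Error handling is omitted.
 *
 *     union drm_vmw_gb_surface_create_arg arg = { 0 };
 *
 *     arg.req.svga3d_flags = svga3d_flags;    // e.g. texture bind flags
 *     arg.req.format = svga3d_format;
 *     arg.req.mip_levels = 1;
 *     arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *     arg.req.buffer_handle = SVGA3D_INVALID_ID;  // no existing buffer
 *     arg.req.base_size.width = 64;
 *     arg.req.base_size.height = 64;
 *     arg.req.base_size.depth = 1;
 *
 *     drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE, &arg, sizeof(arg));
 *     // arg.rep.handle is the surface id; arg.rep.buffer_handle and
 *     // arg.rep.buffer_map_handle describe the backup buffer
 */
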
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
        struct drm_vmw_gb_surface_create_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
        struct drm_vmw_gb_surface_ref_rep rep;
        struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
        drm_vmw_synccpu_read = (1 << 0),
        drm_vmw_synccpu_write = (1 << 1),
        drm_vmw_synccpu_dontblock = (1 << 2),
        drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
        drm_vmw_synccpu_grab,
        drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
        enum drm_vmw_synccpu_op op;
        enum drm_vmw_synccpu_flags flags;
        __u32 handle;
        __u32 pad64;
};

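/*
 * Example (illustrative sketch, not part of this header's ABI):
 * bracketing CPU writes to a mapped buffer object with a synccpu
 * grab/release pair. Assumes drmCommandWrite(); `bo_handle` is a handle
 * from DRM_VMW_ALLOC_BO, and error handling is omitted.
 *
 *     struct drm_vmw_synccpu_arg arg = { 0 };
 *
 *     arg.op = drm_vmw_synccpu_grab;
 *     arg.handle = bo_handle;
 *     arg.flags = drm_vmw_synccpu_write;
 *     drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *
 *     // ... CPU writes through the mmap()ed pointer ...
 *
 *     arg.op = drm_vmw_synccpu_release;
 *     drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */
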
/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
        drm_vmw_context_legacy,
        drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
        enum drm_vmw_extended_context req;
        struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
        __u32 handle;
        __u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg

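/*
 * Example (illustrative sketch, not part of this header's ABI): dropping
 * a buffer-object handle once user space is done with it. Assumes
 * drmCommandWrite(); `bo_handle` is a handle from DRM_VMW_ALLOC_BO.
 *
 *     struct drm_vmw_handle_close_arg arg = { .handle = bo_handle };
 *
 *     drmCommandWrite(fd, DRM_VMW_HANDLE_CLOSE, &arg, sizeof(arg));
 */
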
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding version
 * parameter and 64 bit svga flag.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
        drm_vmw_gb_surface_v1
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
        struct drm_vmw_gb_surface_create_req base;
        enum drm_vmw_surface_version version;
        uint32_t svga3d_flags_upper_32_bits;
        SVGA3dMSPattern multisample_pattern;
        SVGA3dMSQualityLevel quality_level;
        uint64_t must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_ext_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
        struct drm_vmw_gb_surface_create_ext_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 * "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
        struct drm_vmw_gb_surface_ref_ext_rep rep;
        struct drm_vmw_surface_arg req;
};

#if defined(__cplusplus)
}
#endif

#endif