1 /*
2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27 #ifndef _I915_DRM_H_
28 #define _I915_DRM_H_
29
30 #include "drm.h"
31
32 #if defined(__cplusplus)
33 extern "C" {
34 #endif
35
36 /* Please note that modifications to all structs defined here are
37 * subject to backwards-compatibility constraints.
38 */
39
40 /**
41  * DOC: uevents generated by i915 on its device node
42 *
43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
44 * event from the gpu l3 cache. Additional information supplied is ROW,
45 * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
46  *	track of these events, and if a specific cache-line seems to have a
47  *	persistent error, remap it with the l3 remapping tool supplied in
48 * intel-gpu-tools. The value supplied with the event is always 1.
49 *
50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
51 * hangcheck. The error detection event is a good indicator of when things
52 * began to go badly. The value supplied with the event is a 1 upon error
53 * detection, and a 0 upon reset completion, signifying no more error
54 * exists. NOTE: Disabling hangcheck or reset via module parameter will
55 * cause the related events to not be seen.
56 *
57 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
58  *	GPU. The value supplied with the event is always 1. NOTE: Disabling
59  *	reset via module parameter will cause this event not to be seen.
60 */
61 #define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
62 #define I915_ERROR_UEVENT "ERROR"
63 #define I915_RESET_UEVENT "RESET"
64
65 /*
66 * i915_user_extension: Base class for defining a chain of extensions
67 *
68 * Many interfaces need to grow over time. In most cases we can simply
69 * extend the struct and have userspace pass in more data. Another option,
70 * as demonstrated by Vulkan's approach to providing extensions for forward
71 * and backward compatibility, is to use a list of optional structs to
72 * provide those extra details.
73 *
74 * The key advantage to using an extension chain is that it allows us to
75 * redefine the interface more easily than an ever growing struct of
76 * increasing complexity, and for large parts of that interface to be
77 * entirely optional. The downside is more pointer chasing; chasing across
78 * the boundary with pointers encapsulated inside u64.
79 */
80 struct i915_user_extension {
81 __u64 next_extension;
82 __u32 name;
83 __u32 flags; /* All undefined bits must be zero. */
84 __u32 rsvd[4]; /* Reserved for future use; must be zero. */
85 };
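/*
 * A minimal usage sketch (illustrative, not part of the uAPI itself):
 * building a one-entry extension chain. It assumes the timeline-fences
 * extension defined later in this file and an execbuf submitted with
 * I915_EXEC_USE_EXTENSIONS; all values other than the names shown are
 * caller-supplied placeholders.
 *
 *	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 *		.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *		.base.next_extension = 0,	// 0 terminates the chain
 *	};
 *
 *	// The chain head is always passed as a u64-encoded pointer.
 *	execbuf.cliprects_ptr = (__u64)(uintptr_t)&ext.base;
 *	execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
 *
 * Additional extensions would be appended by pointing next_extension at the
 * next node's struct i915_user_extension, again cast to a __u64.
 */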
86
87 /*
88 * MOCS indexes used for GPU surfaces, defining the cacheability of the
89 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
90 */
91 enum i915_mocs_table_index {
92 /*
93 * Not cached anywhere, coherency between CPU and GPU accesses is
94 * guaranteed.
95 */
96 I915_MOCS_UNCACHED,
97 /*
98 * Cacheability and coherency controlled by the kernel automatically
99 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
100 * usage of the surface (used for display scanout or not).
101 */
102 I915_MOCS_PTE,
103 /*
104 * Cached in all GPU caches available on the platform.
105 * Coherency between CPU and GPU accesses to the surface is not
106 * guaranteed without extra synchronization.
107 */
108 I915_MOCS_CACHED,
109 };
110
111 /*
112 * Different engines serve different roles, and there may be more than one
113 * engine serving each role. enum drm_i915_gem_engine_class provides a
114 * classification of the role of the engine, which may be used when requesting
115 * operations to be performed on a certain subset of engines, or for providing
116 * information about that group.
117 */
118 enum drm_i915_gem_engine_class {
119 I915_ENGINE_CLASS_RENDER = 0,
120 I915_ENGINE_CLASS_COPY = 1,
121 I915_ENGINE_CLASS_VIDEO = 2,
122 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
123
124 /* should be kept compact */
125
126 I915_ENGINE_CLASS_INVALID = -1
127 };
128
129 /*
130 * There may be more than one engine fulfilling any role within the system.
131 * Each engine of a class is given a unique instance number and therefore
132  * any engine can be specified by its class:instance tuple. APIs that allow
133 * access to any engine in the system will use struct i915_engine_class_instance
134 * for this identification.
135 */
136 struct i915_engine_class_instance {
137 __u16 engine_class; /* see enum drm_i915_gem_engine_class */
138 __u16 engine_instance;
139 #define I915_ENGINE_CLASS_INVALID_NONE -1
140 #define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
141 };
142
143 /**
144 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
145 *
146 */
147
148 enum drm_i915_pmu_engine_sample {
149 I915_SAMPLE_BUSY = 0,
150 I915_SAMPLE_WAIT = 1,
151 I915_SAMPLE_SEMA = 2
152 };
153
154 #define I915_PMU_SAMPLE_BITS (4)
155 #define I915_PMU_SAMPLE_MASK (0xf)
156 #define I915_PMU_SAMPLE_INSTANCE_BITS (8)
157 #define I915_PMU_CLASS_SHIFT \
158 (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
159
160 #define __I915_PMU_ENGINE(class, instance, sample) \
161 ((class) << I915_PMU_CLASS_SHIFT | \
162 (instance) << I915_PMU_SAMPLE_BITS | \
163 (sample))
164
165 #define I915_PMU_ENGINE_BUSY(class, instance) \
166 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
167
168 #define I915_PMU_ENGINE_WAIT(class, instance) \
169 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
170
171 #define I915_PMU_ENGINE_SEMA(class, instance) \
172 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
173
174 #define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
175
176 #define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
177 #define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
178 #define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
179 #define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
180
181 #define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
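/*
 * A consumption sketch (illustrative, not uAPI): the encodings above are
 * placed in perf_event_attr.config, while attr.type must be the dynamic PMU
 * type exported under the sysfs path noted in the DOC comment above. The
 * i915_pmu_type value is assumed to have been parsed from sysfs by the
 * caller; fd handling and error checks are omitted.
 *
 *	#include <linux/perf_event.h>
 *
 *	struct perf_event_attr attr = { 0 };
 *
 *	attr.size = sizeof(attr);
 *	attr.type = i915_pmu_type;
 *	attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);
 *
 *	// perf_event_open(&attr, -1, 0, -1, 0), then read() the counter.
 */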
182
183 /* Each region is a minimum of 16k, and there are at most 255 of them.
184 */
185 #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
186 * of chars for next/prev indices */
187 #define I915_LOG_MIN_TEX_REGION_SIZE 14
188
189 typedef struct _drm_i915_init {
190 enum {
191 I915_INIT_DMA = 0x01,
192 I915_CLEANUP_DMA = 0x02,
193 I915_RESUME_DMA = 0x03
194 } func;
195 unsigned int mmio_offset;
196 int sarea_priv_offset;
197 unsigned int ring_start;
198 unsigned int ring_end;
199 unsigned int ring_size;
200 unsigned int front_offset;
201 unsigned int back_offset;
202 unsigned int depth_offset;
203 unsigned int w;
204 unsigned int h;
205 unsigned int pitch;
206 unsigned int pitch_bits;
207 unsigned int back_pitch;
208 unsigned int depth_pitch;
209 unsigned int cpp;
210 unsigned int chipset;
211 } drm_i915_init_t;
212
213 typedef struct _drm_i915_sarea {
214 struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
215 int last_upload; /* last time texture was uploaded */
216 int last_enqueue; /* last time a buffer was enqueued */
217 int last_dispatch; /* age of the most recently dispatched buffer */
218 int ctxOwner; /* last context to upload state */
219 int texAge;
220 int pf_enabled; /* is pageflipping allowed? */
221 int pf_active;
222 int pf_current_page; /* which buffer is being displayed? */
223 int perf_boxes; /* performance boxes to be displayed */
224 int width, height; /* screen size in pixels */
225
226 drm_handle_t front_handle;
227 int front_offset;
228 int front_size;
229
230 drm_handle_t back_handle;
231 int back_offset;
232 int back_size;
233
234 drm_handle_t depth_handle;
235 int depth_offset;
236 int depth_size;
237
238 drm_handle_t tex_handle;
239 int tex_offset;
240 int tex_size;
241 int log_tex_granularity;
242 int pitch;
243 int rotation; /* 0, 90, 180 or 270 */
244 int rotated_offset;
245 int rotated_size;
246 int rotated_pitch;
247 int virtualX, virtualY;
248
249 unsigned int front_tiled;
250 unsigned int back_tiled;
251 unsigned int depth_tiled;
252 unsigned int rotated_tiled;
253 unsigned int rotated2_tiled;
254
255 int pipeA_x;
256 int pipeA_y;
257 int pipeA_w;
258 int pipeA_h;
259 int pipeB_x;
260 int pipeB_y;
261 int pipeB_w;
262 int pipeB_h;
263
264 /* fill out some space for old userspace triple buffer */
265 drm_handle_t unused_handle;
266 __u32 unused1, unused2, unused3;
267
268 /* buffer object handles for static buffers. May change
269 * over the lifetime of the client.
270 */
271 __u32 front_bo_handle;
272 __u32 back_bo_handle;
273 __u32 unused_bo_handle;
274 __u32 depth_bo_handle;
275
276 } drm_i915_sarea_t;
277
278 /* due to userspace building against these headers we need some compat here */
279 #define planeA_x pipeA_x
280 #define planeA_y pipeA_y
281 #define planeA_w pipeA_w
282 #define planeA_h pipeA_h
283 #define planeB_x pipeB_x
284 #define planeB_y pipeB_y
285 #define planeB_w pipeB_w
286 #define planeB_h pipeB_h
287
288 /* Flags for perf_boxes
289 */
290 #define I915_BOX_RING_EMPTY 0x1
291 #define I915_BOX_FLIP 0x2
292 #define I915_BOX_WAIT 0x4
293 #define I915_BOX_TEXTURE_LOAD 0x8
294 #define I915_BOX_LOST_CONTEXT 0x10
295
296 /*
297 * i915 specific ioctls.
298 *
299  * The device-specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
300  * [0x40, 0xa0) with 0xa0 excluded. The numbers below are defined as offsets
301  * against DRM_COMMAND_BASE and must lie within [0x0, 0x60).
302 */
303 #define DRM_I915_INIT 0x00
304 #define DRM_I915_FLUSH 0x01
305 #define DRM_I915_FLIP 0x02
306 #define DRM_I915_BATCHBUFFER 0x03
307 #define DRM_I915_IRQ_EMIT 0x04
308 #define DRM_I915_IRQ_WAIT 0x05
309 #define DRM_I915_GETPARAM 0x06
310 #define DRM_I915_SETPARAM 0x07
311 #define DRM_I915_ALLOC 0x08
312 #define DRM_I915_FREE 0x09
313 #define DRM_I915_INIT_HEAP 0x0a
314 #define DRM_I915_CMDBUFFER 0x0b
315 #define DRM_I915_DESTROY_HEAP 0x0c
316 #define DRM_I915_SET_VBLANK_PIPE 0x0d
317 #define DRM_I915_GET_VBLANK_PIPE 0x0e
318 #define DRM_I915_VBLANK_SWAP 0x0f
319 #define DRM_I915_HWS_ADDR 0x11
320 #define DRM_I915_GEM_INIT 0x13
321 #define DRM_I915_GEM_EXECBUFFER 0x14
322 #define DRM_I915_GEM_PIN 0x15
323 #define DRM_I915_GEM_UNPIN 0x16
324 #define DRM_I915_GEM_BUSY 0x17
325 #define DRM_I915_GEM_THROTTLE 0x18
326 #define DRM_I915_GEM_ENTERVT 0x19
327 #define DRM_I915_GEM_LEAVEVT 0x1a
328 #define DRM_I915_GEM_CREATE 0x1b
329 #define DRM_I915_GEM_PREAD 0x1c
330 #define DRM_I915_GEM_PWRITE 0x1d
331 #define DRM_I915_GEM_MMAP 0x1e
332 #define DRM_I915_GEM_SET_DOMAIN 0x1f
333 #define DRM_I915_GEM_SW_FINISH 0x20
334 #define DRM_I915_GEM_SET_TILING 0x21
335 #define DRM_I915_GEM_GET_TILING 0x22
336 #define DRM_I915_GEM_GET_APERTURE 0x23
337 #define DRM_I915_GEM_MMAP_GTT 0x24
338 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
339 #define DRM_I915_GEM_MADVISE 0x26
340 #define DRM_I915_OVERLAY_PUT_IMAGE 0x27
341 #define DRM_I915_OVERLAY_ATTRS 0x28
342 #define DRM_I915_GEM_EXECBUFFER2 0x29
343 #define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2
344 #define DRM_I915_GET_SPRITE_COLORKEY 0x2a
345 #define DRM_I915_SET_SPRITE_COLORKEY 0x2b
346 #define DRM_I915_GEM_WAIT 0x2c
347 #define DRM_I915_GEM_CONTEXT_CREATE 0x2d
348 #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
349 #define DRM_I915_GEM_SET_CACHING 0x2f
350 #define DRM_I915_GEM_GET_CACHING 0x30
351 #define DRM_I915_REG_READ 0x31
352 #define DRM_I915_GET_RESET_STATS 0x32
353 #define DRM_I915_GEM_USERPTR 0x33
354 #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
355 #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
356 #define DRM_I915_PERF_OPEN 0x36
357 #define DRM_I915_PERF_ADD_CONFIG 0x37
358 #define DRM_I915_PERF_REMOVE_CONFIG 0x38
359 #define DRM_I915_QUERY 0x39
360 #define DRM_I915_GEM_VM_CREATE 0x3a
361 #define DRM_I915_GEM_VM_DESTROY 0x3b
362 /* Must be kept compact -- no holes */
363
364 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
365 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
366 #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
367 #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
368 #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
369 #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
370 #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
371 #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
372 #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
373 #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
374 #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
375 #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
376 #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
377 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
378 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
379 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
380 #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
381 #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
382 #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
383 #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
384 #define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
385 #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
386 #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
387 #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
388 #define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
389 #define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
390 #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
391 #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
392 #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
393 #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
394 #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
395 #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
396 #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
397 #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
398 #define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
399 #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
400 #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
401 #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
402 #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
403 #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
404 #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
405 #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
406 #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
407 #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
408 #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
409 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
410 #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
411 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
412 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
413 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
414 #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
415 #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
416 #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
417 #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
418 #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
419 #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
420 #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
421 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
422 #define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
423 #define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
424 #define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
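/*
 * Illustrative expansion (a sketch, not additional uAPI): the request codes
 * above compose DRM_COMMAND_BASE (0x40) with the per-driver offsets, so e.g.
 *
 *	DRM_IOCTL_I915_GETPARAM
 *		== DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
 *		== DRM_IOWR(0x46, drm_i915_getparam_t)
 *
 * and is passed directly to ioctl(2) on the opened DRM device node (or to
 * libdrm's drmIoctl(), which retries on EINTR/EAGAIN).
 */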
425
426 /* Allow drivers to submit batchbuffers directly to hardware, relying
427 * on the security mechanisms provided by hardware.
428 */
429 typedef struct drm_i915_batchbuffer {
430 int start; /* agp offset */
431 int used; /* nr bytes in use */
432 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
433 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
434 	int num_cliprects;	/* multipass with multiple cliprects? */
435 struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
436 } drm_i915_batchbuffer_t;
437
438 /* As above, but pass a pointer to userspace buffer which can be
439 * validated by the kernel prior to sending to hardware.
440 */
441 typedef struct _drm_i915_cmdbuffer {
442 char *buf; /* pointer to userspace command buffer */
443 int sz; /* nr bytes in buf */
444 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
445 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
446 	int num_cliprects;	/* multipass with multiple cliprects? */
447 struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
448 } drm_i915_cmdbuffer_t;
449
450 /* Userspace can request & wait on IRQs:
451 */
452 typedef struct drm_i915_irq_emit {
453 int *irq_seq;
454 } drm_i915_irq_emit_t;
455
456 typedef struct drm_i915_irq_wait {
457 int irq_seq;
458 } drm_i915_irq_wait_t;
459
460 /*
461 * Different modes of per-process Graphics Translation Table,
462 * see I915_PARAM_HAS_ALIASING_PPGTT
463 */
464 #define I915_GEM_PPGTT_NONE 0
465 #define I915_GEM_PPGTT_ALIASING 1
466 #define I915_GEM_PPGTT_FULL 2
467
468 /* Ioctl to query kernel params:
469 */
470 #define I915_PARAM_IRQ_ACTIVE 1
471 #define I915_PARAM_ALLOW_BATCHBUFFER 2
472 #define I915_PARAM_LAST_DISPATCH 3
473 #define I915_PARAM_CHIPSET_ID 4
474 #define I915_PARAM_HAS_GEM 5
475 #define I915_PARAM_NUM_FENCES_AVAIL 6
476 #define I915_PARAM_HAS_OVERLAY 7
477 #define I915_PARAM_HAS_PAGEFLIPPING 8
478 #define I915_PARAM_HAS_EXECBUF2 9
479 #define I915_PARAM_HAS_BSD 10
480 #define I915_PARAM_HAS_BLT 11
481 #define I915_PARAM_HAS_RELAXED_FENCING 12
482 #define I915_PARAM_HAS_COHERENT_RINGS 13
483 #define I915_PARAM_HAS_EXEC_CONSTANTS 14
484 #define I915_PARAM_HAS_RELAXED_DELTA 15
485 #define I915_PARAM_HAS_GEN7_SOL_RESET 16
486 #define I915_PARAM_HAS_LLC 17
487 #define I915_PARAM_HAS_ALIASING_PPGTT 18
488 #define I915_PARAM_HAS_WAIT_TIMEOUT 19
489 #define I915_PARAM_HAS_SEMAPHORES 20
490 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
491 #define I915_PARAM_HAS_VEBOX 22
492 #define I915_PARAM_HAS_SECURE_BATCHES 23
493 #define I915_PARAM_HAS_PINNED_BATCHES 24
494 #define I915_PARAM_HAS_EXEC_NO_RELOC 25
495 #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
496 #define I915_PARAM_HAS_WT 27
497 #define I915_PARAM_CMD_PARSER_VERSION 28
498 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
499 #define I915_PARAM_MMAP_VERSION 30
500 #define I915_PARAM_HAS_BSD2 31
501 #define I915_PARAM_REVISION 32
502 #define I915_PARAM_SUBSLICE_TOTAL 33
503 #define I915_PARAM_EU_TOTAL 34
504 #define I915_PARAM_HAS_GPU_RESET 35
505 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
506 #define I915_PARAM_HAS_EXEC_SOFTPIN 37
507 #define I915_PARAM_HAS_POOLED_EU 38
508 #define I915_PARAM_MIN_EU_IN_POOL 39
509 #define I915_PARAM_MMAP_GTT_VERSION 40
510
511 /*
512 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
513 * priorities and the driver will attempt to execute batches in priority order.
514 * The param returns a capability bitmask, nonzero implies that the scheduler
515 * is enabled, with different features present according to the mask.
516 *
517 * The initial priority for each batch is supplied by the context and is
518 * controlled via I915_CONTEXT_PARAM_PRIORITY.
519 */
520 #define I915_PARAM_HAS_SCHEDULER 41
521 #define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
522 #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
523 #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
524 #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
525 #define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
526
527 #define I915_PARAM_HUC_STATUS 42
528
529 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
530 * synchronisation with implicit fencing on individual objects.
531 * See EXEC_OBJECT_ASYNC.
532 */
533 #define I915_PARAM_HAS_EXEC_ASYNC 43
534
535 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
536 * both being able to pass in a sync_file fd to wait upon before executing,
537 * and being able to return a new sync_file fd that is signaled when the
538 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
539 */
540 #define I915_PARAM_HAS_EXEC_FENCE 44
541
542 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
543  * user-specified buffers for post-mortem debugging of GPU hangs. See
544 * EXEC_OBJECT_CAPTURE.
545 */
546 #define I915_PARAM_HAS_EXEC_CAPTURE 45
547
548 #define I915_PARAM_SLICE_MASK 46
549
550 /* Assuming it's uniform for each slice, this queries the mask of subslices
551 * per-slice for this system.
552 */
553 #define I915_PARAM_SUBSLICE_MASK 47
554
555 /*
556 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
557 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
558 */
559 #define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
560
561 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
562 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
563 */
564 #define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
565
566 /*
567 * Query whether every context (both per-file default and user created) is
568 * isolated (insofar as HW supports). If this parameter is not true, then
569 * freshly created contexts may inherit values from an existing context,
570 * rather than default HW values. If true, it also ensures (insofar as HW
571 * supports) that all state set by this context will not leak to any other
572 * context.
573 *
574  * As not every engine across every gen supports contexts, the returned
575 * value reports the support of context isolation for individual engines by
576 * returning a bitmask of each engine class set to true if that class supports
577 * isolation.
578 */
579 #define I915_PARAM_HAS_CONTEXT_ISOLATION 50
580
581 /* Frequency of the command streamer timestamps given by the *_TIMESTAMP
582  * registers. This used to be fixed per platform, but from CNL onwards it
583  * may vary depending on the part.
584 */
585 #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
586
587 /*
588 * Once upon a time we supposed that writes through the GGTT would be
589 * immediately in physical memory (once flushed out of the CPU path). However,
590 * on a few different processors and chipsets, this is not necessarily the case
591 * as the writes appear to be buffered internally. Thus a read of the backing
592 * storage (physical memory) via a different path (with different physical tags
593 * to the indirect write via the GGTT) will see stale values from before
594 * the GGTT write. Inside the kernel, we can for the most part keep track of
595 * the different read/write domains in use (e.g. set-domain), but the assumption
596 * of coherency is baked into the ABI, hence reporting its true state in this
597 * parameter.
598 *
599 * Reports true when writes via mmap_gtt are immediately visible following an
600 * lfence to flush the WCB.
601 *
602  * Reports false when writes via mmap_gtt are indeterminately delayed in an
603  * internal buffer and are _not_ immediately visible to third parties accessing
604  * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
605  * communications channel when reporting false is strongly discouraged.
606 */
607 #define I915_PARAM_MMAP_GTT_COHERENT 52
608
609 /*
610 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
611 * execution through use of explicit fence support.
612 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
613 */
614 #define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
615
616 /*
617 * Revision of the i915-perf uAPI. The value returned helps determine what
618 * i915-perf features are available. See drm_i915_perf_property_id.
619 */
620 #define I915_PARAM_PERF_REVISION 54
621
622 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
623 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
624 * I915_EXEC_USE_EXTENSIONS.
625 */
626 #define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
627
628 /* Must be kept compact -- no holes and well documented */
629
630 typedef struct drm_i915_getparam {
631 __s32 param;
632 /*
633 * WARNING: Using pointers instead of fixed-size u64 means we need to write
634 * compat32 code. Don't repeat this mistake.
635 */
636 int *value;
637 } drm_i915_getparam_t;
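/*
 * A minimal query sketch (illustrative; fd is assumed to be an open i915 DRM
 * fd, error handling reduced to the return check):
 *
 *	#include <sys/ioctl.h>
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &value,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *		return -errno;
 *	// value now holds the PCI device id of the GPU.
 *
 * Unknown parameters fail with -EINVAL, which is the usual way userspace
 * probes for optional features (e.g. I915_PARAM_HAS_EXEC_ASYNC).
 */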
638
639 /* Ioctl to set kernel params:
640 */
641 #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
642 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
643 #define I915_SETPARAM_ALLOW_BATCHBUFFER 3
644 #define I915_SETPARAM_NUM_USED_FENCES 4
645 /* Must be kept compact -- no holes */
646
647 typedef struct drm_i915_setparam {
648 int param;
649 int value;
650 } drm_i915_setparam_t;
651
652 /* A memory manager for regions of shared memory:
653 */
654 #define I915_MEM_REGION_AGP 1
655
656 typedef struct drm_i915_mem_alloc {
657 int region;
658 int alignment;
659 int size;
660 int *region_offset; /* offset from start of fb or agp */
661 } drm_i915_mem_alloc_t;
662
663 typedef struct drm_i915_mem_free {
664 int region;
665 int region_offset;
666 } drm_i915_mem_free_t;
667
668 typedef struct drm_i915_mem_init_heap {
669 int region;
670 int size;
671 int start;
672 } drm_i915_mem_init_heap_t;
673
674 /* Allow memory manager to be torn down and re-initialized (e.g. on
675 * rotate):
676 */
677 typedef struct drm_i915_mem_destroy_heap {
678 int region;
679 } drm_i915_mem_destroy_heap_t;
680
681 /* Allow X server to configure which pipes to monitor for vblank signals
682 */
683 #define DRM_I915_VBLANK_PIPE_A 1
684 #define DRM_I915_VBLANK_PIPE_B 2
685
686 typedef struct drm_i915_vblank_pipe {
687 int pipe;
688 } drm_i915_vblank_pipe_t;
689
690 /* Schedule buffer swap at given vertical blank:
691 */
692 typedef struct drm_i915_vblank_swap {
693 drm_drawable_t drawable;
694 enum drm_vblank_seq_type seqtype;
695 unsigned int sequence;
696 } drm_i915_vblank_swap_t;
697
698 typedef struct drm_i915_hws_addr {
699 __u64 addr;
700 } drm_i915_hws_addr_t;
701
702 struct drm_i915_gem_init {
703 /**
704 * Beginning offset in the GTT to be managed by the DRM memory
705 * manager.
706 */
707 __u64 gtt_start;
708 /**
709 * Ending offset in the GTT to be managed by the DRM memory
710 * manager.
711 */
712 __u64 gtt_end;
713 };
714
715 struct drm_i915_gem_create {
716 /**
717 * Requested size for the object.
718 *
719 * The (page-aligned) allocated size for the object will be returned.
720 */
721 __u64 size;
722 /**
723 * Returned handle for the object.
724 *
725 * Object handles are nonzero.
726 */
727 __u32 handle;
728 __u32 pad;
729 };
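/*
 * Creation sketch (illustrative only; fd is an open i915 DRM fd):
 *
 *	struct drm_i915_gem_create create = {
 *		.size = 4096,	// rounded up to page size by the kernel
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *		return -errno;
 *	// create.handle now names the new buffer object, create.size holds
 *	// the page-aligned size actually allocated.
 */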
730
731 struct drm_i915_gem_pread {
732 /** Handle for the object being read. */
733 __u32 handle;
734 __u32 pad;
735 /** Offset into the object to read from */
736 __u64 offset;
737 /** Length of data to read */
738 __u64 size;
739 /**
740 * Pointer to write the data into.
741 *
742 * This is a fixed-size type for 32/64 compatibility.
743 */
744 __u64 data_ptr;
745 };
746
747 struct drm_i915_gem_pwrite {
748 /** Handle for the object being written to. */
749 __u32 handle;
750 __u32 pad;
751 /** Offset into the object to write to */
752 __u64 offset;
753 /** Length of data to write */
754 __u64 size;
755 /**
756 * Pointer to read the data from.
757 *
758 * This is a fixed-size type for 32/64 compatibility.
759 */
760 __u64 data_ptr;
761 };
762
763 struct drm_i915_gem_mmap {
764 /** Handle for the object being mapped. */
765 __u32 handle;
766 __u32 pad;
767 /** Offset in the object to map. */
768 __u64 offset;
769 /**
770 * Length of data to map.
771 *
772 * The value will be page-aligned.
773 */
774 __u64 size;
775 /**
776 * Returned pointer the data was mapped at.
777 *
778 * This is a fixed-size type for 32/64 compatibility.
779 */
780 __u64 addr_ptr;
781
782 /**
783 * Flags for extended behaviour.
784 *
785 * Added in version 2.
786 */
787 __u64 flags;
788 #define I915_MMAP_WC 0x1
789 };
790
791 struct drm_i915_gem_mmap_gtt {
792 /** Handle for the object being mapped. */
793 __u32 handle;
794 __u32 pad;
795 /**
796 * Fake offset to use for subsequent mmap call
797 *
798 * This is a fixed-size type for 32/64 compatibility.
799 */
800 __u64 offset;
801 };
802
803 struct drm_i915_gem_mmap_offset {
804 /** Handle for the object being mapped. */
805 __u32 handle;
806 __u32 pad;
807 /**
808 * Fake offset to use for subsequent mmap call
809 *
810 * This is a fixed-size type for 32/64 compatibility.
811 */
812 __u64 offset;
813
814 /**
815 * Flags for extended behaviour.
816 *
817 	 * One of the MMAP_OFFSET types (GTT, WC, WB, UC, etc.)
818 	 * must be included.
819 */
820 __u64 flags;
821 #define I915_MMAP_OFFSET_GTT 0
822 #define I915_MMAP_OFFSET_WC 1
823 #define I915_MMAP_OFFSET_WB 2
824 #define I915_MMAP_OFFSET_UC 3
825
826 /*
827 * Zero-terminated chain of extensions.
828 *
829 * No current extensions defined; mbz.
830 */
831 __u64 extensions;
832 };
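/*
 * Mapping sketch (illustrative; handle comes from GEM_CREATE, fd is an open
 * DRM fd). The ioctl only hands back a fake offset; the actual mapping is
 * made with a regular mmap(2) on the DRM fd using that offset:
 *
 *	#include <sys/mman.h>
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
 *		return -errno;
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, arg.offset);
 */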
833
834 struct drm_i915_gem_set_domain {
835 /** Handle for the object */
836 __u32 handle;
837
838 /** New read domains */
839 __u32 read_domains;
840
841 /** New write domain */
842 __u32 write_domain;
843 };
844
845 struct drm_i915_gem_sw_finish {
846 /** Handle for the object */
847 __u32 handle;
848 };
849
850 struct drm_i915_gem_relocation_entry {
851 /**
852 * Handle of the buffer being pointed to by this relocation entry.
853 *
854 * It's appealing to make this be an index into the mm_validate_entry
855 * list to refer to the buffer, but this allows the driver to create
856 * a relocation list for state buffers and not re-write it per
857 * exec using the buffer.
858 */
859 __u32 target_handle;
860
861 /**
862 * Value to be added to the offset of the target buffer to make up
863 * the relocation entry.
864 */
865 __u32 delta;
866
867 /** Offset in the buffer the relocation entry will be written into */
868 __u64 offset;
869
870 /**
871 * Offset value of the target buffer that the relocation entry was last
872 * written as.
873 *
874 * If the buffer has the same offset as last time, we can skip syncing
875 * and writing the relocation. This value is written back out by
876 * the execbuffer ioctl when the relocation is written.
877 */
878 __u64 presumed_offset;
879
880 /**
881 * Target memory domains read by this operation.
882 */
883 __u32 read_domains;
884
885 /**
886 * Target memory domains written by this operation.
887 *
888 * Note that only one domain may be written by the whole
889 * execbuffer operation, so that where there are conflicts,
890 * the application will get -EINVAL back.
891 */
892 __u32 write_domain;
893 };
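/*
 * Relocation sketch (illustrative; target_handle, batch_offset_of_pointer
 * and last_known_gtt_offset are caller-supplied placeholders). This asks the
 * kernel to patch the address inside the batch at `offset` so that it points
 * `delta` bytes into the target buffer:
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_handle,
 *		.delta = 0,
 *		.offset = batch_offset_of_pointer,
 *		.presumed_offset = last_known_gtt_offset,
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *		.write_domain = 0,
 *	};
 *
 * An array of such entries is referenced from the relocs_ptr field of the
 * corresponding exec object (see struct drm_i915_gem_exec_object2 below).
 */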
894
895 /** @{
896 * Intel memory domains
897 *
898 * Most of these just align with the various caches in
899 * the system and are used to flush and invalidate as
900 * objects end up cached in different domains.
901 */
902 /** CPU cache */
903 #define I915_GEM_DOMAIN_CPU 0x00000001
904 /** Render cache, used by 2D and 3D drawing */
905 #define I915_GEM_DOMAIN_RENDER 0x00000002
906 /** Sampler cache, used by texture engine */
907 #define I915_GEM_DOMAIN_SAMPLER 0x00000004
908 /** Command queue, used to load batch buffers */
909 #define I915_GEM_DOMAIN_COMMAND 0x00000008
910 /** Instruction cache, used by shader programs */
911 #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
912 /** Vertex address cache */
913 #define I915_GEM_DOMAIN_VERTEX 0x00000020
914 /** GTT domain - aperture and scanout */
915 #define I915_GEM_DOMAIN_GTT 0x00000040
916 /** WC domain - uncached access */
917 #define I915_GEM_DOMAIN_WC 0x00000080
918 /** @} */
919
920 struct drm_i915_gem_exec_object {
921 /**
922 * User's handle for a buffer to be bound into the GTT for this
923 * operation.
924 */
925 __u32 handle;
926
927 /** Number of relocations to be performed on this buffer */
928 __u32 relocation_count;
929 /**
930 * Pointer to array of struct drm_i915_gem_relocation_entry containing
931 * the relocations to be performed in this buffer.
932 */
933 __u64 relocs_ptr;
934
935 /** Required alignment in graphics aperture */
936 __u64 alignment;
937
938 /**
939 * Returned value of the updated offset of the object, for future
940 * presumed_offset writes.
941 */
942 __u64 offset;
943 };
944
945 struct drm_i915_gem_execbuffer {
946 /**
947 * List of buffers to be validated with their relocations to be
948 	 * performed on them.
949 *
950 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
951 *
952 * These buffers must be listed in an order such that all relocations
953 * a buffer is performing refer to buffers that have already appeared
954 * in the validate list.
955 */
956 __u64 buffers_ptr;
957 __u32 buffer_count;
958
959 /** Offset in the batchbuffer to start execution from. */
960 __u32 batch_start_offset;
961 /** Bytes used in batchbuffer from batch_start_offset */
962 __u32 batch_len;
963 __u32 DR1;
964 __u32 DR4;
965 __u32 num_cliprects;
966 /** This is a struct drm_clip_rect *cliprects */
967 __u64 cliprects_ptr;
968 };
969
970 struct drm_i915_gem_exec_object2 {
971 /**
972 * User's handle for a buffer to be bound into the GTT for this
973 * operation.
974 */
975 __u32 handle;
976
977 /** Number of relocations to be performed on this buffer */
978 __u32 relocation_count;
979 /**
980 * Pointer to array of struct drm_i915_gem_relocation_entry containing
981 * the relocations to be performed in this buffer.
982 */
983 __u64 relocs_ptr;
984
985 /** Required alignment in graphics aperture */
986 __u64 alignment;
987
988 /**
989 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
990 * the user with the GTT offset at which this object will be pinned.
991 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
992 * presumed_offset of the object.
993 * During execbuffer2 the kernel populates it with the value of the
994 * current GTT offset of the object, for future presumed_offset writes.
995 */
996 __u64 offset;
997
998 #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
999 #define EXEC_OBJECT_NEEDS_GTT (1<<1)
1000 #define EXEC_OBJECT_WRITE (1<<2)
1001 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
1002 #define EXEC_OBJECT_PINNED (1<<4)
1003 #define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
1004 /* The kernel implicitly tracks GPU activity on all GEM objects, and
1005 * synchronises operations with outstanding rendering. This includes
1006 * rendering on other devices if exported via dma-buf. However, sometimes
1007 * this tracking is too coarse and the user knows better. For example,
1008 * if the object is split into non-overlapping ranges shared between different
1009 * clients or engines (i.e. suballocating objects), the implicit tracking
1010  * by the kernel assumes that each operation affects the whole object rather
1011 * than an individual range, causing needless synchronisation between clients.
1012 * The kernel will also forgo any CPU cache flushes prior to rendering from
1013 * the object as the client is expected to be also handling such domain
1014 * tracking.
1015 *
1016 * The kernel maintains the implicit tracking in order to manage resources
1017 * used by the GPU - this flag only disables the synchronisation prior to
1018 * rendering with this object in this execbuf.
1019 *
1020  * Opting out of implicit synchronisation requires the user to do its own
1021 * explicit tracking to avoid rendering corruption. See, for example,
1022 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
1023 */
1024 #define EXEC_OBJECT_ASYNC (1<<6)
1025 /* Request that the contents of this execobject be copied into the error
1026 * state upon a GPU hang involving this batch for post-mortem debugging.
1027 * These buffers are recorded in no particular order as "user" in
1028 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
1029 * if the kernel supports this flag.
1030 */
1031 #define EXEC_OBJECT_CAPTURE (1<<7)
1032 /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
1033 #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
1034 __u64 flags;
1035
1036 union {
1037 __u64 rsvd1;
1038 __u64 pad_to_size;
1039 };
1040 __u64 rsvd2;
1041 };
1042
1043 struct drm_i915_gem_exec_fence {
1044 /**
1045 * User's handle for a drm_syncobj to wait on or signal.
1046 */
1047 __u32 handle;
1048
1049 #define I915_EXEC_FENCE_WAIT (1<<0)
1050 #define I915_EXEC_FENCE_SIGNAL (1<<1)
1051 #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
1052 __u32 flags;
1053 };
1054
1055 /**
1056 * See drm_i915_gem_execbuffer_ext_timeline_fences.
1057 */
1058 #define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
1059
1060 /**
1061 * This structure describes an array of drm_syncobj and associated points for
1062 * timeline variants of drm_syncobj. It is invalid to append this structure to
1063 * the execbuf if I915_EXEC_FENCE_ARRAY is set.
1064 */
1065 struct drm_i915_gem_execbuffer_ext_timeline_fences {
1066 struct i915_user_extension base;
1067
1068 /**
1069 	 * Number of elements in the handles_ptr & values_ptr arrays.
1070 */
1071 __u64 fence_count;
1072
1073 /**
1074 * Pointer to an array of struct drm_i915_gem_exec_fence of length
1075 * fence_count.
1076 */
1077 __u64 handles_ptr;
1078
1079 /**
1080 * Pointer to an array of u64 values of length fence_count. Values
1081 	 * must be 0 for a binary drm_syncobj. A value of 0 for a timeline
1082 * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
1083 */
1084 __u64 values_ptr;
1085 };
1086
1087 struct drm_i915_gem_execbuffer2 {
1088 /**
1089 * List of gem_exec_object2 structs
1090 */
1091 __u64 buffers_ptr;
1092 __u32 buffer_count;
1093
1094 /** Offset in the batchbuffer to start execution from. */
1095 __u32 batch_start_offset;
1096 /** Bytes used in batchbuffer from batch_start_offset */
1097 __u32 batch_len;
1098 __u32 DR1;
1099 __u32 DR4;
1100 __u32 num_cliprects;
1101 /**
1102 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
1103 * & I915_EXEC_USE_EXTENSIONS are not set.
1104 *
1105 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
1106 * of struct drm_i915_gem_exec_fence and num_cliprects is the length
1107 * of the array.
1108 *
1109 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
1110 * single struct i915_user_extension and num_cliprects is 0.
1111 */
1112 __u64 cliprects_ptr;
1113 #define I915_EXEC_RING_MASK (0x3f)
1114 #define I915_EXEC_DEFAULT (0<<0)
1115 #define I915_EXEC_RENDER (1<<0)
1116 #define I915_EXEC_BSD (2<<0)
1117 #define I915_EXEC_BLT (3<<0)
1118 #define I915_EXEC_VEBOX (4<<0)
1119
1120 /* Used for switching the constants addressing mode on gen4+ RENDER ring.
1121 * Gen6+ only supports relative addressing to dynamic state (default) and
1122 * absolute addressing.
1123 *
1124 * These flags are ignored for the BSD and BLT rings.
1125 */
1126 #define I915_EXEC_CONSTANTS_MASK (3<<6)
1127 #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
1128 #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
1129 #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
1130 __u64 flags;
1131 __u64 rsvd1; /* now used for context info */
1132 __u64 rsvd2;
1133 };
1134
1135 /** Resets the SO write offset registers for transform feedback on gen7. */
1136 #define I915_EXEC_GEN7_SOL_RESET (1<<8)
1137
1138 /** Request a privileged ("secure") batch buffer. Note: only available for
1139 * DRM_ROOT_ONLY | DRM_MASTER processes.
1140 */
1141 #define I915_EXEC_SECURE (1<<9)
1142
1143 /** Inform the kernel that the batch is and will always be pinned. This
1144 * negates the requirement for a workaround to be performed to avoid
1145 * an incoherent CS (such as can be found on 830/845). If this flag is
1146 * not passed, the kernel will endeavour to make sure the batch is
1147 * coherent with the CS before execution. If this flag is passed,
1148 * userspace assumes the responsibility for ensuring the same.
1149 */
1150 #define I915_EXEC_IS_PINNED (1<<10)
1151
1152 /** Provide a hint to the kernel that the command stream and auxiliary
1153  * state buffers already hold the correct presumed addresses and so the
1154 * relocation process may be skipped if no buffers need to be moved in
1155 * preparation for the execbuffer.
1156 */
1157 #define I915_EXEC_NO_RELOC (1<<11)
1158
1159 /** Use the reloc.handle as an index into the exec object array rather
1160 * than as the per-file handle.
1161 */
1162 #define I915_EXEC_HANDLE_LUT (1<<12)
1163
1164 /** Used for switching BSD rings on the platforms with two BSD rings */
1165 #define I915_EXEC_BSD_SHIFT (13)
1166 #define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
1167 /* default ping-pong mode */
1168 #define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
1169 #define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
1170 #define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
1171
1172 /** Tell the kernel that the batchbuffer is processed by
1173 * the resource streamer.
1174 */
1175 #define I915_EXEC_RESOURCE_STREAMER (1<<15)
1176
1177 /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
1178 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1179 * the batch.
1180 *
1181 * Returns -EINVAL if the sync_file fd cannot be found.
1182 */
1183 #define I915_EXEC_FENCE_IN (1<<16)
1184
1185 /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1186 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
1187  * to the caller, and it should be closed after use. (The fd is a regular
1188 * file descriptor and will be cleaned up on process termination. It holds
1189 * a reference to the request, but nothing else.)
1190 *
1191 * The sync_file fd can be combined with other sync_file and passed either
1192 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1193 * will only occur after this request completes), or to other devices.
1194 *
1195 * Using I915_EXEC_FENCE_OUT requires use of
1196 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1197 * back to userspace. Failure to do so will cause the out-fence to always
1198 * be reported as zero, and the real fence fd to be leaked.
1199 */
1200 #define I915_EXEC_FENCE_OUT (1<<17)
1201
1202 /*
1203 * Traditionally the execbuf ioctl has only considered the final element in
1204 * the execobject[] to be the executable batch. Often though, the client
1205  * will know the batch object prior to construction and being able to place
1206 * it into the execobject[] array first can simplify the relocation tracking.
1207 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
1208  * execobject[] as the batch instead (the default is to use the last
1209 * element).
1210 */
1211 #define I915_EXEC_BATCH_FIRST (1<<18)
1212
1213 /* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1214 * define an array of i915_gem_exec_fence structures which specify a set of
1215 * dma fences to wait upon or signal.
1216 */
1217 #define I915_EXEC_FENCE_ARRAY (1<<19)
1218
1219 /*
1220 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
1221 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1222 * the batch.
1223 *
1224 * Returns -EINVAL if the sync_file fd cannot be found.
1225 */
1226 #define I915_EXEC_FENCE_SUBMIT (1 << 20)
1227
1228 /*
1229 * Setting I915_EXEC_USE_EXTENSIONS implies that
1230  * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
1231  * list of i915_user_extension. Each i915_user_extension node is the base of a
1232  * larger structure. The supported structures are listed in the
1233 * drm_i915_gem_execbuffer_ext enum.
1234 */
1235 #define I915_EXEC_USE_EXTENSIONS (1 << 21)
1236
1237 #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
1238
1239 #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
1240 #define i915_execbuffer2_set_context_id(eb2, context) \
1241 (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1242 #define i915_execbuffer2_get_context_id(eb2) \
1243 ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
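/*
 * Submission sketch (illustrative only): batch_handle is assumed to be a BO
 * ending in MI_BATCH_BUFFER_END, ctx_id comes from context creation, and the
 * pinned_gtt_offset/batch_size values are caller-supplied placeholders. The
 * chosen flags assume a kernel reporting I915_PARAM_HAS_EXEC_SOFTPIN.
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = batch_handle,
 *		.offset = pinned_gtt_offset,
 *		.flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
 *	};
 *
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (__u64)(uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_size,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *	};
 *
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
 *		return -errno;
 */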
1244
1245 struct drm_i915_gem_pin {
1246 /** Handle of the buffer to be pinned. */
1247 __u32 handle;
1248 __u32 pad;
1249
1250 /** alignment required within the aperture */
1251 __u64 alignment;
1252
1253 /** Returned GTT offset of the buffer. */
1254 __u64 offset;
1255 };
1256
1257 struct drm_i915_gem_unpin {
1258 /** Handle of the buffer to be unpinned. */
1259 __u32 handle;
1260 __u32 pad;
1261 };
1262
1263 struct drm_i915_gem_busy {
1264 /** Handle of the buffer to check for busy */
1265 __u32 handle;
1266
1267 /** Return busy status
1268 *
1269 * A return of 0 implies that the object is idle (after
1270 * having flushed any pending activity), and a non-zero return that
1271 * the object is still in-flight on the GPU. (The GPU has not yet
1272 * signaled completion for all pending requests that reference the
1273 * object.) An object is guaranteed to become idle eventually (so
1274 * long as no new GPU commands are executed upon it). Due to the
1275 * asynchronous nature of the hardware, an object reported
1276 * as busy may become idle before the ioctl is completed.
1277 *
1278 * Furthermore, if the object is busy, which engine is busy is only
1279 * provided as a guide and only indirectly by reporting its class
1280 * (there may be more than one engine in each class). There are race
1281 * conditions which prevent the report of which engines are busy from
1282 * being always accurate. However, the converse is not true. If the
1283 * object is idle, the result of the ioctl, that all engines are idle,
1284 * is accurate.
1285 *
1286 * The returned dword is split into two fields to indicate both
1287 	 * the engine classes on which the object is being read, and the
1288 * engine class on which it is currently being written (if any).
1289 *
1290 	 * The low word (bits 0:15) indicates whether the object is being written
1291 * to by any engine (there can only be one, as the GEM implicit
1292 * synchronisation rules force writes to be serialised). Only the
1293 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1294 * 1 not 0 etc) for the last write is reported.
1295 *
1296 	 * The high word (bits 16:31) is a bitmask of which engine classes
1297 * are currently reading from the object. Multiple engines may be
1298 * reading from the object simultaneously.
1299 *
1300 * The value of each engine class is the same as specified in the
1301 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
1302 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1303 	 * Some hardware may have parallel
1304 * execution engines, e.g. multiple media engines, which are
1305 * mapped to the same class identifier and so are not separately
1306 * reported for busyness.
1307 *
1308 * Caveat emptor:
1309 * Only the boolean result of this query is reliable; that is whether
1310 * the object is idle or busy. The report of which engines are busy
1311 * should be only used as a heuristic.
1312 */
1313 __u32 busy;
1314 };
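/*
 * Decoding sketch for the busy field above (illustrative; handle and fd are
 * assumed to exist):
 *
 *	struct drm_i915_gem_busy args = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &args))
 *		return -errno;
 *
 *	// Low 16 bits: class of the single writer, offset by one (0 = none).
 *	unsigned int write_class = args.busy & 0xffff;
 *	// High 16 bits: bitmask of engine classes currently reading.
 *	unsigned int read_classes = args.busy >> 16;
 *
 *	if (read_classes & (1u << I915_ENGINE_CLASS_RENDER))
 *		;	// the render class still has reads in flight
 */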
1315
1316 /**
1317 * I915_CACHING_NONE
1318 *
1319 * GPU access is not coherent with cpu caches. Default for machines without an
1320 * LLC.
1321 */
1322 #define I915_CACHING_NONE 0
1323 /**
1324 * I915_CACHING_CACHED
1325 *
1326 * GPU access is coherent with cpu caches and furthermore the data is cached in
1327 * last-level caches shared between cpu cores and the gpu GT. Default on
1328 * machines with HAS_LLC.
1329 */
1330 #define I915_CACHING_CACHED 1
1331 /**
1332 * I915_CACHING_DISPLAY
1333 *
1334 * Special GPU caching mode which is coherent with the scanout engines.
1335 * Transparently falls back to I915_CACHING_NONE on platforms where no special
1336 * cache mode (like write-through or gfdt flushing) is available. The kernel
1337 * automatically sets this mode when using a buffer as a scanout target.
1338 * Userspace can manually set this mode to avoid a costly stall and clflush in
1339 * the hotpath of drawing the first frame.
1340 */
1341 #define I915_CACHING_DISPLAY 2
1342
1343 struct drm_i915_gem_caching {
1344 /**
1345 * Handle of the buffer to set/get the caching level of. */
1346 __u32 handle;
1347
1348 /**
1349 	 * Caching level to apply, or the returned value
1350 *
1351 * bits0-15 are for generic caching control (i.e. the above defined
1352 * values). bits16-31 are reserved for platform-specific variations
1353 * (e.g. l3$ caching on gen7). */
1354 __u32 caching;
1355 };
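/*
 * Caching-control sketch (illustrative): requesting LLC caching for a BO on
 * a HAS_LLC platform. The same struct is used for GET_CACHING, where the
 * kernel fills in the caching field instead.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		return -errno;
 */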
1356
1357 #define I915_TILING_NONE 0
1358 #define I915_TILING_X 1
1359 #define I915_TILING_Y 2
1360 #define I915_TILING_LAST I915_TILING_Y
1361
1362 #define I915_BIT_6_SWIZZLE_NONE 0
1363 #define I915_BIT_6_SWIZZLE_9 1
1364 #define I915_BIT_6_SWIZZLE_9_10 2
1365 #define I915_BIT_6_SWIZZLE_9_11 3
1366 #define I915_BIT_6_SWIZZLE_9_10_11 4
1367 /* Not seen by userland */
1368 #define I915_BIT_6_SWIZZLE_UNKNOWN 5
1369 /* Seen by userland. */
1370 #define I915_BIT_6_SWIZZLE_9_17 6
1371 #define I915_BIT_6_SWIZZLE_9_10_17 7
1372
1373 struct drm_i915_gem_set_tiling {
1374 /** Handle of the buffer to have its tiling state updated */
1375 __u32 handle;
1376
1377 /**
1378 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1379 * I915_TILING_Y).
1380 *
1381 * This value is to be set on request, and will be updated by the
1382 * kernel on successful return with the actual chosen tiling layout.
1383 *
1384 * The tiling mode may be demoted to I915_TILING_NONE when the system
1385 * has bit 6 swizzling that can't be managed correctly by GEM.
1386 *
1387 * Buffer contents become undefined when changing tiling_mode.
1388 */
1389 __u32 tiling_mode;
1390
1391 /**
1392 * Stride in bytes for the object when in I915_TILING_X or
1393 * I915_TILING_Y.
1394 */
1395 __u32 stride;
1396
1397 /**
1398 * Returned address bit 6 swizzling required for CPU access through
1399 * mmap mapping.
1400 */
1401 __u32 swizzle_mode;
1402 };
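/*
 * Tiling sketch (illustrative; row_pitch_in_bytes is a placeholder and must
 * satisfy the platform's tiling constraints, which the caller is assumed to
 * have checked):
 *
 *	struct drm_i915_gem_set_tiling arg = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = row_pitch_in_bytes,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg))
 *		return -errno;
 *	// arg.tiling_mode and arg.swizzle_mode report what the kernel chose.
 */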
1403
1404 struct drm_i915_gem_get_tiling {
1405 /** Handle of the buffer to get tiling state for. */
1406 __u32 handle;
1407
1408 /**
1409 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1410 * I915_TILING_Y).
1411 */
1412 __u32 tiling_mode;
1413
1414 /**
1415 * Returned address bit 6 swizzling required for CPU access through
1416 * mmap mapping.
1417 */
1418 __u32 swizzle_mode;
1419
1420 /**
1421 * Returned address bit 6 swizzling required for CPU access through
1422 * mmap mapping whilst bound.
1423 */
1424 __u32 phys_swizzle_mode;
1425 };
1426
1427 struct drm_i915_gem_get_aperture {
1428 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1429 __u64 aper_size;
1430
1431 /**
1432 * Available space in the aperture used by i915_gem_execbuffer, in
1433 * bytes
1434 */
1435 __u64 aper_available_size;
1436 };
1437
1438 struct drm_i915_get_pipe_from_crtc_id {
1439 /** ID of CRTC being requested **/
1440 __u32 crtc_id;
1441
1442 /** pipe of requested CRTC **/
1443 __u32 pipe;
1444 };
1445
1446 #define I915_MADV_WILLNEED 0
1447 #define I915_MADV_DONTNEED 1
1448 #define __I915_MADV_PURGED 2 /* internal state */
1449
1450 struct drm_i915_gem_madvise {
1451 /** Handle of the buffer to change the backing store advice */
1452 __u32 handle;
1453
1454 /* Advice: either the buffer will be needed again in the near future,
1455 	 * or won't be and could be discarded under memory pressure.
1456 */
1457 __u32 madv;
1458
1459 /** Whether the backing store still exists. */
1460 __u32 retained;
1461 };
1462
1463 /* flags */
1464 #define I915_OVERLAY_TYPE_MASK 0xff
1465 #define I915_OVERLAY_YUV_PLANAR 0x01
1466 #define I915_OVERLAY_YUV_PACKED 0x02
1467 #define I915_OVERLAY_RGB 0x03
1468
1469 #define I915_OVERLAY_DEPTH_MASK 0xff00
1470 #define I915_OVERLAY_RGB24 0x1000
1471 #define I915_OVERLAY_RGB16 0x2000
1472 #define I915_OVERLAY_RGB15 0x3000
1473 #define I915_OVERLAY_YUV422 0x0100
1474 #define I915_OVERLAY_YUV411 0x0200
1475 #define I915_OVERLAY_YUV420 0x0300
1476 #define I915_OVERLAY_YUV410 0x0400
1477
1478 #define I915_OVERLAY_SWAP_MASK 0xff0000
1479 #define I915_OVERLAY_NO_SWAP 0x000000
1480 #define I915_OVERLAY_UV_SWAP 0x010000
1481 #define I915_OVERLAY_Y_SWAP 0x020000
1482 #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
1483
1484 #define I915_OVERLAY_FLAGS_MASK 0xff000000
1485 #define I915_OVERLAY_ENABLE 0x01000000
1486
1487 struct drm_intel_overlay_put_image {
1488 /* various flags and src format description */
1489 __u32 flags;
1490 /* source picture description */
1491 __u32 bo_handle;
1492 /* stride values and offsets are in bytes, buffer relative */
1493 __u16 stride_Y; /* stride for packed formats */
1494 __u16 stride_UV;
1495 	__u32 offset_Y; /* offset for packed formats */
1496 __u32 offset_U;
1497 __u32 offset_V;
1498 /* in pixels */
1499 __u16 src_width;
1500 __u16 src_height;
1501 /* to compensate the scaling factors for partially covered surfaces */
1502 __u16 src_scan_width;
1503 __u16 src_scan_height;
1504 /* output crtc description */
1505 __u32 crtc_id;
1506 __u16 dst_x;
1507 __u16 dst_y;
1508 __u16 dst_width;
1509 __u16 dst_height;
1510 };
1511
1512 /* flags */
1513 #define I915_OVERLAY_UPDATE_ATTRS (1<<0)
1514 #define I915_OVERLAY_UPDATE_GAMMA (1<<1)
1515 #define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
1516 struct drm_intel_overlay_attrs {
1517 __u32 flags;
1518 __u32 color_key;
1519 __s32 brightness;
1520 __u32 contrast;
1521 __u32 saturation;
1522 __u32 gamma0;
1523 __u32 gamma1;
1524 __u32 gamma2;
1525 __u32 gamma3;
1526 __u32 gamma4;
1527 __u32 gamma5;
1528 };
1529
1530 /*
1531 * Intel sprite handling
1532 *
1533 * Color keying works with a min/mask/max tuple. Color keying is supported on
1534 * both the source and the destination.
1535 *
1536 * Source keying:
1537 * Sprite pixels within the min & max values, masked against the color channels
1538 * specified in the mask field, will be transparent. All other pixels will
1539 * be displayed on top of the primary plane. For RGB surfaces, only the min
1540 * and mask fields will be used; ranged compares are not allowed.
1541 *
1542 * Destination keying:
1543 * Primary plane pixels that match the min value, masked against the color
1544 * channels specified in the mask field, will be replaced by corresponding
1545 * pixels from the sprite plane.
1546 *
1547 * Note that source & destination keying are exclusive; only one can be
1548 * active on a given plane.
1549 */
1550
1551 #define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
1552 * flags==0 to disable colorkeying.
1553 */
1554 #define I915_SET_COLORKEY_DESTINATION (1<<1)
1555 #define I915_SET_COLORKEY_SOURCE (1<<2)
1556 struct drm_intel_sprite_colorkey {
1557 __u32 plane_id;
1558 __u32 min_value;
1559 __u32 channel_mask;
1560 __u32 max_value;
1561 __u32 flags;
1562 };
1563
1564 struct drm_i915_gem_wait {
1565 /** Handle of BO we shall wait on */
1566 __u32 bo_handle;
1567 __u32 flags;
1568 	/** Number of nanoseconds to wait. On return, holds the time remaining. */
1569 __s64 timeout_ns;
1570 };
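
/*
 * Illustrative sketch: waiting for a buffer to become idle with
 * DRM_IOCTL_I915_GEM_WAIT (defined earlier in this header). "fd" and "handle"
 * are assumed; a negative timeout waits indefinitely.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,	// up to one second
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *	// ret == 0: idle; ret < 0 with errno == ETIME: timed out, and
 *	// wait.timeout_ns holds the time remaining.
 */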
1571
1572 struct drm_i915_gem_context_create {
1573 __u32 ctx_id; /* output: id of new context*/
1574 __u32 pad;
1575 };
1576
1577 struct drm_i915_gem_context_create_ext {
1578 __u32 ctx_id; /* output: id of new context*/
1579 __u32 flags;
1580 #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
1581 #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
1582 #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1583 (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1584 __u64 extensions;
1585 };
1586
1587 struct drm_i915_gem_context_param {
1588 __u32 ctx_id;
1589 __u32 size;
1590 __u64 param;
1591 #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
1592 #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
1593 #define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1594 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1595 #define I915_CONTEXT_PARAM_BANNABLE 0x5
1596 #define I915_CONTEXT_PARAM_PRIORITY 0x6
1597 #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1598 #define I915_CONTEXT_DEFAULT_PRIORITY 0
1599 #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1600 /*
1601 * When using the following param, value should be a pointer to
1602 * drm_i915_gem_context_param_sseu.
1603 */
1604 #define I915_CONTEXT_PARAM_SSEU 0x7
1605
1606 /*
1607 * Not all clients may want to attempt automatic recovery of a context after
1608 * a hang (for example, some clients may only submit very small incremental
1609 * batches relying on known logical state of previous batches which will never
1610 * recover correctly and each attempt will hang), and so would prefer that
1611 * the context is forever banned instead.
1612 *
1613 * If set to false (0), after a reset, subsequent (and in flight) rendering
1614 * from this context is discarded, and the client will need to create a new
1615 * context to use instead.
1616 *
1617 * If set to true (1), the kernel will automatically attempt to recover the
1618 * context by skipping the hanging batch and executing the next batch starting
1619 * from the default context state (discarding the incomplete logical context
1620 * state lost due to the reset).
1621 *
1622 * On creation, all new contexts are marked as recoverable.
1623 */
1624 #define I915_CONTEXT_PARAM_RECOVERABLE 0x8
1625
1626 /*
1627 * The id of the associated virtual memory address space (ppGTT) of
1628 * this context. Can be retrieved and passed to another context
1629 * (on the same fd) for both to use the same ppGTT and so share
1630 * address layouts, and avoid reloading the page tables on context
1631 * switches between themselves.
1632 *
1633 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
1634 */
1635 #define I915_CONTEXT_PARAM_VM 0x9
1636
1637 /*
1638 * I915_CONTEXT_PARAM_ENGINES:
1639 *
1640 * Bind this context to operate on this subset of available engines. Henceforth,
1641 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
1642 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
1643 * and upwards. Slots 0...N are filled in using the specified (class, instance).
1644 * Use
1645 * engine_class: I915_ENGINE_CLASS_INVALID,
1646 * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
1647 * to specify a gap in the array that can be filled in later, e.g. by a
1648 * virtual engine used for load balancing.
1649 *
1650 * Setting the number of engines bound to the context to 0, by passing a
1651 * zero-sized argument, will revert to the default settings.
1652 *
1653 * See struct i915_context_param_engines.
1654 *
1655 * Extensions:
1656 * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
1657 * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
1658 */
1659 #define I915_CONTEXT_PARAM_ENGINES 0xa
1660
1661 /*
1662 * I915_CONTEXT_PARAM_PERSISTENCE:
1663 *
1664 * Allow the context and active rendering to survive the process until
1665 * completion. Persistence allows fire-and-forget clients to queue up a
1666 * bunch of work, hand the output over to a display server and then quit.
1667 * If the context is marked as not persistent, upon closing (either via
1668 * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
1669 * or process termination), the context and any outstanding requests will be
1670 * cancelled (and exported fences for cancelled requests marked as -EIO).
1671 *
1672 * By default, new contexts allow persistence.
1673 */
1674 #define I915_CONTEXT_PARAM_PERSISTENCE 0xb
1675
1676 /*
1677 * I915_CONTEXT_PARAM_RINGSIZE:
1678 *
1679 * Sets the size of the CS ringbuffer to use for logical ring contexts. This
1680 * applies a limit on how many batches can be queued to HW before the caller
1681 * is blocked due to lack of space for more commands.
1682 *
1683 * This can only reliably be set prior to first use, i.e. during
1684 * construction. At any later point, the current execution must be flushed as
1685 * the ring can only be changed while the context is idle. Note, the ringsize
1686 * can be specified as a constructor property, see
1687 * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
1688 *
1689 * Only applies to the current set of engines and is lost when those engines
1690 * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
1691 *
1692 * Must be between 4 - 512 KiB, in intervals of page size [4 KiB].
1693 * Default is 16 KiB.
1694 */
1695 #define I915_CONTEXT_PARAM_RINGSIZE 0xc
1696 /* Must be kept compact -- no holes and well documented */
1697
1698 __u64 value;
1699 };
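
/*
 * Illustrative sketch: raising the scheduling priority of a context with
 * DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM (defined earlier in this header). "fd"
 * and "ctx_id" are assumed; priorities above the default typically require
 * CAP_SYS_NICE.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512,	// within [I915_CONTEXT_MIN_USER_PRIORITY,
 *				//         I915_CONTEXT_MAX_USER_PRIORITY]
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */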
1700
1701 /**
1702 * Context SSEU programming
1703 *
1704 * It may be necessary for either functional or performance reasons to configure
1705 * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
1706 * Sub-slice/EU).
1707 *
1708 * This is done by configuring SSEU configuration using the below
1709 * @struct drm_i915_gem_context_param_sseu for every supported engine which
1710 * userspace intends to use.
1711 *
1712 * Not all GPUs or engines support this functionality in which case an error
1713 * code -ENODEV will be returned.
1714 *
1715 * Also, the flexibility of possible SSEU configuration permutations varies
1716 * between GPU generations and software-imposed limitations. Requesting an
1717 * unsupported combination will return an error code of -EINVAL.
1718 *
1719 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1720 * favour of a single global setting.
1721 */
1722 struct drm_i915_gem_context_param_sseu {
1723 /*
1724 * Engine class & instance to be configured or queried.
1725 */
1726 struct i915_engine_class_instance engine;
1727
1728 /*
1729 * Unknown flags must be cleared to zero.
1730 */
1731 __u32 flags;
1732 #define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
1733
1734 /*
1735 * Mask of slices to enable for the context. Valid values are a subset
1736 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1737 */
1738 __u64 slice_mask;
1739
1740 /*
1741 * Mask of subslices to enable for the context. Valid values are a
1742 	 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
1743 */
1744 __u64 subslice_mask;
1745
1746 /*
1747 * Minimum/Maximum number of EUs to enable per subslice for the
1748 	 * context. min_eus_per_subslice must be less than or equal to
1749 * max_eus_per_subslice.
1750 */
1751 __u16 min_eus_per_subslice;
1752 __u16 max_eus_per_subslice;
1753
1754 /*
1755 * Unused for now. Must be cleared to zero.
1756 */
1757 __u32 rsvd;
1758 };
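
/*
 * Illustrative sketch: querying the current SSEU configuration of the render
 * engine for a context. The value field carries a pointer to the struct above
 * and size tells the kernel how large that struct is. "fd" and "ctx_id" are
 * assumed; <stdint.h> provides uintptr_t.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	// sseu.slice_mask, sseu.subslice_mask and the EU limits now reflect the
 *	// context's current configuration.
 */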
1759
1760 /*
1761 * i915_context_engines_load_balance:
1762 *
1763 * Enable load balancing across this set of engines.
1764 *
1765 * A virtual engine is created in the I915_EXEC_DEFAULT slot [0]; when used,
1766 * it will proxy the execbuffer request onto one of the engines in the set
1767 * in such a way as to distribute the load evenly across the set.
1768 *
1769 * The set of engines must be compatible (e.g. the same HW class) as they
1770 * will share the same logical GPU context and ring.
1771 *
1772 * To intermix rendering with the virtual engine and direct rendering onto
1773 * the backing engines (bypassing the load balancing proxy), the context must
1774 * be defined to use a single timeline for all engines.
1775 */
1776 struct i915_context_engines_load_balance {
1777 struct i915_user_extension base;
1778
1779 __u16 engine_index;
1780 __u16 num_siblings;
1781 __u32 flags; /* all undefined flags must be zero */
1782
1783 __u64 mbz64; /* reserved for future use; must be zero */
1784
1785 struct i915_engine_class_instance engines[0];
1786 } __attribute__((packed));
1787
1788 #define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
1789 struct i915_user_extension base; \
1790 __u16 engine_index; \
1791 __u16 num_siblings; \
1792 __u32 flags; \
1793 __u64 mbz64; \
1794 struct i915_engine_class_instance engines[N__]; \
1795 } __attribute__((packed)) name__
1796
1797 /*
1798 * i915_context_engines_bond:
1799 *
1800 * Constructs bonded pairs for execution within a virtual engine.
1801 *
1802 * All engines are equal, but some are more equal than others. Given
1803 * the distribution of resources in the HW, it may be preferable to run
1804 * a request on a given subset of engines in parallel to a request on a
1805 * specific engine. We enable this selection of engines within a virtual
1806 * engine by specifying bonding pairs, for any given master engine we will
1807 * only execute on one of the corresponding siblings within the virtual engine.
1808 *
1809 * Executing a request in parallel on the master engine and a sibling requires
1810 * coordination with an I915_EXEC_FENCE_SUBMIT.
1811 */
1812 struct i915_context_engines_bond {
1813 struct i915_user_extension base;
1814
1815 struct i915_engine_class_instance master;
1816
1817 __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
1818 __u16 num_bonds;
1819
1820 __u64 flags; /* all undefined flags must be zero */
1821 __u64 mbz64[4]; /* reserved for future use; must be zero */
1822
1823 struct i915_engine_class_instance engines[0];
1824 } __attribute__((packed));
1825
1826 #define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
1827 struct i915_user_extension base; \
1828 struct i915_engine_class_instance master; \
1829 __u16 virtual_index; \
1830 __u16 num_bonds; \
1831 __u64 flags; \
1832 __u64 mbz64[4]; \
1833 struct i915_engine_class_instance engines[N__]; \
1834 } __attribute__((packed)) name__
1835
1836 struct i915_context_param_engines {
1837 __u64 extensions; /* linked chain of extension blocks, 0 terminates */
1838 #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
1839 #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
1840 struct i915_engine_class_instance engines[0];
1841 } __attribute__((packed));
1842
1843 #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
1844 __u64 extensions; \
1845 struct i915_engine_class_instance engines[N__]; \
1846 } __attribute__((packed)) name__
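
/*
 * Illustrative sketch: installing a two-slot engine map where slot 0 is a
 * load-balanced virtual engine over two video engines and slot 1 is the render
 * engine. Uses the helper macros above; "fd" and "ctx_id" are assumed and the
 * exact engine instances depend on the platform.
 *
 *	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) balance = {
 *		.base = { .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
 *		.engine_index = 0,
 *		.num_siblings = 2,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_VIDEO, 0 },
 *			{ I915_ENGINE_CLASS_VIDEO, 1 },
 *		},
 *	};
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) engines = {
 *		.extensions = (uintptr_t)&balance,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE },
 *			{ I915_ENGINE_CLASS_RENDER, 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */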
1847
1848 struct drm_i915_gem_context_create_ext_setparam {
1849 #define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1850 struct i915_user_extension base;
1851 struct drm_i915_gem_context_param param;
1852 };
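
/*
 * Illustrative sketch: creating a context and attaching a parameter at creation
 * time by chaining an I915_CONTEXT_CREATE_EXT_SETPARAM extension. "fd" is
 * assumed, and "vm_id" stands in for an id previously returned by
 * DRM_IOCTL_I915_GEM_VM_CREATE.
 *
 *	struct drm_i915_gem_context_create_ext_setparam set_vm = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_VM,
 *			.value = vm_id,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&set_vm,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// create.ctx_id now names the new context.
 */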
1853
1854 struct drm_i915_gem_context_create_ext_clone {
1855 #define I915_CONTEXT_CREATE_EXT_CLONE 1
1856 struct i915_user_extension base;
1857 __u32 clone_id;
1858 __u32 flags;
1859 #define I915_CONTEXT_CLONE_ENGINES (1u << 0)
1860 #define I915_CONTEXT_CLONE_FLAGS (1u << 1)
1861 #define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
1862 #define I915_CONTEXT_CLONE_SSEU (1u << 3)
1863 #define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
1864 #define I915_CONTEXT_CLONE_VM (1u << 5)
1865 #define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
1866 __u64 rsvd;
1867 };
1868
1869 struct drm_i915_gem_context_destroy {
1870 __u32 ctx_id;
1871 __u32 pad;
1872 };
1873
1874 /*
1875 * DRM_I915_GEM_VM_CREATE -
1876 *
1877 * Create a new virtual memory address space (ppGTT) for use within a context
1878 * on the same file. Extensions can be provided to configure exactly how the
1879 * address space is setup upon creation.
1880 *
1881 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
1882 * returned in the outparam @id.
1883 *
1884 * No flags are currently defined; all bits are reserved and must be zero.
1885 *
1886 * An extension chain may be provided, starting with @extensions, and terminated
1887 * by the @next_extension being 0. Currently, no extensions are defined.
1888 *
1889 * DRM_I915_GEM_VM_DESTROY -
1890 *
1891 * Destroys a previously created VM id, specified in @id.
1892 *
1893 * No extensions or flags are allowed currently, and so must be zero.
1894 */
1895 struct drm_i915_gem_vm_control {
1896 __u64 extensions;
1897 __u32 flags;
1898 __u32 vm_id;
1899 };
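
/*
 * Illustrative sketch: creating a ppGTT and sharing it with a context via
 * I915_CONTEXT_PARAM_VM. "fd" and "ctx_id" are assumed; error handling is
 * omitted.
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);	// vm.vm_id is the new VM
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_VM,
 *		.value = vm.vm_id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */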
1900
1901 struct drm_i915_reg_read {
1902 /*
1903 * Register offset.
1904 	 * For 64-bit wide registers where the upper 32 bits don't immediately
1905 	 * follow the lower 32 bits, the offset of the lower 32 bits must
1906 	 * be specified.
1907 */
1908 __u64 offset;
1909 #define I915_REG_READ_8B_WA (1ul << 0)
1910
1911 __u64 val; /* Return value */
1912 };
1913
1914 /* Known registers:
1915 *
1916 * Render engine timestamp - 0x2358 + 64bit - gen7+
1917  *	- Note this register returns an invalid value if read using the default
1918  *	  single-instruction 8-byte read; to work around that, pass the
1919  *	  I915_REG_READ_8B_WA flag in the offset field.
1920 *
1921 */
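
/*
 * Illustrative sketch: reading the gen7+ render engine timestamp described
 * above, applying the 8-byte read workaround. "fd" is assumed.
 *
 *	struct drm_i915_reg_read reg = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
 *		;	// reg.val holds the 64-bit timestamp
 */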
1922
1923 struct drm_i915_reset_stats {
1924 __u32 ctx_id;
1925 __u32 flags;
1926
1927 /* All resets since boot/module reload, for all contexts */
1928 __u32 reset_count;
1929
1930 /* Number of batches lost when active in GPU, for this context */
1931 __u32 batch_active;
1932
1933 /* Number of batches lost pending for execution, for this context */
1934 __u32 batch_pending;
1935
1936 __u32 pad;
1937 };
1938
1939 struct drm_i915_gem_userptr {
1940 __u64 user_ptr;
1941 __u64 user_size;
1942 __u32 flags;
1943 #define I915_USERPTR_READ_ONLY 0x1
1944 #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1945 /**
1946 * Returned handle for the object.
1947 *
1948 * Object handles are nonzero.
1949 */
1950 __u32 handle;
1951 };
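
/*
 * Illustrative sketch: wrapping anonymous, page-aligned process memory in a GEM
 * handle with DRM_IOCTL_I915_GEM_USERPTR. Both the pointer and the size must be
 * page aligned. "fd" is assumed; error handling is omitted.
 *
 *	void *ptr;
 *	size_t size = 16 * 4096;
 *	posix_memalign(&ptr, 4096, size);
 *	struct drm_i915_gem_userptr userptr = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = size,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
 *	// userptr.handle is a GEM handle backed by the caller's pages.
 */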
1952
1953 enum drm_i915_oa_format {
1954 I915_OA_FORMAT_A13 = 1, /* HSW only */
1955 I915_OA_FORMAT_A29, /* HSW only */
1956 I915_OA_FORMAT_A13_B8_C8, /* HSW only */
1957 I915_OA_FORMAT_B4_C8, /* HSW only */
1958 I915_OA_FORMAT_A45_B8_C8, /* HSW only */
1959 I915_OA_FORMAT_B4_C8_A16, /* HSW only */
1960 I915_OA_FORMAT_C4_B8, /* HSW+ */
1961
1962 /* Gen8+ */
1963 I915_OA_FORMAT_A12,
1964 I915_OA_FORMAT_A12_B8_C8,
1965 I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1966
1967 I915_OA_FORMAT_MAX /* non-ABI */
1968 };
1969
1970 enum drm_i915_perf_property_id {
1971 /**
1972 * Open the stream for a specific context handle (as used with
1973 * execbuffer2). A stream opened for a specific context this way
1974 * won't typically require root privileges.
1975 *
1976 * This property is available in perf revision 1.
1977 */
1978 DRM_I915_PERF_PROP_CTX_HANDLE = 1,
1979
1980 /**
1981 * A value of 1 requests the inclusion of raw OA unit reports as
1982 * part of stream samples.
1983 *
1984 * This property is available in perf revision 1.
1985 */
1986 DRM_I915_PERF_PROP_SAMPLE_OA,
1987
1988 /**
1989 * The value specifies which set of OA unit metrics should be
1990 * configured, defining the contents of any OA unit reports.
1991 *
1992 * This property is available in perf revision 1.
1993 */
1994 DRM_I915_PERF_PROP_OA_METRICS_SET,
1995
1996 /**
1997 * The value specifies the size and layout of OA unit reports.
1998 *
1999 * This property is available in perf revision 1.
2000 */
2001 DRM_I915_PERF_PROP_OA_FORMAT,
2002
2003 /**
2004 * Specifying this property implicitly requests periodic OA unit
2005 * sampling and (at least on Haswell) the sampling frequency is derived
2006 * from this exponent as follows:
2007 *
2008 * 80ns * 2^(period_exponent + 1)
2009 *
2010 * This property is available in perf revision 1.
2011 */
2012 DRM_I915_PERF_PROP_OA_EXPONENT,
2013
2014 /**
2015 	 * Specifying this property is only valid when specifying a context to
2016 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
2017 * will hold preemption of the particular context we want to gather
2018 * performance data about. The execbuf2 submissions must include a
2019 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
2020 *
2021 * This property is available in perf revision 3.
2022 */
2023 DRM_I915_PERF_PROP_HOLD_PREEMPTION,
2024
2025 /**
2026 * Specifying this pins all contexts to the specified SSEU power
2027 * configuration for the duration of the recording.
2028 *
2029 * This parameter's value is a pointer to a struct
2030 * drm_i915_gem_context_param_sseu.
2031 *
2032 * This property is available in perf revision 4.
2033 */
2034 DRM_I915_PERF_PROP_GLOBAL_SSEU,
2035
2036 /**
2037 * This optional parameter specifies the timer interval in nanoseconds
2038 * at which the i915 driver will check the OA buffer for available data.
2039 * Minimum allowed value is 100 microseconds. A default value is used by
2040 * the driver if this parameter is not specified. Note that larger timer
2041 	 * values will reduce CPU consumption during OA perf captures. However,
2042 * excessively large values would potentially result in OA buffer
2043 * overwrites as captures reach end of the OA buffer.
2044 *
2045 * This property is available in perf revision 5.
2046 */
2047 DRM_I915_PERF_PROP_POLL_OA_PERIOD,
2048
2049 DRM_I915_PERF_PROP_MAX /* non-ABI */
2050 };
2051
2052 struct drm_i915_perf_open_param {
2053 __u32 flags;
2054 #define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
2055 #define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
2056 #define I915_PERF_FLAG_DISABLED (1<<2)
2057
2058 /** The number of u64 (id, value) pairs */
2059 __u32 num_properties;
2060
2061 /**
2062 * Pointer to array of u64 (id, value) pairs configuring the stream
2063 * to open.
2064 */
2065 __u64 properties_ptr;
2066 };
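
/*
 * Illustrative sketch: opening an OA stream with periodic sampling. The
 * properties array is a flat list of (id, value) u64 pairs. "fd" is assumed and
 * "metrics_set_id" stands in for a configuration id obtained from sysfs or
 * DRM_IOCTL_I915_PERF_ADD_CONFIG; opening a stream without a context filter
 * typically requires elevated privileges.
 *
 *	__u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,	// 80ns * 2^17 ~= 10.5ms period
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = drmIoctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *	// stream_fd is a new file descriptor to read() perf records from.
 */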
2067
2068 /**
2069 * Enable data capture for a stream that was either opened in a disabled state
2070 * via I915_PERF_FLAG_DISABLED or was later disabled via
2071 * I915_PERF_IOCTL_DISABLE.
2072 *
2073 * It is intended to be cheaper to disable and enable a stream than it may be
2074 * to close and re-open a stream with the same configuration.
2075 *
2076 * It's undefined whether any pending data for the stream will be lost.
2077 *
2078 * This ioctl is available in perf revision 1.
2079 */
2080 #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
2081
2082 /**
2083 * Disable data capture for a stream.
2084 *
2085 * It is an error to try and read a stream that is disabled.
2086 *
2087 * This ioctl is available in perf revision 1.
2088 */
2089 #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
2090
2091 /**
2092 * Change metrics_set captured by a stream.
2093 *
2094 * If the stream is bound to a specific context, the configuration change
2095  * will be performed inline with that context such that it takes effect before
2096 * the next execbuf submission.
2097 *
2098 * Returns the previously bound metrics set id, or a negative error code.
2099 *
2100 * This ioctl is available in perf revision 2.
2101 */
2102 #define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
2103
2104 /**
2105 * Common to all i915 perf records
2106 */
2107 struct drm_i915_perf_record_header {
2108 __u32 type;
2109 __u16 pad;
2110 __u16 size;
2111 };
2112
2113 enum drm_i915_perf_record_type {
2114
2115 /**
2116 	 * Samples are the workhorse record type whose contents are extensible
2117 * and defined when opening an i915 perf stream based on the given
2118 * properties.
2119 *
2120 * Boolean properties following the naming convention
2121 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
2122 * every sample.
2123 *
2124 * The order of these sample properties given by userspace has no
2125 	 * effect on the ordering of data within a sample. The order is
2126 * documented here.
2127 *
2128 * struct {
2129 * struct drm_i915_perf_record_header header;
2130 *
2131 * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
2132 * };
2133 */
2134 DRM_I915_PERF_RECORD_SAMPLE = 1,
2135
2136 /*
2137 * Indicates that one or more OA reports were not written by the
2138 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
2139 * command collides with periodic sampling - which would be more likely
2140 * at higher sampling frequencies.
2141 */
2142 DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
2143
2144 /**
2145 * An error occurred that resulted in all pending OA reports being lost.
2146 */
2147 DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
2148
2149 DRM_I915_PERF_RECORD_MAX /* non-ABI */
2150 };
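
/*
 * Illustrative sketch: consuming records from an open perf stream. read()
 * returns a sequence of variable-sized records, each starting with a
 * drm_i915_perf_record_header whose size field includes the header itself.
 * "stream_fd" is assumed and "handle_oa_report" is a hypothetical consumer.
 *
 *	char buf[4096];
 *	int len = read(stream_fd, buf, sizeof(buf));
 *	for (int offset = 0; offset < len; ) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			handle_oa_report((const __u32 *)(header + 1));
 *		offset += header->size;
 *	}
 */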
2151
2152 /**
2153 * Structure to upload perf dynamic configuration into the kernel.
2154 */
2155 struct drm_i915_perf_oa_config {
2156 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
2157 char uuid[36];
2158
2159 __u32 n_mux_regs;
2160 __u32 n_boolean_regs;
2161 __u32 n_flex_regs;
2162
2163 /*
2164 * These fields are pointers to tuples of u32 values (register address,
2165 * value). For example the expected length of the buffer pointed by
2166 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
2167 */
2168 __u64 mux_regs_ptr;
2169 __u64 boolean_regs_ptr;
2170 __u64 flex_regs_ptr;
2171 };
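
/*
 * Illustrative sketch: uploading a dynamic OA configuration with
 * DRM_IOCTL_I915_PERF_ADD_CONFIG (defined earlier in this header). The register
 * pair and UUID below are placeholders; the ioctl normally requires elevated
 * privileges and, on success, returns the new metrics set id.
 *
 *	static const __u32 mux_pairs[] = { 0x9888, 0x10800000 };	// (addr, value)
 *	struct drm_i915_perf_oa_config cfg = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (uintptr_t)mux_pairs,
 *	};
 *	int metrics_set_id = drmIoctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
 */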
2172
2173 struct drm_i915_query_item {
2174 __u64 query_id;
2175 #define DRM_I915_QUERY_TOPOLOGY_INFO 1
2176 #define DRM_I915_QUERY_ENGINE_INFO 2
2177 #define DRM_I915_QUERY_PERF_CONFIG 3
2178 /* Must be kept compact -- no holes and well documented */
2179
2180 /*
2181 * When set to zero by userspace, this is filled with the size of the
2182 * data to be written at the data_ptr pointer. The kernel sets this
2183 * value to a negative value to signal an error on a particular query
2184 * item.
2185 */
2186 __s32 length;
2187
2188 /*
2189 * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
2190 *
2191 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
2192 * following :
2193 * - DRM_I915_QUERY_PERF_CONFIG_LIST
2194 * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
2195 	 * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
2196 */
2197 __u32 flags;
2198 #define DRM_I915_QUERY_PERF_CONFIG_LIST 1
2199 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
2200 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
2201
2202 /*
2203 	 * Data will be written at the location pointed to by data_ptr when the
2204 * value of length matches the length of the data to be written by the
2205 * kernel.
2206 */
2207 __u64 data_ptr;
2208 };
2209
2210 struct drm_i915_query {
2211 __u32 num_items;
2212
2213 /*
2214 * Unused for now. Must be cleared to zero.
2215 */
2216 __u32 flags;
2217
2218 /*
2219 * This points to an array of num_items drm_i915_query_item structures.
2220 */
2221 __u64 items_ptr;
2222 };
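
/*
 * Illustrative sketch: the usual two-pass pattern for DRM_IOCTL_I915_QUERY.
 * The first pass with length == 0 asks the kernel for the required size; the
 * second pass supplies a buffer of that size. "fd" is assumed; error handling
 * is omitted.
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query);	// item.length = required size
 *	struct drm_i915_query_topology_info *topo = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)topo;
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query);	// fills *topo
 */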
2223
2224 /*
2225 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
2226 *
2227 * data: contains the 3 pieces of information :
2228 *
2229 * - the slice mask with one bit per slice telling whether a slice is
2230 * available. The availability of slice X can be queried with the following
2231 * formula :
2232 *
2233 * (data[X / 8] >> (X % 8)) & 1
2234 *
2235 * - the subslice mask for each slice with one bit per subslice telling
2236 * whether a subslice is available. Gen12 has dual-subslices, which are
2237 * similar to two gen11 subslices. For gen12, this array represents dual-
2238 * subslices. The availability of subslice Y in slice X can be queried
2239 * with the following formula :
2240 *
2241 * (data[subslice_offset +
2242 * X * subslice_stride +
2243 * Y / 8] >> (Y % 8)) & 1
2244 *
2245 * - the EU mask for each subslice in each slice with one bit per EU telling
2246 * whether an EU is available. The availability of EU Z in subslice Y in
2247 * slice X can be queried with the following formula :
2248 *
2249 * (data[eu_offset +
2250 * (X * max_subslices + Y) * eu_stride +
2251 * Z / 8] >> (Z % 8)) & 1
2252 */
2253 struct drm_i915_query_topology_info {
2254 /*
2255 * Unused for now. Must be cleared to zero.
2256 */
2257 __u16 flags;
2258
2259 __u16 max_slices;
2260 __u16 max_subslices;
2261 __u16 max_eus_per_subslice;
2262
2263 /*
2264 * Offset in data[] at which the subslice masks are stored.
2265 */
2266 __u16 subslice_offset;
2267
2268 /*
2269 * Stride at which each of the subslice masks for each slice are
2270 * stored.
2271 */
2272 __u16 subslice_stride;
2273
2274 /*
2275 * Offset in data[] at which the EU masks are stored.
2276 */
2277 __u16 eu_offset;
2278
2279 /*
2280 * Stride at which each of the EU masks for each subslice are stored.
2281 */
2282 __u16 eu_stride;
2283
2284 __u8 data[];
2285 };
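
/*
 * Illustrative helpers applying the availability formulas above to a filled-in
 * drm_i915_query_topology_info (sketch only; the function names are not part of
 * the uAPI).
 *
 *	bool subslice_available(const struct drm_i915_query_topology_info *t,
 *				int slice, int subslice)
 *	{
 *		const __u8 *mask = t->data + t->subslice_offset +
 *				   slice * t->subslice_stride;
 *		return (mask[subslice / 8] >> (subslice % 8)) & 1;
 *	}
 *
 *	bool eu_available(const struct drm_i915_query_topology_info *t,
 *			  int slice, int subslice, int eu)
 *	{
 *		const __u8 *mask = t->data + t->eu_offset +
 *				   (slice * t->max_subslices + subslice) * t->eu_stride;
 *		return (mask[eu / 8] >> (eu % 8)) & 1;
 *	}
 */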
2286
2287 /**
2288 * struct drm_i915_engine_info
2289 *
2290 * Describes one engine and its capabilities as known to the driver.
2291 */
2292 struct drm_i915_engine_info {
2293 /** Engine class and instance. */
2294 struct i915_engine_class_instance engine;
2295
2296 /** Reserved field. */
2297 __u32 rsvd0;
2298
2299 /** Engine flags. */
2300 __u64 flags;
2301
2302 /** Capabilities of this engine. */
2303 __u64 capabilities;
2304 #define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
2305 #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
2306
2307 /** Reserved fields. */
2308 __u64 rsvd1[4];
2309 };
2310
2311 /**
2312 * struct drm_i915_query_engine_info
2313 *
2314 * Engine info query enumerates all engines known to the driver by filling in
2315 * an array of struct drm_i915_engine_info structures.
2316 */
2317 struct drm_i915_query_engine_info {
2318 /** Number of struct drm_i915_engine_info structs following. */
2319 __u32 num_engines;
2320
2321 /** MBZ */
2322 __u32 rsvd[3];
2323
2324 /** Marker for drm_i915_engine_info structures. */
2325 struct drm_i915_engine_info engines[];
2326 };
2327
2328 /*
2329 * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
2330 */
2331 struct drm_i915_query_perf_config {
2332 union {
2333 /*
2334 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
2335 		 * this field to the number of configurations available.
2336 */
2337 __u64 n_configs;
2338
2339 /*
2340 		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
2341 * i915 will use the value in this field as configuration
2342 * identifier to decide what data to write into config_ptr.
2343 */
2344 __u64 config;
2345
2346 /*
2347 		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
2348 * i915 will use the value in this field as configuration
2349 * identifier to decide what data to write into config_ptr.
2350 *
2351 * String formatted like "%08x-%04x-%04x-%04x-%012x"
2352 */
2353 char uuid[36];
2354 };
2355
2356 /*
2357 * Unused for now. Must be cleared to zero.
2358 */
2359 __u32 flags;
2360
2361 /*
2362 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
2363 * write an array of __u64 of configuration identifiers.
2364 *
2365 	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID or
2366 	 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct
2367 	 * drm_i915_perf_oa_config. If the following fields of
2368 	 * drm_i915_perf_oa_config are not set to 0, i915 will write into the
2369 	 * associated pointers the values submitted when the configuration was created:
2370 *
2371 * - n_mux_regs
2372 * - n_boolean_regs
2373 * - n_flex_regs
2374 */
2375 __u8 data[];
2376 };
2377
2378 #if defined(__cplusplus)
2379 }
2380 #endif
2381
2382 #endif /* _I915_DRM_H_ */