/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRM_H_
#define _I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying no more error
 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events to not be seen.
 *
 * I915_RESET_UEVENT - Generated just before an attempt to reset the GPU.
 *	The value supplied with the event is always 1. NOTE: Disabling reset
 *	via module parameter will cause this event to not be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/*
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. enum drm_i915_gem_engine_class provides a
 * classification of the role of the engine, which may be used when requesting
 * operations to be performed on a certain subset of engines, or for providing
 * information about that group.
 */
enum drm_i915_gem_engine_class {
	I915_ENGINE_CLASS_RENDER	= 0,
	I915_ENGINE_CLASS_COPY		= 1,
	I915_ENGINE_CLASS_VIDEO		= 2,
	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,

	I915_ENGINE_CLASS_INVALID	= -1
};

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_source/devices/i915
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))

#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)

#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
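
/*
 * Usage sketch (illustrative, not part of the uapi): sampling render
 * engine busyness through the i915 PMU. Assumes the dynamic PMU type id
 * has already been read from /sys/bus/event_source/devices/i915/type;
 * error handling is elided.
 *
 * #include <linux/perf_event.h>
 * #include <sys/syscall.h>
 * #include <unistd.h>
 *
 * static int open_render_busy_counter(int i915_pmu_type)
 * {
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type,
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *
 *	// i915 PMU counters are system-wide: pid == -1, cpu == 0
 *	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 * }
 */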

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY	0x1
#define I915_BOX_FLIP		0x2
#define I915_BOX_WAIT		0x4
#define I915_BOX_TEXTURE_LOAD	0x8
#define I915_BOX_LOST_CONTEXT	0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
 * i.e. [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as
 * offsets against DRM_COMMAND_BASE and should be in [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE	0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char *buf;		/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irqs:
 */
typedef struct drm_i915_irq_emit {
	int *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE		1
#define I915_PARAM_ALLOW_BATCHBUFFER	2
#define I915_PARAM_LAST_DISPATCH	3
#define I915_PARAM_CHIPSET_ID		4
#define I915_PARAM_HAS_GEM		5
#define I915_PARAM_NUM_FENCES_AVAIL	6
#define I915_PARAM_HAS_OVERLAY		7
#define I915_PARAM_HAS_PAGEFLIPPING	8
#define I915_PARAM_HAS_EXECBUF2		9
#define I915_PARAM_HAS_BSD		10
#define I915_PARAM_HAS_BLT		11
#define I915_PARAM_HAS_RELAXED_FENCING	12
#define I915_PARAM_HAS_COHERENT_RINGS	13
#define I915_PARAM_HAS_EXEC_CONSTANTS	14
#define I915_PARAM_HAS_RELAXED_DELTA	15
#define I915_PARAM_HAS_GEN7_SOL_RESET	16
#define I915_PARAM_HAS_LLC		17
#define I915_PARAM_HAS_ALIASING_PPGTT	18
#define I915_PARAM_HAS_WAIT_TIMEOUT	19
#define I915_PARAM_HAS_SEMAPHORES	20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	21
#define I915_PARAM_HAS_VEBOX		22
#define I915_PARAM_HAS_SECURE_BATCHES	23
#define I915_PARAM_HAS_PINNED_BATCHES	24
#define I915_PARAM_HAS_EXEC_NO_RELOC	25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT	26
#define I915_PARAM_HAS_WT		27
#define I915_PARAM_CMD_PARSER_VERSION	28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT	29
#define I915_PARAM_MMAP_VERSION		30
#define I915_PARAM_HAS_BSD2		31
#define I915_PARAM_REVISION		32
#define I915_PARAM_SUBSLICE_TOTAL	33
#define I915_PARAM_EU_TOTAL		34
#define I915_PARAM_HAS_GPU_RESET	35
#define I915_PARAM_HAS_RESOURCE_STREAMER	36
#define I915_PARAM_HAS_EXEC_SOFTPIN	37
#define I915_PARAM_HAS_POOLED_EU	38
#define I915_PARAM_MIN_EU_IN_POOL	39
#define I915_PARAM_MMAP_GTT_VERSION	40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user-defined execution
 * priorities, in which case the driver will attempt to execute batches in
 * priority order. The param returns a capability bitmask: nonzero implies
 * that the scheduler is enabled, with different features present according
 * to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)

#define I915_PARAM_HUC_STATUS		42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fencing --
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	45

#define I915_PARAM_SLICE_MASK		46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY	49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION	50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform, but from CNL onwards it
 * may vary depending on the part.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY	51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT	52

typedef struct drm_i915_getparam {
	__s32 param;
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int *value;
} drm_i915_getparam_t;
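
/*
 * Usage sketch (illustrative, not part of the uapi): querying a single
 * parameter with DRM_IOCTL_I915_GETPARAM on an open drm fd. Assumes
 * <sys/ioctl.h>; EINTR restarting is elided.
 *
 * static int i915_getparam(int fd, int param)
 * {
 *	int value = -1;
 *	drm_i915_getparam_t gp = {
 *		.param = param,
 *		.value = &value,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *		return -1;
 *
 *	return value;
 * }
 */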

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START	1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY	2
#define I915_SETPARAM_ALLOW_BATCHBUFFER		3
#define I915_SETPARAM_NUM_USED_FENCES		4

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};
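
/*
 * Usage sketch (illustrative, not part of the uapi): allocating a GEM
 * object and returning its handle; 0 signals failure since handles are
 * nonzero. Assumes <sys/ioctl.h>.
 *
 * static __u32 i915_gem_create(int fd, __u64 size)
 * {
 *	struct drm_i915_gem_create create = { .size = size };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *		return 0;
 *
 *	// size has been rounded up to a page-aligned allocation
 *	return create.handle;
 * }
 */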

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};
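
/*
 * Usage sketch (illustrative, not part of the uapi): a GTT mapping is
 * set up in two steps -- fetch the fake mmap offset for the object, then
 * hand it to mmap() on the same drm fd. Assumes <sys/ioctl.h> and
 * <sys/mman.h>; returns MAP_FAILED on error.
 *
 * static void *i915_mmap_gtt(int fd, __u32 handle, __u64 size)
 * {
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
 *		return MAP_FAILED;
 *
 *	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    fd, arg.offset);
 * }
 */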

struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;

	/** New read domains */
	__u32 read_domains;

	/** New write domain */
	__u32 write_domain;
};

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation. This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC		0x00000080
/** @} */

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
	 * the user with the GTT offset at which this object will be pinned.
	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
	 * presumed_offset of the object.
	 * During execbuffer2 the kernel populates it with the value of the
	 * current GTT offset of the object, for future presumed_offset writes.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
#define EXEC_OBJECT_WRITE		 (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED		 (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
/* The kernel implicitly tracks GPU activity on all GEM objects, and
 * synchronises operations with outstanding rendering. This includes
 * rendering on other devices if exported via dma-buf. However, sometimes
 * this tracking is too coarse and the user knows better. For example,
 * if the object is split into non-overlapping ranges shared between different
 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by the kernel assumes that each operation affects the whole object rather
 * than an individual range, causing needless synchronisation between clients.
 * The kernel will also forgo any CPU cache flushes prior to rendering from
 * the object as the client is expected to be also handling such domain
 * tracking.
 *
 * The kernel maintains the implicit tracking in order to manage resources
 * used by the GPU - this flag only disables the synchronisation prior to
 * rendering with this object in this execbuf.
 *
 * Opting out of implicit synchronisation requires the user to do its own
 * explicit tracking to avoid rendering corruption. See, for example,
 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
 */
#define EXEC_OBJECT_ASYNC		(1<<6)
/* Request that the contents of this execobject be copied into the error
 * state upon a GPU hang involving this batch for post-mortem debugging.
 * These buffers are recorded in no particular order as "user" in
 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
 * if the kernel supports this flag.
 */
#define EXEC_OBJECT_CAPTURE		(1<<7)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
	__u64 flags;

	union {
		__u64 rsvd1;
		__u64 pad_to_size;
	};
	__u64 rsvd2;
};

struct drm_i915_gem_exec_fence {
	/**
	 * User's handle for a drm_syncobj to wait on or signal.
	 */
	__u32 handle;

#define I915_EXEC_FENCE_WAIT            (1<<0)
#define I915_EXEC_FENCE_SIGNAL          (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
	__u32 flags;
};

struct drm_i915_gem_execbuffer2 {
	/**
	 * List of gem_exec_object2 structs
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/**
	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
	 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
	 * struct drm_i915_gem_exec_fence *fences.
	 */
	__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK	(7<<0)
#define I915_EXEC_DEFAULT	(0<<0)
#define I915_EXEC_RENDER	(1<<0)
#define I915_EXEC_BSD		(2<<0)
#define I915_EXEC_BLT		(3<<0)
#define I915_EXEC_VEBOX		(4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL	(0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE	(2<<6) /* gen4/5 only */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE		(1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED		(1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses, and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC		(1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT		(1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT	 (13)
#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 * the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER	(1<<15)

/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN		(1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed after use. (The fd is a regular
 * file descriptor and will be cleaned up on process termination. It holds
 * a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_files and passed either
 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
 * will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT		(1<<17)

/*
 * Traditionally the execbuf ioctl has only considered the final element in
 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction, and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
 * element).
 */
#define I915_EXEC_BATCH_FIRST		(1<<18)

/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
 * define an array of struct drm_i915_gem_exec_fence structures which specify
 * a set of dma fences to wait upon or signal.
 */
#define I915_EXEC_FENCE_ARRAY		(1<<19)

#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))

#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
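
/*
 * Usage sketch (illustrative, not part of the uapi): submitting a single
 * batch object on the render ring within a given context. The batch is
 * the last (here, only) execobject by default. Assumes <stdint.h> and
 * <sys/ioctl.h>; relocations and error handling are elided.
 *
 * static int i915_submit_batch(int fd, __u32 batch_handle, __u32 batch_len,
 *			        __u32 ctx_id)
 * {
 *	struct drm_i915_gem_exec_object2 obj = { .handle = batch_handle };
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (__u64)(uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_len,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 * }
 */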

struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};

struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status
	 *
	 * A return of 0 implies that the object is idle (after
	 * having flushed any pending activity), and a non-zero return that
	 * the object is still in-flight on the GPU. (The GPU has not yet
	 * signaled completion for all pending requests that reference the
	 * object.) An object is guaranteed to become idle eventually (so
	 * long as no new GPU commands are executed upon it). Due to the
	 * asynchronous nature of the hardware, an object reported
	 * as busy may become idle before the ioctl is completed.
	 *
	 * Furthermore, if the object is busy, which engine is busy is only
	 * provided as a guide. There are race conditions which prevent the
	 * report of which engines are busy from being always accurate.
	 * However, the converse is not true. If the object is idle, the
	 * result of the ioctl, that all engines are idle, is accurate.
	 *
	 * The returned dword is split into two fields to indicate both
	 * the engines on which the object is being read, and the
	 * engine on which it is currently being written (if any).
	 *
	 * The low word (bits 0:15) indicates if the object is being written
	 * to by any engine (there can only be one, as the GEM implicit
	 * synchronisation rules force writes to be serialised). Only the
	 * engine for the last write is reported.
	 *
	 * The high word (bits 16:31) is a bitmask of which engines are
	 * currently reading from the object. Multiple engines may be
	 * reading from the object simultaneously.
	 *
	 * The value of each engine is the same as specified in the
	 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
	 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
	 * the I915_EXEC_RENDER engine for execution, and so it is never
	 * reported as active itself. Some hardware may have parallel
	 * execution engines, e.g. multiple media engines, which are
	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
	 * so are not separately reported for busyness.
	 *
	 * Caveat emptor:
	 * Only the boolean result of this query is reliable; that is, whether
	 * the object is idle or busy. The report of which engines are busy
	 * should be only used as a heuristic.
	 */
	__u32 busy;
};
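
/*
 * Usage sketch (illustrative, not part of the uapi): decoding the busy
 * word returned by DRM_IOCTL_I915_GEM_BUSY. Assumes <stdio.h> and
 * <sys/ioctl.h>.
 *
 * static void i915_print_busy(int fd, __u32 handle)
 * {
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
 *		return;
 *
 *	printf("writer engine: %u, reader mask: 0x%x\n",
 *	       busy.busy & 0xffff,	// engine of the last write, if any
 *	       busy.busy >> 16);	// bitmask of reading engines
 * }
 */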

/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with cpu caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE		0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with cpu caches and furthermore the data is cached in
 * last-level caches shared between cpu cores and the gpu GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED		1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY		2

struct drm_i915_gem_caching {
	/**
	 * Handle of the buffer to set/get the caching level of.
	 */
	__u32 handle;

	/**
	 * Caching level to apply or return value
	 *
	 * bits0-15 are for generic caching control (i.e. the above defined
	 * values). bits16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7).
	 */
	__u32 caching;
};

#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2
#define I915_TILING_LAST	I915_TILING_Y

#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17		6
#define I915_BIT_6_SWIZZLE_9_10_17	7

struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};
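
/*
 * Usage sketch (illustrative, not part of the uapi): requesting X tiling
 * for an object; the kernel may demote the request to I915_TILING_NONE
 * and reports the mode actually chosen. Assumes <sys/ioctl.h>.
 *
 * static int i915_set_tiling_x(int fd, __u32 handle, __u32 stride)
 * {
 *	struct drm_i915_gem_set_tiling set = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = stride,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set))
 *		return -1;
 *
 *	return set.tiling_mode;	// tiling layout chosen by the kernel
 * }
 */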

struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping whilst bound.
	 */
	__u32 phys_swizzle_mode;
};

struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};

struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested **/
	__u32 crtc_id;

	/** pipe of requested CRTC **/
	__u32 pipe;
};

#define I915_MADV_WILLNEED	0
#define I915_MADV_DONTNEED	1
#define __I915_MADV_PURGED	2 /* internal state */

struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 * or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};

/* flags */
#define I915_OVERLAY_TYPE_MASK		0xff
#define I915_OVERLAY_YUV_PLANAR		0x01
#define I915_OVERLAY_YUV_PACKED		0x02
#define I915_OVERLAY_RGB		0x03

#define I915_OVERLAY_DEPTH_MASK		0xff00
#define I915_OVERLAY_RGB24		0x1000
#define I915_OVERLAY_RGB16		0x2000
#define I915_OVERLAY_RGB15		0x3000
#define I915_OVERLAY_YUV422		0x0100
#define I915_OVERLAY_YUV411		0x0200
#define I915_OVERLAY_YUV420		0x0300
#define I915_OVERLAY_YUV410		0x0400

#define I915_OVERLAY_SWAP_MASK		0xff0000
#define I915_OVERLAY_NO_SWAP		0x000000
#define I915_OVERLAY_UV_SWAP		0x010000
#define I915_OVERLAY_Y_SWAP		0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000

#define I915_OVERLAY_FLAGS_MASK		0xff000000
#define I915_OVERLAY_ENABLE		0x01000000

struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y;		/* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y;		/* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};

/* flags */
#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
struct drm_intel_overlay_attrs {
	__u32 flags;
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};

/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple. Both source and destination
 * color keying is allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent. All other pixels will
 * be displayed on top of the primary plane. For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
						* flags==0 to disable colorkeying.
						*/
#define I915_SET_COLORKEY_DESTINATION	(1<<1)
#define I915_SET_COLORKEY_SOURCE	(1<<2)
struct drm_intel_sprite_colorkey {
	__u32 plane_id;
	__u32 min_value;
	__u32 channel_mask;
	__u32 max_value;
	__u32 flags;
};

struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait; returns time remaining. */
	__s64 timeout_ns;
};
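
/*
 * Usage sketch (illustrative, not part of the uapi): waiting up to one
 * second for all outstanding rendering to a buffer object. Assumes
 * <sys/ioctl.h>; returns 0 on idle, -1 with errno == ETIME on timeout.
 *
 * static int i915_wait_bo(int fd, __u32 handle)
 * {
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000ll,	// rewritten with time remaining
 *	};
 *
 *	return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 * }
 */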

struct drm_i915_gem_context_create {
	/* output: id of new context */
	__u32 ctx_id;
	__u32 pad;
};

struct drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;
};

struct drm_i915_reg_read {
	/*
	 * Register offset.
	 * For 64-bit wide registers where the upper 32 bits don't immediately
	 * follow the lower 32 bits, the offset of the lower 32 bits must
	 * be specified.
	 */
	__u64 offset;
#define I915_REG_READ_8B_WA (1ul << 0)

	__u64 val; /* Return value */
};
/* Known registers:
 *
 * Render engine timestamp - 0x2358 + 64bit - gen7+
 * - Note this register returns an invalid value if read using the default
 *   single-instruction 8-byte read; to work around that, pass the
 *   I915_REG_READ_8B_WA flag in the offset field.
 *
 */

struct drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;

	/* All resets since boot/module reload, for all contexts */
	__u32 reset_count;

	/* Number of batches lost when active in GPU, for this context */
	__u32 batch_active;

	/* Number of batches lost pending for execution, for this context */
	__u32 batch_pending;

	__u32 pad;
};

struct drm_i915_gem_userptr {
	__u64 user_ptr;
	__u64 user_size;
	__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
};

struct drm_i915_gem_context_param {
	__u32 ctx_id;
	__u32 size;
	__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
#define I915_CONTEXT_PARAM_BANNABLE	0x5
#define I915_CONTEXT_PARAM_PRIORITY	0x6
#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
#define   I915_CONTEXT_DEFAULT_PRIORITY		0
#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
/*
 * When using the following param, value should be a pointer to
 * drm_i915_gem_context_param_sseu.
 */
#define I915_CONTEXT_PARAM_SSEU		0x7

/*
 * Not all clients may want to attempt automatic recovery of a context after
 * a hang (for example, some clients may only submit very small incremental
 * batches relying on known logical state of previous batches which will never
 * recover correctly and each attempt will hang), and so would prefer that
 * the context is forever banned instead.
 *
 * If set to false (0), after a reset, subsequent (and in flight) rendering
 * from this context is discarded, and the client will need to create a new
 * context to use instead.
 *
 * If set to true (1), the kernel will automatically attempt to recover the
 * context by skipping the hanging batch and executing the next batch starting
 * from the default context state (discarding the incomplete logical context
 * state lost due to the reset).
 *
 * On creation, all new contexts are marked as recoverable.
 */
#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
	__u64 value;
};
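
/*
 * Usage sketch (illustrative, not part of the uapi): raising a context's
 * scheduler priority. Only meaningful when I915_PARAM_HAS_SCHEDULER
 * reports I915_SCHEDULER_CAP_PRIORITY; boosting above the default may
 * require privileges. Assumes <sys/ioctl.h>.
 *
 * static int i915_set_context_priority(int fd, __u32 ctx_id, int prio)
 * {
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = (__u64)(__s64)prio,	// sign-extended into value
 *	};
 *
 *	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 * }
 */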
1516
/**
 * Context SSEU programming
 *
 * It may be necessary, for either functional or performance reasons, to
 * configure a context to run with a reduced number of SSEU (where SSEU
 * stands for Slice/Sub-slice/EU).
 *
 * This is done by applying an SSEU configuration, using the below
 * struct drm_i915_gem_context_param_sseu, for every supported engine which
 * userspace intends to use.
 *
 * Not all GPUs or engines support this functionality, in which case an
 * error code of -ENODEV will be returned.
 *
 * Also, the flexibility of possible SSEU configuration permutations varies
 * between GPU generations and software-imposed limitations. Requesting an
 * unsupported combination will return an error code of -EINVAL.
 *
 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
 * favour of a single global setting.
 */
struct drm_i915_gem_context_param_sseu {
	/*
	 * Engine class & instance to be configured or queried.
	 */
	__u16 engine_class;
	__u16 engine_instance;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * Mask of slices to enable for the context. Valid values are a subset
	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
	 */
	__u64 slice_mask;

	/*
	 * Mask of subslices to enable for the context. Valid values are a
	 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
	 */
	__u64 subslice_mask;

	/*
	 * Minimum/Maximum number of EUs to enable per subslice for the
	 * context. min_eus_per_subslice must be less than or equal to
	 * max_eus_per_subslice.
	 */
	__u16 min_eus_per_subslice;
	__u16 max_eus_per_subslice;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 rsvd;
};

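/*
 * Example (illustrative; the mask and EU counts below are hypothetical and
 * should really be derived from a prior GETPARAM or topology query):
 * restricting the render engine (class 0, instance 0) of a context to a
 * single slice.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine_class = 0,
 *		.engine_instance = 0,
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0x7,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param cp = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (__u64)(uintptr_t)&sseu,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &cp);
 */
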
enum drm_i915_oa_format {
	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
	I915_OA_FORMAT_A29,	    /* HSW only */
	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8,	    /* HSW only */
	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
	I915_OA_FORMAT_C4_B8,	    /* HSW+ */

	/* Gen8+ */
	I915_OA_FORMAT_A12,
	I915_OA_FORMAT_A12_B8_C8,
	I915_OA_FORMAT_A32u40_A4u32_B8_C8,

	I915_OA_FORMAT_MAX	    /* non-ABI */
};

enum drm_i915_perf_property_id {
	/**
	 * Open the stream for a specific context handle (as used with
	 * execbuffer2). A stream opened for a specific context this way
	 * won't typically require root privileges.
	 */
	DRM_I915_PERF_PROP_CTX_HANDLE = 1,

	/**
	 * A value of 1 requests the inclusion of raw OA unit reports as
	 * part of stream samples.
	 */
	DRM_I915_PERF_PROP_SAMPLE_OA,

	/**
	 * The value specifies which set of OA unit metrics should be
	 * configured, defining the contents of any OA unit reports.
	 */
	DRM_I915_PERF_PROP_OA_METRICS_SET,

	/**
	 * The value specifies the size and layout of OA unit reports.
	 */
	DRM_I915_PERF_PROP_OA_FORMAT,

	/**
	 * Specifying this property implicitly requests periodic OA unit
	 * sampling and (at least on Haswell) the sampling frequency is derived
	 * from this exponent as follows:
	 *
	 *   80ns * 2^(period_exponent + 1)
	 */
	DRM_I915_PERF_PROP_OA_EXPONENT,

	DRM_I915_PERF_PROP_MAX /* non-ABI */
};

struct drm_i915_perf_open_param {
	__u32 flags;
#define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
#define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
#define I915_PERF_FLAG_DISABLED (1<<2)

	/** The number of u64 (id, value) pairs */
	__u32 num_properties;

	/**
	 * Pointer to array of u64 (id, value) pairs configuring the stream
	 * to open.
	 */
	__u64 properties_ptr;
};

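/*
 * Example (a minimal sketch): opening an OA stream. "metrics_set" is an
 * assumed input (on real systems it is typically read from a sysfs "id"
 * file under the device's metrics directory), and error handling is
 * omitted. On success the ioctl returns a new stream file descriptor.
 *
 *	__u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *		.properties_ptr = (__u64)(uintptr_t)props,
 *	};
 *	int stream_fd = drmIoctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */
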
/**
 * Enable data capture for a stream that was either opened in a disabled state
 * via I915_PERF_FLAG_DISABLED or was later disabled via
 * I915_PERF_IOCTL_DISABLE.
 *
 * It is intended to be cheaper to disable and enable a stream than it may be
 * to close and re-open a stream with the same configuration.
 *
 * It's undefined whether any pending data for the stream will be lost.
 */
#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)

/**
 * Disable data capture for a stream.
 *
 * It is an error to try to read a stream that is disabled.
 */
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)

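/*
 * Example (illustrative): pausing and then resuming capture on a stream fd
 * previously returned by DRM_IOCTL_I915_PERF_OPEN.
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 */
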
/**
 * Common to all i915 perf records
 */
struct drm_i915_perf_record_header {
	__u32 type;
	__u16 pad;
	__u16 size;
};

enum drm_i915_perf_record_type {

	/**
	 * Samples are the workhorse record type whose contents are extensible
	 * and defined when opening an i915 perf stream based on the given
	 * properties.
	 *
	 * Boolean properties following the naming convention
	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
	 * every sample.
	 *
	 * The order of these sample properties given by userspace has no
	 * effect on the ordering of data within a sample. The order is
	 * documented here.
	 *
	 * struct {
	 *     struct drm_i915_perf_record_header header;
	 *
	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
	 * };
	 */
	DRM_I915_PERF_RECORD_SAMPLE = 1,

	/*
	 * Indicates that one or more OA reports were not written by the
	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
	 * command collides with periodic sampling - which would be more likely
	 * at higher sampling frequencies.
	 */
	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,

	/**
	 * An error occurred that resulted in all pending OA reports being lost.
	 */
	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,

	DRM_I915_PERF_RECORD_MAX /* non-ABI */
};

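/*
 * Example (a sketch): iterating over the records returned by a read() of
 * the stream fd. Each record starts with drm_i915_perf_record_header, and
 * header.size covers the header plus its payload; handle_sample() is a
 * hypothetical consumer.
 *
 *	char buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	for (ssize_t off = 0; off < len; ) {
 *		const struct drm_i915_perf_record_header *h =
 *			(const struct drm_i915_perf_record_header *)(buf + off);
 *		if (h->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			handle_sample(h + 1, h->size - sizeof(*h));
 *		off += h->size;
 *	}
 */
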
/**
 * Structure to upload perf dynamic configuration into the kernel.
 */
struct drm_i915_perf_oa_config {
	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
	char uuid[36];

	__u32 n_mux_regs;
	__u32 n_boolean_regs;
	__u32 n_flex_regs;

	/*
	 * These fields are pointers to tuples of u32 values (register address,
	 * value). For example the expected length of the buffer pointed to by
	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
	 */
	__u64 mux_regs_ptr;
	__u64 boolean_regs_ptr;
	__u64 flex_regs_ptr;
};

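/*
 * Example (illustrative; the UUID and the register address/value pair are
 * made up): uploading a dynamic OA configuration. On success,
 * DRM_IOCTL_I915_PERF_ADD_CONFIG returns the id of the new metrics set.
 *
 *	__u32 mux_regs[] = { 0x9888, 0x10800000 };
 *	struct drm_i915_perf_oa_config cfg = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (__u64)(uintptr_t)mux_regs,
 *	};
 *	int metrics_set = drmIoctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
 */
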
struct drm_i915_query_item {
	__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1

	/*
	 * When set to zero by userspace, this is filled with the size of the
	 * data to be written at the data_ptr pointer. The kernel sets this
	 * value to a negative value to signal an error on a particular query
	 * item.
	 */
	__s32 length;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * Data will be written at the location pointed to by data_ptr when
	 * the value of length matches the length of the data to be written
	 * by the kernel.
	 */
	__u64 data_ptr;
};

struct drm_i915_query {
	__u32 num_items;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * This points to an array of num_items drm_i915_query_item structures.
	 */
	__u64 items_ptr;
};

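/*
 * Example (a sketch of the intended two-step pattern, error checking
 * omitted): the first DRM_IOCTL_I915_QUERY call, issued with length = 0,
 * fills in the required size; the caller then allocates a buffer and
 * repeats the call to have it filled.
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (__u64)(uintptr_t)&item,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);
 *	struct drm_i915_query_topology_info *info = calloc(1, item.length);
 *	item.data_ptr = (__u64)(uintptr_t)info;
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);
 */
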
/*
 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
 *
 * data: contains 3 pieces of information:
 *
 * - the slice mask with one bit per slice telling whether a slice is
 *   available. The availability of slice X can be queried with the following
 *   formula:
 *
 *     (data[X / 8] >> (X % 8)) & 1
 *
 * - the subslice mask for each slice with one bit per subslice telling
 *   whether a subslice is available. The availability of subslice Y in slice
 *   X can be queried with the following formula:
 *
 *     (data[subslice_offset +
 *           X * subslice_stride +
 *           Y / 8] >> (Y % 8)) & 1
 *
 * - the EU mask for each subslice in each slice with one bit per EU telling
 *   whether an EU is available. The availability of EU Z in subslice Y in
 *   slice X can be queried with the following formula:
 *
 *     (data[eu_offset +
 *           (X * max_subslices + Y) * eu_stride +
 *           Z / 8] >> (Z % 8)) & 1
 */
struct drm_i915_query_topology_info {
	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u16 flags;

	__u16 max_slices;
	__u16 max_subslices;
	__u16 max_eus_per_subslice;

	/*
	 * Offset in data[] at which the subslice masks are stored.
	 */
	__u16 subslice_offset;

	/*
	 * Stride at which each of the subslice masks for each slice are
	 * stored.
	 */
	__u16 subslice_stride;

	/*
	 * Offset in data[] at which the EU masks are stored.
	 */
	__u16 eu_offset;

	/*
	 * Stride at which each of the EU masks for each subslice are stored.
	 */
	__u16 eu_stride;

	__u8 data[];
};

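/*
 * Example (illustrative helpers, not part of the uAPI): the availability
 * formulas above expressed as functions over a filled-in
 * drm_i915_query_topology_info.
 *
 *	static inline int slice_available(
 *		const struct drm_i915_query_topology_info *ti, int s)
 *	{
 *		return (ti->data[s / 8] >> (s % 8)) & 1;
 *	}
 *
 *	static inline int subslice_available(
 *		const struct drm_i915_query_topology_info *ti, int s, int ss)
 *	{
 *		return (ti->data[ti->subslice_offset + s * ti->subslice_stride +
 *				 ss / 8] >> (ss % 8)) & 1;
 *	}
 *
 *	static inline int eu_available(
 *		const struct drm_i915_query_topology_info *ti,
 *		int s, int ss, int eu)
 *	{
 *		return (ti->data[ti->eu_offset +
 *				 (s * ti->max_subslices + ss) * ti->eu_stride +
 *				 eu / 8] >> (eu % 8)) & 1;
 *	}
 */
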
#if defined(__cplusplus)
}
#endif

#endif /* _I915_DRM_H_ */