/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <signal.h>
#include <stdarg.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include <i915_drm.h>

#include "intel_aub.h"

#include "dev/gen_device_info.h"
#include "util/macros.h"

#ifndef ALIGN
#define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
#endif

#define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
#define MI_LRI_FORCE_POSTED (1<<12)

#define MI_BATCH_BUFFER_END (0xA << 23)

#define min(a, b) ({ \
      __typeof(a) _a = (a); \
      __typeof(b) _b = (b); \
      _a < _b ? _a : _b; \
   })

#define HWS_PGA_RCSUNIT 0x02080
#define HWS_PGA_VCSUNIT0 0x12080
#define HWS_PGA_BCSUNIT 0x22080

#define GFX_MODE_RCSUNIT 0x0229c
#define GFX_MODE_VCSUNIT0 0x1229c
#define GFX_MODE_BCSUNIT 0x2229c

#define EXECLIST_SUBMITPORT_RCSUNIT 0x02230
#define EXECLIST_SUBMITPORT_VCSUNIT0 0x12230
#define EXECLIST_SUBMITPORT_BCSUNIT 0x22230

#define EXECLIST_STATUS_RCSUNIT 0x02234
#define EXECLIST_STATUS_VCSUNIT0 0x12234
#define EXECLIST_STATUS_BCSUNIT 0x22234

#define EXECLIST_SQ_CONTENTS0_RCSUNIT 0x02510
#define EXECLIST_SQ_CONTENTS0_VCSUNIT0 0x12510
#define EXECLIST_SQ_CONTENTS0_BCSUNIT 0x22510

#define EXECLIST_CONTROL_RCSUNIT 0x02550
#define EXECLIST_CONTROL_VCSUNIT0 0x12550
#define EXECLIST_CONTROL_BCSUNIT 0x22550

#define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)

#define PTE_SIZE 4
#define GEN8_PTE_SIZE 8

#define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
#define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)

#define RING_SIZE (1 * 4096)
#define PPHWSP_SIZE (1 * 4096)
#define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * 4096)

#define STATIC_GGTT_MAP_START 0

#define RENDER_RING_ADDR STATIC_GGTT_MAP_START
#define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)

#define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
#define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)

#define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
#define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)

#define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
#define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)

#define CONTEXT_FLAGS (0x229) /* Normal Priority | L3-LLC Coherency |
                                 Legacy Context with no 64 bit VA support | Valid */

#define RENDER_CONTEXT_DESCRIPTOR ((uint64_t)1 << 32 | RENDER_CONTEXT_ADDR | CONTEXT_FLAGS)
#define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 32 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
#define VIDEO_CONTEXT_DESCRIPTOR ((uint64_t)3 << 32 | VIDEO_CONTEXT_ADDR | CONTEXT_FLAGS)

static const uint32_t render_context_init[GEN10_LR_CONTEXT_RENDER_SIZE /
                                          sizeof(uint32_t)] = {
   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED,
   0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
   0x2034 /* RING_HEAD */, 0,
   0x2030 /* RING_TAIL */, 0,
   0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR,
   0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
   0x2168 /* BB_HEAD_U */, 0,
   0x2140 /* BB_HEAD_L */, 0,
   0x2110 /* BB_STATE */, 0,
   0x211C /* SECOND_BB_HEAD_U */, 0,
   0x2114 /* SECOND_BB_HEAD_L */, 0,
   0x2118 /* SECOND_BB_STATE */, 0,
   0x21C0 /* BB_PER_CTX_PTR */, 0,
   0x21C4 /* RCS_INDIRECT_CTX */, 0,
   0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
   /* MI_NOOP */
   0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
   0x23A8 /* CTX_TIMESTAMP */, 0,
   0x228C /* PDP3_UDW */, 0,
   0x2288 /* PDP3_LDW */, 0,
   0x2284 /* PDP2_UDW */, 0,
   0x2280 /* PDP2_LDW */, 0,
   0x227C /* PDP1_UDW */, 0,
   0x2278 /* PDP1_LDW */, 0,
   0x2274 /* PDP0_UDW */, 0,
   0x2270 /* PDP0_LDW */, 0,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(1),
   0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
   MI_BATCH_BUFFER_END
};

static const uint32_t blitter_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
                                           sizeof(uint32_t)] = {
   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
   0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
   0x22034 /* RING_HEAD */, 0,
   0x22030 /* RING_TAIL */, 0,
   0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR,
   0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
   0x22168 /* BB_HEAD_U */, 0,
   0x22140 /* BB_HEAD_L */, 0,
   0x22110 /* BB_STATE */, 0,
   0x2211C /* SECOND_BB_HEAD_U */, 0,
   0x22114 /* SECOND_BB_HEAD_L */, 0,
   0x22118 /* SECOND_BB_STATE */, 0,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
   0x223A8 /* CTX_TIMESTAMP */, 0,
   0x2228C /* PDP3_UDW */, 0,
   0x22288 /* PDP3_LDW */, 0,
   0x22284 /* PDP2_UDW */, 0,
   0x22280 /* PDP2_LDW */, 0,
   0x2227C /* PDP1_UDW */, 0,
   0x22278 /* PDP1_LDW */, 0,
   0x22274 /* PDP0_UDW */, 0,
   0x22270 /* PDP0_LDW */, 0,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

   MI_BATCH_BUFFER_END
};

static const uint32_t video_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
                                         sizeof(uint32_t)] = {
   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
   0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
   0x1C034 /* RING_HEAD */, 0,
   0x1C030 /* RING_TAIL */, 0,
   0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR,
   0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
   0x1C168 /* BB_HEAD_U */, 0,
   0x1C140 /* BB_HEAD_L */, 0,
   0x1C110 /* BB_STATE */, 0,
   0x1C11C /* SECOND_BB_HEAD_U */, 0,
   0x1C114 /* SECOND_BB_HEAD_L */, 0,
   0x1C118 /* SECOND_BB_STATE */, 0,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
   0x1C3A8 /* CTX_TIMESTAMP */, 0,
   0x1C28C /* PDP3_UDW */, 0,
   0x1C288 /* PDP3_LDW */, 0,
   0x1C284 /* PDP2_UDW */, 0,
   0x1C280 /* PDP2_LDW */, 0,
   0x1C27C /* PDP1_UDW */, 0,
   0x1C278 /* PDP1_LDW */, 0,
   0x1C274 /* PDP0_UDW */, 0,
   0x1C270 /* PDP0_LDW */, 0,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

   MI_BATCH_BUFFER_END
};

static int close_init_helper(int fd);
static int ioctl_init_helper(int fd, unsigned long request, ...);

static int (*libc_close)(int fd) = close_init_helper;
static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;

static int drm_fd = -1;
static char *filename = NULL;
static FILE *files[2] = { NULL, NULL };
static struct gen_device_info devinfo = {0};
static int verbose = 0;
static bool device_override;
static uint32_t device;
static int addr_bits = 0;

#define MAX_BO_COUNT 64 * 1024

struct bo {
   uint32_t size;
   uint64_t offset;
   void *map;
};

static struct bo *bos;

#define DRM_MAJOR 226

#ifndef DRM_I915_GEM_USERPTR

#define DRM_I915_GEM_USERPTR 0x33
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)

struct drm_i915_gem_userptr {
   __u64 user_ptr;
   __u64 user_size;
   __u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
   /**
    * Returned handle for the object.
    *
    * Object handles are nonzero.
    */
   __u32 handle;
};

#endif

/* We set bit 0 in the map pointer for userptr BOs so we know not to
 * munmap them on DRM_IOCTL_GEM_CLOSE.
 */
#define USERPTR_FLAG 1
#define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
#define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )

#ifndef I915_EXEC_BATCH_FIRST
#define I915_EXEC_BATCH_FIRST (1 << 18)
#endif

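/* Print an error message and raise SIGTRAP when `cond` is true, so a
 * debugger stops right at the failing check instead of exiting.
 */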
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
   va_list args;

   if (!cond)
      return;

   va_start(args, format);
   vfprintf(stderr, format, args);
   va_end(args);

   raise(SIGTRAP);
}

static struct bo *
get_bo(uint32_t handle)
{
   struct bo *bo;

   fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
   bo = &bos[handle];

   return bo;
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   return (v + a - 1) & ~(a - 1);
}

static void
dword_out(uint32_t data)
{
   for (int i = 0; i < ARRAY_SIZE (files); i++) {
      if (files[i] == NULL)
         continue;

      fail_if(fwrite(&data, 1, 4, files[i]) == 0,
              "Writing to output failed\n");
   }
}

static void
data_out(const void *data, size_t size)
{
   if (size == 0)
      return;

   for (int i = 0; i < ARRAY_SIZE (files); i++) {
      if (files[i] == NULL)
         continue;

      fail_if(fwrite(data, 1, size, files[i]) == 0,
              "Writing to output failed\n");
   }
}

static uint32_t
gtt_size(void)
{
   return NUM_PT_ENTRIES * (addr_bits > 32 ? GEN8_PTE_SIZE : PTE_SIZE);
}

static void
mem_trace_memory_write_header_out(uint64_t addr, uint32_t len,
                                  uint32_t addr_space)
{
   uint32_t dwords = ALIGN(len, sizeof(uint32_t)) / sizeof(uint32_t);

   dword_out(CMD_MEM_TRACE_MEMORY_WRITE | (5 + dwords - 1));
   dword_out(addr & 0xFFFFFFFF); /* addr lo */
   dword_out(addr >> 32); /* addr hi */
   dword_out(addr_space); /* gtt */
   dword_out(len);
}

static void
register_write_out(uint32_t addr, uint32_t value)
{
   uint32_t dwords = 1;

   dword_out(CMD_MEM_TRACE_REGISTER_WRITE | (5 + dwords - 1));
   dword_out(addr);
   dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
             AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   dword_out(0xFFFFFFFF); /* mask lo */
   dword_out(0x00000000); /* mask hi */
   dword_out(value);
}

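/* Emit GGTT PTEs covering [start, end).  Entries are written in chunks
 * that stay within one 2MiB-aligned block (one page worth of page-table
 * entries), and each 64-bit PTE maps its page to the same address with
 * the read/write and present bits set.
 */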
static void
gen8_emit_ggtt_pte_for_range(uint64_t start, uint64_t end)
{
   uint64_t entry_addr;
   uint64_t page_num;
   uint64_t end_aligned = align_u64(end, 4096);

   if (start >= end || end > (1ull << 32))
      return;

   entry_addr = start & ~(4096 - 1);
   do {
      uint64_t last_page_entry, num_entries;

      page_num = entry_addr >> 21;
      last_page_entry = min((page_num + 1) << 21, end_aligned);
      num_entries = (last_page_entry - entry_addr) >> 12;
      mem_trace_memory_write_header_out(
         entry_addr >> 9, num_entries * GEN8_PTE_SIZE,
         AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY);
      while (num_entries-- > 0) {
         dword_out((entry_addr & ~(4096 - 1)) |
                   3 /* read/write | present */);
         dword_out(entry_addr >> 32);
         entry_addr += 4096;
      }
   } while (entry_addr < end);
}

/**
 * Sets bits `start` through `end` - 1 in the bitmap array.
 */
static void
set_bitmap_range(uint32_t *bitmap, uint32_t start, uint32_t end)
{
   uint32_t pos = start;
   while (pos < end) {
      const uint32_t bit = 1 << (pos & 0x1f);
      if (bit == 1 && (end - pos) > 32) {
         bitmap[pos >> 5] = 0xffffffff;
         pos += 32;
      } else {
         bitmap[pos >> 5] |= bit;
         pos++;
      }
   }
}

/**
 * Finds the next `set` (or clear) bit in the bitmap array.
 *
 * The search starts at `*start` and only checks until `end` - 1.
 *
 * If found, returns true, and the found bit index in `*start`.
 */
static bool
find_bitmap_bit(uint32_t *bitmap, bool set, uint32_t *start, uint32_t end)
{
   uint32_t pos = *start;
   const uint32_t neg_dw = set ? 0 : -1;
   while (pos < end) {
      const uint32_t dw = bitmap[pos >> 5];
      const uint32_t bit = 1 << (pos & 0x1f);
      if (!!(dw & bit) == set) {
         *start = pos;
         return true;
      } else if (bit == 1 && dw == neg_dw)
         pos += 32;
      else
         pos++;
   }
   return false;
}

/**
 * Finds a range of clear bits within the bitmap array.
 *
 * The search starts at `*start` and only checks until `*end` - 1.
 *
 * If found, returns true, and `*start` and `*end` are set for the
 * range of clear bits.
 */
static bool
find_bitmap_clear_bit_range(uint32_t *bitmap, uint32_t *start, uint32_t *end)
{
   if (find_bitmap_bit(bitmap, false, start, *end)) {
      uint32_t found_end = *start;
      if (find_bitmap_bit(bitmap, true, &found_end, *end))
         *end = found_end;
      return true;
   }
   return false;
}

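/* Map a GGTT range, tracking already-mapped 4KiB pages in a lazily
 * allocated bitmap (one bit per page of the 4GiB GGTT) so PTEs are
 * emitted at most once per page.
 */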
static void
gen8_map_ggtt_range(uint64_t start, uint64_t end)
{
   uint32_t pos1, pos2, end_pos;
   static uint32_t *bitmap = NULL;
   if (bitmap == NULL) {
      /* 4GiB (32-bits) of 4KiB pages (12-bits) in dwords (5-bits) */
      bitmap = calloc(1 << (32 - 12 - 5), sizeof(*bitmap));
      if (bitmap == NULL)
         return;
   }

   pos1 = start >> 12;
   end_pos = (end + 4096 - 1) >> 12;
   while (pos1 < end_pos) {
      pos2 = end_pos;
      if (!find_bitmap_clear_bit_range(bitmap, &pos1, &pos2))
         break;

      if (verbose)
         printf("MAPPING 0x%08lx-0x%08lx\n",
                (uint64_t)pos1 << 12, (uint64_t)pos2 << 12);
      gen8_emit_ggtt_pte_for_range((uint64_t)pos1 << 12,
                                   (uint64_t)pos2 << 12);
      set_bitmap_range(bitmap, (uint64_t)pos1, (uint64_t)pos2);
      pos1 = pos2;
   }
}

static void
gen8_map_base_size(uint64_t base, uint64_t size)
{
   gen8_map_ggtt_range(base, base + size);
}

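/* Gen10+ AUB header: emit the memtrace version packet, then write out
 * the static GGTT layout (ring buffer, PPHWSP and initialized logical
 * ring context for the render, blitter and video engines) and enable
 * execlist submission on each engine.
 */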
static void
gen10_write_header(void)
{
   char app_name[8 * 4];
   int app_name_len, dwords;

   app_name_len =
      snprintf(app_name, sizeof(app_name), "PCI-ID=0x%X %s", device,
               program_invocation_short_name);
   app_name_len = ALIGN(app_name_len, sizeof(uint32_t));

   dwords = 5 + app_name_len / sizeof(uint32_t);
   dword_out(CMD_MEM_TRACE_VERSION | (dwords - 1));
   dword_out(AUB_MEM_TRACE_VERSION_FILE_VERSION);
   dword_out(AUB_MEM_TRACE_VERSION_DEVICE_CNL |
             AUB_MEM_TRACE_VERSION_METHOD_PHY);
   dword_out(0); /* version */
   dword_out(0); /* version */
   data_out(app_name, app_name_len);

   /* RENDER_RING */
   gen8_map_base_size(RENDER_RING_ADDR, RING_SIZE);
   mem_trace_memory_write_header_out(RENDER_RING_ADDR, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* RENDER_PPHWSP */
   gen8_map_base_size(RENDER_CONTEXT_ADDR,
                      PPHWSP_SIZE + sizeof(render_context_init));
   mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR,
                                     PPHWSP_SIZE +
                                     sizeof(render_context_init),
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* RENDER_CONTEXT */
   data_out(render_context_init, sizeof(render_context_init));

   /* BLITTER_RING */
   gen8_map_base_size(BLITTER_RING_ADDR, RING_SIZE);
   mem_trace_memory_write_header_out(BLITTER_RING_ADDR, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* BLITTER_PPHWSP */
   gen8_map_base_size(BLITTER_CONTEXT_ADDR,
                      PPHWSP_SIZE + sizeof(blitter_context_init));
   mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR,
                                     PPHWSP_SIZE +
                                     sizeof(blitter_context_init),
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* BLITTER_CONTEXT */
   data_out(blitter_context_init, sizeof(blitter_context_init));

   /* VIDEO_RING */
   gen8_map_base_size(VIDEO_RING_ADDR, RING_SIZE);
   mem_trace_memory_write_header_out(VIDEO_RING_ADDR, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* VIDEO_PPHWSP */
   gen8_map_base_size(VIDEO_CONTEXT_ADDR,
                      PPHWSP_SIZE + sizeof(video_context_init));
   mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR,
                                     PPHWSP_SIZE +
                                     sizeof(video_context_init),
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* VIDEO_CONTEXT */
   data_out(video_context_init, sizeof(video_context_init));

   register_write_out(HWS_PGA_RCSUNIT, RENDER_CONTEXT_ADDR);
   register_write_out(HWS_PGA_VCSUNIT0, VIDEO_CONTEXT_ADDR);
   register_write_out(HWS_PGA_BCSUNIT, BLITTER_CONTEXT_ADDR);

   register_write_out(GFX_MODE_RCSUNIT, 0x80008000 /* execlist enable */);
   register_write_out(GFX_MODE_VCSUNIT0, 0x80008000 /* execlist enable */);
   register_write_out(GFX_MODE_BCSUNIT, 0x80008000 /* execlist enable */);
}

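/* Legacy (pre-Gen10) AUB header: version packet, application name and
 * PCI-ID comment, followed by a pre-populated GTT covering the 64MiB
 * memory map.
 */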
static void write_header(void)
{
   char app_name[8 * 4];
   char comment[16];
   int comment_len, comment_dwords, dwords;
   uint32_t entry = 0x200003;

   comment_len = snprintf(comment, sizeof(comment), "PCI-ID=0x%x", device);
   comment_dwords = ((comment_len + 3) / 4);

   /* Start with a (required) version packet. */
   dwords = 13 + comment_dwords;
   dword_out(CMD_AUB_HEADER | (dwords - 2));
   dword_out((4 << AUB_HEADER_MAJOR_SHIFT) |
             (0 << AUB_HEADER_MINOR_SHIFT));

   /* Next comes a 32-byte application name. */
   strncpy(app_name, program_invocation_short_name, sizeof(app_name));
   app_name[sizeof(app_name) - 1] = 0;
   data_out(app_name, sizeof(app_name));

   dword_out(0); /* timestamp */
   dword_out(0); /* timestamp */
   dword_out(comment_len);
   data_out(comment, comment_dwords * 4);

   /* Set up the GTT. The max we can handle is 64M */
   dword_out(CMD_AUB_TRACE_HEADER_BLOCK | ((addr_bits > 32 ? 6 : 5) - 2));
   dword_out(AUB_TRACE_MEMTYPE_GTT_ENTRY |
             AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE);
   dword_out(0); /* subtype */
   dword_out(0); /* offset */
   dword_out(gtt_size()); /* size */
   if (addr_bits > 32)
      dword_out(0);
   for (uint32_t i = 0; i < NUM_PT_ENTRIES; i++) {
      dword_out(entry + 0x1000 * i);
      if (addr_bits > 32)
         dword_out(0);
   }
}

/**
 * Break up large objects into multiple writes.  Otherwise a 128kb VBO
 * would overflow the 16 bits of size field in the packet header and
 * everything goes badly after that.
 */
static void
aub_write_trace_block(uint32_t type, void *virtual, uint32_t size, uint64_t gtt_offset)
{
   uint32_t block_size;
   uint32_t subtype = 0;
   static const char null_block[8 * 4096];

   for (uint32_t offset = 0; offset < size; offset += block_size) {
      block_size = size - offset;

      if (block_size > 8 * 4096)
         block_size = 8 * 4096;

      if (devinfo.gen >= 10) {
         mem_trace_memory_write_header_out(gtt_offset + offset,
                                           block_size,
                                           AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
      } else {
         dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
                   ((addr_bits > 32 ? 6 : 5) - 2));
         dword_out(AUB_TRACE_MEMTYPE_GTT |
                   type | AUB_TRACE_OP_DATA_WRITE);
         dword_out(subtype);
         dword_out(gtt_offset + offset);
         dword_out(align_u32(block_size, 4));
         if (addr_bits > 32)
            dword_out((gtt_offset + offset) >> 32);
      }

      if (virtual)
         data_out(((char *) GET_PTR(virtual)) + offset, block_size);
      else
         data_out(null_block, block_size);

      /* Pad to a multiple of 4 bytes. */
      data_out(null_block, -block_size & 3);
   }
}

static void
write_reloc(void *p, uint64_t v)
{
   if (addr_bits > 32) {
      /* From the Broadwell PRM Vol. 2a,
       * MI_LOAD_REGISTER_MEM::MemoryAddress:
       *
       *   "This field specifies the address of the memory
       *   location where the register value specified in the
       *   DWord above will read from.  The address specifies
       *   the DWord location of the data.  Range =
       *   GraphicsVirtualAddress[63:2] for a DWord register
       *   GraphicsAddress [63:48] are ignored by the HW and
       *   assumed to be in correct canonical form [63:48] ==
       *   [47]."
       *
       * In practice, this will always mean the top bits are zero
       * because of the GTT size limitation of the aubdump tool.
       */
      const int shift = 63 - 47;
      *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
   } else {
      *(uint32_t *)p = v;
   }
}

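/* Gen10+ batch submission: write an MI_BATCH_BUFFER_START pointing at
 * the batch into the engine's static ring, update the ring head/tail in
 * the saved context image, submit the context descriptor through the
 * ELSP (or ELSQ plus the control register on Gen11+), and poll the
 * execlist status register.
 */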
static void
aub_dump_execlist(uint64_t batch_offset, int ring_flag)
{
   uint32_t ring_addr;
   uint64_t descriptor;
   uint32_t elsp_reg;
   uint32_t elsq_reg;
   uint32_t status_reg;
   uint32_t control_reg;

   switch (ring_flag) {
   case I915_EXEC_DEFAULT:
   case I915_EXEC_RENDER:
      ring_addr = RENDER_RING_ADDR;
      descriptor = RENDER_CONTEXT_DESCRIPTOR;
      elsp_reg = EXECLIST_SUBMITPORT_RCSUNIT;
      elsq_reg = EXECLIST_SQ_CONTENTS0_RCSUNIT;
      status_reg = EXECLIST_STATUS_RCSUNIT;
      control_reg = EXECLIST_CONTROL_RCSUNIT;
      break;
   case I915_EXEC_BSD:
      ring_addr = VIDEO_RING_ADDR;
      descriptor = VIDEO_CONTEXT_DESCRIPTOR;
      elsp_reg = EXECLIST_SUBMITPORT_VCSUNIT0;
      elsq_reg = EXECLIST_SQ_CONTENTS0_VCSUNIT0;
      status_reg = EXECLIST_STATUS_VCSUNIT0;
      control_reg = EXECLIST_CONTROL_VCSUNIT0;
      break;
   case I915_EXEC_BLT:
      ring_addr = BLITTER_RING_ADDR;
      descriptor = BLITTER_CONTEXT_DESCRIPTOR;
      elsp_reg = EXECLIST_SUBMITPORT_BCSUNIT;
      elsq_reg = EXECLIST_SQ_CONTENTS0_BCSUNIT;
      status_reg = EXECLIST_STATUS_BCSUNIT;
      control_reg = EXECLIST_CONTROL_BCSUNIT;
      break;
   }

   mem_trace_memory_write_header_out(ring_addr, 16,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
   dword_out(AUB_MI_BATCH_BUFFER_START | (3 - 2));
   dword_out(batch_offset & 0xFFFFFFFF);
   dword_out(batch_offset >> 32);
   dword_out(0 /* MI_NOOP */);

   mem_trace_memory_write_header_out(ring_addr + 8192 + 20, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
   dword_out(0); /* RING_BUFFER_HEAD */
   mem_trace_memory_write_header_out(ring_addr + 8192 + 28, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL);
   dword_out(16); /* RING_BUFFER_TAIL */

   if (devinfo.gen >= 11) {
      register_write_out(elsq_reg, descriptor & 0xFFFFFFFF);
      register_write_out(elsq_reg + sizeof(uint32_t), descriptor >> 32);
      register_write_out(control_reg, 1);
   } else {
      register_write_out(elsp_reg, 0);
      register_write_out(elsp_reg, 0);
      register_write_out(elsp_reg, descriptor >> 32);
      register_write_out(elsp_reg, descriptor & 0xFFFFFFFF);
   }

   dword_out(CMD_MEM_TRACE_REGISTER_POLL | (5 + 1 - 1));
   dword_out(status_reg);
   dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
             AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   if (devinfo.gen >= 11) {
      dword_out(0x00000001); /* mask lo */
      dword_out(0x00000000); /* mask hi */
      dword_out(0x00000001);
   } else {
      dword_out(0x00000010); /* mask lo */
      dword_out(0x00000000); /* mask hi */
      dword_out(0x00000000);
   }
}

static void
aub_dump_ringbuffer(uint64_t batch_offset, uint64_t offset, int ring_flag)
{
   uint32_t ringbuffer[4096];
   unsigned aub_mi_bbs_len;
   int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
   int ring_count = 0;

   if (ring_flag == I915_EXEC_BSD)
      ring = AUB_TRACE_TYPE_RING_PRB1;
   else if (ring_flag == I915_EXEC_BLT)
      ring = AUB_TRACE_TYPE_RING_PRB2;

   /* Make a ring buffer to execute our batchbuffer. */
   memset(ringbuffer, 0, sizeof(ringbuffer));

   aub_mi_bbs_len = addr_bits > 32 ? 3 : 2;
   ringbuffer[ring_count] = AUB_MI_BATCH_BUFFER_START | (aub_mi_bbs_len - 2);
   write_reloc(&ringbuffer[ring_count + 1], batch_offset);
   ring_count += aub_mi_bbs_len;

   /* Write out the ring.  This appears to trigger execution of
    * the ring in the simulator.
    */
   dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
             ((addr_bits > 32 ? 6 : 5) - 2));
   dword_out(AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
   dword_out(0); /* general/surface subtype */
   dword_out(offset);
   dword_out(ring_count * 4);
   if (addr_bits > 32)
      dword_out(offset >> 32);

   data_out(ringbuffer, ring_count * 4);
}

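/* Apply the execbuffer relocations to a private copy of the BO contents
 * so the data written to the trace matches what the kernel would have
 * patched in.
 */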
static void *
relocate_bo(struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
            const struct drm_i915_gem_exec_object2 *obj)
{
   const struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   const struct drm_i915_gem_relocation_entry *relocs =
      (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
   void *relocated;
   int handle;

   relocated = malloc(bo->size);
   fail_if(relocated == NULL, "intel_aubdump: out of memory\n");
   memcpy(relocated, GET_PTR(bo->map), bo->size);
   for (size_t i = 0; i < obj->relocation_count; i++) {
      fail_if(relocs[i].offset >= bo->size, "intel_aubdump: reloc outside bo\n");

      if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
         handle = exec_objects[relocs[i].target_handle].handle;
      else
         handle = relocs[i].target_handle;

      write_reloc(((char *)relocated) + relocs[i].offset,
                  get_bo(handle)->offset + relocs[i].delta);
   }

   return relocated;
}

static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   int ret;

   do {
      ret = libc_ioctl(fd, request, argp);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

static void *
gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
{
   struct drm_i915_gem_mmap mmap = {
      .handle = handle,
      .offset = offset,
      .size = size
   };

   if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
      return MAP_FAILED;

   return (void *)(uintptr_t) mmap.addr_ptr;
}

static int
gem_get_param(int fd, uint32_t param)
{
   int value;
   drm_i915_getparam_t gp = {
      .param = param,
      .value = &value
   };

   if (gem_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1)
      return 0;

   return value;
}

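/* Called for each execbuffer2 ioctl: on first use, identify the device
 * and write the AUB header; then assign a GGTT offset to every BO
 * (honoring EXEC_OBJECT_PINNED), dump their (relocated) contents, and
 * finally trigger execution through the execlist (Gen10+) or legacy
 * ring buffer path.
 */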
static void
dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
{
   struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
   uint32_t offset;
   struct drm_i915_gem_exec_object2 *obj;
   struct bo *bo, *batch_bo;
   int batch_index;
   void *data;

   /* We can't do this at open time as we're not yet authenticated. */
   if (device == 0) {
      device = gem_get_param(fd, I915_PARAM_CHIPSET_ID);
      fail_if(device == 0 || devinfo.gen == 0, "failed to identify chipset\n");
   }
   if (devinfo.gen == 0) {
      fail_if(!gen_get_device_info(device, &devinfo),
              "failed to identify chipset=0x%x\n", device);

      addr_bits = devinfo.gen >= 8 ? 48 : 32;

      if (devinfo.gen >= 10)
         gen10_write_header();
      else
         write_header();

      if (verbose)
         printf("[intel_aubdump: running, "
                "output file %s, chipset id 0x%04x, gen %d]\n",
                filename, device, devinfo.gen);
   }

   if (devinfo.gen >= 10)
      offset = STATIC_GGTT_MAP_END;
   else
      offset = gtt_size();

   if (verbose)
      printf("Dumping execbuffer2:\n");

   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(obj->handle);

      /* If bo->size == 0, this means they passed us an invalid
       * buffer. The kernel will reject it and so should we.
       */
      if (bo->size == 0) {
         if (verbose)
            printf("BO #%d is invalid!\n", obj->handle);
         return;
      }

      if (obj->flags & EXEC_OBJECT_PINNED) {
         bo->offset = obj->offset;
         if (verbose)
            printf("BO #%d (%dB) pinned @ 0x%lx\n",
                   obj->handle, bo->size, bo->offset);
      } else {
         if (obj->alignment != 0)
            offset = align_u32(offset, obj->alignment);
         bo->offset = offset;
         if (verbose)
            printf("BO #%d (%dB) @ 0x%lx\n", obj->handle,
                   bo->size, bo->offset);
         offset = align_u32(offset + bo->size + 4095, 4096);
      }

      if (bo->map == NULL && bo->size > 0)
         bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
      fail_if(bo->map == MAP_FAILED, "intel_aubdump: bo mmap failed\n");

      if (devinfo.gen >= 10)
         gen8_map_ggtt_range(bo->offset, bo->offset + bo->size);
   }

   batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
      execbuffer2->buffer_count - 1;
   batch_bo = get_bo(exec_objects[batch_index].handle);
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(obj->handle);

      if (obj->relocation_count > 0)
         data = relocate_bo(bo, execbuffer2, obj);
      else
         data = bo->map;

      if (bo == batch_bo) {
         aub_write_trace_block(AUB_TRACE_TYPE_BATCH,
                               data, bo->size, bo->offset);
      } else {
         aub_write_trace_block(AUB_TRACE_TYPE_NOTYPE,
                               data, bo->size, bo->offset);
      }
      if (data != bo->map)
         free(data);
   }

   if (devinfo.gen >= 10) {
      aub_dump_execlist(batch_bo->offset +
                        execbuffer2->batch_start_offset, ring_flag);
   } else {
      /* Dump ring buffer */
      aub_dump_ringbuffer(batch_bo->offset +
                          execbuffer2->batch_start_offset, offset,
                          ring_flag);
   }

   for (int i = 0; i < ARRAY_SIZE(files); i++) {
      if (files[i] != NULL)
         fflush(files[i]);
   }

   if (device_override &&
       (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
      struct drm_i915_gem_exec_fence *fences =
         (void*)(uintptr_t)execbuffer2->cliprects_ptr;
      for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
         if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
            struct drm_syncobj_array arg = {
               .handles = (uintptr_t)&fences[i].handle,
               .count_handles = 1,
               .pad = 0,
            };
            libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
         }
      }
   }
}

static void
add_new_bo(int handle, uint64_t size, void *map)
{
   struct bo *bo = &bos[handle];

   fail_if(handle >= MAX_BO_COUNT, "intel_aubdump: bo handle out of range\n");
   fail_if(size == 0, "intel_aubdump: bo size is invalid\n");

   bo->size = size;
   bo->map = map;
}

static void
remove_bo(int handle)
{
   struct bo *bo = get_bo(handle);

   if (bo->map && !IS_USERPTR(bo->map))
      munmap(bo->map, bo->size);
   bo->size = 0;
   bo->map = NULL;
}

__attribute__ ((visibility ("default"))) int
close(int fd)
{
   if (fd == drm_fd)
      drm_fd = -1;

   return libc_close(fd);
}

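/* Spawn the comma-separated command given in the "command" option with
 * a pipe connected to its stdin, so the AUB stream can be fed to
 * another tool while it is being written.
 */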
static FILE *
launch_command(char *command)
{
   int i = 0, fds[2];
   char **args = calloc(strlen(command), sizeof(char *));
   char *iter = command;

   args[i++] = iter = command;

   while ((iter = strstr(iter, ",")) != NULL) {
      *iter = '\0';
      iter += 1;
      args[i++] = iter;
   }

   if (pipe(fds) == -1)
      return NULL;

   switch (fork()) {
   case 0:
      dup2(fds[0], 0);
      fail_if(execvp(args[0], args) == -1,
              "intel_aubdump: failed to launch child command\n");
      return NULL;

   default:
      free(args);
      return fdopen(fds[1], "w");

   case -1:
      return NULL;
   }
}

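/* One-time setup: parse key=value options ("verbose", "device", "file",
 * "command") from file descriptor 3, presumably set up by the
 * intel_aubdump wrapper, and allocate the BO tracking array.
 */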
static void
maybe_init(void)
{
   static bool initialized = false;
   FILE *config;
   char *key, *value;

   if (initialized)
      return;

   initialized = true;

   config = fdopen(3, "r");
   while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
      if (!strcmp(key, "verbose")) {
         verbose = 1;
      } else if (!strcmp(key, "device")) {
         fail_if(sscanf(value, "%i", &device) != 1,
                 "intel_aubdump: failed to parse device id '%s'",
                 value);
         device_override = true;
      } else if (!strcmp(key, "file")) {
         filename = strdup(value);
         files[0] = fopen(filename, "w+");
         fail_if(files[0] == NULL,
                 "intel_aubdump: failed to open file '%s'\n",
                 filename);
      } else if (!strcmp(key, "command")) {
         files[1] = launch_command(value);
         fail_if(files[1] == NULL,
                 "intel_aubdump: failed to launch command '%s'\n",
                 value);
      } else {
         fprintf(stderr, "intel_aubdump: unknown option '%s'\n", key);
      }

      free(key);
      free(value);
   }
   fclose(config);

   bos = calloc(MAX_BO_COUNT, sizeof(bos[0]));
   fail_if(bos == NULL, "intel_aubdump: out of memory\n");
}

#define LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR \
   DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)

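/* Interposed ioctl(2): once a DRM file descriptor is recognized
 * (character device with the DRM major number), GEM ioctls are
 * intercepted to track buffer objects and to dump every execbuffer2;
 * everything else is forwarded to the real ioctl.
 */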
__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;
   int ret;
   struct stat buf;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
       drm_fd != fd && fstat(fd, &buf) == 0 &&
       (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
      drm_fd = fd;
      if (verbose)
         printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd);
   }

   if (fd == drm_fd) {
      maybe_init();

      switch (request) {
      case DRM_IOCTL_I915_GETPARAM: {
         struct drm_i915_getparam *getparam = argp;

         if (device_override && getparam->param == I915_PARAM_CHIPSET_ID) {
            *getparam->value = device;
            return 0;
         }

         ret = libc_ioctl(fd, request, argp);

         /* If the application looks up chipset_id
          * (they typically do), we'll piggy-back on
          * their ioctl and store the id for later
          * use. */
         if (getparam->param == I915_PARAM_CHIPSET_ID)
            device = *getparam->value;

         return ret;
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER: {
         static bool once;
         if (!once) {
            fprintf(stderr, "intel_aubdump: "
                    "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
            once = true;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR: {
         dump_execbuffer2(fd, argp);
         if (device_override)
            return 0;

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CREATE: {
         struct drm_i915_gem_create *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_USERPTR: {
         struct drm_i915_gem_userptr *userptr = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(userptr->handle, userptr->user_size,
                       (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));
         return ret;
      }

      case DRM_IOCTL_GEM_CLOSE: {
         struct drm_gem_close *close = argp;

         remove_bo(close->handle);

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_GEM_OPEN: {
         struct drm_gem_open *open = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(open->handle, open->size, NULL);

         return ret;
      }

      case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
         struct drm_prime_handle *prime = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            off_t size;

            size = lseek(prime->fd, 0, SEEK_END);
            fail_if(size == -1, "intel_aubdump: failed to get prime bo size\n");
            add_new_bo(prime->handle, size, NULL);
         }

         return ret;
      }

      default:
         return libc_ioctl(fd, request, argp);
      }
   } else {
      return libc_ioctl(fd, request, argp);
   }
}

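/* Resolve the real libc close() and ioctl() with dlsym(RTLD_NEXT, ...)
 * the first time either interposer is called.
 */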
static void
init(void)
{
   libc_close = dlsym(RTLD_NEXT, "close");
   libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
   fail_if(libc_close == NULL || libc_ioctl == NULL,
           "intel_aubdump: failed to get libc ioctl or close\n");
}

static int
close_init_helper(int fd)
{
   init();
   return libc_close(fd);
}

static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   init();
   return libc_ioctl(fd, request, argp);
}

static void __attribute__ ((destructor))
fini(void)
{
   free(filename);
   for (int i = 0; i < ARRAY_SIZE(files); i++) {
      if (files[i] != NULL)
         fclose(files[i]);
   }
   free(bos);
}