intel: tools: remove drm-uapi defines
[mesa.git] / src / intel / tools / intel_dump_gpu.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <signal.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/mman.h>
39 #include <dlfcn.h>
40 #include <i915_drm.h>
41
42 #include "intel_aub.h"
43
44 #include "dev/gen_device_info.h"
45 #include "util/macros.h"
46
#ifndef ALIGN
#define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
#endif

/* MI command helpers for building context images and ring contents. */
#define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
#define MI_LRI_FORCE_POSTED       (1<<12)

#define MI_BATCH_NON_SECURE_I965 (1 << 8)

#define MI_BATCH_BUFFER_END (0xA << 23)

/* Single-evaluation min/max (GCC statement expressions). */
#define min(a, b) ({                            \
      __typeof(a) _a = (a);                     \
      __typeof(b) _b = (b);                     \
      _a < _b ? _a : _b;                        \
   })

#define max(a, b) ({                            \
      __typeof(a) _a = (a);                     \
      __typeof(b) _b = (b);                     \
      _a > _b ? _a : _b;                        \
   })

/* Per-engine MMIO register offsets: render (0x0xxxx), video (0x1xxxx)
 * and blitter (0x2xxxx) units.
 */
#define HWS_PGA_RCSUNIT      0x02080
#define HWS_PGA_VCSUNIT0     0x12080
#define HWS_PGA_BCSUNIT      0x22080

#define GFX_MODE_RCSUNIT     0x0229c
#define GFX_MODE_VCSUNIT0    0x1229c
#define GFX_MODE_BCSUNIT     0x2229c

#define EXECLIST_SUBMITPORT_RCSUNIT  0x02230
#define EXECLIST_SUBMITPORT_VCSUNIT0 0x12230
#define EXECLIST_SUBMITPORT_BCSUNIT  0x22230

#define EXECLIST_STATUS_RCSUNIT      0x02234
#define EXECLIST_STATUS_VCSUNIT0     0x12234
#define EXECLIST_STATUS_BCSUNIT      0x22234

#define EXECLIST_SQ_CONTENTS0_RCSUNIT  0x02510
#define EXECLIST_SQ_CONTENTS0_VCSUNIT0 0x12510
#define EXECLIST_SQ_CONTENTS0_BCSUNIT  0x22510

#define EXECLIST_CONTROL_RCSUNIT     0x02550
#define EXECLIST_CONTROL_VCSUNIT0    0x12550
#define EXECLIST_CONTROL_BCSUNIT     0x22550

/* Total address space the dumper manages. */
#define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)

#define PTE_SIZE 4
#define GEN8_PTE_SIZE 8

#define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
#define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)

/* Sizes (in pages) of the ring buffers, per-process HW status pages and
 * per-gen logical ring context images written into the AUB file.
 */
#define RING_SIZE         (1 * 4096)
#define PPHWSP_SIZE       (1 * 4096)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * 4096)
#define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
#define GEN9_LR_CONTEXT_RENDER_SIZE  (22 * 4096)
#define GEN8_LR_CONTEXT_RENDER_SIZE  (20 * 4096)
#define GEN8_LR_CONTEXT_OTHER_SIZE   (2 * 4096)


/* Static GGTT layout: [ring | PPHWSP+context] for render, blitter and
 * video engines laid out back to back starting at address 0; the PML4
 * page sits immediately after.
 */
#define STATIC_GGTT_MAP_START 0

#define RENDER_RING_ADDR STATIC_GGTT_MAP_START
#define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)

#define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
#define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)

#define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
#define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)

#define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
#define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)

#define PML4_PHYS_ADDR ((uint64_t)(STATIC_GGTT_MAP_END))

#define CONTEXT_FLAGS (0x339)   /* Normal Priority | L3-LLC Coherency |
                                 * PPGTT Enabled |
                                 * Legacy Context with 64 bit VA support |
                                 * Valid
                                 */

#define RENDER_CONTEXT_DESCRIPTOR  ((uint64_t)1 << 62 | RENDER_CONTEXT_ADDR  | CONTEXT_FLAGS)
#define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 62 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
#define VIDEO_CONTEXT_DESCRIPTOR   ((uint64_t)3 << 62 | VIDEO_CONTEXT_ADDR   | CONTEXT_FLAGS)
136
/* Golden render logical-ring-context image written after the render
 * PPHWSP page.  Sized for gen9, the largest of the render context
 * layouts we support; the image programs ring state, the PDP registers
 * pointing at PML4_PHYS_ADDR, and power/clock state via MI_LRI packets.
 */
static const uint32_t render_context_init[GEN9_LR_CONTEXT_RENDER_SIZE / /* Choose the largest */
                                          sizeof(uint32_t)] = {
   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED,
   0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
   0x2034 /* RING_HEAD */, 0,
   0x2030 /* RING_TAIL */, 0,
   0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR,
   0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
   0x2168 /* BB_HEAD_U */, 0,
   0x2140 /* BB_HEAD_L */, 0,
   0x2110 /* BB_STATE */, 0,
   0x211C /* SECOND_BB_HEAD_U */, 0,
   0x2114 /* SECOND_BB_HEAD_L */, 0,
   0x2118 /* SECOND_BB_STATE */, 0,
   0x21C0 /* BB_PER_CTX_PTR */, 0,
   0x21C4 /* RCS_INDIRECT_CTX */, 0,
   0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
   /* MI_NOOP */
   0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
   0x23A8 /* CTX_TIMESTAMP */, 0,
   0x228C /* PDP3_UDW */, 0,
   0x2288 /* PDP3_LDW */, 0,
   0x2284 /* PDP2_UDW */, 0,
   0x2280 /* PDP2_LDW */, 0,
   0x227C /* PDP1_UDW */, 0,
   0x2278 /* PDP1_LDW */, 0,
   0x2274 /* PDP0_UDW */, PML4_PHYS_ADDR >> 32,
   0x2270 /* PDP0_LDW */, PML4_PHYS_ADDR,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(1),
   0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
   MI_BATCH_BUFFER_END
};
177
/* Golden blitter (BCS) context image; same structure as the render
 * image but with BCS-unit register offsets (0x22xxx) and no
 * render-specific registers.
 */
static const uint32_t blitter_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
                                           sizeof(uint32_t)] = {
   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
   0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
   0x22034 /* RING_HEAD */, 0,
   0x22030 /* RING_TAIL */, 0,
   0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR,
   0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
   0x22168 /* BB_HEAD_U */, 0,
   0x22140 /* BB_HEAD_L */, 0,
   0x22110 /* BB_STATE */, 0,
   0x2211C /* SECOND_BB_HEAD_U */, 0,
   0x22114 /* SECOND_BB_HEAD_L */, 0,
   0x22118 /* SECOND_BB_STATE */, 0,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
   0x223A8 /* CTX_TIMESTAMP */, 0,
   0x2228C /* PDP3_UDW */, 0,
   0x22288 /* PDP3_LDW */, 0,
   0x22284 /* PDP2_UDW */, 0,
   0x22280 /* PDP2_LDW */, 0,
   0x2227C /* PDP1_UDW */, 0,
   0x22278 /* PDP1_LDW */, 0,
   0x22274 /* PDP0_UDW */, PML4_PHYS_ADDR >> 32,
   0x22270 /* PDP0_LDW */, PML4_PHYS_ADDR,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

   MI_BATCH_BUFFER_END
};
212
/* Golden video (VCS) context image; same structure as the blitter image
 * but with VCS-unit register offsets (0x1Cxxx).
 */
static const uint32_t video_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
                                         sizeof(uint32_t)] = {
   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
   0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
   0x1C034 /* RING_HEAD */, 0,
   0x1C030 /* RING_TAIL */, 0,
   0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR,
   0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
   0x1C168 /* BB_HEAD_U */, 0,
   0x1C140 /* BB_HEAD_L */, 0,
   0x1C110 /* BB_STATE */, 0,
   0x1C11C /* SECOND_BB_HEAD_U */, 0,
   0x1C114 /* SECOND_BB_HEAD_L */, 0,
   0x1C118 /* SECOND_BB_STATE */, 0,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
   0x1C3A8 /* CTX_TIMESTAMP */, 0,
   0x1C28C /* PDP3_UDW */, 0,
   0x1C288 /* PDP3_LDW */, 0,
   0x1C284 /* PDP2_UDW */, 0,
   0x1C280 /* PDP2_LDW */, 0,
   0x1C27C /* PDP1_UDW */, 0,
   0x1C278 /* PDP1_LDW */, 0,
   0x1C274 /* PDP0_UDW */, PML4_PHYS_ADDR >> 32,
   0x1C270 /* PDP0_LDW */, PML4_PHYS_ADDR,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

   MI_BATCH_BUFFER_END
};
247
static int close_init_helper(int fd);
static int ioctl_init_helper(int fd, unsigned long request, ...);

/* Interposed libc entry points.  They start out pointing at init
 * helpers which dlsym() the real functions on first use (definitions
 * are later in this file).
 */
static int (*libc_close)(int fd) = close_init_helper;
static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;

static int drm_fd = -1;                        /* the intercepted DRM device fd */
static char *filename = NULL;                  /* AUB output file name (config key "file") */
static FILE *files[2] = { NULL, NULL };        /* [0]=output file, [1]=pipe to "command" */
static struct gen_device_info devinfo = {0};   /* filled in on first execbuffer */
static int verbose = 0;
static bool device_override;                   /* true when PCI id came from config, not the kernel */
static uint32_t device;                        /* PCI chipset id */
static int addr_bits = 0;                      /* 48 on gen8+, 32 before */

#define MAX_BO_COUNT 64 * 1024

/* Shadow state for each GEM buffer object, indexed by GEM handle. */
struct bo {
   uint32_t size;
   uint64_t offset;   /* GTT/PPGTT address assigned by the dumper */
   void *map;         /* CPU mapping (possibly tagged, see below) */
};

static struct bo *bos;

#define DRM_MAJOR 226

/* We set bit 0 in the map pointer for userptr BOs so we know not to
 * munmap them on DRM_IOCTL_GEM_CLOSE.
 */
#define USERPTR_FLAG 1
#define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
#define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
281
282 static inline bool use_execlists(void)
283 {
284 return devinfo.gen >= 8;
285 }
286
/* If @cond is true, print the printf-style message to stderr and raise
 * SIGTRAP (breaks into a debugger if attached).  No-op when @cond is
 * false.
 */
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
   if (cond) {
      va_list ap;

      va_start(ap, format);
      vfprintf(stderr, format, ap);
      va_end(ap);

      raise(SIGTRAP);
   }
}
301
302 static struct bo *
303 get_bo(uint32_t handle)
304 {
305 struct bo *bo;
306
307 fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
308 bo = &bos[handle];
309
310 return bo;
311 }
312
/* Round @v up to the next multiple of @a (@a must be a power of two). */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   const uint32_t mask = a - 1;

   return (v + mask) & ~mask;
}
318
319 static void
320 dword_out(uint32_t data)
321 {
322 for (int i = 0; i < ARRAY_SIZE (files); i++) {
323 if (files[i] == NULL)
324 continue;
325
326 fail_if(fwrite(&data, 1, 4, files[i]) == 0,
327 "Writing to output failed\n");
328 }
329 }
330
331 static void
332 data_out(const void *data, size_t size)
333 {
334 if (size == 0)
335 return;
336
337 for (int i = 0; i < ARRAY_SIZE (files); i++) {
338 if (files[i] == NULL)
339 continue;
340
341 fail_if(fwrite(data, 1, size, files[i]) == 0,
342 "Writing to output failed\n");
343 }
344 }
345
346 static uint32_t
347 gtt_size(void)
348 {
349 return NUM_PT_ENTRIES * (addr_bits > 32 ? GEN8_PTE_SIZE : PTE_SIZE);
350 }
351
352 static void
353 mem_trace_memory_write_header_out(uint64_t addr, uint32_t len,
354 uint32_t addr_space)
355 {
356 uint32_t dwords = ALIGN(len, sizeof(uint32_t)) / sizeof(uint32_t);
357
358 dword_out(CMD_MEM_TRACE_MEMORY_WRITE | (5 + dwords - 1));
359 dword_out(addr & 0xFFFFFFFF); /* addr lo */
360 dword_out(addr >> 32); /* addr hi */
361 dword_out(addr_space); /* gtt */
362 dword_out(len);
363 }
364
365 static void
366 register_write_out(uint32_t addr, uint32_t value)
367 {
368 uint32_t dwords = 1;
369
370 dword_out(CMD_MEM_TRACE_REGISTER_WRITE | (5 + dwords - 1));
371 dword_out(addr);
372 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
373 AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
374 dword_out(0xFFFFFFFF); /* mask lo */
375 dword_out(0x00000000); /* mask hi */
376 dword_out(value);
377 }
378
/* Sparse shadow of the 4-level PPGTT.  At levels 4..2 each subtables[]
 * slot points at a heap-allocated child table; at level 1 the slot
 * stores the backing page's physical address cast to a pointer (see
 * populate_ppgtt_table()), not a real struct.
 */
static struct ppgtt_table {
   uint64_t phys_addr;
   struct ppgtt_table *subtables[512];
} pml4 = {PML4_PHYS_ADDR};
383
/* Allocate any missing entries in slots [start, end] of @table and write
 * the dirtied PTE range into the AUB file's physical address space.
 * @level counts down from 4 (PML4) to 1 (final page table); at level 1
 * the "subtable pointer" is actually the allocated page's physical
 * address (page number << 12) smuggled through the pointer slot.
 */
static void
populate_ppgtt_table(struct ppgtt_table *table, int start, int end,
                     int level)
{
   /* Bump allocator for physical pages: hands out page numbers
    * sequentially, starting right after the PML4 page.
    */
   static uint64_t phys_addrs_allocator = (PML4_PHYS_ADDR >> 12) + 1;
   uint64_t entries[512] = {0};
   int dirty_start = 512, dirty_end = 0;

   for (int i = start; i <= end; i++) {
      if (!table->subtables[i]) {
         /* Track the span of newly created entries so we only emit
          * those into the AUB stream below.
          */
         dirty_start = min(dirty_start, i);
         dirty_end = max(dirty_end, i);
         if (level == 1) {
            /* Leaf: store the new page's physical address directly. */
            table->subtables[i] =
               (void *)(phys_addrs_allocator++ << 12);
         } else {
            table->subtables[i] =
               calloc(1, sizeof(struct ppgtt_table));
            table->subtables[i]->phys_addr =
               phys_addrs_allocator++ << 12;
         }
      }
      entries[i] = 3 /* read/write | present */ |
         (level == 1 ? (uint64_t)table->subtables[i] :
          table->subtables[i]->phys_addr);
   }

   if (dirty_start <= dirty_end) {
      uint64_t write_addr = table->phys_addr + dirty_start *
         sizeof(uint64_t);
      uint64_t write_size = (dirty_end - dirty_start + 1) *
         sizeof(uint64_t);
      mem_trace_memory_write_header_out(write_addr, write_size,
                                        AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL);
      data_out(entries + dirty_start, write_size);
   }
}
421
/* Ensure PPGTT page tables exist for the 48-bit virtual range
 * [start, start + size) at all four levels, emitting any newly created
 * entries into the AUB stream.  The index/table macros defined here are
 * also used by ppgtt_lookup() below.
 */
static void
map_ppgtt(uint64_t start, uint64_t size)
{
   /* Range start rounded down to each level's coverage granularity
    * (512 GiB / 1 GiB / 2 MiB / 4 KiB per entry).
    */
   uint64_t l4_start = start & 0xff8000000000;
   uint64_t l3_start = start & 0xffffc0000000;
   uint64_t l2_start = start & 0xffffffe00000;
   uint64_t l1_start = start & 0xfffffffff000;
   /* Last byte of the range rounded up to each level's granularity. */
   uint64_t l4_end = ((start + size - 1) | 0x007fffffffff) & 0xffffffffffff;
   uint64_t l3_end = ((start + size - 1) | 0x00003fffffff) & 0xffffffffffff;
   uint64_t l2_end = ((start + size - 1) | 0x0000001fffff) & 0xffffffffffff;
   uint64_t l1_end = ((start + size - 1) | 0x000000000fff) & 0xffffffffffff;

#define L4_index(addr) (((addr) >> 39) & 0x1ff)
#define L3_index(addr) (((addr) >> 30) & 0x1ff)
#define L2_index(addr) (((addr) >> 21) & 0x1ff)
#define L1_index(addr) (((addr) >> 12) & 0x1ff)

#define L3_table(addr) (pml4.subtables[L4_index(addr)])
#define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
#define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])

   /* Top level first, then walk each level's span of parent entries and
    * populate the children they cover.
    */
   populate_ppgtt_table(&pml4, L4_index(l4_start), L4_index(l4_end), 4);

   for (uint64_t a = l4_start; a < l4_end; a += (1ULL << 39)) {
      uint64_t _start = max(a, l3_start);
      uint64_t _end = min(a + (1ULL << 39), l3_end);

      populate_ppgtt_table(L3_table(a), L3_index(_start),
                           L3_index(_end), 3);
   }

   for (uint64_t a = l3_start; a < l3_end; a += (1ULL << 30)) {
      uint64_t _start = max(a, l2_start);
      uint64_t _end = min(a + (1ULL << 30), l2_end);

      populate_ppgtt_table(L2_table(a), L2_index(_start),
                           L2_index(_end), 2);
   }

   for (uint64_t a = l2_start; a < l2_end; a += (1ULL << 21)) {
      uint64_t _start = max(a, l1_start);
      uint64_t _end = min(a + (1ULL << 21), l1_end);

      populate_ppgtt_table(L1_table(a), L1_index(_start),
                           L1_index(_end), 1);
   }
}
469
/* Translate a PPGTT virtual address to the physical address of its
 * backing page; the range must have been mapped with map_ppgtt() first.
 * The level-1 "subtable pointer" holds the physical address directly
 * (see populate_ppgtt_table()); low 12 bits of @ppgtt_addr are dropped.
 */
static uint64_t
ppgtt_lookup(uint64_t ppgtt_addr)
{
   return (uint64_t)L1_table(ppgtt_addr)->subtables[L1_index(ppgtt_addr)];
}
475
/* Write the AUB file preamble for gen8+ (execlist) mode: a memtrace
 * version packet tagged with the PCI id and process name, static GGTT
 * entries, zeroed ring + PPHWSP pages and golden context images for the
 * render, blitter and video engines, then MMIO writes enabling execlist
 * submission on each engine.
 */
static void
write_execlists_header(void)
{
   char app_name[8 * 4];
   int app_name_len, dwords;

   app_name_len =
      snprintf(app_name, sizeof(app_name), "PCI-ID=0x%X %s", device,
               program_invocation_short_name);
   /* Dword-align the (possibly truncated) name for the packet payload. */
   app_name_len = ALIGN(app_name_len, sizeof(uint32_t));

   dwords = 5 + app_name_len / sizeof(uint32_t);
   dword_out(CMD_MEM_TRACE_VERSION | (dwords - 1));
   dword_out(AUB_MEM_TRACE_VERSION_FILE_VERSION);
   dword_out(devinfo.simulator_id << AUB_MEM_TRACE_VERSION_DEVICE_SHIFT);
   dword_out(0);   /* version */
   dword_out(0);   /* version */
   data_out(app_name, app_name_len);

   /* GGTT PT */
   uint32_t ggtt_ptes = STATIC_GGTT_MAP_SIZE >> 12;

   mem_trace_memory_write_header_out(STATIC_GGTT_MAP_START >> 12,
                                     ggtt_ptes * GEN8_PTE_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY);
   /* One 8-byte PTE per page, identity-mapped; bit 0 marks it present. */
   for (uint32_t i = 0; i < ggtt_ptes; i++) {
      dword_out(1 + 0x1000 * i + STATIC_GGTT_MAP_START);
      dword_out(0);
   }

   /* RENDER_RING */
   mem_trace_memory_write_header_out(RENDER_RING_ADDR, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* RENDER_PPHWSP */
   mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR,
                                     PPHWSP_SIZE +
                                     sizeof(render_context_init),
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* RENDER_CONTEXT */
   data_out(render_context_init, sizeof(render_context_init));

   /* BLITTER_RING */
   mem_trace_memory_write_header_out(BLITTER_RING_ADDR, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* BLITTER_PPHWSP */
   mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR,
                                     PPHWSP_SIZE +
                                     sizeof(blitter_context_init),
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* BLITTER_CONTEXT */
   data_out(blitter_context_init, sizeof(blitter_context_init));

   /* VIDEO_RING */
   mem_trace_memory_write_header_out(VIDEO_RING_ADDR, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* VIDEO_PPHWSP */
   mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR,
                                     PPHWSP_SIZE +
                                     sizeof(video_context_init),
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(0);

   /* VIDEO_CONTEXT */
   data_out(video_context_init, sizeof(video_context_init));

   /* Point each engine's hardware status page at its context... */
   register_write_out(HWS_PGA_RCSUNIT, RENDER_CONTEXT_ADDR);
   register_write_out(HWS_PGA_VCSUNIT0, VIDEO_CONTEXT_ADDR);
   register_write_out(HWS_PGA_BCSUNIT, BLITTER_CONTEXT_ADDR);

   /* ...and enable execlist submission on it. */
   register_write_out(GFX_MODE_RCSUNIT, 0x80008000 /* execlist enable */);
   register_write_out(GFX_MODE_VCSUNIT0, 0x80008000 /* execlist enable */);
   register_write_out(GFX_MODE_BCSUNIT, 0x80008000 /* execlist enable */);
}
565
/* Write the AUB file preamble for pre-gen8 (legacy ring) mode: a
 * version packet with the application name and a PCI-ID comment,
 * followed by a flat GTT identity-mapping the whole 64M aubdump
 * address space.
 */
static void write_legacy_header(void)
{
   char app_name[8 * 4];
   char comment[16];
   int comment_len, comment_dwords, dwords;
   uint32_t entry = 0x200003;   /* base PTE value; advanced 0x1000 per page below */

   comment_len = snprintf(comment, sizeof(comment), "PCI-ID=0x%x", device);
   comment_dwords = ((comment_len + 3) / 4);

   /* Start with a (required) version packet. */
   dwords = 13 + comment_dwords;
   dword_out(CMD_AUB_HEADER | (dwords - 2));
   dword_out((4 << AUB_HEADER_MAJOR_SHIFT) |
             (0 << AUB_HEADER_MINOR_SHIFT));

   /* Next comes a 32-byte application name. */
   strncpy(app_name, program_invocation_short_name, sizeof(app_name));
   app_name[sizeof(app_name) - 1] = 0;
   data_out(app_name, sizeof(app_name));

   dword_out(0); /* timestamp */
   dword_out(0); /* timestamp */
   dword_out(comment_len);
   data_out(comment, comment_dwords * 4);

   /* Set up the GTT. The max we can handle is 64M */
   dword_out(CMD_AUB_TRACE_HEADER_BLOCK | ((addr_bits > 32 ? 6 : 5) - 2));
   dword_out(AUB_TRACE_MEMTYPE_GTT_ENTRY |
             AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE);
   dword_out(0);          /* subtype */
   dword_out(0);          /* offset */
   dword_out(gtt_size()); /* size */
   if (addr_bits > 32)
      dword_out(0);
   /* Identity map: one PTE per 4k page; 64-bit PTEs get a zero high
    * dword on 48-bit devices.
    */
   for (uint32_t i = 0; i < NUM_PT_ENTRIES; i++) {
      dword_out(entry + 0x1000 * i);
      if (addr_bits > 32)
         dword_out(0);
   }
}
607
/**
 * Break up large objects into multiple writes.  Otherwise a 128kb VBO
 * would overflow the 16 bits of size field in the packet header and
 * everything goes badly after that.
 *
 * @param type       AUB_TRACE_TYPE_* tag (legacy mode only)
 * @param virtual    CPU pointer to the BO contents (possibly tagged with
 *                   USERPTR_FLAG), or NULL to write zeros
 * @param size       number of bytes to write
 * @param gtt_offset GTT (legacy) or PPGTT (execlist) address of the BO
 */
static void
aub_write_trace_block(uint32_t type, void *virtual, uint32_t size, uint64_t gtt_offset)
{
   uint32_t block_size;
   uint32_t subtype = 0;
   static const char null_block[8 * 4096];

   for (uint32_t offset = 0; offset < size; offset += block_size) {
      block_size = min(8 * 4096, size - offset);

      if (use_execlists()) {
         /* Execlist mode writes go through the PPGTT one page at a
          * time, at the page's physical address.
          */
         block_size = min(4096, block_size);
         mem_trace_memory_write_header_out(ppgtt_lookup(gtt_offset + offset),
                                           block_size,
                                           AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL);
      } else {
         dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
                   ((addr_bits > 32 ? 6 : 5) - 2));
         dword_out(AUB_TRACE_MEMTYPE_GTT |
                   type | AUB_TRACE_OP_DATA_WRITE);
         dword_out(subtype);
         dword_out(gtt_offset + offset);
         dword_out(align_u32(block_size, 4));
         if (addr_bits > 32)
            dword_out((gtt_offset + offset) >> 32);
      }

      if (virtual)
         data_out(((char *) GET_PTR(virtual)) + offset, block_size);
      else
         data_out(null_block, block_size);

      /* Pad to a multiple of 4 bytes. */
      data_out(null_block, -block_size & 3);
   }
}
649
650 static void
651 write_reloc(void *p, uint64_t v)
652 {
653 if (addr_bits > 32) {
654 /* From the Broadwell PRM Vol. 2a,
655 * MI_LOAD_REGISTER_MEM::MemoryAddress:
656 *
657 * "This field specifies the address of the memory
658 * location where the register value specified in the
659 * DWord above will read from. The address specifies
660 * the DWord location of the data. Range =
661 * GraphicsVirtualAddress[63:2] for a DWord register
662 * GraphicsAddress [63:48] are ignored by the HW and
663 * assumed to be in correct canonical form [63:48] ==
664 * [47]."
665 *
666 * In practice, this will always mean the top bits are zero
667 * because of the GTT size limitation of the aubdump tool.
668 */
669 const int shift = 63 - 47;
670 *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
671 } else {
672 *(uint32_t *)p = v;
673 }
674 }
675
676 static void
677 aub_dump_execlist(uint64_t batch_offset, int ring_flag)
678 {
679 uint32_t ring_addr;
680 uint64_t descriptor;
681 uint32_t elsp_reg;
682 uint32_t elsq_reg;
683 uint32_t status_reg;
684 uint32_t control_reg;
685
686 switch (ring_flag) {
687 case I915_EXEC_DEFAULT:
688 case I915_EXEC_RENDER:
689 ring_addr = RENDER_RING_ADDR;
690 descriptor = RENDER_CONTEXT_DESCRIPTOR;
691 elsp_reg = EXECLIST_SUBMITPORT_RCSUNIT;
692 elsq_reg = EXECLIST_SQ_CONTENTS0_RCSUNIT;
693 status_reg = EXECLIST_STATUS_RCSUNIT;
694 control_reg = EXECLIST_CONTROL_RCSUNIT;
695 break;
696 case I915_EXEC_BSD:
697 ring_addr = VIDEO_RING_ADDR;
698 descriptor = VIDEO_CONTEXT_DESCRIPTOR;
699 elsp_reg = EXECLIST_SUBMITPORT_VCSUNIT0;
700 elsq_reg = EXECLIST_SQ_CONTENTS0_VCSUNIT0;
701 status_reg = EXECLIST_STATUS_VCSUNIT0;
702 control_reg = EXECLIST_CONTROL_VCSUNIT0;
703 break;
704 case I915_EXEC_BLT:
705 ring_addr = BLITTER_RING_ADDR;
706 descriptor = BLITTER_CONTEXT_DESCRIPTOR;
707 elsp_reg = EXECLIST_SUBMITPORT_BCSUNIT;
708 elsq_reg = EXECLIST_SQ_CONTENTS0_BCSUNIT;
709 status_reg = EXECLIST_STATUS_BCSUNIT;
710 control_reg = EXECLIST_CONTROL_BCSUNIT;
711 break;
712 }
713
714 mem_trace_memory_write_header_out(ring_addr, 16,
715 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
716 dword_out(AUB_MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965 | (3 - 2));
717 dword_out(batch_offset & 0xFFFFFFFF);
718 dword_out(batch_offset >> 32);
719 dword_out(0 /* MI_NOOP */);
720
721 mem_trace_memory_write_header_out(ring_addr + 8192 + 20, 4,
722 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
723 dword_out(0); /* RING_BUFFER_HEAD */
724 mem_trace_memory_write_header_out(ring_addr + 8192 + 28, 4,
725 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
726 dword_out(16); /* RING_BUFFER_TAIL */
727
728 if (devinfo.gen >= 11) {
729 register_write_out(elsq_reg, descriptor & 0xFFFFFFFF);
730 register_write_out(elsq_reg + sizeof(uint32_t), descriptor >> 32);
731 register_write_out(control_reg, 1);
732 } else {
733 register_write_out(elsp_reg, 0);
734 register_write_out(elsp_reg, 0);
735 register_write_out(elsp_reg, descriptor >> 32);
736 register_write_out(elsp_reg, descriptor & 0xFFFFFFFF);
737 }
738
739 dword_out(CMD_MEM_TRACE_REGISTER_POLL | (5 + 1 - 1));
740 dword_out(status_reg);
741 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
742 AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
743 if (devinfo.gen >= 11) {
744 dword_out(0x00000001); /* mask lo */
745 dword_out(0x00000000); /* mask hi */
746 dword_out(0x00000001);
747 } else {
748 dword_out(0x00000010); /* mask lo */
749 dword_out(0x00000000); /* mask hi */
750 dword_out(0x00000000);
751 }
752 }
753
/* Legacy (pre-execlist) submission: build a tiny ring buffer containing
 * an MI_BATCH_BUFFER_START pointing at @batch_offset and write it at
 * GTT address @offset.  This appears to trigger execution of the ring
 * in the simulator.
 */
static void
aub_dump_ringbuffer(uint64_t batch_offset, uint64_t offset, int ring_flag)
{
   uint32_t ringbuffer[4096];
   unsigned aub_mi_bbs_len;
   int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
   int ring_count = 0;

   if (ring_flag == I915_EXEC_BSD)
      ring = AUB_TRACE_TYPE_RING_PRB1;
   else if (ring_flag == I915_EXEC_BLT)
      ring = AUB_TRACE_TYPE_RING_PRB2;

   /* Make a ring buffer to execute our batchbuffer. */
   memset(ringbuffer, 0, sizeof(ringbuffer));

   /* 3 dwords with a 64-bit address on 48-bit devices, 2 otherwise. */
   aub_mi_bbs_len = addr_bits > 32 ? 3 : 2;
   ringbuffer[ring_count] = AUB_MI_BATCH_BUFFER_START | (aub_mi_bbs_len - 2);
   write_reloc(&ringbuffer[ring_count + 1], batch_offset);
   ring_count += aub_mi_bbs_len;

   /* Write out the ring.  This appears to trigger execution of
    * the ring in the simulator.
    */
   dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
             ((addr_bits > 32 ? 6 : 5) - 2));
   dword_out(AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
   dword_out(0); /* general/surface subtype */
   dword_out(offset);
   dword_out(ring_count * 4);
   if (addr_bits > 32)
      dword_out(offset >> 32);

   data_out(ringbuffer, ring_count * 4);
}
789
/* Return a malloc'ed copy of @bo's contents with every relocation entry
 * of @obj patched to the target BO's assigned offset.  The caller owns
 * (and must free) the returned buffer.
 */
static void *
relocate_bo(struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
            const struct drm_i915_gem_exec_object2 *obj)
{
   const struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   const struct drm_i915_gem_relocation_entry *relocs =
      (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
   void *relocated;
   int handle;

   relocated = malloc(bo->size);
   fail_if(relocated == NULL, "intel_aubdump: out of memory\n");
   memcpy(relocated, GET_PTR(bo->map), bo->size);
   for (size_t i = 0; i < obj->relocation_count; i++) {
      fail_if(relocs[i].offset >= bo->size, "intel_aubdump: reloc outside bo\n");

      /* With HANDLE_LUT the reloc target is an index into the exec
       * object list rather than a GEM handle.
       */
      if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
         handle = exec_objects[relocs[i].target_handle].handle;
      else
         handle = relocs[i].target_handle;

      write_reloc(((char *)relocated) + relocs[i].offset,
                  get_bo(handle)->offset + relocs[i].delta);
   }

   return relocated;
}
818
/* Forward an ioctl to the real libc ioctl(), retrying on EINTR/EAGAIN
 * the way drmIoctl() does.
 */
static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   int ret;

   do
      ret = libc_ioctl(fd, request, argp);
   while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
830
831 static void *
832 gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
833 {
834 struct drm_i915_gem_mmap mmap = {
835 .handle = handle,
836 .offset = offset,
837 .size = size
838 };
839
840 if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
841 return MAP_FAILED;
842
843 return (void *)(uintptr_t) mmap.addr_ptr;
844 }
845
846 static int
847 gem_get_param(int fd, uint32_t param)
848 {
849 int value;
850 drm_i915_getparam_t gp = {
851 .param = param,
852 .value = &value
853 };
854
855 if (gem_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1)
856 return 0;
857
858 return value;
859 }
860
/* Core interception point, called for every execbuffer2 ioctl: lazily
 * identifies the device and writes the AUB header on the first batch,
 * assigns GTT/PPGTT addresses to every BO, dumps their contents (with
 * relocations applied) into the AUB file, and finally emits either an
 * execlist submission (gen8+) or a legacy ring-buffer write.
 */
static void
dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
{
   struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
   uint32_t offset;
   struct drm_i915_gem_exec_object2 *obj;
   struct bo *bo, *batch_bo;
   int batch_index;
   void *data;

   /* We can't do this at open time as we're not yet authenticated. */
   if (device == 0) {
      device = gem_get_param(fd, I915_PARAM_CHIPSET_ID);
      fail_if(device == 0 || devinfo.gen == 0, "failed to identify chipset\n");
   }
   if (devinfo.gen == 0) {
      /* First batch: resolve the device info and write the AUB header. */
      fail_if(!gen_get_device_info(device, &devinfo),
              "failed to identify chipset=0x%x\n", device);

      addr_bits = devinfo.gen >= 8 ? 48 : 32;

      if (use_execlists())
         write_execlists_header();
      else
         write_legacy_header();

      if (verbose)
         printf("[intel_aubdump: running, "
                "output file %s, chipset id 0x%04x, gen %d]\n",
                filename, device, devinfo.gen);
   }

   /* Allocation cursor for non-pinned BOs: legacy mode places them
    * after the flat GTT; execlist mode starts at the second PPGTT page.
    */
   if (use_execlists())
      offset = 0x1000;
   else
      offset = gtt_size();

   if (verbose)
      printf("Dumping execbuffer2:\n");

   /* First pass: assign an address to each BO and map its contents. */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(obj->handle);

      /* If bo->size == 0, this means they passed us an invalid
       * buffer. The kernel will reject it and so should we.
       */
      if (bo->size == 0) {
         if (verbose)
            printf("BO #%d is invalid!\n", obj->handle);
         return;
      }

      if (obj->flags & EXEC_OBJECT_PINNED) {
         /* Softpinned BO: keep the address userspace chose. */
         bo->offset = obj->offset;
         if (verbose)
            printf("BO #%d (%dB) pinned @ 0x%lx\n",
                   obj->handle, bo->size, bo->offset);
      } else {
         if (obj->alignment != 0)
            offset = align_u32(offset, obj->alignment);
         bo->offset = offset;
         if (verbose)
            printf("BO #%d (%dB) @ 0x%lx\n", obj->handle,
                   bo->size, bo->offset);
         /* Advance past the BO plus a 4k guard, page-aligned. */
         offset = align_u32(offset + bo->size + 4095, 4096);
      }

      if (bo->map == NULL && bo->size > 0)
         bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
      fail_if(bo->map == MAP_FAILED, "intel_aubdump: bo mmap failed\n");

      if (use_execlists())
         map_ppgtt(bo->offset, bo->size);
   }

   /* The batch is the last object unless I915_EXEC_BATCH_FIRST is set. */
   batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
      execbuffer2->buffer_count - 1;
   batch_bo = get_bo(exec_objects[batch_index].handle);

   /* Second pass: apply relocations and write each BO's contents. */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(obj->handle);

      if (obj->relocation_count > 0)
         data = relocate_bo(bo, execbuffer2, obj);
      else
         data = bo->map;

      if (bo == batch_bo) {
         aub_write_trace_block(AUB_TRACE_TYPE_BATCH,
                               data, bo->size, bo->offset);
      } else {
         aub_write_trace_block(AUB_TRACE_TYPE_NOTYPE,
                               data, bo->size, bo->offset);
      }
      /* relocate_bo() returned a private copy; free it. */
      if (data != bo->map)
         free(data);
   }

   /* Submit the batch for execution in the simulator. */
   if (use_execlists()) {
      aub_dump_execlist(batch_bo->offset +
                        execbuffer2->batch_start_offset, ring_flag);
   } else {
      /* Dump ring buffer */
      aub_dump_ringbuffer(batch_bo->offset +
                          execbuffer2->batch_start_offset, offset,
                          ring_flag);
   }

   for (int i = 0; i < ARRAY_SIZE(files); i++) {
      if (files[i] != NULL)
         fflush(files[i]);
   }

   /* With a device override there is no kernel execution to signal the
    * requested out-fences, so signal the syncobjs ourselves.
    */
   if (device_override &&
       (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
      struct drm_i915_gem_exec_fence *fences =
         (void*)(uintptr_t)execbuffer2->cliprects_ptr;
      for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
         if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
            struct drm_syncobj_array arg = {
               .handles = (uintptr_t)&fences[i].handle,
               .count_handles = 1,
               .pad = 0,
            };
            libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
         }
      }
   }
}
993
994 static void
995 add_new_bo(int handle, uint64_t size, void *map)
996 {
997 struct bo *bo = &bos[handle];
998
999 fail_if(handle >= MAX_BO_COUNT, "intel_aubdump: bo handle out of range\n");
1000 fail_if(size == 0, "intel_aubdump: bo size is invalid\n");
1001
1002 bo->size = size;
1003 bo->map = map;
1004 }
1005
1006 static void
1007 remove_bo(int handle)
1008 {
1009 struct bo *bo = get_bo(handle);
1010
1011 if (bo->map && !IS_USERPTR(bo->map))
1012 munmap(bo->map, bo->size);
1013 bo->size = 0;
1014 bo->map = NULL;
1015 }
1016
1017 __attribute__ ((visibility ("default"))) int
1018 close(int fd)
1019 {
1020 if (fd == drm_fd)
1021 drm_fd = -1;
1022
1023 return libc_close(fd);
1024 }
1025
/*
 * Split the comma-separated `command` string into an argv array (the
 * string is modified in place: commas become NUL terminators) and spawn
 * it with its stdin connected to a pipe.  Returns a FILE* writing to
 * the child's stdin, or NULL if pipe() or fork() fails.
 */
static FILE *
launch_command(char *command)
{
   int i = 0, fds[2];
   /* Worst case every character is a comma: strlen(command) + 1 tokens,
    * plus one slot for the NULL terminator execvp() requires.  (The old
    * strlen(command)-sized array could be left unterminated.) */
   char **args = calloc(strlen(command) + 2, sizeof(char *));
   char *iter;

   fail_if(args == NULL, "intel_aubdump: out of memory\n");

   args[i++] = iter = command;

   /* Split on commas; calloc() zeroed the array, so args stays
    * NULL-terminated after the last token. */
   while ((iter = strchr(iter, ',')) != NULL) {
      *iter++ = '\0';
      args[i++] = iter;
   }

   if (pipe(fds) == -1) {
      free(args);
      return NULL;
   }

   switch (fork()) {
   case 0:
      /* Child: the read end of the pipe becomes stdin; neither original
       * pipe fd is needed after that. */
      dup2(fds[0], 0);
      close(fds[0]);
      close(fds[1]);
      fail_if(execvp(args[0], args) == -1,
              "intel_aubdump: failed to launch child command\n");
      return NULL;

   default:
      /* Parent: keep only the write end, wrapped as a stream. */
      close(fds[0]);
      free(args);
      return fdopen(fds[1], "w");

   case -1:
      /* fork() failed: release the pipe and the argv array. */
      close(fds[0]);
      close(fds[1]);
      free(args);
      return NULL;
   }
}
1059
1060 static void
1061 maybe_init(void)
1062 {
1063 static bool initialized = false;
1064 FILE *config;
1065 char *key, *value;
1066
1067 if (initialized)
1068 return;
1069
1070 initialized = true;
1071
1072 config = fdopen(3, "r");
1073 while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
1074 if (!strcmp(key, "verbose")) {
1075 verbose = 1;
1076 } else if (!strcmp(key, "device")) {
1077 fail_if(sscanf(value, "%i", &device) != 1,
1078 "intel_aubdump: failed to parse device id '%s'",
1079 value);
1080 device_override = true;
1081 } else if (!strcmp(key, "file")) {
1082 filename = strdup(value);
1083 files[0] = fopen(filename, "w+");
1084 fail_if(files[0] == NULL,
1085 "intel_aubdump: failed to open file '%s'\n",
1086 filename);
1087 } else if (!strcmp(key, "command")) {
1088 files[1] = launch_command(value);
1089 fail_if(files[1] == NULL,
1090 "intel_aubdump: failed to launch command '%s'\n",
1091 value);
1092 } else {
1093 fprintf(stderr, "intel_aubdump: unknown option '%s'\n", key);
1094 }
1095
1096 free(key);
1097 free(value);
1098 }
1099 fclose(config);
1100
1101 bos = calloc(MAX_BO_COUNT, sizeof(bos[0]));
1102 fail_if(bos == NULL, "intel_aubdump: out of memory\n");
1103 }
1104
/*
 * Interposed ioctl(): the heart of the dumper.  Detects the DRM fd,
 * intercepts the i915 ioctls needed to track buffer objects and dump
 * execbuffers, and forwards everything else to the real libc ioctl().
 */
__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;
   int ret;
   struct stat buf;

   /* ioctl() is variadic; the drm ioctls we care about pass exactly one
    * pointer argument. */
   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   /* Latch onto the first fd that receives a DRM ioctl and is a DRM
    * character device.  NOTE(review): only one drm fd is tracked at a
    * time; a second device would be ignored until the first is closed. */
   if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
       drm_fd != fd && fstat(fd, &buf) == 0 &&
       (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
      drm_fd = fd;
      if (verbose)
         printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd);
   }

   if (fd == drm_fd) {
      maybe_init();

      switch (request) {
      case DRM_IOCTL_I915_GETPARAM: {
         struct drm_i915_getparam *getparam = argp;

         /* With a device override there may be no real hardware to ask;
          * answer the chipset-id query ourselves. */
         if (device_override && getparam->param == I915_PARAM_CHIPSET_ID) {
            *getparam->value = device;
            return 0;
         }

         ret = libc_ioctl(fd, request, argp);

         /* If the application looks up chipset_id
          * (they typically do), we'll piggy-back on
          * their ioctl and store the id for later
          * use. */
         if (getparam->param == I915_PARAM_CHIPSET_ID)
            device = *getparam->value;

         return ret;
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER: {
         /* Legacy execbuffer path: warn once and pass through undumped. */
         static bool once;
         if (!once) {
            fprintf(stderr, "intel_aubdump: "
                    "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
            once = true;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
         /* Dump the batch to the aub file; when overriding the device
          * we swallow the ioctl (fences are signalled in
          * dump_execbuffer2) instead of submitting to real hardware. */
         dump_execbuffer2(fd, argp);
         if (device_override)
            return 0;

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CREATE: {
         /* Track the new bo (size known, no mapping yet). */
         struct drm_i915_gem_create *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_USERPTR: {
         /* Application-owned memory: tag the pointer so remove_bo()
          * never munmaps it. */
         struct drm_i915_gem_userptr *userptr = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(userptr->handle, userptr->user_size,
                       (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));
         return ret;
      }

      case DRM_IOCTL_GEM_CLOSE: {
         /* Drop our bookkeeping for the handle, then let the kernel
          * close it. */
         struct drm_gem_close *close = argp;

         remove_bo(close->handle);

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_GEM_OPEN: {
         /* Flink-opened bo: the kernel reports its size in the reply. */
         struct drm_gem_open *open = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(open->handle, open->size, NULL);

         return ret;
      }

      case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
         /* Imported dma-buf: the reply has no size field, so derive it
          * by seeking to the end of the dma-buf fd. */
         struct drm_prime_handle *prime = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            off_t size;

            size = lseek(prime->fd, 0, SEEK_END);
            fail_if(size == -1, "intel_aubdump: failed to get prime bo size\n");
            add_new_bo(prime->handle, size, NULL);
         }

         return ret;
      }

      default:
         return libc_ioctl(fd, request, argp);
      }
   } else {
      /* Not the tracked DRM fd: plain pass-through. */
      return libc_ioctl(fd, request, argp);
   }
}
1228
1229 static void
1230 init(void)
1231 {
1232 libc_close = dlsym(RTLD_NEXT, "close");
1233 libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
1234 fail_if(libc_close == NULL || libc_ioctl == NULL,
1235 "intel_aubdump: failed to get libc ioctl or close\n");
1236 }
1237
static int
close_init_helper(int fd)
{
   /* First close() before the dlsym lookups ran: resolve the real libc
    * entry points, then forward this call. */
   init();

   return libc_close(fd);
}
1244
static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
   /* First ioctl() before the dlsym lookups ran: resolve the real libc
    * entry points, then forward this call with its single pointer arg. */
   va_list ap;
   void *arg;

   va_start(ap, request);
   arg = va_arg(ap, void *);
   va_end(ap);

   init();

   return libc_ioctl(fd, request, arg);
}
1258
1259 static void __attribute__ ((destructor))
1260 fini(void)
1261 {
1262 free(filename);
1263 for (int i = 0; i < ARRAY_SIZE(files); i++) {
1264 if (files[i] != NULL)
1265 fclose(files[i]);
1266 }
1267 free(bos);
1268 }