intel: tools: Fix uninitialized variable warnings in intel_dump_gpu.
[mesa.git] / src/intel/tools/intel_dump_gpu.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <signal.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/mman.h>
39 #include <dlfcn.h>
40 #include <i915_drm.h>
41 #include <inttypes.h>
42
43 #include "intel_aub.h"
44
45 #include "dev/gen_device_info.h"
46 #include "util/macros.h"
47
48 #ifndef ALIGN
49 #define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
50 #endif
51
52 #define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
53 #define MI_LRI_FORCE_POSTED (1<<12)
54
55 #define MI_BATCH_NON_SECURE_I965 (1 << 8)
56
57 #define MI_BATCH_BUFFER_END (0xA << 23)
58
59 #define min(a, b) ({ \
60 __typeof(a) _a = (a); \
61 __typeof(b) _b = (b); \
62 _a < _b ? _a : _b; \
63 })
64
65 #define max(a, b) ({ \
66 __typeof(a) _a = (a); \
67 __typeof(b) _b = (b); \
68 _a > _b ? _a : _b; \
69 })
70
71 #define HWS_PGA_RCSUNIT 0x02080
72 #define HWS_PGA_VCSUNIT0 0x12080
73 #define HWS_PGA_BCSUNIT 0x22080
74
75 #define GFX_MODE_RCSUNIT 0x0229c
76 #define GFX_MODE_VCSUNIT0 0x1229c
77 #define GFX_MODE_BCSUNIT 0x2229c
78
79 #define EXECLIST_SUBMITPORT_RCSUNIT 0x02230
80 #define EXECLIST_SUBMITPORT_VCSUNIT0 0x12230
81 #define EXECLIST_SUBMITPORT_BCSUNIT 0x22230
82
83 #define EXECLIST_STATUS_RCSUNIT 0x02234
84 #define EXECLIST_STATUS_VCSUNIT0 0x12234
85 #define EXECLIST_STATUS_BCSUNIT 0x22234
86
87 #define EXECLIST_SQ_CONTENTS0_RCSUNIT 0x02510
88 #define EXECLIST_SQ_CONTENTS0_VCSUNIT0 0x12510
89 #define EXECLIST_SQ_CONTENTS0_BCSUNIT 0x22510
90
91 #define EXECLIST_CONTROL_RCSUNIT 0x02550
92 #define EXECLIST_CONTROL_VCSUNIT0 0x12550
93 #define EXECLIST_CONTROL_BCSUNIT 0x22550
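/* The render (RCS), video (VCS0) and blitter (BCS) variants of each register
 * above are the same register at the same offset within the engine's MMIO
 * range; the ranges are spaced 0x10000 apart, which is why the three defines
 * differ only in their leading digit.
 */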
94
95 #define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)
96
97 #define PTE_SIZE 4
98 #define GEN8_PTE_SIZE 8
99
100 #define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
101 #define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)
102
103 #define RING_SIZE (1 * 4096)
104 #define PPHWSP_SIZE (1 * 4096)
105 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * 4096)
106 #define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
107 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * 4096)
108 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * 4096)
109 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * 4096)
110
111
112 #define STATIC_GGTT_MAP_START 0
113
114 #define RENDER_RING_ADDR STATIC_GGTT_MAP_START
115 #define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)
116
117 #define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
118 #define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)
119
120 #define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
121 #define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)
122
123 #define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
124 #define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)
125
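/* So the static GGTT map lays out, back to back: the render ring and its
 * PPHWSP+context image, then the blitter ring and context, then the video
 * ring and context.  The PPGTT root (PML4) page is placed immediately after
 * this region.
 */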
126 #define PML4_PHYS_ADDR ((uint64_t)(STATIC_GGTT_MAP_END))
127
128 #define CONTEXT_FLAGS (0x339) /* Normal Priority | L3-LLC Coherency |
129 * PPGTT Enabled |
130 * Legacy Context with 64 bit VA support |
131 * Valid
132 */
133
134 #define RENDER_CONTEXT_DESCRIPTOR ((uint64_t)1 << 62 | RENDER_CONTEXT_ADDR | CONTEXT_FLAGS)
135 #define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 62 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
136 #define VIDEO_CONTEXT_DESCRIPTOR ((uint64_t)3 << 62 | VIDEO_CONTEXT_ADDR | CONTEXT_FLAGS)
137
138 static const uint32_t render_context_init[GEN9_LR_CONTEXT_RENDER_SIZE / /* Choose the largest */
139 sizeof(uint32_t)] = {
140 0 /* MI_NOOP */,
141 MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED,
142 0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
143 0x2034 /* RING_HEAD */, 0,
144 0x2030 /* RING_TAIL */, 0,
145 0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR,
146 0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
147 0x2168 /* BB_HEAD_U */, 0,
148 0x2140 /* BB_HEAD_L */, 0,
149 0x2110 /* BB_STATE */, 0,
150 0x211C /* SECOND_BB_HEAD_U */, 0,
151 0x2114 /* SECOND_BB_HEAD_L */, 0,
152 0x2118 /* SECOND_BB_STATE */, 0,
153 0x21C0 /* BB_PER_CTX_PTR */, 0,
154 0x21C4 /* RCS_INDIRECT_CTX */, 0,
155 0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
156 /* MI_NOOP */
157 0, 0,
158
159 0 /* MI_NOOP */,
160 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
161 0x23A8 /* CTX_TIMESTAMP */, 0,
162 0x228C /* PDP3_UDW */, 0,
163 0x2288 /* PDP3_LDW */, 0,
164 0x2284 /* PDP2_UDW */, 0,
165 0x2280 /* PDP2_LDW */, 0,
166 0x227C /* PDP1_UDW */, 0,
167 0x2278 /* PDP1_LDW */, 0,
168 0x2274 /* PDP0_UDW */, PML4_PHYS_ADDR >> 32,
169 0x2270 /* PDP0_LDW */, PML4_PHYS_ADDR,
170 /* MI_NOOP */
171 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
172
173 0 /* MI_NOOP */,
174 MI_LOAD_REGISTER_IMM_n(1),
175 0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
176 MI_BATCH_BUFFER_END
177 };
178
179 static const uint32_t blitter_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
180 sizeof(uint32_t)] = {
181 0 /* MI_NOOP */,
182 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
183 0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
184 0x22034 /* RING_HEAD */, 0,
185 0x22030 /* RING_TAIL */, 0,
186 0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR,
187 0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
188 0x22168 /* BB_HEAD_U */, 0,
189 0x22140 /* BB_HEAD_L */, 0,
190 0x22110 /* BB_STATE */, 0,
191 0x2211C /* SECOND_BB_HEAD_U */, 0,
192 0x22114 /* SECOND_BB_HEAD_L */, 0,
193 0x22118 /* SECOND_BB_STATE */, 0,
194 /* MI_NOOP */
195 0, 0, 0, 0, 0, 0, 0, 0,
196
197 0 /* MI_NOOP */,
198 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
199 0x223A8 /* CTX_TIMESTAMP */, 0,
200 0x2228C /* PDP3_UDW */, 0,
201 0x22288 /* PDP3_LDW */, 0,
202 0x22284 /* PDP2_UDW */, 0,
203 0x22280 /* PDP2_LDW */, 0,
204 0x2227C /* PDP1_UDW */, 0,
205 0x22278 /* PDP1_LDW */, 0,
206 0x22274 /* PDP0_UDW */, PML4_PHYS_ADDR >> 32,
207 0x22270 /* PDP0_LDW */, PML4_PHYS_ADDR,
208 /* MI_NOOP */
209 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
210
211 MI_BATCH_BUFFER_END
212 };
213
214 static const uint32_t video_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
215 sizeof(uint32_t)] = {
216 0 /* MI_NOOP */,
217 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
218 0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
219 0x1C034 /* RING_HEAD */, 0,
220 0x1C030 /* RING_TAIL */, 0,
221 0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR,
222 0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
223 0x1C168 /* BB_HEAD_U */, 0,
224 0x1C140 /* BB_HEAD_L */, 0,
225 0x1C110 /* BB_STATE */, 0,
226 0x1C11C /* SECOND_BB_HEAD_U */, 0,
227 0x1C114 /* SECOND_BB_HEAD_L */, 0,
228 0x1C118 /* SECOND_BB_STATE */, 0,
229 /* MI_NOOP */
230 0, 0, 0, 0, 0, 0, 0, 0,
231
232 0 /* MI_NOOP */,
233 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
234 0x1C3A8 /* CTX_TIMESTAMP */, 0,
235 0x1C28C /* PDP3_UDW */, 0,
236 0x1C288 /* PDP3_LDW */, 0,
237 0x1C284 /* PDP2_UDW */, 0,
238 0x1C280 /* PDP2_LDW */, 0,
239 0x1C27C /* PDP1_UDW */, 0,
240 0x1C278 /* PDP1_LDW */, 0,
241 0x1C274 /* PDP0_UDW */, PML4_PHYS_ADDR >> 32,
242 0x1C270 /* PDP0_LDW */, PML4_PHYS_ADDR,
243 /* MI_NOOP */
244 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
245
246 MI_BATCH_BUFFER_END
247 };
248
249 static int close_init_helper(int fd);
250 static int ioctl_init_helper(int fd, unsigned long request, ...);
251
252 static int (*libc_close)(int fd) = close_init_helper;
253 static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;
254
255 static int drm_fd = -1;
256 static char *filename = NULL;
257 static FILE *files[2] = { NULL, NULL };
258 static struct gen_device_info devinfo = {0};
259 static int verbose = 0;
260 static bool device_override;
261 static uint32_t device;
262 static int addr_bits = 0;
263
264 #define MAX_BO_COUNT 64 * 1024
265
266 struct bo {
267 uint32_t size;
268 uint64_t offset;
269 void *map;
270 };
271
272 static struct bo *bos;
273
274 #define DRM_MAJOR 226
275
276 /* We set bit 0 in the map pointer for userptr BOs so we know not to
277 * munmap them on DRM_IOCTL_GEM_CLOSE.
278 */
279 #define USERPTR_FLAG 1
280 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
281 #define GET_PTR(p) ( (void *) ((uintptr_t) (p) & ~(uintptr_t) 1) )
282
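/* Gen8+ output uses per-engine execlists (write_execlists_header() and
 * aub_dump_execlist()); earlier gens go through the legacy AUB header and
 * ring buffer dump path instead.
 */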
283 static inline bool use_execlists(void)
284 {
285 return devinfo.gen >= 8;
286 }
287
288 static void __attribute__ ((format(__printf__, 2, 3)))
289 fail_if(int cond, const char *format, ...)
290 {
291 va_list args;
292
293 if (!cond)
294 return;
295
296 va_start(args, format);
297 vfprintf(stderr, format, args);
298 va_end(args);
299
300 raise(SIGTRAP);
301 }
302
303 static struct bo *
304 get_bo(uint32_t handle)
305 {
306 struct bo *bo;
307
308 fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
309 bo = &bos[handle];
310
311 return bo;
312 }
313
314 static inline uint32_t
315 align_u32(uint32_t v, uint32_t a)
316 {
317 return (v + a - 1) & ~(a - 1);
318 }
319
320 static void
321 dword_out(uint32_t data)
322 {
323 for (int i = 0; i < ARRAY_SIZE (files); i++) {
324 if (files[i] == NULL)
325 continue;
326
327 fail_if(fwrite(&data, 1, 4, files[i]) == 0,
328 "Writing to output failed\n");
329 }
330 }
331
332 static void
333 data_out(const void *data, size_t size)
334 {
335 if (size == 0)
336 return;
337
338 for (int i = 0; i < ARRAY_SIZE (files); i++) {
339 if (files[i] == NULL)
340 continue;
341
342 fail_if(fwrite(data, 1, size, files[i]) == 0,
343 "Writing to output failed\n");
344 }
345 }
346
347 static uint32_t
348 gtt_size(void)
349 {
350 return NUM_PT_ENTRIES * (addr_bits > 32 ? GEN8_PTE_SIZE : PTE_SIZE);
351 }
352
353 static void
354 mem_trace_memory_write_header_out(uint64_t addr, uint32_t len,
355 uint32_t addr_space)
356 {
357 uint32_t dwords = ALIGN(len, sizeof(uint32_t)) / sizeof(uint32_t);
358
359 dword_out(CMD_MEM_TRACE_MEMORY_WRITE | (5 + dwords - 1));
360 dword_out(addr & 0xFFFFFFFF); /* addr lo */
361 dword_out(addr >> 32); /* addr hi */
362 dword_out(addr_space); /* gtt */
363 dword_out(len);
364 }
365
366 static void
367 register_write_out(uint32_t addr, uint32_t value)
368 {
369 uint32_t dwords = 1;
370
371 dword_out(CMD_MEM_TRACE_REGISTER_WRITE | (5 + dwords - 1));
372 dword_out(addr);
373 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
374 AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
375 dword_out(0xFFFFFFFF); /* mask lo */
376 dword_out(0x00000000); /* mask hi */
377 dword_out(value);
378 }
379
380 static struct ppgtt_table {
381 uint64_t phys_addr;
382 struct ppgtt_table *subtables[512];
383 } pml4 = {PML4_PHYS_ADDR};
384
385 static void
386 populate_ppgtt_table(struct ppgtt_table *table, int start, int end,
387 int level)
388 {
389 static uint64_t phys_addrs_allocator = (PML4_PHYS_ADDR >> 12) + 1;
390 uint64_t entries[512] = {0};
391 int dirty_start = 512, dirty_end = 0;
392
393 if (verbose == 2) {
394 printf(" PPGTT (0x%016" PRIx64 "), lvl %d, start: %x, end: %x\n",
395 table->phys_addr, level, start, end);
396 }
397
398 for (int i = start; i <= end; i++) {
399 if (!table->subtables[i]) {
400 dirty_start = min(dirty_start, i);
401 dirty_end = max(dirty_end, i);
402 if (level == 1) {
403 table->subtables[i] =
404 (void *)(phys_addrs_allocator++ << 12);
405 if (verbose == 2) {
406 printf(" Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
407 i, (uint64_t)table->subtables[i]);
408 }
409 } else {
410 table->subtables[i] =
411 calloc(1, sizeof(struct ppgtt_table));
412 table->subtables[i]->phys_addr =
413 phys_addrs_allocator++ << 12;
414 if (verbose == 2) {
415 printf(" Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
416 i, table->subtables[i]->phys_addr);
417 }
418 }
419 }
420 entries[i] = 3 /* read/write | present */ |
421 (level == 1 ? (uint64_t)table->subtables[i] :
422 table->subtables[i]->phys_addr);
423 }
424
425 if (dirty_start <= dirty_end) {
426 uint64_t write_addr = table->phys_addr + dirty_start *
427 sizeof(uint64_t);
428 uint64_t write_size = (dirty_end - dirty_start + 1) *
429 sizeof(uint64_t);
430 mem_trace_memory_write_header_out(write_addr, write_size,
431 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL);
432 data_out(entries + dirty_start, write_size);
433 }
434 }
435
436 static void
437 map_ppgtt(uint64_t start, uint64_t size)
438 {
439 uint64_t l4_start = start & 0xff8000000000;
440 uint64_t l4_end = ((start + size - 1) | 0x007fffffffff) & 0xffffffffffff;
441
442 #define L4_index(addr) (((addr) >> 39) & 0x1ff)
443 #define L3_index(addr) (((addr) >> 30) & 0x1ff)
444 #define L2_index(addr) (((addr) >> 21) & 0x1ff)
445 #define L1_index(addr) (((addr) >> 12) & 0x1ff)
446
447 #define L3_table(addr) (pml4.subtables[L4_index(addr)])
448 #define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
449 #define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])
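/* A 48-bit PPGTT address decodes as 9+9+9+9+12 bits: [47:39] indexes the
 * PML4 (level 4), [38:30] the PDP (level 3), [29:21] the PD (level 2),
 * [20:12] the page table (level 1), and [11:0] is the offset within the
 * 4 KiB page.
 */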
450
451 if (verbose == 2) {
452 printf(" Mapping PPGTT address: 0x%" PRIx64 ", size: %" PRIu64"\n",
453 start, size);
454 }
455
456 populate_ppgtt_table(&pml4, L4_index(l4_start), L4_index(l4_end), 4);
457
458 for (uint64_t l4 = l4_start; l4 < l4_end; l4 += (1ULL << 39)) {
459 uint64_t l3_start = max(l4, start & 0xffffc0000000);
460 uint64_t l3_end = min(l4 + (1ULL << 39),
461 ((start + size - 1) | 0x00003fffffff) & 0xffffffffffff);
462 uint64_t l3_start_idx = L3_index(l3_start);
463          uint64_t l3_end_idx = L3_index(l3_end) >= l3_start_idx ? L3_index(l3_end) : 0x1ff;
464
465 populate_ppgtt_table(L3_table(l4), l3_start_idx, l3_end_idx, 3);
466
467 for (uint64_t l3 = l3_start; l3 < l3_end; l3 += (1ULL << 30)) {
468 uint64_t l2_start = max(l3, start & 0xffffffe00000);
469 uint64_t l2_end = min(l3 + (1ULL << 30),
470 ((start + size - 1) | 0x0000001fffff) & 0xffffffffffff);
471 uint64_t l2_start_idx = L2_index(l2_start);
472 uint64_t l2_end_idx = L2_index(l2_end) >= l2_start_idx ? L2_index(l2_end) : 0x1ff;
473
474 populate_ppgtt_table(L2_table(l3), l2_start_idx, l2_end_idx, 2);
475
476 for (uint64_t l2 = l2_start; l2 < l2_end; l2 += (1ULL << 21)) {
477 uint64_t l1_start = max(l2, start & 0xfffffffff000);
478 uint64_t l1_end = min(l2 + (1ULL << 21),
479 ((start + size - 1) | 0x000000000fff) & 0xffffffffffff);
480 uint64_t l1_start_idx = L1_index(l1_start);
481 uint64_t l1_end_idx = L1_index(l1_end) >= l1_start_idx ? L1_index(l1_end) : 0x1ff;
482
483 populate_ppgtt_table(L1_table(l2), l1_start_idx, l1_end_idx, 1);
484 }
485 }
486 }
487 }
488
489 static uint64_t
490 ppgtt_lookup(uint64_t ppgtt_addr)
491 {
492 return (uint64_t)L1_table(ppgtt_addr)->subtables[L1_index(ppgtt_addr)];
493 }
494
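/* Emit the static preamble for execlist mode: a version packet carrying the
 * application name, GGTT PTEs for the static map, then a ring buffer, PPHWSP
 * and initialized context image for the render, blitter and video engines,
 * and finally the HWS_PGA writes plus the GFX_MODE writes that enable
 * execlist submission on each engine.
 */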
495 static void
496 write_execlists_header(void)
497 {
498 char app_name[8 * 4];
499 int app_name_len, dwords;
500
501 app_name_len =
502 snprintf(app_name, sizeof(app_name), "PCI-ID=0x%X %s", device,
503 program_invocation_short_name);
504 app_name_len = ALIGN(app_name_len, sizeof(uint32_t));
505
506 dwords = 5 + app_name_len / sizeof(uint32_t);
507 dword_out(CMD_MEM_TRACE_VERSION | (dwords - 1));
508 dword_out(AUB_MEM_TRACE_VERSION_FILE_VERSION);
509 dword_out(devinfo.simulator_id << AUB_MEM_TRACE_VERSION_DEVICE_SHIFT);
510 dword_out(0); /* version */
511 dword_out(0); /* version */
512 data_out(app_name, app_name_len);
513
514 /* GGTT PT */
515 uint32_t ggtt_ptes = STATIC_GGTT_MAP_SIZE >> 12;
516
517 mem_trace_memory_write_header_out(STATIC_GGTT_MAP_START >> 12,
518 ggtt_ptes * GEN8_PTE_SIZE,
519 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY);
520 for (uint32_t i = 0; i < ggtt_ptes; i++) {
521 dword_out(1 + 0x1000 * i + STATIC_GGTT_MAP_START);
522 dword_out(0);
523 }
524
525 /* RENDER_RING */
526 mem_trace_memory_write_header_out(RENDER_RING_ADDR, RING_SIZE,
527 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
528 for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
529 dword_out(0);
530
531 /* RENDER_PPHWSP */
532 mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR,
533 PPHWSP_SIZE +
534 sizeof(render_context_init),
535 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
536 for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
537 dword_out(0);
538
539 /* RENDER_CONTEXT */
540 data_out(render_context_init, sizeof(render_context_init));
541
542 /* BLITTER_RING */
543 mem_trace_memory_write_header_out(BLITTER_RING_ADDR, RING_SIZE,
544 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
545 for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
546 dword_out(0);
547
548 /* BLITTER_PPHWSP */
549 mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR,
550 PPHWSP_SIZE +
551 sizeof(blitter_context_init),
552 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
553 for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
554 dword_out(0);
555
556 /* BLITTER_CONTEXT */
557 data_out(blitter_context_init, sizeof(blitter_context_init));
558
559 /* VIDEO_RING */
560 mem_trace_memory_write_header_out(VIDEO_RING_ADDR, RING_SIZE,
561 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
562 for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
563 dword_out(0);
564
565 /* VIDEO_PPHWSP */
566 mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR,
567 PPHWSP_SIZE +
568 sizeof(video_context_init),
569 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
570 for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
571 dword_out(0);
572
573 /* VIDEO_CONTEXT */
574 data_out(video_context_init, sizeof(video_context_init));
575
576 register_write_out(HWS_PGA_RCSUNIT, RENDER_CONTEXT_ADDR);
577 register_write_out(HWS_PGA_VCSUNIT0, VIDEO_CONTEXT_ADDR);
578 register_write_out(HWS_PGA_BCSUNIT, BLITTER_CONTEXT_ADDR);
579
580 register_write_out(GFX_MODE_RCSUNIT, 0x80008000 /* execlist enable */);
581 register_write_out(GFX_MODE_VCSUNIT0, 0x80008000 /* execlist enable */);
582 register_write_out(GFX_MODE_BCSUNIT, 0x80008000 /* execlist enable */);
583 }
584
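/* Emit the legacy (pre-gen8) AUB preamble: a version packet, the application
 * name, a PCI-ID comment, and a GTT-entry block covering the whole 64 MiB
 * memory map.
 */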
585 static void write_legacy_header(void)
586 {
587 char app_name[8 * 4];
588 char comment[16];
589 int comment_len, comment_dwords, dwords;
590 uint32_t entry = 0x200003;
591
592 comment_len = snprintf(comment, sizeof(comment), "PCI-ID=0x%x", device);
593 comment_dwords = ((comment_len + 3) / 4);
594
595 /* Start with a (required) version packet. */
596 dwords = 13 + comment_dwords;
597 dword_out(CMD_AUB_HEADER | (dwords - 2));
598 dword_out((4 << AUB_HEADER_MAJOR_SHIFT) |
599 (0 << AUB_HEADER_MINOR_SHIFT));
600
601 /* Next comes a 32-byte application name. */
602 strncpy(app_name, program_invocation_short_name, sizeof(app_name));
603 app_name[sizeof(app_name) - 1] = 0;
604 data_out(app_name, sizeof(app_name));
605
606 dword_out(0); /* timestamp */
607 dword_out(0); /* timestamp */
608 dword_out(comment_len);
609 data_out(comment, comment_dwords * 4);
610
611 /* Set up the GTT. The max we can handle is 64M */
612 dword_out(CMD_AUB_TRACE_HEADER_BLOCK | ((addr_bits > 32 ? 6 : 5) - 2));
613 dword_out(AUB_TRACE_MEMTYPE_GTT_ENTRY |
614 AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE);
615 dword_out(0); /* subtype */
616 dword_out(0); /* offset */
617 dword_out(gtt_size()); /* size */
618 if (addr_bits > 32)
619 dword_out(0);
620 for (uint32_t i = 0; i < NUM_PT_ENTRIES; i++) {
621 dword_out(entry + 0x1000 * i);
622 if (addr_bits > 32)
623 dword_out(0);
624 }
625 }
626
627 /**
628  * Break up large objects into multiple writes. Otherwise a 128 KB VBO
629  * would overflow the 16-bit size field in the packet header and
630 * everything goes badly after that.
631 */
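/* For example, a 128 KB VBO is emitted as four 32 KB writes on the legacy
 * path, and as thirty-two 4 KB writes on the execlist path, where each block
 * must stay within a single physical page found via ppgtt_lookup().
 */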
632 static void
633 aub_write_trace_block(uint32_t type, void *virtual, uint32_t size, uint64_t gtt_offset)
634 {
635 uint32_t block_size;
636 uint32_t subtype = 0;
637 static const char null_block[8 * 4096];
638
639 for (uint32_t offset = 0; offset < size; offset += block_size) {
640 block_size = min(8 * 4096, size - offset);
641
642 if (use_execlists()) {
643 block_size = min(4096, block_size);
644 mem_trace_memory_write_header_out(ppgtt_lookup(gtt_offset + offset),
645 block_size,
646 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL);
647 } else {
648 dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
649 ((addr_bits > 32 ? 6 : 5) - 2));
650 dword_out(AUB_TRACE_MEMTYPE_GTT |
651 type | AUB_TRACE_OP_DATA_WRITE);
652 dword_out(subtype);
653 dword_out(gtt_offset + offset);
654 dword_out(align_u32(block_size, 4));
655 if (addr_bits > 32)
656 dword_out((gtt_offset + offset) >> 32);
657 }
658
659 if (virtual)
660 data_out(((char *) GET_PTR(virtual)) + offset, block_size);
661 else
662 data_out(null_block, block_size);
663
664 /* Pad to a multiple of 4 bytes. */
665 data_out(null_block, -block_size & 3);
666 }
667 }
668
669 static void
670 write_reloc(void *p, uint64_t v)
671 {
672 if (addr_bits > 32) {
673 /* From the Broadwell PRM Vol. 2a,
674 * MI_LOAD_REGISTER_MEM::MemoryAddress:
675 *
676 * "This field specifies the address of the memory
677 * location where the register value specified in the
678 * DWord above will read from. The address specifies
679 * the DWord location of the data. Range =
680 * GraphicsVirtualAddress[63:2] for a DWord register
681 * GraphicsAddress [63:48] are ignored by the HW and
682 * assumed to be in correct canonical form [63:48] ==
683 * [47]."
684 *
685 * In practice, this will always mean the top bits are zero
686 * because of the GTT size limitation of the aubdump tool.
687 */
688 const int shift = 63 - 47;
689 *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
690 } else {
691 *(uint32_t *)p = v;
692 }
693 }
694
695 static void
696 aub_dump_execlist(uint64_t batch_offset, int ring_flag)
697 {
698 uint32_t ring_addr;
699 uint64_t descriptor;
700 uint32_t elsp_reg;
701 uint32_t elsq_reg;
702 uint32_t status_reg;
703 uint32_t control_reg;
704
705 switch (ring_flag) {
706 case I915_EXEC_DEFAULT:
707 case I915_EXEC_RENDER:
708 ring_addr = RENDER_RING_ADDR;
709 descriptor = RENDER_CONTEXT_DESCRIPTOR;
710 elsp_reg = EXECLIST_SUBMITPORT_RCSUNIT;
711 elsq_reg = EXECLIST_SQ_CONTENTS0_RCSUNIT;
712 status_reg = EXECLIST_STATUS_RCSUNIT;
713 control_reg = EXECLIST_CONTROL_RCSUNIT;
714 break;
715 case I915_EXEC_BSD:
716 ring_addr = VIDEO_RING_ADDR;
717 descriptor = VIDEO_CONTEXT_DESCRIPTOR;
718 elsp_reg = EXECLIST_SUBMITPORT_VCSUNIT0;
719 elsq_reg = EXECLIST_SQ_CONTENTS0_VCSUNIT0;
720 status_reg = EXECLIST_STATUS_VCSUNIT0;
721 control_reg = EXECLIST_CONTROL_VCSUNIT0;
722 break;
723 case I915_EXEC_BLT:
724 ring_addr = BLITTER_RING_ADDR;
725 descriptor = BLITTER_CONTEXT_DESCRIPTOR;
726 elsp_reg = EXECLIST_SUBMITPORT_BCSUNIT;
727 elsq_reg = EXECLIST_SQ_CONTENTS0_BCSUNIT;
728 status_reg = EXECLIST_STATUS_BCSUNIT;
729 control_reg = EXECLIST_CONTROL_BCSUNIT;
730 break;
731 default:
732 unreachable("unknown ring");
733 }
734
735 mem_trace_memory_write_header_out(ring_addr, 16,
736 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
737 dword_out(AUB_MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965 | (3 - 2));
738 dword_out(batch_offset & 0xFFFFFFFF);
739 dword_out(batch_offset >> 32);
740 dword_out(0 /* MI_NOOP */);
741
742 mem_trace_memory_write_header_out(ring_addr + 8192 + 20, 4,
743 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
744 dword_out(0); /* RING_BUFFER_HEAD */
745 mem_trace_memory_write_header_out(ring_addr + 8192 + 28, 4,
746 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
747 dword_out(16); /* RING_BUFFER_TAIL */
748
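   /* Gen11+ submits by loading the context descriptor into the ELSQ contents
    * registers and writing the execlist control register; gen8-10 instead
    * write the two execlist elements straight to the ELSP submit port: a
    * null second element followed by the descriptor, high dword first.
    */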
749 if (devinfo.gen >= 11) {
750 register_write_out(elsq_reg, descriptor & 0xFFFFFFFF);
751 register_write_out(elsq_reg + sizeof(uint32_t), descriptor >> 32);
752 register_write_out(control_reg, 1);
753 } else {
754 register_write_out(elsp_reg, 0);
755 register_write_out(elsp_reg, 0);
756 register_write_out(elsp_reg, descriptor >> 32);
757 register_write_out(elsp_reg, descriptor & 0xFFFFFFFF);
758 }
759
760 dword_out(CMD_MEM_TRACE_REGISTER_POLL | (5 + 1 - 1));
761 dword_out(status_reg);
762 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
763 AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
764 if (devinfo.gen >= 11) {
765 dword_out(0x00000001); /* mask lo */
766 dword_out(0x00000000); /* mask hi */
767 dword_out(0x00000001);
768 } else {
769 dword_out(0x00000010); /* mask lo */
770 dword_out(0x00000000); /* mask hi */
771 dword_out(0x00000000);
772 }
773 }
774
775 static void
776 aub_dump_ringbuffer(uint64_t batch_offset, uint64_t offset, int ring_flag)
777 {
778 uint32_t ringbuffer[4096];
779 unsigned aub_mi_bbs_len;
780 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
781 int ring_count = 0;
782
783 if (ring_flag == I915_EXEC_BSD)
784 ring = AUB_TRACE_TYPE_RING_PRB1;
785 else if (ring_flag == I915_EXEC_BLT)
786 ring = AUB_TRACE_TYPE_RING_PRB2;
787
788 /* Make a ring buffer to execute our batchbuffer. */
789 memset(ringbuffer, 0, sizeof(ringbuffer));
790
791 aub_mi_bbs_len = addr_bits > 32 ? 3 : 2;
792 ringbuffer[ring_count] = AUB_MI_BATCH_BUFFER_START | (aub_mi_bbs_len - 2);
793 write_reloc(&ringbuffer[ring_count + 1], batch_offset);
794 ring_count += aub_mi_bbs_len;
795
796 /* Write out the ring. This appears to trigger execution of
797 * the ring in the simulator.
798 */
799 dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
800 ((addr_bits > 32 ? 6 : 5) - 2));
801 dword_out(AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
802 dword_out(0); /* general/surface subtype */
803 dword_out(offset);
804 dword_out(ring_count * 4);
805 if (addr_bits > 32)
806 dword_out(offset >> 32);
807
808 data_out(ringbuffer, ring_count * 4);
809 }
810
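/* Apply the relocations the kernel would otherwise perform: copy the BO's
 * contents and patch each relocation offset with the target BO's assigned
 * offset plus delta, honoring I915_EXEC_HANDLE_LUT indexing of the target
 * handle.
 */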
811 static void *
812 relocate_bo(struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
813 const struct drm_i915_gem_exec_object2 *obj)
814 {
815 const struct drm_i915_gem_exec_object2 *exec_objects =
816 (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
817 const struct drm_i915_gem_relocation_entry *relocs =
818 (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
819 void *relocated;
820 int handle;
821
822 relocated = malloc(bo->size);
823 fail_if(relocated == NULL, "intel_aubdump: out of memory\n");
824 memcpy(relocated, GET_PTR(bo->map), bo->size);
825 for (size_t i = 0; i < obj->relocation_count; i++) {
826 fail_if(relocs[i].offset >= bo->size, "intel_aubdump: reloc outside bo\n");
827
828 if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
829 handle = exec_objects[relocs[i].target_handle].handle;
830 else
831 handle = relocs[i].target_handle;
832
833 write_reloc(((char *)relocated) + relocs[i].offset,
834 get_bo(handle)->offset + relocs[i].delta);
835 }
836
837 return relocated;
838 }
839
840 static int
841 gem_ioctl(int fd, unsigned long request, void *argp)
842 {
843 int ret;
844
845 do {
846 ret = libc_ioctl(fd, request, argp);
847 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
848
849 return ret;
850 }
851
852 static void *
853 gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
854 {
855 struct drm_i915_gem_mmap mmap = {
856 .handle = handle,
857 .offset = offset,
858 .size = size
859 };
860
861 if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
862 return MAP_FAILED;
863
864 return (void *)(uintptr_t) mmap.addr_ptr;
865 }
866
867 static int
868 gem_get_param(int fd, uint32_t param)
869 {
870 int value;
871 drm_i915_getparam_t gp = {
872 .param = param,
873 .value = &value
874 };
875
876 if (gem_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1)
877 return 0;
878
879 return value;
880 }
881
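/* Core of the dumper: lazily identify the chipset and write the AUB header
 * on the first execbuffer, assign an offset to every BO (respecting
 * EXEC_OBJECT_PINNED), mmap and relocate their contents into the dump, and
 * then emit either an execlist submission or a legacy ring buffer pointing
 * at the batch.
 */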
882 static void
883 dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
884 {
885 struct drm_i915_gem_exec_object2 *exec_objects =
886 (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
887 uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
888 uint32_t offset;
889 struct drm_i915_gem_exec_object2 *obj;
890 struct bo *bo, *batch_bo;
891 int batch_index;
892 void *data;
893
894 /* We can't do this at open time as we're not yet authenticated. */
895 if (device == 0) {
896 device = gem_get_param(fd, I915_PARAM_CHIPSET_ID);
897 fail_if(device == 0 || devinfo.gen == 0, "failed to identify chipset\n");
898 }
899 if (devinfo.gen == 0) {
900 fail_if(!gen_get_device_info(device, &devinfo),
901 "failed to identify chipset=0x%x\n", device);
902
903 addr_bits = devinfo.gen >= 8 ? 48 : 32;
904
905 if (use_execlists())
906 write_execlists_header();
907 else
908 write_legacy_header();
909
910 if (verbose)
911 printf("[intel_aubdump: running, "
912 "output file %s, chipset id 0x%04x, gen %d]\n",
913 filename, device, devinfo.gen);
914 }
915
916 if (use_execlists())
917 offset = 0x1000;
918 else
919 offset = gtt_size();
920
921 if (verbose)
922 printf("Dumping execbuffer2:\n");
923
924 for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
925 obj = &exec_objects[i];
926 bo = get_bo(obj->handle);
927
928 /* If bo->size == 0, this means they passed us an invalid
929 * buffer. The kernel will reject it and so should we.
930 */
931 if (bo->size == 0) {
932 if (verbose)
933 printf("BO #%d is invalid!\n", obj->handle);
934 return;
935 }
936
937 if (obj->flags & EXEC_OBJECT_PINNED) {
938 bo->offset = obj->offset;
939 if (verbose)
940 printf("BO #%d (%dB) pinned @ 0x%lx\n",
941 obj->handle, bo->size, bo->offset);
942 } else {
943 if (obj->alignment != 0)
944 offset = align_u32(offset, obj->alignment);
945 bo->offset = offset;
946 if (verbose)
947 printf("BO #%d (%dB) @ 0x%lx\n", obj->handle,
948 bo->size, bo->offset);
949 offset = align_u32(offset + bo->size + 4095, 4096);
950 }
951
952 if (bo->map == NULL && bo->size > 0)
953 bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
954 fail_if(bo->map == MAP_FAILED, "intel_aubdump: bo mmap failed\n");
955
956 if (use_execlists())
957 map_ppgtt(bo->offset, bo->size);
958 }
959
960 batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
961 execbuffer2->buffer_count - 1;
962 batch_bo = get_bo(exec_objects[batch_index].handle);
963 for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
964 obj = &exec_objects[i];
965 bo = get_bo(obj->handle);
966
967 if (obj->relocation_count > 0)
968 data = relocate_bo(bo, execbuffer2, obj);
969 else
970 data = bo->map;
971
972 if (bo == batch_bo) {
973 aub_write_trace_block(AUB_TRACE_TYPE_BATCH,
974 data, bo->size, bo->offset);
975 } else {
976 aub_write_trace_block(AUB_TRACE_TYPE_NOTYPE,
977 data, bo->size, bo->offset);
978 }
979 if (data != bo->map)
980 free(data);
981 }
982
983 if (use_execlists()) {
984 aub_dump_execlist(batch_bo->offset +
985 execbuffer2->batch_start_offset, ring_flag);
986 } else {
987 /* Dump ring buffer */
988 aub_dump_ringbuffer(batch_bo->offset +
989 execbuffer2->batch_start_offset, offset,
990 ring_flag);
991 }
992
993 for (int i = 0; i < ARRAY_SIZE(files); i++) {
994 if (files[i] != NULL)
995 fflush(files[i]);
996 }
997
998 if (device_override &&
999 (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
1000 struct drm_i915_gem_exec_fence *fences =
1001 (void*)(uintptr_t)execbuffer2->cliprects_ptr;
1002 for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
1003 if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
1004 struct drm_syncobj_array arg = {
1005 .handles = (uintptr_t)&fences[i].handle,
1006 .count_handles = 1,
1007 .pad = 0,
1008 };
1009 libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
1010 }
1011 }
1012 }
1013 }
1014
1015 static void
1016 add_new_bo(int handle, uint64_t size, void *map)
1017 {
1018 struct bo *bo = &bos[handle];
1019
1020 fail_if(handle >= MAX_BO_COUNT, "intel_aubdump: bo handle out of range\n");
1021 fail_if(size == 0, "intel_aubdump: bo size is invalid\n");
1022
1023 bo->size = size;
1024 bo->map = map;
1025 }
1026
1027 static void
1028 remove_bo(int handle)
1029 {
1030 struct bo *bo = get_bo(handle);
1031
1032 if (bo->map && !IS_USERPTR(bo->map))
1033 munmap(bo->map, bo->size);
1034 bo->size = 0;
1035 bo->map = NULL;
1036 }
1037
1038 __attribute__ ((visibility ("default"))) int
1039 close(int fd)
1040 {
1041 if (fd == drm_fd)
1042 drm_fd = -1;
1043
1044 return libc_close(fd);
1045 }
1046
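/* Spawn the configured command with a pipe connected to its stdin and return
 * the write end as a FILE*, so the AUB stream can be piped to a consumer as
 * it is generated.  The command string uses ',' as its argument separator.
 */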
1047 static FILE *
1048 launch_command(char *command)
1049 {
1050 int i = 0, fds[2];
1051 char **args = calloc(strlen(command), sizeof(char *));
1052 char *iter = command;
1053
1054 args[i++] = iter = command;
1055
1056 while ((iter = strstr(iter, ",")) != NULL) {
1057 *iter = '\0';
1058 iter += 1;
1059 args[i++] = iter;
1060 }
1061
1062 if (pipe(fds) == -1)
1063 return NULL;
1064
1065 switch (fork()) {
1066 case 0:
1067 dup2(fds[0], 0);
1068 fail_if(execvp(args[0], args) == -1,
1069 "intel_aubdump: failed to launch child command\n");
1070 return NULL;
1071
1072 default:
1073 free(args);
1074 return fdopen(fds[1], "w");
1075
1076 case -1:
1077 return NULL;
1078 }
1079 }
1080
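/* One-time setup, run on the first intercepted DRM ioctl: parse the
 * key=value configuration handed to us on file descriptor 3 (verbose,
 * device, file, command) and allocate the BO tracking table.
 */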
1081 static void
1082 maybe_init(void)
1083 {
1084 static bool initialized = false;
1085 FILE *config;
1086 char *key, *value;
1087
1088 if (initialized)
1089 return;
1090
1091 initialized = true;
1092
1093 config = fdopen(3, "r");
1094 while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
1095 if (!strcmp(key, "verbose")) {
1096 if (!strcmp(value, "1")) {
1097 verbose = 1;
1098 } else if (!strcmp(value, "2")) {
1099 verbose = 2;
1100 }
1101 } else if (!strcmp(key, "device")) {
1102 fail_if(sscanf(value, "%i", &device) != 1,
1103 "intel_aubdump: failed to parse device id '%s'",
1104 value);
1105 device_override = true;
1106 } else if (!strcmp(key, "file")) {
1107 filename = strdup(value);
1108 files[0] = fopen(filename, "w+");
1109 fail_if(files[0] == NULL,
1110 "intel_aubdump: failed to open file '%s'\n",
1111 filename);
1112 } else if (!strcmp(key, "command")) {
1113 files[1] = launch_command(value);
1114 fail_if(files[1] == NULL,
1115 "intel_aubdump: failed to launch command '%s'\n",
1116 value);
1117 } else {
1118 fprintf(stderr, "intel_aubdump: unknown option '%s'\n", key);
1119 }
1120
1121 free(key);
1122 free(value);
1123 }
1124 fclose(config);
1125
1126 bos = calloc(MAX_BO_COUNT, sizeof(bos[0]));
1127 fail_if(bos == NULL, "intel_aubdump: out of memory\n");
1128 }
1129
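/* Interposed ioctl(): the DRM fd is recognized by fstat()ing it for a
 * character device with DRM_MAJOR.  GEM ioctls on that fd are shadowed to
 * track buffer objects (create, userptr, open, prime import, close) and to
 * dump every execbuffer2 before passing it on to the real driver (or
 * swallowing it entirely when a device override is active).
 */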
1130 __attribute__ ((visibility ("default"))) int
1131 ioctl(int fd, unsigned long request, ...)
1132 {
1133 va_list args;
1134 void *argp;
1135 int ret;
1136 struct stat buf;
1137
1138 va_start(args, request);
1139 argp = va_arg(args, void *);
1140 va_end(args);
1141
1142 if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
1143 drm_fd != fd && fstat(fd, &buf) == 0 &&
1144 (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
1145 drm_fd = fd;
1146 if (verbose)
1147 printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd);
1148 }
1149
1150 if (fd == drm_fd) {
1151 maybe_init();
1152
1153 switch (request) {
1154 case DRM_IOCTL_I915_GETPARAM: {
1155 struct drm_i915_getparam *getparam = argp;
1156
1157 if (device_override && getparam->param == I915_PARAM_CHIPSET_ID) {
1158 *getparam->value = device;
1159 return 0;
1160 }
1161
1162 ret = libc_ioctl(fd, request, argp);
1163
1164 /* If the application looks up chipset_id
1165 * (they typically do), we'll piggy-back on
1166 * their ioctl and store the id for later
1167 * use. */
1168 if (getparam->param == I915_PARAM_CHIPSET_ID)
1169 device = *getparam->value;
1170
1171 return ret;
1172 }
1173
1174 case DRM_IOCTL_I915_GEM_EXECBUFFER: {
1175 static bool once;
1176 if (!once) {
1177 fprintf(stderr, "intel_aubdump: "
1178 "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
1179 once = true;
1180 }
1181 return libc_ioctl(fd, request, argp);
1182 }
1183
1184 case DRM_IOCTL_I915_GEM_EXECBUFFER2:
1185 case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
1186 dump_execbuffer2(fd, argp);
1187 if (device_override)
1188 return 0;
1189
1190 return libc_ioctl(fd, request, argp);
1191 }
1192
1193 case DRM_IOCTL_I915_GEM_CREATE: {
1194 struct drm_i915_gem_create *create = argp;
1195
1196 ret = libc_ioctl(fd, request, argp);
1197 if (ret == 0)
1198 add_new_bo(create->handle, create->size, NULL);
1199
1200 return ret;
1201 }
1202
1203 case DRM_IOCTL_I915_GEM_USERPTR: {
1204 struct drm_i915_gem_userptr *userptr = argp;
1205
1206 ret = libc_ioctl(fd, request, argp);
1207 if (ret == 0)
1208 add_new_bo(userptr->handle, userptr->user_size,
1209 (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));
1210 return ret;
1211 }
1212
1213 case DRM_IOCTL_GEM_CLOSE: {
1214 struct drm_gem_close *close = argp;
1215
1216 remove_bo(close->handle);
1217
1218 return libc_ioctl(fd, request, argp);
1219 }
1220
1221 case DRM_IOCTL_GEM_OPEN: {
1222 struct drm_gem_open *open = argp;
1223
1224 ret = libc_ioctl(fd, request, argp);
1225 if (ret == 0)
1226 add_new_bo(open->handle, open->size, NULL);
1227
1228 return ret;
1229 }
1230
1231 case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
1232 struct drm_prime_handle *prime = argp;
1233
1234 ret = libc_ioctl(fd, request, argp);
1235 if (ret == 0) {
1236 off_t size;
1237
1238 size = lseek(prime->fd, 0, SEEK_END);
1239 fail_if(size == -1, "intel_aubdump: failed to get prime bo size\n");
1240 add_new_bo(prime->handle, size, NULL);
1241 }
1242
1243 return ret;
1244 }
1245
1246 default:
1247 return libc_ioctl(fd, request, argp);
1248 }
1249 } else {
1250 return libc_ioctl(fd, request, argp);
1251 }
1252 }
1253
1254 static void
1255 init(void)
1256 {
1257 libc_close = dlsym(RTLD_NEXT, "close");
1258 libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
1259 fail_if(libc_close == NULL || libc_ioctl == NULL,
1260 "intel_aubdump: failed to get libc ioctl or close\n");
1261 }
1262
1263 static int
1264 close_init_helper(int fd)
1265 {
1266 init();
1267 return libc_close(fd);
1268 }
1269
1270 static int
1271 ioctl_init_helper(int fd, unsigned long request, ...)
1272 {
1273 va_list args;
1274 void *argp;
1275
1276 va_start(args, request);
1277 argp = va_arg(args, void *);
1278 va_end(args);
1279
1280 init();
1281 return libc_ioctl(fd, request, argp);
1282 }
1283
1284 static void __attribute__ ((destructor))
1285 fini(void)
1286 {
1287 free(filename);
1288 for (int i = 0; i < ARRAY_SIZE(files); i++) {
1289 if (files[i] != NULL)
1290 fclose(files[i]);
1291 }
1292 free(bos);
1293 }