intel/tools/dump_gpu: Add option to print ppgtt mappings.
[mesa.git] src/intel/tools/intel_dump_gpu.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <signal.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/mman.h>
39 #include <dlfcn.h>
40 #include <i915_drm.h>
41 #include <inttypes.h>
42
43 #include "intel_aub.h"
44
45 #include "dev/gen_device_info.h"
46 #include "util/macros.h"
47
48 #ifndef ALIGN
49 #define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
50 #endif
51
52 #define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
53 #define MI_LRI_FORCE_POSTED (1<<12)
54
55 #define MI_BATCH_NON_SECURE_I965 (1 << 8)
56
57 #define MI_BATCH_BUFFER_END (0xA << 23)
58
59 #define min(a, b) ({ \
60 __typeof(a) _a = (a); \
61 __typeof(b) _b = (b); \
62 _a < _b ? _a : _b; \
63 })
64
65 #define max(a, b) ({ \
66 __typeof(a) _a = (a); \
67 __typeof(b) _b = (b); \
68 _a > _b ? _a : _b; \
69 })
70
71 #define HWS_PGA_RCSUNIT 0x02080
72 #define HWS_PGA_VCSUNIT0 0x12080
73 #define HWS_PGA_BCSUNIT 0x22080
74
75 #define GFX_MODE_RCSUNIT 0x0229c
76 #define GFX_MODE_VCSUNIT0 0x1229c
77 #define GFX_MODE_BCSUNIT 0x2229c
78
79 #define EXECLIST_SUBMITPORT_RCSUNIT 0x02230
80 #define EXECLIST_SUBMITPORT_VCSUNIT0 0x12230
81 #define EXECLIST_SUBMITPORT_BCSUNIT 0x22230
82
83 #define EXECLIST_STATUS_RCSUNIT 0x02234
84 #define EXECLIST_STATUS_VCSUNIT0 0x12234
85 #define EXECLIST_STATUS_BCSUNIT 0x22234
86
87 #define EXECLIST_SQ_CONTENTS0_RCSUNIT 0x02510
88 #define EXECLIST_SQ_CONTENTS0_VCSUNIT0 0x12510
89 #define EXECLIST_SQ_CONTENTS0_BCSUNIT 0x22510
90
91 #define EXECLIST_CONTROL_RCSUNIT 0x02550
92 #define EXECLIST_CONTROL_VCSUNIT0 0x12550
93 #define EXECLIST_CONTROL_BCSUNIT 0x22550
94
95 #define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)
96
97 #define PTE_SIZE 4
98 #define GEN8_PTE_SIZE 8
99
100 #define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
101 #define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)
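/* The legacy (ring buffer) dump path emulates a single flat 64 MiB GTT:
 * one page table entry per 4 KiB page, i.e. NUM_PT_ENTRIES = 16384.
 * gtt_size() below derives the size of that entry array, picking 8-byte
 * or 4-byte entries depending on how many address bits the device has.
 */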
102
103 #define RING_SIZE (1 * 4096)
104 #define PPHWSP_SIZE (1 * 4096)
105 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * 4096)
106 #define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
107 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * 4096)
108 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * 4096)
109 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * 4096)
110
111
112 #define STATIC_GGTT_MAP_START 0
113
114 #define RENDER_RING_ADDR STATIC_GGTT_MAP_START
115 #define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)
116
117 #define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
118 #define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)
119
120 #define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
121 #define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)
122
123 #define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
124 #define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)
125
126 #define PML4_PHYS_ADDR ((uint64_t)(STATIC_GGTT_MAP_END))
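/* Static GGTT layout used by the execlist path: the ring, the per-process
 * HWSP and the logical ring context for the render, blitter and video
 * engines are packed back to back starting at GGTT address 0, and the
 * page serving as the PPGTT PML4 is placed right after that block.
 */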
127
128 #define CONTEXT_FLAGS (0x339) /* Normal Priority | L3-LLC Coherency |
129 * PPGTT Enabled |
130 * Legacy Context with 64 bit VA support |
131 * Valid
132 */
133
134 #define RENDER_CONTEXT_DESCRIPTOR ((uint64_t)1 << 62 | RENDER_CONTEXT_ADDR | CONTEXT_FLAGS)
135 #define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 62 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
136 #define VIDEO_CONTEXT_DESCRIPTOR ((uint64_t)3 << 62 | VIDEO_CONTEXT_ADDR | CONTEXT_FLAGS)
137
138 static const uint32_t render_context_init[GEN9_LR_CONTEXT_RENDER_SIZE / /* Choose the largest */
139 sizeof(uint32_t)] = {
140 0 /* MI_NOOP */,
141 MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED,
142 0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
143 0x2034 /* RING_HEAD */, 0,
144 0x2030 /* RING_TAIL */, 0,
145 0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR,
146 0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
147 0x2168 /* BB_HEAD_U */, 0,
148 0x2140 /* BB_HEAD_L */, 0,
149 0x2110 /* BB_STATE */, 0,
150 0x211C /* SECOND_BB_HEAD_U */, 0,
151 0x2114 /* SECOND_BB_HEAD_L */, 0,
152 0x2118 /* SECOND_BB_STATE */, 0,
153 0x21C0 /* BB_PER_CTX_PTR */, 0,
154 0x21C4 /* RCS_INDIRECT_CTX */, 0,
155 0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
156 /* MI_NOOP */
157 0, 0,
158
159 0 /* MI_NOOP */,
160 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
161 0x23A8 /* CTX_TIMESTAMP */, 0,
162 0x228C /* PDP3_UDW */, 0,
163 0x2288 /* PDP3_LDW */, 0,
164 0x2284 /* PDP2_UDW */, 0,
165 0x2280 /* PDP2_LDW */, 0,
166 0x227C /* PDP1_UDW */, 0,
167 0x2278 /* PDP1_LDW */, 0,
168 0x2274 /* PDP0_UDW */, PML4_PHYS_ADDR >> 32,
169 0x2270 /* PDP0_LDW */, PML4_PHYS_ADDR,
170 /* MI_NOOP */
171 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
172
173 0 /* MI_NOOP */,
174 MI_LOAD_REGISTER_IMM_n(1),
175 0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
176 MI_BATCH_BUFFER_END
177 };
178
179 static const uint32_t blitter_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
180 sizeof(uint32_t)] = {
181 0 /* MI_NOOP */,
182 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
183 0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
184 0x22034 /* RING_HEAD */, 0,
185 0x22030 /* RING_TAIL */, 0,
186 0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR,
187 0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
188 0x22168 /* BB_HEAD_U */, 0,
189 0x22140 /* BB_HEAD_L */, 0,
190 0x22110 /* BB_STATE */, 0,
191 0x2211C /* SECOND_BB_HEAD_U */, 0,
192 0x22114 /* SECOND_BB_HEAD_L */, 0,
193 0x22118 /* SECOND_BB_STATE */, 0,
194 /* MI_NOOP */
195 0, 0, 0, 0, 0, 0, 0, 0,
196
197 0 /* MI_NOOP */,
198 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
199 0x223A8 /* CTX_TIMESTAMP */, 0,
200 0x2228C /* PDP3_UDW */, 0,
201 0x22288 /* PDP3_LDW */, 0,
202 0x22284 /* PDP2_UDW */, 0,
203 0x22280 /* PDP2_LDW */, 0,
204 0x2227C /* PDP1_UDW */, 0,
205 0x22278 /* PDP1_LDW */, 0,
206 0x22274 /* PDP0_UDW */, PML4_PHYS_ADDR >> 32,
207 0x22270 /* PDP0_LDW */, PML4_PHYS_ADDR,
208 /* MI_NOOP */
209 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
210
211 MI_BATCH_BUFFER_END
212 };
213
214 static const uint32_t video_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
215 sizeof(uint32_t)] = {
216 0 /* MI_NOOP */,
217 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
218 0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
219 0x1C034 /* RING_HEAD */, 0,
220 0x1C030 /* RING_TAIL */, 0,
221 0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR,
222 0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
223 0x1C168 /* BB_HEAD_U */, 0,
224 0x1C140 /* BB_HEAD_L */, 0,
225 0x1C110 /* BB_STATE */, 0,
226 0x1C11C /* SECOND_BB_HEAD_U */, 0,
227 0x1C114 /* SECOND_BB_HEAD_L */, 0,
228 0x1C118 /* SECOND_BB_STATE */, 0,
229 /* MI_NOOP */
230 0, 0, 0, 0, 0, 0, 0, 0,
231
232 0 /* MI_NOOP */,
233 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
234 0x1C3A8 /* CTX_TIMESTAMP */, 0,
235 0x1C28C /* PDP3_UDW */, 0,
236 0x1C288 /* PDP3_LDW */, 0,
237 0x1C284 /* PDP2_UDW */, 0,
238 0x1C280 /* PDP2_LDW */, 0,
239 0x1C27C /* PDP1_UDW */, 0,
240 0x1C278 /* PDP1_LDW */, 0,
241 0x1C274 /* PDP0_UDW */, PML4_PHYS_ADDR >> 32,
242 0x1C270 /* PDP0_LDW */, PML4_PHYS_ADDR,
243 /* MI_NOOP */
244 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
245
246 MI_BATCH_BUFFER_END
247 };
248
249 static int close_init_helper(int fd);
250 static int ioctl_init_helper(int fd, unsigned long request, ...);
251
252 static int (*libc_close)(int fd) = close_init_helper;
253 static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;
254
255 static int drm_fd = -1;
256 static char *filename = NULL;
257 static FILE *files[2] = { NULL, NULL };
258 static struct gen_device_info devinfo = {0};
259 static int verbose = 0;
260 static bool device_override;
261 static uint32_t device;
262 static int addr_bits = 0;
263
264 #define MAX_BO_COUNT 64 * 1024
265
266 struct bo {
267 uint32_t size;
268 uint64_t offset;
269 void *map;
270 };
271
272 static struct bo *bos;
273
274 #define DRM_MAJOR 226
275
276 /* We set bit 0 in the map pointer for userptr BOs so we know not to
277 * munmap them on DRM_IOCTL_GEM_CLOSE.
278 */
279 #define USERPTR_FLAG 1
280 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
281 #define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
282
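/* Gen8 and later are dumped using the execlist model: per-engine logical
 * ring contexts submitted through the execlist submit port / submit queue
 * registers defined above. Earlier generations use the legacy AUB ring
 * buffer commands instead.
 */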
283 static inline bool use_execlists(void)
284 {
285 return devinfo.gen >= 8;
286 }
287
288 static void __attribute__ ((format(__printf__, 2, 3)))
289 fail_if(int cond, const char *format, ...)
290 {
291 va_list args;
292
293 if (!cond)
294 return;
295
296 va_start(args, format);
297 vfprintf(stderr, format, args);
298 va_end(args);
299
300 raise(SIGTRAP);
301 }
302
303 static struct bo *
304 get_bo(uint32_t handle)
305 {
306 struct bo *bo;
307
308 fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
309 bo = &bos[handle];
310
311 return bo;
312 }
313
314 static inline uint32_t
315 align_u32(uint32_t v, uint32_t a)
316 {
317 return (v + a - 1) & ~(a - 1);
318 }
319
320 static void
321 dword_out(uint32_t data)
322 {
323 for (int i = 0; i < ARRAY_SIZE (files); i++) {
324 if (files[i] == NULL)
325 continue;
326
327 fail_if(fwrite(&data, 1, 4, files[i]) == 0,
328 "Writing to output failed\n");
329 }
330 }
331
332 static void
333 data_out(const void *data, size_t size)
334 {
335 if (size == 0)
336 return;
337
338 for (int i = 0; i < ARRAY_SIZE (files); i++) {
339 if (files[i] == NULL)
340 continue;
341
342 fail_if(fwrite(data, 1, size, files[i]) == 0,
343 "Writing to output failed\n");
344 }
345 }
346
347 static uint32_t
348 gtt_size(void)
349 {
350 return NUM_PT_ENTRIES * (addr_bits > 32 ? GEN8_PTE_SIZE : PTE_SIZE);
351 }
352
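/* Emit the 5-dword header of a memory write packet (command, address
 * low/high, address space, length). The caller must follow it with the
 * payload, padded out to a dword boundary.
 */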
353 static void
354 mem_trace_memory_write_header_out(uint64_t addr, uint32_t len,
355 uint32_t addr_space)
356 {
357 uint32_t dwords = ALIGN(len, sizeof(uint32_t)) / sizeof(uint32_t);
358
359 dword_out(CMD_MEM_TRACE_MEMORY_WRITE | (5 + dwords - 1));
360 dword_out(addr & 0xFFFFFFFF); /* addr lo */
361 dword_out(addr >> 32); /* addr hi */
362 dword_out(addr_space); /* gtt */
363 dword_out(len);
364 }
365
366 static void
367 register_write_out(uint32_t addr, uint32_t value)
368 {
369 uint32_t dwords = 1;
370
371 dword_out(CMD_MEM_TRACE_REGISTER_WRITE | (5 + dwords - 1));
372 dword_out(addr);
373 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
374 AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
375 dword_out(0xFFFFFFFF); /* mask lo */
376 dword_out(0x00000000); /* mask hi */
377 dword_out(value);
378 }
379
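/* Software mirror of the 4-level PPGTT. Each node records the fake
 * "physical" page backing it in the AUB file. At the lowest level the
 * subtable slots do not point at child structs; they hold the allocated
 * physical page address itself, cast to a pointer (see
 * populate_ppgtt_table() and ppgtt_lookup()).
 */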
380 static struct ppgtt_table {
381 uint64_t phys_addr;
382 struct ppgtt_table *subtables[512];
383 } pml4 = {PML4_PHYS_ADDR};
384
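/* Fill entries [start, end] of one page table level, allocating any
 * missing subtables. Fake physical pages are handed out sequentially from
 * a bump allocator that starts right after the PML4 page, and only the
 * dirty range of entries is re-emitted to the AUB stream.
 */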
385 static void
386 populate_ppgtt_table(struct ppgtt_table *table, int start, int end,
387 int level)
388 {
389 static uint64_t phys_addrs_allocator = (PML4_PHYS_ADDR >> 12) + 1;
390 uint64_t entries[512] = {0};
391 int dirty_start = 512, dirty_end = 0;
392
393 if (verbose == 2) {
394 printf(" PPGTT (0x%016" PRIx64 "), lvl %d, start: %x, end: %x\n",
395 table->phys_addr, level, start, end);
396 }
397
398 for (int i = start; i <= end; i++) {
399 if (!table->subtables[i]) {
400 dirty_start = min(dirty_start, i);
401 dirty_end = max(dirty_end, i);
402 if (level == 1) {
403 table->subtables[i] =
404 (void *)(phys_addrs_allocator++ << 12);
405 if (verbose == 2) {
406 printf(" Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
407 i, (uint64_t)table->subtables[i]);
408 }
409 } else {
410 table->subtables[i] =
411 calloc(1, sizeof(struct ppgtt_table));
412 table->subtables[i]->phys_addr =
413 phys_addrs_allocator++ << 12;
414 if (verbose == 2) {
415 printf(" Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
416 i, table->subtables[i]->phys_addr);
417 }
418 }
419 }
420 entries[i] = 3 /* read/write | present */ |
421 (level == 1 ? (uint64_t)table->subtables[i] :
422 table->subtables[i]->phys_addr);
423 }
424
425 if (dirty_start <= dirty_end) {
426 uint64_t write_addr = table->phys_addr + dirty_start *
427 sizeof(uint64_t);
428 uint64_t write_size = (dirty_end - dirty_start + 1) *
429 sizeof(uint64_t);
430 mem_trace_memory_write_header_out(write_addr, write_size,
431 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL);
432 data_out(entries + dirty_start, write_size);
433 }
434 }
435
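/* Make sure page tables exist for every page in [start, start + size).
 * The walk below descends level by level, clamping the requested range to
 * the portion covered by the current upper-level entry before populating
 * the next level down.
 */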
436 static void
437 map_ppgtt(uint64_t start, uint64_t size)
438 {
439 uint64_t l4_start = start & 0xff8000000000;
440 uint64_t l4_end = ((start + size - 1) | 0x007fffffffff) & 0xffffffffffff;
441
442 #define L4_index(addr) (((addr) >> 39) & 0x1ff)
443 #define L3_index(addr) (((addr) >> 30) & 0x1ff)
444 #define L2_index(addr) (((addr) >> 21) & 0x1ff)
445 #define L1_index(addr) (((addr) >> 12) & 0x1ff)
446
447 #define L3_table(addr) (pml4.subtables[L4_index(addr)])
448 #define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
449 #define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])
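/* Example: the 48-bit address 0x8080604000 decodes to L4_index = 1,
 * L3_index = 2, L2_index = 3 and L1_index = 4, with the low 12 bits
 * being the offset inside the 4 KiB page.
 */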
450
451 if (verbose == 2) {
452 printf(" Mapping PPGTT address: 0x%" PRIx64 ", size: %" PRIu64"\n",
453 start, size);
454 }
455
456 populate_ppgtt_table(&pml4, L4_index(l4_start), L4_index(l4_end), 4);
457
458 for (uint64_t l4 = l4_start; l4 < l4_end; l4 += (1ULL << 39)) {
459 uint64_t l3_start = max(l4, start & 0xffffc0000000);
460 uint64_t l3_end = min(l4 + (1ULL << 39),
461 ((start + size - 1) | 0x00003fffffff) & 0xffffffffffff);
462 uint64_t l3_start_idx = L3_index(l3_start);
463 uint64_t l3_end_idx = L3_index(l3_end) >= l3_start_idx ? L3_index(l3_end) : 0x1ff;
464
465 populate_ppgtt_table(L3_table(l4), l3_start_idx, l3_end_idx, 3);
466
467 for (uint64_t l3 = l3_start; l3 < l3_end; l3 += (1ULL << 30)) {
468 uint64_t l2_start = max(l3, start & 0xffffffe00000);
469 uint64_t l2_end = min(l3 + (1ULL << 30),
470 ((start + size - 1) | 0x0000001fffff) & 0xffffffffffff);
471 uint64_t l2_start_idx = L2_index(l2_start);
472 uint64_t l2_end_idx = L2_index(l2_end) >= l2_start_idx ? L2_index(l2_end) : 0x1ff;
473
474 populate_ppgtt_table(L2_table(l3), l2_start_idx, l2_end_idx, 2);
475
476 for (uint64_t l2 = l2_start; l2 < l2_end; l2 += (1ULL << 21)) {
477 uint64_t l1_start = max(l2, start & 0xfffffffff000);
478 uint64_t l1_end = min(l2 + (1ULL << 21),
479 ((start + size - 1) | 0x000000000fff) & 0xffffffffffff);
480 uint64_t l1_start_idx = L1_index(l1_start);
481 uint64_t l1_end_idx = L1_index(l1_end) >= l1_start_idx ? L1_index(l1_end) : 0x1ff;
482
483 populate_ppgtt_table(L1_table(l2), l1_start_idx, l1_end_idx, 1);
484 }
485 }
486 }
487 }
488
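/* Return the fake physical page backing a PPGTT address. The address must
 * already have been covered by a map_ppgtt() call; note that the offset
 * within the page is dropped, so callers are expected to pass 4 KiB
 * aligned addresses.
 */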
489 static uint64_t
490 ppgtt_lookup(uint64_t ppgtt_addr)
491 {
492 return (uint64_t)L1_table(ppgtt_addr)->subtables[L1_index(ppgtt_addr)];
493 }
494
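/* Write the execlist-mode file header: the AUB version packet, a 1:1
 * static GGTT mapping for the region laid out above, the ring / PPHWSP /
 * context images for the render, blitter and video engines, and finally
 * the HWS_PGA / GFX_MODE register writes that point each engine's status
 * page at its context's PPHWSP and enable execlist mode.
 */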
495 static void
496 write_execlists_header(void)
497 {
498 char app_name[8 * 4];
499 int app_name_len, dwords;
500
501 app_name_len =
502 snprintf(app_name, sizeof(app_name), "PCI-ID=0x%X %s", device,
503 program_invocation_short_name);
504 app_name_len = ALIGN(app_name_len, sizeof(uint32_t));
505
506 dwords = 5 + app_name_len / sizeof(uint32_t);
507 dword_out(CMD_MEM_TRACE_VERSION | (dwords - 1));
508 dword_out(AUB_MEM_TRACE_VERSION_FILE_VERSION);
509 dword_out(devinfo.simulator_id << AUB_MEM_TRACE_VERSION_DEVICE_SHIFT);
510 dword_out(0); /* version */
511 dword_out(0); /* version */
512 data_out(app_name, app_name_len);
513
514 /* GGTT PT */
515 uint32_t ggtt_ptes = STATIC_GGTT_MAP_SIZE >> 12;
516
517 mem_trace_memory_write_header_out(STATIC_GGTT_MAP_START >> 12,
518 ggtt_ptes * GEN8_PTE_SIZE,
519 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY);
520 for (uint32_t i = 0; i < ggtt_ptes; i++) {
521 dword_out(1 + 0x1000 * i + STATIC_GGTT_MAP_START);
522 dword_out(0);
523 }
524
525 /* RENDER_RING */
526 mem_trace_memory_write_header_out(RENDER_RING_ADDR, RING_SIZE,
527 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
528 for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
529 dword_out(0);
530
531 /* RENDER_PPHWSP */
532 mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR,
533 PPHWSP_SIZE +
534 sizeof(render_context_init),
535 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
536 for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
537 dword_out(0);
538
539 /* RENDER_CONTEXT */
540 data_out(render_context_init, sizeof(render_context_init));
541
542 /* BLITTER_RING */
543 mem_trace_memory_write_header_out(BLITTER_RING_ADDR, RING_SIZE,
544 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
545 for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
546 dword_out(0);
547
548 /* BLITTER_PPHWSP */
549 mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR,
550 PPHWSP_SIZE +
551 sizeof(blitter_context_init),
552 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
553 for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
554 dword_out(0);
555
556 /* BLITTER_CONTEXT */
557 data_out(blitter_context_init, sizeof(blitter_context_init));
558
559 /* VIDEO_RING */
560 mem_trace_memory_write_header_out(VIDEO_RING_ADDR, RING_SIZE,
561 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
562 for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
563 dword_out(0);
564
565 /* VIDEO_PPHWSP */
566 mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR,
567 PPHWSP_SIZE +
568 sizeof(video_context_init),
569 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
570 for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
571 dword_out(0);
572
573 /* VIDEO_CONTEXT */
574 data_out(video_context_init, sizeof(video_context_init));
575
576 register_write_out(HWS_PGA_RCSUNIT, RENDER_CONTEXT_ADDR);
577 register_write_out(HWS_PGA_VCSUNIT0, VIDEO_CONTEXT_ADDR);
578 register_write_out(HWS_PGA_BCSUNIT, BLITTER_CONTEXT_ADDR);
579
580 register_write_out(GFX_MODE_RCSUNIT, 0x80008000 /* execlist enable */);
581 register_write_out(GFX_MODE_VCSUNIT0, 0x80008000 /* execlist enable */);
582 register_write_out(GFX_MODE_BCSUNIT, 0x80008000 /* execlist enable */);
583 }
584
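/* Write the legacy file header: an AUB_HEADER packet with the application
 * name and PCI-ID comment, followed by a GTT whose entries map pages 1:1
 * to "physical" pages starting at 0x200000 (flag bits 0x3 in the low bits
 * of each entry).
 */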
585 static void write_legacy_header(void)
586 {
587 char app_name[8 * 4];
588 char comment[16];
589 int comment_len, comment_dwords, dwords;
590 uint32_t entry = 0x200003;
591
592 comment_len = snprintf(comment, sizeof(comment), "PCI-ID=0x%x", device);
593 comment_dwords = ((comment_len + 3) / 4);
594
595 /* Start with a (required) version packet. */
596 dwords = 13 + comment_dwords;
597 dword_out(CMD_AUB_HEADER | (dwords - 2));
598 dword_out((4 << AUB_HEADER_MAJOR_SHIFT) |
599 (0 << AUB_HEADER_MINOR_SHIFT));
600
601 /* Next comes a 32-byte application name. */
602 strncpy(app_name, program_invocation_short_name, sizeof(app_name));
603 app_name[sizeof(app_name) - 1] = 0;
604 data_out(app_name, sizeof(app_name));
605
606 dword_out(0); /* timestamp */
607 dword_out(0); /* timestamp */
608 dword_out(comment_len);
609 data_out(comment, comment_dwords * 4);
610
611 /* Set up the GTT. The max we can handle is 64M */
612 dword_out(CMD_AUB_TRACE_HEADER_BLOCK | ((addr_bits > 32 ? 6 : 5) - 2));
613 dword_out(AUB_TRACE_MEMTYPE_GTT_ENTRY |
614 AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE);
615 dword_out(0); /* subtype */
616 dword_out(0); /* offset */
617 dword_out(gtt_size()); /* size */
618 if (addr_bits > 32)
619 dword_out(0);
620 for (uint32_t i = 0; i < NUM_PT_ENTRIES; i++) {
621 dword_out(entry + 0x1000 * i);
622 if (addr_bits > 32)
623 dword_out(0);
624 }
625 }
626
627 /**
628 * Break up large objects into multiple writes. Otherwise a 128kb VBO
629 * would overflow the 16 bits of size field in the packet header and
630 * everything goes badly after that.
631 */
632 static void
633 aub_write_trace_block(uint32_t type, void *virtual, uint32_t size, uint64_t gtt_offset)
634 {
635 uint32_t block_size;
636 uint32_t subtype = 0;
637 static const char null_block[8 * 4096];
638
639 for (uint32_t offset = 0; offset < size; offset += block_size) {
640 block_size = min(8 * 4096, size - offset);
641
642 if (use_execlists()) {
643 block_size = min(4096, block_size);
644 mem_trace_memory_write_header_out(ppgtt_lookup(gtt_offset + offset),
645 block_size,
646 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL);
647 } else {
648 dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
649 ((addr_bits > 32 ? 6 : 5) - 2));
650 dword_out(AUB_TRACE_MEMTYPE_GTT |
651 type | AUB_TRACE_OP_DATA_WRITE);
652 dword_out(subtype);
653 dword_out(gtt_offset + offset);
654 dword_out(align_u32(block_size, 4));
655 if (addr_bits > 32)
656 dword_out((gtt_offset + offset) >> 32);
657 }
658
659 if (virtual)
660 data_out(((char *) GET_PTR(virtual)) + offset, block_size);
661 else
662 data_out(null_block, block_size);
663
664 /* Pad to a multiple of 4 bytes. */
665 data_out(null_block, -block_size & 3);
666 }
667 }
668
669 static void
670 write_reloc(void *p, uint64_t v)
671 {
672 if (addr_bits > 32) {
673 /* From the Broadwell PRM Vol. 2a,
674 * MI_LOAD_REGISTER_MEM::MemoryAddress:
675 *
676 * "This field specifies the address of the memory
677 * location where the register value specified in the
678 * DWord above will read from. The address specifies
679 * the DWord location of the data. Range =
680 * GraphicsVirtualAddress[63:2] for a DWord register
681 * GraphicsAddress [63:48] are ignored by the HW and
682 * assumed to be in correct canonical form [63:48] ==
683 * [47]."
684 *
685 * In practice, this will always mean the top bits are zero
686 * because of the GTT size limitation of the aubdump tool.
687 */
688 const int shift = 63 - 47;
689 *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
690 } else {
691 *(uint32_t *)p = v;
692 }
693 }
694
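/* Submit a batch on the execlist path: write an MI_BATCH_BUFFER_START
 * pointing at the batch into the engine's ring, update RING_HEAD and
 * RING_TAIL in the context image (ring_addr + 8192 is where the context
 * register state starts), submit the context descriptor through ELSQ and
 * the control register on gen11+ or with four writes to the ELSP port on
 * older gens, then poll the execlist status register until the submission
 * completes.
 */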
695 static void
696 aub_dump_execlist(uint64_t batch_offset, int ring_flag)
697 {
698 uint32_t ring_addr;
699 uint64_t descriptor;
700 uint32_t elsp_reg;
701 uint32_t elsq_reg;
702 uint32_t status_reg;
703 uint32_t control_reg;
704
705 switch (ring_flag) {
706 case I915_EXEC_DEFAULT:
707 case I915_EXEC_RENDER:
708 ring_addr = RENDER_RING_ADDR;
709 descriptor = RENDER_CONTEXT_DESCRIPTOR;
710 elsp_reg = EXECLIST_SUBMITPORT_RCSUNIT;
711 elsq_reg = EXECLIST_SQ_CONTENTS0_RCSUNIT;
712 status_reg = EXECLIST_STATUS_RCSUNIT;
713 control_reg = EXECLIST_CONTROL_RCSUNIT;
714 break;
715 case I915_EXEC_BSD:
716 ring_addr = VIDEO_RING_ADDR;
717 descriptor = VIDEO_CONTEXT_DESCRIPTOR;
718 elsp_reg = EXECLIST_SUBMITPORT_VCSUNIT0;
719 elsq_reg = EXECLIST_SQ_CONTENTS0_VCSUNIT0;
720 status_reg = EXECLIST_STATUS_VCSUNIT0;
721 control_reg = EXECLIST_CONTROL_VCSUNIT0;
722 break;
723 case I915_EXEC_BLT:
724 ring_addr = BLITTER_RING_ADDR;
725 descriptor = BLITTER_CONTEXT_DESCRIPTOR;
726 elsp_reg = EXECLIST_SUBMITPORT_BCSUNIT;
727 elsq_reg = EXECLIST_SQ_CONTENTS0_BCSUNIT;
728 status_reg = EXECLIST_STATUS_BCSUNIT;
729 control_reg = EXECLIST_CONTROL_BCSUNIT;
730 break;
731 }
732
733 mem_trace_memory_write_header_out(ring_addr, 16,
734 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
735 dword_out(AUB_MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965 | (3 - 2));
736 dword_out(batch_offset & 0xFFFFFFFF);
737 dword_out(batch_offset >> 32);
738 dword_out(0 /* MI_NOOP */);
739
740 mem_trace_memory_write_header_out(ring_addr + 8192 + 20, 4,
741 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
742 dword_out(0); /* RING_BUFFER_HEAD */
743 mem_trace_memory_write_header_out(ring_addr + 8192 + 28, 4,
744 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
745 dword_out(16); /* RING_BUFFER_TAIL */
746
747 if (devinfo.gen >= 11) {
748 register_write_out(elsq_reg, descriptor & 0xFFFFFFFF);
749 register_write_out(elsq_reg + sizeof(uint32_t), descriptor >> 32);
750 register_write_out(control_reg, 1);
751 } else {
752 register_write_out(elsp_reg, 0);
753 register_write_out(elsp_reg, 0);
754 register_write_out(elsp_reg, descriptor >> 32);
755 register_write_out(elsp_reg, descriptor & 0xFFFFFFFF);
756 }
757
758 dword_out(CMD_MEM_TRACE_REGISTER_POLL | (5 + 1 - 1));
759 dword_out(status_reg);
760 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
761 AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
762 if (devinfo.gen >= 11) {
763 dword_out(0x00000001); /* mask lo */
764 dword_out(0x00000000); /* mask hi */
765 dword_out(0x00000001);
766 } else {
767 dword_out(0x00000010); /* mask lo */
768 dword_out(0x00000000); /* mask hi */
769 dword_out(0x00000000);
770 }
771 }
772
773 static void
774 aub_dump_ringbuffer(uint64_t batch_offset, uint64_t offset, int ring_flag)
775 {
776 uint32_t ringbuffer[4096];
777 unsigned aub_mi_bbs_len;
778 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
779 int ring_count = 0;
780
781 if (ring_flag == I915_EXEC_BSD)
782 ring = AUB_TRACE_TYPE_RING_PRB1;
783 else if (ring_flag == I915_EXEC_BLT)
784 ring = AUB_TRACE_TYPE_RING_PRB2;
785
786 /* Make a ring buffer to execute our batchbuffer. */
787 memset(ringbuffer, 0, sizeof(ringbuffer));
788
789 aub_mi_bbs_len = addr_bits > 32 ? 3 : 2;
790 ringbuffer[ring_count] = AUB_MI_BATCH_BUFFER_START | (aub_mi_bbs_len - 2);
791 write_reloc(&ringbuffer[ring_count + 1], batch_offset);
792 ring_count += aub_mi_bbs_len;
793
794 /* Write out the ring. This appears to trigger execution of
795 * the ring in the simulator.
796 */
797 dword_out(CMD_AUB_TRACE_HEADER_BLOCK |
798 ((addr_bits > 32 ? 6 : 5) - 2));
799 dword_out(AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
800 dword_out(0); /* general/surface subtype */
801 dword_out(offset);
802 dword_out(ring_count * 4);
803 if (addr_bits > 32)
804 dword_out(offset >> 32);
805
806 data_out(ringbuffer, ring_count * 4);
807 }
808
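/* Apply the execbuffer relocation list to a malloc'ed copy of the BO so
 * the application's mapping is left untouched. Each relocation stores the
 * presumed address of the target BO plus a delta, via write_reloc().
 */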
809 static void *
810 relocate_bo(struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
811 const struct drm_i915_gem_exec_object2 *obj)
812 {
813 const struct drm_i915_gem_exec_object2 *exec_objects =
814 (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
815 const struct drm_i915_gem_relocation_entry *relocs =
816 (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
817 void *relocated;
818 int handle;
819
820 relocated = malloc(bo->size);
821 fail_if(relocated == NULL, "intel_aubdump: out of memory\n");
822 memcpy(relocated, GET_PTR(bo->map), bo->size);
823 for (size_t i = 0; i < obj->relocation_count; i++) {
824 fail_if(relocs[i].offset >= bo->size, "intel_aubdump: reloc outside bo\n");
825
826 if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
827 handle = exec_objects[relocs[i].target_handle].handle;
828 else
829 handle = relocs[i].target_handle;
830
831 write_reloc(((char *)relocated) + relocs[i].offset,
832 get_bo(handle)->offset + relocs[i].delta);
833 }
834
835 return relocated;
836 }
837
838 static int
839 gem_ioctl(int fd, unsigned long request, void *argp)
840 {
841 int ret;
842
843 do {
844 ret = libc_ioctl(fd, request, argp);
845 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
846
847 return ret;
848 }
849
850 static void *
851 gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
852 {
853 struct drm_i915_gem_mmap mmap = {
854 .handle = handle,
855 .offset = offset,
856 .size = size
857 };
858
859 if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
860 return MAP_FAILED;
861
862 return (void *)(uintptr_t) mmap.addr_ptr;
863 }
864
865 static int
866 gem_get_param(int fd, uint32_t param)
867 {
868 int value;
869 drm_i915_getparam_t gp = {
870 .param = param,
871 .value = &value
872 };
873
874 if (gem_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1)
875 return 0;
876
877 return value;
878 }
879
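/* Core of the dumper: on the first execbuffer, identify the chipset and
 * write the file header. Then assign a presumed address to every BO
 * (honouring EXEC_OBJECT_PINNED offsets), map it into the PPGTT when
 * execlists are used, write each BO's (relocated) contents into the AUB
 * stream, and finally emit a submission for the batch buffer.
 */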
880 static void
881 dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
882 {
883 struct drm_i915_gem_exec_object2 *exec_objects =
884 (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
885 uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
886 uint32_t offset;
887 struct drm_i915_gem_exec_object2 *obj;
888 struct bo *bo, *batch_bo;
889 int batch_index;
890 void *data;
891
892 /* We can't do this at open time as we're not yet authenticated. */
893 if (device == 0) {
894 device = gem_get_param(fd, I915_PARAM_CHIPSET_ID);
895 fail_if(device == 0, "failed to identify chipset\n");
896 }
897 if (devinfo.gen == 0) {
898 fail_if(!gen_get_device_info(device, &devinfo),
899 "failed to identify chipset=0x%x\n", device);
900
901 addr_bits = devinfo.gen >= 8 ? 48 : 32;
902
903 if (use_execlists())
904 write_execlists_header();
905 else
906 write_legacy_header();
907
908 if (verbose)
909 printf("[intel_aubdump: running, "
910 "output file %s, chipset id 0x%04x, gen %d]\n",
911 filename, device, devinfo.gen);
912 }
913
914 if (use_execlists())
915 offset = 0x1000;
916 else
917 offset = gtt_size();
918
919 if (verbose)
920 printf("Dumping execbuffer2:\n");
921
922 for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
923 obj = &exec_objects[i];
924 bo = get_bo(obj->handle);
925
926 /* If bo->size == 0, this means they passed us an invalid
927 * buffer. The kernel will reject it and so should we.
928 */
929 if (bo->size == 0) {
930 if (verbose)
931 printf("BO #%d is invalid!\n", obj->handle);
932 return;
933 }
934
935 if (obj->flags & EXEC_OBJECT_PINNED) {
936 bo->offset = obj->offset;
937 if (verbose)
938 printf("BO #%d (%dB) pinned @ 0x%" PRIx64 "\n",
939 obj->handle, bo->size, bo->offset);
940 } else {
941 if (obj->alignment != 0)
942 offset = align_u32(offset, obj->alignment);
943 bo->offset = offset;
944 if (verbose)
945 printf("BO #%d (%dB) @ 0x%" PRIx64 "\n", obj->handle,
946 bo->size, bo->offset);
947 offset = align_u32(offset + bo->size + 4095, 4096);
948 }
949
950 if (bo->map == NULL && bo->size > 0)
951 bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
952 fail_if(bo->map == MAP_FAILED, "intel_aubdump: bo mmap failed\n");
953
954 if (use_execlists())
955 map_ppgtt(bo->offset, bo->size);
956 }
957
958 batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
959 execbuffer2->buffer_count - 1;
960 batch_bo = get_bo(exec_objects[batch_index].handle);
961 for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
962 obj = &exec_objects[i];
963 bo = get_bo(obj->handle);
964
965 if (obj->relocation_count > 0)
966 data = relocate_bo(bo, execbuffer2, obj);
967 else
968 data = bo->map;
969
970 if (bo == batch_bo) {
971 aub_write_trace_block(AUB_TRACE_TYPE_BATCH,
972 data, bo->size, bo->offset);
973 } else {
974 aub_write_trace_block(AUB_TRACE_TYPE_NOTYPE,
975 data, bo->size, bo->offset);
976 }
977 if (data != bo->map)
978 free(data);
979 }
980
981 if (use_execlists()) {
982 aub_dump_execlist(batch_bo->offset +
983 execbuffer2->batch_start_offset, ring_flag);
984 } else {
985 /* Dump ring buffer */
986 aub_dump_ringbuffer(batch_bo->offset +
987 execbuffer2->batch_start_offset, offset,
988 ring_flag);
989 }
990
991 for (int i = 0; i < ARRAY_SIZE(files); i++) {
992 if (files[i] != NULL)
993 fflush(files[i]);
994 }
995
996 if (device_override &&
997 (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
998 struct drm_i915_gem_exec_fence *fences =
999 (void*)(uintptr_t)execbuffer2->cliprects_ptr;
1000 for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
1001 if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
1002 struct drm_syncobj_array arg = {
1003 .handles = (uintptr_t)&fences[i].handle,
1004 .count_handles = 1,
1005 .pad = 0,
1006 };
1007 libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
1008 }
1009 }
1010 }
1011 }
1012
1013 static void
1014 add_new_bo(int handle, uint64_t size, void *map)
1015 {
1016 fail_if(handle >= MAX_BO_COUNT, "intel_aubdump: bo handle out of range\n");
1017 fail_if(size == 0, "intel_aubdump: bo size is invalid\n");
1018
1019 struct bo *bo = &bos[handle];
1020
1021 bo->size = size;
1022 bo->map = map;
1023 }
1024
1025 static void
1026 remove_bo(int handle)
1027 {
1028 struct bo *bo = get_bo(handle);
1029
1030 if (bo->map && !IS_USERPTR(bo->map))
1031 munmap(bo->map, bo->size);
1032 bo->size = 0;
1033 bo->map = NULL;
1034 }
1035
1036 __attribute__ ((visibility ("default"))) int
1037 close(int fd)
1038 {
1039 if (fd == drm_fd)
1040 drm_fd = -1;
1041
1042 return libc_close(fd);
1043 }
1044
1045 static FILE *
1046 launch_command(char *command)
1047 {
1048 int i = 0, fds[2];
1049 char **args = calloc(strlen(command), sizeof(char *));
1050 char *iter = command;
1051
1052 args[i++] = iter = command;
1053
1054 while ((iter = strstr(iter, ",")) != NULL) {
1055 *iter = '\0';
1056 iter += 1;
1057 args[i++] = iter;
1058 }
1059
1060 if (pipe(fds) == -1)
1061 return NULL;
1062
1063 switch (fork()) {
1064 case 0:
1065 dup2(fds[0], 0);
1066 fail_if(execvp(args[0], args) == -1,
1067 "intel_aubdump: failed to launch child command\n");
1068 return NULL;
1069
1070 default:
1071 free(args);
1072 return fdopen(fds[1], "w");
1073
1074 case -1:
1075 return NULL;
1076 }
1077 }
1078
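/* One-time setup, driven by a simple key=value configuration that the
 * launching wrapper is expected to pass on file descriptor 3. Recognised
 * keys are "verbose", "device" (forces a PCI ID and implies the override
 * path), "file" (the output .aub file) and "command" (a comma-separated
 * command line to pipe the stream into).
 */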
1079 static void
1080 maybe_init(void)
1081 {
1082 static bool initialized = false;
1083 FILE *config;
1084 char *key, *value;
1085
1086 if (initialized)
1087 return;
1088
1089 initialized = true;
1090
1091 config = fdopen(3, "r");
1092 while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
1093 if (!strcmp(key, "verbose")) {
1094 if (!strcmp(value, "1")) {
1095 verbose = 1;
1096 } else if (!strcmp(value, "2")) {
1097 verbose = 2;
1098 }
1099 } else if (!strcmp(key, "device")) {
1100 fail_if(sscanf(value, "%i", &device) != 1,
1101 "intel_aubdump: failed to parse device id '%s'\n",
1102 value);
1103 device_override = true;
1104 } else if (!strcmp(key, "file")) {
1105 filename = strdup(value);
1106 files[0] = fopen(filename, "w+");
1107 fail_if(files[0] == NULL,
1108 "intel_aubdump: failed to open file '%s'\n",
1109 filename);
1110 } else if (!strcmp(key, "command")) {
1111 files[1] = launch_command(value);
1112 fail_if(files[1] == NULL,
1113 "intel_aubdump: failed to launch command '%s'\n",
1114 value);
1115 } else {
1116 fprintf(stderr, "intel_aubdump: unknown option '%s'\n", key);
1117 }
1118
1119 free(key);
1120 free(value);
1121 }
1122 fclose(config);
1123
1124 bos = calloc(MAX_BO_COUNT, sizeof(bos[0]));
1125 fail_if(bos == NULL, "intel_aubdump: out of memory\n");
1126 }
1127
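/* Exported ioctl() wrapper. It latches onto the first DRM character
 * device it sees, tracks GEM buffer creation, userptr, open, prime import
 * and close so that BO sizes and mappings are known, and dumps every
 * execbuffer2. When a device override is active the execbuffer is not
 * forwarded to the kernel; any syncobj fences the application asked to be
 * signalled are signalled directly so it can keep making progress.
 */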
1128 __attribute__ ((visibility ("default"))) int
1129 ioctl(int fd, unsigned long request, ...)
1130 {
1131 va_list args;
1132 void *argp;
1133 int ret;
1134 struct stat buf;
1135
1136 va_start(args, request);
1137 argp = va_arg(args, void *);
1138 va_end(args);
1139
1140 if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
1141 drm_fd != fd && fstat(fd, &buf) == 0 &&
1142 (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
1143 drm_fd = fd;
1144 if (verbose)
1145 printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd);
1146 }
1147
1148 if (fd == drm_fd) {
1149 maybe_init();
1150
1151 switch (request) {
1152 case DRM_IOCTL_I915_GETPARAM: {
1153 struct drm_i915_getparam *getparam = argp;
1154
1155 if (device_override && getparam->param == I915_PARAM_CHIPSET_ID) {
1156 *getparam->value = device;
1157 return 0;
1158 }
1159
1160 ret = libc_ioctl(fd, request, argp);
1161
1162 /* If the application looks up chipset_id
1163 * (they typically do), we'll piggy-back on
1164 * their ioctl and store the id for later
1165 * use. */
1166 if (getparam->param == I915_PARAM_CHIPSET_ID)
1167 device = *getparam->value;
1168
1169 return ret;
1170 }
1171
1172 case DRM_IOCTL_I915_GEM_EXECBUFFER: {
1173 static bool once;
1174 if (!once) {
1175 fprintf(stderr, "intel_aubdump: "
1176 "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
1177 once = true;
1178 }
1179 return libc_ioctl(fd, request, argp);
1180 }
1181
1182 case DRM_IOCTL_I915_GEM_EXECBUFFER2:
1183 case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
1184 dump_execbuffer2(fd, argp);
1185 if (device_override)
1186 return 0;
1187
1188 return libc_ioctl(fd, request, argp);
1189 }
1190
1191 case DRM_IOCTL_I915_GEM_CREATE: {
1192 struct drm_i915_gem_create *create = argp;
1193
1194 ret = libc_ioctl(fd, request, argp);
1195 if (ret == 0)
1196 add_new_bo(create->handle, create->size, NULL);
1197
1198 return ret;
1199 }
1200
1201 case DRM_IOCTL_I915_GEM_USERPTR: {
1202 struct drm_i915_gem_userptr *userptr = argp;
1203
1204 ret = libc_ioctl(fd, request, argp);
1205 if (ret == 0)
1206 add_new_bo(userptr->handle, userptr->user_size,
1207 (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));
1208 return ret;
1209 }
1210
1211 case DRM_IOCTL_GEM_CLOSE: {
1212 struct drm_gem_close *close = argp;
1213
1214 remove_bo(close->handle);
1215
1216 return libc_ioctl(fd, request, argp);
1217 }
1218
1219 case DRM_IOCTL_GEM_OPEN: {
1220 struct drm_gem_open *open = argp;
1221
1222 ret = libc_ioctl(fd, request, argp);
1223 if (ret == 0)
1224 add_new_bo(open->handle, open->size, NULL);
1225
1226 return ret;
1227 }
1228
1229 case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
1230 struct drm_prime_handle *prime = argp;
1231
1232 ret = libc_ioctl(fd, request, argp);
1233 if (ret == 0) {
1234 off_t size;
1235
1236 size = lseek(prime->fd, 0, SEEK_END);
1237 fail_if(size == -1, "intel_aubdump: failed to get prime bo size\n");
1238 add_new_bo(prime->handle, size, NULL);
1239 }
1240
1241 return ret;
1242 }
1243
1244 default:
1245 return libc_ioctl(fd, request, argp);
1246 }
1247 } else {
1248 return libc_ioctl(fd, request, argp);
1249 }
1250 }
1251
1252 static void
1253 init(void)
1254 {
1255 libc_close = dlsym(RTLD_NEXT, "close");
1256 libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
1257 fail_if(libc_close == NULL || libc_ioctl == NULL,
1258 "intel_aubdump: failed to get libc ioctl or close\n");
1259 }
1260
1261 static int
1262 close_init_helper(int fd)
1263 {
1264 init();
1265 return libc_close(fd);
1266 }
1267
1268 static int
1269 ioctl_init_helper(int fd, unsigned long request, ...)
1270 {
1271 va_list args;
1272 void *argp;
1273
1274 va_start(args, request);
1275 argp = va_arg(args, void *);
1276 va_end(args);
1277
1278 init();
1279 return libc_ioctl(fd, request, argp);
1280 }
1281
1282 static void __attribute__ ((destructor))
1283 fini(void)
1284 {
1285 free(filename);
1286 for (int i = 0; i < ARRAY_SIZE(files); i++) {
1287 if (files[i] != NULL)
1288 fclose(files[i]);
1289 }
1290 free(bos);
1291 }