intel/tools: Refactor aub dumping to remove singletons
[mesa.git] / src / intel / tools / intel_dump_gpu.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <signal.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/mman.h>
39 #include <dlfcn.h>
40 #include <i915_drm.h>
41 #include <inttypes.h>
42
43 #include "intel_aub.h"
44
45 #include "dev/gen_device_info.h"
46 #include "util/macros.h"
47
#ifndef ALIGN
#define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
#endif

/* MI (Memory Interface) command encodings used to build the ring contents
 * and logical ring context images below.
 */
#define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
#define MI_LRI_FORCE_POSTED (1<<12)

#define MI_BATCH_NON_SECURE_I965 (1 << 8)

#define MI_BATCH_BUFFER_END (0xA << 23)

/* Statement-expression min/max so each argument is evaluated exactly once. */
#define min(a, b) ({                            \
      __typeof(a) _a = (a);                     \
      __typeof(b) _b = (b);                     \
      _a < _b ? _a : _b;                        \
   })

#define max(a, b) ({                            \
      __typeof(a) _a = (a);                     \
      __typeof(b) _b = (b);                     \
      _a > _b ? _a : _b;                        \
   })

/* Per-engine MMIO register offsets.  RCSUNIT is the render engine,
 * VCSUNIT0 the video (BSD) engine and BCSUNIT the blitter.
 */
#define HWS_PGA_RCSUNIT      0x02080
#define HWS_PGA_VCSUNIT0     0x12080
#define HWS_PGA_BCSUNIT      0x22080

#define GFX_MODE_RCSUNIT     0x0229c
#define GFX_MODE_VCSUNIT0    0x1229c
#define GFX_MODE_BCSUNIT     0x2229c

#define EXECLIST_SUBMITPORT_RCSUNIT   0x02230
#define EXECLIST_SUBMITPORT_VCSUNIT0  0x12230
#define EXECLIST_SUBMITPORT_BCSUNIT   0x22230

#define EXECLIST_STATUS_RCSUNIT       0x02234
#define EXECLIST_STATUS_VCSUNIT0      0x12234
#define EXECLIST_STATUS_BCSUNIT       0x22234

#define EXECLIST_SQ_CONTENTS0_RCSUNIT   0x02510
#define EXECLIST_SQ_CONTENTS0_VCSUNIT0  0x12510
#define EXECLIST_SQ_CONTENTS0_BCSUNIT   0x22510

#define EXECLIST_CONTROL_RCSUNIT      0x02550
#define EXECLIST_CONTROL_VCSUNIT0     0x12550
#define EXECLIST_CONTROL_BCSUNIT      0x22550

/* Sizes the legacy GTT page table below (NUM_PT_ENTRIES). */
#define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)

#define PTE_SIZE 4
#define GEN8_PTE_SIZE 8

#define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
#define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)

#define RING_SIZE         (1 * 4096)
#define PPHWSP_SIZE       (1 * 4096)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * 4096)
#define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
#define GEN9_LR_CONTEXT_RENDER_SIZE  (22 * 4096)
#define GEN8_LR_CONTEXT_RENDER_SIZE  (20 * 4096)
#define GEN8_LR_CONTEXT_OTHER_SIZE   (2 * 4096)


/* Fixed GGTT layout used in execlist mode: for each of the three engines a
 * ring buffer followed by its PPHWSP + logical ring context image.
 */
#define STATIC_GGTT_MAP_START 0

#define RENDER_RING_ADDR STATIC_GGTT_MAP_START
#define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)

#define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
#define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)

#define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
#define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)

#define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
#define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)

/* The PML4 page itself is placed right after the static GGTT mappings. */
#define PML4_PHYS_ADDR ((uint64_t)(STATIC_GGTT_MAP_END))

#define CONTEXT_FLAGS (0x339) /* Normal Priority | L3-LLC Coherency |
                               * PPGTT Enabled |
                               * Legacy Context with 64 bit VA support |
                               * Valid
                               */

/* NOTE(review): the value in bits 63:62 presumably serves as a per-context
 * tag to distinguish the three contexts — confirm against the PRM.
 */
#define RENDER_CONTEXT_DESCRIPTOR  ((uint64_t)1 << 62 | RENDER_CONTEXT_ADDR  | CONTEXT_FLAGS)
#define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 62 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
#define VIDEO_CONTEXT_DESCRIPTOR   ((uint64_t)3 << 62 | VIDEO_CONTEXT_ADDR   | CONTEXT_FLAGS)
137
/* Initial logical ring context image for the render engine: MI_LOAD_REGISTER_IMM
 * sequences that seed the ring registers and PPGTT pointers.  Written right
 * after the engine's PPHWSP by write_execlists_header().
 */
static const uint32_t render_context_init[GEN9_LR_CONTEXT_RENDER_SIZE / /* Choose the largest */
                                          sizeof(uint32_t)] = {
   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED,
   0x2244 /* CONTEXT_CONTROL */,       0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
   0x2034 /* RING_HEAD */,             0,
   0x2030 /* RING_TAIL */,             0,
   0x2038 /* RING_BUFFER_START */,     RENDER_RING_ADDR,
   0x203C /* RING_BUFFER_CONTROL */,   (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
   0x2168 /* BB_HEAD_U */,             0,
   0x2140 /* BB_HEAD_L */,             0,
   0x2110 /* BB_STATE */,              0,
   0x211C /* SECOND_BB_HEAD_U */,      0,
   0x2114 /* SECOND_BB_HEAD_L */,      0,
   0x2118 /* SECOND_BB_STATE */,       0,
   0x21C0 /* BB_PER_CTX_PTR */,        0,
   0x21C4 /* RCS_INDIRECT_CTX */,      0,
   0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
   /* MI_NOOP */
   0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
   0x23A8 /* CTX_TIMESTAMP */,         0,
   0x228C /* PDP3_UDW */,              0,
   0x2288 /* PDP3_LDW */,              0,
   0x2284 /* PDP2_UDW */,              0,
   0x2280 /* PDP2_LDW */,              0,
   0x227C /* PDP1_UDW */,              0,
   0x2278 /* PDP1_LDW */,              0,
   /* PDP0 points at the PML4 so the context uses the 4-level PPGTT built
    * by aub_map_ppgtt().
    */
   0x2274 /* PDP0_UDW */,              PML4_PHYS_ADDR >> 32,
   0x2270 /* PDP0_LDW */,              PML4_PHYS_ADDR,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(1),
   0x20C8 /* R_PWR_CLK_STATE */,       0x7FFFFFFF,
   MI_BATCH_BUFFER_END
};
178
/* Initial logical ring context image for the blitter engine; same structure
 * as render_context_init but with 0x22xxx blitter register offsets and no
 * render-only registers.
 */
static const uint32_t blitter_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
                                           sizeof(uint32_t)] = {
   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
   0x22244 /* CONTEXT_CONTROL */,      0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
   0x22034 /* RING_HEAD */,            0,
   0x22030 /* RING_TAIL */,            0,
   0x22038 /* RING_BUFFER_START */,    BLITTER_RING_ADDR,
   0x2203C /* RING_BUFFER_CONTROL */,  (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
   0x22168 /* BB_HEAD_U */,            0,
   0x22140 /* BB_HEAD_L */,            0,
   0x22110 /* BB_STATE */,             0,
   0x2211C /* SECOND_BB_HEAD_U */,     0,
   0x22114 /* SECOND_BB_HEAD_L */,     0,
   0x22118 /* SECOND_BB_STATE */,      0,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
   0x223A8 /* CTX_TIMESTAMP */,        0,
   0x2228C /* PDP3_UDW */,             0,
   0x22288 /* PDP3_LDW */,             0,
   0x22284 /* PDP2_UDW */,             0,
   0x22280 /* PDP2_LDW */,             0,
   0x2227C /* PDP1_UDW */,             0,
   0x22278 /* PDP1_LDW */,             0,
   0x22274 /* PDP0_UDW */,             PML4_PHYS_ADDR >> 32,
   0x22270 /* PDP0_LDW */,             PML4_PHYS_ADDR,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

   MI_BATCH_BUFFER_END
};
213
/* Initial logical ring context image for the video (BSD) engine, using the
 * 0x1Cxxx register offsets.
 */
static const uint32_t video_context_init[GEN8_LR_CONTEXT_OTHER_SIZE /
                                         sizeof(uint32_t)] = {
   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED,
   0x1C244 /* CONTEXT_CONTROL */,      0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
   0x1C034 /* RING_HEAD */,            0,
   0x1C030 /* RING_TAIL */,            0,
   0x1C038 /* RING_BUFFER_START */,    VIDEO_RING_ADDR,
   0x1C03C /* RING_BUFFER_CONTROL */,  (RING_SIZE - 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
   0x1C168 /* BB_HEAD_U */,            0,
   0x1C140 /* BB_HEAD_L */,            0,
   0x1C110 /* BB_STATE */,             0,
   0x1C11C /* SECOND_BB_HEAD_U */,     0,
   0x1C114 /* SECOND_BB_HEAD_L */,     0,
   0x1C118 /* SECOND_BB_STATE */,      0,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0,

   0 /* MI_NOOP */,
   MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED,
   0x1C3A8 /* CTX_TIMESTAMP */,        0,
   0x1C28C /* PDP3_UDW */,             0,
   0x1C288 /* PDP3_LDW */,             0,
   0x1C284 /* PDP2_UDW */,             0,
   0x1C280 /* PDP2_LDW */,             0,
   0x1C27C /* PDP1_UDW */,             0,
   0x1C278 /* PDP1_LDW */,             0,
   0x1C274 /* PDP0_UDW */,             PML4_PHYS_ADDR >> 32,
   0x1C270 /* PDP0_LDW */,             PML4_PHYS_ADDR,
   /* MI_NOOP */
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

   MI_BATCH_BUFFER_END
};
248
static int close_init_helper(int fd);
static int ioctl_init_helper(int fd, unsigned long request, ...);

/* Pointers to the real libc close()/ioctl(), lazily resolved by the
 * *_init_helper trampolines on first use (this file interposes both
 * symbols via LD_PRELOAD).
 */
static int (*libc_close)(int fd) = close_init_helper;
static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;

static int drm_fd = -1;              /* the intercepted DRM device fd (-1 = none yet) */
static char *filename = NULL;        /* output .aub file name, for logging */
static FILE *files[2] = { NULL, NULL };  /* up to two output streams; slots may be NULL */
static int verbose = 0;              /* 0 = quiet, 1 = progress, 2 = full PPGTT logging */
static bool device_override;

#define MAX_BO_COUNT 64 * 1024

/* Shadow state for each GEM buffer object, indexed by GEM handle. */
struct bo {
   uint32_t size;
   uint64_t offset;   /* GPU virtual address assigned by dump_execbuffer2() */
   void *map;         /* CPU mapping (may carry USERPTR_FLAG in bit 0) */
};

static struct bo *bos;

#define DRM_MAJOR 226

/* We set bit 0 in the map pointer for userptr BOs so we know not to
 * munmap them on DRM_IOCTL_GEM_CLOSE.
 */
#define USERPTR_FLAG 1
#define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
#define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
279
/* If `cond` is non-zero, print a printf-style message to stderr and stop
 * the process in the debugger (SIGTRAP).  No-op when `cond` is zero.
 */
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
   if (cond) {
      va_list ap;

      va_start(ap, format);
      vfprintf(stderr, format, ap);
      va_end(ap);

      raise(SIGTRAP);
   }
}
294
295 static struct bo *
296 get_bo(uint32_t handle)
297 {
298 struct bo *bo;
299
300 fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
301 bo = &bos[handle];
302
303 return bo;
304 }
305
/* Round `v` up to the next multiple of `a` (`a` must be a power of two). */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   const uint32_t mask = a - 1;

   return (v + mask) & ~mask;
}
311
/* One level of the 4-level PPGTT shadow tree.  In a level-1 (last level)
 * table the subtables[] slots do NOT hold heap pointers: populate_ppgtt_table()
 * stores the allocated physical page address directly in the slot.
 */
struct aub_ppgtt_table {
   uint64_t phys_addr;
   struct aub_ppgtt_table *subtables[512];
};

/* Recursively free the subtables of `table` (but not `table` itself).
 *
 * BUGFIX: the previous version recursed unconditionally, dereferencing NULL
 * for unmapped slots, and also recursed into level-1 tables whose slots are
 * fake physical-address "pointers" (see populate_ppgtt_table), not heap
 * allocations.  We now track the level and stop before level 1.
 */
static void
aub_ppgtt_table_finish_level(struct aub_ppgtt_table *table, int level)
{
   /* Level-1 slots are physical page addresses, nothing to free. */
   if (level == 1)
      return;

   for (unsigned i = 0;
        i < sizeof(table->subtables) / sizeof(table->subtables[0]); i++) {
      if (table->subtables[i]) {
         aub_ppgtt_table_finish_level(table->subtables[i], level - 1);
         free(table->subtables[i]);
      }
   }
}

/* Free the whole tree below a PML4 (level 4) root; the root is caller-owned. */
static void
aub_ppgtt_table_finish(struct aub_ppgtt_table *table)
{
   aub_ppgtt_table_finish_level(table, 4);
}
325
/* All per-output-stream dump state; replaces the old singleton globals. */
struct aub_file {
   FILE *file;                     /* the .aub output stream */

   /* Set if you want extra logging */
   FILE *verbose_log_file;

   uint16_t pci_id;                /* PCI device id used to pick devinfo */
   struct gen_device_info devinfo;

   int addr_bits;                  /* 48 on gen8+, else 32 (see aub_file_init) */

   struct aub_ppgtt_table pml4;    /* root of the shadow PPGTT tree */
};
339
340 static void
341 aub_file_init(struct aub_file *aub, FILE *file, uint16_t pci_id)
342 {
343 memset(aub, 0, sizeof(*aub));
344
345 aub->file = file;
346 aub->pci_id = pci_id;
347 fail_if(!gen_get_device_info(pci_id, &aub->devinfo),
348 "failed to identify chipset=0x%x\n", pci_id);
349 aub->addr_bits = aub->devinfo.gen >= 8 ? 48 : 32;
350
351 aub->pml4.phys_addr = PML4_PHYS_ADDR;
352 }
353
/* Tear down a dump stream: free the shadow PPGTT tree and close the file. */
static void
aub_file_finish(struct aub_file *aub)
{
   aub_ppgtt_table_finish(&aub->pml4);
   fclose(aub->file);
}

/* Gen8+ is dumped via execlist submission; older gens via legacy ring writes. */
static inline bool aub_use_execlists(const struct aub_file *aub)
{
   return aub->devinfo.gen >= 8;
}
365
366 static void
367 data_out(struct aub_file *aub, const void *data, size_t size)
368 {
369 if (size == 0)
370 return;
371
372 fail_if(fwrite(data, 1, size, aub->file) == 0,
373 "Writing to output failed\n");
374 }
375
/* Write a single little-endian dword to the aub stream. */
static void
dword_out(struct aub_file *aub, uint32_t data)
{
   data_out(aub, &data, sizeof(data));
}

/* Size in bytes of the legacy GTT page table (64-bit PTEs on gen8+). */
static uint32_t
aub_gtt_size(struct aub_file *aub)
{
   return NUM_PT_ENTRIES * (aub->addr_bits > 32 ? GEN8_PTE_SIZE : PTE_SIZE);
}
387
/* Emit the 5-dword header of a CMD_MEM_TRACE_MEMORY_WRITE packet; the caller
 * must follow it with `len` bytes of payload (padded to a dword).
 */
static void
mem_trace_memory_write_header_out(struct aub_file *aub, uint64_t addr,
                                  uint32_t len, uint32_t addr_space)
{
   uint32_t dwords = ALIGN(len, sizeof(uint32_t)) / sizeof(uint32_t);

   /* Low bits of the first dword carry the packet length minus one. */
   dword_out(aub, CMD_MEM_TRACE_MEMORY_WRITE | (5 + dwords - 1));
   dword_out(aub, addr & 0xFFFFFFFF);   /* addr lo */
   dword_out(aub, addr >> 32);   /* addr hi */
   dword_out(aub, addr_space);   /* gtt */
   dword_out(aub, len);
}

/* Emit a CMD_MEM_TRACE_REGISTER_WRITE packet that writes `value` to the MMIO
 * register at `addr` (full 32-bit mask).
 */
static void
register_write_out(struct aub_file *aub, uint32_t addr, uint32_t value)
{
   uint32_t dwords = 1;

   dword_out(aub, CMD_MEM_TRACE_REGISTER_WRITE | (5 + dwords - 1));
   dword_out(aub, addr);
   dword_out(aub, AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
                  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   dword_out(aub, 0xFFFFFFFF);   /* mask lo */
   dword_out(aub, 0x00000000);   /* mask hi */
   dword_out(aub, value);
}
414
/* Make entries start..end (inclusive) of one PPGTT `table` present,
 * allocating subtables as needed, and emit only the dirtied entry range to
 * the aub file as a physical-memory write.
 *
 * `level` is the level of `table` itself (4 = PML4 ... 1 = last-level page
 * table).  In a level-1 table the subtables[] slots hold the allocated
 * physical page address directly instead of a heap pointer — ppgtt_lookup()
 * relies on this.
 */
static void
populate_ppgtt_table(struct aub_file *aub, struct aub_ppgtt_table *table,
                     int start, int end, int level)
{
   /* Monotonic physical page allocator shared by all tables/levels; the
    * first page after the fixed PML4 page is the first one handed out.
    */
   static uint64_t phys_addrs_allocator = (PML4_PHYS_ADDR >> 12) + 1;
   uint64_t entries[512] = {0};
   int dirty_start = 512, dirty_end = 0;

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              " PPGTT (0x%016" PRIx64 "), lvl %d, start: %x, end: %x\n",
              table->phys_addr, level, start, end);
   }

   for (int i = start; i <= end; i++) {
      if (!table->subtables[i]) {
         /* New entry: remember the dirty range so we only write back the
          * entries that changed.
          */
         dirty_start = min(dirty_start, i);
         dirty_end = max(dirty_end, i);
         if (level == 1) {
            /* Leaf: store the physical page address in the pointer slot. */
            table->subtables[i] =
               (void *)(phys_addrs_allocator++ << 12);
            if (aub->verbose_log_file) {
               fprintf(aub->verbose_log_file,
                       "   Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
                       i, (uint64_t)table->subtables[i]);
            }
         } else {
            table->subtables[i] =
               calloc(1, sizeof(struct aub_ppgtt_table));
            table->subtables[i]->phys_addr =
               phys_addrs_allocator++ << 12;
            if (aub->verbose_log_file) {
               fprintf(aub->verbose_log_file,
                       "   Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
                       i, table->subtables[i]->phys_addr);
            }
         }
      }
      entries[i] = 3 /* read/write | present */ |
                   (level == 1 ? (uint64_t)table->subtables[i] :
                                 table->subtables[i]->phys_addr);
   }

   if (dirty_start <= dirty_end) {
      uint64_t write_addr = table->phys_addr + dirty_start *
                            sizeof(uint64_t);
      uint64_t write_size = (dirty_end - dirty_start + 1) *
                            sizeof(uint64_t);
      mem_trace_memory_write_header_out(aub, write_addr, write_size,
                                        AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL);
      data_out(aub, entries + dirty_start, write_size);
   }
}
468
/* Map [start, start+size) in the 4-level PPGTT, walking PML4 -> L3 -> L2 ->
 * L1 and populating each level's covered index range.
 */
static void
aub_map_ppgtt(struct aub_file *aub, uint64_t start, uint64_t size)
{
   uint64_t l4_start = start & 0xff8000000000;
   uint64_t l4_end = ((start + size - 1) | 0x007fffffffff) & 0xffffffffffff;

/* Index of a 48-bit address within each table level (9 bits per level).
 * NOTE: these macros are deliberately left defined — ppgtt_lookup() below
 * uses them too.
 */
#define L4_index(addr) (((addr) >> 39) & 0x1ff)
#define L3_index(addr) (((addr) >> 30) & 0x1ff)
#define L2_index(addr) (((addr) >> 21) & 0x1ff)
#define L1_index(addr) (((addr) >> 12) & 0x1ff)

#define L3_table(addr) (aub->pml4.subtables[L4_index(addr)])
#define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
#define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              " Mapping PPGTT address: 0x%" PRIx64 ", size: %" PRIu64"\n",
              start, size);
   }

   populate_ppgtt_table(aub, &aub->pml4, L4_index(l4_start), L4_index(l4_end), 4);

   for (uint64_t l4 = l4_start; l4 < l4_end; l4 += (1ULL << 39)) {
      /* Clamp each subtree's range to the requested [start, start+size). */
      uint64_t l3_start = max(l4, start & 0xffffc0000000);
      uint64_t l3_end = min(l4 + (1ULL << 39) - 1,
                            ((start + size - 1) | 0x00003fffffff) & 0xffffffffffff);
      uint64_t l3_start_idx = L3_index(l3_start);
      uint64_t l3_end_idx = L3_index(l3_end);

      populate_ppgtt_table(aub, L3_table(l4), l3_start_idx, l3_end_idx, 3);

      for (uint64_t l3 = l3_start; l3 < l3_end; l3 += (1ULL << 30)) {
         uint64_t l2_start = max(l3, start & 0xffffffe00000);
         uint64_t l2_end = min(l3 + (1ULL << 30) - 1,
                               ((start + size - 1) | 0x0000001fffff) & 0xffffffffffff);
         uint64_t l2_start_idx = L2_index(l2_start);
         uint64_t l2_end_idx = L2_index(l2_end);

         populate_ppgtt_table(aub, L2_table(l3), l2_start_idx, l2_end_idx, 2);

         for (uint64_t l2 = l2_start; l2 < l2_end; l2 += (1ULL << 21)) {
            uint64_t l1_start = max(l2, start & 0xfffffffff000);
            uint64_t l1_end = min(l2 + (1ULL << 21) - 1,
                                  ((start + size - 1) | 0x000000000fff) & 0xffffffffffff);
            uint64_t l1_start_idx = L1_index(l1_start);
            uint64_t l1_end_idx = L1_index(l1_end);

            populate_ppgtt_table(aub, L1_table(l2), l1_start_idx, l1_end_idx, 1);
         }
      }
   }
}

/* Translate a PPGTT virtual address to the physical page address stored in
 * the level-1 slot (see populate_ppgtt_table's fake-pointer convention).
 * The address must have been mapped with aub_map_ppgtt() first.
 */
static uint64_t
ppgtt_lookup(struct aub_file *aub, uint64_t ppgtt_addr)
{
   return (uint64_t)L1_table(ppgtt_addr)->subtables[L1_index(ppgtt_addr)];
}
528
/* Emit the execlist-mode (gen8+) file header: version packet, a 1:1 GGTT
 * mapping for the static region, the three engines' rings + PPHWSP + context
 * images, and the register writes enabling execlist submission.
 */
static void
write_execlists_header(struct aub_file *aub, const char *name)
{
   char app_name[8 * 4];
   int app_name_len, dwords;

   app_name_len =
      snprintf(app_name, sizeof(app_name), "PCI-ID=0x%X %s",
               aub->pci_id, name);
   app_name_len = ALIGN(app_name_len, sizeof(uint32_t));

   dwords = 5 + app_name_len / sizeof(uint32_t);
   dword_out(aub, CMD_MEM_TRACE_VERSION | (dwords - 1));
   dword_out(aub, AUB_MEM_TRACE_VERSION_FILE_VERSION);
   dword_out(aub, aub->devinfo.simulator_id << AUB_MEM_TRACE_VERSION_DEVICE_SHIFT);
   dword_out(aub, 0);      /* version */
   dword_out(aub, 0);      /* version */
   data_out(aub, app_name, app_name_len);

   /* GGTT PT */
   uint32_t ggtt_ptes = STATIC_GGTT_MAP_SIZE >> 12;

   mem_trace_memory_write_header_out(aub, STATIC_GGTT_MAP_START >> 12,
                                     ggtt_ptes * GEN8_PTE_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY);
   /* Identity-map the static region: PTE = page address | present (bit 0). */
   for (uint32_t i = 0; i < ggtt_ptes; i++) {
      dword_out(aub, 1 + 0x1000 * i + STATIC_GGTT_MAP_START);
      dword_out(aub, 0);
   }

   /* RENDER_RING */
   mem_trace_memory_write_header_out(aub, RENDER_RING_ADDR, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);

   /* RENDER_PPHWSP (zeroed) followed immediately by the context image */
   mem_trace_memory_write_header_out(aub, RENDER_CONTEXT_ADDR,
                                     PPHWSP_SIZE +
                                     sizeof(render_context_init),
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);

   /* RENDER_CONTEXT */
   data_out(aub, render_context_init, sizeof(render_context_init));

   /* BLITTER_RING */
   mem_trace_memory_write_header_out(aub, BLITTER_RING_ADDR, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);

   /* BLITTER_PPHWSP */
   mem_trace_memory_write_header_out(aub, BLITTER_CONTEXT_ADDR,
                                     PPHWSP_SIZE +
                                     sizeof(blitter_context_init),
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);

   /* BLITTER_CONTEXT */
   data_out(aub, blitter_context_init, sizeof(blitter_context_init));

   /* VIDEO_RING */
   mem_trace_memory_write_header_out(aub, VIDEO_RING_ADDR, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);

   /* VIDEO_PPHWSP */
   mem_trace_memory_write_header_out(aub, VIDEO_CONTEXT_ADDR,
                                     PPHWSP_SIZE +
                                     sizeof(video_context_init),
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);

   /* VIDEO_CONTEXT */
   data_out(aub, video_context_init, sizeof(video_context_init));

   /* Point each engine's hardware status page at its context area. */
   register_write_out(aub, HWS_PGA_RCSUNIT, RENDER_CONTEXT_ADDR);
   register_write_out(aub, HWS_PGA_VCSUNIT0, VIDEO_CONTEXT_ADDR);
   register_write_out(aub, HWS_PGA_BCSUNIT, BLITTER_CONTEXT_ADDR);

   register_write_out(aub, GFX_MODE_RCSUNIT, 0x80008000 /* execlist enable */);
   register_write_out(aub, GFX_MODE_VCSUNIT0, 0x80008000 /* execlist enable */);
   register_write_out(aub, GFX_MODE_BCSUNIT, 0x80008000 /* execlist enable */);
}
618
/* Emit the legacy (pre-gen8) file header: AUB header packet plus a flat GTT
 * covering MEMORY_MAP_SIZE.
 */
static void write_legacy_header(struct aub_file *aub, const char *name)
{
   char app_name[8 * 4];
   char comment[16];
   int comment_len, comment_dwords, dwords;
   /* NOTE(review): presumably PTE flags | page frame base — confirm
    * against the AUB/GTT format docs.
    */
   uint32_t entry = 0x200003;

   comment_len = snprintf(comment, sizeof(comment), "PCI-ID=0x%x", aub->pci_id);
   comment_dwords = ((comment_len + 3) / 4);

   /* Start with a (required) version packet. */
   dwords = 13 + comment_dwords;
   dword_out(aub, CMD_AUB_HEADER | (dwords - 2));
   dword_out(aub, (4 << AUB_HEADER_MAJOR_SHIFT) |
                  (0 << AUB_HEADER_MINOR_SHIFT));

   /* Next comes a 32-byte application name.  strncpy may leave the buffer
    * unterminated, so the last byte is forced to NUL explicitly below.
    */
   strncpy(app_name, program_invocation_short_name, sizeof(app_name));
   app_name[sizeof(app_name) - 1] = 0;
   data_out(aub, app_name, sizeof(app_name));

   dword_out(aub, 0); /* timestamp */
   dword_out(aub, 0); /* timestamp */
   dword_out(aub, comment_len);
   data_out(aub, comment, comment_dwords * 4);

   /* Set up the GTT. The max we can handle is 64M */
   dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                  ((aub->addr_bits > 32 ? 6 : 5) - 2));
   dword_out(aub, AUB_TRACE_MEMTYPE_GTT_ENTRY |
                  AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE);
   dword_out(aub, 0); /* subtype */
   dword_out(aub, 0); /* offset */
   dword_out(aub, aub_gtt_size(aub)); /* size */
   if (aub->addr_bits > 32)
      dword_out(aub, 0);
   for (uint32_t i = 0; i < NUM_PT_ENTRIES; i++) {
      dword_out(aub, entry + 0x1000 * i);
      if (aub->addr_bits > 32)
         dword_out(aub, 0);
   }
}
661
662 static void
663 aub_write_header(struct aub_file *aub, const char *app_name)
664 {
665 if (aub_use_execlists(aub))
666 write_execlists_header(aub, app_name);
667 else
668 write_legacy_header(aub, app_name);
669 }
670
/**
 * Break up large objects into multiple writes.  Otherwise a 128kb VBO
 * would overflow the 16 bits of size field in the packet header and
 * everything goes badly after that.
 *
 * In execlist mode each (at most 4KiB) chunk is written to the physical
 * page found via ppgtt_lookup(); in legacy mode chunks go out as GTT
 * trace blocks at the virtual offset.
 */
static void
aub_write_trace_block(struct aub_file *aub,
                      uint32_t type, void *virtual,
                      uint32_t size, uint64_t gtt_offset)
{
   uint32_t block_size;
   uint32_t subtype = 0;
   /* Zero source for unmapped BOs; also reused below as padding bytes. */
   static const char null_block[8 * 4096];

   for (uint32_t offset = 0; offset < size; offset += block_size) {
      block_size = min(8 * 4096, size - offset);

      if (aub_use_execlists(aub)) {
         /* 4KiB max so a chunk never spans two physical pages. */
         block_size = min(4096, block_size);
         mem_trace_memory_write_header_out(aub,
                                           ppgtt_lookup(aub, gtt_offset + offset),
                                           block_size,
                                           AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL);
      } else {
         dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                        ((aub->addr_bits > 32 ? 6 : 5) - 2));
         dword_out(aub, AUB_TRACE_MEMTYPE_GTT |
                        type | AUB_TRACE_OP_DATA_WRITE);
         dword_out(aub, subtype);
         dword_out(aub, gtt_offset + offset);
         dword_out(aub, align_u32(block_size, 4));
         if (aub->addr_bits > 32)
            dword_out(aub, (gtt_offset + offset) >> 32);
      }

      if (virtual)
         data_out(aub, ((char *) virtual) + offset, block_size);
      else
         data_out(aub, null_block, block_size);

      /* Pad to a multiple of 4 bytes. */
      data_out(aub, null_block, -block_size & 3);
   }
}
715
/* Patch a relocation in place: a sign-extended 64-bit address on gen8+
 * (canonical form), a plain 32-bit address on older gens.
 */
static void
aub_write_reloc(const struct gen_device_info *devinfo, void *p, uint64_t v)
{
   if (devinfo->gen >= 8) {
      /* From the Broadwell PRM Vol. 2a,
       * MI_LOAD_REGISTER_MEM::MemoryAddress:
       *
       *   "This field specifies the address of the memory
       *   location where the register value specified in the
       *   DWord above will read from.  The address specifies
       *   the DWord location of the data.  Range =
       *   GraphicsVirtualAddress[63:2] for a DWord register
       *   GraphicsAddress [63:48] are ignored by the HW and
       *   assumed to be in correct canonical form [63:48] ==
       *   [47]."
       *
       * In practice, this will always mean the top bits are zero
       * because of the GTT size limitation of the aubdump tool.
       */
      const int shift = 63 - 47;
      *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
   } else {
      *(uint32_t *)p = v;
   }
}
741
/* Submit a batch at PPGTT address `batch_offset` through one of the three
 * static execlist contexts, then emit a poll of the engine's EXECLIST
 * status register so the simulator runs it to completion.
 */
static void
aub_dump_execlist(struct aub_file *aub, uint64_t batch_offset, int ring_flag)
{
   uint32_t ring_addr;
   uint64_t descriptor;
   uint32_t elsp_reg;
   uint32_t elsq_reg;
   uint32_t status_reg;
   uint32_t control_reg;

   /* Select the engine's ring, context descriptor and submission registers. */
   switch (ring_flag) {
   case I915_EXEC_DEFAULT:
   case I915_EXEC_RENDER:
      ring_addr = RENDER_RING_ADDR;
      descriptor = RENDER_CONTEXT_DESCRIPTOR;
      elsp_reg = EXECLIST_SUBMITPORT_RCSUNIT;
      elsq_reg = EXECLIST_SQ_CONTENTS0_RCSUNIT;
      status_reg = EXECLIST_STATUS_RCSUNIT;
      control_reg = EXECLIST_CONTROL_RCSUNIT;
      break;
   case I915_EXEC_BSD:
      ring_addr = VIDEO_RING_ADDR;
      descriptor = VIDEO_CONTEXT_DESCRIPTOR;
      elsp_reg = EXECLIST_SUBMITPORT_VCSUNIT0;
      elsq_reg = EXECLIST_SQ_CONTENTS0_VCSUNIT0;
      status_reg = EXECLIST_STATUS_VCSUNIT0;
      control_reg = EXECLIST_CONTROL_VCSUNIT0;
      break;
   case I915_EXEC_BLT:
      ring_addr = BLITTER_RING_ADDR;
      descriptor = BLITTER_CONTEXT_DESCRIPTOR;
      elsp_reg = EXECLIST_SUBMITPORT_BCSUNIT;
      elsq_reg = EXECLIST_SQ_CONTENTS0_BCSUNIT;
      status_reg = EXECLIST_STATUS_BCSUNIT;
      control_reg = EXECLIST_CONTROL_BCSUNIT;
      break;
   default:
      unreachable("unknown ring");
   }

   /* Write a 3-dword MI_BATCH_BUFFER_START + MI_NOOP (16 bytes) at the
    * start of the ring.
    */
   mem_trace_memory_write_header_out(aub, ring_addr, 16,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   dword_out(aub, AUB_MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965 | (3 - 2));
   dword_out(aub, batch_offset & 0xFFFFFFFF);
   dword_out(aub, batch_offset >> 32);
   dword_out(aub, 0 /* MI_NOOP */);

   /* ring_addr + 8192 is where the context image's register list begins
    * (ring + PPHWSP); +20 / +28 are the RING_HEAD / RING_TAIL value slots
    * in that list (see *_context_init above).
    */
   mem_trace_memory_write_header_out(aub, ring_addr + 8192 + 20, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   dword_out(aub, 0); /* RING_BUFFER_HEAD */
   mem_trace_memory_write_header_out(aub, ring_addr + 8192 + 28, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT);
   dword_out(aub, 16); /* RING_BUFFER_TAIL */

   if (aub->devinfo.gen >= 11) {
      /* Gen11+: write the descriptor to the submit queue contents and kick
       * it via the control register.
       */
      register_write_out(aub, elsq_reg, descriptor & 0xFFFFFFFF);
      register_write_out(aub, elsq_reg + sizeof(uint32_t), descriptor >> 32);
      register_write_out(aub, control_reg, 1);
   } else {
      /* Gen8-10: push onto the ELSP — empty second port, then the
       * descriptor (high dword first).
       */
      register_write_out(aub, elsp_reg, 0);
      register_write_out(aub, elsp_reg, 0);
      register_write_out(aub, elsp_reg, descriptor >> 32);
      register_write_out(aub, elsp_reg, descriptor & 0xFFFFFFFF);
   }

   /* Poll the execlist status register until the submission completes. */
   dword_out(aub, CMD_MEM_TRACE_REGISTER_POLL | (5 + 1 - 1));
   dword_out(aub, status_reg);
   dword_out(aub, AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
                  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   if (aub->devinfo.gen >= 11) {
      dword_out(aub, 0x00000001);   /* mask lo */
      dword_out(aub, 0x00000000);   /* mask hi */
      dword_out(aub, 0x00000001);
   } else {
      dword_out(aub, 0x00000010);   /* mask lo */
      dword_out(aub, 0x00000000);   /* mask hi */
      dword_out(aub, 0x00000000);
   }
}
821
/* Legacy (pre-execlist) submission: write a tiny ring containing a single
 * MI_BATCH_BUFFER_START pointing at `batch_offset` to GTT offset `offset`.
 */
static void
aub_dump_ringbuffer(struct aub_file *aub, uint64_t batch_offset,
                    uint64_t offset, int ring_flag)
{
   /* Scratch ring image; only the first ring_count dwords are emitted. */
   uint32_t ringbuffer[4096];
   unsigned aub_mi_bbs_len;
   int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
   int ring_count = 0;

   if (ring_flag == I915_EXEC_BSD)
      ring = AUB_TRACE_TYPE_RING_PRB1;
   else if (ring_flag == I915_EXEC_BLT)
      ring = AUB_TRACE_TYPE_RING_PRB2;

   /* Make a ring buffer to execute our batchbuffer. */
   memset(ringbuffer, 0, sizeof(ringbuffer));

   /* 3 dwords with a 64-bit address, 2 with a 32-bit one. */
   aub_mi_bbs_len = aub->addr_bits > 32 ? 3 : 2;
   ringbuffer[ring_count] = AUB_MI_BATCH_BUFFER_START | (aub_mi_bbs_len - 2);
   aub_write_reloc(&aub->devinfo, &ringbuffer[ring_count + 1], batch_offset);
   ring_count += aub_mi_bbs_len;

   /* Write out the ring.  This appears to trigger execution of
    * the ring in the simulator.
    */
   dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                  ((aub->addr_bits > 32 ? 6 : 5) - 2));
   dword_out(aub, AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
   dword_out(aub, 0); /* general/surface subtype */
   dword_out(aub, offset);
   dword_out(aub, ring_count * 4);
   if (aub->addr_bits > 32)
      dword_out(aub, offset >> 32);

   data_out(aub, ringbuffer, ring_count * 4);
}
858
859 static void
860 aub_write_exec(struct aub_file *aub, uint64_t batch_addr,
861 uint64_t offset, int ring_flag)
862 {
863 if (aub_use_execlists(aub)) {
864 aub_dump_execlist(aub, batch_addr, ring_flag);
865 } else {
866 /* Dump ring buffer */
867 aub_dump_ringbuffer(aub, batch_addr, offset, ring_flag);
868 }
869 fflush(aub->file);
870 }
871
/* Device identity is discovered lazily on the first execbuffer (we are not
 * DRM-authenticated at open time); gen == 0 / device == 0 mean "not yet".
 */
static struct gen_device_info devinfo = {0};
static uint32_t device;
static struct aub_file aubs[2];   /* one per slot in files[] */
875
/* Return a malloc'ed copy of `bo`'s contents with all of `obj`'s relocation
 * entries patched to the target BOs' assigned GPU offsets.  Caller frees.
 */
static void *
relocate_bo(struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
            const struct drm_i915_gem_exec_object2 *obj)
{
   const struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   const struct drm_i915_gem_relocation_entry *relocs =
      (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
   void *relocated;
   int handle;

   relocated = malloc(bo->size);
   fail_if(relocated == NULL, "intel_aubdump: out of memory\n");
   /* GET_PTR strips the USERPTR_FLAG tag bit from the map pointer. */
   memcpy(relocated, GET_PTR(bo->map), bo->size);
   for (size_t i = 0; i < obj->relocation_count; i++) {
      fail_if(relocs[i].offset >= bo->size, "intel_aubdump: reloc outside bo\n");

      /* With HANDLE_LUT, target_handle is an index into the exec list
       * rather than a GEM handle.
       */
      if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
         handle = exec_objects[relocs[i].target_handle].handle;
      else
         handle = relocs[i].target_handle;

      aub_write_reloc(&devinfo, ((char *)relocated) + relocs[i].offset,
                      get_bo(handle)->offset + relocs[i].delta);
   }

   return relocated;
}
904
/* ioctl through the real libc entry point, retrying on EINTR/EAGAIN. */
static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   for (;;) {
      int ret = libc_ioctl(fd, request, argp);

      if (ret != -1 || (errno != EINTR && errno != EAGAIN))
         return ret;
   }
}
916
/* CPU-map `size` bytes of a GEM BO starting at `offset`; returns MAP_FAILED
 * on error (mirroring mmap's convention).
 */
static void *
gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
{
   struct drm_i915_gem_mmap mmap = {
      .handle = handle,
      .offset = offset,
      .size = size
   };

   if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
      return MAP_FAILED;

   return (void *)(uintptr_t) mmap.addr_ptr;
}

/* Query an I915_PARAM_* value; returns 0 on ioctl failure (callers treat 0
 * as "unknown").
 */
static int
gem_get_param(int fd, uint32_t param)
{
   int value;
   drm_i915_getparam_t gp = {
      .param = param,
      .value = &value
   };

   if (gem_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1)
      return 0;

   return value;
}
946
947 static void
948 dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
949 {
950 struct drm_i915_gem_exec_object2 *exec_objects =
951 (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
952 uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
953 uint32_t offset;
954 struct drm_i915_gem_exec_object2 *obj;
955 struct bo *bo, *batch_bo;
956 int batch_index;
957 void *data;
958
959 /* We can't do this at open time as we're not yet authenticated. */
960 if (device == 0) {
961 device = gem_get_param(fd, I915_PARAM_CHIPSET_ID);
962 fail_if(device == 0 || devinfo.gen == 0, "failed to identify chipset\n");
963 }
964 if (devinfo.gen == 0) {
965 fail_if(!gen_get_device_info(device, &devinfo),
966 "failed to identify chipset=0x%x\n", device);
967
968 for (int i = 0; i < ARRAY_SIZE(files); i++) {
969 if (files[i] != NULL) {
970 aub_file_init(&aubs[i], files[i], device);
971 if (verbose == 2)
972 aubs[i].verbose_log_file = stdout;
973 aub_write_header(&aubs[i], program_invocation_short_name);
974 }
975 }
976
977 if (verbose)
978 printf("[intel_aubdump: running, "
979 "output file %s, chipset id 0x%04x, gen %d]\n",
980 filename, device, devinfo.gen);
981 }
982
983 /* Any aub */
984 struct aub_file *any_aub = files[0] ? &aubs[0] : &aubs[1];;
985
986 if (aub_use_execlists(any_aub))
987 offset = 0x1000;
988 else
989 offset = aub_gtt_size(any_aub);
990
991 if (verbose)
992 printf("Dumping execbuffer2:\n");
993
994 for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
995 obj = &exec_objects[i];
996 bo = get_bo(obj->handle);
997
998 /* If bo->size == 0, this means they passed us an invalid
999 * buffer. The kernel will reject it and so should we.
1000 */
1001 if (bo->size == 0) {
1002 if (verbose)
1003 printf("BO #%d is invalid!\n", obj->handle);
1004 return;
1005 }
1006
1007 if (obj->flags & EXEC_OBJECT_PINNED) {
1008 bo->offset = obj->offset;
1009 if (verbose)
1010 printf("BO #%d (%dB) pinned @ 0x%lx\n",
1011 obj->handle, bo->size, bo->offset);
1012 } else {
1013 if (obj->alignment != 0)
1014 offset = align_u32(offset, obj->alignment);
1015 bo->offset = offset;
1016 if (verbose)
1017 printf("BO #%d (%dB) @ 0x%lx\n", obj->handle,
1018 bo->size, bo->offset);
1019 offset = align_u32(offset + bo->size + 4095, 4096);
1020 }
1021
1022 if (bo->map == NULL && bo->size > 0)
1023 bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
1024 fail_if(bo->map == MAP_FAILED, "intel_aubdump: bo mmap failed\n");
1025
1026 for (int i = 0; i < ARRAY_SIZE(files); i++) {
1027 if (files[i] == NULL)
1028 continue;
1029
1030 if (aub_use_execlists(&aubs[i]))
1031 aub_map_ppgtt(&aubs[i], bo->offset, bo->size);
1032 }
1033 }
1034
1035 batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
1036 execbuffer2->buffer_count - 1;
1037 batch_bo = get_bo(exec_objects[batch_index].handle);
1038 for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
1039 obj = &exec_objects[i];
1040 bo = get_bo(obj->handle);
1041
1042 if (obj->relocation_count > 0)
1043 data = relocate_bo(bo, execbuffer2, obj);
1044 else
1045 data = bo->map;
1046
1047 for (int i = 0; i < ARRAY_SIZE(files); i++) {
1048 if (files[i] == NULL)
1049 continue;
1050
1051 if (bo == batch_bo) {
1052 aub_write_trace_block(&aubs[i], AUB_TRACE_TYPE_BATCH,
1053 GET_PTR(data), bo->size, bo->offset);
1054 } else {
1055 aub_write_trace_block(&aubs[i], AUB_TRACE_TYPE_NOTYPE,
1056 GET_PTR(data), bo->size, bo->offset);
1057 }
1058 }
1059 if (data != bo->map)
1060 free(data);
1061 }
1062
1063 for (int i = 0; i < ARRAY_SIZE(files); i++) {
1064 if (files[i] != NULL)
1065 continue;
1066
1067 aub_write_exec(&aubs[i],
1068 batch_bo->offset + execbuffer2->batch_start_offset,
1069 offset, ring_flag);
1070 }
1071
1072 if (device_override &&
1073 (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
1074 struct drm_i915_gem_exec_fence *fences =
1075 (void*)(uintptr_t)execbuffer2->cliprects_ptr;
1076 for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
1077 if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
1078 struct drm_syncobj_array arg = {
1079 .handles = (uintptr_t)&fences[i].handle,
1080 .count_handles = 1,
1081 .pad = 0,
1082 };
1083 libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
1084 }
1085 }
1086 }
1087 }
1088
1089 static void
1090 add_new_bo(int handle, uint64_t size, void *map)
1091 {
1092 struct bo *bo = &bos[handle];
1093
1094 fail_if(handle >= MAX_BO_COUNT, "intel_aubdump: bo handle out of range\n");
1095 fail_if(size == 0, "intel_aubdump: bo size is invalid\n");
1096
1097 bo->size = size;
1098 bo->map = map;
1099 }
1100
1101 static void
1102 remove_bo(int handle)
1103 {
1104 struct bo *bo = get_bo(handle);
1105
1106 if (bo->map && !IS_USERPTR(bo->map))
1107 munmap(bo->map, bo->size);
1108 bo->size = 0;
1109 bo->map = NULL;
1110 }
1111
1112 __attribute__ ((visibility ("default"))) int
1113 close(int fd)
1114 {
1115 if (fd == drm_fd)
1116 drm_fd = -1;
1117
1118 return libc_close(fd);
1119 }
1120
/* Fork `command` (a comma-separated argv string, split in place by
 * replacing each ',' with '\0') with its stdin connected to a pipe, and
 * return a FILE * for the write end so AUB data can be streamed to it.
 * Returns NULL if pipe(2) or fork(2) fails.
 */
static FILE *
launch_command(char *command)
{
   int i = 0, fds[2];
   /* Worst case every character is a separator, giving strlen + 1 argv
    * entries; one more slot holds the NULL terminator execvp(3)
    * requires.  (strlen alone is one short for a comma-free command.)
    * calloc zero-fills, so the terminator is already in place. */
   char **args = calloc(strlen(command) + 2, sizeof(char *));
   char *iter = command;

   fail_if(args == NULL, "intel_aubdump: out of memory\n");

   args[i++] = command;

   while ((iter = strstr(iter, ",")) != NULL) {
      *iter = '\0';
      iter += 1;
      args[i++] = iter;
   }

   if (pipe(fds) == -1)
      return NULL;

   switch (fork()) {
   case 0:
      dup2(fds[0], 0);
      /* Drop the child's copy of the write end, otherwise the command
       * never sees EOF when the parent closes its end at exit. */
      close(fds[1]);
      fail_if(execvp(args[0], args) == -1,
              "intel_aubdump: failed to launch child command\n");
      return NULL;

   default:
      free(args);
      /* The read end is only used by the child. */
      close(fds[0]);
      return fdopen(fds[1], "w");

   case -1:
      return NULL;
   }
}
1154
1155 static void
1156 maybe_init(void)
1157 {
1158 static bool initialized = false;
1159 FILE *config;
1160 char *key, *value;
1161
1162 if (initialized)
1163 return;
1164
1165 initialized = true;
1166
1167 config = fdopen(3, "r");
1168 while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
1169 if (!strcmp(key, "verbose")) {
1170 if (!strcmp(value, "1")) {
1171 verbose = 1;
1172 } else if (!strcmp(value, "2")) {
1173 verbose = 2;
1174 }
1175 } else if (!strcmp(key, "device")) {
1176 fail_if(sscanf(value, "%i", &device) != 1,
1177 "intel_aubdump: failed to parse device id '%s'",
1178 value);
1179 device_override = true;
1180 } else if (!strcmp(key, "file")) {
1181 filename = strdup(value);
1182 files[0] = fopen(filename, "w+");
1183 fail_if(files[0] == NULL,
1184 "intel_aubdump: failed to open file '%s'\n",
1185 filename);
1186 } else if (!strcmp(key, "command")) {
1187 files[1] = launch_command(value);
1188 fail_if(files[1] == NULL,
1189 "intel_aubdump: failed to launch command '%s'\n",
1190 value);
1191 } else {
1192 fprintf(stderr, "intel_aubdump: unknown option '%s'\n", key);
1193 }
1194
1195 free(key);
1196 free(value);
1197 }
1198 fclose(config);
1199
1200 bos = calloc(MAX_BO_COUNT, sizeof(bos[0]));
1201 fail_if(bos == NULL, "intel_aubdump: out of memory\n");
1202 }
1203
/* Interposed ioctl(2) entry point.
 *
 * Everything is ultimately forwarded to libc, but this function first
 * latches onto the DRM device fd (the first DRM ioctl issued on a
 * character device with the DRM major number) and then shadows GEM
 * buffer creation/import/destruction so that execbuffer2 submissions
 * can be dumped to the AUB output(s).
 */
__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;
   int ret;
   struct stat buf;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   /* Detect the DRM fd: a DRM ioctl on a character device whose major
    * number is DRM_MAJOR.  Only one fd is tracked at a time. */
   if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
       drm_fd != fd && fstat(fd, &buf) == 0 &&
       (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
      drm_fd = fd;
      if (verbose)
         printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd);
   }

   if (fd == drm_fd) {
      /* Parse the launcher's configuration on first interception. */
      maybe_init();

      switch (request) {
      case DRM_IOCTL_I915_GETPARAM: {
         struct drm_i915_getparam *getparam = argp;

         /* With a device override, answer the chipset-id query ourselves
          * instead of asking the kernel. */
         if (device_override && getparam->param == I915_PARAM_CHIPSET_ID) {
            *getparam->value = device;
            return 0;
         }

         ret = libc_ioctl(fd, request, argp);

         /* If the application looks up chipset_id
          * (they typically do), we'll piggy-back on
          * their ioctl and store the id for later
          * use. */
         if (getparam->param == I915_PARAM_CHIPSET_ID)
            device = *getparam->value;

         return ret;
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER: {
         /* Legacy execbuffer interface: warn once, then pass through
          * without dumping. */
         static bool once;
         if (!once) {
            fprintf(stderr, "intel_aubdump: "
                    "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
            once = true;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
         dump_execbuffer2(fd, argp);
         /* Under a device override the batch may not be valid for the
          * real hardware, so never hand it to the kernel. */
         if (device_override)
            return 0;

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CREATE: {
         struct drm_i915_gem_create *create = argp;

         ret = libc_ioctl(fd, request, argp);
         /* Track the new buffer; it is mapped lazily at dump time. */
         if (ret == 0)
            add_new_bo(create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_USERPTR: {
         struct drm_i915_gem_userptr *userptr = argp;

         ret = libc_ioctl(fd, request, argp);
         /* Tag the user pointer so remove_bo() won't munmap memory that
          * belongs to the application. */
         if (ret == 0)
            add_new_bo(userptr->handle, userptr->user_size,
                       (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));
         return ret;
      }

      case DRM_IOCTL_GEM_CLOSE: {
         struct drm_gem_close *close = argp;

         remove_bo(close->handle);

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_GEM_OPEN: {
         struct drm_gem_open *open = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(open->handle, open->size, NULL);

         return ret;
      }

      case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
         struct drm_prime_handle *prime = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            off_t size;

            /* The import reply doesn't carry the dmabuf's size; recover
             * it by seeking to the end of the prime fd. */
            size = lseek(prime->fd, 0, SEEK_END);
            fail_if(size == -1, "intel_aubdump: failed to get prime bo size\n");
            add_new_bo(prime->handle, size, NULL);
         }

         return ret;
      }

      default:
         return libc_ioctl(fd, request, argp);
      }
   } else {
      /* Not the tracked DRM fd: plain pass-through. */
      return libc_ioctl(fd, request, argp);
   }
}
1327
1328 static void
1329 init(void)
1330 {
1331 libc_close = dlsym(RTLD_NEXT, "close");
1332 libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
1333 fail_if(libc_close == NULL || libc_ioctl == NULL,
1334 "intel_aubdump: failed to get libc ioctl or close\n");
1335 }
1336
/* Lazy-init trampoline for close(): resolves the real libc symbols via
 * init() on first use, then forwards.  NOTE(review): presumably the
 * startup value of the libc_close pointer declared above -- confirm at
 * its declaration. */
static int
close_init_helper(int fd)
{
   init();
   return libc_close(fd);
}
1343
/* Lazy-init trampoline for ioctl(): resolves the real libc symbols via
 * init() on first use, then forwards the single pointer-sized argument.
 * NOTE(review): presumably the startup value of the libc_ioctl pointer
 * declared above -- confirm at its declaration. */
static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   init();
   return libc_ioctl(fd, request, argp);
}
1357
1358 static void __attribute__ ((destructor))
1359 fini(void)
1360 {
1361 free(filename);
1362 for (int i = 0; i < ARRAY_SIZE(files); i++) {
1363 if (aubs[i].file)
1364 aub_file_finish(&aubs[i]);
1365 else if (files[i])
1366 fclose(files[i]);
1367 }
1368 free(bos);
1369 }