intel: tools: split aub parsing from aubinator
src/intel/tools/aubinator.c (mesa.git)
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <getopt.h>

#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/mman.h>

#include "util/list.h"
#include "util/macros.h"
#include "util/rb_tree.h"

#include "common/gen_decoder.h"
#include "intel_aub.h"
#include "aub_read.h"

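/* Fallback for C libraries that don't provide a memfd_create() wrapper:
 * invoke the syscall directly.
 */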
#ifndef HAVE_MEMFD_CREATE
#include <sys/syscall.h>

static inline int
memfd_create(const char *name, unsigned int flags)
{
   return syscall(SYS_memfd_create, name, flags);
}
#endif

/* AUB_MI_BATCH_BUFFER_END is the only command missing from libdrm's
 * intel_aub.h, so reuse that header and define it here.
 */
#define AUB_MI_BATCH_BUFFER_END (0x0500 << 16)

#define CSI "\e["
#define BLUE_HEADER CSI "0;44m"
#define GREEN_HEADER CSI "1;42m"
#define NORMAL CSI "0m"

/* options */

static int option_full_decode = true;
static int option_print_offsets = true;
static int max_vbo_lines = -1;
static enum { COLOR_AUTO, COLOR_ALWAYS, COLOR_NEVER } option_color;

/* state */

uint16_t pci_id = 0;
char *input_file = NULL, *xml_path = NULL;
struct gen_device_info devinfo;
struct gen_batch_decode_ctx batch_ctx;

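/* A CPU mapping of a GPU buffer, tracked only for the duration of a single
 * decoded batch (see clear_bo_maps()).
 */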
struct bo_map {
   struct list_head link;
   struct gen_batch_decode_bo bo;
   bool unmap_after_use;
};

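/* One GGTT page-table entry: the physical address backing a 4KiB page of the
 * global virtual address space (the low bits carry validity flags).
 */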
struct ggtt_entry {
   struct rb_node node;
   uint64_t virt_addr;
   uint64_t phys_addr;
};

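/* One 4KiB page of simulated physical memory, backed by the memfd at
 * fd_offset.
 */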
struct phys_mem {
   struct rb_node node;
   uint64_t fd_offset;
   uint64_t phys_addr;
   uint8_t *data;
};

static struct list_head maps;
static struct rb_tree ggtt = {NULL};
static struct rb_tree mem = {NULL};
int mem_fd = -1;
off_t mem_fd_len = 0;

FILE *outfile;

struct brw_instruction;

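/* Remember a mapped BO so the batch decoder can resolve references into it;
 * entries are dropped (and optionally unmapped) by clear_bo_maps() once the
 * batch has been printed.
 */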
static void
add_gtt_bo_map(struct gen_batch_decode_bo bo, bool unmap_after_use)
{
   struct bo_map *m = calloc(1, sizeof(*m));

   m->bo = bo;
   m->unmap_after_use = unmap_after_use;
   list_add(&m->link, &maps);
}

static void
clear_bo_maps(void)
{
   list_for_each_entry_safe(struct bo_map, i, &maps, link) {
      if (i->unmap_after_use)
         munmap((void *)i->bo.map, i->bo.size);
      list_del(&i->link);
      free(i);
   }
}

static inline struct ggtt_entry *
ggtt_entry_next(struct ggtt_entry *entry)
{
   if (!entry)
      return NULL;
   struct rb_node *node = rb_node_next(&entry->node);
   if (!node)
      return NULL;
   return rb_node_data(struct ggtt_entry, node, node);
}

static inline int
cmp_uint64(uint64_t a, uint64_t b)
{
   if (a < b)
      return -1;
   if (a > b)
      return 1;
   return 0;
}

static inline int
cmp_ggtt_entry(const struct rb_node *node, const void *addr)
{
   struct ggtt_entry *entry = rb_node_data(struct ggtt_entry, node, node);
   return cmp_uint64(entry->virt_addr, *(const uint64_t *)addr);
}

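/* Find the GGTT entry for virt_addr, allocating and inserting a zeroed entry
 * if none exists yet.  rb_tree_search_sloppy() returns the closest node, which
 * doubles as the insertion point.
 */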
static struct ggtt_entry *
ensure_ggtt_entry(struct rb_tree *tree, uint64_t virt_addr)
{
   struct rb_node *node = rb_tree_search_sloppy(&ggtt, &virt_addr,
                                                cmp_ggtt_entry);
   int cmp = 0;
   if (!node || (cmp = cmp_ggtt_entry(node, &virt_addr))) {
      struct ggtt_entry *new_entry = calloc(1, sizeof(*new_entry));
      new_entry->virt_addr = virt_addr;
      rb_tree_insert_at(&ggtt, node, &new_entry->node, cmp > 0);
      node = &new_entry->node;
   }

   return rb_node_data(struct ggtt_entry, node, node);
}

static struct ggtt_entry *
search_ggtt_entry(uint64_t virt_addr)
{
   virt_addr &= ~0xfff;

   struct rb_node *node = rb_tree_search(&ggtt, &virt_addr, cmp_ggtt_entry);

   if (!node)
      return NULL;

   return rb_node_data(struct ggtt_entry, node, node);
}

static inline int
cmp_phys_mem(const struct rb_node *node, const void *addr)
{
   struct phys_mem *mem = rb_node_data(struct phys_mem, node, node);
   return cmp_uint64(mem->phys_addr, *(uint64_t *)addr);
}

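/* Find the simulated physical page at phys_addr, allocating a fresh 4KiB page
 * in the memfd (and mapping it writable) on first use.
 */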
static struct phys_mem *
ensure_phys_mem(uint64_t phys_addr)
{
   struct rb_node *node = rb_tree_search_sloppy(&mem, &phys_addr, cmp_phys_mem);
   int cmp = 0;
   if (!node || (cmp = cmp_phys_mem(node, &phys_addr))) {
      struct phys_mem *new_mem = calloc(1, sizeof(*new_mem));
      new_mem->phys_addr = phys_addr;
      new_mem->fd_offset = mem_fd_len;

      MAYBE_UNUSED int ftruncate_res = ftruncate(mem_fd, mem_fd_len += 4096);
      assert(ftruncate_res == 0);

      new_mem->data = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                           mem_fd, new_mem->fd_offset);
      assert(new_mem->data != MAP_FAILED);

      rb_tree_insert_at(&mem, node, &new_mem->node, cmp > 0);
      node = &new_mem->node;
   }

   return rb_node_data(struct phys_mem, node, node);
}

static struct phys_mem *
search_phys_mem(uint64_t phys_addr)
{
   phys_addr &= ~0xfff;

   struct rb_node *node = rb_tree_search(&mem, &phys_addr, cmp_phys_mem);

   if (!node)
      return NULL;

   return rb_node_data(struct phys_mem, node, node);
}

static void
handle_local_write(void *user_data, uint64_t address, const void *data, uint32_t size)
{
   struct gen_batch_decode_bo bo = {
      .map = data,
      .addr = address,
      .size = size,
   };
   add_gtt_bo_map(bo, false);
}

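/* A write to the GGTT itself: 'address' is the byte offset into the GGTT, so
 * each 8-byte entry written here maps one 4KiB page of the virtual address
 * space.
 */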
static void
handle_ggtt_entry_write(void *user_data, uint64_t address, const void *_data, uint32_t _size)
{
   uint64_t virt_addr = (address / sizeof(uint64_t)) << 12;
   const uint64_t *data = _data;
   size_t size = _size / sizeof(*data);
   for (const uint64_t *entry = data;
        entry < data + size;
        entry++, virt_addr += 4096) {
      struct ggtt_entry *pt = ensure_ggtt_entry(&ggtt, virt_addr);
      pt->phys_addr = *entry;
   }
}

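/* Copy 'size' bytes of AUB data into simulated physical memory, splitting the
 * write at 4KiB page boundaries.
 */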
static void
handle_physical_write(void *user_data, uint64_t phys_address, const void *data, uint32_t size)
{
   uint32_t to_write = size;
   for (uint64_t page = phys_address & ~0xfff; page < phys_address + size; page += 4096) {
      struct phys_mem *mem = ensure_phys_mem(page);
      uint64_t offset = MAX2(page, phys_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;
      memcpy(mem->data + offset, data, size_this_page);
      data = (const uint8_t *)data + size_this_page;
   }
}

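/* Translate a write through the GGTT into per-page physical writes, using the
 * entries recorded by handle_ggtt_entry_write().
 */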
static void
handle_ggtt_write(void *user_data, uint64_t virt_address, const void *data, uint32_t size)
{
   uint32_t to_write = size;
   for (uint64_t page = virt_address & ~0xfff; page < virt_address + size; page += 4096) {
      struct ggtt_entry *entry = search_ggtt_entry(page);
      assert(entry && entry->phys_addr & 0x1);

      uint64_t offset = MAX2(page, virt_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;

      uint64_t phys_page = entry->phys_addr & ~0xfff; /* Clear the validity bits. */
      handle_physical_write(user_data, phys_page + offset, data, size_this_page);
      data = (const uint8_t *)data + size_this_page;
   }
}

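/* Return a CPU mapping of the GGTT buffer containing 'address'.  Buffers that
 * were written directly into the AUB stream are reused as-is; otherwise the
 * contiguous run of GGTT pages around the address is stitched together from
 * the memfd with MAP_FIXED mappings.
 */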
static struct gen_batch_decode_bo
get_ggtt_batch_bo(void *user_data, uint64_t address)
{
   struct gen_batch_decode_bo bo = {0};

   list_for_each_entry(struct bo_map, i, &maps, link)
      if (i->bo.addr <= address && i->bo.addr + i->bo.size > address)
         return i->bo;

   address &= ~0xfff;

   struct ggtt_entry *start =
      (struct ggtt_entry *)rb_tree_search_sloppy(&ggtt, &address,
                                                 cmp_ggtt_entry);
   if (start && start->virt_addr < address)
      start = ggtt_entry_next(start);
   if (!start)
      return bo;

   struct ggtt_entry *last = start;
   for (struct ggtt_entry *i = ggtt_entry_next(last);
        i && last->virt_addr + 4096 == i->virt_addr;
        last = i, i = ggtt_entry_next(last))
      ;

   bo.addr = MIN2(address, start->virt_addr);
   bo.size = last->virt_addr - bo.addr + 4096;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   for (struct ggtt_entry *i = start;
        i;
        i = i == last ? NULL : ggtt_entry_next(i)) {
      uint64_t phys_addr = i->phys_addr & ~0xfff;
      struct phys_mem *phys_mem = search_phys_mem(phys_addr);

      if (!phys_mem)
         continue;

      uint32_t map_offset = i->virt_addr - address;
      void *res = mmap((uint8_t *)bo.map + map_offset, 4096, PROT_READ,
                       MAP_SHARED | MAP_FIXED, mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   add_gtt_bo_map(bo, true);

   return bo;
}

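/* Walk the 4-level PPGTT starting from the PML4 physical address.  Returns
 * the backing physical page for 'address', or NULL if any level of the walk
 * is missing or not marked present.
 */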
static struct phys_mem *
ppgtt_walk(uint64_t pml4, uint64_t address)
{
   uint64_t shift = 39;
   uint64_t addr = pml4;
   for (int level = 4; level > 0; level--) {
      struct phys_mem *table = search_phys_mem(addr);
      if (!table)
         return NULL;
      int index = (address >> shift) & 0x1ff;
      uint64_t entry = ((uint64_t *)table->data)[index];
      if (!(entry & 1))
         return NULL;
      addr = entry & ~0xfff;
      shift -= 9;
   }
   return search_phys_mem(addr);
}

static bool
ppgtt_mapped(uint64_t pml4, uint64_t address)
{
   return ppgtt_walk(pml4, address) != NULL;
}

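/* Like get_ggtt_batch_bo(), but for PPGTT addresses; user_data points at the
 * PML4 address pulled out of the context image.
 */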
static struct gen_batch_decode_bo
get_ppgtt_batch_bo(void *user_data, uint64_t address)
{
   struct gen_batch_decode_bo bo = {0};
   uint64_t pml4 = *(uint64_t *)user_data;

   address &= ~0xfff;

   if (!ppgtt_mapped(pml4, address))
      return bo;

   /* Map everything until the first gap since we don't know how much the
    * decoder actually needs.
    */
   uint64_t end = address;
   while (ppgtt_mapped(pml4, end))
      end += 4096;

   bo.addr = address;
   bo.size = end - address;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   for (uint64_t page = address; page < end; page += 4096) {
      struct phys_mem *phys_mem = ppgtt_walk(pml4, page);

      void *res = mmap((uint8_t *)bo.map + (page - bo.addr), 4096, PROT_READ,
                       MAP_SHARED | MAP_FIXED, mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   add_gtt_bo_map(bo, true);

   return bo;
}

static void
aubinator_error(void *user_data, const void *aub_data, const char *msg)
{
   fprintf(stderr, "%s", msg);
}

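/* Called for the AUB header block: identify the device from the PCI ID and
 * set up the batch decoding context and output header.
 */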
static void
aubinator_init(void *user_data, int aub_pci_id, const char *app_name)
{
   pci_id = aub_pci_id;

   if (!gen_get_device_info(pci_id, &devinfo)) {
      fprintf(stderr, "can't find device information: pci_id=0x%x\n", pci_id);
      exit(EXIT_FAILURE);
   }

   enum gen_batch_decode_flags batch_flags = 0;
   if (option_color == COLOR_ALWAYS)
      batch_flags |= GEN_BATCH_DECODE_IN_COLOR;
   if (option_full_decode)
      batch_flags |= GEN_BATCH_DECODE_FULL;
   if (option_print_offsets)
      batch_flags |= GEN_BATCH_DECODE_OFFSETS;
   batch_flags |= GEN_BATCH_DECODE_FLOATS;

   gen_batch_decode_ctx_init(&batch_ctx, &devinfo, outfile, batch_flags,
                             xml_path, NULL, NULL, NULL);
   batch_ctx.max_vbo_decoded_lines = max_vbo_lines;

   char *color = GREEN_HEADER, *reset_color = NORMAL;
   if (option_color == COLOR_NEVER)
      color = reset_color = "";

   fprintf(outfile, "%sAubinator: Intel AUB file decoder.%-80s%s\n",
           color, "", reset_color);

   if (input_file)
      fprintf(outfile, "File name: %s\n", input_file);

   if (aub_pci_id)
      fprintf(outfile, "PCI ID: 0x%x\n", aub_pci_id);

   fprintf(outfile, "Application name: %s\n", app_name);

   fprintf(outfile, "Decoding as: %s\n", gen_get_device_name(pci_id));

   /* Throw in a new line before the first batch */
   fprintf(outfile, "\n");
}

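/* An execlist submission: locate the logical ring context right after the
 * 4KiB PPHWSP, pull the ring buffer head/tail/start and the PPGTT PML4 out of
 * it, and decode the commands sitting in the ring.
 */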
static void
handle_execlist_write(void *user_data, enum gen_engine engine, uint64_t context_descriptor)
{
   const uint32_t pphwsp_size = 4096;
   uint32_t pphwsp_addr = context_descriptor & 0xfffff000;
   struct gen_batch_decode_bo pphwsp_bo = get_ggtt_batch_bo(NULL, pphwsp_addr);
   uint32_t *context = (uint32_t *)((uint8_t *)pphwsp_bo.map +
                                    (pphwsp_addr - pphwsp_bo.addr) +
                                    pphwsp_size);

   uint32_t ring_buffer_head = context[5];
   uint32_t ring_buffer_tail = context[7];
   uint32_t ring_buffer_start = context[9];
   uint64_t pml4 = (uint64_t)context[49] << 32 | context[51];

   struct gen_batch_decode_bo ring_bo = get_ggtt_batch_bo(NULL,
                                                          ring_buffer_start);
   assert(ring_bo.size > 0);
   void *commands = (uint8_t *)ring_bo.map + (ring_buffer_start - ring_bo.addr);

   if (context_descriptor & 0x100 /* ppgtt */) {
      batch_ctx.get_bo = get_ppgtt_batch_bo;
      batch_ctx.user_data = &pml4;
   } else {
      batch_ctx.get_bo = get_ggtt_batch_bo;
   }

   (void)engine; /* TODO */
   gen_print_batch(&batch_ctx, commands, ring_buffer_tail - ring_buffer_head,
                   0);
   clear_bo_maps();
}

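/* A raw ring/batch write from the AUB stream: decode it directly, resolving
 * any buffer references through the GGTT.
 */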
static void
handle_ring_write(void *user_data, enum gen_engine engine,
                  const void *data, uint32_t data_len)
{
   batch_ctx.get_bo = get_ggtt_batch_bo;

   gen_print_batch(&batch_ctx, data, data_len, 0);

   clear_bo_maps();
}

struct aub_file {
   FILE *stream;

   void *map, *end, *cursor;
};

static struct aub_file *
aub_file_open(const char *filename)
{
   struct aub_file *file;
   struct stat sb;
   int fd;

   file = calloc(1, sizeof *file);
   fd = open(filename, O_RDONLY);
   if (fd == -1) {
      fprintf(stderr, "open %s failed: %s\n", filename, strerror(errno));
      exit(EXIT_FAILURE);
   }

   if (fstat(fd, &sb) == -1) {
      fprintf(stderr, "stat failed: %s\n", strerror(errno));
      exit(EXIT_FAILURE);
   }

   file->map = mmap(NULL, sb.st_size,
                    PROT_READ, MAP_SHARED, fd, 0);
   if (file->map == MAP_FAILED) {
      fprintf(stderr, "mmap failed: %s\n", strerror(errno));
      exit(EXIT_FAILURE);
   }

   close(fd);

   file->cursor = file->map;
   file->end = file->map + sb.st_size;

   return file;
}

static int
aub_file_more_stuff(struct aub_file *file)
{
   return file->cursor < file->end || (file->stream && !feof(file->stream));
}

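/* If stdout is a terminal, fork a 'less' pager and redirect our stdout into
 * its stdin.
 */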
static void
setup_pager(void)
{
   int fds[2];
   pid_t pid;

   if (!isatty(1))
      return;

   if (pipe(fds) == -1)
      return;

   pid = fork();
   if (pid == -1)
      return;

   if (pid == 0) {
      close(fds[1]);
      dup2(fds[0], 0);
      execlp("less", "less", "-FRSi", NULL);
   }

   close(fds[0]);
   dup2(fds[1], 1);
   close(fds[1]);
}

static void
print_help(const char *progname, FILE *file)
{
   fprintf(file,
           "Usage: %s [OPTION]... FILE\n"
           "Decode aub file contents from FILE.\n\n"
           "      --help             display this help and exit\n"
           "      --gen=platform     decode for given platform (3 letter platform name)\n"
           "      --headers          decode only command headers\n"
           "      --color[=WHEN]     colorize the output; WHEN can be 'auto' (default\n"
           "                           if omitted), 'always', or 'never'\n"
           "      --max-vbo-lines=N  limit the number of decoded VBO lines\n"
           "      --no-pager         don't launch pager\n"
           "      --no-offsets       don't print instruction offsets\n"
           "      --xml=DIR          load hardware xml description from directory DIR\n",
           progname);
}

int main(int argc, char *argv[])
{
   struct aub_file *file;
   int c, i;
   bool help = false, pager = true;
   const struct option aubinator_opts[] = {
      { "help", no_argument, (int *) &help, true },
      { "no-pager", no_argument, (int *) &pager, false },
      { "no-offsets", no_argument, (int *) &option_print_offsets, false },
      { "gen", required_argument, NULL, 'g' },
      { "headers", no_argument, (int *) &option_full_decode, false },
      { "color", required_argument, NULL, 'c' },
      { "xml", required_argument, NULL, 'x' },
      { "max-vbo-lines", required_argument, NULL, 'v' },
      { NULL, 0, NULL, 0 }
   };

   outfile = stdout;

   i = 0;
   while ((c = getopt_long(argc, argv, "", aubinator_opts, &i)) != -1) {
      switch (c) {
      case 'g': {
         const int id = gen_device_name_to_pci_device_id(optarg);
         if (id < 0) {
            fprintf(stderr, "can't parse gen: '%s', expected ivb, byt, hsw, "
                            "bdw, chv, skl, kbl or bxt\n", optarg);
            exit(EXIT_FAILURE);
         } else {
            pci_id = id;
         }
         break;
      }
      case 'c':
         if (optarg == NULL || strcmp(optarg, "always") == 0)
            option_color = COLOR_ALWAYS;
         else if (strcmp(optarg, "never") == 0)
            option_color = COLOR_NEVER;
         else if (strcmp(optarg, "auto") == 0)
            option_color = COLOR_AUTO;
         else {
            fprintf(stderr, "invalid value for --color: %s\n", optarg);
            exit(EXIT_FAILURE);
         }
         break;
      case 'x':
         xml_path = strdup(optarg);
         break;
      case 'v':
         max_vbo_lines = atoi(optarg);
         break;
      default:
         break;
      }
   }

   if (optind < argc)
      input_file = argv[optind];

   if (help || !input_file) {
      print_help(argv[0], stderr);
      exit(0);
   }

   /* Do this before we redirect stdout to pager. */
   if (option_color == COLOR_AUTO)
      option_color = isatty(1) ? COLOR_ALWAYS : COLOR_NEVER;

   if (isatty(1) && pager)
      setup_pager();

   mem_fd = memfd_create("phys memory", 0);

   list_inithead(&maps);

   file = aub_file_open(input_file);

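   /* Wire the AUB parser callbacks up to the handlers above; all of the
    * decoding work happens from inside these callbacks.
    */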
   struct aub_read aub_read = {
      .user_data = NULL,
      .error = aubinator_error,
      .info = aubinator_init,
      .local_write = handle_local_write,
      .phys_write = handle_physical_write,
      .ggtt_write = handle_ggtt_write,
      .ggtt_entry_write = handle_ggtt_entry_write,
      .execlist_write = handle_execlist_write,
      .ring_write = handle_ring_write,
   };
   int consumed;
   while (aub_file_more_stuff(file) &&
          (consumed = aub_read_command(&aub_read, file->cursor,
                                       file->end - file->cursor)) > 0) {
      file->cursor += consumed;
   }

   fflush(stdout);
   /* Close stdout so the pager (if we spawned one) sees EOF and can exit. */
   close(1);
   free(xml_path);

   wait(NULL);

   return EXIT_SUCCESS;
}