intel: aubinator: simplify decoding
[mesa.git] / src / intel / tools / aubinator.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <stdint.h>
27 #include <stdbool.h>
28 #include <getopt.h>
29
30 #include <unistd.h>
31 #include <fcntl.h>
32 #include <string.h>
33 #include <signal.h>
34 #include <errno.h>
35 #include <inttypes.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <sys/wait.h>
39 #include <sys/mman.h>
40
41 #include "util/list.h"
42 #include "util/macros.h"
43 #include "util/rb_tree.h"
44
45 #include "common/gen_decoder.h"
46 #include "common/gen_disasm.h"
47 #include "common/gen_gem.h"
48 #include "intel_aub.h"
49
50 #ifndef HAVE_MEMFD_CREATE
51 #include <sys/syscall.h>
52
53 static inline int
54 memfd_create(const char *name, unsigned int flags)
55 {
56 return syscall(SYS_memfd_create, name, flags);
57 }
58 #endif
59
60 /* MI_BATCH_BUFFER_END is the only command missing from libdrm's
61 * intel_aub.h, so reuse that header and define
62 * AUB_MI_BATCH_BUFFER_END here ourselves.
63 */
64 #define AUB_MI_BATCH_BUFFER_END (0x0500 << 16)
65
66 #define CSI "\e["
67 #define BLUE_HEADER CSI "0;44m"
68 #define GREEN_HEADER CSI "1;42m"
69 #define NORMAL CSI "0m"
70
71 /* options */
72
73 static int option_full_decode = true;
74 static int option_print_offsets = true;
75 static int max_vbo_lines = -1;
76 static enum { COLOR_AUTO, COLOR_ALWAYS, COLOR_NEVER } option_color;
77
78 /* state */
79
80 uint16_t pci_id = 0;
81 char *input_file = NULL, *xml_path = NULL;
82 struct gen_device_info devinfo;
83 struct gen_batch_decode_ctx batch_ctx;
84
85 struct bo_map {
86 struct list_head link;
87 struct gen_batch_decode_bo bo;
88 bool unmap_after_use;
89 };
90
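/* Bookkeeping for the simulated memory: each ggtt_entry records one global
 * GTT page-table entry (virtual page -> PTE value), and each phys_mem
 * records one 4 KiB page of "physical" memory backed by a slice of an
 * anonymous memfd, so that pages can later be mmap'ed back into contiguous
 * views for the decoder.
 */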
91 struct ggtt_entry {
92 struct rb_node node;
93 uint64_t virt_addr;
94 uint64_t phys_addr;
95 };
96
97 struct phys_mem {
98 struct rb_node node;
99 uint64_t fd_offset;
100 uint64_t phys_addr;
101 uint8_t *data;
102 };
103
104 static struct list_head maps;
105 static struct rb_tree ggtt = {NULL};
106 static struct rb_tree mem = {NULL};
107 int mem_fd = -1;
108 off_t mem_fd_len = 0;
109
110 FILE *outfile;
111
112 struct brw_instruction;
113
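/* BOs discovered while parsing the AUB stream are kept on the maps list for
 * the duration of one batch decode; clear_bo_maps() drops them again and
 * munmaps the ones this tool created itself.
 */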
114 static void
115 add_gtt_bo_map(struct gen_batch_decode_bo bo, bool unmap_after_use)
116 {
117 struct bo_map *m = calloc(1, sizeof(*m));
118
119 m->bo = bo;
120 m->unmap_after_use = unmap_after_use;
121 list_add(&m->link, &maps);
122 }
123
124 static void
125 clear_bo_maps(void)
126 {
127 list_for_each_entry_safe(struct bo_map, i, &maps, link) {
128 if (i->unmap_after_use)
129 munmap((void *)i->bo.map, i->bo.size);
130 list_del(&i->link);
131 free(i);
132 }
133 }
134
135 static inline struct ggtt_entry *
136 ggtt_entry_next(struct ggtt_entry *entry)
137 {
138 if (!entry)
139 return NULL;
140 struct rb_node *node = rb_node_next(&entry->node);
141 if (!node)
142 return NULL;
143 return rb_node_data(struct ggtt_entry, node, node);
144 }
145
146 static inline int
147 cmp_uint64(uint64_t a, uint64_t b)
148 {
149 if (a < b)
150 return -1;
151 if (a > b)
152 return 1;
153 return 0;
154 }
155
156 static inline int
157 cmp_ggtt_entry(const struct rb_node *node, const void *addr)
158 {
159 struct ggtt_entry *entry = rb_node_data(struct ggtt_entry, node, node);
160 return cmp_uint64(entry->virt_addr, *(const uint64_t *)addr);
161 }
162
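/* Find-or-create helper. rb_tree_search_sloppy() returns the nearest node
 * instead of NULL on a miss, which (as the sloppy search is used here) lets
 * rb_tree_insert_at() place a new entry next to it without walking the tree
 * a second time.
 */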
163 static struct ggtt_entry *
164 ensure_ggtt_entry(struct rb_tree *tree, uint64_t virt_addr)
165 {
166 struct rb_node *node = rb_tree_search_sloppy(tree, &virt_addr,
167 cmp_ggtt_entry);
168 int cmp = 0;
169 if (!node || (cmp = cmp_ggtt_entry(node, &virt_addr))) {
170 struct ggtt_entry *new_entry = calloc(1, sizeof(*new_entry));
171 new_entry->virt_addr = virt_addr;
172 rb_tree_insert_at(tree, node, &new_entry->node, cmp > 0);
173 node = &new_entry->node;
174 }
175
176 return rb_node_data(struct ggtt_entry, node, node);
177 }
178
179 static struct ggtt_entry *
180 search_ggtt_entry(uint64_t virt_addr)
181 {
182 virt_addr &= ~0xfff;
183
184 struct rb_node *node = rb_tree_search(&ggtt, &virt_addr, cmp_ggtt_entry);
185
186 if (!node)
187 return NULL;
188
189 return rb_node_data(struct ggtt_entry, node, node);
190 }
191
192 static inline int
193 cmp_phys_mem(const struct rb_node *node, const void *addr)
194 {
195 struct phys_mem *mem = rb_node_data(struct phys_mem, node, node);
196 return cmp_uint64(mem->phys_addr, *(const uint64_t *)addr);
197 }
198
199 static struct phys_mem *
200 ensure_phys_mem(uint64_t phys_addr)
201 {
202 struct rb_node *node = rb_tree_search_sloppy(&mem, &phys_addr, cmp_phys_mem);
203 int cmp = 0;
204 if (!node || (cmp = cmp_phys_mem(node, &phys_addr))) {
205 struct phys_mem *new_mem = calloc(1, sizeof(*new_mem));
206 new_mem->phys_addr = phys_addr;
207 new_mem->fd_offset = mem_fd_len;
208
209 int ftruncate_res = ftruncate(mem_fd, mem_fd_len += 4096);
210 assert(ftruncate_res == 0);
211
212 new_mem->data = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
213 mem_fd, new_mem->fd_offset);
214 assert(new_mem->data != MAP_FAILED);
215
216 rb_tree_insert_at(&mem, node, &new_mem->node, cmp > 0);
217 node = &new_mem->node;
218 }
219
220 return rb_node_data(struct phys_mem, node, node);
221 }
222
223 static struct phys_mem *
224 search_phys_mem(uint64_t phys_addr)
225 {
226 phys_addr &= ~0xfff;
227
228 struct rb_node *node = rb_tree_search(&mem, &phys_addr, cmp_phys_mem);
229
230 if (!node)
231 return NULL;
232
233 return rb_node_data(struct phys_mem, node, node);
234 }
235
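/* A GGTT-entry write hands us what appears to be a byte offset into the
 * global GTT plus an array of 64-bit PTEs. Each 8-byte entry maps one
 * 4 KiB page, hence (address / sizeof(uint64_t)) << 12 to convert the
 * entry offset into the virtual address it maps.
 */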
236 static void
237 handle_ggtt_entry_write(uint64_t address, const void *_data, uint32_t _size)
238 {
239 uint64_t virt_addr = (address / sizeof(uint64_t)) << 12;
240 const uint64_t *data = _data;
241 size_t size = _size / sizeof(*data);
242 for (const uint64_t *entry = data;
243 entry < data + size;
244 entry++, virt_addr += 4096) {
245 struct ggtt_entry *pt = ensure_ggtt_entry(&ggtt, virt_addr);
246 pt->phys_addr = *entry;
247 }
248 }
249
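/* Copy a blob of data into the simulated physical memory, splitting the
 * write at 4 KiB page boundaries and allocating backing pages on demand.
 */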
250 static void
251 handle_physical_write(uint64_t phys_address, const void *data, uint32_t size)
252 {
253 uint32_t to_write = size;
254 for (uint64_t page = phys_address & ~0xfff; page < phys_address + size; page += 4096) {
255 struct phys_mem *mem = ensure_phys_mem(page);
256 uint64_t offset = MAX2(page, phys_address) - page;
257 uint32_t size_this_page = MIN2(to_write, 4096 - offset);
258 to_write -= size_this_page;
259 memcpy(mem->data + offset, data, size_this_page);
260 data = (const uint8_t *)data + size_this_page;
261 }
262 }
263
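/* Same idea as above, but addressed through the GGTT: each page of the
 * write is translated to its physical page via its GGTT entry (bit 0 of
 * the PTE being the validity bit) before being stored.
 */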
264 static void
265 handle_ggtt_write(uint64_t virt_address, const void *data, uint32_t size)
266 {
267 uint32_t to_write = size;
268 for (uint64_t page = virt_address & ~0xfff; page < virt_address + size; page += 4096) {
269 struct ggtt_entry *entry = search_ggtt_entry(page);
270 assert(entry && (entry->phys_addr & 0x1));
271
272 uint64_t offset = MAX2(page, virt_address) - page;
273 uint32_t size_this_page = MIN2(to_write, 4096 - offset);
274 to_write -= size_this_page;
275
276 uint64_t phys_page = entry->phys_addr & ~0xfff; /* Clear the validity bits. */
277 handle_physical_write(phys_page + offset, data, size_this_page);
278 data = (const uint8_t *)data + size_this_page;
279 }
280 }
281
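/* get_bo callback for GGTT addresses. BOs handed to us directly by the AUB
 * stream are checked first; otherwise one is reconstructed by finding the
 * contiguous run of GGTT entries covering the address, reserving an
 * anonymous mapping of that size, and mmap'ing every present physical page
 * from mem_fd over it. The result is registered with unmap_after_use so
 * clear_bo_maps() releases it after the batch is decoded.
 */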
282 static struct gen_batch_decode_bo
283 get_ggtt_batch_bo(void *user_data, uint64_t address)
284 {
285 struct gen_batch_decode_bo bo = {0};
286
287 list_for_each_entry(struct bo_map, i, &maps, link)
288 if (i->bo.addr <= address && i->bo.addr + i->bo.size > address)
289 return i->bo;
290
291 address &= ~0xfff;
292
293 struct ggtt_entry *start =
294 (struct ggtt_entry *)rb_tree_search_sloppy(&ggtt, &address,
295 cmp_ggtt_entry);
296 if (start && start->virt_addr < address)
297 start = ggtt_entry_next(start);
298 if (!start)
299 return bo;
300
301 struct ggtt_entry *last = start;
302 for (struct ggtt_entry *i = ggtt_entry_next(last);
303 i && last->virt_addr + 4096 == i->virt_addr;
304 last = i, i = ggtt_entry_next(last))
305 ;
306
307 bo.addr = MIN2(address, start->virt_addr);
308 bo.size = last->virt_addr - bo.addr + 4096;
309 bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
310 assert(bo.map != MAP_FAILED);
311
312 for (struct ggtt_entry *i = start;
313 i;
314 i = i == last ? NULL : ggtt_entry_next(i)) {
315 uint64_t phys_addr = i->phys_addr & ~0xfff;
316 struct phys_mem *phys_mem = search_phys_mem(phys_addr);
317
318 if (!phys_mem)
319 continue;
320
321 uint32_t map_offset = i->virt_addr - address;
322 void *res = mmap((uint8_t *)bo.map + map_offset, 4096, PROT_READ,
323 MAP_SHARED | MAP_FIXED, mem_fd, phys_mem->fd_offset);
324 assert(res != MAP_FAILED);
325 }
326
327 add_gtt_bo_map(bo, true);
328
329 return bo;
330 }
331
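/* Walk the four-level PPGTT (PML4 -> PDP -> PD -> PT), consuming 9 bits of
 * the virtual address per level starting at bit 39, e.g. the top-level
 * index is (address >> 39) & 0x1ff. Returns the backing physical page, or
 * NULL if any level is absent or its entry has the present bit (bit 0)
 * clear.
 */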
332 static struct phys_mem *
333 ppgtt_walk(uint64_t pml4, uint64_t address)
334 {
335 uint64_t shift = 39;
336 uint64_t addr = pml4;
337 for (int level = 4; level > 0; level--) {
338 struct phys_mem *table = search_phys_mem(addr);
339 if (!table)
340 return NULL;
341 int index = (address >> shift) & 0x1ff;
342 uint64_t entry = ((uint64_t *)table->data)[index];
343 if (!(entry & 1))
344 return NULL;
345 addr = entry & ~0xfff;
346 shift -= 9;
347 }
348 return search_phys_mem(addr);
349 }
350
351 static bool
352 ppgtt_mapped(uint64_t pml4, uint64_t address)
353 {
354 return ppgtt_walk(pml4, address) != NULL;
355 }
356
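/* get_bo callback for PPGTT addresses; user_data points at the PML4 address
 * that handle_memtrace_reg_write() pulled out of the context image.
 */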
357 static struct gen_batch_decode_bo
358 get_ppgtt_batch_bo(void *user_data, uint64_t address)
359 {
360 struct gen_batch_decode_bo bo = {0};
361 uint64_t pml4 = *(uint64_t *)user_data;
362
363 address &= ~0xfff;
364
365 if (!ppgtt_mapped(pml4, address))
366 return bo;
367
368 /* Map everything until the first gap since we don't know how much the
369 * decoder actually needs.
370 */
371 uint64_t end = address;
372 while (ppgtt_mapped(pml4, end))
373 end += 4096;
374
375 bo.addr = address;
376 bo.size = end - address;
377 bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
378 assert(bo.map != MAP_FAILED);
379
380 for (uint64_t page = address; page < end; page += 4096) {
381 struct phys_mem *phys_mem = ppgtt_walk(pml4, page);
382
383 void *res = mmap((uint8_t *)bo.map + (page - bo.addr), 4096, PROT_READ,
384 MAP_SHARED | MAP_FIXED, mem_fd, phys_mem->fd_offset);
385 assert(res != MAP_FAILED);
386 }
387
388 add_gtt_bo_map(bo, true);
389
390 return bo;
391 }
392
393 #define GEN_ENGINE_RENDER 1
394 #define GEN_ENGINE_BLITTER 2
395
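/* Classic (pre-memtrace) AUB trace block: data writes just register a BO in
 * the GGTT address space, while command writes are decoded immediately as a
 * batch (the ring type only selects the engine, which is currently unused).
 */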
396 static void
397 handle_trace_block(uint32_t *p)
398 {
399 int operation = p[1] & AUB_TRACE_OPERATION_MASK;
400 int type = p[1] & AUB_TRACE_TYPE_MASK;
401 int address_space = p[1] & AUB_TRACE_ADDRESS_SPACE_MASK;
402 int header_length = p[0] & 0xffff;
403 int engine = GEN_ENGINE_RENDER;
404 struct gen_batch_decode_bo bo = {
405 .map = p + header_length + 2,
406 /* Addresses written by aubdump here are in canonical form but the batch
407 * decoder always gives us addresses with the top 16 bits zeroed, so do
408 * the same here.
409 */
410 .addr = gen_48b_address((devinfo.gen >= 8 ? ((uint64_t) p[5] << 32) : 0) |
411 ((uint64_t) p[3])),
412 .size = p[4],
413 };
414
415 switch (operation) {
416 case AUB_TRACE_OP_DATA_WRITE:
417 if (address_space == AUB_TRACE_MEMTYPE_GTT)
418 add_gtt_bo_map(bo, false);
419 break;
420 case AUB_TRACE_OP_COMMAND_WRITE:
421 switch (type) {
422 case AUB_TRACE_TYPE_RING_PRB0:
423 engine = GEN_ENGINE_RENDER;
424 break;
425 case AUB_TRACE_TYPE_RING_PRB2:
426 engine = GEN_ENGINE_BLITTER;
427 break;
428 default:
429 fprintf(outfile, "command write to unknown ring %d\n", type);
430 break;
431 }
432
433 (void)engine; /* TODO */
434 batch_ctx.get_bo = get_ggtt_batch_bo;
435 gen_print_batch(&batch_ctx, bo.map, bo.size, 0);
436
437 clear_bo_maps();
438 break;
439 }
440 }
441
442 static void
443 aubinator_init(uint16_t aub_pci_id, const char *app_name)
444 {
445 if (!gen_get_device_info(pci_id, &devinfo)) {
446 fprintf(stderr, "can't find device information: pci_id=0x%x\n", pci_id);
447 exit(EXIT_FAILURE);
448 }
449
450 enum gen_batch_decode_flags batch_flags = 0;
451 if (option_color == COLOR_ALWAYS)
452 batch_flags |= GEN_BATCH_DECODE_IN_COLOR;
453 if (option_full_decode)
454 batch_flags |= GEN_BATCH_DECODE_FULL;
455 if (option_print_offsets)
456 batch_flags |= GEN_BATCH_DECODE_OFFSETS;
457 batch_flags |= GEN_BATCH_DECODE_FLOATS;
458
459 gen_batch_decode_ctx_init(&batch_ctx, &devinfo, outfile, batch_flags,
460 xml_path, NULL, NULL, NULL);
461 batch_ctx.max_vbo_decoded_lines = max_vbo_lines;
462
463 char *color = GREEN_HEADER, *reset_color = NORMAL;
464 if (option_color == COLOR_NEVER)
465 color = reset_color = "";
466
467 fprintf(outfile, "%sAubinator: Intel AUB file decoder.%-80s%s\n",
468 color, "", reset_color);
469
470 if (input_file)
471 fprintf(outfile, "File name: %s\n", input_file);
472
473 if (aub_pci_id)
474 fprintf(outfile, "PCI ID: 0x%x\n", aub_pci_id);
475
476 fprintf(outfile, "Application name: %s\n", app_name);
477
478 fprintf(outfile, "Decoding as: %s\n", gen_get_device_name(pci_id));
479
480 /* Throw in a new line before the first batch */
481 fprintf(outfile, "\n");
482 }
483
484 static void
485 handle_trace_header(uint32_t *p)
486 {
487 /* The intel_aubdump tool from IGT is kind enough to put a PCI-ID= tag in
488 * the AUB header comment. If the user hasn't specified a hardware
489 * generation, try to use the one from the AUB file.
490 */
491 uint32_t *end = p + (p[0] & 0xffff) + 2;
492 int aub_pci_id = 0;
493 if (end > &p[12] && p[12] > 0)
494 sscanf((char *)&p[13], "PCI-ID=%i", &aub_pci_id);
495
496 if (pci_id == 0)
497 pci_id = aub_pci_id;
498
499 char app_name[33];
500 strncpy(app_name, (char *)&p[2], 32);
501 app_name[32] = 0;
502
503 aubinator_init(aub_pci_id, app_name);
504 }
505
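/* Memtrace AUBs typically carry an ASCII comment of the form
 * "PCI-ID=0x1234 <app>" in their version packet; parse the PCI ID (unless
 * one was given on the command line) and whatever follows it as the
 * application name.
 */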
506 static void
507 handle_memtrace_version(uint32_t *p)
508 {
509 int header_length = p[0] & 0xffff;
510 char app_name[64];
511 int app_name_len = MIN2(4 * (header_length + 1 - 5), ARRAY_SIZE(app_name) - 1);
512 int pci_id_len = 0;
513 int aub_pci_id = 0;
514
515 strncpy(app_name, (char *)&p[5], app_name_len);
516 app_name[app_name_len] = 0;
517 sscanf(app_name, "PCI-ID=%i %n", &aub_pci_id, &pci_id_len);
518 if (pci_id == 0)
519 pci_id = aub_pci_id;
520 aubinator_init(aub_pci_id, app_name + pci_id_len);
521 }
522
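/* Watch execlist submission register writes to find batches. ELSP writes
 * (0x2230 render, 0x22230 blitter) arrive as four dwords, while the newer
 * ELSQ + control style (0x2510/0x2514 plus 0x2550, and the blitter
 * equivalents) arrives as separate lo/hi writes plus a kick; either way the
 * last two dwords form the context descriptor. From it, locate the context
 * image just after the 4 KiB PPHWSP, read the ring buffer start/head/tail
 * and the PML4 pointer, and decode the ring contents, using PPGTT
 * translation when the descriptor bit the code labels "ppgtt" (0x100) is
 * set and GGTT translation otherwise.
 */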
523 static void
524 handle_memtrace_reg_write(uint32_t *p)
525 {
526 static struct execlist_regs {
527 uint32_t render_elsp[4];
528 int render_elsp_index;
529 uint32_t blitter_elsp[4];
530 int blitter_elsp_index;
531 } state = {};
532
533 uint32_t offset = p[1];
534 uint32_t value = p[5];
535
536 int engine;
537 uint64_t context_descriptor;
538
539 switch (offset) {
540 case 0x2230: /* render elsp */
541 state.render_elsp[state.render_elsp_index++] = value;
542 if (state.render_elsp_index < 4)
543 return;
544
545 state.render_elsp_index = 0;
546 engine = GEN_ENGINE_RENDER;
547 context_descriptor = (uint64_t)state.render_elsp[2] << 32 |
548 state.render_elsp[3];
549 break;
550 case 0x22230: /* blitter elsp */
551 state.blitter_elsp[state.blitter_elsp_index++] = value;
552 if (state.blitter_elsp_index < 4)
553 return;
554
555 state.blitter_elsp_index = 0;
556 engine = GEN_ENGINE_BLITTER;
557 context_descriptor = (uint64_t)state.blitter_elsp[2] << 32 |
558 state.blitter_elsp[3];
559 break;
560 case 0x2510: /* render elsq0 lo */
561 state.render_elsp[3] = value;
562 return;
563 break;
564 case 0x2514: /* render elsq0 hi */
565 state.render_elsp[2] = value;
566 return;
567 break;
568 case 0x22510: /* blitter elsq0 lo */
569 state.blitter_elsp[3] = value;
570 return;
571 break;
572 case 0x22514: /* blitter elsq0 hi */
573 state.blitter_elsp[2] = value;
574 return;
575 break;
576 case 0x2550: /* render elsc */
577 engine = GEN_ENGINE_RENDER;
578 context_descriptor = (uint64_t)state.render_elsp[2] << 32 |
579 state.render_elsp[3];
580 break;
581 case 0x22550: /* blitter elsc */
582 engine = GEN_ENGINE_BLITTER;
583 context_descriptor = (uint64_t)state.blitter_elsp[2] << 32 |
584 state.blitter_elsp[3];
585 break;
586 default:
587 return;
588 }
589
590 const uint32_t pphwsp_size = 4096;
591 uint32_t pphwsp_addr = context_descriptor & 0xfffff000;
592 struct gen_batch_decode_bo pphwsp_bo = get_ggtt_batch_bo(NULL, pphwsp_addr);
593 uint32_t *context = (uint32_t *)((uint8_t *)pphwsp_bo.map +
594 (pphwsp_addr - pphwsp_bo.addr) +
595 pphwsp_size);
596
597 uint32_t ring_buffer_head = context[5];
598 uint32_t ring_buffer_tail = context[7];
599 uint32_t ring_buffer_start = context[9];
600 uint64_t pml4 = (uint64_t)context[49] << 32 | context[51];
601
602 struct gen_batch_decode_bo ring_bo = get_ggtt_batch_bo(NULL,
603 ring_buffer_start);
604 assert(ring_bo.size > 0);
605 void *commands = (uint8_t *)ring_bo.map + (ring_buffer_start - ring_bo.addr);
606
607 if (context_descriptor & 0x100 /* ppgtt */) {
608 batch_ctx.get_bo = get_ppgtt_batch_bo;
609 batch_ctx.user_data = &pml4;
610 } else {
611 batch_ctx.get_bo = get_ggtt_batch_bo;
612 }
613
614 (void)engine; /* TODO */
615 gen_print_batch(&batch_ctx, commands, ring_buffer_tail - ring_buffer_head,
616 0);
617 clear_bo_maps();
618 }
619
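/* Memory-write packet: the top nibble of p[3] selects the address space.
 * GGTT and physical writes are stored into the simulated memory, "local"
 * writes are simply registered as BOs, and GGTT-entry writes update the
 * page table itself.
 */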
620 static void
621 handle_memtrace_mem_write(uint32_t *p)
622 {
623 struct gen_batch_decode_bo bo = {
624 .map = p + 5,
625 /* Addresses written by aubdump here are in canonical form but the batch
626 * decoder always gives us addresses with the top 16 bits zeroed, so do
627 * the same here.
628 */
629 .addr = gen_48b_address(*(uint64_t*)&p[1]),
630 .size = p[4],
631 };
632 uint32_t address_space = p[3] >> 28;
633
634 switch (address_space) {
635 case 0: /* GGTT */
636 handle_ggtt_write(bo.addr, bo.map, bo.size);
637 break;
638 case 1: /* Local */
639 add_gtt_bo_map(bo, false);
640 break;
641 case 2: /* Physical */
642 handle_physical_write(bo.addr, bo.map, bo.size);
643 break;
644 case 4: /* GGTT Entry */
645 handle_ggtt_entry_write(bo.addr, bo.map, bo.size);
646 break;
647 }
648 }
649
650 struct aub_file {
651 FILE *stream;
652
653 uint32_t *map, *end, *cursor;
654 uint32_t *mem_end;
655 };
656
657 static struct aub_file *
658 aub_file_open(const char *filename)
659 {
660 struct aub_file *file;
661 struct stat sb;
662 int fd;
663
664 file = calloc(1, sizeof *file);
665 fd = open(filename, O_RDONLY);
666 if (fd == -1) {
667 fprintf(stderr, "open %s failed: %s\n", filename, strerror(errno));
668 exit(EXIT_FAILURE);
669 }
670
671 if (fstat(fd, &sb) == -1) {
672 fprintf(stderr, "stat failed: %s\n", strerror(errno));
673 exit(EXIT_FAILURE);
674 }
675
676 file->map = mmap(NULL, sb.st_size,
677 PROT_READ, MAP_SHARED, fd, 0);
678 if (file->map == MAP_FAILED) {
679 fprintf(stderr, "mmap failed: %s\n", strerror(errno));
680 exit(EXIT_FAILURE);
681 }
682
683 close(fd);
684
685 file->cursor = file->map;
686 file->end = file->map + sb.st_size / 4;
687
688 return file;
689 }
690
691 #define TYPE(dw) (((dw) >> 29) & 7)
692 #define OPCODE(dw) (((dw) >> 23) & 0x3f)
693 #define SUBOPCODE(dw) (((dw) >> 16) & 0x7f)
694
695 #define MAKE_HEADER(type, opcode, subopcode) \
696 (((type) << 29) | ((opcode) << 23) | ((subopcode) << 16))
697
698 #define TYPE_AUB 0x7
699
700 /* Classic AUB opcodes */
701 #define OPCODE_AUB 0x01
702 #define SUBOPCODE_HEADER 0x05
703 #define SUBOPCODE_BLOCK 0x41
704 #define SUBOPCODE_BMP 0x1e
705
706 /* Newer version AUB opcode */
707 #define OPCODE_NEW_AUB 0x2e
708 #define SUBOPCODE_REG_POLL 0x02
709 #define SUBOPCODE_REG_WRITE 0x03
710 #define SUBOPCODE_MEM_POLL 0x05
711 #define SUBOPCODE_MEM_WRITE 0x06
712 #define SUBOPCODE_VERSION 0x0e
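/* For example, MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_MEM_WRITE)
 * evaluates to (0x7 << 29) | (0x2e << 23) | (0x06 << 16) = 0xf7060000,
 * which is what the header dword of a memtrace memory-write block looks
 * like once its low 16 length bits are masked off.
 */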
713
714 #define MAKE_GEN(major, minor) ( ((major) << 8) | (minor) )
715
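/* Decode one block: read the header dword, compute where the next block
 * starts (classic AUB blocks have a 2-dword bias and BLOCK packets
 * additionally carry p[4] bytes of payload; memtrace blocks have a 1-dword
 * bias), then dispatch on the type/opcode/subopcode bits.
 */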
716 static bool
717 aub_file_decode_batch(struct aub_file *file)
718 {
719 uint32_t *p, h, *new_cursor;
720 int header_length, bias;
721
722 assert(file->cursor < file->end);
723
724 p = file->cursor;
725 h = *p;
726 header_length = h & 0xffff;
727
728 switch (OPCODE(h)) {
729 case OPCODE_AUB:
730 bias = 2;
731 break;
732 case OPCODE_NEW_AUB:
733 bias = 1;
734 break;
735 default:
736 fprintf(outfile, "unknown opcode %d at %td/%td\n",
737 OPCODE(h), file->cursor - file->map,
738 file->end - file->map);
739 return false;
740 }
741
742 new_cursor = p + header_length + bias;
743 if ((h & 0xffff0000) == MAKE_HEADER(TYPE_AUB, OPCODE_AUB, SUBOPCODE_BLOCK)) {
744 assert(file->end - file->cursor >= 4);
745 new_cursor += p[4] / 4;
746 }
747
748 assert(new_cursor <= file->end);
749
750 switch (h & 0xffff0000) {
751 case MAKE_HEADER(TYPE_AUB, OPCODE_AUB, SUBOPCODE_HEADER):
752 handle_trace_header(p);
753 break;
754 case MAKE_HEADER(TYPE_AUB, OPCODE_AUB, SUBOPCODE_BLOCK):
755 handle_trace_block(p);
756 break;
757 case MAKE_HEADER(TYPE_AUB, OPCODE_AUB, SUBOPCODE_BMP):
758 break;
759 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_VERSION):
760 handle_memtrace_version(p);
761 break;
762 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_REG_WRITE):
763 handle_memtrace_reg_write(p);
764 break;
765 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_MEM_WRITE):
766 handle_memtrace_mem_write(p);
767 break;
768 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_MEM_POLL):
769 fprintf(outfile, "memory poll block (dwords %d):\n", h & 0xffff);
770 break;
771 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_REG_POLL):
772 break;
773 default:
774 fprintf(outfile, "unknown block type=0x%x, opcode=0x%x, "
775 "subopcode=0x%x (%08x)\n", TYPE(h), OPCODE(h), SUBOPCODE(h), h);
776 break;
777 }
778 file->cursor = new_cursor;
779
780 return true;
781 }
782
783 static int
784 aub_file_more_stuff(struct aub_file *file)
785 {
786 return file->cursor < file->end || (file->stream && !feof(file->stream));
787 }
788
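/* If stdout is a terminal, pipe the output through "less -FRSi": fork the
 * pager as a child reading from a pipe and point our stdout at the write
 * end. main() closes stdout when it is done and wait()s for the pager to
 * exit.
 */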
789 static void
790 setup_pager(void)
791 {
792 int fds[2];
793 pid_t pid;
794
795 if (!isatty(1))
796 return;
797
798 if (pipe(fds) == -1)
799 return;
800
801 pid = fork();
802 if (pid == -1)
803 return;
804
805 if (pid == 0) {
806 close(fds[1]);
807 dup2(fds[0], 0);
808 execlp("less", "less", "-FRSi", NULL);
809 }
810
811 close(fds[0]);
812 dup2(fds[1], 1);
813 close(fds[1]);
814 }
815
816 static void
817 print_help(const char *progname, FILE *file)
818 {
819 fprintf(file,
820 "Usage: %s [OPTION]... FILE\n"
821 "Decode aub file contents from FILE.\n\n"
822 " --help display this help and exit\n"
823 " --gen=platform decode for given platform (3 letter platform name)\n"
824 " --headers decode only command headers\n"
825 " --color[=WHEN] colorize the output; WHEN can be 'auto' (default\n"
826 " if omitted), 'always', or 'never'\n"
827 " --max-vbo-lines=N limit the number of decoded VBO lines\n"
828 " --no-pager don't launch pager\n"
829 " --no-offsets don't print instruction offsets\n"
830 " --xml=DIR load hardware xml description from directory DIR\n",
831 progname);
832 }
833
834 int main(int argc, char *argv[])
835 {
836 struct aub_file *file;
837 int c, i;
838 int help = false, pager = true;
839 const struct option aubinator_opts[] = {
840 { "help", no_argument, (int *) &help, true },
841 { "no-pager", no_argument, (int *) &pager, false },
842 { "no-offsets", no_argument, (int *) &option_print_offsets, false },
843 { "gen", required_argument, NULL, 'g' },
844 { "headers", no_argument, (int *) &option_full_decode, false },
845 { "color", required_argument, NULL, 'c' },
846 { "xml", required_argument, NULL, 'x' },
847 { "max-vbo-lines", required_argument, NULL, 'v' },
848 { NULL, 0, NULL, 0 }
849 };
850
851 outfile = stdout;
852
853 i = 0;
854 while ((c = getopt_long(argc, argv, "", aubinator_opts, &i)) != -1) {
855 switch (c) {
856 case 'g': {
857 const int id = gen_device_name_to_pci_device_id(optarg);
858 if (id < 0) {
859 fprintf(stderr, "can't parse gen: '%s', expected ivb, byt, hsw, "
860 "bdw, chv, skl, kbl or bxt\n", optarg);
861 exit(EXIT_FAILURE);
862 } else {
863 pci_id = id;
864 }
865 break;
866 }
867 case 'c':
868 if (optarg == NULL || strcmp(optarg, "always") == 0)
869 option_color = COLOR_ALWAYS;
870 else if (strcmp(optarg, "never") == 0)
871 option_color = COLOR_NEVER;
872 else if (strcmp(optarg, "auto") == 0)
873 option_color = COLOR_AUTO;
874 else {
875 fprintf(stderr, "invalid value for --color: %s\n", optarg);
876 exit(EXIT_FAILURE);
877 }
878 break;
879 case 'x':
880 xml_path = strdup(optarg);
881 break;
882 case 'v':
883 max_vbo_lines = atoi(optarg);
884 break;
885 default:
886 break;
887 }
888 }
889
890 if (optind < argc)
891 input_file = argv[optind];
892
893 if (help || !input_file) {
894 print_help(argv[0], stderr);
895 exit(help ? EXIT_SUCCESS : EXIT_FAILURE);
896 }
897
898 /* Do this before we redirect stdout to pager. */
899 if (option_color == COLOR_AUTO)
900 option_color = isatty(1) ? COLOR_ALWAYS : COLOR_NEVER;
901
902 if (isatty(1) && pager)
903 setup_pager();
904
905 mem_fd = memfd_create("phys memory", 0);
906
907 list_inithead(&maps);
908
909 file = aub_file_open(input_file);
910
911 while (aub_file_more_stuff(file) &&
912 aub_file_decode_batch(file));
913
914 fflush(stdout);
915 /* Close stdout so the pager (if any) sees EOF on its pipe and exits. */
916 close(1);
917 free(xml_path);
918
919 wait(NULL);
920
921 return EXIT_SUCCESS;
922 }