intel: aubinator: fix reading of the context/ring
[mesa.git] / src / intel / tools / aubinator.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <stdint.h>
27 #include <getopt.h>
28
29 #include <unistd.h>
30 #include <fcntl.h>
31 #include <string.h>
32 #include <signal.h>
33 #include <errno.h>
34 #include <inttypes.h>
35 #include <sys/types.h>
36 #include <sys/stat.h>
37 #include <sys/wait.h>
38 #include <sys/mman.h>
39
40 #include "util/list.h"
41 #include "util/macros.h"
42 #include "util/rb_tree.h"
43
44 #include "common/gen_decoder.h"
45 #include "common/gen_disasm.h"
46 #include "common/gen_gem.h"
47 #include "intel_aub.h"
48
49 #ifndef HAVE_MEMFD_CREATE
50 #include <sys/syscall.h>
51
static inline int
memfd_create(const char *name, unsigned int flags)
{
   /* Fallback for libc versions without a memfd_create() wrapper: invoke the
    * Linux syscall directly.  Returns a new fd or -1 with errno set, exactly
    * like the libc wrapper would.
    */
   return syscall(SYS_memfd_create, name, flags);
}
57 #endif
58
59 /* Below is the only command missing from intel_aub.h in libdrm
60 * So, reuse intel_aub.h from libdrm and #define the
61 * AUB_MI_BATCH_BUFFER_END as below
62 */
63 #define AUB_MI_BATCH_BUFFER_END (0x0500 << 16)
64
65 #define CSI "\e["
66 #define BLUE_HEADER CSI "0;44m"
67 #define GREEN_HEADER CSI "1;42m"
68 #define NORMAL CSI "0m"
69
/* options */

static int option_full_decode = true;    /* decode full packets, not just headers */
static int option_print_offsets = true;  /* prefix each instruction with its offset */
static int max_vbo_lines = -1;           /* -1 = unlimited decoded VBO lines */
static enum { COLOR_AUTO, COLOR_ALWAYS, COLOR_NEVER } option_color;

/* state */

uint16_t pci_id = 0;                     /* device id; 0 = take it from the AUB file */
char *input_file = NULL, *xml_path = NULL;
struct gen_device_info devinfo;          /* filled in by aubinator_init() */
struct gen_batch_decode_ctx batch_ctx;   /* shared decoder context for all batches */
83
/* A buffer object the decoder may need, indexed by GTT address range. */
struct bo_map {
   struct list_head link;
   struct gen_batch_decode_bo bo;
   bool unmap_after_use;   /* true when we created the mapping ourselves */
};

/* One GGTT PTE: maps a 4k virtual page to a physical page (+ validity bits). */
struct ggtt_entry {
   struct rb_node node;    /* must stay first: nodes are cast back to entries */
   uint64_t virt_addr;
   uint64_t phys_addr;
};

/* One 4k page of emulated physical memory, backed by a slot in mem_fd. */
struct phys_mem {
   struct rb_node node;    /* must stay first: nodes are cast back to entries */
   uint64_t fd_offset;     /* offset of this page's backing slot in mem_fd */
   uint64_t phys_addr;
   uint8_t *data;          /* writable CPU mapping of the backing slot */
};

static struct list_head maps;            /* list of struct bo_map */
static struct rb_tree ggtt = {NULL};     /* GGTT PTEs keyed by virt_addr */
static struct rb_tree mem = {NULL};      /* physical pages keyed by phys_addr */
int mem_fd = -1;                         /* memfd backing all physical pages */
off_t mem_fd_len = 0;                    /* current size of mem_fd, grows by 4k */

FILE *outfile;

struct brw_instruction;
112
113 static void
114 add_gtt_bo_map(struct gen_batch_decode_bo bo, bool unmap_after_use)
115 {
116 struct bo_map *m = calloc(1, sizeof(*m));
117
118 m->bo = bo;
119 m->unmap_after_use = unmap_after_use;
120 list_add(&m->link, &maps);
121 }
122
static void
clear_bo_maps(void)
{
   /* Drop every BO recorded by add_gtt_bo_map(), unmapping the ones we
    * mapped ourselves.  Called after each decoded batch so stale mappings
    * never leak into the next batch.
    */
   list_for_each_entry_safe(struct bo_map, i, &maps, link) {
      if (i->unmap_after_use)
         munmap((void *)i->bo.map, i->bo.size);
      list_del(&i->link);
      free(i);
   }
}
133
134 static inline struct ggtt_entry *
135 ggtt_entry_next(struct ggtt_entry *entry)
136 {
137 if (!entry)
138 return NULL;
139 struct rb_node *node = rb_node_next(&entry->node);
140 if (!node)
141 return NULL;
142 return rb_node_data(struct ggtt_entry, node, node);
143 }
144
static inline int
cmp_uint64(uint64_t a, uint64_t b)
{
   /* Three-way compare for unsigned 64-bit keys: -1, 0 or 1. */
   return (a > b) - (a < b);
}
154
static inline int
cmp_ggtt_entry(const struct rb_node *node, const void *addr)
{
   /* rb_tree comparator: orders GGTT entries by virtual address.  addr
    * points at the uint64_t key being searched for.
    */
   struct ggtt_entry *entry = rb_node_data(struct ggtt_entry, node, node);
   return cmp_uint64(entry->virt_addr, *(const uint64_t *)addr);
}
161
162 static struct ggtt_entry *
163 ensure_ggtt_entry(struct rb_tree *tree, uint64_t virt_addr)
164 {
165 struct rb_node *node = rb_tree_search_sloppy(&ggtt, &virt_addr,
166 cmp_ggtt_entry);
167 int cmp = 0;
168 if (!node || (cmp = cmp_ggtt_entry(node, &virt_addr))) {
169 struct ggtt_entry *new_entry = calloc(1, sizeof(*new_entry));
170 new_entry->virt_addr = virt_addr;
171 rb_tree_insert_at(&ggtt, node, &new_entry->node, cmp > 0);
172 node = &new_entry->node;
173 }
174
175 return rb_node_data(struct ggtt_entry, node, node);
176 }
177
178 static struct ggtt_entry *
179 search_ggtt_entry(uint64_t virt_addr)
180 {
181 virt_addr &= ~0xfff;
182
183 struct rb_node *node = rb_tree_search(&ggtt, &virt_addr, cmp_ggtt_entry);
184
185 if (!node)
186 return NULL;
187
188 return rb_node_data(struct ggtt_entry, node, node);
189 }
190
191 static inline int
192 cmp_phys_mem(const struct rb_node *node, const void *addr)
193 {
194 struct phys_mem *mem = rb_node_data(struct phys_mem, node, node);
195 return cmp_uint64(mem->phys_addr, *(uint64_t *)addr);
196 }
197
static struct phys_mem *
ensure_phys_mem(uint64_t phys_addr)
{
   /* Return the phys_mem node backing the 4k page at phys_addr, allocating a
    * fresh page at the end of mem_fd (and mapping it writable) on first use.
    * Callers pass page-aligned addresses.
    */
   struct rb_node *node = rb_tree_search_sloppy(&mem, &phys_addr, cmp_phys_mem);
   int cmp = 0;
   if (!node || (cmp = cmp_phys_mem(node, &phys_addr))) {
      struct phys_mem *new_mem = calloc(1, sizeof(*new_mem));
      new_mem->phys_addr = phys_addr;
      new_mem->fd_offset = mem_fd_len;

      /* Grow the memfd by one page; that new slot becomes this page's
       * backing store.  The call stays outside the assert so it still runs
       * under NDEBUG (the result then simply goes unchecked).
       */
      int ftruncate_res = ftruncate(mem_fd, mem_fd_len += 4096);
      assert(ftruncate_res == 0);

      new_mem->data = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                           mem_fd, new_mem->fd_offset);
      assert(new_mem->data != MAP_FAILED);

      rb_tree_insert_at(&mem, node, &new_mem->node, cmp > 0);
      node = &new_mem->node;
   }

   return rb_node_data(struct phys_mem, node, node);
}
221
222 static struct phys_mem *
223 search_phys_mem(uint64_t phys_addr)
224 {
225 phys_addr &= ~0xfff;
226
227 struct rb_node *node = rb_tree_search(&mem, &phys_addr, cmp_phys_mem);
228
229 if (!node)
230 return NULL;
231
232 return rb_node_data(struct phys_mem, node, node);
233 }
234
static void
handle_ggtt_entry_write(uint64_t address, const void *_data, uint32_t _size)
{
   /* An AUB "GGTT entry" write stores 8-byte PTEs into the global GTT.
    * `address` is the byte offset into that PTE array, so dividing by the
    * PTE size gives the table index, and shifting by 12 turns the index
    * into the virtual address of the first 4k page these entries map.
    */
   uint64_t virt_addr = (address / sizeof(uint64_t)) << 12;
   const uint64_t *data = _data;
   size_t size = _size / sizeof(*data);
   for (const uint64_t *entry = data;
        entry < data + size;
        entry++, virt_addr += 4096) {
      struct ggtt_entry *pt = ensure_ggtt_entry(&ggtt, virt_addr);
      /* Raw PTE value: physical page address plus validity bits. */
      pt->phys_addr = *entry;
   }
}
248
static void
handle_physical_write(uint64_t phys_address, const void *data, uint32_t size)
{
   /* Copy `size` bytes into emulated physical memory, splitting the copy at
    * 4k boundaries since every page has its own backing slot in mem_fd.
    */
   uint32_t to_write = size;
   for (uint64_t page = phys_address & ~0xfff; page < phys_address + size; page += 4096) {
      struct phys_mem *mem = ensure_phys_mem(page);
      /* Only the first page can start mid-page; later pages copy from 0. */
      uint64_t offset = MAX2(page, phys_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;
      memcpy(mem->data + offset, data, size_this_page);
      data = (const uint8_t *)data + size_this_page;
   }
}
262
static void
handle_ggtt_write(uint64_t virt_address, const void *data, uint32_t size)
{
   /* Write through the GGTT: translate each 4k page of the virtual range to
    * its physical page and forward the bytes to handle_physical_write().
    */
   uint32_t to_write = size;
   for (uint64_t page = virt_address & ~0xfff; page < virt_address + size; page += 4096) {
      struct ggtt_entry *entry = search_ggtt_entry(page);
      /* Bit 0 of the PTE is the validity bit; writes must hit mapped pages. */
      assert(entry && entry->phys_addr & 0x1);

      /* Only the first page can start mid-page; later pages copy from 0. */
      uint64_t offset = MAX2(page, virt_address) - page;
      uint32_t size_this_page = MIN2(to_write, 4096 - offset);
      to_write -= size_this_page;

      uint64_t phys_page = entry->phys_addr & ~0xfff; /* Clear the validity bits. */
      handle_physical_write(phys_page + offset, data, size_this_page);
      data = (const uint8_t *)data + size_this_page;
   }
}
280
static struct gen_batch_decode_bo
get_ggtt_batch_bo(void *user_data, uint64_t address)
{
   /* Batch-decoder callback: return a CPU mapping of the BO containing
    * `address` in the global GTT.  Returns a zeroed BO (size 0) when the
    * address is not mapped.
    */
   struct gen_batch_decode_bo bo = {0};

   /* Legacy AUB files (handle_trace_block) record BOs directly. */
   list_for_each_entry(struct bo_map, i, &maps, link)
      if (i->bo.addr <= address && i->bo.addr + i->bo.size > address)
         return i->bo;

   address &= ~0xfff;

   /* Find the closest GGTT entry; the cast is safe because `node` is the
    * first member of struct ggtt_entry.  Advance to the first entry at or
    * above the page address.
    */
   struct ggtt_entry *start =
      (struct ggtt_entry *)rb_tree_search_sloppy(&ggtt, &address,
                                                 cmp_ggtt_entry);
   if (start && start->virt_addr < address)
      start = ggtt_entry_next(start);
   if (!start)
      return bo;

   /* Extend the range through all virtually-contiguous entries. */
   struct ggtt_entry *last = start;
   for (struct ggtt_entry *i = ggtt_entry_next(last);
        i && last->virt_addr + 4096 == i->virt_addr;
        last = i, i = ggtt_entry_next(last))
      ;

   /* Reserve one contiguous anonymous range covering the whole run... */
   bo.addr = MIN2(address, start->virt_addr);
   bo.size = last->virt_addr - bo.addr + 4096;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   /* ...then overlay each page with a MAP_FIXED mapping of its backing slot
    * in mem_fd.
    */
   for (struct ggtt_entry *i = start;
        i;
        i = i == last ? NULL : ggtt_entry_next(i)) {
      uint64_t phys_addr = i->phys_addr & ~0xfff;
      struct phys_mem *phys_mem = search_phys_mem(phys_addr);

      /* Pages without physical backing read as zeros via the anonymous map. */
      if (!phys_mem)
         continue;

      uint32_t map_offset = i->virt_addr - address;
      void *res = mmap((uint8_t *)bo.map + map_offset, 4096, PROT_READ,
                       MAP_SHARED | MAP_FIXED, mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   /* Remember the mapping so clear_bo_maps() munmap's it after the batch. */
   add_gtt_bo_map(bo, true);

   return bo;
}
330
331 static struct phys_mem *
332 ppgtt_walk(uint64_t pml4, uint64_t address)
333 {
334 uint64_t shift = 39;
335 uint64_t addr = pml4;
336 for (int level = 4; level > 0; level--) {
337 struct phys_mem *table = search_phys_mem(addr);
338 if (!table)
339 return NULL;
340 int index = (address >> shift) & 0x1ff;
341 uint64_t entry = ((uint64_t *)table->data)[index];
342 if (!(entry & 1))
343 return NULL;
344 addr = entry & ~0xfff;
345 shift -= 9;
346 }
347 return search_phys_mem(addr);
348 }
349
static bool
ppgtt_mapped(uint64_t pml4, uint64_t address)
{
   /* True when the PPGTT walk resolves `address` to a backing page. */
   struct phys_mem *page = ppgtt_walk(pml4, address);
   return page != NULL;
}
355
static struct gen_batch_decode_bo
get_ppgtt_batch_bo(void *user_data, uint64_t address)
{
   /* Batch-decoder callback for per-process GTT: user_data points at the
    * PML4 physical address of the submitting context's page tables (set up
    * by handle_memtrace_reg_write).  Returns a zeroed BO when unmapped.
    */
   struct gen_batch_decode_bo bo = {0};
   uint64_t pml4 = *(uint64_t *)user_data;

   address &= ~0xfff;

   if (!ppgtt_mapped(pml4, address))
      return bo;

   /* Map everything until the first gap since we don't know how much the
    * decoder actually needs.
    */
   uint64_t end = address;
   while (ppgtt_mapped(pml4, end))
      end += 4096;

   /* Reserve one contiguous anonymous range, then overlay each page with
    * its backing slot from mem_fd.  Every page in [address, end) is known
    * to be mapped, so ppgtt_walk() below cannot return NULL.
    */
   bo.addr = address;
   bo.size = end - address;
   bo.map = mmap(NULL, bo.size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
   assert(bo.map != MAP_FAILED);

   for (uint64_t page = address; page < end; page += 4096) {
      struct phys_mem *phys_mem = ppgtt_walk(pml4, page);

      void *res = mmap((uint8_t *)bo.map + (page - bo.addr), 4096, PROT_READ,
                       MAP_SHARED | MAP_FIXED, mem_fd, phys_mem->fd_offset);
      assert(res != MAP_FAILED);
   }

   /* Remember the mapping so clear_bo_maps() munmap's it after the batch. */
   add_gtt_bo_map(bo, true);

   return bo;
}
391
392 #define GEN_ENGINE_RENDER 1
393 #define GEN_ENGINE_BLITTER 2
394
static void
handle_trace_block(uint32_t *p)
{
   /* Decode a legacy AUB trace-block packet: either register a data BO for
    * later lookup, or decode a command (ring) buffer write immediately.
    */
   int operation = p[1] & AUB_TRACE_OPERATION_MASK;
   int type = p[1] & AUB_TRACE_TYPE_MASK;
   int address_space = p[1] & AUB_TRACE_ADDRESS_SPACE_MASK;
   int header_length = p[0] & 0xffff;
   int engine = GEN_ENGINE_RENDER;
   struct gen_batch_decode_bo bo = {
      /* Payload starts right after the header dwords. */
      .map = p + header_length + 2,
      /* Addresses written by aubdump here are in canonical form but the batch
       * decoder always gives us addresses with the top 16bits zeroed, so do
       * the same here.
       */
      .addr = gen_48b_address((devinfo.gen >= 8 ? ((uint64_t) p[5] << 32) : 0) |
                              ((uint64_t) p[3])),
      .size = p[4],
   };

   switch (operation) {
   case AUB_TRACE_OP_DATA_WRITE:
      if (address_space == AUB_TRACE_MEMTYPE_GTT)
         add_gtt_bo_map(bo, false);
      break;
   case AUB_TRACE_OP_COMMAND_WRITE:
      /* The ring selects the engine; only render and blitter are known. */
      switch (type) {
      case AUB_TRACE_TYPE_RING_PRB0:
         engine = GEN_ENGINE_RENDER;
         break;
      case AUB_TRACE_TYPE_RING_PRB2:
         engine = GEN_ENGINE_BLITTER;
         break;
      default:
         fprintf(outfile, "command write to unknown ring %d\n", type);
         break;
      }

      (void)engine; /* TODO */
      batch_ctx.get_bo = get_ggtt_batch_bo;
      gen_print_batch(&batch_ctx, bo.map, bo.size, 0);

      /* BOs only live for the duration of one command write. */
      clear_bo_maps();
      break;
   }
}
440
static void
aubinator_init(uint16_t aub_pci_id, const char *app_name)
{
   /* One-time setup once the AUB header has identified the device: resolve
    * device info from the (possibly user-overridden) pci_id, configure the
    * batch decoder, and print the banner.  Exits on unknown devices.
    */
   if (!gen_get_device_info(pci_id, &devinfo)) {
      fprintf(stderr, "can't find device information: pci_id=0x%x\n", pci_id);
      exit(EXIT_FAILURE);
   }

   enum gen_batch_decode_flags batch_flags = 0;
   if (option_color == COLOR_ALWAYS)
      batch_flags |= GEN_BATCH_DECODE_IN_COLOR;
   if (option_full_decode)
      batch_flags |= GEN_BATCH_DECODE_FULL;
   if (option_print_offsets)
      batch_flags |= GEN_BATCH_DECODE_OFFSETS;
   batch_flags |= GEN_BATCH_DECODE_FLOATS;

   gen_batch_decode_ctx_init(&batch_ctx, &devinfo, outfile, batch_flags,
                             xml_path, NULL, NULL, NULL);
   batch_ctx.max_vbo_decoded_lines = max_vbo_lines;

   char *color = GREEN_HEADER, *reset_color = NORMAL;
   if (option_color == COLOR_NEVER)
      color = reset_color = "";

   fprintf(outfile, "%sAubinator: Intel AUB file decoder.%-80s%s\n",
           color, "", reset_color);

   if (input_file)
      fprintf(outfile, "File name: %s\n", input_file);

   if (aub_pci_id)
      fprintf(outfile, "PCI ID: 0x%x\n", aub_pci_id);

   fprintf(outfile, "Application name: %s\n", app_name);

   fprintf(outfile, "Decoding as: %s\n", gen_get_device_name(pci_id));

   /* Throw in a new line before the first batch */
   fprintf(outfile, "\n");
}
482
static void
handle_trace_header(uint32_t *p)
{
   /* The intel_aubdump tool from IGT is kind enough to put a PCI-ID= tag in
    * the AUB header comment. If the user hasn't specified a hardware
    * generation, try to use the one from the AUB file.
    */
   uint32_t *end = p + (p[0] & 0xffff) + 2;
   int aub_pci_id = 0;
   /* The comment string (if any) starts at dword 13; only parse it when the
    * header is long enough and the comment is non-empty.
    */
   if (end > &p[12] && p[12] > 0)
      sscanf((char *)&p[13], "PCI-ID=%i", &aub_pci_id);

   if (pci_id == 0)
      pci_id = aub_pci_id;

   /* Application name lives at dword 2; it may not be NUL-terminated in the
    * file, so copy at most 32 bytes and terminate explicitly.
    */
   char app_name[33];
   strncpy(app_name, (char *)&p[2], 32);
   app_name[32] = 0;

   aubinator_init(aub_pci_id, app_name);
}
504
static void
handle_memtrace_version(uint32_t *p)
{
   /* Newer "memtrace" AUB version packet: the payload after dword 5 is a
    * comment of the form "PCI-ID=0x... <application name>".
    */
   int header_length = p[0] & 0xffff;
   char app_name[64];
   /* Comment length in bytes, clamped to the local buffer.  NOTE(review):
    * this can go negative for a malformed short header — well-formed input
    * is assumed here.
    */
   int app_name_len = MIN2(4 * (header_length + 1 - 5), ARRAY_SIZE(app_name) - 1);
   int pci_id_len = 0;
   int aub_pci_id = 0;

   strncpy(app_name, (char *)&p[5], app_name_len);
   app_name[app_name_len] = 0;
   /* %n records where the app name begins, right after the PCI-ID token. */
   sscanf(app_name, "PCI-ID=%i %n", &aub_pci_id, &pci_id_len);
   if (pci_id == 0)
      pci_id = aub_pci_id;
   aubinator_init(aub_pci_id, app_name + pci_id_len);
}
521
static void
handle_memtrace_reg_write(uint32_t *p)
{
   /* Watch writes to the execlist submission registers.  A completed ELSP
    * write sequence (or an ELSQ + control write on newer gens) submits a
    * context; at that point we locate the context image behind the PPHWSP,
    * pull the ring-buffer state out of it, and decode the ring contents.
    */
   static struct execlist_regs {
      uint32_t render_elsp[4];
      int render_elsp_index;
      uint32_t blitter_elsp[4];
      int blitter_elsp_index;
   } state = {};

   uint32_t offset = p[1];
   uint32_t value = p[5];

   int engine;
   uint64_t context_descriptor;

   switch (offset) {
   case 0x2230: /* render elsp */
      /* ELSP takes 4 dwords; the context descriptor is dwords 2 (hi) and
       * 3 (lo).  Nothing to do until all 4 have arrived.
       */
      state.render_elsp[state.render_elsp_index++] = value;
      if (state.render_elsp_index < 4)
         return;

      state.render_elsp_index = 0;
      engine = GEN_ENGINE_RENDER;
      context_descriptor = (uint64_t)state.render_elsp[2] << 32 |
         state.render_elsp[3];
      break;
   case 0x22230: /* blitter elsp */
      state.blitter_elsp[state.blitter_elsp_index++] = value;
      if (state.blitter_elsp_index < 4)
         return;

      state.blitter_elsp_index = 0;
      engine = GEN_ENGINE_BLITTER;
      context_descriptor = (uint64_t)state.blitter_elsp[2] << 32 |
         state.blitter_elsp[3];
      break;
   /* ELSQ writes just stash the descriptor halves; submission happens on
    * the later control-register (elsc) write.
    */
   case 0x2510: /* render elsq0 lo */
      state.render_elsp[3] = value;
      return;
      break;
   case 0x2514: /* render elsq0 hi */
      state.render_elsp[2] = value;
      return;
      break;
   case 0x22510: /* blitter elsq0 lo */
      state.blitter_elsp[3] = value;
      return;
      break;
   case 0x22514: /* blitter elsq0 hi */
      state.blitter_elsp[2] = value;
      return;
      break;
   case 0x2550: /* render elsc */
      engine = GEN_ENGINE_RENDER;
      context_descriptor = (uint64_t)state.render_elsp[2] << 32 |
         state.render_elsp[3];
      break;
   case 0x22550: /* blitter elsc */
      engine = GEN_ENGINE_BLITTER;
      context_descriptor = (uint64_t)state.blitter_elsp[2] << 32 |
         state.blitter_elsp[3];
      break;
   default:
      return;
   }

   /* The hardware context image sits in the GGTT right behind the 4k
    * per-process hardware status page pointed to by the descriptor.  Ring
    * head/tail/start and the PML4 address live at fixed dword offsets
    * inside the context image.
    */
   const uint32_t pphwsp_size = 4096;
   uint32_t pphwsp_addr = context_descriptor & 0xfffff000;
   struct gen_batch_decode_bo pphwsp_bo = get_ggtt_batch_bo(NULL, pphwsp_addr);
   uint32_t *context = (uint32_t *)((uint8_t *)pphwsp_bo.map +
                                    (pphwsp_addr - pphwsp_bo.addr) +
                                    pphwsp_size);

   uint32_t ring_buffer_head = context[5];
   uint32_t ring_buffer_tail = context[7];
   uint32_t ring_buffer_start = context[9];
   uint64_t pml4 = (uint64_t)context[49] << 32 | context[51];

   struct gen_batch_decode_bo ring_bo = get_ggtt_batch_bo(NULL,
                                                          ring_buffer_start);
   assert(ring_bo.size > 0);
   /* NOTE(review): decoding starts at ring start rather than start + head —
    * this assumes head is 0 when the context is submitted; confirm against
    * the AUB writer.
    */
   void *commands = (uint8_t *)ring_bo.map + (ring_buffer_start - ring_bo.addr);

   /* Bit 8 of the context descriptor selects PPGTT addressing. */
   if (context_descriptor & 0x100 /* ppgtt */) {
      batch_ctx.get_bo = get_ppgtt_batch_bo;
      batch_ctx.user_data = &pml4;
   } else {
      batch_ctx.get_bo = get_ggtt_batch_bo;
   }

   (void)engine; /* TODO */
   gen_print_batch(&batch_ctx, commands, ring_buffer_tail - ring_buffer_head,
                   0);
   clear_bo_maps();
}
618
619 static void
620 handle_memtrace_mem_write(uint32_t *p)
621 {
622 struct gen_batch_decode_bo bo = {
623 .map = p + 5,
624 /* Addresses written by aubdump here are in canonical form but the batch
625 * decoder always gives us addresses with the top 16bits zeroed, so do
626 * the same here.
627 */
628 .addr = gen_48b_address(*(uint64_t*)&p[1]),
629 .size = p[4],
630 };
631 uint32_t address_space = p[3] >> 28;
632
633 switch (address_space) {
634 case 0: /* GGTT */
635 handle_ggtt_write(bo.addr, bo.map, bo.size);
636 break;
637 case 1: /* Local */
638 add_gtt_bo_map(bo, false);
639 break;
640 case 2: /* Physical */
641 handle_physical_write(bo.addr, bo.map, bo.size);
642 break;
643 case 4: /* GGTT Entry */
644 handle_ggtt_entry_write(bo.addr, bo.map, bo.size);
645 break;
646 }
647 }
648
/* An open AUB file: the whole file is mmap'ed and walked dword by dword. */
struct aub_file {
   /* Only consulted by aub_file_more_stuff(); NULL for mmap'ed files.
    * NOTE(review): nothing visible here ever sets it — presumably a leftover
    * from a streaming-input mode.
    */
   FILE *stream;

   uint32_t *map, *end, *cursor;   /* mmap'ed contents, [map, end), in dwords */
   uint32_t *mem_end;
};
655
656 static struct aub_file *
657 aub_file_open(const char *filename)
658 {
659 struct aub_file *file;
660 struct stat sb;
661 int fd;
662
663 file = calloc(1, sizeof *file);
664 fd = open(filename, O_RDONLY);
665 if (fd == -1) {
666 fprintf(stderr, "open %s failed: %s\n", filename, strerror(errno));
667 exit(EXIT_FAILURE);
668 }
669
670 if (fstat(fd, &sb) == -1) {
671 fprintf(stderr, "stat failed: %s\n", strerror(errno));
672 exit(EXIT_FAILURE);
673 }
674
675 file->map = mmap(NULL, sb.st_size,
676 PROT_READ, MAP_SHARED, fd, 0);
677 if (file->map == MAP_FAILED) {
678 fprintf(stderr, "mmap failed: %s\n", strerror(errno));
679 exit(EXIT_FAILURE);
680 }
681
682 close(fd);
683
684 file->cursor = file->map;
685 file->end = file->map + sb.st_size / 4;
686
687 return file;
688 }
689
690 #define TYPE(dw) (((dw) >> 29) & 7)
691 #define OPCODE(dw) (((dw) >> 23) & 0x3f)
692 #define SUBOPCODE(dw) (((dw) >> 16) & 0x7f)
693
694 #define MAKE_HEADER(type, opcode, subopcode) \
695 (((type) << 29) | ((opcode) << 23) | ((subopcode) << 16))
696
697 #define TYPE_AUB 0x7
698
699 /* Classic AUB opcodes */
700 #define OPCODE_AUB 0x01
701 #define SUBOPCODE_HEADER 0x05
702 #define SUBOPCODE_BLOCK 0x41
703 #define SUBOPCODE_BMP 0x1e
704
705 /* Newer version AUB opcode */
706 #define OPCODE_NEW_AUB 0x2e
707 #define SUBOPCODE_REG_POLL 0x02
708 #define SUBOPCODE_REG_WRITE 0x03
709 #define SUBOPCODE_MEM_POLL 0x05
710 #define SUBOPCODE_MEM_WRITE 0x06
711 #define SUBOPCODE_VERSION 0x0e
712
713 #define MAKE_GEN(major, minor) ( ((major) << 8) | (minor) )
714
/* Result codes for aub_file_decode_batch(). */
enum {
   AUB_ITEM_DECODE_OK,
   AUB_ITEM_DECODE_FAILED,
   AUB_ITEM_DECODE_NEED_MORE_DATA,
};
720
721 static int
722 aub_file_decode_batch(struct aub_file *file)
723 {
724 uint32_t *p, h, *new_cursor;
725 int header_length, bias;
726
727 assert(file->cursor < file->end);
728
729 p = file->cursor;
730 h = *p;
731 header_length = h & 0xffff;
732
733 switch (OPCODE(h)) {
734 case OPCODE_AUB:
735 bias = 2;
736 break;
737 case OPCODE_NEW_AUB:
738 bias = 1;
739 break;
740 default:
741 fprintf(outfile, "unknown opcode %d at %td/%td\n",
742 OPCODE(h), file->cursor - file->map,
743 file->end - file->map);
744 return AUB_ITEM_DECODE_FAILED;
745 }
746
747 new_cursor = p + header_length + bias;
748 if ((h & 0xffff0000) == MAKE_HEADER(TYPE_AUB, OPCODE_AUB, SUBOPCODE_BLOCK)) {
749 assert(file->end - file->cursor >= 4);
750 new_cursor += p[4] / 4;
751 }
752
753 assert(new_cursor <= file->end);
754
755 switch (h & 0xffff0000) {
756 case MAKE_HEADER(TYPE_AUB, OPCODE_AUB, SUBOPCODE_HEADER):
757 handle_trace_header(p);
758 break;
759 case MAKE_HEADER(TYPE_AUB, OPCODE_AUB, SUBOPCODE_BLOCK):
760 handle_trace_block(p);
761 break;
762 case MAKE_HEADER(TYPE_AUB, OPCODE_AUB, SUBOPCODE_BMP):
763 break;
764 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_VERSION):
765 handle_memtrace_version(p);
766 break;
767 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_REG_WRITE):
768 handle_memtrace_reg_write(p);
769 break;
770 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_MEM_WRITE):
771 handle_memtrace_mem_write(p);
772 break;
773 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_MEM_POLL):
774 fprintf(outfile, "memory poll block (dwords %d):\n", h & 0xffff);
775 break;
776 case MAKE_HEADER(TYPE_AUB, OPCODE_NEW_AUB, SUBOPCODE_REG_POLL):
777 break;
778 default:
779 fprintf(outfile, "unknown block type=0x%x, opcode=0x%x, "
780 "subopcode=0x%x (%08x)\n", TYPE(h), OPCODE(h), SUBOPCODE(h), h);
781 break;
782 }
783 file->cursor = new_cursor;
784
785 return AUB_ITEM_DECODE_OK;
786 }
787
788 static int
789 aub_file_more_stuff(struct aub_file *file)
790 {
791 return file->cursor < file->end || (file->stream && !feof(file->stream));
792 }
793
static void
setup_pager(void)
{
   /* Redirect our stdout through `less` when writing to a terminal.  Any
    * setup failure silently falls back to direct output.
    */
   int fds[2];
   pid_t pid;

   if (!isatty(1))
      return;

   if (pipe(fds) == -1)
      return;

   pid = fork();
   if (pid == -1)
      return;

   if (pid == 0) {
      /* Child: read end of the pipe becomes stdin for the pager. */
      close(fds[1]);
      dup2(fds[0], 0);
      execlp("less", "less", "-FRSi", NULL);
      /* Fix: if exec fails (e.g. `less` is not installed), the child must
       * not fall through and run the decoder a second time.  _exit() also
       * avoids flushing the parent's stdio buffers inherited across fork().
       */
      _exit(127);
   }

   /* Parent: write end of the pipe becomes our stdout. */
   close(fds[0]);
   dup2(fds[1], 1);
   close(fds[1]);
}
820
static void
print_help(const char *progname, FILE *file)
{
   /* Emit the command-line usage summary to the given stream. */
   fprintf(file, "Usage: %s [OPTION]... FILE\n", progname);
   fputs("Decode aub file contents from FILE.\n\n"
         "      --help             display this help and exit\n"
         "      --gen=platform     decode for given platform (3 letter platform name)\n"
         "      --headers          decode only command headers\n"
         "      --color[=WHEN]     colorize the output; WHEN can be 'auto' (default\n"
         "                           if omitted), 'always', or 'never'\n"
         "      --max-vbo-lines=N  limit the number of decoded VBO lines\n"
         "      --no-pager         don't launch pager\n"
         "      --no-offsets       don't print instruction offsets\n"
         "      --xml=DIR          load hardware xml description from directory DIR\n",
         file);
}
838
int main(int argc, char *argv[])
{
   /* Parse options, set up the pager and emulated-memory backing store,
    * then decode the AUB file packet by packet.
    */
   struct aub_file *file;
   int c, i;
   bool help = false, pager = true;
   const struct option aubinator_opts[] = {
      { "help", no_argument, (int *) &help, true },
      { "no-pager", no_argument, (int *) &pager, false },
      { "no-offsets", no_argument, (int *) &option_print_offsets, false },
      { "gen", required_argument, NULL, 'g' },
      { "headers", no_argument, (int *) &option_full_decode, false },
      { "color", required_argument, NULL, 'c' },
      { "xml", required_argument, NULL, 'x' },
      { "max-vbo-lines", required_argument, NULL, 'v' },
      { NULL, 0, NULL, 0 }
   };

   outfile = stdout;

   i = 0;
   while ((c = getopt_long(argc, argv, "", aubinator_opts, &i)) != -1) {
      switch (c) {
      case 'g': {
         /* --gen overrides whatever PCI ID the AUB file advertises. */
         const int id = gen_device_name_to_pci_device_id(optarg);
         if (id < 0) {
            fprintf(stderr, "can't parse gen: '%s', expected ivb, byt, hsw, "
                            "bdw, chv, skl, kbl or bxt\n", optarg);
            exit(EXIT_FAILURE);
         } else {
            pci_id = id;
         }
         break;
      }
      case 'c':
         if (optarg == NULL || strcmp(optarg, "always") == 0)
            option_color = COLOR_ALWAYS;
         else if (strcmp(optarg, "never") == 0)
            option_color = COLOR_NEVER;
         else if (strcmp(optarg, "auto") == 0)
            option_color = COLOR_AUTO;
         else {
            fprintf(stderr, "invalid value for --color: %s", optarg);
            exit(EXIT_FAILURE);
         }
         break;
      case 'x':
         xml_path = strdup(optarg);
         break;
      case 'v':
         max_vbo_lines = atoi(optarg);
         break;
      default:
         break;
      }
   }

   if (optind < argc)
      input_file = argv[optind];

   /* NOTE(review): a missing FILE argument also exits with status 0 here,
    * so scripts can't distinguish "--help" from "no input given".
    */
   if (help || !input_file) {
      print_help(argv[0], stderr);
      exit(0);
   }

   /* Do this before we redirect stdout to pager. */
   if (option_color == COLOR_AUTO)
      option_color = isatty(1) ? COLOR_ALWAYS : COLOR_NEVER;

   if (isatty(1) && pager)
      setup_pager();

   /* Backing store for the emulated physical memory pages.  NOTE(review):
    * the return value goes unchecked; a failure would only surface later as
    * ftruncate/mmap asserts in ensure_phys_mem().
    */
   mem_fd = memfd_create("phys memory", 0);

   list_inithead(&maps);

   file = aub_file_open(input_file);

   /* Decode packets until the file is exhausted or a packet fails. */
   while (aub_file_more_stuff(file) &&
          aub_file_decode_batch(file) == AUB_ITEM_DECODE_OK);

   fflush(stdout);
   /* close the stdout which is opened to write the output */
   close(1);
   free(xml_path);

   /* Wait for the pager child (if any) to drain and exit. */
   wait(NULL);

   return EXIT_SUCCESS;
}