2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
35 #include <sys/types.h>
40 #include "util/list.h"
41 #include "util/macros.h"
42 #include "util/rb_tree.h"
44 #include "common/gen_decoder.h"
45 #include "common/gen_disasm.h"
46 #include "common/gen_gem.h"
47 #include "intel_aub.h"
#ifndef HAVE_MEMFD_CREATE
#include <sys/syscall.h>

/* Fallback for libc versions that do not expose memfd_create(2): invoke the
 * raw syscall directly.  Same contract as the glibc wrapper.
 */
static inline int
memfd_create(const char *name, unsigned int flags)
{
   return syscall(SYS_memfd_create, name, flags);
}
#endif
/* Below is the only command missing from intel_aub.h in libdrm.
 * So, reuse intel_aub.h from libdrm and #define
 * AUB_MI_BATCH_BUFFER_END as below.
 */
#define AUB_MI_BATCH_BUFFER_END (0x0500 << 16)
66 #define BLUE_HEADER CSI "0;44m"
67 #define GREEN_HEADER CSI "1;42m"
68 #define NORMAL CSI "0m"
72 static int option_full_decode
= true;
73 static int option_print_offsets
= true;
74 static int max_vbo_lines
= -1;
75 static enum { COLOR_AUTO
, COLOR_ALWAYS
, COLOR_NEVER
} option_color
;
80 char *input_file
= NULL
, *xml_path
= NULL
;
81 struct gen_device_info devinfo
;
82 struct gen_batch_decode_ctx batch_ctx
;
85 struct list_head link
;
86 struct gen_batch_decode_bo bo
;
103 static struct list_head maps
;
104 static struct rb_tree ggtt
= {NULL
};
105 static struct rb_tree mem
= {NULL
};
107 off_t mem_fd_len
= 0;
111 struct brw_instruction
;
114 add_gtt_bo_map(struct gen_batch_decode_bo bo
, bool unmap_after_use
)
116 struct bo_map
*m
= calloc(1, sizeof(*m
));
119 m
->unmap_after_use
= unmap_after_use
;
120 list_add(&m
->link
, &maps
);
126 list_for_each_entry_safe(struct bo_map
, i
, &maps
, link
) {
127 if (i
->unmap_after_use
)
128 munmap((void *)i
->bo
.map
, i
->bo
.size
);
134 static inline struct ggtt_entry
*
135 ggtt_entry_next(struct ggtt_entry
*entry
)
139 struct rb_node
*node
= rb_node_next(&entry
->node
);
142 return rb_node_data(struct ggtt_entry
, node
, node
);
/* Three-way comparison of 64-bit keys (like memcmp/strcmp sign convention);
 * ordering primitive for the ggtt and phys-mem red-black trees.
 */
static inline int
cmp_uint64(uint64_t a, uint64_t b)
{
   if (a < b)
      return -1;
   if (a > b)
      return 1;
   return 0;
}
156 cmp_ggtt_entry(const struct rb_node
*node
, const void *addr
)
158 struct ggtt_entry
*entry
= rb_node_data(struct ggtt_entry
, node
, node
);
159 return cmp_uint64(entry
->virt_addr
, *(const uint64_t *)addr
);
162 static struct ggtt_entry
*
163 ensure_ggtt_entry(struct rb_tree
*tree
, uint64_t virt_addr
)
165 struct rb_node
*node
= rb_tree_search_sloppy(&ggtt
, &virt_addr
,
168 if (!node
|| (cmp
= cmp_ggtt_entry(node
, &virt_addr
))) {
169 struct ggtt_entry
*new_entry
= calloc(1, sizeof(*new_entry
));
170 new_entry
->virt_addr
= virt_addr
;
171 rb_tree_insert_at(&ggtt
, node
, &new_entry
->node
, cmp
> 0);
172 node
= &new_entry
->node
;
175 return rb_node_data(struct ggtt_entry
, node
, node
);
178 static struct ggtt_entry
*
179 search_ggtt_entry(uint64_t virt_addr
)
183 struct rb_node
*node
= rb_tree_search(&ggtt
, &virt_addr
, cmp_ggtt_entry
);
188 return rb_node_data(struct ggtt_entry
, node
, node
);
192 cmp_phys_mem(const struct rb_node
*node
, const void *addr
)
194 struct phys_mem
*mem
= rb_node_data(struct phys_mem
, node
, node
);
195 return cmp_uint64(mem
->phys_addr
, *(uint64_t *)addr
);
198 static struct phys_mem
*
199 ensure_phys_mem(uint64_t phys_addr
)
201 struct rb_node
*node
= rb_tree_search_sloppy(&mem
, &phys_addr
, cmp_phys_mem
);
203 if (!node
|| (cmp
= cmp_phys_mem(node
, &phys_addr
))) {
204 struct phys_mem
*new_mem
= calloc(1, sizeof(*new_mem
));
205 new_mem
->phys_addr
= phys_addr
;
206 new_mem
->fd_offset
= mem_fd_len
;
208 int ftruncate_res
= ftruncate(mem_fd
, mem_fd_len
+= 4096);
209 assert(ftruncate_res
== 0);
211 new_mem
->data
= mmap(NULL
, 4096, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
212 mem_fd
, new_mem
->fd_offset
);
213 assert(new_mem
->data
!= MAP_FAILED
);
215 rb_tree_insert_at(&mem
, node
, &new_mem
->node
, cmp
> 0);
216 node
= &new_mem
->node
;
219 return rb_node_data(struct phys_mem
, node
, node
);
222 static struct phys_mem
*
223 search_phys_mem(uint64_t phys_addr
)
227 struct rb_node
*node
= rb_tree_search(&mem
, &phys_addr
, cmp_phys_mem
);
232 return rb_node_data(struct phys_mem
, node
, node
);
236 handle_ggtt_entry_write(uint64_t address
, const void *_data
, uint32_t _size
)
238 uint64_t virt_addr
= (address
/ sizeof(uint64_t)) << 12;
239 const uint64_t *data
= _data
;
240 size_t size
= _size
/ sizeof(*data
);
241 for (const uint64_t *entry
= data
;
243 entry
++, virt_addr
+= 4096) {
244 struct ggtt_entry
*pt
= ensure_ggtt_entry(&ggtt
, virt_addr
);
245 pt
->phys_addr
= *entry
;
250 handle_physical_write(uint64_t phys_address
, const void *data
, uint32_t size
)
252 uint32_t to_write
= size
;
253 for (uint64_t page
= phys_address
& ~0xfff; page
< phys_address
+ size
; page
+= 4096) {
254 struct phys_mem
*mem
= ensure_phys_mem(page
);
255 uint64_t offset
= MAX2(page
, phys_address
) - page
;
256 uint32_t size_this_page
= MIN2(to_write
, 4096 - offset
);
257 to_write
-= size_this_page
;
258 memcpy(mem
->data
+ offset
, data
, size_this_page
);
259 data
= (const uint8_t *)data
+ size_this_page
;
264 handle_ggtt_write(uint64_t virt_address
, const void *data
, uint32_t size
)
266 uint32_t to_write
= size
;
267 for (uint64_t page
= virt_address
& ~0xfff; page
< virt_address
+ size
; page
+= 4096) {
268 struct ggtt_entry
*entry
= search_ggtt_entry(page
);
269 assert(entry
&& entry
->phys_addr
& 0x1);
271 uint64_t offset
= MAX2(page
, virt_address
) - page
;
272 uint32_t size_this_page
= MIN2(to_write
, 4096 - offset
);
273 to_write
-= size_this_page
;
275 uint64_t phys_page
= entry
->phys_addr
& ~0xfff; /* Clear the validity bits. */
276 handle_physical_write(phys_page
+ offset
, data
, size_this_page
);
277 data
= (const uint8_t *)data
+ size_this_page
;
281 static struct gen_batch_decode_bo
282 get_ggtt_batch_bo(void *user_data
, uint64_t address
)
284 struct gen_batch_decode_bo bo
= {0};
286 list_for_each_entry(struct bo_map
, i
, &maps
, link
)
287 if (i
->bo
.addr
<= address
&& i
->bo
.addr
+ i
->bo
.size
> address
)
292 struct ggtt_entry
*start
=
293 (struct ggtt_entry
*)rb_tree_search_sloppy(&ggtt
, &address
,
295 if (start
&& start
->virt_addr
< address
)
296 start
= ggtt_entry_next(start
);
300 struct ggtt_entry
*last
= start
;
301 for (struct ggtt_entry
*i
= ggtt_entry_next(last
);
302 i
&& last
->virt_addr
+ 4096 == i
->virt_addr
;
303 last
= i
, i
= ggtt_entry_next(last
))
306 bo
.addr
= MIN2(address
, start
->virt_addr
);
307 bo
.size
= last
->virt_addr
- bo
.addr
+ 4096;
308 bo
.map
= mmap(NULL
, bo
.size
, PROT_READ
, MAP_SHARED
| MAP_ANONYMOUS
, -1, 0);
309 assert(bo
.map
!= MAP_FAILED
);
311 for (struct ggtt_entry
*i
= start
;
313 i
= i
== last
? NULL
: ggtt_entry_next(i
)) {
314 uint64_t phys_addr
= i
->phys_addr
& ~0xfff;
315 struct phys_mem
*phys_mem
= search_phys_mem(phys_addr
);
320 uint32_t map_offset
= i
->virt_addr
- address
;
321 void *res
= mmap((uint8_t *)bo
.map
+ map_offset
, 4096, PROT_READ
,
322 MAP_SHARED
| MAP_FIXED
, mem_fd
, phys_mem
->fd_offset
);
323 assert(res
!= MAP_FAILED
);
326 add_gtt_bo_map(bo
, true);
331 static struct phys_mem
*
332 ppgtt_walk(uint64_t pml4
, uint64_t address
)
335 uint64_t addr
= pml4
;
336 for (int level
= 4; level
> 0; level
--) {
337 struct phys_mem
*table
= search_phys_mem(addr
);
340 int index
= (address
>> shift
) & 0x1ff;
341 uint64_t entry
= ((uint64_t *)table
->data
)[index
];
344 addr
= entry
& ~0xfff;
347 return search_phys_mem(addr
);
/* True when `address` resolves to a physical page through the PPGTT. */
static bool
ppgtt_mapped(uint64_t pml4, uint64_t address)
{
   return ppgtt_walk(pml4, address) != NULL;
}
356 static struct gen_batch_decode_bo
357 get_ppgtt_batch_bo(void *user_data
, uint64_t address
)
359 struct gen_batch_decode_bo bo
= {0};
360 uint64_t pml4
= *(uint64_t *)user_data
;
364 if (!ppgtt_mapped(pml4
, address
))
367 /* Map everything until the first gap since we don't know how much the
368 * decoder actually needs.
370 uint64_t end
= address
;
371 while (ppgtt_mapped(pml4
, end
))
375 bo
.size
= end
- address
;
376 bo
.map
= mmap(NULL
, bo
.size
, PROT_READ
, MAP_SHARED
| MAP_ANONYMOUS
, -1, 0);
377 assert(bo
.map
!= MAP_FAILED
);
379 for (uint64_t page
= address
; page
< end
; page
+= 4096) {
380 struct phys_mem
*phys_mem
= ppgtt_walk(pml4
, page
);
382 void *res
= mmap((uint8_t *)bo
.map
+ (page
- bo
.addr
), 4096, PROT_READ
,
383 MAP_SHARED
| MAP_FIXED
, mem_fd
, phys_mem
->fd_offset
);
384 assert(res
!= MAP_FAILED
);
387 add_gtt_bo_map(bo
, true);
392 #define GEN_ENGINE_RENDER 1
393 #define GEN_ENGINE_BLITTER 2
396 handle_trace_block(uint32_t *p
)
398 int operation
= p
[1] & AUB_TRACE_OPERATION_MASK
;
399 int type
= p
[1] & AUB_TRACE_TYPE_MASK
;
400 int address_space
= p
[1] & AUB_TRACE_ADDRESS_SPACE_MASK
;
401 int header_length
= p
[0] & 0xffff;
402 int engine
= GEN_ENGINE_RENDER
;
403 struct gen_batch_decode_bo bo
= {
404 .map
= p
+ header_length
+ 2,
405 /* Addresses written by aubdump here are in canonical form but the batch
406 * decoder always gives us addresses with the top 16bits zeroed, so do
409 .addr
= gen_48b_address((devinfo
.gen
>= 8 ? ((uint64_t) p
[5] << 32) : 0) |
415 case AUB_TRACE_OP_DATA_WRITE
:
416 if (address_space
== AUB_TRACE_MEMTYPE_GTT
)
417 add_gtt_bo_map(bo
, false);
419 case AUB_TRACE_OP_COMMAND_WRITE
:
421 case AUB_TRACE_TYPE_RING_PRB0
:
422 engine
= GEN_ENGINE_RENDER
;
424 case AUB_TRACE_TYPE_RING_PRB2
:
425 engine
= GEN_ENGINE_BLITTER
;
428 fprintf(outfile
, "command write to unknown ring %d\n", type
);
432 (void)engine
; /* TODO */
433 batch_ctx
.get_bo
= get_ggtt_batch_bo
;
434 gen_print_batch(&batch_ctx
, bo
.map
, bo
.size
, 0);
442 aubinator_init(uint16_t aub_pci_id
, const char *app_name
)
444 if (!gen_get_device_info(pci_id
, &devinfo
)) {
445 fprintf(stderr
, "can't find device information: pci_id=0x%x\n", pci_id
);
449 enum gen_batch_decode_flags batch_flags
= 0;
450 if (option_color
== COLOR_ALWAYS
)
451 batch_flags
|= GEN_BATCH_DECODE_IN_COLOR
;
452 if (option_full_decode
)
453 batch_flags
|= GEN_BATCH_DECODE_FULL
;
454 if (option_print_offsets
)
455 batch_flags
|= GEN_BATCH_DECODE_OFFSETS
;
456 batch_flags
|= GEN_BATCH_DECODE_FLOATS
;
458 gen_batch_decode_ctx_init(&batch_ctx
, &devinfo
, outfile
, batch_flags
,
459 xml_path
, NULL
, NULL
, NULL
);
460 batch_ctx
.max_vbo_decoded_lines
= max_vbo_lines
;
462 char *color
= GREEN_HEADER
, *reset_color
= NORMAL
;
463 if (option_color
== COLOR_NEVER
)
464 color
= reset_color
= "";
466 fprintf(outfile
, "%sAubinator: Intel AUB file decoder.%-80s%s\n",
467 color
, "", reset_color
);
470 fprintf(outfile
, "File name: %s\n", input_file
);
473 fprintf(outfile
, "PCI ID: 0x%x\n", aub_pci_id
);
475 fprintf(outfile
, "Application name: %s\n", app_name
);
477 fprintf(outfile
, "Decoding as: %s\n", gen_get_device_name(pci_id
));
479 /* Throw in a new line before the first batch */
480 fprintf(outfile
, "\n");
/* Process a classic AUB header record: extract an optional PCI-ID tag and
 * the application name, then initialize the decoder.
 */
static void
handle_trace_header(uint32_t *p)
{
   /* The intel_aubdump tool from IGT is kind enough to put a PCI-ID= tag in
    * the AUB header comment. If the user hasn't specified a hardware
    * generation, try to use the one from the AUB file.
    */
   uint32_t *end = p + (p[0] & 0xffff) + 2;
   int aub_pci_id = 0;

   if (end > &p[12] && p[12] > 0)
      sscanf((char *)&p[13], "PCI-ID=%i", &aub_pci_id);

   char app_name[33];
   strncpy(app_name, (char *)&p[2], 32);
   /* strncpy does not guarantee termination; force it. */
   app_name[32] = 0;

   aubinator_init(aub_pci_id, app_name);
}
/* Process a new-format AUB version record: the comment payload carries an
 * optional "PCI-ID=..." prefix followed by the application name.
 */
static void
handle_memtrace_version(uint32_t *p)
{
   int header_length = p[0] & 0xffff;
   char app_name[64];
   /* Comment dwords start at p[5]; clamp to the local buffer. */
   int app_name_len = MIN2(4 * (header_length + 1 - 5), ARRAY_SIZE(app_name) - 1);
   int pci_id_len = 0;
   int aub_pci_id = 0;

   strncpy(app_name, (char *)&p[5], app_name_len);
   /* strncpy does not guarantee termination; force it. */
   app_name[app_name_len] = 0;
   sscanf(app_name, "PCI-ID=%i %n", &aub_pci_id, &pci_id_len);

   aubinator_init(aub_pci_id, app_name + pci_id_len);
}
523 handle_memtrace_reg_write(uint32_t *p
)
525 static struct execlist_regs
{
526 uint32_t render_elsp
[4];
527 int render_elsp_index
;
528 uint32_t blitter_elsp
[4];
529 int blitter_elsp_index
;
532 uint32_t offset
= p
[1];
533 uint32_t value
= p
[5];
536 uint64_t context_descriptor
;
539 case 0x2230: /* render elsp */
540 state
.render_elsp
[state
.render_elsp_index
++] = value
;
541 if (state
.render_elsp_index
< 4)
544 state
.render_elsp_index
= 0;
545 engine
= GEN_ENGINE_RENDER
;
546 context_descriptor
= (uint64_t)state
.render_elsp
[2] << 32 |
547 state
.render_elsp
[3];
549 case 0x22230: /* blitter elsp */
550 state
.blitter_elsp
[state
.blitter_elsp_index
++] = value
;
551 if (state
.blitter_elsp_index
< 4)
554 state
.blitter_elsp_index
= 0;
555 engine
= GEN_ENGINE_BLITTER
;
556 context_descriptor
= (uint64_t)state
.blitter_elsp
[2] << 32 |
557 state
.blitter_elsp
[3];
559 case 0x2510: /* render elsq0 lo */
560 state
.render_elsp
[3] = value
;
563 case 0x2514: /* render elsq0 hi */
564 state
.render_elsp
[2] = value
;
567 case 0x22510: /* blitter elsq0 lo */
568 state
.blitter_elsp
[3] = value
;
571 case 0x22514: /* blitter elsq0 hi */
572 state
.blitter_elsp
[2] = value
;
575 case 0x2550: /* render elsc */
576 engine
= GEN_ENGINE_RENDER
;
577 context_descriptor
= (uint64_t)state
.render_elsp
[2] << 32 |
578 state
.render_elsp
[3];
580 case 0x22550: /* blitter elsc */
581 engine
= GEN_ENGINE_BLITTER
;
582 context_descriptor
= (uint64_t)state
.blitter_elsp
[2] << 32 |
583 state
.blitter_elsp
[3];
589 const uint32_t pphwsp_size
= 4096;
590 uint32_t pphwsp_addr
= context_descriptor
& 0xfffff000;
591 struct gen_batch_decode_bo pphwsp_bo
= get_ggtt_batch_bo(NULL
, pphwsp_addr
);
592 uint32_t *context
= (uint32_t *)((uint8_t *)pphwsp_bo
.map
+
593 (pphwsp_addr
- pphwsp_bo
.addr
) +
596 uint32_t ring_buffer_head
= context
[5];
597 uint32_t ring_buffer_tail
= context
[7];
598 uint32_t ring_buffer_start
= context
[9];
599 uint64_t pml4
= (uint64_t)context
[49] << 32 | context
[51];
601 struct gen_batch_decode_bo ring_bo
= get_ggtt_batch_bo(NULL
,
603 assert(ring_bo
.size
> 0);
604 void *commands
= (uint8_t *)ring_bo
.map
+ (ring_buffer_start
- ring_bo
.addr
);
606 if (context_descriptor
& 0x100 /* ppgtt */) {
607 batch_ctx
.get_bo
= get_ppgtt_batch_bo
;
608 batch_ctx
.user_data
= &pml4
;
610 batch_ctx
.get_bo
= get_ggtt_batch_bo
;
613 (void)engine
; /* TODO */
614 gen_print_batch(&batch_ctx
, commands
, ring_buffer_tail
- ring_buffer_head
,
620 handle_memtrace_mem_write(uint32_t *p
)
622 struct gen_batch_decode_bo bo
= {
624 /* Addresses written by aubdump here are in canonical form but the batch
625 * decoder always gives us addresses with the top 16bits zeroed, so do
628 .addr
= gen_48b_address(*(uint64_t*)&p
[1]),
631 uint32_t address_space
= p
[3] >> 28;
633 switch (address_space
) {
635 handle_ggtt_write(bo
.addr
, bo
.map
, bo
.size
);
638 add_gtt_bo_map(bo
, false);
640 case 2: /* Physical */
641 handle_physical_write(bo
.addr
, bo
.map
, bo
.size
);
643 case 4: /* GGTT Entry */
644 handle_ggtt_entry_write(bo
.addr
, bo
.map
, bo
.size
);
/* An open AUB file: either a memory-mapped file (map/end/cursor walk the
 * dword stream) or, potentially, a stream (stdin) — see
 * aub_file_more_stuff().
 */
struct aub_file {
   FILE *stream;

   uint32_t *map, *end, *cursor;
};
656 static struct aub_file
*
657 aub_file_open(const char *filename
)
659 struct aub_file
*file
;
663 file
= calloc(1, sizeof *file
);
664 fd
= open(filename
, O_RDONLY
);
666 fprintf(stderr
, "open %s failed: %s\n", filename
, strerror(errno
));
670 if (fstat(fd
, &sb
) == -1) {
671 fprintf(stderr
, "stat failed: %s\n", strerror(errno
));
675 file
->map
= mmap(NULL
, sb
.st_size
,
676 PROT_READ
, MAP_SHARED
, fd
, 0);
677 if (file
->map
== MAP_FAILED
) {
678 fprintf(stderr
, "mmap failed: %s\n", strerror(errno
));
684 file
->cursor
= file
->map
;
685 file
->end
= file
->map
+ sb
.st_size
/ 4;
/* Every AUB record starts with a dword whose top bits encode
 * type/opcode/subopcode; these macros pack and unpack that header dword.
 */
#define TYPE(dw)       (((dw) >> 29) & 7)
#define OPCODE(dw)     (((dw) >> 23) & 0x3f)
#define SUBOPCODE(dw)  (((dw) >> 16) & 0x7f)

#define MAKE_HEADER(type, opcode, subopcode) \
                   (((type) << 29) | ((opcode) << 23) | ((subopcode) << 16))

#define TYPE_AUB 0x7

/* Classic AUB opcodes */
#define OPCODE_AUB          0x01
#define SUBOPCODE_HEADER    0x05
#define SUBOPCODE_BLOCK     0x41
#define SUBOPCODE_BMP       0x1e

/* Newer version AUB opcode */
#define OPCODE_NEW_AUB      0x2e
#define SUBOPCODE_REG_POLL  0x02
#define SUBOPCODE_REG_WRITE 0x03
#define SUBOPCODE_MEM_POLL  0x05
#define SUBOPCODE_MEM_WRITE 0x06
#define SUBOPCODE_VERSION   0x0e

#define MAKE_GEN(major, minor) ( ((major) << 8) | (minor) )
/* Result codes for aub_file_decode_batch(). */
enum {
   AUB_ITEM_DECODE_OK,
   AUB_ITEM_DECODE_FAILED,
   AUB_ITEM_DECODE_NEED_MORE_DATA,
};
722 aub_file_decode_batch(struct aub_file
*file
)
724 uint32_t *p
, h
, *new_cursor
;
725 int header_length
, bias
;
727 assert(file
->cursor
< file
->end
);
731 header_length
= h
& 0xffff;
741 fprintf(outfile
, "unknown opcode %d at %td/%td\n",
742 OPCODE(h
), file
->cursor
- file
->map
,
743 file
->end
- file
->map
);
744 return AUB_ITEM_DECODE_FAILED
;
747 new_cursor
= p
+ header_length
+ bias
;
748 if ((h
& 0xffff0000) == MAKE_HEADER(TYPE_AUB
, OPCODE_AUB
, SUBOPCODE_BLOCK
)) {
749 assert(file
->end
- file
->cursor
>= 4);
750 new_cursor
+= p
[4] / 4;
753 assert(new_cursor
<= file
->end
);
755 switch (h
& 0xffff0000) {
756 case MAKE_HEADER(TYPE_AUB
, OPCODE_AUB
, SUBOPCODE_HEADER
):
757 handle_trace_header(p
);
759 case MAKE_HEADER(TYPE_AUB
, OPCODE_AUB
, SUBOPCODE_BLOCK
):
760 handle_trace_block(p
);
762 case MAKE_HEADER(TYPE_AUB
, OPCODE_AUB
, SUBOPCODE_BMP
):
764 case MAKE_HEADER(TYPE_AUB
, OPCODE_NEW_AUB
, SUBOPCODE_VERSION
):
765 handle_memtrace_version(p
);
767 case MAKE_HEADER(TYPE_AUB
, OPCODE_NEW_AUB
, SUBOPCODE_REG_WRITE
):
768 handle_memtrace_reg_write(p
);
770 case MAKE_HEADER(TYPE_AUB
, OPCODE_NEW_AUB
, SUBOPCODE_MEM_WRITE
):
771 handle_memtrace_mem_write(p
);
773 case MAKE_HEADER(TYPE_AUB
, OPCODE_NEW_AUB
, SUBOPCODE_MEM_POLL
):
774 fprintf(outfile
, "memory poll block (dwords %d):\n", h
& 0xffff);
776 case MAKE_HEADER(TYPE_AUB
, OPCODE_NEW_AUB
, SUBOPCODE_REG_POLL
):
779 fprintf(outfile
, "unknown block type=0x%x, opcode=0x%x, "
780 "subopcode=0x%x (%08x)\n", TYPE(h
), OPCODE(h
), SUBOPCODE(h
), h
);
783 file
->cursor
= new_cursor
;
785 return AUB_ITEM_DECODE_OK
;
789 aub_file_more_stuff(struct aub_file
*file
)
791 return file
->cursor
< file
->end
|| (file
->stream
&& !feof(file
->stream
));
/* Pipe our stdout through "less" so long decodes are scrollable.  On any
 * failure we silently fall back to writing to the terminal directly.
 */
static void
setup_pager(void)
{
   int fds[2];
   pid_t pid;

   if (!isatty(1))
      return;

   if (pipe(fds) == -1)
      return;

   pid = fork();
   if (pid == -1)
      return;

   if (pid == 0) {
      close(fds[1]);
      dup2(fds[0], 0);
      execlp("less", "less", "-FRSi", NULL);
   }

   close(fds[0]);
   dup2(fds[1], 1);
   close(fds[1]);
}
/* Print command-line usage to the given stream (stdout for --help, stderr on
 * bad invocation).
 */
static void
print_help(const char *progname, FILE *file)
{
   fprintf(file,
           "Usage: %s [OPTION]... FILE\n"
           "Decode aub file contents from FILE.\n\n"
           "      --help             display this help and exit\n"
           "      --gen=platform     decode for given platform (3 letter platform name)\n"
           "      --headers          decode only command headers\n"
           "      --color[=WHEN]     colorize the output; WHEN can be 'auto' (default\n"
           "                           if omitted), 'always', or 'never'\n"
           "      --max-vbo-lines=N  limit the number of decoded VBO lines\n"
           "      --no-pager         don't launch pager\n"
           "      --no-offsets       don't print instruction offsets\n"
           "      --xml=DIR          load hardware xml description from directory DIR\n",
           progname);
}
839 int main(int argc
, char *argv
[])
841 struct aub_file
*file
;
843 bool help
= false, pager
= true;
844 const struct option aubinator_opts
[] = {
845 { "help", no_argument
, (int *) &help
, true },
846 { "no-pager", no_argument
, (int *) &pager
, false },
847 { "no-offsets", no_argument
, (int *) &option_print_offsets
, false },
848 { "gen", required_argument
, NULL
, 'g' },
849 { "headers", no_argument
, (int *) &option_full_decode
, false },
850 { "color", required_argument
, NULL
, 'c' },
851 { "xml", required_argument
, NULL
, 'x' },
852 { "max-vbo-lines", required_argument
, NULL
, 'v' },
859 while ((c
= getopt_long(argc
, argv
, "", aubinator_opts
, &i
)) != -1) {
862 const int id
= gen_device_name_to_pci_device_id(optarg
);
864 fprintf(stderr
, "can't parse gen: '%s', expected ivb, byt, hsw, "
865 "bdw, chv, skl, kbl or bxt\n", optarg
);
873 if (optarg
== NULL
|| strcmp(optarg
, "always") == 0)
874 option_color
= COLOR_ALWAYS
;
875 else if (strcmp(optarg
, "never") == 0)
876 option_color
= COLOR_NEVER
;
877 else if (strcmp(optarg
, "auto") == 0)
878 option_color
= COLOR_AUTO
;
880 fprintf(stderr
, "invalid value for --color: %s", optarg
);
885 xml_path
= strdup(optarg
);
888 max_vbo_lines
= atoi(optarg
);
896 input_file
= argv
[optind
];
898 if (help
|| !input_file
) {
899 print_help(argv
[0], stderr
);
903 /* Do this before we redirect stdout to pager. */
904 if (option_color
== COLOR_AUTO
)
905 option_color
= isatty(1) ? COLOR_ALWAYS
: COLOR_NEVER
;
907 if (isatty(1) && pager
)
910 mem_fd
= memfd_create("phys memory", 0);
912 list_inithead(&maps
);
914 file
= aub_file_open(input_file
);
916 while (aub_file_more_stuff(file
) &&
917 aub_file_decode_batch(file
) == AUB_ITEM_DECODE_OK
);
920 /* close the stdout which is opened to write the output */