/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
35 #include <sys/ioctl.h>
42 #include "intel_aub.h"
44 #include "dev/gen_device_info.h"
45 #include "util/macros.h"
48 #define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
51 #define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
52 #define MI_LRI_FORCE_POSTED (1<<12)
54 #define MI_BATCH_BUFFER_END (0xA << 23)
/* Type-safe minimum of two values.
 *
 * Uses a GNU statement expression plus __typeof so each argument is
 * evaluated exactly once, avoiding the double-evaluation hazard of a
 * naive ternary macro (e.g. min(i++, j)).  The extraction had truncated
 * this macro; the comparison and closing brace are restored here.
 */
#define min(a, b) ({                            \
   __typeof(a) _a = (a);                        \
   __typeof(b) _b = (b);                        \
   _a < _b ? _a : _b;                           \
})
62 #define HWS_PGA_RCSUNIT 0x02080
63 #define HWS_PGA_VCSUNIT0 0x12080
64 #define HWS_PGA_BCSUNIT 0x22080
66 #define GFX_MODE_RCSUNIT 0x0229c
67 #define GFX_MODE_VCSUNIT0 0x1229c
68 #define GFX_MODE_BCSUNIT 0x2229c
70 #define EXECLIST_SUBMITPORT_RCSUNIT 0x02230
71 #define EXECLIST_SUBMITPORT_VCSUNIT0 0x12230
72 #define EXECLIST_SUBMITPORT_BCSUNIT 0x22230
74 #define EXECLIST_STATUS_RCSUNIT 0x02234
75 #define EXECLIST_STATUS_VCSUNIT0 0x12234
76 #define EXECLIST_STATUS_BCSUNIT 0x22234
78 #define EXECLIST_SQ_CONTENTS0_RCSUNIT 0x02510
79 #define EXECLIST_SQ_CONTENTS0_VCSUNIT0 0x12510
80 #define EXECLIST_SQ_CONTENTS0_BCSUNIT 0x22510
82 #define EXECLIST_CONTROL_RCSUNIT 0x02550
83 #define EXECLIST_CONTROL_VCSUNIT0 0x12550
84 #define EXECLIST_CONTROL_BCSUNIT 0x22550
86 #define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)
89 #define GEN8_PTE_SIZE 8
91 #define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
92 #define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)
94 #define RING_SIZE (1 * 4096)
95 #define PPHWSP_SIZE (1 * 4096)
96 #define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
97 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * 4096)
99 #define STATIC_GGTT_MAP_START 0
101 #define RENDER_RING_ADDR STATIC_GGTT_MAP_START
102 #define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)
104 #define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
105 #define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)
107 #define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
108 #define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)
110 #define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
111 #define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)
113 #define CONTEXT_FLAGS (0x229) /* Normal Priority | L3-LLC Coherency |
114 Legacy Context with no 64 bit VA support | Valid */
116 #define RENDER_CONTEXT_DESCRIPTOR ((uint64_t)1 << 32 | RENDER_CONTEXT_ADDR | CONTEXT_FLAGS)
117 #define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 32 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
118 #define VIDEO_CONTEXT_DESCRIPTOR ((uint64_t)3 << 32 | VIDEO_CONTEXT_ADDR | CONTEXT_FLAGS)
120 static const uint32_t render_context_init
[GEN10_LR_CONTEXT_RENDER_SIZE
/
121 sizeof(uint32_t)] = {
123 MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED
,
124 0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
125 0x2034 /* RING_HEAD */, 0,
126 0x2030 /* RING_TAIL */, 0,
127 0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR
,
128 0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
129 0x2168 /* BB_HEAD_U */, 0,
130 0x2140 /* BB_HEAD_L */, 0,
131 0x2110 /* BB_STATE */, 0,
132 0x211C /* SECOND_BB_HEAD_U */, 0,
133 0x2114 /* SECOND_BB_HEAD_L */, 0,
134 0x2118 /* SECOND_BB_STATE */, 0,
135 0x21C0 /* BB_PER_CTX_PTR */, 0,
136 0x21C4 /* RCS_INDIRECT_CTX */, 0,
137 0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
142 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
143 0x23A8 /* CTX_TIMESTAMP */, 0,
144 0x228C /* PDP3_UDW */, 0,
145 0x2288 /* PDP3_LDW */, 0,
146 0x2284 /* PDP2_UDW */, 0,
147 0x2280 /* PDP2_LDW */, 0,
148 0x227C /* PDP1_UDW */, 0,
149 0x2278 /* PDP1_LDW */, 0,
150 0x2274 /* PDP0_UDW */, 0,
151 0x2270 /* PDP0_LDW */, 0,
153 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
156 MI_LOAD_REGISTER_IMM_n(1),
157 0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
161 static const uint32_t blitter_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
162 sizeof(uint32_t)] = {
164 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
165 0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
166 0x22034 /* RING_HEAD */, 0,
167 0x22030 /* RING_TAIL */, 0,
168 0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR
,
169 0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
170 0x22168 /* BB_HEAD_U */, 0,
171 0x22140 /* BB_HEAD_L */, 0,
172 0x22110 /* BB_STATE */, 0,
173 0x2211C /* SECOND_BB_HEAD_U */, 0,
174 0x22114 /* SECOND_BB_HEAD_L */, 0,
175 0x22118 /* SECOND_BB_STATE */, 0,
177 0, 0, 0, 0, 0, 0, 0, 0,
180 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
181 0x223A8 /* CTX_TIMESTAMP */, 0,
182 0x2228C /* PDP3_UDW */, 0,
183 0x22288 /* PDP3_LDW */, 0,
184 0x22284 /* PDP2_UDW */, 0,
185 0x22280 /* PDP2_LDW */, 0,
186 0x2227C /* PDP1_UDW */, 0,
187 0x22278 /* PDP1_LDW */, 0,
188 0x22274 /* PDP0_UDW */, 0,
189 0x22270 /* PDP0_LDW */, 0,
191 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
196 static const uint32_t video_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
197 sizeof(uint32_t)] = {
199 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
200 0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
201 0x1C034 /* RING_HEAD */, 0,
202 0x1C030 /* RING_TAIL */, 0,
203 0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR
,
204 0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
205 0x1C168 /* BB_HEAD_U */, 0,
206 0x1C140 /* BB_HEAD_L */, 0,
207 0x1C110 /* BB_STATE */, 0,
208 0x1C11C /* SECOND_BB_HEAD_U */, 0,
209 0x1C114 /* SECOND_BB_HEAD_L */, 0,
210 0x1C118 /* SECOND_BB_STATE */, 0,
212 0, 0, 0, 0, 0, 0, 0, 0,
215 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
216 0x1C3A8 /* CTX_TIMESTAMP */, 0,
217 0x1C28C /* PDP3_UDW */, 0,
218 0x1C288 /* PDP3_LDW */, 0,
219 0x1C284 /* PDP2_UDW */, 0,
220 0x1C280 /* PDP2_LDW */, 0,
221 0x1C27C /* PDP1_UDW */, 0,
222 0x1C278 /* PDP1_LDW */, 0,
223 0x1C274 /* PDP0_UDW */, 0,
224 0x1C270 /* PDP0_LDW */, 0,
226 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
231 static int close_init_helper(int fd
);
232 static int ioctl_init_helper(int fd
, unsigned long request
, ...);
234 static int (*libc_close
)(int fd
) = close_init_helper
;
235 static int (*libc_ioctl
)(int fd
, unsigned long request
, ...) = ioctl_init_helper
;
237 static int drm_fd
= -1;
238 static char *filename
= NULL
;
239 static FILE *files
[2] = { NULL
, NULL
};
240 static struct gen_device_info devinfo
= {0};
241 static int verbose
= 0;
242 static bool device_override
;
243 static uint32_t device
;
244 static int addr_bits
= 0;
246 #define MAX_BO_COUNT 64 * 1024
254 static struct bo
*bos
;
256 #define DRM_MAJOR 226
258 #ifndef DRM_I915_GEM_USERPTR
260 #define DRM_I915_GEM_USERPTR 0x33
261 #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
263 struct drm_i915_gem_userptr
{
267 #define I915_USERPTR_READ_ONLY 0x1
268 #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
270 * Returned handle for the object.
272 * Object handles are nonzero.
279 /* We set bit 0 in the map pointer for userptr BOs so we know not to
280 * munmap them on DRM_IOCTL_GEM_CLOSE.
282 #define USERPTR_FLAG 1
283 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
284 #define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
286 #ifndef I915_EXEC_BATCH_FIRST
287 #define I915_EXEC_BATCH_FIRST (1 << 18)
290 static void __attribute__ ((format(__printf__
, 2, 3)))
291 fail_if(int cond
, const char *format
, ...)
298 va_start(args
, format
);
299 vfprintf(stderr
, format
, args
);
306 get_bo(uint32_t handle
)
310 fail_if(handle
>= MAX_BO_COUNT
, "bo handle too large\n");
/* Rounds `v` up to the next multiple of `a`.
 *
 * `a` must be a power of two; the bit-mask trick (add a-1, clear the
 * low bits) only works for power-of-two alignments.  The function
 * braces, missing from the corrupted extraction, are restored.
 */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) & ~(a - 1);
}
/* Rounds `v` up to the next multiple of `a` (64-bit variant).
 *
 * `a` must be a power of two.  Used for GGTT addresses that can exceed
 * 32 bits.  The function braces, missing from the corrupted extraction,
 * are restored.
 */
static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   return (v + a - 1) & ~(a - 1);
}
329 dword_out(uint32_t data
)
331 for (int i
= 0; i
< ARRAY_SIZE (files
); i
++) {
332 if (files
[i
] == NULL
)
335 fail_if(fwrite(&data
, 1, 4, files
[i
]) == 0,
336 "Writing to output failed\n");
341 data_out(const void *data
, size_t size
)
346 for (int i
= 0; i
< ARRAY_SIZE (files
); i
++) {
347 if (files
[i
] == NULL
)
350 fail_if(fwrite(data
, 1, size
, files
[i
]) == 0,
351 "Writing to output failed\n");
358 return NUM_PT_ENTRIES
* (addr_bits
> 32 ? GEN8_PTE_SIZE
: PTE_SIZE
);
362 mem_trace_memory_write_header_out(uint64_t addr
, uint32_t len
,
365 uint32_t dwords
= ALIGN(len
, sizeof(uint32_t)) / sizeof(uint32_t);
367 dword_out(CMD_MEM_TRACE_MEMORY_WRITE
| (5 + dwords
- 1));
368 dword_out(addr
& 0xFFFFFFFF); /* addr lo */
369 dword_out(addr
>> 32); /* addr hi */
370 dword_out(addr_space
); /* gtt */
375 register_write_out(uint32_t addr
, uint32_t value
)
379 dword_out(CMD_MEM_TRACE_REGISTER_WRITE
| (5 + dwords
- 1));
381 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
382 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
383 dword_out(0xFFFFFFFF); /* mask lo */
384 dword_out(0x00000000); /* mask hi */
389 gen8_emit_ggtt_pte_for_range(uint64_t start
, uint64_t end
)
393 uint64_t end_aligned
= align_u64(end
, 4096);
395 if (start
>= end
|| end
> (1ull << 32))
398 entry_addr
= start
& ~(4096 - 1);
400 uint64_t last_page_entry
, num_entries
;
402 page_num
= entry_addr
>> 21;
403 last_page_entry
= min((page_num
+ 1) << 21, end_aligned
);
404 num_entries
= (last_page_entry
- entry_addr
) >> 12;
405 mem_trace_memory_write_header_out(
406 entry_addr
>> 9, num_entries
* GEN8_PTE_SIZE
,
407 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY
);
408 while (num_entries
-- > 0) {
409 dword_out((entry_addr
& ~(4096 - 1)) |
410 3 /* read/write | present */);
411 dword_out(entry_addr
>> 32);
414 } while (entry_addr
< end
);
/*
 * Sets bits `start` through `end` - 1 in the bitmap array.
 *
 * Each bit represents one 4KiB GGTT page.  As a fast path, a whole
 * 32-bit word is filled with a single store when the current position
 * is word-aligned and more than 32 bits remain; otherwise bits are set
 * one at a time.  The loop scaffolding, missing from the corrupted
 * extraction, is restored around the visible branch logic.
 */
static void
set_bitmap_range(uint32_t *bitmap, uint32_t start, uint32_t end)
{
   uint32_t pos = start;
   while (pos < end) {
      const uint32_t bit = 1 << (pos & 0x1f);
      if (bit == 1 && (end - pos) > 32) {
         bitmap[pos >> 5] = 0xffffffff;
         pos += 32;
      } else {
         bitmap[pos >> 5] |= bit;
         pos++;
      }
   }
}
/*
 * Finds the next `set` (or clear) bit in the bitmap array.
 *
 * The search starts at `*start` and only checks until `end` - 1.
 *
 * If found, returns true, and the found bit index in `*start`.
 *
 * `neg_dw` is the word value that proves a whole aligned 32-bit word
 * contains no match (all-zero when searching for a set bit, all-ones
 * when searching for a clear bit), allowing the scan to skip 32 bits
 * at a time.  Loop scaffolding missing from the corrupted extraction
 * is restored around the visible branch logic.
 */
static bool
find_bitmap_bit(uint32_t *bitmap, bool set, uint32_t *start, uint32_t end)
{
   uint32_t pos = *start;
   const uint32_t neg_dw = set ? 0 : -1;
   while (pos < end) {
      const uint32_t dw = bitmap[pos >> 5];
      const uint32_t bit = 1 << (pos & 0x1f);
      if (!!(dw & bit) == set) {
         *start = pos;
         return true;
      } else if (bit == 1 && dw == neg_dw)
         pos += 32;
      else
         pos++;
   }
   return false;
}
/*
 * Finds a range of clear bits within the bitmap array.
 *
 * The search starts at `*start` and only checks until `*end` - 1.
 *
 * If found, returns true, and `*start` and `*end` are set for the
 * range of clear bits: the first clear bit, and (exclusively) the
 * next set bit after it — or the original `*end` if none follows.
 * Closing control flow missing from the corrupted extraction is
 * restored.
 */
static bool
find_bitmap_clear_bit_range(uint32_t *bitmap, uint32_t *start, uint32_t *end)
{
   if (find_bitmap_bit(bitmap, false, start, *end)) {
      uint32_t found_end = *start;
      if (find_bitmap_bit(bitmap, true, &found_end, *end))
         *end = found_end;
      return true;
   }
   return false;
}
483 gen8_map_ggtt_range(uint64_t start
, uint64_t end
)
485 uint32_t pos1
, pos2
, end_pos
;
486 static uint32_t *bitmap
= NULL
;
487 if (bitmap
== NULL
) {
488 /* 4GiB (32-bits) of 4KiB pages (12-bits) in dwords (5-bits) */
489 bitmap
= calloc(1 << (32 - 12 - 5), sizeof(*bitmap
));
495 end_pos
= (end
+ 4096 - 1) >> 12;
496 while (pos1
< end_pos
) {
498 if (!find_bitmap_clear_bit_range(bitmap
, &pos1
, &pos2
))
502 printf("MAPPING 0x%08lx-0x%08lx\n",
503 (uint64_t)pos1
<< 12, (uint64_t)pos2
<< 12);
504 gen8_emit_ggtt_pte_for_range((uint64_t)pos1
<< 12,
505 (uint64_t)pos2
<< 12);
506 set_bitmap_range(bitmap
, (uint64_t)pos1
, (uint64_t)pos2
);
512 gen8_map_base_size(uint64_t base
, uint64_t size
)
514 gen8_map_ggtt_range(base
, base
+ size
);
518 gen10_write_header(void)
520 char app_name
[8 * 4];
521 int app_name_len
, dwords
;
524 snprintf(app_name
, sizeof(app_name
), "PCI-ID=0x%X %s", device
,
525 program_invocation_short_name
);
526 app_name_len
= ALIGN(app_name_len
, sizeof(uint32_t));
528 dwords
= 5 + app_name_len
/ sizeof(uint32_t);
529 dword_out(CMD_MEM_TRACE_VERSION
| (dwords
- 1));
530 dword_out(AUB_MEM_TRACE_VERSION_FILE_VERSION
);
531 dword_out(AUB_MEM_TRACE_VERSION_DEVICE_CNL
|
532 AUB_MEM_TRACE_VERSION_METHOD_PHY
);
533 dword_out(0); /* version */
534 dword_out(0); /* version */
535 data_out(app_name
, app_name_len
);
538 gen8_map_base_size(RENDER_RING_ADDR
, RING_SIZE
);
539 mem_trace_memory_write_header_out(RENDER_RING_ADDR
, RING_SIZE
,
540 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
541 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
545 gen8_map_base_size(RENDER_CONTEXT_ADDR
,
546 PPHWSP_SIZE
+ sizeof(render_context_init
));
547 mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR
,
549 sizeof(render_context_init
),
550 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
551 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
555 data_out(render_context_init
, sizeof(render_context_init
));
558 gen8_map_base_size(BLITTER_RING_ADDR
, RING_SIZE
);
559 mem_trace_memory_write_header_out(BLITTER_RING_ADDR
, RING_SIZE
,
560 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
561 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
565 gen8_map_base_size(BLITTER_CONTEXT_ADDR
,
566 PPHWSP_SIZE
+ sizeof(blitter_context_init
));
567 mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR
,
569 sizeof(blitter_context_init
),
570 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
571 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
574 /* BLITTER_CONTEXT */
575 data_out(blitter_context_init
, sizeof(blitter_context_init
));
578 gen8_map_base_size(VIDEO_RING_ADDR
, RING_SIZE
);
579 mem_trace_memory_write_header_out(VIDEO_RING_ADDR
, RING_SIZE
,
580 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
581 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
585 gen8_map_base_size(VIDEO_CONTEXT_ADDR
,
586 PPHWSP_SIZE
+ sizeof(video_context_init
));
587 mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR
,
589 sizeof(video_context_init
),
590 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
591 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
595 data_out(video_context_init
, sizeof(video_context_init
));
597 register_write_out(HWS_PGA_RCSUNIT
, RENDER_CONTEXT_ADDR
);
598 register_write_out(HWS_PGA_VCSUNIT0
, VIDEO_CONTEXT_ADDR
);
599 register_write_out(HWS_PGA_BCSUNIT
, BLITTER_CONTEXT_ADDR
);
601 register_write_out(GFX_MODE_RCSUNIT
, 0x80008000 /* execlist enable */);
602 register_write_out(GFX_MODE_VCSUNIT0
, 0x80008000 /* execlist enable */);
603 register_write_out(GFX_MODE_BCSUNIT
, 0x80008000 /* execlist enable */);
606 static void write_header(void)
608 char app_name
[8 * 4];
610 int comment_len
, comment_dwords
, dwords
;
611 uint32_t entry
= 0x200003;
613 comment_len
= snprintf(comment
, sizeof(comment
), "PCI-ID=0x%x", device
);
614 comment_dwords
= ((comment_len
+ 3) / 4);
616 /* Start with a (required) version packet. */
617 dwords
= 13 + comment_dwords
;
618 dword_out(CMD_AUB_HEADER
| (dwords
- 2));
619 dword_out((4 << AUB_HEADER_MAJOR_SHIFT
) |
620 (0 << AUB_HEADER_MINOR_SHIFT
));
622 /* Next comes a 32-byte application name. */
623 strncpy(app_name
, program_invocation_short_name
, sizeof(app_name
));
624 app_name
[sizeof(app_name
) - 1] = 0;
625 data_out(app_name
, sizeof(app_name
));
627 dword_out(0); /* timestamp */
628 dword_out(0); /* timestamp */
629 dword_out(comment_len
);
630 data_out(comment
, comment_dwords
* 4);
632 /* Set up the GTT. The max we can handle is 64M */
633 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
| ((addr_bits
> 32 ? 6 : 5) - 2));
634 dword_out(AUB_TRACE_MEMTYPE_GTT_ENTRY
|
635 AUB_TRACE_TYPE_NOTYPE
| AUB_TRACE_OP_DATA_WRITE
);
636 dword_out(0); /* subtype */
637 dword_out(0); /* offset */
638 dword_out(gtt_size()); /* size */
641 for (uint32_t i
= 0; i
< NUM_PT_ENTRIES
; i
++) {
642 dword_out(entry
+ 0x1000 * i
);
649 * Break up large objects into multiple writes. Otherwise a 128kb VBO
650 * would overflow the 16 bits of size field in the packet header and
651 * everything goes badly after that.
654 aub_write_trace_block(uint32_t type
, void *virtual, uint32_t size
, uint64_t gtt_offset
)
657 uint32_t subtype
= 0;
658 static const char null_block
[8 * 4096];
660 for (uint32_t offset
= 0; offset
< size
; offset
+= block_size
) {
661 block_size
= size
- offset
;
663 if (block_size
> 8 * 4096)
664 block_size
= 8 * 4096;
666 if (devinfo
.gen
>= 10) {
667 mem_trace_memory_write_header_out(gtt_offset
+ offset
,
669 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
671 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
|
672 ((addr_bits
> 32 ? 6 : 5) - 2));
673 dword_out(AUB_TRACE_MEMTYPE_GTT
|
674 type
| AUB_TRACE_OP_DATA_WRITE
);
676 dword_out(gtt_offset
+ offset
);
677 dword_out(align_u32(block_size
, 4));
679 dword_out((gtt_offset
+ offset
) >> 32);
683 data_out(((char *) GET_PTR(virtual)) + offset
, block_size
);
685 data_out(null_block
, block_size
);
687 /* Pad to a multiple of 4 bytes. */
688 data_out(null_block
, -block_size
& 3);
693 write_reloc(void *p
, uint64_t v
)
695 if (addr_bits
> 32) {
696 /* From the Broadwell PRM Vol. 2a,
697 * MI_LOAD_REGISTER_MEM::MemoryAddress:
699 * "This field specifies the address of the memory
700 * location where the register value specified in the
701 * DWord above will read from. The address specifies
702 * the DWord location of the data. Range =
703 * GraphicsVirtualAddress[63:2] for a DWord register
704 * GraphicsAddress [63:48] are ignored by the HW and
705 * assumed to be in correct canonical form [63:48] ==
708 * In practice, this will always mean the top bits are zero
709 * because of the GTT size limitation of the aubdump tool.
711 const int shift
= 63 - 47;
712 *(uint64_t *)p
= (((int64_t)v
) << shift
) >> shift
;
719 aub_dump_execlist(uint64_t batch_offset
, int ring_flag
)
726 uint32_t control_reg
;
729 case I915_EXEC_DEFAULT
:
730 case I915_EXEC_RENDER
:
731 ring_addr
= RENDER_RING_ADDR
;
732 descriptor
= RENDER_CONTEXT_DESCRIPTOR
;
733 elsp_reg
= EXECLIST_SUBMITPORT_RCSUNIT
;
734 elsq_reg
= EXECLIST_SQ_CONTENTS0_RCSUNIT
;
735 status_reg
= EXECLIST_STATUS_RCSUNIT
;
736 control_reg
= EXECLIST_CONTROL_RCSUNIT
;
739 ring_addr
= VIDEO_RING_ADDR
;
740 descriptor
= VIDEO_CONTEXT_DESCRIPTOR
;
741 elsp_reg
= EXECLIST_SUBMITPORT_VCSUNIT0
;
742 elsq_reg
= EXECLIST_SQ_CONTENTS0_VCSUNIT0
;
743 status_reg
= EXECLIST_STATUS_VCSUNIT0
;
744 control_reg
= EXECLIST_CONTROL_VCSUNIT0
;
747 ring_addr
= BLITTER_RING_ADDR
;
748 descriptor
= BLITTER_CONTEXT_DESCRIPTOR
;
749 elsp_reg
= EXECLIST_SUBMITPORT_BCSUNIT
;
750 elsq_reg
= EXECLIST_SQ_CONTENTS0_BCSUNIT
;
751 status_reg
= EXECLIST_STATUS_BCSUNIT
;
752 control_reg
= EXECLIST_CONTROL_BCSUNIT
;
756 mem_trace_memory_write_header_out(ring_addr
, 16,
757 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
758 dword_out(AUB_MI_BATCH_BUFFER_START
| (3 - 2));
759 dword_out(batch_offset
& 0xFFFFFFFF);
760 dword_out(batch_offset
>> 32);
761 dword_out(0 /* MI_NOOP */);
763 mem_trace_memory_write_header_out(ring_addr
+ 8192 + 20, 4,
764 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
765 dword_out(0); /* RING_BUFFER_HEAD */
766 mem_trace_memory_write_header_out(ring_addr
+ 8192 + 28, 4,
767 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_LOCAL
);
768 dword_out(16); /* RING_BUFFER_TAIL */
770 if (devinfo
.gen
>= 11) {
771 register_write_out(elsq_reg
, descriptor
& 0xFFFFFFFF);
772 register_write_out(elsq_reg
+ sizeof(uint32_t), descriptor
>> 32);
773 register_write_out(control_reg
, 1);
775 register_write_out(elsp_reg
, 0);
776 register_write_out(elsp_reg
, 0);
777 register_write_out(elsp_reg
, descriptor
>> 32);
778 register_write_out(elsp_reg
, descriptor
& 0xFFFFFFFF);
781 dword_out(CMD_MEM_TRACE_REGISTER_POLL
| (5 + 1 - 1));
782 dword_out(status_reg
);
783 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
784 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
785 if (devinfo
.gen
>= 11) {
786 dword_out(0x00000001); /* mask lo */
787 dword_out(0x00000000); /* mask hi */
788 dword_out(0x00000001);
790 dword_out(0x00000010); /* mask lo */
791 dword_out(0x00000000); /* mask hi */
792 dword_out(0x00000000);
797 aub_dump_ringbuffer(uint64_t batch_offset
, uint64_t offset
, int ring_flag
)
799 uint32_t ringbuffer
[4096];
800 unsigned aub_mi_bbs_len
;
801 int ring
= AUB_TRACE_TYPE_RING_PRB0
; /* The default ring */
804 if (ring_flag
== I915_EXEC_BSD
)
805 ring
= AUB_TRACE_TYPE_RING_PRB1
;
806 else if (ring_flag
== I915_EXEC_BLT
)
807 ring
= AUB_TRACE_TYPE_RING_PRB2
;
809 /* Make a ring buffer to execute our batchbuffer. */
810 memset(ringbuffer
, 0, sizeof(ringbuffer
));
812 aub_mi_bbs_len
= addr_bits
> 32 ? 3 : 2;
813 ringbuffer
[ring_count
] = AUB_MI_BATCH_BUFFER_START
| (aub_mi_bbs_len
- 2);
814 write_reloc(&ringbuffer
[ring_count
+ 1], batch_offset
);
815 ring_count
+= aub_mi_bbs_len
;
817 /* Write out the ring. This appears to trigger execution of
818 * the ring in the simulator.
820 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
|
821 ((addr_bits
> 32 ? 6 : 5) - 2));
822 dword_out(AUB_TRACE_MEMTYPE_GTT
| ring
| AUB_TRACE_OP_COMMAND_WRITE
);
823 dword_out(0); /* general/surface subtype */
825 dword_out(ring_count
* 4);
827 dword_out(offset
>> 32);
829 data_out(ringbuffer
, ring_count
* 4);
833 relocate_bo(struct bo
*bo
, const struct drm_i915_gem_execbuffer2
*execbuffer2
,
834 const struct drm_i915_gem_exec_object2
*obj
)
836 const struct drm_i915_gem_exec_object2
*exec_objects
=
837 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
838 const struct drm_i915_gem_relocation_entry
*relocs
=
839 (const struct drm_i915_gem_relocation_entry
*) (uintptr_t) obj
->relocs_ptr
;
843 relocated
= malloc(bo
->size
);
844 fail_if(relocated
== NULL
, "intel_aubdump: out of memory\n");
845 memcpy(relocated
, GET_PTR(bo
->map
), bo
->size
);
846 for (size_t i
= 0; i
< obj
->relocation_count
; i
++) {
847 fail_if(relocs
[i
].offset
>= bo
->size
, "intel_aubdump: reloc outside bo\n");
849 if (execbuffer2
->flags
& I915_EXEC_HANDLE_LUT
)
850 handle
= exec_objects
[relocs
[i
].target_handle
].handle
;
852 handle
= relocs
[i
].target_handle
;
854 write_reloc(((char *)relocated
) + relocs
[i
].offset
,
855 get_bo(handle
)->offset
+ relocs
[i
].delta
);
/* Issues `request` on `fd` through the real (un-intercepted) libc
 * ioctl, retrying while the call fails with EINTR or EAGAIN.
 *
 * Returns the ioctl's return value (-1 on a non-retryable error, with
 * errno set).  Function scaffolding missing from the corrupted
 * extraction is restored around the visible retry loop.
 */
static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   int ret;

   do {
      ret = libc_ioctl(fd, request, argp);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
874 gem_mmap(int fd
, uint32_t handle
, uint64_t offset
, uint64_t size
)
876 struct drm_i915_gem_mmap mmap
= {
882 if (gem_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap
) == -1)
885 return (void *)(uintptr_t) mmap
.addr_ptr
;
889 gem_get_param(int fd
, uint32_t param
)
892 drm_i915_getparam_t gp
= {
897 if (gem_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gp
) == -1)
904 dump_execbuffer2(int fd
, struct drm_i915_gem_execbuffer2
*execbuffer2
)
906 struct drm_i915_gem_exec_object2
*exec_objects
=
907 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
908 uint32_t ring_flag
= execbuffer2
->flags
& I915_EXEC_RING_MASK
;
910 struct drm_i915_gem_exec_object2
*obj
;
911 struct bo
*bo
, *batch_bo
;
915 /* We can't do this at open time as we're not yet authenticated. */
917 device
= gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
918 fail_if(device
== 0 || devinfo
.gen
== 0, "failed to identify chipset\n");
920 if (devinfo
.gen
== 0) {
921 fail_if(!gen_get_device_info(device
, &devinfo
),
922 "failed to identify chipset=0x%x\n", device
);
924 addr_bits
= devinfo
.gen
>= 8 ? 48 : 32;
926 if (devinfo
.gen
>= 10)
927 gen10_write_header();
932 printf("[intel_aubdump: running, "
933 "output file %s, chipset id 0x%04x, gen %d]\n",
934 filename
, device
, devinfo
.gen
);
937 if (devinfo
.gen
>= 10)
938 offset
= STATIC_GGTT_MAP_END
;
943 printf("Dumping execbuffer2:\n");
945 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
946 obj
= &exec_objects
[i
];
947 bo
= get_bo(obj
->handle
);
949 /* If bo->size == 0, this means they passed us an invalid
950 * buffer. The kernel will reject it and so should we.
954 printf("BO #%d is invalid!\n", obj
->handle
);
958 if (obj
->flags
& EXEC_OBJECT_PINNED
) {
959 bo
->offset
= obj
->offset
;
961 printf("BO #%d (%dB) pinned @ 0x%lx\n",
962 obj
->handle
, bo
->size
, bo
->offset
);
964 if (obj
->alignment
!= 0)
965 offset
= align_u32(offset
, obj
->alignment
);
968 printf("BO #%d (%dB) @ 0x%lx\n", obj
->handle
,
969 bo
->size
, bo
->offset
);
970 offset
= align_u32(offset
+ bo
->size
+ 4095, 4096);
973 if (bo
->map
== NULL
&& bo
->size
> 0)
974 bo
->map
= gem_mmap(fd
, obj
->handle
, 0, bo
->size
);
975 fail_if(bo
->map
== MAP_FAILED
, "intel_aubdump: bo mmap failed\n");
977 if (devinfo
.gen
>= 10)
978 gen8_map_ggtt_range(bo
->offset
, bo
->offset
+ bo
->size
);
981 batch_index
= (execbuffer2
->flags
& I915_EXEC_BATCH_FIRST
) ? 0 :
982 execbuffer2
->buffer_count
- 1;
983 batch_bo
= get_bo(exec_objects
[batch_index
].handle
);
984 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
985 obj
= &exec_objects
[i
];
986 bo
= get_bo(obj
->handle
);
988 if (obj
->relocation_count
> 0)
989 data
= relocate_bo(bo
, execbuffer2
, obj
);
993 if (bo
== batch_bo
) {
994 aub_write_trace_block(AUB_TRACE_TYPE_BATCH
,
995 data
, bo
->size
, bo
->offset
);
997 aub_write_trace_block(AUB_TRACE_TYPE_NOTYPE
,
998 data
, bo
->size
, bo
->offset
);
1000 if (data
!= bo
->map
)
1004 if (devinfo
.gen
>= 10) {
1005 aub_dump_execlist(batch_bo
->offset
+
1006 execbuffer2
->batch_start_offset
, ring_flag
);
1008 /* Dump ring buffer */
1009 aub_dump_ringbuffer(batch_bo
->offset
+
1010 execbuffer2
->batch_start_offset
, offset
,
1014 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
1015 if (files
[i
] != NULL
)
1019 if (device_override
&&
1020 (execbuffer2
->flags
& I915_EXEC_FENCE_ARRAY
) != 0) {
1021 struct drm_i915_gem_exec_fence
*fences
=
1022 (void*)(uintptr_t)execbuffer2
->cliprects_ptr
;
1023 for (uint32_t i
= 0; i
< execbuffer2
->num_cliprects
; i
++) {
1024 if ((fences
[i
].flags
& I915_EXEC_FENCE_SIGNAL
) != 0) {
1025 struct drm_syncobj_array arg
= {
1026 .handles
= (uintptr_t)&fences
[i
].handle
,
1030 libc_ioctl(fd
, DRM_IOCTL_SYNCOBJ_SIGNAL
, &arg
);
1037 add_new_bo(int handle
, uint64_t size
, void *map
)
1039 struct bo
*bo
= &bos
[handle
];
1041 fail_if(handle
>= MAX_BO_COUNT
, "intel_aubdump: bo handle out of range\n");
1042 fail_if(size
== 0, "intel_aubdump: bo size is invalid\n");
1049 remove_bo(int handle
)
1051 struct bo
*bo
= get_bo(handle
);
1053 if (bo
->map
&& !IS_USERPTR(bo
->map
))
1054 munmap(bo
->map
, bo
->size
);
1059 __attribute__ ((visibility ("default"))) int
1065 return libc_close(fd
);
1069 launch_command(char *command
)
1072 char **args
= calloc(strlen(command
), sizeof(char *));
1073 char *iter
= command
;
1075 args
[i
++] = iter
= command
;
1077 while ((iter
= strstr(iter
, ",")) != NULL
) {
1083 if (pipe(fds
) == -1)
1089 fail_if(execvp(args
[0], args
) == -1,
1090 "intel_aubdump: failed to launch child command\n");
1095 return fdopen(fds
[1], "w");
1105 static bool initialized
= false;
1114 config
= fdopen(3, "r");
1115 while (fscanf(config
, "%m[^=]=%m[^\n]\n", &key
, &value
) != EOF
) {
1116 if (!strcmp(key
, "verbose")) {
1118 } else if (!strcmp(key
, "device")) {
1119 fail_if(sscanf(value
, "%i", &device
) != 1,
1120 "intel_aubdump: failed to parse device id '%s'",
1122 device_override
= true;
1123 } else if (!strcmp(key
, "file")) {
1124 filename
= strdup(value
);
1125 files
[0] = fopen(filename
, "w+");
1126 fail_if(files
[0] == NULL
,
1127 "intel_aubdump: failed to open file '%s'\n",
1129 } else if (!strcmp(key
, "command")) {
1130 files
[1] = launch_command(value
);
1131 fail_if(files
[1] == NULL
,
1132 "intel_aubdump: failed to launch command '%s'\n",
1135 fprintf(stderr
, "intel_aubdump: unknown option '%s'\n", key
);
1143 bos
= calloc(MAX_BO_COUNT
, sizeof(bos
[0]));
1144 fail_if(bos
== NULL
, "intel_aubdump: out of memory\n");
1147 #define LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR \
1148 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
1150 __attribute__ ((visibility ("default"))) int
1151 ioctl(int fd
, unsigned long request
, ...)
1158 va_start(args
, request
);
1159 argp
= va_arg(args
, void *);
1162 if (_IOC_TYPE(request
) == DRM_IOCTL_BASE
&&
1163 drm_fd
!= fd
&& fstat(fd
, &buf
) == 0 &&
1164 (buf
.st_mode
& S_IFMT
) == S_IFCHR
&& major(buf
.st_rdev
) == DRM_MAJOR
) {
1167 printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd
);
1174 case DRM_IOCTL_I915_GETPARAM
: {
1175 struct drm_i915_getparam
*getparam
= argp
;
1177 if (device_override
&& getparam
->param
== I915_PARAM_CHIPSET_ID
) {
1178 *getparam
->value
= device
;
1182 ret
= libc_ioctl(fd
, request
, argp
);
1184 /* If the application looks up chipset_id
1185 * (they typically do), we'll piggy-back on
1186 * their ioctl and store the id for later
1188 if (getparam
->param
== I915_PARAM_CHIPSET_ID
)
1189 device
= *getparam
->value
;
1194 case DRM_IOCTL_I915_GEM_EXECBUFFER
: {
1197 fprintf(stderr
, "intel_aubdump: "
1198 "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
1201 return libc_ioctl(fd
, request
, argp
);
1204 case DRM_IOCTL_I915_GEM_EXECBUFFER2
:
1205 case LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR
: {
1206 dump_execbuffer2(fd
, argp
);
1207 if (device_override
)
1210 return libc_ioctl(fd
, request
, argp
);
1213 case DRM_IOCTL_I915_GEM_CREATE
: {
1214 struct drm_i915_gem_create
*create
= argp
;
1216 ret
= libc_ioctl(fd
, request
, argp
);
1218 add_new_bo(create
->handle
, create
->size
, NULL
);
1223 case DRM_IOCTL_I915_GEM_USERPTR
: {
1224 struct drm_i915_gem_userptr
*userptr
= argp
;
1226 ret
= libc_ioctl(fd
, request
, argp
);
1228 add_new_bo(userptr
->handle
, userptr
->user_size
,
1229 (void *) (uintptr_t) (userptr
->user_ptr
| USERPTR_FLAG
));
1233 case DRM_IOCTL_GEM_CLOSE
: {
1234 struct drm_gem_close
*close
= argp
;
1236 remove_bo(close
->handle
);
1238 return libc_ioctl(fd
, request
, argp
);
1241 case DRM_IOCTL_GEM_OPEN
: {
1242 struct drm_gem_open
*open
= argp
;
1244 ret
= libc_ioctl(fd
, request
, argp
);
1246 add_new_bo(open
->handle
, open
->size
, NULL
);
1251 case DRM_IOCTL_PRIME_FD_TO_HANDLE
: {
1252 struct drm_prime_handle
*prime
= argp
;
1254 ret
= libc_ioctl(fd
, request
, argp
);
1258 size
= lseek(prime
->fd
, 0, SEEK_END
);
1259 fail_if(size
== -1, "intel_aubdump: failed to get prime bo size\n");
1260 add_new_bo(prime
->handle
, size
, NULL
);
1267 return libc_ioctl(fd
, request
, argp
);
1270 return libc_ioctl(fd
, request
, argp
);
1277 libc_close
= dlsym(RTLD_NEXT
, "close");
1278 libc_ioctl
= dlsym(RTLD_NEXT
, "ioctl");
1279 fail_if(libc_close
== NULL
|| libc_ioctl
== NULL
,
1280 "intel_aubdump: failed to get libc ioctl or close\n");
1284 close_init_helper(int fd
)
1287 return libc_close(fd
);
1291 ioctl_init_helper(int fd
, unsigned long request
, ...)
1296 va_start(args
, request
);
1297 argp
= va_arg(args
, void *);
1301 return libc_ioctl(fd
, request
, argp
);
1304 static void __attribute__ ((destructor
))
1308 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
1309 if (files
[i
] != NULL
)