2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
35 #include <sys/ioctl.h>
42 #include "intel_aub.h"
44 #include "dev/gen_device_info.h"
45 #include "util/macros.h"
/* Power-of-two round-up; `y` must be a power of two. */
48 #define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
/* MI_LOAD_REGISTER_IMM opcode with n (offset, value) pairs;
 * the DWord-length field of the header is 2n - 1.
 */
51 #define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
52 #define MI_LRI_FORCE_POSTED (1<<12)
54 #define MI_BATCH_NON_SECURE_I965 (1 << 8)
56 #define MI_BATCH_BUFFER_END (0xA << 23)
/* Single-evaluation min/max via GCC statement expressions.
 * NOTE(review): the trailing comparison lines of both macros were lost
 * in extraction — restore before compiling.
 */
58 #define min(a, b) ({ \
59 __typeof(a) _a = (a); \
60 __typeof(b) _b = (b); \
64 #define max(a, b) ({ \
65 __typeof(a) _a = (a); \
66 __typeof(b) _b = (b); \
/* Per-engine MMIO register offsets.  Each register exists once per
 * engine at a fixed stride: render (RCS) at 0x0xxxx, video (VCS0) at
 * 0x1xxxx, blitter (BCS) at 0x2xxxx.
 */
/* Hardware status page address. */
70 #define HWS_PGA_RCSUNIT 0x02080
71 #define HWS_PGA_VCSUNIT0 0x12080
72 #define HWS_PGA_BCSUNIT 0x22080
/* GFX_MODE: masked-write register used below to enable execlists. */
74 #define GFX_MODE_RCSUNIT 0x0229c
75 #define GFX_MODE_VCSUNIT0 0x1229c
76 #define GFX_MODE_BCSUNIT 0x2229c
/* Execlist submit port (ELSP), written four dwords at a time pre-gen11. */
78 #define EXECLIST_SUBMITPORT_RCSUNIT 0x02230
79 #define EXECLIST_SUBMITPORT_VCSUNIT0 0x12230
80 #define EXECLIST_SUBMITPORT_BCSUNIT 0x22230
/* Execlist status, polled after submission. */
82 #define EXECLIST_STATUS_RCSUNIT 0x02234
83 #define EXECLIST_STATUS_VCSUNIT0 0x12234
84 #define EXECLIST_STATUS_BCSUNIT 0x22234
/* Gen11+ submit queue contents (ELSQ). */
86 #define EXECLIST_SQ_CONTENTS0_RCSUNIT 0x02510
87 #define EXECLIST_SQ_CONTENTS0_VCSUNIT0 0x12510
88 #define EXECLIST_SQ_CONTENTS0_BCSUNIT 0x22510
/* Gen11+ execlist control: load the ELSQ contents. */
90 #define EXECLIST_CONTROL_RCSUNIT 0x02550
91 #define EXECLIST_CONTROL_VCSUNIT0 0x12550
92 #define EXECLIST_CONTROL_BCSUNIT 0x22550
/* Total GTT-mapped memory the dumper models (see "max we can handle is
 * 64M" in write_legacy_header below).
 */
94 #define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)
97 #define GEN8_PTE_SIZE 8
/* One PTE per 4 KiB page of the memory map. */
99 #define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
100 #define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)
/* Sizes of the fixed objects placed in the static GGTT map: one ring
 * page and one per-process HW status page per engine, plus the logical
 * ring context image whose size varies per generation.
 */
102 #define RING_SIZE (1 * 4096)
103 #define PPHWSP_SIZE (1 * 4096)
104 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * 4096)
105 #define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
106 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * 4096)
107 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * 4096)
108 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * 4096)
/* Static GGTT layout: ring then context image for each engine, packed
 * back-to-back starting at GGTT address 0.
 */
111 #define STATIC_GGTT_MAP_START 0
113 #define RENDER_RING_ADDR STATIC_GGTT_MAP_START
114 #define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)
116 #define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
117 #define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)
119 #define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
120 #define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)
122 #define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
123 #define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)
/* Physical address of the PPGTT top-level page table; page-table pages
 * are allocated upward from here (see populate_ppgtt_table).
 */
125 #define PML4_PHYS_ADDR ((uint64_t)(STATIC_GGTT_MAP_END))
127 #define CONTEXT_FLAGS (0x339) /* Normal Priority | L3-LLC Coherency |
* Legacy Context with 64 bit VA support |
/* Context descriptors: unique sw context id in the high bits, GGTT
 * address of the context image, plus CONTEXT_FLAGS.
 */
133 #define RENDER_CONTEXT_DESCRIPTOR ((uint64_t)1 << 62 | RENDER_CONTEXT_ADDR | CONTEXT_FLAGS)
134 #define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 62 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
135 #define VIDEO_CONTEXT_DESCRIPTOR ((uint64_t)3 << 62 | VIDEO_CONTEXT_ADDR | CONTEXT_FLAGS)
/* Golden register state written into the render engine's logical ring
 * context image: MI_LOAD_REGISTER_IMM headers followed by
 * (MMIO offset, value) pairs, consumed by the GPU on context restore.
 * NOTE(review): interior lines (including the closing "};") were lost
 * in extraction — restore before compiling.
 */
137 static const uint32_t render_context_init
[GEN9_LR_CONTEXT_RENDER_SIZE
/ /* Choose the largest */
138 sizeof(uint32_t)] = {
140 MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED
,
141 0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
142 0x2034 /* RING_HEAD */, 0,
143 0x2030 /* RING_TAIL */, 0,
144 0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR
,
145 0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
146 0x2168 /* BB_HEAD_U */, 0,
147 0x2140 /* BB_HEAD_L */, 0,
148 0x2110 /* BB_STATE */, 0,
149 0x211C /* SECOND_BB_HEAD_U */, 0,
150 0x2114 /* SECOND_BB_HEAD_L */, 0,
151 0x2118 /* SECOND_BB_STATE */, 0,
152 0x21C0 /* BB_PER_CTX_PTR */, 0,
153 0x21C4 /* RCS_INDIRECT_CTX */, 0,
154 0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
159 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
160 0x23A8 /* CTX_TIMESTAMP */, 0,
161 0x228C /* PDP3_UDW */, 0,
162 0x2288 /* PDP3_LDW */, 0,
163 0x2284 /* PDP2_UDW */, 0,
164 0x2280 /* PDP2_LDW */, 0,
165 0x227C /* PDP1_UDW */, 0,
166 0x2278 /* PDP1_LDW */, 0,
167 0x2274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
168 0x2270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
170 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
173 MI_LOAD_REGISTER_IMM_n(1),
174 0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
/* Golden register state for the blitter (BCS) logical ring context;
 * same structure as render_context_init but with 0x22xxx offsets and
 * no render-only registers (BB_PER_CTX_PTR / RCS_INDIRECT_CTX).
 * NOTE(review): interior lines (including the closing "};") were lost
 * in extraction — restore before compiling.
 */
178 static const uint32_t blitter_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
179 sizeof(uint32_t)] = {
181 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
182 0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
183 0x22034 /* RING_HEAD */, 0,
184 0x22030 /* RING_TAIL */, 0,
185 0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR
,
186 0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
187 0x22168 /* BB_HEAD_U */, 0,
188 0x22140 /* BB_HEAD_L */, 0,
189 0x22110 /* BB_STATE */, 0,
190 0x2211C /* SECOND_BB_HEAD_U */, 0,
191 0x22114 /* SECOND_BB_HEAD_L */, 0,
192 0x22118 /* SECOND_BB_STATE */, 0,
194 0, 0, 0, 0, 0, 0, 0, 0,
197 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
198 0x223A8 /* CTX_TIMESTAMP */, 0,
199 0x2228C /* PDP3_UDW */, 0,
200 0x22288 /* PDP3_LDW */, 0,
201 0x22284 /* PDP2_UDW */, 0,
202 0x22280 /* PDP2_LDW */, 0,
203 0x2227C /* PDP1_UDW */, 0,
204 0x22278 /* PDP1_LDW */, 0,
205 0x22274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
206 0x22270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
208 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* Golden register state for the video (VCS0) logical ring context;
 * same structure as blitter_context_init with 0x1Cxxx offsets.
 * NOTE(review): interior lines (including the closing "};") were lost
 * in extraction — restore before compiling.
 */
213 static const uint32_t video_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
214 sizeof(uint32_t)] = {
216 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
217 0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
218 0x1C034 /* RING_HEAD */, 0,
219 0x1C030 /* RING_TAIL */, 0,
220 0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR
,
221 0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
222 0x1C168 /* BB_HEAD_U */, 0,
223 0x1C140 /* BB_HEAD_L */, 0,
224 0x1C110 /* BB_STATE */, 0,
225 0x1C11C /* SECOND_BB_HEAD_U */, 0,
226 0x1C114 /* SECOND_BB_HEAD_L */, 0,
227 0x1C118 /* SECOND_BB_STATE */, 0,
229 0, 0, 0, 0, 0, 0, 0, 0,
232 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
233 0x1C3A8 /* CTX_TIMESTAMP */, 0,
234 0x1C28C /* PDP3_UDW */, 0,
235 0x1C288 /* PDP3_LDW */, 0,
236 0x1C284 /* PDP2_UDW */, 0,
237 0x1C280 /* PDP2_LDW */, 0,
238 0x1C27C /* PDP1_UDW */, 0,
239 0x1C278 /* PDP1_LDW */, 0,
240 0x1C274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
241 0x1C270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
243 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* Lazy-resolution trampolines: libc_close/libc_ioctl initially point at
 * these helpers, which dlsym the real libc entry points on first use.
 */
248 static int close_init_helper(int fd
);
249 static int ioctl_init_helper(int fd
, unsigned long request
, ...);
251 static int (*libc_close
)(int fd
) = close_init_helper
;
252 static int (*libc_ioctl
)(int fd
, unsigned long request
, ...) = ioctl_init_helper
;
/* Interposer state: the intercepted DRM fd, the AUB output file(s)
 * (files[0] = output file, files[1] = piped viewer command), the
 * detected/overridden PCI device id and derived device info.
 */
254 static int drm_fd
= -1;
255 static char *filename
= NULL
;
256 static FILE *files
[2] = { NULL
, NULL
};
257 static struct gen_device_info devinfo
= {0};
258 static int verbose
= 0;
259 static bool device_override
;
260 static uint32_t device
;
/* 48 on gen8+ (full PPGTT), 32 otherwise; set in dump_execbuffer2. */
261 static int addr_bits
= 0;
/* GEM handle-indexed table of tracked buffer objects. */
263 #define MAX_BO_COUNT 64 * 1024
271 static struct bo
*bos
;
/* Character-device major number for DRM nodes, used to recognize
 * ioctls aimed at the GPU.
 */
273 #define DRM_MAJOR 226
/* Fallback definitions for builds against old kernel headers that
 * predate the userptr ioctl.
 */
275 #ifndef DRM_I915_GEM_USERPTR
277 #define DRM_I915_GEM_USERPTR 0x33
278 #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
280 struct drm_i915_gem_userptr
{
284 #define I915_USERPTR_READ_ONLY 0x1
285 #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
287 * Returned handle for the object.
289 * Object handles are nonzero.
296 /* We set bit 0 in the map pointer for userptr BOs so we know not to
297 * munmap them on DRM_IOCTL_GEM_CLOSE.
299 #define USERPTR_FLAG 1
300 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
301 #define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
303 #ifndef I915_EXEC_BATCH_FIRST
304 #define I915_EXEC_BATCH_FIRST (1 << 18)
307 static inline bool use_execlists(void)
309 return devinfo
.gen
>= 8;
/* printf-style fatal-check helper: when `cond` is non-zero, print the
 * formatted message to stderr and raise SIGTRAP so a debugger stops at
 * the failure point.  No-op when `cond` is zero.  Always balances
 * va_start with va_end.
 */
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
   va_list args;

   if (!cond)
      return;

   va_start(args, format);
   vfprintf(stderr, format, args);
   va_end(args);

   raise(SIGTRAP);
}
/* Fetch the tracking entry for a GEM buffer handle, trapping on
 * out-of-range handles.  NOTE(review): the return-type line and the
 * body's lookup (presumably &bos[handle]) were lost in extraction —
 * confirm against upstream before compiling.
 */
328 get_bo(uint32_t handle
)
332 fail_if(handle
>= MAX_BO_COUNT
, "bo handle too large\n");
/* Round `v` up to the next multiple of `a`; `a` must be a power of two. */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) & ~(a - 1);
}
345 dword_out(uint32_t data
)
347 for (int i
= 0; i
< ARRAY_SIZE (files
); i
++) {
348 if (files
[i
] == NULL
)
351 fail_if(fwrite(&data
, 1, 4, files
[i
]) == 0,
352 "Writing to output failed\n");
357 data_out(const void *data
, size_t size
)
362 for (int i
= 0; i
< ARRAY_SIZE (files
); i
++) {
363 if (files
[i
] == NULL
)
366 fail_if(fwrite(data
, 1, size
, files
[i
]) == 0,
367 "Writing to output failed\n");
374 return NUM_PT_ENTRIES
* (addr_bits
> 32 ? GEN8_PTE_SIZE
: PTE_SIZE
);
378 mem_trace_memory_write_header_out(uint64_t addr
, uint32_t len
,
381 uint32_t dwords
= ALIGN(len
, sizeof(uint32_t)) / sizeof(uint32_t);
383 dword_out(CMD_MEM_TRACE_MEMORY_WRITE
| (5 + dwords
- 1));
384 dword_out(addr
& 0xFFFFFFFF); /* addr lo */
385 dword_out(addr
>> 32); /* addr hi */
386 dword_out(addr_space
); /* gtt */
391 register_write_out(uint32_t addr
, uint32_t value
)
395 dword_out(CMD_MEM_TRACE_REGISTER_WRITE
| (5 + dwords
- 1));
397 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
398 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
399 dword_out(0xFFFFFFFF); /* mask lo */
400 dword_out(0x00000000); /* mask hi */
404 static struct ppgtt_table
{
406 struct ppgtt_table
*subtables
[512];
407 } pml4
= {PML4_PHYS_ADDR
};
410 populate_ppgtt_table(struct ppgtt_table
*table
, int start
, int end
,
413 static uint64_t phys_addrs_allocator
= (PML4_PHYS_ADDR
>> 12) + 1;
414 uint64_t entries
[512] = {0};
415 int dirty_start
= 512, dirty_end
= 0;
417 for (int i
= start
; i
<= end
; i
++) {
418 if (!table
->subtables
[i
]) {
419 dirty_start
= min(dirty_start
, i
);
420 dirty_end
= max(dirty_end
, i
);
422 table
->subtables
[i
] =
423 (void *)(phys_addrs_allocator
++ << 12);
425 table
->subtables
[i
] =
426 calloc(1, sizeof(struct ppgtt_table
));
427 table
->subtables
[i
]->phys_addr
=
428 phys_addrs_allocator
++ << 12;
431 entries
[i
] = 3 /* read/write | present */ |
432 (level
== 1 ? (uint64_t)table
->subtables
[i
] :
433 table
->subtables
[i
]->phys_addr
);
436 if (dirty_start
<= dirty_end
) {
437 uint64_t write_addr
= table
->phys_addr
+ dirty_start
*
439 uint64_t write_size
= (dirty_end
- dirty_start
+ 1) *
441 mem_trace_memory_write_header_out(write_addr
, write_size
,
442 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
);
443 data_out(entries
+ dirty_start
, write_size
);
448 map_ppgtt(uint64_t start
, uint64_t size
)
450 uint64_t l4_start
= start
& 0xff8000000000;
451 uint64_t l3_start
= start
& 0xffffc0000000;
452 uint64_t l2_start
= start
& 0xffffffe00000;
453 uint64_t l1_start
= start
& 0xfffffffff000;
454 uint64_t l4_end
= ((start
+ size
- 1) | 0x007fffffffff) & 0xffffffffffff;
455 uint64_t l3_end
= ((start
+ size
- 1) | 0x00003fffffff) & 0xffffffffffff;
456 uint64_t l2_end
= ((start
+ size
- 1) | 0x0000001fffff) & 0xffffffffffff;
457 uint64_t l1_end
= ((start
+ size
- 1) | 0x000000000fff) & 0xffffffffffff;
459 #define L4_index(addr) (((addr) >> 39) & 0x1ff)
460 #define L3_index(addr) (((addr) >> 30) & 0x1ff)
461 #define L2_index(addr) (((addr) >> 21) & 0x1ff)
462 #define L1_index(addr) (((addr) >> 12) & 0x1ff)
464 #define L3_table(addr) (pml4.subtables[L4_index(addr)])
465 #define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
466 #define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])
468 populate_ppgtt_table(&pml4
, L4_index(l4_start
), L4_index(l4_end
), 4);
470 for (uint64_t a
= l4_start
; a
< l4_end
; a
+= (1ULL << 39)) {
471 uint64_t _start
= max(a
, l3_start
);
472 uint64_t _end
= min(a
+ (1ULL << 39), l3_end
);
474 populate_ppgtt_table(L3_table(a
), L3_index(_start
),
478 for (uint64_t a
= l3_start
; a
< l3_end
; a
+= (1ULL << 30)) {
479 uint64_t _start
= max(a
, l2_start
);
480 uint64_t _end
= min(a
+ (1ULL << 30), l2_end
);
482 populate_ppgtt_table(L2_table(a
), L2_index(_start
),
486 for (uint64_t a
= l2_start
; a
< l2_end
; a
+= (1ULL << 21)) {
487 uint64_t _start
= max(a
, l1_start
);
488 uint64_t _end
= min(a
+ (1ULL << 21), l1_end
);
490 populate_ppgtt_table(L1_table(a
), L1_index(_start
),
496 ppgtt_lookup(uint64_t ppgtt_addr
)
498 return (uint64_t)L1_table(ppgtt_addr
)->subtables
[L1_index(ppgtt_addr
)];
502 write_execlists_header(void)
504 char app_name
[8 * 4];
505 int app_name_len
, dwords
;
508 snprintf(app_name
, sizeof(app_name
), "PCI-ID=0x%X %s", device
,
509 program_invocation_short_name
);
510 app_name_len
= ALIGN(app_name_len
, sizeof(uint32_t));
512 dwords
= 5 + app_name_len
/ sizeof(uint32_t);
513 dword_out(CMD_MEM_TRACE_VERSION
| (dwords
- 1));
514 dword_out(AUB_MEM_TRACE_VERSION_FILE_VERSION
);
515 dword_out(AUB_MEM_TRACE_VERSION_DEVICE_CNL
);
516 dword_out(0); /* version */
517 dword_out(0); /* version */
518 data_out(app_name
, app_name_len
);
521 uint32_t ggtt_ptes
= STATIC_GGTT_MAP_SIZE
>> 12;
523 mem_trace_memory_write_header_out(STATIC_GGTT_MAP_START
>> 12,
524 ggtt_ptes
* GEN8_PTE_SIZE
,
525 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY
);
526 for (uint32_t i
= 0; i
< ggtt_ptes
; i
++) {
527 dword_out(1 + 0x1000 * i
+ STATIC_GGTT_MAP_START
);
532 mem_trace_memory_write_header_out(RENDER_RING_ADDR
, RING_SIZE
,
533 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
534 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
538 mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR
,
540 sizeof(render_context_init
),
541 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
542 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
546 data_out(render_context_init
, sizeof(render_context_init
));
549 mem_trace_memory_write_header_out(BLITTER_RING_ADDR
, RING_SIZE
,
550 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
551 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
555 mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR
,
557 sizeof(blitter_context_init
),
558 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
559 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
562 /* BLITTER_CONTEXT */
563 data_out(blitter_context_init
, sizeof(blitter_context_init
));
566 mem_trace_memory_write_header_out(VIDEO_RING_ADDR
, RING_SIZE
,
567 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
568 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
572 mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR
,
574 sizeof(video_context_init
),
575 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
576 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
580 data_out(video_context_init
, sizeof(video_context_init
));
582 register_write_out(HWS_PGA_RCSUNIT
, RENDER_CONTEXT_ADDR
);
583 register_write_out(HWS_PGA_VCSUNIT0
, VIDEO_CONTEXT_ADDR
);
584 register_write_out(HWS_PGA_BCSUNIT
, BLITTER_CONTEXT_ADDR
);
586 register_write_out(GFX_MODE_RCSUNIT
, 0x80008000 /* execlist enable */);
587 register_write_out(GFX_MODE_VCSUNIT0
, 0x80008000 /* execlist enable */);
588 register_write_out(GFX_MODE_BCSUNIT
, 0x80008000 /* execlist enable */);
591 static void write_legacy_header(void)
593 char app_name
[8 * 4];
595 int comment_len
, comment_dwords
, dwords
;
596 uint32_t entry
= 0x200003;
598 comment_len
= snprintf(comment
, sizeof(comment
), "PCI-ID=0x%x", device
);
599 comment_dwords
= ((comment_len
+ 3) / 4);
601 /* Start with a (required) version packet. */
602 dwords
= 13 + comment_dwords
;
603 dword_out(CMD_AUB_HEADER
| (dwords
- 2));
604 dword_out((4 << AUB_HEADER_MAJOR_SHIFT
) |
605 (0 << AUB_HEADER_MINOR_SHIFT
));
607 /* Next comes a 32-byte application name. */
608 strncpy(app_name
, program_invocation_short_name
, sizeof(app_name
));
609 app_name
[sizeof(app_name
) - 1] = 0;
610 data_out(app_name
, sizeof(app_name
));
612 dword_out(0); /* timestamp */
613 dword_out(0); /* timestamp */
614 dword_out(comment_len
);
615 data_out(comment
, comment_dwords
* 4);
617 /* Set up the GTT. The max we can handle is 64M */
618 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
| ((addr_bits
> 32 ? 6 : 5) - 2));
619 dword_out(AUB_TRACE_MEMTYPE_GTT_ENTRY
|
620 AUB_TRACE_TYPE_NOTYPE
| AUB_TRACE_OP_DATA_WRITE
);
621 dword_out(0); /* subtype */
622 dword_out(0); /* offset */
623 dword_out(gtt_size()); /* size */
626 for (uint32_t i
= 0; i
< NUM_PT_ENTRIES
; i
++) {
627 dword_out(entry
+ 0x1000 * i
);
634 * Break up large objects into multiple writes. Otherwise a 128kb VBO
635 * would overflow the 16 bits of size field in the packet header and
636 * everything goes badly after that.
639 aub_write_trace_block(uint32_t type
, void *virtual, uint32_t size
, uint64_t gtt_offset
)
642 uint32_t subtype
= 0;
643 static const char null_block
[8 * 4096];
645 for (uint32_t offset
= 0; offset
< size
; offset
+= block_size
) {
646 block_size
= min(8 * 4096, size
- offset
);
648 if (use_execlists()) {
649 block_size
= min(4096, block_size
);
650 mem_trace_memory_write_header_out(ppgtt_lookup(gtt_offset
+ offset
),
652 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
);
654 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
|
655 ((addr_bits
> 32 ? 6 : 5) - 2));
656 dword_out(AUB_TRACE_MEMTYPE_GTT
|
657 type
| AUB_TRACE_OP_DATA_WRITE
);
659 dword_out(gtt_offset
+ offset
);
660 dword_out(align_u32(block_size
, 4));
662 dword_out((gtt_offset
+ offset
) >> 32);
666 data_out(((char *) GET_PTR(virtual)) + offset
, block_size
);
668 data_out(null_block
, block_size
);
670 /* Pad to a multiple of 4 bytes. */
671 data_out(null_block
, -block_size
& 3);
676 write_reloc(void *p
, uint64_t v
)
678 if (addr_bits
> 32) {
679 /* From the Broadwell PRM Vol. 2a,
680 * MI_LOAD_REGISTER_MEM::MemoryAddress:
682 * "This field specifies the address of the memory
683 * location where the register value specified in the
684 * DWord above will read from. The address specifies
685 * the DWord location of the data. Range =
686 * GraphicsVirtualAddress[63:2] for a DWord register
687 * GraphicsAddress [63:48] are ignored by the HW and
688 * assumed to be in correct canonical form [63:48] ==
691 * In practice, this will always mean the top bits are zero
692 * because of the GTT size limitation of the aubdump tool.
694 const int shift
= 63 - 47;
695 *(uint64_t *)p
= (((int64_t)v
) << shift
) >> shift
;
702 aub_dump_execlist(uint64_t batch_offset
, int ring_flag
)
709 uint32_t control_reg
;
712 case I915_EXEC_DEFAULT
:
713 case I915_EXEC_RENDER
:
714 ring_addr
= RENDER_RING_ADDR
;
715 descriptor
= RENDER_CONTEXT_DESCRIPTOR
;
716 elsp_reg
= EXECLIST_SUBMITPORT_RCSUNIT
;
717 elsq_reg
= EXECLIST_SQ_CONTENTS0_RCSUNIT
;
718 status_reg
= EXECLIST_STATUS_RCSUNIT
;
719 control_reg
= EXECLIST_CONTROL_RCSUNIT
;
722 ring_addr
= VIDEO_RING_ADDR
;
723 descriptor
= VIDEO_CONTEXT_DESCRIPTOR
;
724 elsp_reg
= EXECLIST_SUBMITPORT_VCSUNIT0
;
725 elsq_reg
= EXECLIST_SQ_CONTENTS0_VCSUNIT0
;
726 status_reg
= EXECLIST_STATUS_VCSUNIT0
;
727 control_reg
= EXECLIST_CONTROL_VCSUNIT0
;
730 ring_addr
= BLITTER_RING_ADDR
;
731 descriptor
= BLITTER_CONTEXT_DESCRIPTOR
;
732 elsp_reg
= EXECLIST_SUBMITPORT_BCSUNIT
;
733 elsq_reg
= EXECLIST_SQ_CONTENTS0_BCSUNIT
;
734 status_reg
= EXECLIST_STATUS_BCSUNIT
;
735 control_reg
= EXECLIST_CONTROL_BCSUNIT
;
739 mem_trace_memory_write_header_out(ring_addr
, 16,
740 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
741 dword_out(AUB_MI_BATCH_BUFFER_START
| MI_BATCH_NON_SECURE_I965
| (3 - 2));
742 dword_out(batch_offset
& 0xFFFFFFFF);
743 dword_out(batch_offset
>> 32);
744 dword_out(0 /* MI_NOOP */);
746 mem_trace_memory_write_header_out(ring_addr
+ 8192 + 20, 4,
747 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
748 dword_out(0); /* RING_BUFFER_HEAD */
749 mem_trace_memory_write_header_out(ring_addr
+ 8192 + 28, 4,
750 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
751 dword_out(16); /* RING_BUFFER_TAIL */
753 if (devinfo
.gen
>= 11) {
754 register_write_out(elsq_reg
, descriptor
& 0xFFFFFFFF);
755 register_write_out(elsq_reg
+ sizeof(uint32_t), descriptor
>> 32);
756 register_write_out(control_reg
, 1);
758 register_write_out(elsp_reg
, 0);
759 register_write_out(elsp_reg
, 0);
760 register_write_out(elsp_reg
, descriptor
>> 32);
761 register_write_out(elsp_reg
, descriptor
& 0xFFFFFFFF);
764 dword_out(CMD_MEM_TRACE_REGISTER_POLL
| (5 + 1 - 1));
765 dword_out(status_reg
);
766 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
767 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
768 if (devinfo
.gen
>= 11) {
769 dword_out(0x00000001); /* mask lo */
770 dword_out(0x00000000); /* mask hi */
771 dword_out(0x00000001);
773 dword_out(0x00000010); /* mask lo */
774 dword_out(0x00000000); /* mask hi */
775 dword_out(0x00000000);
780 aub_dump_ringbuffer(uint64_t batch_offset
, uint64_t offset
, int ring_flag
)
782 uint32_t ringbuffer
[4096];
783 unsigned aub_mi_bbs_len
;
784 int ring
= AUB_TRACE_TYPE_RING_PRB0
; /* The default ring */
787 if (ring_flag
== I915_EXEC_BSD
)
788 ring
= AUB_TRACE_TYPE_RING_PRB1
;
789 else if (ring_flag
== I915_EXEC_BLT
)
790 ring
= AUB_TRACE_TYPE_RING_PRB2
;
792 /* Make a ring buffer to execute our batchbuffer. */
793 memset(ringbuffer
, 0, sizeof(ringbuffer
));
795 aub_mi_bbs_len
= addr_bits
> 32 ? 3 : 2;
796 ringbuffer
[ring_count
] = AUB_MI_BATCH_BUFFER_START
| (aub_mi_bbs_len
- 2);
797 write_reloc(&ringbuffer
[ring_count
+ 1], batch_offset
);
798 ring_count
+= aub_mi_bbs_len
;
800 /* Write out the ring. This appears to trigger execution of
801 * the ring in the simulator.
803 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
|
804 ((addr_bits
> 32 ? 6 : 5) - 2));
805 dword_out(AUB_TRACE_MEMTYPE_GTT
| ring
| AUB_TRACE_OP_COMMAND_WRITE
);
806 dword_out(0); /* general/surface subtype */
808 dword_out(ring_count
* 4);
810 dword_out(offset
>> 32);
812 data_out(ringbuffer
, ring_count
* 4);
816 relocate_bo(struct bo
*bo
, const struct drm_i915_gem_execbuffer2
*execbuffer2
,
817 const struct drm_i915_gem_exec_object2
*obj
)
819 const struct drm_i915_gem_exec_object2
*exec_objects
=
820 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
821 const struct drm_i915_gem_relocation_entry
*relocs
=
822 (const struct drm_i915_gem_relocation_entry
*) (uintptr_t) obj
->relocs_ptr
;
826 relocated
= malloc(bo
->size
);
827 fail_if(relocated
== NULL
, "intel_aubdump: out of memory\n");
828 memcpy(relocated
, GET_PTR(bo
->map
), bo
->size
);
829 for (size_t i
= 0; i
< obj
->relocation_count
; i
++) {
830 fail_if(relocs
[i
].offset
>= bo
->size
, "intel_aubdump: reloc outside bo\n");
832 if (execbuffer2
->flags
& I915_EXEC_HANDLE_LUT
)
833 handle
= exec_objects
[relocs
[i
].target_handle
].handle
;
835 handle
= relocs
[i
].target_handle
;
837 write_reloc(((char *)relocated
) + relocs
[i
].offset
,
838 get_bo(handle
)->offset
+ relocs
[i
].delta
);
/* Forward an ioctl to the real libc implementation, retrying while it
 * fails with EINTR or EAGAIN (signals / wedged GPU waits).  Returns the
 * final ioctl result.
 */
static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   int ret;

   do {
      ret = libc_ioctl(fd, request, argp);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
857 gem_mmap(int fd
, uint32_t handle
, uint64_t offset
, uint64_t size
)
859 struct drm_i915_gem_mmap mmap
= {
865 if (gem_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap
) == -1)
868 return (void *)(uintptr_t) mmap
.addr_ptr
;
872 gem_get_param(int fd
, uint32_t param
)
875 drm_i915_getparam_t gp
= {
880 if (gem_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gp
) == -1)
887 dump_execbuffer2(int fd
, struct drm_i915_gem_execbuffer2
*execbuffer2
)
889 struct drm_i915_gem_exec_object2
*exec_objects
=
890 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
891 uint32_t ring_flag
= execbuffer2
->flags
& I915_EXEC_RING_MASK
;
893 struct drm_i915_gem_exec_object2
*obj
;
894 struct bo
*bo
, *batch_bo
;
898 /* We can't do this at open time as we're not yet authenticated. */
900 device
= gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
901 fail_if(device
== 0 || devinfo
.gen
== 0, "failed to identify chipset\n");
903 if (devinfo
.gen
== 0) {
904 fail_if(!gen_get_device_info(device
, &devinfo
),
905 "failed to identify chipset=0x%x\n", device
);
907 addr_bits
= devinfo
.gen
>= 8 ? 48 : 32;
910 write_execlists_header();
912 write_legacy_header();
915 printf("[intel_aubdump: running, "
916 "output file %s, chipset id 0x%04x, gen %d]\n",
917 filename
, device
, devinfo
.gen
);
926 printf("Dumping execbuffer2:\n");
928 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
929 obj
= &exec_objects
[i
];
930 bo
= get_bo(obj
->handle
);
932 /* If bo->size == 0, this means they passed us an invalid
933 * buffer. The kernel will reject it and so should we.
937 printf("BO #%d is invalid!\n", obj
->handle
);
941 if (obj
->flags
& EXEC_OBJECT_PINNED
) {
942 bo
->offset
= obj
->offset
;
944 printf("BO #%d (%dB) pinned @ 0x%lx\n",
945 obj
->handle
, bo
->size
, bo
->offset
);
947 if (obj
->alignment
!= 0)
948 offset
= align_u32(offset
, obj
->alignment
);
951 printf("BO #%d (%dB) @ 0x%lx\n", obj
->handle
,
952 bo
->size
, bo
->offset
);
953 offset
= align_u32(offset
+ bo
->size
+ 4095, 4096);
956 if (bo
->map
== NULL
&& bo
->size
> 0)
957 bo
->map
= gem_mmap(fd
, obj
->handle
, 0, bo
->size
);
958 fail_if(bo
->map
== MAP_FAILED
, "intel_aubdump: bo mmap failed\n");
961 map_ppgtt(bo
->offset
, bo
->size
);
964 batch_index
= (execbuffer2
->flags
& I915_EXEC_BATCH_FIRST
) ? 0 :
965 execbuffer2
->buffer_count
- 1;
966 batch_bo
= get_bo(exec_objects
[batch_index
].handle
);
967 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
968 obj
= &exec_objects
[i
];
969 bo
= get_bo(obj
->handle
);
971 if (obj
->relocation_count
> 0)
972 data
= relocate_bo(bo
, execbuffer2
, obj
);
976 if (bo
== batch_bo
) {
977 aub_write_trace_block(AUB_TRACE_TYPE_BATCH
,
978 data
, bo
->size
, bo
->offset
);
980 aub_write_trace_block(AUB_TRACE_TYPE_NOTYPE
,
981 data
, bo
->size
, bo
->offset
);
987 if (use_execlists()) {
988 aub_dump_execlist(batch_bo
->offset
+
989 execbuffer2
->batch_start_offset
, ring_flag
);
991 /* Dump ring buffer */
992 aub_dump_ringbuffer(batch_bo
->offset
+
993 execbuffer2
->batch_start_offset
, offset
,
997 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
998 if (files
[i
] != NULL
)
1002 if (device_override
&&
1003 (execbuffer2
->flags
& I915_EXEC_FENCE_ARRAY
) != 0) {
1004 struct drm_i915_gem_exec_fence
*fences
=
1005 (void*)(uintptr_t)execbuffer2
->cliprects_ptr
;
1006 for (uint32_t i
= 0; i
< execbuffer2
->num_cliprects
; i
++) {
1007 if ((fences
[i
].flags
& I915_EXEC_FENCE_SIGNAL
) != 0) {
1008 struct drm_syncobj_array arg
= {
1009 .handles
= (uintptr_t)&fences
[i
].handle
,
1013 libc_ioctl(fd
, DRM_IOCTL_SYNCOBJ_SIGNAL
, &arg
);
1020 add_new_bo(int handle
, uint64_t size
, void *map
)
1022 struct bo
*bo
= &bos
[handle
];
1024 fail_if(handle
>= MAX_BO_COUNT
, "intel_aubdump: bo handle out of range\n");
1025 fail_if(size
== 0, "intel_aubdump: bo size is invalid\n");
1032 remove_bo(int handle
)
1034 struct bo
*bo
= get_bo(handle
);
1036 if (bo
->map
&& !IS_USERPTR(bo
->map
))
1037 munmap(bo
->map
, bo
->size
);
1042 __attribute__ ((visibility ("default"))) int
1048 return libc_close(fd
);
1052 launch_command(char *command
)
1055 char **args
= calloc(strlen(command
), sizeof(char *));
1056 char *iter
= command
;
1058 args
[i
++] = iter
= command
;
1060 while ((iter
= strstr(iter
, ",")) != NULL
) {
1066 if (pipe(fds
) == -1)
1072 fail_if(execvp(args
[0], args
) == -1,
1073 "intel_aubdump: failed to launch child command\n");
1078 return fdopen(fds
[1], "w");
1088 static bool initialized
= false;
1097 config
= fdopen(3, "r");
1098 while (fscanf(config
, "%m[^=]=%m[^\n]\n", &key
, &value
) != EOF
) {
1099 if (!strcmp(key
, "verbose")) {
1101 } else if (!strcmp(key
, "device")) {
1102 fail_if(sscanf(value
, "%i", &device
) != 1,
1103 "intel_aubdump: failed to parse device id '%s'",
1105 device_override
= true;
1106 } else if (!strcmp(key
, "file")) {
1107 filename
= strdup(value
);
1108 files
[0] = fopen(filename
, "w+");
1109 fail_if(files
[0] == NULL
,
1110 "intel_aubdump: failed to open file '%s'\n",
1112 } else if (!strcmp(key
, "command")) {
1113 files
[1] = launch_command(value
);
1114 fail_if(files
[1] == NULL
,
1115 "intel_aubdump: failed to launch command '%s'\n",
1118 fprintf(stderr
, "intel_aubdump: unknown option '%s'\n", key
);
1126 bos
= calloc(MAX_BO_COUNT
, sizeof(bos
[0]));
1127 fail_if(bos
== NULL
, "intel_aubdump: out of memory\n");
1130 #define LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR \
1131 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
1133 __attribute__ ((visibility ("default"))) int
1134 ioctl(int fd
, unsigned long request
, ...)
1141 va_start(args
, request
);
1142 argp
= va_arg(args
, void *);
1145 if (_IOC_TYPE(request
) == DRM_IOCTL_BASE
&&
1146 drm_fd
!= fd
&& fstat(fd
, &buf
) == 0 &&
1147 (buf
.st_mode
& S_IFMT
) == S_IFCHR
&& major(buf
.st_rdev
) == DRM_MAJOR
) {
1150 printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd
);
1157 case DRM_IOCTL_I915_GETPARAM
: {
1158 struct drm_i915_getparam
*getparam
= argp
;
1160 if (device_override
&& getparam
->param
== I915_PARAM_CHIPSET_ID
) {
1161 *getparam
->value
= device
;
1165 ret
= libc_ioctl(fd
, request
, argp
);
1167 /* If the application looks up chipset_id
1168 * (they typically do), we'll piggy-back on
1169 * their ioctl and store the id for later
1171 if (getparam
->param
== I915_PARAM_CHIPSET_ID
)
1172 device
= *getparam
->value
;
1177 case DRM_IOCTL_I915_GEM_EXECBUFFER
: {
1180 fprintf(stderr
, "intel_aubdump: "
1181 "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
1184 return libc_ioctl(fd
, request
, argp
);
1187 case DRM_IOCTL_I915_GEM_EXECBUFFER2
:
1188 case LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR
: {
1189 dump_execbuffer2(fd
, argp
);
1190 if (device_override
)
1193 return libc_ioctl(fd
, request
, argp
);
1196 case DRM_IOCTL_I915_GEM_CREATE
: {
1197 struct drm_i915_gem_create
*create
= argp
;
1199 ret
= libc_ioctl(fd
, request
, argp
);
1201 add_new_bo(create
->handle
, create
->size
, NULL
);
1206 case DRM_IOCTL_I915_GEM_USERPTR
: {
1207 struct drm_i915_gem_userptr
*userptr
= argp
;
1209 ret
= libc_ioctl(fd
, request
, argp
);
1211 add_new_bo(userptr
->handle
, userptr
->user_size
,
1212 (void *) (uintptr_t) (userptr
->user_ptr
| USERPTR_FLAG
));
1216 case DRM_IOCTL_GEM_CLOSE
: {
1217 struct drm_gem_close
*close
= argp
;
1219 remove_bo(close
->handle
);
1221 return libc_ioctl(fd
, request
, argp
);
1224 case DRM_IOCTL_GEM_OPEN
: {
1225 struct drm_gem_open
*open
= argp
;
1227 ret
= libc_ioctl(fd
, request
, argp
);
1229 add_new_bo(open
->handle
, open
->size
, NULL
);
1234 case DRM_IOCTL_PRIME_FD_TO_HANDLE
: {
1235 struct drm_prime_handle
*prime
= argp
;
1237 ret
= libc_ioctl(fd
, request
, argp
);
1241 size
= lseek(prime
->fd
, 0, SEEK_END
);
1242 fail_if(size
== -1, "intel_aubdump: failed to get prime bo size\n");
1243 add_new_bo(prime
->handle
, size
, NULL
);
1250 return libc_ioctl(fd
, request
, argp
);
1253 return libc_ioctl(fd
, request
, argp
);
1260 libc_close
= dlsym(RTLD_NEXT
, "close");
1261 libc_ioctl
= dlsym(RTLD_NEXT
, "ioctl");
1262 fail_if(libc_close
== NULL
|| libc_ioctl
== NULL
,
1263 "intel_aubdump: failed to get libc ioctl or close\n");
1267 close_init_helper(int fd
)
1270 return libc_close(fd
);
1274 ioctl_init_helper(int fd
, unsigned long request
, ...)
1279 va_start(args
, request
);
1280 argp
= va_arg(args
, void *);
1284 return libc_ioctl(fd
, request
, argp
);
1287 static void __attribute__ ((destructor
))
1291 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
1292 if (files
[i
] != NULL
)