2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
35 #include <sys/ioctl.h>
42 #include "intel_aub.h"
44 #include "dev/gen_device_info.h"
45 #include "util/macros.h"
/* Round x up to the next multiple of y; y must be a power of two. */
#define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
/* MI (Memory Interface) command headers used when building ring/context
 * contents. The low bits of each header encode the DWord length.
 */
/* LRI writing n register/value pairs: payload is 2*n DWords. */
#define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
/* Bit 12 of the LRI header: force the register writes to be posted. */
#define MI_LRI_FORCE_POSTED (1<<12)
/* Batch executes in the non-privileged (non-secure) address space. */
#define MI_BATCH_NON_SECURE_I965 (1 << 8)
/* Terminates a batch buffer. */
#define MI_BATCH_BUFFER_END (0xA << 23)
58 #define min(a, b) ({ \
59 __typeof(a) _a = (a); \
60 __typeof(b) _b = (b); \
64 #define max(a, b) ({ \
65 __typeof(a) _a = (a); \
66 __typeof(b) _b = (b); \
/* Per-engine MMIO register offsets. The same register exists in each
 * engine's range: render (RCS) at 0x0xxxx, video (VCS0) at 0x1xxxx and
 * blitter (BCS) at 0x2xxxx.
 */
/* Hardware status page address. */
#define HWS_PGA_RCSUNIT 0x02080
#define HWS_PGA_VCSUNIT0 0x12080
#define HWS_PGA_BCSUNIT 0x22080
/* GFX_MODE: used below to enable execlist submission. */
#define GFX_MODE_RCSUNIT 0x0229c
#define GFX_MODE_VCSUNIT0 0x1229c
#define GFX_MODE_BCSUNIT 0x2229c
/* Execlist submit port (ELSP): context descriptors are written here. */
#define EXECLIST_SUBMITPORT_RCSUNIT 0x02230
#define EXECLIST_SUBMITPORT_VCSUNIT0 0x12230
#define EXECLIST_SUBMITPORT_BCSUNIT 0x22230
/* Execlist status register, polled to wait for completion. */
#define EXECLIST_STATUS_RCSUNIT 0x02234
#define EXECLIST_STATUS_VCSUNIT0 0x12234
#define EXECLIST_STATUS_BCSUNIT 0x22234
/* Gen11+ submit queue contents (ELSQ), lower DWord of slot 0. */
#define EXECLIST_SQ_CONTENTS0_RCSUNIT 0x02510
#define EXECLIST_SQ_CONTENTS0_VCSUNIT0 0x12510
#define EXECLIST_SQ_CONTENTS0_BCSUNIT 0x22510
/* Gen11+ execlist control register: written to load the submit queue. */
#define EXECLIST_CONTROL_RCSUNIT 0x02550
#define EXECLIST_CONTROL_VCSUNIT0 0x12550
#define EXECLIST_CONTROL_BCSUNIT 0x22550
/* Total aperture the dumper models: 64 MiB. */
#define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)
/* A gen8+ page-table entry is 64 bits. */
#define GEN8_PTE_SIZE 8
/* One PTE per 4 KiB page of the memory map. */
#define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
#define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)
/* Fixed sizes of the GGTT objects written by write_execlists_header():
 * one-page rings, one-page per-process HW status page, and the
 * per-generation logical ring context image sizes.
 */
#define RING_SIZE (1 * 4096)
#define PPHWSP_SIZE (1 * 4096)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * 4096)
#define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * 4096)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * 4096)
/* Non-render engines use a much smaller context image. */
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * 4096)
111 #define STATIC_GGTT_MAP_START 0
113 #define RENDER_RING_ADDR STATIC_GGTT_MAP_START
114 #define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)
116 #define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
117 #define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)
119 #define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
120 #define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)
122 #define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
123 #define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)
125 #define PML4_PHYS_ADDR ((uint64_t)(STATIC_GGTT_MAP_END))
127 #define CONTEXT_FLAGS (0x339) /* Normal Priority | L3-LLC Coherency |
129 * Legacy Context with 64 bit VA support |
133 #define RENDER_CONTEXT_DESCRIPTOR ((uint64_t)1 << 62 | RENDER_CONTEXT_ADDR | CONTEXT_FLAGS)
134 #define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 62 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
135 #define VIDEO_CONTEXT_DESCRIPTOR ((uint64_t)3 << 62 | VIDEO_CONTEXT_ADDR | CONTEXT_FLAGS)
137 static const uint32_t render_context_init
[GEN9_LR_CONTEXT_RENDER_SIZE
/ /* Choose the largest */
138 sizeof(uint32_t)] = {
140 MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED
,
141 0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
142 0x2034 /* RING_HEAD */, 0,
143 0x2030 /* RING_TAIL */, 0,
144 0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR
,
145 0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
146 0x2168 /* BB_HEAD_U */, 0,
147 0x2140 /* BB_HEAD_L */, 0,
148 0x2110 /* BB_STATE */, 0,
149 0x211C /* SECOND_BB_HEAD_U */, 0,
150 0x2114 /* SECOND_BB_HEAD_L */, 0,
151 0x2118 /* SECOND_BB_STATE */, 0,
152 0x21C0 /* BB_PER_CTX_PTR */, 0,
153 0x21C4 /* RCS_INDIRECT_CTX */, 0,
154 0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
159 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
160 0x23A8 /* CTX_TIMESTAMP */, 0,
161 0x228C /* PDP3_UDW */, 0,
162 0x2288 /* PDP3_LDW */, 0,
163 0x2284 /* PDP2_UDW */, 0,
164 0x2280 /* PDP2_LDW */, 0,
165 0x227C /* PDP1_UDW */, 0,
166 0x2278 /* PDP1_LDW */, 0,
167 0x2274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
168 0x2270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
170 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
173 MI_LOAD_REGISTER_IMM_n(1),
174 0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
178 static const uint32_t blitter_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
179 sizeof(uint32_t)] = {
181 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
182 0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
183 0x22034 /* RING_HEAD */, 0,
184 0x22030 /* RING_TAIL */, 0,
185 0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR
,
186 0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
187 0x22168 /* BB_HEAD_U */, 0,
188 0x22140 /* BB_HEAD_L */, 0,
189 0x22110 /* BB_STATE */, 0,
190 0x2211C /* SECOND_BB_HEAD_U */, 0,
191 0x22114 /* SECOND_BB_HEAD_L */, 0,
192 0x22118 /* SECOND_BB_STATE */, 0,
194 0, 0, 0, 0, 0, 0, 0, 0,
197 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
198 0x223A8 /* CTX_TIMESTAMP */, 0,
199 0x2228C /* PDP3_UDW */, 0,
200 0x22288 /* PDP3_LDW */, 0,
201 0x22284 /* PDP2_UDW */, 0,
202 0x22280 /* PDP2_LDW */, 0,
203 0x2227C /* PDP1_UDW */, 0,
204 0x22278 /* PDP1_LDW */, 0,
205 0x22274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
206 0x22270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
208 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
213 static const uint32_t video_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
214 sizeof(uint32_t)] = {
216 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
217 0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
218 0x1C034 /* RING_HEAD */, 0,
219 0x1C030 /* RING_TAIL */, 0,
220 0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR
,
221 0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
222 0x1C168 /* BB_HEAD_U */, 0,
223 0x1C140 /* BB_HEAD_L */, 0,
224 0x1C110 /* BB_STATE */, 0,
225 0x1C11C /* SECOND_BB_HEAD_U */, 0,
226 0x1C114 /* SECOND_BB_HEAD_L */, 0,
227 0x1C118 /* SECOND_BB_STATE */, 0,
229 0, 0, 0, 0, 0, 0, 0, 0,
232 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
233 0x1C3A8 /* CTX_TIMESTAMP */, 0,
234 0x1C28C /* PDP3_UDW */, 0,
235 0x1C288 /* PDP3_LDW */, 0,
236 0x1C284 /* PDP2_UDW */, 0,
237 0x1C280 /* PDP2_LDW */, 0,
238 0x1C27C /* PDP1_UDW */, 0,
239 0x1C278 /* PDP1_LDW */, 0,
240 0x1C274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
241 0x1C270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
243 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
248 static int close_init_helper(int fd
);
249 static int ioctl_init_helper(int fd
, unsigned long request
, ...);
251 static int (*libc_close
)(int fd
) = close_init_helper
;
252 static int (*libc_ioctl
)(int fd
, unsigned long request
, ...) = ioctl_init_helper
;
254 static int drm_fd
= -1;
255 static char *filename
= NULL
;
256 static FILE *files
[2] = { NULL
, NULL
};
257 static struct gen_device_info devinfo
= {0};
258 static int verbose
= 0;
259 static bool device_override
;
260 static uint32_t device
;
261 static int addr_bits
= 0;
/* Upper bound on GEM handles we track; handles index directly into the
 * "bos" array. Parenthesized so the macro expands safely inside larger
 * expressions (e.g. division or a following "*").
 */
#define MAX_BO_COUNT (64 * 1024)
271 static struct bo
*bos
;
/* Major number of DRM character devices on Linux; used to recognize
 * ioctls issued on a DRM fd. */
#define DRM_MAJOR 226
275 /* We set bit 0 in the map pointer for userptr BOs so we know not to
276 * munmap them on DRM_IOCTL_GEM_CLOSE.
/* Tag bit stored in the low bit of a BO's map pointer for userptr BOs
 * (the mapping belongs to the application, so it must not be munmap'ed
 * on DRM_IOCTL_GEM_CLOSE). Page-aligned mappings guarantee bit 0 is
 * otherwise zero.
 */
#define USERPTR_FLAG 1
/* True if the map pointer carries the userptr tag. */
#define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
/* Strip the tag bit to recover the real mapping address.
 * Note: argument parenthesized and the named flag used instead of a
 * magic literal, so the macro expands safely and stays in sync with
 * USERPTR_FLAG. */
#define GET_PTR(p) ((void *) ((uintptr_t) (p) & ~(uintptr_t) USERPTR_FLAG))
/* Gen8+ submits work via execlists rather than the legacy ring; several
 * code paths below branch on this. Reads the global devinfo, which is
 * filled in lazily on the first execbuffer. */
static inline bool use_execlists(void)
{
   return devinfo.gen >= 8;
}
287 static void __attribute__ ((format(__printf__
, 2, 3)))
288 fail_if(int cond
, const char *format
, ...)
295 va_start(args
, format
);
296 vfprintf(stderr
, format
, args
);
303 get_bo(uint32_t handle
)
307 fail_if(handle
>= MAX_BO_COUNT
, "bo handle too large\n");
/* Round "v" up to the next multiple of "a"; "a" must be a power of two. */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   const uint32_t mask = a - 1;

   return (v + mask) & ~mask;
}
320 dword_out(uint32_t data
)
322 for (int i
= 0; i
< ARRAY_SIZE (files
); i
++) {
323 if (files
[i
] == NULL
)
326 fail_if(fwrite(&data
, 1, 4, files
[i
]) == 0,
327 "Writing to output failed\n");
332 data_out(const void *data
, size_t size
)
337 for (int i
= 0; i
< ARRAY_SIZE (files
); i
++) {
338 if (files
[i
] == NULL
)
341 fail_if(fwrite(data
, 1, size
, files
[i
]) == 0,
342 "Writing to output failed\n");
349 return NUM_PT_ENTRIES
* (addr_bits
> 32 ? GEN8_PTE_SIZE
: PTE_SIZE
);
353 mem_trace_memory_write_header_out(uint64_t addr
, uint32_t len
,
356 uint32_t dwords
= ALIGN(len
, sizeof(uint32_t)) / sizeof(uint32_t);
358 dword_out(CMD_MEM_TRACE_MEMORY_WRITE
| (5 + dwords
- 1));
359 dword_out(addr
& 0xFFFFFFFF); /* addr lo */
360 dword_out(addr
>> 32); /* addr hi */
361 dword_out(addr_space
); /* gtt */
366 register_write_out(uint32_t addr
, uint32_t value
)
370 dword_out(CMD_MEM_TRACE_REGISTER_WRITE
| (5 + dwords
- 1));
372 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
373 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
374 dword_out(0xFFFFFFFF); /* mask lo */
375 dword_out(0x00000000); /* mask hi */
379 static struct ppgtt_table
{
381 struct ppgtt_table
*subtables
[512];
382 } pml4
= {PML4_PHYS_ADDR
};
385 populate_ppgtt_table(struct ppgtt_table
*table
, int start
, int end
,
388 static uint64_t phys_addrs_allocator
= (PML4_PHYS_ADDR
>> 12) + 1;
389 uint64_t entries
[512] = {0};
390 int dirty_start
= 512, dirty_end
= 0;
392 for (int i
= start
; i
<= end
; i
++) {
393 if (!table
->subtables
[i
]) {
394 dirty_start
= min(dirty_start
, i
);
395 dirty_end
= max(dirty_end
, i
);
397 table
->subtables
[i
] =
398 (void *)(phys_addrs_allocator
++ << 12);
400 table
->subtables
[i
] =
401 calloc(1, sizeof(struct ppgtt_table
));
402 table
->subtables
[i
]->phys_addr
=
403 phys_addrs_allocator
++ << 12;
406 entries
[i
] = 3 /* read/write | present */ |
407 (level
== 1 ? (uint64_t)table
->subtables
[i
] :
408 table
->subtables
[i
]->phys_addr
);
411 if (dirty_start
<= dirty_end
) {
412 uint64_t write_addr
= table
->phys_addr
+ dirty_start
*
414 uint64_t write_size
= (dirty_end
- dirty_start
+ 1) *
416 mem_trace_memory_write_header_out(write_addr
, write_size
,
417 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
);
418 data_out(entries
+ dirty_start
, write_size
);
423 map_ppgtt(uint64_t start
, uint64_t size
)
425 uint64_t l4_start
= start
& 0xff8000000000;
426 uint64_t l3_start
= start
& 0xffffc0000000;
427 uint64_t l2_start
= start
& 0xffffffe00000;
428 uint64_t l1_start
= start
& 0xfffffffff000;
429 uint64_t l4_end
= ((start
+ size
- 1) | 0x007fffffffff) & 0xffffffffffff;
430 uint64_t l3_end
= ((start
+ size
- 1) | 0x00003fffffff) & 0xffffffffffff;
431 uint64_t l2_end
= ((start
+ size
- 1) | 0x0000001fffff) & 0xffffffffffff;
432 uint64_t l1_end
= ((start
+ size
- 1) | 0x000000000fff) & 0xffffffffffff;
/* 9-bit index of "addr" at each level of the 4-level page table
 * (PML4 / PDP / PD / PT), per the gen8+ 48-bit PPGTT layout. */
#define L4_index(addr) (((addr) >> 39) & 0x1ff)
#define L3_index(addr) (((addr) >> 30) & 0x1ff)
#define L2_index(addr) (((addr) >> 21) & 0x1ff)
#define L1_index(addr) (((addr) >> 12) & 0x1ff)
/* Walk the shadow table tree from the top-level pml4 down to the table
 * covering "addr". Each step assumes populate_ppgtt_table() has already
 * allocated the subtable. */
#define L3_table(addr) (pml4.subtables[L4_index(addr)])
#define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
#define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])
443 populate_ppgtt_table(&pml4
, L4_index(l4_start
), L4_index(l4_end
), 4);
445 for (uint64_t a
= l4_start
; a
< l4_end
; a
+= (1ULL << 39)) {
446 uint64_t _start
= max(a
, l3_start
);
447 uint64_t _end
= min(a
+ (1ULL << 39), l3_end
);
449 populate_ppgtt_table(L3_table(a
), L3_index(_start
),
453 for (uint64_t a
= l3_start
; a
< l3_end
; a
+= (1ULL << 30)) {
454 uint64_t _start
= max(a
, l2_start
);
455 uint64_t _end
= min(a
+ (1ULL << 30), l2_end
);
457 populate_ppgtt_table(L2_table(a
), L2_index(_start
),
461 for (uint64_t a
= l2_start
; a
< l2_end
; a
+= (1ULL << 21)) {
462 uint64_t _start
= max(a
, l1_start
);
463 uint64_t _end
= min(a
+ (1ULL << 21), l1_end
);
465 populate_ppgtt_table(L1_table(a
), L1_index(_start
),
471 ppgtt_lookup(uint64_t ppgtt_addr
)
473 return (uint64_t)L1_table(ppgtt_addr
)->subtables
[L1_index(ppgtt_addr
)];
477 write_execlists_header(void)
479 char app_name
[8 * 4];
480 int app_name_len
, dwords
;
483 snprintf(app_name
, sizeof(app_name
), "PCI-ID=0x%X %s", device
,
484 program_invocation_short_name
);
485 app_name_len
= ALIGN(app_name_len
, sizeof(uint32_t));
487 dwords
= 5 + app_name_len
/ sizeof(uint32_t);
488 dword_out(CMD_MEM_TRACE_VERSION
| (dwords
- 1));
489 dword_out(AUB_MEM_TRACE_VERSION_FILE_VERSION
);
490 dword_out(devinfo
.simulator_id
<< AUB_MEM_TRACE_VERSION_DEVICE_SHIFT
);
491 dword_out(0); /* version */
492 dword_out(0); /* version */
493 data_out(app_name
, app_name_len
);
496 uint32_t ggtt_ptes
= STATIC_GGTT_MAP_SIZE
>> 12;
498 mem_trace_memory_write_header_out(STATIC_GGTT_MAP_START
>> 12,
499 ggtt_ptes
* GEN8_PTE_SIZE
,
500 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY
);
501 for (uint32_t i
= 0; i
< ggtt_ptes
; i
++) {
502 dword_out(1 + 0x1000 * i
+ STATIC_GGTT_MAP_START
);
507 mem_trace_memory_write_header_out(RENDER_RING_ADDR
, RING_SIZE
,
508 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
509 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
513 mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR
,
515 sizeof(render_context_init
),
516 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
517 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
521 data_out(render_context_init
, sizeof(render_context_init
));
524 mem_trace_memory_write_header_out(BLITTER_RING_ADDR
, RING_SIZE
,
525 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
526 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
530 mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR
,
532 sizeof(blitter_context_init
),
533 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
534 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
537 /* BLITTER_CONTEXT */
538 data_out(blitter_context_init
, sizeof(blitter_context_init
));
541 mem_trace_memory_write_header_out(VIDEO_RING_ADDR
, RING_SIZE
,
542 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
543 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
547 mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR
,
549 sizeof(video_context_init
),
550 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
551 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
555 data_out(video_context_init
, sizeof(video_context_init
));
557 register_write_out(HWS_PGA_RCSUNIT
, RENDER_CONTEXT_ADDR
);
558 register_write_out(HWS_PGA_VCSUNIT0
, VIDEO_CONTEXT_ADDR
);
559 register_write_out(HWS_PGA_BCSUNIT
, BLITTER_CONTEXT_ADDR
);
561 register_write_out(GFX_MODE_RCSUNIT
, 0x80008000 /* execlist enable */);
562 register_write_out(GFX_MODE_VCSUNIT0
, 0x80008000 /* execlist enable */);
563 register_write_out(GFX_MODE_BCSUNIT
, 0x80008000 /* execlist enable */);
566 static void write_legacy_header(void)
568 char app_name
[8 * 4];
570 int comment_len
, comment_dwords
, dwords
;
571 uint32_t entry
= 0x200003;
573 comment_len
= snprintf(comment
, sizeof(comment
), "PCI-ID=0x%x", device
);
574 comment_dwords
= ((comment_len
+ 3) / 4);
576 /* Start with a (required) version packet. */
577 dwords
= 13 + comment_dwords
;
578 dword_out(CMD_AUB_HEADER
| (dwords
- 2));
579 dword_out((4 << AUB_HEADER_MAJOR_SHIFT
) |
580 (0 << AUB_HEADER_MINOR_SHIFT
));
582 /* Next comes a 32-byte application name. */
583 strncpy(app_name
, program_invocation_short_name
, sizeof(app_name
));
584 app_name
[sizeof(app_name
) - 1] = 0;
585 data_out(app_name
, sizeof(app_name
));
587 dword_out(0); /* timestamp */
588 dword_out(0); /* timestamp */
589 dword_out(comment_len
);
590 data_out(comment
, comment_dwords
* 4);
592 /* Set up the GTT. The max we can handle is 64M */
593 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
| ((addr_bits
> 32 ? 6 : 5) - 2));
594 dword_out(AUB_TRACE_MEMTYPE_GTT_ENTRY
|
595 AUB_TRACE_TYPE_NOTYPE
| AUB_TRACE_OP_DATA_WRITE
);
596 dword_out(0); /* subtype */
597 dword_out(0); /* offset */
598 dword_out(gtt_size()); /* size */
601 for (uint32_t i
= 0; i
< NUM_PT_ENTRIES
; i
++) {
602 dword_out(entry
+ 0x1000 * i
);
609 * Break up large objects into multiple writes. Otherwise a 128kb VBO
610 * would overflow the 16 bits of size field in the packet header and
611 * everything goes badly after that.
614 aub_write_trace_block(uint32_t type
, void *virtual, uint32_t size
, uint64_t gtt_offset
)
617 uint32_t subtype
= 0;
618 static const char null_block
[8 * 4096];
620 for (uint32_t offset
= 0; offset
< size
; offset
+= block_size
) {
621 block_size
= min(8 * 4096, size
- offset
);
623 if (use_execlists()) {
624 block_size
= min(4096, block_size
);
625 mem_trace_memory_write_header_out(ppgtt_lookup(gtt_offset
+ offset
),
627 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
);
629 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
|
630 ((addr_bits
> 32 ? 6 : 5) - 2));
631 dword_out(AUB_TRACE_MEMTYPE_GTT
|
632 type
| AUB_TRACE_OP_DATA_WRITE
);
634 dword_out(gtt_offset
+ offset
);
635 dword_out(align_u32(block_size
, 4));
637 dword_out((gtt_offset
+ offset
) >> 32);
641 data_out(((char *) GET_PTR(virtual)) + offset
, block_size
);
643 data_out(null_block
, block_size
);
645 /* Pad to a multiple of 4 bytes. */
646 data_out(null_block
, -block_size
& 3);
651 write_reloc(void *p
, uint64_t v
)
653 if (addr_bits
> 32) {
654 /* From the Broadwell PRM Vol. 2a,
655 * MI_LOAD_REGISTER_MEM::MemoryAddress:
657 * "This field specifies the address of the memory
658 * location where the register value specified in the
659 * DWord above will read from. The address specifies
660 * the DWord location of the data. Range =
661 * GraphicsVirtualAddress[63:2] for a DWord register
662 * GraphicsAddress [63:48] are ignored by the HW and
663 * assumed to be in correct canonical form [63:48] ==
666 * In practice, this will always mean the top bits are zero
667 * because of the GTT size limitation of the aubdump tool.
669 const int shift
= 63 - 47;
670 *(uint64_t *)p
= (((int64_t)v
) << shift
) >> shift
;
677 aub_dump_execlist(uint64_t batch_offset
, int ring_flag
)
684 uint32_t control_reg
;
687 case I915_EXEC_DEFAULT
:
688 case I915_EXEC_RENDER
:
689 ring_addr
= RENDER_RING_ADDR
;
690 descriptor
= RENDER_CONTEXT_DESCRIPTOR
;
691 elsp_reg
= EXECLIST_SUBMITPORT_RCSUNIT
;
692 elsq_reg
= EXECLIST_SQ_CONTENTS0_RCSUNIT
;
693 status_reg
= EXECLIST_STATUS_RCSUNIT
;
694 control_reg
= EXECLIST_CONTROL_RCSUNIT
;
697 ring_addr
= VIDEO_RING_ADDR
;
698 descriptor
= VIDEO_CONTEXT_DESCRIPTOR
;
699 elsp_reg
= EXECLIST_SUBMITPORT_VCSUNIT0
;
700 elsq_reg
= EXECLIST_SQ_CONTENTS0_VCSUNIT0
;
701 status_reg
= EXECLIST_STATUS_VCSUNIT0
;
702 control_reg
= EXECLIST_CONTROL_VCSUNIT0
;
705 ring_addr
= BLITTER_RING_ADDR
;
706 descriptor
= BLITTER_CONTEXT_DESCRIPTOR
;
707 elsp_reg
= EXECLIST_SUBMITPORT_BCSUNIT
;
708 elsq_reg
= EXECLIST_SQ_CONTENTS0_BCSUNIT
;
709 status_reg
= EXECLIST_STATUS_BCSUNIT
;
710 control_reg
= EXECLIST_CONTROL_BCSUNIT
;
714 mem_trace_memory_write_header_out(ring_addr
, 16,
715 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
716 dword_out(AUB_MI_BATCH_BUFFER_START
| MI_BATCH_NON_SECURE_I965
| (3 - 2));
717 dword_out(batch_offset
& 0xFFFFFFFF);
718 dword_out(batch_offset
>> 32);
719 dword_out(0 /* MI_NOOP */);
721 mem_trace_memory_write_header_out(ring_addr
+ 8192 + 20, 4,
722 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
723 dword_out(0); /* RING_BUFFER_HEAD */
724 mem_trace_memory_write_header_out(ring_addr
+ 8192 + 28, 4,
725 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
726 dword_out(16); /* RING_BUFFER_TAIL */
728 if (devinfo
.gen
>= 11) {
729 register_write_out(elsq_reg
, descriptor
& 0xFFFFFFFF);
730 register_write_out(elsq_reg
+ sizeof(uint32_t), descriptor
>> 32);
731 register_write_out(control_reg
, 1);
733 register_write_out(elsp_reg
, 0);
734 register_write_out(elsp_reg
, 0);
735 register_write_out(elsp_reg
, descriptor
>> 32);
736 register_write_out(elsp_reg
, descriptor
& 0xFFFFFFFF);
739 dword_out(CMD_MEM_TRACE_REGISTER_POLL
| (5 + 1 - 1));
740 dword_out(status_reg
);
741 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
742 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
743 if (devinfo
.gen
>= 11) {
744 dword_out(0x00000001); /* mask lo */
745 dword_out(0x00000000); /* mask hi */
746 dword_out(0x00000001);
748 dword_out(0x00000010); /* mask lo */
749 dword_out(0x00000000); /* mask hi */
750 dword_out(0x00000000);
755 aub_dump_ringbuffer(uint64_t batch_offset
, uint64_t offset
, int ring_flag
)
757 uint32_t ringbuffer
[4096];
758 unsigned aub_mi_bbs_len
;
759 int ring
= AUB_TRACE_TYPE_RING_PRB0
; /* The default ring */
762 if (ring_flag
== I915_EXEC_BSD
)
763 ring
= AUB_TRACE_TYPE_RING_PRB1
;
764 else if (ring_flag
== I915_EXEC_BLT
)
765 ring
= AUB_TRACE_TYPE_RING_PRB2
;
767 /* Make a ring buffer to execute our batchbuffer. */
768 memset(ringbuffer
, 0, sizeof(ringbuffer
));
770 aub_mi_bbs_len
= addr_bits
> 32 ? 3 : 2;
771 ringbuffer
[ring_count
] = AUB_MI_BATCH_BUFFER_START
| (aub_mi_bbs_len
- 2);
772 write_reloc(&ringbuffer
[ring_count
+ 1], batch_offset
);
773 ring_count
+= aub_mi_bbs_len
;
775 /* Write out the ring. This appears to trigger execution of
776 * the ring in the simulator.
778 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
|
779 ((addr_bits
> 32 ? 6 : 5) - 2));
780 dword_out(AUB_TRACE_MEMTYPE_GTT
| ring
| AUB_TRACE_OP_COMMAND_WRITE
);
781 dword_out(0); /* general/surface subtype */
783 dword_out(ring_count
* 4);
785 dword_out(offset
>> 32);
787 data_out(ringbuffer
, ring_count
* 4);
791 relocate_bo(struct bo
*bo
, const struct drm_i915_gem_execbuffer2
*execbuffer2
,
792 const struct drm_i915_gem_exec_object2
*obj
)
794 const struct drm_i915_gem_exec_object2
*exec_objects
=
795 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
796 const struct drm_i915_gem_relocation_entry
*relocs
=
797 (const struct drm_i915_gem_relocation_entry
*) (uintptr_t) obj
->relocs_ptr
;
801 relocated
= malloc(bo
->size
);
802 fail_if(relocated
== NULL
, "intel_aubdump: out of memory\n");
803 memcpy(relocated
, GET_PTR(bo
->map
), bo
->size
);
804 for (size_t i
= 0; i
< obj
->relocation_count
; i
++) {
805 fail_if(relocs
[i
].offset
>= bo
->size
, "intel_aubdump: reloc outside bo\n");
807 if (execbuffer2
->flags
& I915_EXEC_HANDLE_LUT
)
808 handle
= exec_objects
[relocs
[i
].target_handle
].handle
;
810 handle
= relocs
[i
].target_handle
;
812 write_reloc(((char *)relocated
) + relocs
[i
].offset
,
813 get_bo(handle
)->offset
+ relocs
[i
].delta
);
820 gem_ioctl(int fd
, unsigned long request
, void *argp
)
825 ret
= libc_ioctl(fd
, request
, argp
);
826 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
832 gem_mmap(int fd
, uint32_t handle
, uint64_t offset
, uint64_t size
)
834 struct drm_i915_gem_mmap mmap
= {
840 if (gem_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap
) == -1)
843 return (void *)(uintptr_t) mmap
.addr_ptr
;
847 gem_get_param(int fd
, uint32_t param
)
850 drm_i915_getparam_t gp
= {
855 if (gem_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gp
) == -1)
862 dump_execbuffer2(int fd
, struct drm_i915_gem_execbuffer2
*execbuffer2
)
864 struct drm_i915_gem_exec_object2
*exec_objects
=
865 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
866 uint32_t ring_flag
= execbuffer2
->flags
& I915_EXEC_RING_MASK
;
868 struct drm_i915_gem_exec_object2
*obj
;
869 struct bo
*bo
, *batch_bo
;
873 /* We can't do this at open time as we're not yet authenticated. */
875 device
= gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
876 fail_if(device
== 0 || devinfo
.gen
== 0, "failed to identify chipset\n");
878 if (devinfo
.gen
== 0) {
879 fail_if(!gen_get_device_info(device
, &devinfo
),
880 "failed to identify chipset=0x%x\n", device
);
882 addr_bits
= devinfo
.gen
>= 8 ? 48 : 32;
885 write_execlists_header();
887 write_legacy_header();
890 printf("[intel_aubdump: running, "
891 "output file %s, chipset id 0x%04x, gen %d]\n",
892 filename
, device
, devinfo
.gen
);
901 printf("Dumping execbuffer2:\n");
903 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
904 obj
= &exec_objects
[i
];
905 bo
= get_bo(obj
->handle
);
907 /* If bo->size == 0, this means they passed us an invalid
908 * buffer. The kernel will reject it and so should we.
912 printf("BO #%d is invalid!\n", obj
->handle
);
916 if (obj
->flags
& EXEC_OBJECT_PINNED
) {
917 bo
->offset
= obj
->offset
;
919 printf("BO #%d (%dB) pinned @ 0x%lx\n",
920 obj
->handle
, bo
->size
, bo
->offset
);
922 if (obj
->alignment
!= 0)
923 offset
= align_u32(offset
, obj
->alignment
);
926 printf("BO #%d (%dB) @ 0x%lx\n", obj
->handle
,
927 bo
->size
, bo
->offset
);
928 offset
= align_u32(offset
+ bo
->size
+ 4095, 4096);
931 if (bo
->map
== NULL
&& bo
->size
> 0)
932 bo
->map
= gem_mmap(fd
, obj
->handle
, 0, bo
->size
);
933 fail_if(bo
->map
== MAP_FAILED
, "intel_aubdump: bo mmap failed\n");
936 map_ppgtt(bo
->offset
, bo
->size
);
939 batch_index
= (execbuffer2
->flags
& I915_EXEC_BATCH_FIRST
) ? 0 :
940 execbuffer2
->buffer_count
- 1;
941 batch_bo
= get_bo(exec_objects
[batch_index
].handle
);
942 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
943 obj
= &exec_objects
[i
];
944 bo
= get_bo(obj
->handle
);
946 if (obj
->relocation_count
> 0)
947 data
= relocate_bo(bo
, execbuffer2
, obj
);
951 if (bo
== batch_bo
) {
952 aub_write_trace_block(AUB_TRACE_TYPE_BATCH
,
953 data
, bo
->size
, bo
->offset
);
955 aub_write_trace_block(AUB_TRACE_TYPE_NOTYPE
,
956 data
, bo
->size
, bo
->offset
);
962 if (use_execlists()) {
963 aub_dump_execlist(batch_bo
->offset
+
964 execbuffer2
->batch_start_offset
, ring_flag
);
966 /* Dump ring buffer */
967 aub_dump_ringbuffer(batch_bo
->offset
+
968 execbuffer2
->batch_start_offset
, offset
,
972 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
973 if (files
[i
] != NULL
)
977 if (device_override
&&
978 (execbuffer2
->flags
& I915_EXEC_FENCE_ARRAY
) != 0) {
979 struct drm_i915_gem_exec_fence
*fences
=
980 (void*)(uintptr_t)execbuffer2
->cliprects_ptr
;
981 for (uint32_t i
= 0; i
< execbuffer2
->num_cliprects
; i
++) {
982 if ((fences
[i
].flags
& I915_EXEC_FENCE_SIGNAL
) != 0) {
983 struct drm_syncobj_array arg
= {
984 .handles
= (uintptr_t)&fences
[i
].handle
,
988 libc_ioctl(fd
, DRM_IOCTL_SYNCOBJ_SIGNAL
, &arg
);
995 add_new_bo(int handle
, uint64_t size
, void *map
)
997 struct bo
*bo
= &bos
[handle
];
999 fail_if(handle
>= MAX_BO_COUNT
, "intel_aubdump: bo handle out of range\n");
1000 fail_if(size
== 0, "intel_aubdump: bo size is invalid\n");
1007 remove_bo(int handle
)
1009 struct bo
*bo
= get_bo(handle
);
1011 if (bo
->map
&& !IS_USERPTR(bo
->map
))
1012 munmap(bo
->map
, bo
->size
);
1017 __attribute__ ((visibility ("default"))) int
1023 return libc_close(fd
);
1027 launch_command(char *command
)
1030 char **args
= calloc(strlen(command
), sizeof(char *));
1031 char *iter
= command
;
1033 args
[i
++] = iter
= command
;
1035 while ((iter
= strstr(iter
, ",")) != NULL
) {
1041 if (pipe(fds
) == -1)
1047 fail_if(execvp(args
[0], args
) == -1,
1048 "intel_aubdump: failed to launch child command\n");
1053 return fdopen(fds
[1], "w");
1063 static bool initialized
= false;
1072 config
= fdopen(3, "r");
1073 while (fscanf(config
, "%m[^=]=%m[^\n]\n", &key
, &value
) != EOF
) {
1074 if (!strcmp(key
, "verbose")) {
1076 } else if (!strcmp(key
, "device")) {
1077 fail_if(sscanf(value
, "%i", &device
) != 1,
1078 "intel_aubdump: failed to parse device id '%s'",
1080 device_override
= true;
1081 } else if (!strcmp(key
, "file")) {
1082 filename
= strdup(value
);
1083 files
[0] = fopen(filename
, "w+");
1084 fail_if(files
[0] == NULL
,
1085 "intel_aubdump: failed to open file '%s'\n",
1087 } else if (!strcmp(key
, "command")) {
1088 files
[1] = launch_command(value
);
1089 fail_if(files
[1] == NULL
,
1090 "intel_aubdump: failed to launch command '%s'\n",
1093 fprintf(stderr
, "intel_aubdump: unknown option '%s'\n", key
);
1101 bos
= calloc(MAX_BO_COUNT
, sizeof(bos
[0]));
1102 fail_if(bos
== NULL
, "intel_aubdump: out of memory\n");
1105 __attribute__ ((visibility ("default"))) int
1106 ioctl(int fd
, unsigned long request
, ...)
1113 va_start(args
, request
);
1114 argp
= va_arg(args
, void *);
1117 if (_IOC_TYPE(request
) == DRM_IOCTL_BASE
&&
1118 drm_fd
!= fd
&& fstat(fd
, &buf
) == 0 &&
1119 (buf
.st_mode
& S_IFMT
) == S_IFCHR
&& major(buf
.st_rdev
) == DRM_MAJOR
) {
1122 printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd
);
1129 case DRM_IOCTL_I915_GETPARAM
: {
1130 struct drm_i915_getparam
*getparam
= argp
;
1132 if (device_override
&& getparam
->param
== I915_PARAM_CHIPSET_ID
) {
1133 *getparam
->value
= device
;
1137 ret
= libc_ioctl(fd
, request
, argp
);
1139 /* If the application looks up chipset_id
1140 * (they typically do), we'll piggy-back on
1141 * their ioctl and store the id for later
1143 if (getparam
->param
== I915_PARAM_CHIPSET_ID
)
1144 device
= *getparam
->value
;
1149 case DRM_IOCTL_I915_GEM_EXECBUFFER
: {
1152 fprintf(stderr
, "intel_aubdump: "
1153 "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
1156 return libc_ioctl(fd
, request
, argp
);
1159 case DRM_IOCTL_I915_GEM_EXECBUFFER2
:
1160 case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
: {
1161 dump_execbuffer2(fd
, argp
);
1162 if (device_override
)
1165 return libc_ioctl(fd
, request
, argp
);
1168 case DRM_IOCTL_I915_GEM_CREATE
: {
1169 struct drm_i915_gem_create
*create
= argp
;
1171 ret
= libc_ioctl(fd
, request
, argp
);
1173 add_new_bo(create
->handle
, create
->size
, NULL
);
1178 case DRM_IOCTL_I915_GEM_USERPTR
: {
1179 struct drm_i915_gem_userptr
*userptr
= argp
;
1181 ret
= libc_ioctl(fd
, request
, argp
);
1183 add_new_bo(userptr
->handle
, userptr
->user_size
,
1184 (void *) (uintptr_t) (userptr
->user_ptr
| USERPTR_FLAG
));
1188 case DRM_IOCTL_GEM_CLOSE
: {
1189 struct drm_gem_close
*close
= argp
;
1191 remove_bo(close
->handle
);
1193 return libc_ioctl(fd
, request
, argp
);
1196 case DRM_IOCTL_GEM_OPEN
: {
1197 struct drm_gem_open
*open
= argp
;
1199 ret
= libc_ioctl(fd
, request
, argp
);
1201 add_new_bo(open
->handle
, open
->size
, NULL
);
1206 case DRM_IOCTL_PRIME_FD_TO_HANDLE
: {
1207 struct drm_prime_handle
*prime
= argp
;
1209 ret
= libc_ioctl(fd
, request
, argp
);
1213 size
= lseek(prime
->fd
, 0, SEEK_END
);
1214 fail_if(size
== -1, "intel_aubdump: failed to get prime bo size\n");
1215 add_new_bo(prime
->handle
, size
, NULL
);
1222 return libc_ioctl(fd
, request
, argp
);
1225 return libc_ioctl(fd
, request
, argp
);
1232 libc_close
= dlsym(RTLD_NEXT
, "close");
1233 libc_ioctl
= dlsym(RTLD_NEXT
, "ioctl");
1234 fail_if(libc_close
== NULL
|| libc_ioctl
== NULL
,
1235 "intel_aubdump: failed to get libc ioctl or close\n");
1239 close_init_helper(int fd
)
1242 return libc_close(fd
);
1246 ioctl_init_helper(int fd
, unsigned long request
, ...)
1251 va_start(args
, request
);
1252 argp
= va_arg(args
, void *);
1256 return libc_ioctl(fd
, request
, argp
);
1259 static void __attribute__ ((destructor
))
1263 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
1264 if (files
[i
] != NULL
)