2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
35 #include <sys/ioctl.h>
43 #include "intel_aub.h"
45 #include "dev/gen_device_info.h"
46 #include "util/macros.h"
/* Round x up to the next multiple of y; y must be a power of two. */
49 #define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
/* MI_LOAD_REGISTER_IMM header: opcode 0x22 in bits 31:23, DWord length
 * field encodes n (register, value) pairs => 2*n payload dwords. */
52 #define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
/* LRI bit 12 — force-posted register writes (used for context images). */
53 #define MI_LRI_FORCE_POSTED (1<<12)
/* MI_BATCH_BUFFER_START bit 8 — batch runs in non-secure (PPGTT) space. */
55 #define MI_BATCH_NON_SECURE_I965 (1 << 8)
/* MI_BATCH_BUFFER_END opcode (0x0A in bits 31:23). */
57 #define MI_BATCH_BUFFER_END (0xA << 23)
/* GNU statement-expression min/max: __typeof temporaries avoid the classic
 * double-evaluation hazard of naive ternary macros.
 * NOTE(review): the result expressions (orig. lines 62-64 / 68-70) are
 * elided from this chunk — confirm against the full file before editing. */
59 #define min(a, b) ({ \
60 __typeof(a) _a = (a); \
61 __typeof(b) _b = (b); \
65 #define max(a, b) ({ \
66 __typeof(a) _a = (a); \
67 __typeof(b) _b = (b); \
/* Per-engine MMIO register offsets. Each engine's register block is offset
 * by 0x10000: 0x0xxxx = RCS (render), 0x1xxxx = VCS0 (video),
 * 0x2xxxx = BCS (blitter). */
/* Hardware Status Page Address registers. */
71 #define HWS_PGA_RCSUNIT 0x02080
72 #define HWS_PGA_VCSUNIT0 0x12080
73 #define HWS_PGA_BCSUNIT 0x22080
/* GFX_MODE registers (execlist enable lives here). */
75 #define GFX_MODE_RCSUNIT 0x0229c
76 #define GFX_MODE_VCSUNIT0 0x1229c
77 #define GFX_MODE_BCSUNIT 0x2229c
/* Execlist Submit Port (ELSP) — pre-Gen11 submission path. */
79 #define EXECLIST_SUBMITPORT_RCSUNIT 0x02230
80 #define EXECLIST_SUBMITPORT_VCSUNIT0 0x12230
81 #define EXECLIST_SUBMITPORT_BCSUNIT 0x22230
/* Execlist status registers, polled to detect completion. */
83 #define EXECLIST_STATUS_RCSUNIT 0x02234
84 #define EXECLIST_STATUS_VCSUNIT0 0x12234
85 #define EXECLIST_STATUS_BCSUNIT 0x22234
/* Execlist Submission Queue contents — Gen11+ submission path. */
87 #define EXECLIST_SQ_CONTENTS0_RCSUNIT 0x02510
88 #define EXECLIST_SQ_CONTENTS0_VCSUNIT0 0x12510
89 #define EXECLIST_SQ_CONTENTS0_BCSUNIT 0x22510
/* Execlist control registers (load the SQ) — Gen11+. */
91 #define EXECLIST_CONTROL_RCSUNIT 0x02550
92 #define EXECLIST_CONTROL_VCSUNIT0 0x12550
93 #define EXECLIST_CONTROL_BCSUNIT 0x22550
/* Total GTT address space the dumper models: 64 MiB. */
95 #define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)
/* Gen8+ page-table entries are 64-bit. */
98 #define GEN8_PTE_SIZE 8
/* One PTE per 4 KiB page of the modeled address space. */
100 #define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
101 #define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)
/* Ring buffer and per-process HW status page are one page each. */
103 #define RING_SIZE (1 * 4096)
104 #define PPHWSP_SIZE (1 * 4096)
/* Logical-ring context image sizes (in pages) per generation; the render
 * engine context is much larger than the other engines'. */
105 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * 4096)
106 #define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
107 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * 4096)
108 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * 4096)
109 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * 4096)
/* Fixed GGTT layout: each engine gets a ring page followed by its context
 * image (PPHWSP + LR context), packed back to back from address 0.
 * The render slot is sized for GEN10_LR_CONTEXT_RENDER_SIZE — presumably
 * large enough for all supported gens; confirm against the size table. */
112 #define STATIC_GGTT_MAP_START 0
114 #define RENDER_RING_ADDR STATIC_GGTT_MAP_START
115 #define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)
117 #define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
118 #define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)
120 #define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
121 #define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)
123 #define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
124 #define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)
/* Physical address of the PPGTT PML4 root: placed right after the static
 * GGTT map. */
126 #define PML4_PHYS_ADDR ((uint64_t)(STATIC_GGTT_MAP_END))
/* Context descriptor low bits (0x339) and per-engine descriptors: bits
 * 63:62 carry a unique context ID (1=render, 2=blitter, 3=video) ORed with
 * the context's GGTT address and the common flags.
 * NOTE(review): the closing of the flags comment below (orig. ~line 133)
 * is elided from this chunk. */
128 #define CONTEXT_FLAGS (0x339) /* Normal Priority | L3-LLC Coherency |
130 * Legacy Context with 64 bit VA support |
134 #define RENDER_CONTEXT_DESCRIPTOR ((uint64_t)1 << 62 | RENDER_CONTEXT_ADDR | CONTEXT_FLAGS)
135 #define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 62 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
136 #define VIDEO_CONTEXT_DESCRIPTOR ((uint64_t)3 << 62 | VIDEO_CONTEXT_ADDR | CONTEXT_FLAGS)
138 static const uint32_t render_context_init
[GEN9_LR_CONTEXT_RENDER_SIZE
/ /* Choose the largest */
139 sizeof(uint32_t)] = {
141 MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED
,
142 0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
143 0x2034 /* RING_HEAD */, 0,
144 0x2030 /* RING_TAIL */, 0,
145 0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR
,
146 0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
147 0x2168 /* BB_HEAD_U */, 0,
148 0x2140 /* BB_HEAD_L */, 0,
149 0x2110 /* BB_STATE */, 0,
150 0x211C /* SECOND_BB_HEAD_U */, 0,
151 0x2114 /* SECOND_BB_HEAD_L */, 0,
152 0x2118 /* SECOND_BB_STATE */, 0,
153 0x21C0 /* BB_PER_CTX_PTR */, 0,
154 0x21C4 /* RCS_INDIRECT_CTX */, 0,
155 0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
160 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
161 0x23A8 /* CTX_TIMESTAMP */, 0,
162 0x228C /* PDP3_UDW */, 0,
163 0x2288 /* PDP3_LDW */, 0,
164 0x2284 /* PDP2_UDW */, 0,
165 0x2280 /* PDP2_LDW */, 0,
166 0x227C /* PDP1_UDW */, 0,
167 0x2278 /* PDP1_LDW */, 0,
168 0x2274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
169 0x2270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
171 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
174 MI_LOAD_REGISTER_IMM_n(1),
175 0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
179 static const uint32_t blitter_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
180 sizeof(uint32_t)] = {
182 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
183 0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
184 0x22034 /* RING_HEAD */, 0,
185 0x22030 /* RING_TAIL */, 0,
186 0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR
,
187 0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
188 0x22168 /* BB_HEAD_U */, 0,
189 0x22140 /* BB_HEAD_L */, 0,
190 0x22110 /* BB_STATE */, 0,
191 0x2211C /* SECOND_BB_HEAD_U */, 0,
192 0x22114 /* SECOND_BB_HEAD_L */, 0,
193 0x22118 /* SECOND_BB_STATE */, 0,
195 0, 0, 0, 0, 0, 0, 0, 0,
198 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
199 0x223A8 /* CTX_TIMESTAMP */, 0,
200 0x2228C /* PDP3_UDW */, 0,
201 0x22288 /* PDP3_LDW */, 0,
202 0x22284 /* PDP2_UDW */, 0,
203 0x22280 /* PDP2_LDW */, 0,
204 0x2227C /* PDP1_UDW */, 0,
205 0x22278 /* PDP1_LDW */, 0,
206 0x22274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
207 0x22270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
209 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
214 static const uint32_t video_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
215 sizeof(uint32_t)] = {
217 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
218 0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
219 0x1C034 /* RING_HEAD */, 0,
220 0x1C030 /* RING_TAIL */, 0,
221 0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR
,
222 0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
223 0x1C168 /* BB_HEAD_U */, 0,
224 0x1C140 /* BB_HEAD_L */, 0,
225 0x1C110 /* BB_STATE */, 0,
226 0x1C11C /* SECOND_BB_HEAD_U */, 0,
227 0x1C114 /* SECOND_BB_HEAD_L */, 0,
228 0x1C118 /* SECOND_BB_STATE */, 0,
230 0, 0, 0, 0, 0, 0, 0, 0,
233 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
234 0x1C3A8 /* CTX_TIMESTAMP */, 0,
235 0x1C28C /* PDP3_UDW */, 0,
236 0x1C288 /* PDP3_LDW */, 0,
237 0x1C284 /* PDP2_UDW */, 0,
238 0x1C280 /* PDP2_LDW */, 0,
239 0x1C27C /* PDP1_UDW */, 0,
240 0x1C278 /* PDP1_LDW */, 0,
241 0x1C274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
242 0x1C270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
244 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* Lazy libc interposition: the function pointers start out aiming at the
 * *_init_helper shims, which resolve the real close/ioctl via dlsym on
 * first use and then forward. */
249 static int close_init_helper(int fd
);
250 static int ioctl_init_helper(int fd
, unsigned long request
, ...);
252 static int (*libc_close
)(int fd
) = close_init_helper
;
253 static int (*libc_ioctl
)(int fd
, unsigned long request
, ...) = ioctl_init_helper
;
/* The DRM fd we are shadowing; -1 until an i915 fd is intercepted. */
255 static int drm_fd
= -1;
/* Output AUB file name (strdup'd from the config stream). */
256 static char *filename
= NULL
;
/* Output streams: [0] = file, [1] = pipe to a launched viewer command;
 * either may be NULL. */
257 static FILE *files
[2] = { NULL
, NULL
};
/* Device info, chipset id, and derived state filled in lazily once the
 * application authenticates / queries the chipset. */
258 static struct gen_device_info devinfo
= {0};
259 static int verbose
= 0;
260 static bool device_override
;
261 static uint32_t device
;
/* 48 on Gen8+ (full PPGTT), else 32; 0 until the device is identified. */
262 static int addr_bits
= 0;
/* Upper bound on GEM handles tracked; handles index directly into bos[]. */
264 #define MAX_BO_COUNT 64 * 1024
/* Global handle -> bo table (calloc'd to MAX_BO_COUNT entries at init). */
272 static struct bo
*bos
;
/* DRM character-device major number, used to recognize DRM fds in ioctl(). */
274 #define DRM_MAJOR 226
276 /* We set bit 0 in the map pointer for userptr BOs so we know not to
277 * munmap them on DRM_IOCTL_GEM_CLOSE.
279 #define USERPTR_FLAG 1
280 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
281 #define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
283 static inline bool use_execlists(void)
285 return devinfo
.gen
>= 8;
288 static void __attribute__ ((format(__printf__
, 2, 3)))
289 fail_if(int cond
, const char *format
, ...)
296 va_start(args
, format
);
297 vfprintf(stderr
, format
, args
);
304 get_bo(uint32_t handle
)
308 fail_if(handle
>= MAX_BO_COUNT
, "bo handle too large\n");
314 static inline uint32_t
315 align_u32(uint32_t v
, uint32_t a
)
317 return (v
+ a
- 1) & ~(a
- 1);
321 dword_out(uint32_t data
)
323 for (int i
= 0; i
< ARRAY_SIZE (files
); i
++) {
324 if (files
[i
] == NULL
)
327 fail_if(fwrite(&data
, 1, 4, files
[i
]) == 0,
328 "Writing to output failed\n");
333 data_out(const void *data
, size_t size
)
338 for (int i
= 0; i
< ARRAY_SIZE (files
); i
++) {
339 if (files
[i
] == NULL
)
342 fail_if(fwrite(data
, 1, size
, files
[i
]) == 0,
343 "Writing to output failed\n");
350 return NUM_PT_ENTRIES
* (addr_bits
> 32 ? GEN8_PTE_SIZE
: PTE_SIZE
);
354 mem_trace_memory_write_header_out(uint64_t addr
, uint32_t len
,
357 uint32_t dwords
= ALIGN(len
, sizeof(uint32_t)) / sizeof(uint32_t);
359 dword_out(CMD_MEM_TRACE_MEMORY_WRITE
| (5 + dwords
- 1));
360 dword_out(addr
& 0xFFFFFFFF); /* addr lo */
361 dword_out(addr
>> 32); /* addr hi */
362 dword_out(addr_space
); /* gtt */
367 register_write_out(uint32_t addr
, uint32_t value
)
371 dword_out(CMD_MEM_TRACE_REGISTER_WRITE
| (5 + dwords
- 1));
373 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
374 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
375 dword_out(0xFFFFFFFF); /* mask lo */
376 dword_out(0x00000000); /* mask hi */
380 static struct ppgtt_table
{
382 struct ppgtt_table
*subtables
[512];
383 } pml4
= {PML4_PHYS_ADDR
};
386 populate_ppgtt_table(struct ppgtt_table
*table
, int start
, int end
,
389 static uint64_t phys_addrs_allocator
= (PML4_PHYS_ADDR
>> 12) + 1;
390 uint64_t entries
[512] = {0};
391 int dirty_start
= 512, dirty_end
= 0;
394 printf(" PPGTT (0x%016" PRIx64
"), lvl %d, start: %x, end: %x\n",
395 table
->phys_addr
, level
, start
, end
);
398 for (int i
= start
; i
<= end
; i
++) {
399 if (!table
->subtables
[i
]) {
400 dirty_start
= min(dirty_start
, i
);
401 dirty_end
= max(dirty_end
, i
);
403 table
->subtables
[i
] =
404 (void *)(phys_addrs_allocator
++ << 12);
406 printf(" Adding entry: %x, phys_addr: 0x%016" PRIx64
"\n",
407 i
, (uint64_t)table
->subtables
[i
]);
410 table
->subtables
[i
] =
411 calloc(1, sizeof(struct ppgtt_table
));
412 table
->subtables
[i
]->phys_addr
=
413 phys_addrs_allocator
++ << 12;
415 printf(" Adding entry: %x, phys_addr: 0x%016" PRIx64
"\n",
416 i
, table
->subtables
[i
]->phys_addr
);
420 entries
[i
] = 3 /* read/write | present */ |
421 (level
== 1 ? (uint64_t)table
->subtables
[i
] :
422 table
->subtables
[i
]->phys_addr
);
425 if (dirty_start
<= dirty_end
) {
426 uint64_t write_addr
= table
->phys_addr
+ dirty_start
*
428 uint64_t write_size
= (dirty_end
- dirty_start
+ 1) *
430 mem_trace_memory_write_header_out(write_addr
, write_size
,
431 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
);
432 data_out(entries
+ dirty_start
, write_size
);
437 map_ppgtt(uint64_t start
, uint64_t size
)
439 uint64_t l4_start
= start
& 0xff8000000000;
440 uint64_t l4_end
= ((start
+ size
- 1) | 0x007fffffffff) & 0xffffffffffff;
442 #define L4_index(addr) (((addr) >> 39) & 0x1ff)
443 #define L3_index(addr) (((addr) >> 30) & 0x1ff)
444 #define L2_index(addr) (((addr) >> 21) & 0x1ff)
445 #define L1_index(addr) (((addr) >> 12) & 0x1ff)
447 #define L3_table(addr) (pml4.subtables[L4_index(addr)])
448 #define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
449 #define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])
452 printf(" Mapping PPGTT address: 0x%" PRIx64
", size: %" PRIu64
"\n",
456 populate_ppgtt_table(&pml4
, L4_index(l4_start
), L4_index(l4_end
), 4);
458 for (uint64_t l4
= l4_start
; l4
< l4_end
; l4
+= (1ULL << 39)) {
459 uint64_t l3_start
= max(l4
, start
& 0xffffc0000000);
460 uint64_t l3_end
= min(l4
+ (1ULL << 39) - 1,
461 ((start
+ size
- 1) | 0x00003fffffff) & 0xffffffffffff);
462 uint64_t l3_start_idx
= L3_index(l3_start
);
463 uint64_t l3_end_idx
= L3_index(l3_end
);
465 populate_ppgtt_table(L3_table(l4
), l3_start_idx
, l3_end_idx
, 3);
467 for (uint64_t l3
= l3_start
; l3
< l3_end
; l3
+= (1ULL << 30)) {
468 uint64_t l2_start
= max(l3
, start
& 0xffffffe00000);
469 uint64_t l2_end
= min(l3
+ (1ULL << 30) - 1,
470 ((start
+ size
- 1) | 0x0000001fffff) & 0xffffffffffff);
471 uint64_t l2_start_idx
= L2_index(l2_start
);
472 uint64_t l2_end_idx
= L2_index(l2_end
);
474 populate_ppgtt_table(L2_table(l3
), l2_start_idx
, l2_end_idx
, 2);
476 for (uint64_t l2
= l2_start
; l2
< l2_end
; l2
+= (1ULL << 21)) {
477 uint64_t l1_start
= max(l2
, start
& 0xfffffffff000);
478 uint64_t l1_end
= min(l2
+ (1ULL << 21) - 1,
479 ((start
+ size
- 1) | 0x000000000fff) & 0xffffffffffff);
480 uint64_t l1_start_idx
= L1_index(l1_start
);
481 uint64_t l1_end_idx
= L1_index(l1_end
);
483 populate_ppgtt_table(L1_table(l2
), l1_start_idx
, l1_end_idx
, 1);
490 ppgtt_lookup(uint64_t ppgtt_addr
)
492 return (uint64_t)L1_table(ppgtt_addr
)->subtables
[L1_index(ppgtt_addr
)];
496 write_execlists_header(void)
498 char app_name
[8 * 4];
499 int app_name_len
, dwords
;
502 snprintf(app_name
, sizeof(app_name
), "PCI-ID=0x%X %s", device
,
503 program_invocation_short_name
);
504 app_name_len
= ALIGN(app_name_len
, sizeof(uint32_t));
506 dwords
= 5 + app_name_len
/ sizeof(uint32_t);
507 dword_out(CMD_MEM_TRACE_VERSION
| (dwords
- 1));
508 dword_out(AUB_MEM_TRACE_VERSION_FILE_VERSION
);
509 dword_out(devinfo
.simulator_id
<< AUB_MEM_TRACE_VERSION_DEVICE_SHIFT
);
510 dword_out(0); /* version */
511 dword_out(0); /* version */
512 data_out(app_name
, app_name_len
);
515 uint32_t ggtt_ptes
= STATIC_GGTT_MAP_SIZE
>> 12;
517 mem_trace_memory_write_header_out(STATIC_GGTT_MAP_START
>> 12,
518 ggtt_ptes
* GEN8_PTE_SIZE
,
519 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY
);
520 for (uint32_t i
= 0; i
< ggtt_ptes
; i
++) {
521 dword_out(1 + 0x1000 * i
+ STATIC_GGTT_MAP_START
);
526 mem_trace_memory_write_header_out(RENDER_RING_ADDR
, RING_SIZE
,
527 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
528 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
532 mem_trace_memory_write_header_out(RENDER_CONTEXT_ADDR
,
534 sizeof(render_context_init
),
535 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
536 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
540 data_out(render_context_init
, sizeof(render_context_init
));
543 mem_trace_memory_write_header_out(BLITTER_RING_ADDR
, RING_SIZE
,
544 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
545 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
549 mem_trace_memory_write_header_out(BLITTER_CONTEXT_ADDR
,
551 sizeof(blitter_context_init
),
552 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
553 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
556 /* BLITTER_CONTEXT */
557 data_out(blitter_context_init
, sizeof(blitter_context_init
));
560 mem_trace_memory_write_header_out(VIDEO_RING_ADDR
, RING_SIZE
,
561 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
562 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
566 mem_trace_memory_write_header_out(VIDEO_CONTEXT_ADDR
,
568 sizeof(video_context_init
),
569 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
570 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
574 data_out(video_context_init
, sizeof(video_context_init
));
576 register_write_out(HWS_PGA_RCSUNIT
, RENDER_CONTEXT_ADDR
);
577 register_write_out(HWS_PGA_VCSUNIT0
, VIDEO_CONTEXT_ADDR
);
578 register_write_out(HWS_PGA_BCSUNIT
, BLITTER_CONTEXT_ADDR
);
580 register_write_out(GFX_MODE_RCSUNIT
, 0x80008000 /* execlist enable */);
581 register_write_out(GFX_MODE_VCSUNIT0
, 0x80008000 /* execlist enable */);
582 register_write_out(GFX_MODE_BCSUNIT
, 0x80008000 /* execlist enable */);
585 static void write_legacy_header(void)
587 char app_name
[8 * 4];
589 int comment_len
, comment_dwords
, dwords
;
590 uint32_t entry
= 0x200003;
592 comment_len
= snprintf(comment
, sizeof(comment
), "PCI-ID=0x%x", device
);
593 comment_dwords
= ((comment_len
+ 3) / 4);
595 /* Start with a (required) version packet. */
596 dwords
= 13 + comment_dwords
;
597 dword_out(CMD_AUB_HEADER
| (dwords
- 2));
598 dword_out((4 << AUB_HEADER_MAJOR_SHIFT
) |
599 (0 << AUB_HEADER_MINOR_SHIFT
));
601 /* Next comes a 32-byte application name. */
602 strncpy(app_name
, program_invocation_short_name
, sizeof(app_name
));
603 app_name
[sizeof(app_name
) - 1] = 0;
604 data_out(app_name
, sizeof(app_name
));
606 dword_out(0); /* timestamp */
607 dword_out(0); /* timestamp */
608 dword_out(comment_len
);
609 data_out(comment
, comment_dwords
* 4);
611 /* Set up the GTT. The max we can handle is 64M */
612 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
| ((addr_bits
> 32 ? 6 : 5) - 2));
613 dword_out(AUB_TRACE_MEMTYPE_GTT_ENTRY
|
614 AUB_TRACE_TYPE_NOTYPE
| AUB_TRACE_OP_DATA_WRITE
);
615 dword_out(0); /* subtype */
616 dword_out(0); /* offset */
617 dword_out(gtt_size()); /* size */
620 for (uint32_t i
= 0; i
< NUM_PT_ENTRIES
; i
++) {
621 dword_out(entry
+ 0x1000 * i
);
628 * Break up large objects into multiple writes. Otherwise a 128kb VBO
629 * would overflow the 16 bits of size field in the packet header and
630 * everything goes badly after that.
633 aub_write_trace_block(uint32_t type
, void *virtual, uint32_t size
, uint64_t gtt_offset
)
636 uint32_t subtype
= 0;
637 static const char null_block
[8 * 4096];
639 for (uint32_t offset
= 0; offset
< size
; offset
+= block_size
) {
640 block_size
= min(8 * 4096, size
- offset
);
642 if (use_execlists()) {
643 block_size
= min(4096, block_size
);
644 mem_trace_memory_write_header_out(ppgtt_lookup(gtt_offset
+ offset
),
646 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
);
648 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
|
649 ((addr_bits
> 32 ? 6 : 5) - 2));
650 dword_out(AUB_TRACE_MEMTYPE_GTT
|
651 type
| AUB_TRACE_OP_DATA_WRITE
);
653 dword_out(gtt_offset
+ offset
);
654 dword_out(align_u32(block_size
, 4));
656 dword_out((gtt_offset
+ offset
) >> 32);
660 data_out(((char *) GET_PTR(virtual)) + offset
, block_size
);
662 data_out(null_block
, block_size
);
664 /* Pad to a multiple of 4 bytes. */
665 data_out(null_block
, -block_size
& 3);
670 write_reloc(void *p
, uint64_t v
)
672 if (addr_bits
> 32) {
673 /* From the Broadwell PRM Vol. 2a,
674 * MI_LOAD_REGISTER_MEM::MemoryAddress:
676 * "This field specifies the address of the memory
677 * location where the register value specified in the
678 * DWord above will read from. The address specifies
679 * the DWord location of the data. Range =
680 * GraphicsVirtualAddress[63:2] for a DWord register
681 * GraphicsAddress [63:48] are ignored by the HW and
682 * assumed to be in correct canonical form [63:48] ==
685 * In practice, this will always mean the top bits are zero
686 * because of the GTT size limitation of the aubdump tool.
688 const int shift
= 63 - 47;
689 *(uint64_t *)p
= (((int64_t)v
) << shift
) >> shift
;
696 aub_dump_execlist(uint64_t batch_offset
, int ring_flag
)
703 uint32_t control_reg
;
706 case I915_EXEC_DEFAULT
:
707 case I915_EXEC_RENDER
:
708 ring_addr
= RENDER_RING_ADDR
;
709 descriptor
= RENDER_CONTEXT_DESCRIPTOR
;
710 elsp_reg
= EXECLIST_SUBMITPORT_RCSUNIT
;
711 elsq_reg
= EXECLIST_SQ_CONTENTS0_RCSUNIT
;
712 status_reg
= EXECLIST_STATUS_RCSUNIT
;
713 control_reg
= EXECLIST_CONTROL_RCSUNIT
;
716 ring_addr
= VIDEO_RING_ADDR
;
717 descriptor
= VIDEO_CONTEXT_DESCRIPTOR
;
718 elsp_reg
= EXECLIST_SUBMITPORT_VCSUNIT0
;
719 elsq_reg
= EXECLIST_SQ_CONTENTS0_VCSUNIT0
;
720 status_reg
= EXECLIST_STATUS_VCSUNIT0
;
721 control_reg
= EXECLIST_CONTROL_VCSUNIT0
;
724 ring_addr
= BLITTER_RING_ADDR
;
725 descriptor
= BLITTER_CONTEXT_DESCRIPTOR
;
726 elsp_reg
= EXECLIST_SUBMITPORT_BCSUNIT
;
727 elsq_reg
= EXECLIST_SQ_CONTENTS0_BCSUNIT
;
728 status_reg
= EXECLIST_STATUS_BCSUNIT
;
729 control_reg
= EXECLIST_CONTROL_BCSUNIT
;
732 unreachable("unknown ring");
735 mem_trace_memory_write_header_out(ring_addr
, 16,
736 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
737 dword_out(AUB_MI_BATCH_BUFFER_START
| MI_BATCH_NON_SECURE_I965
| (3 - 2));
738 dword_out(batch_offset
& 0xFFFFFFFF);
739 dword_out(batch_offset
>> 32);
740 dword_out(0 /* MI_NOOP */);
742 mem_trace_memory_write_header_out(ring_addr
+ 8192 + 20, 4,
743 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
744 dword_out(0); /* RING_BUFFER_HEAD */
745 mem_trace_memory_write_header_out(ring_addr
+ 8192 + 28, 4,
746 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
747 dword_out(16); /* RING_BUFFER_TAIL */
749 if (devinfo
.gen
>= 11) {
750 register_write_out(elsq_reg
, descriptor
& 0xFFFFFFFF);
751 register_write_out(elsq_reg
+ sizeof(uint32_t), descriptor
>> 32);
752 register_write_out(control_reg
, 1);
754 register_write_out(elsp_reg
, 0);
755 register_write_out(elsp_reg
, 0);
756 register_write_out(elsp_reg
, descriptor
>> 32);
757 register_write_out(elsp_reg
, descriptor
& 0xFFFFFFFF);
760 dword_out(CMD_MEM_TRACE_REGISTER_POLL
| (5 + 1 - 1));
761 dword_out(status_reg
);
762 dword_out(AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
763 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
764 if (devinfo
.gen
>= 11) {
765 dword_out(0x00000001); /* mask lo */
766 dword_out(0x00000000); /* mask hi */
767 dword_out(0x00000001);
769 dword_out(0x00000010); /* mask lo */
770 dword_out(0x00000000); /* mask hi */
771 dword_out(0x00000000);
776 aub_dump_ringbuffer(uint64_t batch_offset
, uint64_t offset
, int ring_flag
)
778 uint32_t ringbuffer
[4096];
779 unsigned aub_mi_bbs_len
;
780 int ring
= AUB_TRACE_TYPE_RING_PRB0
; /* The default ring */
783 if (ring_flag
== I915_EXEC_BSD
)
784 ring
= AUB_TRACE_TYPE_RING_PRB1
;
785 else if (ring_flag
== I915_EXEC_BLT
)
786 ring
= AUB_TRACE_TYPE_RING_PRB2
;
788 /* Make a ring buffer to execute our batchbuffer. */
789 memset(ringbuffer
, 0, sizeof(ringbuffer
));
791 aub_mi_bbs_len
= addr_bits
> 32 ? 3 : 2;
792 ringbuffer
[ring_count
] = AUB_MI_BATCH_BUFFER_START
| (aub_mi_bbs_len
- 2);
793 write_reloc(&ringbuffer
[ring_count
+ 1], batch_offset
);
794 ring_count
+= aub_mi_bbs_len
;
796 /* Write out the ring. This appears to trigger execution of
797 * the ring in the simulator.
799 dword_out(CMD_AUB_TRACE_HEADER_BLOCK
|
800 ((addr_bits
> 32 ? 6 : 5) - 2));
801 dword_out(AUB_TRACE_MEMTYPE_GTT
| ring
| AUB_TRACE_OP_COMMAND_WRITE
);
802 dword_out(0); /* general/surface subtype */
804 dword_out(ring_count
* 4);
806 dword_out(offset
>> 32);
808 data_out(ringbuffer
, ring_count
* 4);
812 relocate_bo(struct bo
*bo
, const struct drm_i915_gem_execbuffer2
*execbuffer2
,
813 const struct drm_i915_gem_exec_object2
*obj
)
815 const struct drm_i915_gem_exec_object2
*exec_objects
=
816 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
817 const struct drm_i915_gem_relocation_entry
*relocs
=
818 (const struct drm_i915_gem_relocation_entry
*) (uintptr_t) obj
->relocs_ptr
;
822 relocated
= malloc(bo
->size
);
823 fail_if(relocated
== NULL
, "intel_aubdump: out of memory\n");
824 memcpy(relocated
, GET_PTR(bo
->map
), bo
->size
);
825 for (size_t i
= 0; i
< obj
->relocation_count
; i
++) {
826 fail_if(relocs
[i
].offset
>= bo
->size
, "intel_aubdump: reloc outside bo\n");
828 if (execbuffer2
->flags
& I915_EXEC_HANDLE_LUT
)
829 handle
= exec_objects
[relocs
[i
].target_handle
].handle
;
831 handle
= relocs
[i
].target_handle
;
833 write_reloc(((char *)relocated
) + relocs
[i
].offset
,
834 get_bo(handle
)->offset
+ relocs
[i
].delta
);
841 gem_ioctl(int fd
, unsigned long request
, void *argp
)
846 ret
= libc_ioctl(fd
, request
, argp
);
847 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
853 gem_mmap(int fd
, uint32_t handle
, uint64_t offset
, uint64_t size
)
855 struct drm_i915_gem_mmap mmap
= {
861 if (gem_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap
) == -1)
864 return (void *)(uintptr_t) mmap
.addr_ptr
;
868 gem_get_param(int fd
, uint32_t param
)
871 drm_i915_getparam_t gp
= {
876 if (gem_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gp
) == -1)
883 dump_execbuffer2(int fd
, struct drm_i915_gem_execbuffer2
*execbuffer2
)
885 struct drm_i915_gem_exec_object2
*exec_objects
=
886 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
887 uint32_t ring_flag
= execbuffer2
->flags
& I915_EXEC_RING_MASK
;
889 struct drm_i915_gem_exec_object2
*obj
;
890 struct bo
*bo
, *batch_bo
;
894 /* We can't do this at open time as we're not yet authenticated. */
896 device
= gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
897 fail_if(device
== 0 || devinfo
.gen
== 0, "failed to identify chipset\n");
899 if (devinfo
.gen
== 0) {
900 fail_if(!gen_get_device_info(device
, &devinfo
),
901 "failed to identify chipset=0x%x\n", device
);
903 addr_bits
= devinfo
.gen
>= 8 ? 48 : 32;
906 write_execlists_header();
908 write_legacy_header();
911 printf("[intel_aubdump: running, "
912 "output file %s, chipset id 0x%04x, gen %d]\n",
913 filename
, device
, devinfo
.gen
);
922 printf("Dumping execbuffer2:\n");
924 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
925 obj
= &exec_objects
[i
];
926 bo
= get_bo(obj
->handle
);
928 /* If bo->size == 0, this means they passed us an invalid
929 * buffer. The kernel will reject it and so should we.
933 printf("BO #%d is invalid!\n", obj
->handle
);
937 if (obj
->flags
& EXEC_OBJECT_PINNED
) {
938 bo
->offset
= obj
->offset
;
940 printf("BO #%d (%dB) pinned @ 0x%lx\n",
941 obj
->handle
, bo
->size
, bo
->offset
);
943 if (obj
->alignment
!= 0)
944 offset
= align_u32(offset
, obj
->alignment
);
947 printf("BO #%d (%dB) @ 0x%lx\n", obj
->handle
,
948 bo
->size
, bo
->offset
);
949 offset
= align_u32(offset
+ bo
->size
+ 4095, 4096);
952 if (bo
->map
== NULL
&& bo
->size
> 0)
953 bo
->map
= gem_mmap(fd
, obj
->handle
, 0, bo
->size
);
954 fail_if(bo
->map
== MAP_FAILED
, "intel_aubdump: bo mmap failed\n");
957 map_ppgtt(bo
->offset
, bo
->size
);
960 batch_index
= (execbuffer2
->flags
& I915_EXEC_BATCH_FIRST
) ? 0 :
961 execbuffer2
->buffer_count
- 1;
962 batch_bo
= get_bo(exec_objects
[batch_index
].handle
);
963 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
964 obj
= &exec_objects
[i
];
965 bo
= get_bo(obj
->handle
);
967 if (obj
->relocation_count
> 0)
968 data
= relocate_bo(bo
, execbuffer2
, obj
);
972 if (bo
== batch_bo
) {
973 aub_write_trace_block(AUB_TRACE_TYPE_BATCH
,
974 data
, bo
->size
, bo
->offset
);
976 aub_write_trace_block(AUB_TRACE_TYPE_NOTYPE
,
977 data
, bo
->size
, bo
->offset
);
983 if (use_execlists()) {
984 aub_dump_execlist(batch_bo
->offset
+
985 execbuffer2
->batch_start_offset
, ring_flag
);
987 /* Dump ring buffer */
988 aub_dump_ringbuffer(batch_bo
->offset
+
989 execbuffer2
->batch_start_offset
, offset
,
993 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
994 if (files
[i
] != NULL
)
998 if (device_override
&&
999 (execbuffer2
->flags
& I915_EXEC_FENCE_ARRAY
) != 0) {
1000 struct drm_i915_gem_exec_fence
*fences
=
1001 (void*)(uintptr_t)execbuffer2
->cliprects_ptr
;
1002 for (uint32_t i
= 0; i
< execbuffer2
->num_cliprects
; i
++) {
1003 if ((fences
[i
].flags
& I915_EXEC_FENCE_SIGNAL
) != 0) {
1004 struct drm_syncobj_array arg
= {
1005 .handles
= (uintptr_t)&fences
[i
].handle
,
1009 libc_ioctl(fd
, DRM_IOCTL_SYNCOBJ_SIGNAL
, &arg
);
1016 add_new_bo(int handle
, uint64_t size
, void *map
)
1018 struct bo
*bo
= &bos
[handle
];
1020 fail_if(handle
>= MAX_BO_COUNT
, "intel_aubdump: bo handle out of range\n");
1021 fail_if(size
== 0, "intel_aubdump: bo size is invalid\n");
1028 remove_bo(int handle
)
1030 struct bo
*bo
= get_bo(handle
);
1032 if (bo
->map
&& !IS_USERPTR(bo
->map
))
1033 munmap(bo
->map
, bo
->size
);
1038 __attribute__ ((visibility ("default"))) int
1044 return libc_close(fd
);
1048 launch_command(char *command
)
1051 char **args
= calloc(strlen(command
), sizeof(char *));
1052 char *iter
= command
;
1054 args
[i
++] = iter
= command
;
1056 while ((iter
= strstr(iter
, ",")) != NULL
) {
1062 if (pipe(fds
) == -1)
1068 fail_if(execvp(args
[0], args
) == -1,
1069 "intel_aubdump: failed to launch child command\n");
1074 return fdopen(fds
[1], "w");
1084 static bool initialized
= false;
1093 config
= fdopen(3, "r");
1094 while (fscanf(config
, "%m[^=]=%m[^\n]\n", &key
, &value
) != EOF
) {
1095 if (!strcmp(key
, "verbose")) {
1096 if (!strcmp(value
, "1")) {
1098 } else if (!strcmp(value
, "2")) {
1101 } else if (!strcmp(key
, "device")) {
1102 fail_if(sscanf(value
, "%i", &device
) != 1,
1103 "intel_aubdump: failed to parse device id '%s'",
1105 device_override
= true;
1106 } else if (!strcmp(key
, "file")) {
1107 filename
= strdup(value
);
1108 files
[0] = fopen(filename
, "w+");
1109 fail_if(files
[0] == NULL
,
1110 "intel_aubdump: failed to open file '%s'\n",
1112 } else if (!strcmp(key
, "command")) {
1113 files
[1] = launch_command(value
);
1114 fail_if(files
[1] == NULL
,
1115 "intel_aubdump: failed to launch command '%s'\n",
1118 fprintf(stderr
, "intel_aubdump: unknown option '%s'\n", key
);
1126 bos
= calloc(MAX_BO_COUNT
, sizeof(bos
[0]));
1127 fail_if(bos
== NULL
, "intel_aubdump: out of memory\n");
1130 __attribute__ ((visibility ("default"))) int
1131 ioctl(int fd
, unsigned long request
, ...)
1138 va_start(args
, request
);
1139 argp
= va_arg(args
, void *);
1142 if (_IOC_TYPE(request
) == DRM_IOCTL_BASE
&&
1143 drm_fd
!= fd
&& fstat(fd
, &buf
) == 0 &&
1144 (buf
.st_mode
& S_IFMT
) == S_IFCHR
&& major(buf
.st_rdev
) == DRM_MAJOR
) {
1147 printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd
);
1154 case DRM_IOCTL_I915_GETPARAM
: {
1155 struct drm_i915_getparam
*getparam
= argp
;
1157 if (device_override
&& getparam
->param
== I915_PARAM_CHIPSET_ID
) {
1158 *getparam
->value
= device
;
1162 ret
= libc_ioctl(fd
, request
, argp
);
1164 /* If the application looks up chipset_id
1165 * (they typically do), we'll piggy-back on
1166 * their ioctl and store the id for later
1168 if (getparam
->param
== I915_PARAM_CHIPSET_ID
)
1169 device
= *getparam
->value
;
1174 case DRM_IOCTL_I915_GEM_EXECBUFFER
: {
1177 fprintf(stderr
, "intel_aubdump: "
1178 "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
1181 return libc_ioctl(fd
, request
, argp
);
1184 case DRM_IOCTL_I915_GEM_EXECBUFFER2
:
1185 case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
: {
1186 dump_execbuffer2(fd
, argp
);
1187 if (device_override
)
1190 return libc_ioctl(fd
, request
, argp
);
1193 case DRM_IOCTL_I915_GEM_CREATE
: {
1194 struct drm_i915_gem_create
*create
= argp
;
1196 ret
= libc_ioctl(fd
, request
, argp
);
1198 add_new_bo(create
->handle
, create
->size
, NULL
);
1203 case DRM_IOCTL_I915_GEM_USERPTR
: {
1204 struct drm_i915_gem_userptr
*userptr
= argp
;
1206 ret
= libc_ioctl(fd
, request
, argp
);
1208 add_new_bo(userptr
->handle
, userptr
->user_size
,
1209 (void *) (uintptr_t) (userptr
->user_ptr
| USERPTR_FLAG
));
1213 case DRM_IOCTL_GEM_CLOSE
: {
1214 struct drm_gem_close
*close
= argp
;
1216 remove_bo(close
->handle
);
1218 return libc_ioctl(fd
, request
, argp
);
1221 case DRM_IOCTL_GEM_OPEN
: {
1222 struct drm_gem_open
*open
= argp
;
1224 ret
= libc_ioctl(fd
, request
, argp
);
1226 add_new_bo(open
->handle
, open
->size
, NULL
);
1231 case DRM_IOCTL_PRIME_FD_TO_HANDLE
: {
1232 struct drm_prime_handle
*prime
= argp
;
1234 ret
= libc_ioctl(fd
, request
, argp
);
1238 size
= lseek(prime
->fd
, 0, SEEK_END
);
1239 fail_if(size
== -1, "intel_aubdump: failed to get prime bo size\n");
1240 add_new_bo(prime
->handle
, size
, NULL
);
1247 return libc_ioctl(fd
, request
, argp
);
1250 return libc_ioctl(fd
, request
, argp
);
1257 libc_close
= dlsym(RTLD_NEXT
, "close");
1258 libc_ioctl
= dlsym(RTLD_NEXT
, "ioctl");
1259 fail_if(libc_close
== NULL
|| libc_ioctl
== NULL
,
1260 "intel_aubdump: failed to get libc ioctl or close\n");
1264 close_init_helper(int fd
)
1267 return libc_close(fd
);
1271 ioctl_init_helper(int fd
, unsigned long request
, ...)
1276 va_start(args
, request
);
1277 argp
= va_arg(args
, void *);
1281 return libc_ioctl(fd
, request
, argp
);
1284 static void __attribute__ ((destructor
))
1288 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
1289 if (files
[i
] != NULL
)