2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
35 #include <sys/ioctl.h>
43 #include "intel_aub.h"
45 #include "dev/gen_device_info.h"
46 #include "util/macros.h"
49 #define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
52 #define MI_LOAD_REGISTER_IMM_n(n) ((0x22 << 23) | (2 * (n) - 1))
53 #define MI_LRI_FORCE_POSTED (1<<12)
55 #define MI_BATCH_NON_SECURE_I965 (1 << 8)
57 #define MI_BATCH_BUFFER_END (0xA << 23)
59 #define min(a, b) ({ \
60 __typeof(a) _a = (a); \
61 __typeof(b) _b = (b); \
65 #define max(a, b) ({ \
66 __typeof(a) _a = (a); \
67 __typeof(b) _b = (b); \
71 #define HWS_PGA_RCSUNIT 0x02080
72 #define HWS_PGA_VCSUNIT0 0x12080
73 #define HWS_PGA_BCSUNIT 0x22080
75 #define GFX_MODE_RCSUNIT 0x0229c
76 #define GFX_MODE_VCSUNIT0 0x1229c
77 #define GFX_MODE_BCSUNIT 0x2229c
79 #define EXECLIST_SUBMITPORT_RCSUNIT 0x02230
80 #define EXECLIST_SUBMITPORT_VCSUNIT0 0x12230
81 #define EXECLIST_SUBMITPORT_BCSUNIT 0x22230
83 #define EXECLIST_STATUS_RCSUNIT 0x02234
84 #define EXECLIST_STATUS_VCSUNIT0 0x12234
85 #define EXECLIST_STATUS_BCSUNIT 0x22234
87 #define EXECLIST_SQ_CONTENTS0_RCSUNIT 0x02510
88 #define EXECLIST_SQ_CONTENTS0_VCSUNIT0 0x12510
89 #define EXECLIST_SQ_CONTENTS0_BCSUNIT 0x22510
91 #define EXECLIST_CONTROL_RCSUNIT 0x02550
92 #define EXECLIST_CONTROL_VCSUNIT0 0x12550
93 #define EXECLIST_CONTROL_BCSUNIT 0x22550
95 #define MEMORY_MAP_SIZE (64 /* MiB */ * 1024 * 1024)
98 #define GEN8_PTE_SIZE 8
100 #define NUM_PT_ENTRIES (ALIGN(MEMORY_MAP_SIZE, 4096) / 4096)
101 #define PT_SIZE ALIGN(NUM_PT_ENTRIES * GEN8_PTE_SIZE, 4096)
103 #define RING_SIZE (1 * 4096)
104 #define PPHWSP_SIZE (1 * 4096)
105 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * 4096)
106 #define GEN10_LR_CONTEXT_RENDER_SIZE (19 * 4096)
107 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * 4096)
108 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * 4096)
109 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * 4096)
112 #define STATIC_GGTT_MAP_START 0
114 #define RENDER_RING_ADDR STATIC_GGTT_MAP_START
115 #define RENDER_CONTEXT_ADDR (RENDER_RING_ADDR + RING_SIZE)
117 #define BLITTER_RING_ADDR (RENDER_CONTEXT_ADDR + PPHWSP_SIZE + GEN10_LR_CONTEXT_RENDER_SIZE)
118 #define BLITTER_CONTEXT_ADDR (BLITTER_RING_ADDR + RING_SIZE)
120 #define VIDEO_RING_ADDR (BLITTER_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
121 #define VIDEO_CONTEXT_ADDR (VIDEO_RING_ADDR + RING_SIZE)
123 #define STATIC_GGTT_MAP_END (VIDEO_CONTEXT_ADDR + PPHWSP_SIZE + GEN8_LR_CONTEXT_OTHER_SIZE)
124 #define STATIC_GGTT_MAP_SIZE (STATIC_GGTT_MAP_END - STATIC_GGTT_MAP_START)
126 #define PML4_PHYS_ADDR ((uint64_t)(STATIC_GGTT_MAP_END))
128 #define CONTEXT_FLAGS (0x339) /* Normal Priority | L3-LLC Coherency |
130 * Legacy Context with 64 bit VA support |
134 #define RENDER_CONTEXT_DESCRIPTOR ((uint64_t)1 << 62 | RENDER_CONTEXT_ADDR | CONTEXT_FLAGS)
135 #define BLITTER_CONTEXT_DESCRIPTOR ((uint64_t)2 << 62 | BLITTER_CONTEXT_ADDR | CONTEXT_FLAGS)
136 #define VIDEO_CONTEXT_DESCRIPTOR ((uint64_t)3 << 62 | VIDEO_CONTEXT_ADDR | CONTEXT_FLAGS)
138 static const uint32_t render_context_init
[GEN9_LR_CONTEXT_RENDER_SIZE
/ /* Choose the largest */
139 sizeof(uint32_t)] = {
141 MI_LOAD_REGISTER_IMM_n(14) | MI_LRI_FORCE_POSTED
,
142 0x2244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
143 0x2034 /* RING_HEAD */, 0,
144 0x2030 /* RING_TAIL */, 0,
145 0x2038 /* RING_BUFFER_START */, RENDER_RING_ADDR
,
146 0x203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
147 0x2168 /* BB_HEAD_U */, 0,
148 0x2140 /* BB_HEAD_L */, 0,
149 0x2110 /* BB_STATE */, 0,
150 0x211C /* SECOND_BB_HEAD_U */, 0,
151 0x2114 /* SECOND_BB_HEAD_L */, 0,
152 0x2118 /* SECOND_BB_STATE */, 0,
153 0x21C0 /* BB_PER_CTX_PTR */, 0,
154 0x21C4 /* RCS_INDIRECT_CTX */, 0,
155 0x21C8 /* RCS_INDIRECT_CTX_OFFSET */, 0,
160 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
161 0x23A8 /* CTX_TIMESTAMP */, 0,
162 0x228C /* PDP3_UDW */, 0,
163 0x2288 /* PDP3_LDW */, 0,
164 0x2284 /* PDP2_UDW */, 0,
165 0x2280 /* PDP2_LDW */, 0,
166 0x227C /* PDP1_UDW */, 0,
167 0x2278 /* PDP1_LDW */, 0,
168 0x2274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
169 0x2270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
171 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
174 MI_LOAD_REGISTER_IMM_n(1),
175 0x20C8 /* R_PWR_CLK_STATE */, 0x7FFFFFFF,
179 static const uint32_t blitter_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
180 sizeof(uint32_t)] = {
182 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
183 0x22244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
184 0x22034 /* RING_HEAD */, 0,
185 0x22030 /* RING_TAIL */, 0,
186 0x22038 /* RING_BUFFER_START */, BLITTER_RING_ADDR
,
187 0x2203C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
188 0x22168 /* BB_HEAD_U */, 0,
189 0x22140 /* BB_HEAD_L */, 0,
190 0x22110 /* BB_STATE */, 0,
191 0x2211C /* SECOND_BB_HEAD_U */, 0,
192 0x22114 /* SECOND_BB_HEAD_L */, 0,
193 0x22118 /* SECOND_BB_STATE */, 0,
195 0, 0, 0, 0, 0, 0, 0, 0,
198 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
199 0x223A8 /* CTX_TIMESTAMP */, 0,
200 0x2228C /* PDP3_UDW */, 0,
201 0x22288 /* PDP3_LDW */, 0,
202 0x22284 /* PDP2_UDW */, 0,
203 0x22280 /* PDP2_LDW */, 0,
204 0x2227C /* PDP1_UDW */, 0,
205 0x22278 /* PDP1_LDW */, 0,
206 0x22274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
207 0x22270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
209 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
214 static const uint32_t video_context_init
[GEN8_LR_CONTEXT_OTHER_SIZE
/
215 sizeof(uint32_t)] = {
217 MI_LOAD_REGISTER_IMM_n(11) | MI_LRI_FORCE_POSTED
,
218 0x1C244 /* CONTEXT_CONTROL */, 0x90009 /* Inhibit Synchronous Context Switch | Engine Context Restore Inhibit */,
219 0x1C034 /* RING_HEAD */, 0,
220 0x1C030 /* RING_TAIL */, 0,
221 0x1C038 /* RING_BUFFER_START */, VIDEO_RING_ADDR
,
222 0x1C03C /* RING_BUFFER_CONTROL */, (RING_SIZE
- 4096) | 1 /* Buffer Length | Ring Buffer Enable */,
223 0x1C168 /* BB_HEAD_U */, 0,
224 0x1C140 /* BB_HEAD_L */, 0,
225 0x1C110 /* BB_STATE */, 0,
226 0x1C11C /* SECOND_BB_HEAD_U */, 0,
227 0x1C114 /* SECOND_BB_HEAD_L */, 0,
228 0x1C118 /* SECOND_BB_STATE */, 0,
230 0, 0, 0, 0, 0, 0, 0, 0,
233 MI_LOAD_REGISTER_IMM_n(9) | MI_LRI_FORCE_POSTED
,
234 0x1C3A8 /* CTX_TIMESTAMP */, 0,
235 0x1C28C /* PDP3_UDW */, 0,
236 0x1C288 /* PDP3_LDW */, 0,
237 0x1C284 /* PDP2_UDW */, 0,
238 0x1C280 /* PDP2_LDW */, 0,
239 0x1C27C /* PDP1_UDW */, 0,
240 0x1C278 /* PDP1_LDW */, 0,
241 0x1C274 /* PDP0_UDW */, PML4_PHYS_ADDR
>> 32,
242 0x1C270 /* PDP0_LDW */, PML4_PHYS_ADDR
,
244 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
249 static int close_init_helper(int fd
);
250 static int ioctl_init_helper(int fd
, unsigned long request
, ...);
252 static int (*libc_close
)(int fd
) = close_init_helper
;
253 static int (*libc_ioctl
)(int fd
, unsigned long request
, ...) = ioctl_init_helper
;
255 static int drm_fd
= -1;
256 static char *filename
= NULL
;
257 static FILE *files
[2] = { NULL
, NULL
};
258 static int verbose
= 0;
259 static bool device_override
;
261 #define MAX_BO_COUNT 64 * 1024
269 static struct bo
*bos
;
271 #define DRM_MAJOR 226
273 /* We set bit 0 in the map pointer for userptr BOs so we know not to
274 * munmap them on DRM_IOCTL_GEM_CLOSE.
276 #define USERPTR_FLAG 1
277 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
278 #define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
/* printf-style assertion helper: if `cond` is non-zero, print the formatted
 * message to stderr and raise SIGTRAP so a debugger stops at the failure
 * point (without a debugger the default disposition kills the process).
 *
 * Fixes vs. presented code: the `va_list` declaration and the early return
 * for the non-failing case were missing, and `va_start` had no matching
 * `va_end` (required by the C standard for every va_start).
 */
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
   va_list args;

   if (!cond)
      return;

   va_start(args, format);
   vfprintf(stderr, format, args);
   va_end(args);

   raise(SIGTRAP);
}
296 get_bo(uint32_t handle
)
300 fail_if(handle
>= MAX_BO_COUNT
, "bo handle too large\n");
/* Round `v` up to the next multiple of `a`.  `a` must be a power of two
 * for the mask trick below to be valid. */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   const uint32_t mask = a - 1;
   return (v + mask) & ~mask;
}
312 struct aub_ppgtt_table
{
314 struct aub_ppgtt_table
*subtables
[512];
318 aub_ppgtt_table_finish(struct aub_ppgtt_table
*table
)
320 for (unsigned i
= 0; i
< ARRAY_SIZE(table
->subtables
); i
++) {
321 aub_ppgtt_table_finish(table
->subtables
[i
]);
322 free(table
->subtables
[i
]);
329 /* Set if you want extra logging */
330 FILE *verbose_log_file
;
333 struct gen_device_info devinfo
;
337 struct aub_ppgtt_table pml4
;
341 aub_file_init(struct aub_file
*aub
, FILE *file
, uint16_t pci_id
)
343 memset(aub
, 0, sizeof(*aub
));
346 aub
->pci_id
= pci_id
;
347 fail_if(!gen_get_device_info(pci_id
, &aub
->devinfo
),
348 "failed to identify chipset=0x%x\n", pci_id
);
349 aub
->addr_bits
= aub
->devinfo
.gen
>= 8 ? 48 : 32;
351 aub
->pml4
.phys_addr
= PML4_PHYS_ADDR
;
355 aub_file_finish(struct aub_file
*aub
)
357 aub_ppgtt_table_finish(&aub
->pml4
);
361 static inline bool aub_use_execlists(const struct aub_file
*aub
)
363 return aub
->devinfo
.gen
>= 8;
367 data_out(struct aub_file
*aub
, const void *data
, size_t size
)
372 fail_if(fwrite(data
, 1, size
, aub
->file
) == 0,
373 "Writing to output failed\n");
/* Emit a single 32-bit word to the AUB stream. */
static void
dword_out(struct aub_file *aub, uint32_t data)
{
   const uint32_t word = data;
   data_out(aub, &word, sizeof(word));
}
383 aub_gtt_size(struct aub_file
*aub
)
385 return NUM_PT_ENTRIES
* (aub
->addr_bits
> 32 ? GEN8_PTE_SIZE
: PTE_SIZE
);
389 mem_trace_memory_write_header_out(struct aub_file
*aub
, uint64_t addr
,
390 uint32_t len
, uint32_t addr_space
)
392 uint32_t dwords
= ALIGN(len
, sizeof(uint32_t)) / sizeof(uint32_t);
394 dword_out(aub
, CMD_MEM_TRACE_MEMORY_WRITE
| (5 + dwords
- 1));
395 dword_out(aub
, addr
& 0xFFFFFFFF); /* addr lo */
396 dword_out(aub
, addr
>> 32); /* addr hi */
397 dword_out(aub
, addr_space
); /* gtt */
402 register_write_out(struct aub_file
*aub
, uint32_t addr
, uint32_t value
)
406 dword_out(aub
, CMD_MEM_TRACE_REGISTER_WRITE
| (5 + dwords
- 1));
407 dword_out(aub
, addr
);
408 dword_out(aub
, AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
409 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
410 dword_out(aub
, 0xFFFFFFFF); /* mask lo */
411 dword_out(aub
, 0x00000000); /* mask hi */
412 dword_out(aub
, value
);
416 populate_ppgtt_table(struct aub_file
*aub
, struct aub_ppgtt_table
*table
,
417 int start
, int end
, int level
)
419 static uint64_t phys_addrs_allocator
= (PML4_PHYS_ADDR
>> 12) + 1;
420 uint64_t entries
[512] = {0};
421 int dirty_start
= 512, dirty_end
= 0;
423 if (aub
->verbose_log_file
) {
424 fprintf(aub
->verbose_log_file
,
425 " PPGTT (0x%016" PRIx64
"), lvl %d, start: %x, end: %x\n",
426 table
->phys_addr
, level
, start
, end
);
429 for (int i
= start
; i
<= end
; i
++) {
430 if (!table
->subtables
[i
]) {
431 dirty_start
= min(dirty_start
, i
);
432 dirty_end
= max(dirty_end
, i
);
434 table
->subtables
[i
] =
435 (void *)(phys_addrs_allocator
++ << 12);
436 if (aub
->verbose_log_file
) {
437 fprintf(aub
->verbose_log_file
,
438 " Adding entry: %x, phys_addr: 0x%016" PRIx64
"\n",
439 i
, (uint64_t)table
->subtables
[i
]);
442 table
->subtables
[i
] =
443 calloc(1, sizeof(struct aub_ppgtt_table
));
444 table
->subtables
[i
]->phys_addr
=
445 phys_addrs_allocator
++ << 12;
446 if (aub
->verbose_log_file
) {
447 fprintf(aub
->verbose_log_file
,
448 " Adding entry: %x, phys_addr: 0x%016" PRIx64
"\n",
449 i
, table
->subtables
[i
]->phys_addr
);
453 entries
[i
] = 3 /* read/write | present */ |
454 (level
== 1 ? (uint64_t)table
->subtables
[i
] :
455 table
->subtables
[i
]->phys_addr
);
458 if (dirty_start
<= dirty_end
) {
459 uint64_t write_addr
= table
->phys_addr
+ dirty_start
*
461 uint64_t write_size
= (dirty_end
- dirty_start
+ 1) *
463 mem_trace_memory_write_header_out(aub
, write_addr
, write_size
,
464 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
);
465 data_out(aub
, entries
+ dirty_start
, write_size
);
470 aub_map_ppgtt(struct aub_file
*aub
, uint64_t start
, uint64_t size
)
472 uint64_t l4_start
= start
& 0xff8000000000;
473 uint64_t l4_end
= ((start
+ size
- 1) | 0x007fffffffff) & 0xffffffffffff;
475 #define L4_index(addr) (((addr) >> 39) & 0x1ff)
476 #define L3_index(addr) (((addr) >> 30) & 0x1ff)
477 #define L2_index(addr) (((addr) >> 21) & 0x1ff)
478 #define L1_index(addr) (((addr) >> 12) & 0x1ff)
480 #define L3_table(addr) (aub->pml4.subtables[L4_index(addr)])
481 #define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
482 #define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])
484 if (aub
->verbose_log_file
) {
485 fprintf(aub
->verbose_log_file
,
486 " Mapping PPGTT address: 0x%" PRIx64
", size: %" PRIu64
"\n",
490 populate_ppgtt_table(aub
, &aub
->pml4
, L4_index(l4_start
), L4_index(l4_end
), 4);
492 for (uint64_t l4
= l4_start
; l4
< l4_end
; l4
+= (1ULL << 39)) {
493 uint64_t l3_start
= max(l4
, start
& 0xffffc0000000);
494 uint64_t l3_end
= min(l4
+ (1ULL << 39) - 1,
495 ((start
+ size
- 1) | 0x00003fffffff) & 0xffffffffffff);
496 uint64_t l3_start_idx
= L3_index(l3_start
);
497 uint64_t l3_end_idx
= L3_index(l3_end
);
499 populate_ppgtt_table(aub
, L3_table(l4
), l3_start_idx
, l3_end_idx
, 3);
501 for (uint64_t l3
= l3_start
; l3
< l3_end
; l3
+= (1ULL << 30)) {
502 uint64_t l2_start
= max(l3
, start
& 0xffffffe00000);
503 uint64_t l2_end
= min(l3
+ (1ULL << 30) - 1,
504 ((start
+ size
- 1) | 0x0000001fffff) & 0xffffffffffff);
505 uint64_t l2_start_idx
= L2_index(l2_start
);
506 uint64_t l2_end_idx
= L2_index(l2_end
);
508 populate_ppgtt_table(aub
, L2_table(l3
), l2_start_idx
, l2_end_idx
, 2);
510 for (uint64_t l2
= l2_start
; l2
< l2_end
; l2
+= (1ULL << 21)) {
511 uint64_t l1_start
= max(l2
, start
& 0xfffffffff000);
512 uint64_t l1_end
= min(l2
+ (1ULL << 21) - 1,
513 ((start
+ size
- 1) | 0x000000000fff) & 0xffffffffffff);
514 uint64_t l1_start_idx
= L1_index(l1_start
);
515 uint64_t l1_end_idx
= L1_index(l1_end
);
517 populate_ppgtt_table(aub
, L1_table(l2
), l1_start_idx
, l1_end_idx
, 1);
524 ppgtt_lookup(struct aub_file
*aub
, uint64_t ppgtt_addr
)
526 return (uint64_t)L1_table(ppgtt_addr
)->subtables
[L1_index(ppgtt_addr
)];
530 write_execlists_header(struct aub_file
*aub
, const char *name
)
532 char app_name
[8 * 4];
533 int app_name_len
, dwords
;
536 snprintf(app_name
, sizeof(app_name
), "PCI-ID=0x%X %s",
538 app_name_len
= ALIGN(app_name_len
, sizeof(uint32_t));
540 dwords
= 5 + app_name_len
/ sizeof(uint32_t);
541 dword_out(aub
, CMD_MEM_TRACE_VERSION
| (dwords
- 1));
542 dword_out(aub
, AUB_MEM_TRACE_VERSION_FILE_VERSION
);
543 dword_out(aub
, aub
->devinfo
.simulator_id
<< AUB_MEM_TRACE_VERSION_DEVICE_SHIFT
);
544 dword_out(aub
, 0); /* version */
545 dword_out(aub
, 0); /* version */
546 data_out(aub
, app_name
, app_name_len
);
549 uint32_t ggtt_ptes
= STATIC_GGTT_MAP_SIZE
>> 12;
551 mem_trace_memory_write_header_out(aub
, STATIC_GGTT_MAP_START
>> 12,
552 ggtt_ptes
* GEN8_PTE_SIZE
,
553 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY
);
554 for (uint32_t i
= 0; i
< ggtt_ptes
; i
++) {
555 dword_out(aub
, 1 + 0x1000 * i
+ STATIC_GGTT_MAP_START
);
560 mem_trace_memory_write_header_out(aub
, RENDER_RING_ADDR
, RING_SIZE
,
561 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
562 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
566 mem_trace_memory_write_header_out(aub
, RENDER_CONTEXT_ADDR
,
568 sizeof(render_context_init
),
569 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
570 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
574 data_out(aub
, render_context_init
, sizeof(render_context_init
));
577 mem_trace_memory_write_header_out(aub
, BLITTER_RING_ADDR
, RING_SIZE
,
578 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
579 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
583 mem_trace_memory_write_header_out(aub
, BLITTER_CONTEXT_ADDR
,
585 sizeof(blitter_context_init
),
586 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
587 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
590 /* BLITTER_CONTEXT */
591 data_out(aub
, blitter_context_init
, sizeof(blitter_context_init
));
594 mem_trace_memory_write_header_out(aub
, VIDEO_RING_ADDR
, RING_SIZE
,
595 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
596 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
600 mem_trace_memory_write_header_out(aub
, VIDEO_CONTEXT_ADDR
,
602 sizeof(video_context_init
),
603 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
604 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
608 data_out(aub
, video_context_init
, sizeof(video_context_init
));
610 register_write_out(aub
, HWS_PGA_RCSUNIT
, RENDER_CONTEXT_ADDR
);
611 register_write_out(aub
, HWS_PGA_VCSUNIT0
, VIDEO_CONTEXT_ADDR
);
612 register_write_out(aub
, HWS_PGA_BCSUNIT
, BLITTER_CONTEXT_ADDR
);
614 register_write_out(aub
, GFX_MODE_RCSUNIT
, 0x80008000 /* execlist enable */);
615 register_write_out(aub
, GFX_MODE_VCSUNIT0
, 0x80008000 /* execlist enable */);
616 register_write_out(aub
, GFX_MODE_BCSUNIT
, 0x80008000 /* execlist enable */);
619 static void write_legacy_header(struct aub_file
*aub
, const char *name
)
621 char app_name
[8 * 4];
623 int comment_len
, comment_dwords
, dwords
;
624 uint32_t entry
= 0x200003;
626 comment_len
= snprintf(comment
, sizeof(comment
), "PCI-ID=0x%x", aub
->pci_id
);
627 comment_dwords
= ((comment_len
+ 3) / 4);
629 /* Start with a (required) version packet. */
630 dwords
= 13 + comment_dwords
;
631 dword_out(aub
, CMD_AUB_HEADER
| (dwords
- 2));
632 dword_out(aub
, (4 << AUB_HEADER_MAJOR_SHIFT
) |
633 (0 << AUB_HEADER_MINOR_SHIFT
));
635 /* Next comes a 32-byte application name. */
636 strncpy(app_name
, program_invocation_short_name
, sizeof(app_name
));
637 app_name
[sizeof(app_name
) - 1] = 0;
638 data_out(aub
, app_name
, sizeof(app_name
));
640 dword_out(aub
, 0); /* timestamp */
641 dword_out(aub
, 0); /* timestamp */
642 dword_out(aub
, comment_len
);
643 data_out(aub
, comment
, comment_dwords
* 4);
645 /* Set up the GTT. The max we can handle is 64M */
646 dword_out(aub
, CMD_AUB_TRACE_HEADER_BLOCK
|
647 ((aub
->addr_bits
> 32 ? 6 : 5) - 2));
648 dword_out(aub
, AUB_TRACE_MEMTYPE_GTT_ENTRY
|
649 AUB_TRACE_TYPE_NOTYPE
| AUB_TRACE_OP_DATA_WRITE
);
650 dword_out(aub
, 0); /* subtype */
651 dword_out(aub
, 0); /* offset */
652 dword_out(aub
, aub_gtt_size(aub
)); /* size */
653 if (aub
->addr_bits
> 32)
655 for (uint32_t i
= 0; i
< NUM_PT_ENTRIES
; i
++) {
656 dword_out(aub
, entry
+ 0x1000 * i
);
657 if (aub
->addr_bits
> 32)
/* Write the appropriate AUB file header for the device's submission model.
 *
 * Fix vs. presented code: the `else` was missing, so execlist-capable
 * devices would write BOTH the execlist and the legacy header, producing a
 * corrupt file.  Exactly one header style must be emitted.
 */
static void
aub_write_header(struct aub_file *aub, const char *app_name)
{
   if (aub_use_execlists(aub))
      write_execlists_header(aub, app_name);
   else
      write_legacy_header(aub, app_name);
}
672 * Break up large objects into multiple writes. Otherwise a 128kb VBO
673 * would overflow the 16 bits of size field in the packet header and
674 * everything goes badly after that.
677 aub_write_trace_block(struct aub_file
*aub
,
678 uint32_t type
, void *virtual,
679 uint32_t size
, uint64_t gtt_offset
)
682 uint32_t subtype
= 0;
683 static const char null_block
[8 * 4096];
685 for (uint32_t offset
= 0; offset
< size
; offset
+= block_size
) {
686 block_size
= min(8 * 4096, size
- offset
);
688 if (aub_use_execlists(aub
)) {
689 block_size
= min(4096, block_size
);
690 mem_trace_memory_write_header_out(aub
,
691 ppgtt_lookup(aub
, gtt_offset
+ offset
),
693 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
);
695 dword_out(aub
, CMD_AUB_TRACE_HEADER_BLOCK
|
696 ((aub
->addr_bits
> 32 ? 6 : 5) - 2));
697 dword_out(aub
, AUB_TRACE_MEMTYPE_GTT
|
698 type
| AUB_TRACE_OP_DATA_WRITE
);
699 dword_out(aub
, subtype
);
700 dword_out(aub
, gtt_offset
+ offset
);
701 dword_out(aub
, align_u32(block_size
, 4));
702 if (aub
->addr_bits
> 32)
703 dword_out(aub
, (gtt_offset
+ offset
) >> 32);
707 data_out(aub
, ((char *) virtual) + offset
, block_size
);
709 data_out(aub
, null_block
, block_size
);
711 /* Pad to a multiple of 4 bytes. */
712 data_out(aub
, null_block
, -block_size
& 3);
717 aub_write_reloc(const struct gen_device_info
*devinfo
, void *p
, uint64_t v
)
719 if (devinfo
->gen
>= 8) {
720 /* From the Broadwell PRM Vol. 2a,
721 * MI_LOAD_REGISTER_MEM::MemoryAddress:
723 * "This field specifies the address of the memory
724 * location where the register value specified in the
725 * DWord above will read from. The address specifies
726 * the DWord location of the data. Range =
727 * GraphicsVirtualAddress[63:2] for a DWord register
728 * GraphicsAddress [63:48] are ignored by the HW and
729 * assumed to be in correct canonical form [63:48] ==
732 * In practice, this will always mean the top bits are zero
733 * because of the GTT size limitation of the aubdump tool.
735 const int shift
= 63 - 47;
736 *(uint64_t *)p
= (((int64_t)v
) << shift
) >> shift
;
743 aub_dump_execlist(struct aub_file
*aub
, uint64_t batch_offset
, int ring_flag
)
750 uint32_t control_reg
;
753 case I915_EXEC_DEFAULT
:
754 case I915_EXEC_RENDER
:
755 ring_addr
= RENDER_RING_ADDR
;
756 descriptor
= RENDER_CONTEXT_DESCRIPTOR
;
757 elsp_reg
= EXECLIST_SUBMITPORT_RCSUNIT
;
758 elsq_reg
= EXECLIST_SQ_CONTENTS0_RCSUNIT
;
759 status_reg
= EXECLIST_STATUS_RCSUNIT
;
760 control_reg
= EXECLIST_CONTROL_RCSUNIT
;
763 ring_addr
= VIDEO_RING_ADDR
;
764 descriptor
= VIDEO_CONTEXT_DESCRIPTOR
;
765 elsp_reg
= EXECLIST_SUBMITPORT_VCSUNIT0
;
766 elsq_reg
= EXECLIST_SQ_CONTENTS0_VCSUNIT0
;
767 status_reg
= EXECLIST_STATUS_VCSUNIT0
;
768 control_reg
= EXECLIST_CONTROL_VCSUNIT0
;
771 ring_addr
= BLITTER_RING_ADDR
;
772 descriptor
= BLITTER_CONTEXT_DESCRIPTOR
;
773 elsp_reg
= EXECLIST_SUBMITPORT_BCSUNIT
;
774 elsq_reg
= EXECLIST_SQ_CONTENTS0_BCSUNIT
;
775 status_reg
= EXECLIST_STATUS_BCSUNIT
;
776 control_reg
= EXECLIST_CONTROL_BCSUNIT
;
779 unreachable("unknown ring");
782 mem_trace_memory_write_header_out(aub
, ring_addr
, 16,
783 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
784 dword_out(aub
, AUB_MI_BATCH_BUFFER_START
| MI_BATCH_NON_SECURE_I965
| (3 - 2));
785 dword_out(aub
, batch_offset
& 0xFFFFFFFF);
786 dword_out(aub
, batch_offset
>> 32);
787 dword_out(aub
, 0 /* MI_NOOP */);
789 mem_trace_memory_write_header_out(aub
, ring_addr
+ 8192 + 20, 4,
790 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
791 dword_out(aub
, 0); /* RING_BUFFER_HEAD */
792 mem_trace_memory_write_header_out(aub
, ring_addr
+ 8192 + 28, 4,
793 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
);
794 dword_out(aub
, 16); /* RING_BUFFER_TAIL */
796 if (aub
->devinfo
.gen
>= 11) {
797 register_write_out(aub
, elsq_reg
, descriptor
& 0xFFFFFFFF);
798 register_write_out(aub
, elsq_reg
+ sizeof(uint32_t), descriptor
>> 32);
799 register_write_out(aub
, control_reg
, 1);
801 register_write_out(aub
, elsp_reg
, 0);
802 register_write_out(aub
, elsp_reg
, 0);
803 register_write_out(aub
, elsp_reg
, descriptor
>> 32);
804 register_write_out(aub
, elsp_reg
, descriptor
& 0xFFFFFFFF);
807 dword_out(aub
, CMD_MEM_TRACE_REGISTER_POLL
| (5 + 1 - 1));
808 dword_out(aub
, status_reg
);
809 dword_out(aub
, AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
810 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
811 if (aub
->devinfo
.gen
>= 11) {
812 dword_out(aub
, 0x00000001); /* mask lo */
813 dword_out(aub
, 0x00000000); /* mask hi */
814 dword_out(aub
, 0x00000001);
816 dword_out(aub
, 0x00000010); /* mask lo */
817 dword_out(aub
, 0x00000000); /* mask hi */
818 dword_out(aub
, 0x00000000);
823 aub_dump_ringbuffer(struct aub_file
*aub
, uint64_t batch_offset
,
824 uint64_t offset
, int ring_flag
)
826 uint32_t ringbuffer
[4096];
827 unsigned aub_mi_bbs_len
;
828 int ring
= AUB_TRACE_TYPE_RING_PRB0
; /* The default ring */
831 if (ring_flag
== I915_EXEC_BSD
)
832 ring
= AUB_TRACE_TYPE_RING_PRB1
;
833 else if (ring_flag
== I915_EXEC_BLT
)
834 ring
= AUB_TRACE_TYPE_RING_PRB2
;
836 /* Make a ring buffer to execute our batchbuffer. */
837 memset(ringbuffer
, 0, sizeof(ringbuffer
));
839 aub_mi_bbs_len
= aub
->addr_bits
> 32 ? 3 : 2;
840 ringbuffer
[ring_count
] = AUB_MI_BATCH_BUFFER_START
| (aub_mi_bbs_len
- 2);
841 aub_write_reloc(&aub
->devinfo
, &ringbuffer
[ring_count
+ 1], batch_offset
);
842 ring_count
+= aub_mi_bbs_len
;
844 /* Write out the ring. This appears to trigger execution of
845 * the ring in the simulator.
847 dword_out(aub
, CMD_AUB_TRACE_HEADER_BLOCK
|
848 ((aub
->addr_bits
> 32 ? 6 : 5) - 2));
849 dword_out(aub
, AUB_TRACE_MEMTYPE_GTT
| ring
| AUB_TRACE_OP_COMMAND_WRITE
);
850 dword_out(aub
, 0); /* general/surface subtype */
851 dword_out(aub
, offset
);
852 dword_out(aub
, ring_count
* 4);
853 if (aub
->addr_bits
> 32)
854 dword_out(aub
, offset
>> 32);
856 data_out(aub
, ringbuffer
, ring_count
* 4);
860 aub_write_exec(struct aub_file
*aub
, uint64_t batch_addr
,
861 uint64_t offset
, int ring_flag
)
863 if (aub_use_execlists(aub
)) {
864 aub_dump_execlist(aub
, batch_addr
, ring_flag
);
866 /* Dump ring buffer */
867 aub_dump_ringbuffer(aub
, batch_addr
, offset
, ring_flag
);
872 static struct gen_device_info devinfo
= {0};
873 static uint32_t device
;
874 static struct aub_file aubs
[2];
877 relocate_bo(struct bo
*bo
, const struct drm_i915_gem_execbuffer2
*execbuffer2
,
878 const struct drm_i915_gem_exec_object2
*obj
)
880 const struct drm_i915_gem_exec_object2
*exec_objects
=
881 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
882 const struct drm_i915_gem_relocation_entry
*relocs
=
883 (const struct drm_i915_gem_relocation_entry
*) (uintptr_t) obj
->relocs_ptr
;
887 relocated
= malloc(bo
->size
);
888 fail_if(relocated
== NULL
, "intel_aubdump: out of memory\n");
889 memcpy(relocated
, GET_PTR(bo
->map
), bo
->size
);
890 for (size_t i
= 0; i
< obj
->relocation_count
; i
++) {
891 fail_if(relocs
[i
].offset
>= bo
->size
, "intel_aubdump: reloc outside bo\n");
893 if (execbuffer2
->flags
& I915_EXEC_HANDLE_LUT
)
894 handle
= exec_objects
[relocs
[i
].target_handle
].handle
;
896 handle
= relocs
[i
].target_handle
;
898 aub_write_reloc(&devinfo
, ((char *)relocated
) + relocs
[i
].offset
,
899 get_bo(handle
)->offset
+ relocs
[i
].delta
);
/* Forward an ioctl to the real libc implementation, retrying while it is
 * interrupted (EINTR) or told to retry (EAGAIN), as DRM ioctls routinely
 * are.  Returns the final ioctl result.
 *
 * Fix vs. presented code: the `ret` declaration, the do/while skeleton and
 * the return statement were missing.
 */
static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   int ret;

   do {
      ret = libc_ioctl(fd, request, argp);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
918 gem_mmap(int fd
, uint32_t handle
, uint64_t offset
, uint64_t size
)
920 struct drm_i915_gem_mmap mmap
= {
926 if (gem_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap
) == -1)
929 return (void *)(uintptr_t) mmap
.addr_ptr
;
933 gem_get_param(int fd
, uint32_t param
)
936 drm_i915_getparam_t gp
= {
941 if (gem_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gp
) == -1)
948 dump_execbuffer2(int fd
, struct drm_i915_gem_execbuffer2
*execbuffer2
)
950 struct drm_i915_gem_exec_object2
*exec_objects
=
951 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
952 uint32_t ring_flag
= execbuffer2
->flags
& I915_EXEC_RING_MASK
;
954 struct drm_i915_gem_exec_object2
*obj
;
955 struct bo
*bo
, *batch_bo
;
959 /* We can't do this at open time as we're not yet authenticated. */
961 device
= gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
962 fail_if(device
== 0 || devinfo
.gen
== 0, "failed to identify chipset\n");
964 if (devinfo
.gen
== 0) {
965 fail_if(!gen_get_device_info(device
, &devinfo
),
966 "failed to identify chipset=0x%x\n", device
);
968 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
969 if (files
[i
] != NULL
) {
970 aub_file_init(&aubs
[i
], files
[i
], device
);
972 aubs
[i
].verbose_log_file
= stdout
;
973 aub_write_header(&aubs
[i
], program_invocation_short_name
);
978 printf("[intel_aubdump: running, "
979 "output file %s, chipset id 0x%04x, gen %d]\n",
980 filename
, device
, devinfo
.gen
);
984 struct aub_file
*any_aub
= files
[0] ? &aubs
[0] : &aubs
[1];;
986 if (aub_use_execlists(any_aub
))
989 offset
= aub_gtt_size(any_aub
);
992 printf("Dumping execbuffer2:\n");
994 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
995 obj
= &exec_objects
[i
];
996 bo
= get_bo(obj
->handle
);
998 /* If bo->size == 0, this means they passed us an invalid
999 * buffer. The kernel will reject it and so should we.
1001 if (bo
->size
== 0) {
1003 printf("BO #%d is invalid!\n", obj
->handle
);
1007 if (obj
->flags
& EXEC_OBJECT_PINNED
) {
1008 bo
->offset
= obj
->offset
;
1010 printf("BO #%d (%dB) pinned @ 0x%lx\n",
1011 obj
->handle
, bo
->size
, bo
->offset
);
1013 if (obj
->alignment
!= 0)
1014 offset
= align_u32(offset
, obj
->alignment
);
1015 bo
->offset
= offset
;
1017 printf("BO #%d (%dB) @ 0x%lx\n", obj
->handle
,
1018 bo
->size
, bo
->offset
);
1019 offset
= align_u32(offset
+ bo
->size
+ 4095, 4096);
1022 if (bo
->map
== NULL
&& bo
->size
> 0)
1023 bo
->map
= gem_mmap(fd
, obj
->handle
, 0, bo
->size
);
1024 fail_if(bo
->map
== MAP_FAILED
, "intel_aubdump: bo mmap failed\n");
1026 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
1027 if (files
[i
] == NULL
)
1030 if (aub_use_execlists(&aubs
[i
]))
1031 aub_map_ppgtt(&aubs
[i
], bo
->offset
, bo
->size
);
1035 batch_index
= (execbuffer2
->flags
& I915_EXEC_BATCH_FIRST
) ? 0 :
1036 execbuffer2
->buffer_count
- 1;
1037 batch_bo
= get_bo(exec_objects
[batch_index
].handle
);
1038 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
1039 obj
= &exec_objects
[i
];
1040 bo
= get_bo(obj
->handle
);
1042 if (obj
->relocation_count
> 0)
1043 data
= relocate_bo(bo
, execbuffer2
, obj
);
1047 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
1048 if (files
[i
] == NULL
)
1051 if (bo
== batch_bo
) {
1052 aub_write_trace_block(&aubs
[i
], AUB_TRACE_TYPE_BATCH
,
1053 GET_PTR(data
), bo
->size
, bo
->offset
);
1055 aub_write_trace_block(&aubs
[i
], AUB_TRACE_TYPE_NOTYPE
,
1056 GET_PTR(data
), bo
->size
, bo
->offset
);
1059 if (data
!= bo
->map
)
1063 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
1064 if (files
[i
] != NULL
)
1067 aub_write_exec(&aubs
[i
],
1068 batch_bo
->offset
+ execbuffer2
->batch_start_offset
,
1072 if (device_override
&&
1073 (execbuffer2
->flags
& I915_EXEC_FENCE_ARRAY
) != 0) {
1074 struct drm_i915_gem_exec_fence
*fences
=
1075 (void*)(uintptr_t)execbuffer2
->cliprects_ptr
;
1076 for (uint32_t i
= 0; i
< execbuffer2
->num_cliprects
; i
++) {
1077 if ((fences
[i
].flags
& I915_EXEC_FENCE_SIGNAL
) != 0) {
1078 struct drm_syncobj_array arg
= {
1079 .handles
= (uintptr_t)&fences
[i
].handle
,
1083 libc_ioctl(fd
, DRM_IOCTL_SYNCOBJ_SIGNAL
, &arg
);
1090 add_new_bo(int handle
, uint64_t size
, void *map
)
1092 struct bo
*bo
= &bos
[handle
];
1094 fail_if(handle
>= MAX_BO_COUNT
, "intel_aubdump: bo handle out of range\n");
1095 fail_if(size
== 0, "intel_aubdump: bo size is invalid\n");
1102 remove_bo(int handle
)
1104 struct bo
*bo
= get_bo(handle
);
1106 if (bo
->map
&& !IS_USERPTR(bo
->map
))
1107 munmap(bo
->map
, bo
->size
);
1112 __attribute__ ((visibility ("default"))) int
1118 return libc_close(fd
);
1122 launch_command(char *command
)
1125 char **args
= calloc(strlen(command
), sizeof(char *));
1126 char *iter
= command
;
1128 args
[i
++] = iter
= command
;
1130 while ((iter
= strstr(iter
, ",")) != NULL
) {
1136 if (pipe(fds
) == -1)
1142 fail_if(execvp(args
[0], args
) == -1,
1143 "intel_aubdump: failed to launch child command\n");
1148 return fdopen(fds
[1], "w");
1158 static bool initialized
= false;
1167 config
= fdopen(3, "r");
1168 while (fscanf(config
, "%m[^=]=%m[^\n]\n", &key
, &value
) != EOF
) {
1169 if (!strcmp(key
, "verbose")) {
1170 if (!strcmp(value
, "1")) {
1172 } else if (!strcmp(value
, "2")) {
1175 } else if (!strcmp(key
, "device")) {
1176 fail_if(sscanf(value
, "%i", &device
) != 1,
1177 "intel_aubdump: failed to parse device id '%s'",
1179 device_override
= true;
1180 } else if (!strcmp(key
, "file")) {
1181 filename
= strdup(value
);
1182 files
[0] = fopen(filename
, "w+");
1183 fail_if(files
[0] == NULL
,
1184 "intel_aubdump: failed to open file '%s'\n",
1186 } else if (!strcmp(key
, "command")) {
1187 files
[1] = launch_command(value
);
1188 fail_if(files
[1] == NULL
,
1189 "intel_aubdump: failed to launch command '%s'\n",
1192 fprintf(stderr
, "intel_aubdump: unknown option '%s'\n", key
);
1200 bos
= calloc(MAX_BO_COUNT
, sizeof(bos
[0]));
1201 fail_if(bos
== NULL
, "intel_aubdump: out of memory\n");
1204 __attribute__ ((visibility ("default"))) int
1205 ioctl(int fd
, unsigned long request
, ...)
1212 va_start(args
, request
);
1213 argp
= va_arg(args
, void *);
1216 if (_IOC_TYPE(request
) == DRM_IOCTL_BASE
&&
1217 drm_fd
!= fd
&& fstat(fd
, &buf
) == 0 &&
1218 (buf
.st_mode
& S_IFMT
) == S_IFCHR
&& major(buf
.st_rdev
) == DRM_MAJOR
) {
1221 printf("[intel_aubdump: intercept drm ioctl on fd %d]\n", fd
);
1228 case DRM_IOCTL_I915_GETPARAM
: {
1229 struct drm_i915_getparam
*getparam
= argp
;
1231 if (device_override
&& getparam
->param
== I915_PARAM_CHIPSET_ID
) {
1232 *getparam
->value
= device
;
1236 ret
= libc_ioctl(fd
, request
, argp
);
1238 /* If the application looks up chipset_id
1239 * (they typically do), we'll piggy-back on
1240 * their ioctl and store the id for later
1242 if (getparam
->param
== I915_PARAM_CHIPSET_ID
)
1243 device
= *getparam
->value
;
1248 case DRM_IOCTL_I915_GEM_EXECBUFFER
: {
1251 fprintf(stderr
, "intel_aubdump: "
1252 "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
1255 return libc_ioctl(fd
, request
, argp
);
1258 case DRM_IOCTL_I915_GEM_EXECBUFFER2
:
1259 case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
: {
1260 dump_execbuffer2(fd
, argp
);
1261 if (device_override
)
1264 return libc_ioctl(fd
, request
, argp
);
1267 case DRM_IOCTL_I915_GEM_CREATE
: {
1268 struct drm_i915_gem_create
*create
= argp
;
1270 ret
= libc_ioctl(fd
, request
, argp
);
1272 add_new_bo(create
->handle
, create
->size
, NULL
);
1277 case DRM_IOCTL_I915_GEM_USERPTR
: {
1278 struct drm_i915_gem_userptr
*userptr
= argp
;
1280 ret
= libc_ioctl(fd
, request
, argp
);
1282 add_new_bo(userptr
->handle
, userptr
->user_size
,
1283 (void *) (uintptr_t) (userptr
->user_ptr
| USERPTR_FLAG
));
1287 case DRM_IOCTL_GEM_CLOSE
: {
1288 struct drm_gem_close
*close
= argp
;
1290 remove_bo(close
->handle
);
1292 return libc_ioctl(fd
, request
, argp
);
1295 case DRM_IOCTL_GEM_OPEN
: {
1296 struct drm_gem_open
*open
= argp
;
1298 ret
= libc_ioctl(fd
, request
, argp
);
1300 add_new_bo(open
->handle
, open
->size
, NULL
);
1305 case DRM_IOCTL_PRIME_FD_TO_HANDLE
: {
1306 struct drm_prime_handle
*prime
= argp
;
1308 ret
= libc_ioctl(fd
, request
, argp
);
1312 size
= lseek(prime
->fd
, 0, SEEK_END
);
1313 fail_if(size
== -1, "intel_aubdump: failed to get prime bo size\n");
1314 add_new_bo(prime
->handle
, size
, NULL
);
1321 return libc_ioctl(fd
, request
, argp
);
1324 return libc_ioctl(fd
, request
, argp
);
1331 libc_close
= dlsym(RTLD_NEXT
, "close");
1332 libc_ioctl
= dlsym(RTLD_NEXT
, "ioctl");
1333 fail_if(libc_close
== NULL
|| libc_ioctl
== NULL
,
1334 "intel_aubdump: failed to get libc ioctl or close\n");
1338 close_init_helper(int fd
)
1341 return libc_close(fd
);
1345 ioctl_init_helper(int fd
, unsigned long request
, ...)
1350 va_start(args
, request
);
1351 argp
= va_arg(args
, void *);
1355 return libc_ioctl(fd
, request
, argp
);
1358 static void __attribute__ ((destructor
))
1362 for (int i
= 0; i
< ARRAY_SIZE(files
); i
++) {
1364 aub_file_finish(&aubs
[i
]);