/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <dlfcn.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "drm-uapi/i915_drm.h"

#include "intel_aub.h"
#include "aub_write.h"

#include "dev/gen_device_info.h"
#include "util/macros.h"
49 static int close_init_helper(int fd
);
50 static int ioctl_init_helper(int fd
, unsigned long request
, ...);
51 static int munmap_init_helper(void *addr
, size_t length
);
53 static int (*libc_close
)(int fd
) = close_init_helper
;
54 static int (*libc_ioctl
)(int fd
, unsigned long request
, ...) = ioctl_init_helper
;
55 static int (*libc_munmap
)(void *addr
, size_t length
) = munmap_init_helper
;
57 static int drm_fd
= -1;
58 static char *output_filename
= NULL
;
59 static FILE *output_file
= NULL
;
60 static int verbose
= 0;
61 static bool device_override
= false;
62 static bool capture_only
= false;
64 #define MAX_FD_COUNT 64
65 #define MAX_BO_COUNT 64 * 1024
71 /* Tracks userspace mmapping of the buffer */
73 /* Using the i915-gem mmapping ioctl & execbuffer ioctl, track whether a
74 * buffer has been updated.
79 static struct bo
*bos
;
83 /* We set bit 0 in the map pointer for userptr BOs so we know not to
84 * munmap them on DRM_IOCTL_GEM_CLOSE.
86 #define USERPTR_FLAG 1
87 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
88 #define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
90 static void __attribute__ ((format(__printf__
, 2, 3)))
91 fail_if(int cond
, const char *format
, ...)
98 va_start(args
, format
);
99 fprintf(stderr
, "intel_dump_gpu: ");
100 vfprintf(stderr
, format
, args
);
107 get_bo(unsigned fd
, uint32_t handle
)
111 fail_if(handle
>= MAX_BO_COUNT
, "bo handle too large\n");
112 fail_if(fd
>= MAX_FD_COUNT
, "bo fd too large\n");
113 bo
= &bos
[handle
+ fd
* MAX_BO_COUNT
];
118 static inline uint32_t
119 align_u32(uint32_t v
, uint32_t a
)
121 return (v
+ a
- 1) & ~(a
- 1);
124 static struct gen_device_info devinfo
= {0};
125 static int device
= 0;
126 static struct aub_file aub_file
;
129 ensure_device_info(int fd
)
131 /* We can't do this at open time as we're not yet authenticated. */
133 fail_if(!gen_get_device_info_from_fd(fd
, &devinfo
),
134 "failed to identify chipset.\n");
135 device
= devinfo
.chipset_id
;
136 } else if (devinfo
.gen
== 0) {
137 fail_if(!gen_get_device_info_from_pci_id(device
, &devinfo
),
138 "failed to identify chipset.\n");
143 relocate_bo(int fd
, struct bo
*bo
, const struct drm_i915_gem_execbuffer2
*execbuffer2
,
144 const struct drm_i915_gem_exec_object2
*obj
)
146 const struct drm_i915_gem_exec_object2
*exec_objects
=
147 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
148 const struct drm_i915_gem_relocation_entry
*relocs
=
149 (const struct drm_i915_gem_relocation_entry
*) (uintptr_t) obj
->relocs_ptr
;
153 relocated
= malloc(bo
->size
);
154 fail_if(relocated
== NULL
, "out of memory\n");
155 memcpy(relocated
, GET_PTR(bo
->map
), bo
->size
);
156 for (size_t i
= 0; i
< obj
->relocation_count
; i
++) {
157 fail_if(relocs
[i
].offset
>= bo
->size
, "reloc outside bo\n");
159 if (execbuffer2
->flags
& I915_EXEC_HANDLE_LUT
)
160 handle
= exec_objects
[relocs
[i
].target_handle
].handle
;
162 handle
= relocs
[i
].target_handle
;
164 aub_write_reloc(&devinfo
, ((char *)relocated
) + relocs
[i
].offset
,
165 get_bo(fd
, handle
)->offset
+ relocs
[i
].delta
);
172 gem_ioctl(int fd
, unsigned long request
, void *argp
)
177 ret
= libc_ioctl(fd
, request
, argp
);
178 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
184 gem_mmap(int fd
, uint32_t handle
, uint64_t offset
, uint64_t size
)
186 struct drm_i915_gem_mmap mmap
= {
192 if (gem_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap
) == -1)
195 return (void *)(uintptr_t) mmap
.addr_ptr
;
198 static enum drm_i915_gem_engine_class
199 engine_class_from_ring_flag(uint32_t ring_flag
)
202 case I915_EXEC_DEFAULT
:
203 case I915_EXEC_RENDER
:
204 return I915_ENGINE_CLASS_RENDER
;
206 return I915_ENGINE_CLASS_VIDEO
;
208 return I915_ENGINE_CLASS_COPY
;
209 case I915_EXEC_VEBOX
:
210 return I915_ENGINE_CLASS_VIDEO_ENHANCE
;
212 return I915_ENGINE_CLASS_INVALID
;
217 dump_execbuffer2(int fd
, struct drm_i915_gem_execbuffer2
*execbuffer2
)
219 struct drm_i915_gem_exec_object2
*exec_objects
=
220 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
221 uint32_t ring_flag
= execbuffer2
->flags
& I915_EXEC_RING_MASK
;
223 struct drm_i915_gem_exec_object2
*obj
;
224 struct bo
*bo
, *batch_bo
;
228 ensure_device_info(fd
);
230 if (!aub_file
.file
) {
231 aub_file_init(&aub_file
, output_file
,
232 verbose
== 2 ? stdout
: NULL
,
233 device
, program_invocation_short_name
);
234 aub_write_default_setup(&aub_file
);
237 printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
238 output_filename
, device
, devinfo
.gen
);
241 if (aub_use_execlists(&aub_file
))
244 offset
= aub_gtt_size(&aub_file
);
247 printf("Dumping execbuffer2:\n");
249 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
250 obj
= &exec_objects
[i
];
251 bo
= get_bo(fd
, obj
->handle
);
253 /* If bo->size == 0, this means they passed us an invalid
254 * buffer. The kernel will reject it and so should we.
258 printf("BO #%d is invalid!\n", obj
->handle
);
262 if (obj
->flags
& EXEC_OBJECT_PINNED
) {
263 bo
->offset
= obj
->offset
;
265 printf("BO #%d (%dB) pinned @ 0x%" PRIx64
"\n",
266 obj
->handle
, bo
->size
, bo
->offset
);
268 if (obj
->alignment
!= 0)
269 offset
= align_u32(offset
, obj
->alignment
);
272 printf("BO #%d (%dB) @ 0x%" PRIx64
"\n", obj
->handle
,
273 bo
->size
, bo
->offset
);
274 offset
= align_u32(offset
+ bo
->size
+ 4095, 4096);
277 if (bo
->map
== NULL
&& bo
->size
> 0)
278 bo
->map
= gem_mmap(fd
, obj
->handle
, 0, bo
->size
);
279 fail_if(bo
->map
== MAP_FAILED
, "bo mmap failed\n");
281 if (aub_use_execlists(&aub_file
))
282 aub_map_ppgtt(&aub_file
, bo
->offset
, bo
->size
);
285 batch_index
= (execbuffer2
->flags
& I915_EXEC_BATCH_FIRST
) ? 0 :
286 execbuffer2
->buffer_count
- 1;
287 batch_bo
= get_bo(fd
, exec_objects
[batch_index
].handle
);
288 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
289 obj
= &exec_objects
[i
];
290 bo
= get_bo(fd
, obj
->handle
);
292 if (obj
->relocation_count
> 0)
293 data
= relocate_bo(fd
, bo
, execbuffer2
, obj
);
297 bool write
= !capture_only
|| (obj
->flags
& EXEC_OBJECT_CAPTURE
);
299 if (write
&& bo
->dirty
) {
300 if (bo
== batch_bo
) {
301 aub_write_trace_block(&aub_file
, AUB_TRACE_TYPE_BATCH
,
302 GET_PTR(data
), bo
->size
, bo
->offset
);
304 aub_write_trace_block(&aub_file
, AUB_TRACE_TYPE_NOTYPE
,
305 GET_PTR(data
), bo
->size
, bo
->offset
);
308 if (!bo
->user_mapped
)
316 uint32_t ctx_id
= execbuffer2
->rsvd1
;
318 aub_write_exec(&aub_file
, ctx_id
,
319 batch_bo
->offset
+ execbuffer2
->batch_start_offset
,
320 offset
, engine_class_from_ring_flag(ring_flag
));
322 if (device_override
&&
323 (execbuffer2
->flags
& I915_EXEC_FENCE_ARRAY
) != 0) {
324 struct drm_i915_gem_exec_fence
*fences
=
325 (void*)(uintptr_t)execbuffer2
->cliprects_ptr
;
326 for (uint32_t i
= 0; i
< execbuffer2
->num_cliprects
; i
++) {
327 if ((fences
[i
].flags
& I915_EXEC_FENCE_SIGNAL
) != 0) {
328 struct drm_syncobj_array arg
= {
329 .handles
= (uintptr_t)&fences
[i
].handle
,
333 libc_ioctl(fd
, DRM_IOCTL_SYNCOBJ_SIGNAL
, &arg
);
340 add_new_bo(unsigned fd
, int handle
, uint64_t size
, void *map
)
342 struct bo
*bo
= &bos
[handle
+ fd
* MAX_BO_COUNT
];
344 fail_if(handle
>= MAX_BO_COUNT
, "bo handle out of range\n");
345 fail_if(fd
>= MAX_FD_COUNT
, "bo fd out of range\n");
346 fail_if(size
== 0, "bo size is invalid\n");
350 bo
->user_mapped
= false;
354 remove_bo(int fd
, int handle
)
356 struct bo
*bo
= get_bo(fd
, handle
);
358 if (bo
->map
&& !IS_USERPTR(bo
->map
))
359 munmap(bo
->map
, bo
->size
);
362 bo
->user_mapped
= false;
365 __attribute__ ((visibility ("default"))) int
371 return libc_close(fd
);
375 get_pci_id(int fd
, int *pci_id
)
377 struct drm_i915_getparam gparam
;
379 if (device_override
) {
384 gparam
.param
= I915_PARAM_CHIPSET_ID
;
385 gparam
.value
= pci_id
;
386 return libc_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gparam
);
392 static bool initialized
= false;
401 config
= fopen(getenv("INTEL_DUMP_GPU_CONFIG"), "r");
402 while (fscanf(config
, "%m[^=]=%m[^\n]\n", &key
, &value
) != EOF
) {
403 if (!strcmp(key
, "verbose")) {
404 if (!strcmp(value
, "1")) {
406 } else if (!strcmp(value
, "2")) {
409 } else if (!strcmp(key
, "device")) {
410 fail_if(device
!= 0, "Device/Platform override specified multiple times.");
411 fail_if(sscanf(value
, "%i", &device
) != 1,
412 "failed to parse device id '%s'",
414 device_override
= true;
415 } else if (!strcmp(key
, "platform")) {
416 fail_if(device
!= 0, "Device/Platform override specified multiple times.");
417 device
= gen_device_name_to_pci_device_id(value
);
418 fail_if(device
== -1, "Unknown platform '%s'", value
);
419 device_override
= true;
420 } else if (!strcmp(key
, "file")) {
421 output_filename
= strdup(value
);
422 output_file
= fopen(output_filename
, "w+");
423 fail_if(output_file
== NULL
,
424 "failed to open file '%s'\n",
426 } else if (!strcmp(key
, "capture_only")) {
427 capture_only
= atoi(value
);
429 fprintf(stderr
, "unknown option '%s'\n", key
);
437 bos
= calloc(MAX_FD_COUNT
* MAX_BO_COUNT
, sizeof(bos
[0]));
438 fail_if(bos
== NULL
, "out of memory\n");
440 int ret
= get_pci_id(fd
, &device
);
443 aub_file_init(&aub_file
, output_file
,
444 verbose
== 2 ? stdout
: NULL
,
445 device
, program_invocation_short_name
);
446 aub_write_default_setup(&aub_file
);
449 printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
450 output_filename
, device
, devinfo
.gen
);
453 __attribute__ ((visibility ("default"))) int
454 ioctl(int fd
, unsigned long request
, ...)
461 va_start(args
, request
);
462 argp
= va_arg(args
, void *);
465 if (_IOC_TYPE(request
) == DRM_IOCTL_BASE
&&
466 drm_fd
!= fd
&& fstat(fd
, &buf
) == 0 &&
467 (buf
.st_mode
& S_IFMT
) == S_IFCHR
&& major(buf
.st_rdev
) == DRM_MAJOR
) {
470 printf("[intercept drm ioctl on fd %d]\n", fd
);
477 case DRM_IOCTL_SYNCOBJ_WAIT
:
478 case DRM_IOCTL_I915_GEM_WAIT
: {
481 return libc_ioctl(fd
, request
, argp
);
484 case DRM_IOCTL_I915_GET_RESET_STATS
: {
485 if (device_override
) {
486 struct drm_i915_reset_stats
*stats
= argp
;
488 stats
->reset_count
= 0;
489 stats
->batch_active
= 0;
490 stats
->batch_pending
= 0;
493 return libc_ioctl(fd
, request
, argp
);
496 case DRM_IOCTL_I915_GETPARAM
: {
497 struct drm_i915_getparam
*getparam
= argp
;
499 ensure_device_info(fd
);
501 if (getparam
->param
== I915_PARAM_CHIPSET_ID
)
502 return get_pci_id(fd
, getparam
->value
);
504 if (device_override
) {
505 switch (getparam
->param
) {
506 case I915_PARAM_CS_TIMESTAMP_FREQUENCY
:
507 *getparam
->value
= devinfo
.timestamp_frequency
;
510 case I915_PARAM_HAS_WAIT_TIMEOUT
:
511 case I915_PARAM_HAS_EXECBUF2
:
512 case I915_PARAM_MMAP_VERSION
:
513 case I915_PARAM_HAS_EXEC_ASYNC
:
514 case I915_PARAM_HAS_EXEC_FENCE
:
515 case I915_PARAM_HAS_EXEC_FENCE_ARRAY
:
516 *getparam
->value
= 1;
519 case I915_PARAM_HAS_EXEC_SOFTPIN
:
520 *getparam
->value
= devinfo
.gen
>= 8 && !devinfo
.is_cherryview
;
528 return libc_ioctl(fd
, request
, argp
);
531 case DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM
: {
532 struct drm_i915_gem_context_param
*getparam
= argp
;
534 ensure_device_info(fd
);
536 if (device_override
) {
537 switch (getparam
->param
) {
538 case I915_CONTEXT_PARAM_GTT_SIZE
:
539 if (devinfo
.is_elkhartlake
)
540 getparam
->value
= 1ull << 36;
541 else if (devinfo
.gen
>= 8 && !devinfo
.is_cherryview
)
542 getparam
->value
= 1ull << 48;
544 getparam
->value
= 1ull << 31;
552 return libc_ioctl(fd
, request
, argp
);
555 case DRM_IOCTL_I915_GEM_EXECBUFFER
: {
559 "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
562 return libc_ioctl(fd
, request
, argp
);
565 case DRM_IOCTL_I915_GEM_EXECBUFFER2
:
566 case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
: {
567 dump_execbuffer2(fd
, argp
);
571 return libc_ioctl(fd
, request
, argp
);
574 case DRM_IOCTL_I915_GEM_CONTEXT_CREATE
: {
575 uint32_t *ctx_id
= NULL
;
576 struct drm_i915_gem_context_create
*create
= argp
;
578 if (!device_override
) {
579 ret
= libc_ioctl(fd
, request
, argp
);
580 ctx_id
= &create
->ctx_id
;
584 create
->ctx_id
= aub_write_context_create(&aub_file
, ctx_id
);
589 case DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT
: {
590 uint32_t *ctx_id
= NULL
;
591 struct drm_i915_gem_context_create_ext
*create
= argp
;
593 if (!device_override
) {
594 ret
= libc_ioctl(fd
, request
, argp
);
595 ctx_id
= &create
->ctx_id
;
599 create
->ctx_id
= aub_write_context_create(&aub_file
, ctx_id
);
604 case DRM_IOCTL_I915_GEM_CREATE
: {
605 struct drm_i915_gem_create
*create
= argp
;
607 ret
= libc_ioctl(fd
, request
, argp
);
609 add_new_bo(fd
, create
->handle
, create
->size
, NULL
);
614 case DRM_IOCTL_I915_GEM_USERPTR
: {
615 struct drm_i915_gem_userptr
*userptr
= argp
;
617 ret
= libc_ioctl(fd
, request
, argp
);
619 add_new_bo(fd
, userptr
->handle
, userptr
->user_size
,
620 (void *) (uintptr_t) (userptr
->user_ptr
| USERPTR_FLAG
));
625 case DRM_IOCTL_GEM_CLOSE
: {
626 struct drm_gem_close
*close
= argp
;
628 remove_bo(fd
, close
->handle
);
630 return libc_ioctl(fd
, request
, argp
);
633 case DRM_IOCTL_GEM_OPEN
: {
634 struct drm_gem_open
*open
= argp
;
636 ret
= libc_ioctl(fd
, request
, argp
);
638 add_new_bo(fd
, open
->handle
, open
->size
, NULL
);
643 case DRM_IOCTL_PRIME_FD_TO_HANDLE
: {
644 struct drm_prime_handle
*prime
= argp
;
646 ret
= libc_ioctl(fd
, request
, argp
);
650 size
= lseek(prime
->fd
, 0, SEEK_END
);
651 fail_if(size
== -1, "failed to get prime bo size\n");
652 add_new_bo(fd
, prime
->handle
, size
, NULL
);
659 case DRM_IOCTL_I915_GEM_MMAP
: {
660 ret
= libc_ioctl(fd
, request
, argp
);
662 struct drm_i915_gem_mmap
*mmap
= argp
;
663 struct bo
*bo
= get_bo(fd
, mmap
->handle
);
664 bo
->user_mapped
= true;
671 return libc_ioctl(fd
, request
, argp
);
674 return libc_ioctl(fd
, request
, argp
);
681 libc_close
= dlsym(RTLD_NEXT
, "close");
682 libc_ioctl
= dlsym(RTLD_NEXT
, "ioctl");
683 libc_munmap
= dlsym(RTLD_NEXT
, "munmap");
684 fail_if(libc_close
== NULL
|| libc_ioctl
== NULL
,
685 "failed to get libc ioctl or close\n");
689 close_init_helper(int fd
)
692 return libc_close(fd
);
696 ioctl_init_helper(int fd
, unsigned long request
, ...)
701 va_start(args
, request
);
702 argp
= va_arg(args
, void *);
706 return libc_ioctl(fd
, request
, argp
);
710 munmap_init_helper(void *addr
, size_t length
)
713 for (uint32_t i
= 0; i
< MAX_FD_COUNT
* MAX_BO_COUNT
; i
++) {
714 struct bo
*bo
= &bos
[i
];
715 if (bo
->map
== addr
) {
716 bo
->user_mapped
= false;
720 return libc_munmap(addr
, length
);
723 static void __attribute__ ((destructor
))
726 if (devinfo
.gen
!= 0) {
727 free(output_filename
);
728 aub_file_finish(&aub_file
);