2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
35 #include <sys/ioctl.h>
40 #include "drm-uapi/i915_drm.h"
43 #include "intel_aub.h"
44 #include "aub_write.h"
46 #include "dev/gen_device_info.h"
47 #include "util/macros.h"
/* LD_PRELOAD interposition machinery: the tool overrides close()/ioctl()/
 * munmap(), and forwards to the real libc entry points through these function
 * pointers. They start out pointing at *_init_helper trampolines, which
 * resolve the real symbols via dlsym(RTLD_NEXT, ...) (see init at the bottom
 * of the file) before forwarding the first call.
 * NOTE(review): this chunk is a mangled extraction — statements are split
 * across lines and carry stray leading numbers; kept byte-identical. */
49 static int close_init_helper(int fd
);
50 static int ioctl_init_helper(int fd
, unsigned long request
, ...);
51 static int munmap_init_helper(void *addr
, size_t length
);
/* Lazily-resolved real libc functions; initialized to the helpers above. */
53 static int (*libc_close
)(int fd
) = close_init_helper
;
54 static int (*libc_ioctl
)(int fd
, unsigned long request
, ...) = ioctl_init_helper
;
55 static int (*libc_munmap
)(void *addr
, size_t length
) = munmap_init_helper
;
/* Global tool state: the intercepted DRM fd (-1 until seen), the AUB output
 * file/name, verbosity (0/1/2 — 2 also mirrors the trace to stdout), and
 * whether the device/platform was overridden via INTEL_DUMP_GPU_CONFIG. */
57 static int drm_fd
= -1;
58 static char *output_filename
= NULL
;
59 static FILE *output_file
= NULL
;
60 static int verbose
= 0;
61 static bool device_override
;
/* Capacity of the flat BO table: one slot per (fd, handle) pair, indexed as
 * handle + fd * MAX_BO_COUNT (see get_bo/add_new_bo).
 * Fix: parenthesize the expression form so the macro expands safely inside
 * larger expressions (e.g. `x % MAX_BO_COUNT` or `x / MAX_BO_COUNT` would
 * otherwise bind as `x % 64 * 1024`). */
#define MAX_FD_COUNT 64
#define MAX_BO_COUNT (64 * 1024)
/* Flat table of tracked buffer objects, allocated lazily (calloc of
 * MAX_FD_COUNT * MAX_BO_COUNT entries — see maybe_init) and indexed by
 * handle + fd * MAX_BO_COUNT.
 * NOTE(review): the definition of struct bo itself is not visible in this
 * chunk; from its uses it has at least size, offset, map and user_mapped
 * members — confirm against the full file. */
70 /* Tracks userspace mmapping of the buffer */
72 /* Using the i915-gem mmapping ioctl & execbuffer ioctl, track whether a
73 * buffer has been updated.
78 static struct bo
*bos
;
82 /* We set bit 0 in the map pointer for userptr BOs so we know not to
83 * munmap them on DRM_IOCTL_GEM_CLOSE.
85 #define USERPTR_FLAG 1
86 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
87 #define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
/* Abort helper: when `cond` is true, print a printf-style message prefixed
 * with "intel_dump_gpu: " to stderr and terminate. The format attribute lets
 * the compiler type-check the varargs against the format string.
 * NOTE(review): interior lines are missing from this extraction (the cond
 * check, va_list declaration, va_end and the terminating call are not
 * visible) — presumably it raises/aborts after printing; confirm against
 * the full file. Kept byte-identical. */
89 static void __attribute__ ((format(__printf__
, 2, 3)))
90 fail_if(int cond
, const char *format
, ...)
97 va_start(args
, format
);
98 fprintf(stderr
, "intel_dump_gpu: ");
99 vfprintf(stderr
, format
, args
);
/* Look up the tracked BO for (fd, handle) in the flat `bos` table, aborting
 * if either index is out of range. Table layout: handle + fd * MAX_BO_COUNT.
 * NOTE(review): the return type, the declaration of `bo` and the return
 * statement are missing from this extraction — presumably returns
 * `struct bo *`; confirm against the full file. Kept byte-identical. */
106 get_bo(unsigned fd
, uint32_t handle
)
110 fail_if(handle
>= MAX_BO_COUNT
, "bo handle too large\n");
111 fail_if(fd
>= MAX_FD_COUNT
, "bo fd too large\n");
112 bo
= &bos
[handle
+ fd
* MAX_BO_COUNT
];
/* Round `v` up to the next multiple of `a`. `a` must be a nonzero power of
 * two — the computation relies on `a - 1` being an all-ones low-bit mask. */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   const uint32_t mask = a - 1;

   return (v + mask) & ~mask;
}
/* Identified GPU: devinfo is zero-initialized (devinfo.gen == 0 is used as
 * the "not yet identified" sentinel elsewhere in the file), `device` is the
 * PCI chipset id (0 until identified or overridden), and `aub_file` is the
 * AUB trace writer state from aub_write.h. */
123 static struct gen_device_info devinfo
= {0};
124 static int device
= 0;
125 static struct aub_file aub_file
;
/* Populate `devinfo` and `device` on first use. Querying at open() time is
 * not possible because the process is not yet DRM-authenticated, so this is
 * called lazily from the ioctl paths. One branch identifies the chipset from
 * the live fd; the other (when devinfo.gen is still 0) derives it from an
 * overridden PCI id.
 * NOTE(review): the leading `if (...)` head and the function's closing lines
 * are missing from this extraction — presumably the first branch runs when
 * no device override is set; confirm against the full file. */
128 ensure_device_info(int fd
)
130 /* We can't do this at open time as we're not yet authenticated. */
132 fail_if(!gen_get_device_info_from_fd(fd
, &devinfo
),
133 "failed to identify chipset.\n");
134 device
= devinfo
.chipset_id
;
/* Fallback: devinfo not yet filled in — derive it from the PCI id. */
135 } else if (devinfo
.gen
== 0) {
136 fail_if(!gen_get_device_info_from_pci_id(device
, &devinfo
),
137 "failed to identify chipset.\n");
/* Produce a relocated copy of `bo`'s contents for the AUB trace: malloc a
 * buffer of bo->size, memcpy the (tag-stripped) mapping into it, then patch
 * every relocation entry in place with the target BO's offset + delta via
 * aub_write_reloc. The original mapping is left untouched.
 * NOTE(review): the return type, the declarations of `relocated`/`handle`,
 * the `else` keyword between the two handle assignments, and the trailing
 * return are missing from this extraction — presumably returns `relocated`;
 * confirm against the full file. Kept byte-identical. */
142 relocate_bo(int fd
, struct bo
*bo
, const struct drm_i915_gem_execbuffer2
*execbuffer2
,
143 const struct drm_i915_gem_exec_object2
*obj
)
/* The kernel passes these arrays as user pointers; recover them. */
145 const struct drm_i915_gem_exec_object2
*exec_objects
=
146 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
147 const struct drm_i915_gem_relocation_entry
*relocs
=
148 (const struct drm_i915_gem_relocation_entry
*) (uintptr_t) obj
->relocs_ptr
;
/* Snapshot the BO so relocations are applied to a private copy. */
152 relocated
= malloc(bo
->size
);
153 fail_if(relocated
== NULL
, "out of memory\n");
154 memcpy(relocated
, GET_PTR(bo
->map
), bo
->size
);
155 for (size_t i
= 0; i
< obj
->relocation_count
; i
++) {
156 fail_if(relocs
[i
].offset
>= bo
->size
, "reloc outside bo\n");
/* With HANDLE_LUT, target_handle indexes the exec-object array rather
 * than being a GEM handle directly. */
158 if (execbuffer2
->flags
& I915_EXEC_HANDLE_LUT
)
159 handle
= exec_objects
[relocs
[i
].target_handle
].handle
;
161 handle
= relocs
[i
].target_handle
;
/* Write the resolved GPU address into the copied buffer. */
163 aub_write_reloc(&devinfo
, ((char *)relocated
) + relocs
[i
].offset
,
164 get_bo(fd
, handle
)->offset
+ relocs
[i
].delta
);
/* Thin ioctl wrapper that retries the real libc ioctl while it fails with
 * EINTR or EAGAIN (the standard restart-on-signal idiom for DRM ioctls).
 * NOTE(review): the return type, `ret` declaration, the `do {` head and the
 * trailing `return ret` are missing from this extraction — confirm against
 * the full file. Kept byte-identical. */
171 gem_ioctl(int fd
, unsigned long request
, void *argp
)
176 ret
= libc_ioctl(fd
, request
, argp
);
177 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
/* CPU-map a GEM buffer via DRM_IOCTL_I915_GEM_MMAP and return the mapped
 * address (as stored in mmap.addr_ptr by the kernel).
 * NOTE(review): the struct initializer fields (handle/offset/size) and the
 * failure-path return are missing from this extraction — presumably returns
 * MAP_FAILED on error, as the caller checks for that; confirm against the
 * full file. Kept byte-identical. */
183 gem_mmap(int fd
, uint32_t handle
, uint64_t offset
, uint64_t size
)
185 struct drm_i915_gem_mmap mmap
= {
191 if (gem_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap
) == -1)
194 return (void *)(uintptr_t) mmap
.addr_ptr
;
/* Map the execbuffer ring-selection flag (I915_EXEC_RING_MASK bits) to the
 * corresponding engine class for the AUB writer. DEFAULT and RENDER both map
 * to the render engine.
 * NOTE(review): the `switch (ring_flag)` head and the BSD/BLT case labels
 * are missing from this extraction (the VIDEO/COPY returns they guard are
 * visible); confirm against the full file. Kept byte-identical. */
197 static enum drm_i915_gem_engine_class
198 engine_class_from_ring_flag(uint32_t ring_flag
)
201 case I915_EXEC_DEFAULT
:
202 case I915_EXEC_RENDER
:
203 return I915_ENGINE_CLASS_RENDER
;
205 return I915_ENGINE_CLASS_VIDEO
;
207 return I915_ENGINE_CLASS_COPY
;
208 case I915_EXEC_VEBOX
:
209 return I915_ENGINE_CLASS_VIDEO_ENHANCE
;
/* Unknown ring flag. */
211 return I915_ENGINE_CLASS_INVALID
;
/* Core of the tool: called on every intercepted EXECBUFFER2 ioctl. Walks the
 * exec-object list twice — first to assign/record GPU offsets (honoring
 * EXEC_OBJECT_PINNED and alignment, mapping unmapped BOs, and building PPGTT
 * mappings in execlist mode), then to emit each BO's contents into the AUB
 * trace (relocating where needed, tagging the batch buffer specially) and
 * finally the exec/ring dispatch itself. When running with a device override
 * (no real GPU executes anything) it also signals any FENCE_ARRAY syncobjs
 * the application expects to be signaled.
 * NOTE(review): numerous interior lines are missing from this extraction
 * (declarations of offset/batch_index/data, several else branches, frees and
 * closing braces). Too intricate to reconstruct safely — kept byte-identical
 * with comments only. */
216 dump_execbuffer2(int fd
, struct drm_i915_gem_execbuffer2
*execbuffer2
)
218 struct drm_i915_gem_exec_object2
*exec_objects
=
219 (struct drm_i915_gem_exec_object2
*) (uintptr_t) execbuffer2
->buffers_ptr
;
220 uint32_t ring_flag
= execbuffer2
->flags
& I915_EXEC_RING_MASK
;
222 struct drm_i915_gem_exec_object2
*obj
;
223 struct bo
*bo
, *batch_bo
;
227 ensure_device_info(fd
);
/* Lazily open the AUB writer on the first execbuffer we see. */
229 if (!aub_file
.file
) {
230 aub_file_init(&aub_file
, output_file
,
231 verbose
== 2 ? stdout
: NULL
,
232 device
, program_invocation_short_name
);
233 aub_write_default_setup(&aub_file
);
236 printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
237 output_filename
, device
, devinfo
.gen
);
/* In execlist mode, start placing BOs above the writer's GTT reservation. */
240 if (aub_use_execlists(&aub_file
))
243 offset
= aub_gtt_size(&aub_file
);
246 printf("Dumping execbuffer2:\n");
/* Pass 1: assign offsets and ensure every BO is CPU-mapped. */
248 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
249 obj
= &exec_objects
[i
];
250 bo
= get_bo(fd
, obj
->handle
);
252 /* If bo->size == 0, this means they passed us an invalid
253 * buffer. The kernel will reject it and so should we.
257 printf("BO #%d is invalid!\n", obj
->handle
);
/* Pinned (softpin) BOs keep the address the application chose. */
261 if (obj
->flags
& EXEC_OBJECT_PINNED
) {
262 bo
->offset
= obj
->offset
;
264 printf("BO #%d (%dB) pinned @ 0x%" PRIx64
"\n",
265 obj
->handle
, bo
->size
, bo
->offset
);
/* Relocatable BOs: allocate the next aligned slot in our fake GTT. */
267 if (obj
->alignment
!= 0)
268 offset
= align_u32(offset
, obj
->alignment
);
271 printf("BO #%d (%dB) @ 0x%" PRIx64
"\n", obj
->handle
,
272 bo
->size
, bo
->offset
);
273 offset
= align_u32(offset
+ bo
->size
+ 4095, 4096);
276 if (bo
->map
== NULL
&& bo
->size
> 0)
277 bo
->map
= gem_mmap(fd
, obj
->handle
, 0, bo
->size
);
278 fail_if(bo
->map
== MAP_FAILED
, "bo mmap failed\n");
280 if (aub_use_execlists(&aub_file
))
281 aub_map_ppgtt(&aub_file
, bo
->offset
, bo
->size
);
/* The batch is first or last in the list depending on BATCH_FIRST. */
284 batch_index
= (execbuffer2
->flags
& I915_EXEC_BATCH_FIRST
) ? 0 :
285 execbuffer2
->buffer_count
- 1;
286 batch_bo
= get_bo(fd
, exec_objects
[batch_index
].handle
);
/* Pass 2: write each BO's (relocated) contents into the trace. */
287 for (uint32_t i
= 0; i
< execbuffer2
->buffer_count
; i
++) {
288 obj
= &exec_objects
[i
];
289 bo
= get_bo(fd
, obj
->handle
);
291 if (obj
->relocation_count
> 0)
292 data
= relocate_bo(fd
, bo
, execbuffer2
, obj
);
297 if (bo
== batch_bo
) {
298 aub_write_trace_block(&aub_file
, AUB_TRACE_TYPE_BATCH
,
299 GET_PTR(data
), bo
->size
, bo
->offset
);
301 aub_write_trace_block(&aub_file
, AUB_TRACE_TYPE_NOTYPE
,
302 GET_PTR(data
), bo
->size
, bo
->offset
);
305 if (!bo
->user_mapped
)
/* rsvd1 carries the context id for execbuffer2. */
313 uint32_t ctx_id
= execbuffer2
->rsvd1
;
315 aub_write_exec(&aub_file
, ctx_id
,
316 batch_bo
->offset
+ execbuffer2
->batch_start_offset
,
317 offset
, engine_class_from_ring_flag(ring_flag
));
/* Device override: nothing really executes, so signal any syncobjs the
 * app asked to have signaled or it would wait forever. */
319 if (device_override
&&
320 (execbuffer2
->flags
& I915_EXEC_FENCE_ARRAY
) != 0) {
321 struct drm_i915_gem_exec_fence
*fences
=
322 (void*)(uintptr_t)execbuffer2
->cliprects_ptr
;
323 for (uint32_t i
= 0; i
< execbuffer2
->num_cliprects
; i
++) {
324 if ((fences
[i
].flags
& I915_EXEC_FENCE_SIGNAL
) != 0) {
325 struct drm_syncobj_array arg
= {
326 .handles
= (uintptr_t)&fences
[i
].handle
,
330 libc_ioctl(fd
, DRM_IOCTL_SYNCOBJ_SIGNAL
, &arg
);
/* Register a newly created GEM buffer (from GEM_CREATE/USERPTR/GEM_OPEN/
 * PRIME import) in the flat `bos` table and reset its user_mapped state.
 * NOTE(review): the slot address is computed before the range fail_if
 * checks — taking the address of an out-of-range element would be UB before
 * the check fires; worth reordering in the full file.
 * NOTE(review): the lines storing size/map into the slot are missing from
 * this extraction — confirm against the full file. Kept byte-identical. */
337 add_new_bo(unsigned fd
, int handle
, uint64_t size
, void *map
)
339 struct bo
*bo
= &bos
[handle
+ fd
* MAX_BO_COUNT
];
341 fail_if(handle
>= MAX_BO_COUNT
, "bo handle out of range\n");
342 fail_if(fd
>= MAX_FD_COUNT
, "bo fd out of range\n");
343 fail_if(size
== 0, "bo size is invalid\n");
347 bo
->user_mapped
= false;
/* Forget a BO on DRM_IOCTL_GEM_CLOSE: unmap our CPU mapping unless it is a
 * userptr BO (tagged via USERPTR_FLAG — that memory belongs to the app) and
 * clear the user_mapped flag.
 * NOTE(review): IS_USERPTR-tagged maps are never munmapped, but the map
 * passed to munmap here is used untagged — consistent with only untagged
 * (kernel-mmapped) pointers reaching it. Lines resetting size/map appear
 * missing from this extraction; confirm against the full file. */
351 remove_bo(int fd
, int handle
)
353 struct bo
*bo
= get_bo(fd
, handle
);
355 if (bo
->map
&& !IS_USERPTR(bo
->map
))
356 munmap(bo
->map
, bo
->size
);
359 bo
->user_mapped
= false;
/* Exported close() override (default visibility so the dynamic linker
 * interposes it); forwards to the real libc close.
 * NOTE(review): the function signature line and most of the body are missing
 * from this extraction — presumably it also clears drm_fd when the DRM fd is
 * closed; confirm against the full file. Kept byte-identical. */
362 __attribute__ ((visibility ("default"))) int
368 return libc_close(fd
);
/* Fetch the PCI chipset id into *pci_id: either short-circuit with the
 * configured override, or ask the kernel via I915_PARAM_CHIPSET_ID.
 * NOTE(review): the override branch's body (presumably *pci_id = device;
 * return 0;) is missing from this extraction — confirm against the full
 * file. Kept byte-identical. */
372 get_pci_id(int fd
, int *pci_id
)
374 struct drm_i915_getparam gparam
;
376 if (device_override
) {
381 gparam
.param
= I915_PARAM_CHIPSET_ID
;
382 gparam
.value
= pci_id
;
383 return libc_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gparam
);
/* One-time initialization, driven by the INTEL_DUMP_GPU_CONFIG file: parses
 * key=value options (verbose, device, platform, file), allocates the BO
 * table, determines the PCI id, and opens the AUB writer.
 * NOTE(review): the enclosing function header is missing from this
 * extraction (this reads like the body of a maybe_init(fd)-style function
 * guarded by `initialized`), as are the early-return, fclose of the config
 * stream, and free() of the parsed key/value strings — confirm against the
 * full file. Kept byte-identical. */
389 static bool initialized
= false;
/* Config stream from the environment-named file; %m lets fscanf allocate
 * the key/value strings. */
398 config
= fopen(getenv("INTEL_DUMP_GPU_CONFIG"), "r");
399 while (fscanf(config
, "%m[^=]=%m[^\n]\n", &key
, &value
) != EOF
) {
400 if (!strcmp(key
, "verbose")) {
401 if (!strcmp(value
, "1")) {
403 } else if (!strcmp(value
, "2")) {
/* Numeric PCI device id override ("%i" accepts 0x-prefixed hex). */
406 } else if (!strcmp(key
, "device")) {
407 fail_if(device
!= 0, "Device/Platform override specified multiple times.");
408 fail_if(sscanf(value
, "%i", &device
) != 1,
409 "failed to parse device id '%s'",
411 device_override
= true;
/* Platform-name override resolved to a PCI id. */
412 } else if (!strcmp(key
, "platform")) {
413 fail_if(device
!= 0, "Device/Platform override specified multiple times.");
414 device
= gen_device_name_to_pci_device_id(value
);
415 fail_if(device
== -1, "Unknown platform '%s'", value
);
416 device_override
= true;
/* Output file for the AUB trace. */
417 } else if (!strcmp(key
, "file")) {
418 output_filename
= strdup(value
);
419 output_file
= fopen(output_filename
, "w+");
420 fail_if(output_file
== NULL
,
421 "failed to open file '%s'\n",
424 fprintf(stderr
, "unknown option '%s'\n", key
);
/* Allocate the (fd, handle) BO table. */
432 bos
= calloc(MAX_FD_COUNT
* MAX_BO_COUNT
, sizeof(bos
[0]));
433 fail_if(bos
== NULL
, "out of memory\n");
435 int ret
= get_pci_id(fd
, &device
);
438 aub_file_init(&aub_file
, output_file
,
439 verbose
== 2 ? stdout
: NULL
,
440 device
, program_invocation_short_name
);
441 aub_write_default_setup(&aub_file
);
444 printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
445 output_filename
, device
, devinfo
.gen
);
/* Exported ioctl() override — the heart of the interposer. Extracts the
 * pointer argument from the varargs, detects DRM character-device fds by
 * fstat (char device with DRM_MAJOR), and dispatches on the request code:
 * wait ioctls pass through (or are faked under device override), parameter
 * queries are answered from devinfo under override, EXECBUFFER2[_WR] is
 * dumped to the AUB trace, and BO lifecycle ioctls (CREATE/USERPTR/OPEN/
 * PRIME/CLOSE/MMAP) keep the `bos` table in sync.
 * NOTE(review): many interior lines are missing from this extraction (the
 * switch head, `ret` declaration, maybe_init call, break statements, closing
 * braces, va_end). Too intricate to reconstruct safely — kept byte-identical
 * with comments only. */
448 __attribute__ ((visibility ("default"))) int
449 ioctl(int fd
, unsigned long request
, ...)
/* ioctl is variadic; the (optional) argument is always a pointer here. */
456 va_start(args
, request
);
457 argp
= va_arg(args
, void *);
/* Identify a new DRM fd: DRM ioctl type on a char device with DRM_MAJOR. */
460 if (_IOC_TYPE(request
) == DRM_IOCTL_BASE
&&
461 drm_fd
!= fd
&& fstat(fd
, &buf
) == 0 &&
462 (buf
.st_mode
& S_IFMT
) == S_IFCHR
&& major(buf
.st_rdev
) == DRM_MAJOR
) {
465 printf("[intercept drm ioctl on fd %d]\n", fd
);
/* Wait ioctls: nothing to wait for under device override. */
472 case DRM_IOCTL_SYNCOBJ_WAIT
:
473 case DRM_IOCTL_I915_GEM_WAIT
: {
476 return libc_ioctl(fd
, request
, argp
);
/* Reset stats: fake a clean GPU under device override. */
479 case DRM_IOCTL_I915_GET_RESET_STATS
: {
480 if (device_override
) {
481 struct drm_i915_reset_stats
*stats
= argp
;
483 stats
->reset_count
= 0;
484 stats
->batch_active
= 0;
485 stats
->batch_pending
= 0;
488 return libc_ioctl(fd
, request
, argp
);
/* getparam: answer chipset id ourselves; under override, synthesize the
 * capability bits from devinfo. */
491 case DRM_IOCTL_I915_GETPARAM
: {
492 struct drm_i915_getparam
*getparam
= argp
;
494 ensure_device_info(fd
);
496 if (getparam
->param
== I915_PARAM_CHIPSET_ID
)
497 return get_pci_id(fd
, getparam
->value
);
499 if (device_override
) {
500 switch (getparam
->param
) {
501 case I915_PARAM_CS_TIMESTAMP_FREQUENCY
:
502 *getparam
->value
= devinfo
.timestamp_frequency
;
/* Features unconditionally advertised under override. */
505 case I915_PARAM_HAS_WAIT_TIMEOUT
:
506 case I915_PARAM_HAS_EXECBUF2
:
507 case I915_PARAM_MMAP_VERSION
:
508 case I915_PARAM_HAS_EXEC_ASYNC
:
509 case I915_PARAM_HAS_EXEC_FENCE
:
510 case I915_PARAM_HAS_EXEC_FENCE_ARRAY
:
511 *getparam
->value
= 1;
/* Softpin: gen8+ except Cherryview. */
514 case I915_PARAM_HAS_EXEC_SOFTPIN
:
515 *getparam
->value
= devinfo
.gen
>= 8 && !devinfo
.is_cherryview
;
523 return libc_ioctl(fd
, request
, argp
);
/* Context getparam: synthesize GTT size by generation under override. */
526 case DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM
: {
527 struct drm_i915_gem_context_param
*getparam
= argp
;
529 ensure_device_info(fd
);
531 if (device_override
) {
532 switch (getparam
->param
) {
533 case I915_CONTEXT_PARAM_GTT_SIZE
:
534 if (devinfo
.is_elkhartlake
)
535 getparam
->value
= 1ull << 36;
536 else if (devinfo
.gen
>= 8 && !devinfo
.is_cherryview
)
537 getparam
->value
= 1ull << 48;
539 getparam
->value
= 1ull << 31;
547 return libc_ioctl(fd
, request
, argp
);
/* Legacy execbuffer (v1) is not supported by this tool. */
550 case DRM_IOCTL_I915_GEM_EXECBUFFER
: {
554 "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
557 return libc_ioctl(fd
, request
, argp
);
/* The interesting path: dump the submission into the AUB trace, then
 * forward it to the kernel. */
560 case DRM_IOCTL_I915_GEM_EXECBUFFER2
:
561 case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
: {
562 dump_execbuffer2(fd
, argp
);
566 return libc_ioctl(fd
, request
, argp
);
/* Context creation: mirror kernel-created ids, or invent one in the AUB
 * writer when no real device exists. */
569 case DRM_IOCTL_I915_GEM_CONTEXT_CREATE
: {
570 uint32_t *ctx_id
= NULL
;
571 struct drm_i915_gem_context_create
*create
= argp
;
573 if (!device_override
) {
574 ret
= libc_ioctl(fd
, request
, argp
);
575 ctx_id
= &create
->ctx_id
;
579 create
->ctx_id
= aub_write_context_create(&aub_file
, ctx_id
);
584 case DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT
: {
585 uint32_t *ctx_id
= NULL
;
586 struct drm_i915_gem_context_create_ext
*create
= argp
;
588 if (!device_override
) {
589 ret
= libc_ioctl(fd
, request
, argp
);
590 ctx_id
= &create
->ctx_id
;
594 create
->ctx_id
= aub_write_context_create(&aub_file
, ctx_id
);
/* BO lifecycle tracking below: register every new handle in `bos`. */
599 case DRM_IOCTL_I915_GEM_CREATE
: {
600 struct drm_i915_gem_create
*create
= argp
;
602 ret
= libc_ioctl(fd
, request
, argp
);
604 add_new_bo(fd
, create
->handle
, create
->size
, NULL
);
609 case DRM_IOCTL_I915_GEM_USERPTR
: {
610 struct drm_i915_gem_userptr
*userptr
= argp
;
612 ret
= libc_ioctl(fd
, request
, argp
);
/* Tag userptr maps (bit 0) so remove_bo won't munmap app memory. */
614 add_new_bo(fd
, userptr
->handle
, userptr
->user_size
,
615 (void *) (uintptr_t) (userptr
->user_ptr
| USERPTR_FLAG
));
620 case DRM_IOCTL_GEM_CLOSE
: {
621 struct drm_gem_close
*close
= argp
;
623 remove_bo(fd
, close
->handle
);
625 return libc_ioctl(fd
, request
, argp
);
628 case DRM_IOCTL_GEM_OPEN
: {
629 struct drm_gem_open
*open
= argp
;
631 ret
= libc_ioctl(fd
, request
, argp
);
633 add_new_bo(fd
, open
->handle
, open
->size
, NULL
);
/* Prime import: size is discovered by seeking the dma-buf fd. */
638 case DRM_IOCTL_PRIME_FD_TO_HANDLE
: {
639 struct drm_prime_handle
*prime
= argp
;
641 ret
= libc_ioctl(fd
, request
, argp
);
645 size
= lseek(prime
->fd
, 0, SEEK_END
);
646 fail_if(size
== -1, "failed to get prime bo size\n");
647 add_new_bo(fd
, prime
->handle
, size
, NULL
);
/* Track that the app has its own CPU mapping of this BO. */
654 case DRM_IOCTL_I915_GEM_MMAP
: {
655 ret
= libc_ioctl(fd
, request
, argp
);
657 struct drm_i915_gem_mmap
*mmap
= argp
;
658 struct bo
*bo
= get_bo(fd
, mmap
->handle
);
659 bo
->user_mapped
= true;
/* Unhandled DRM ioctls and non-DRM fds fall through to libc. */
666 return libc_ioctl(fd
, request
, argp
);
669 return libc_ioctl(fd
, request
, argp
);
/* Resolve the real libc close/ioctl/munmap with dlsym(RTLD_NEXT, ...) so the
 * overrides above can forward to them; abort if resolution fails.
 * NOTE(review): the enclosing function header (presumably a static init()
 * called by the *_init_helper trampolines) is missing from this extraction,
 * as is any check on libc_munmap — confirm against the full file. */
676 libc_close
= dlsym(RTLD_NEXT
, "close");
677 libc_ioctl
= dlsym(RTLD_NEXT
, "ioctl");
678 libc_munmap
= dlsym(RTLD_NEXT
, "munmap");
679 fail_if(libc_close
== NULL
|| libc_ioctl
== NULL
,
680 "failed to get libc ioctl or close\n");
/* First-call trampoline for close(): resolves the real libc symbols, then
 * forwards. NOTE(review): the return-type line, opening brace and the init()
 * call are missing from this extraction — kept byte-identical. */
684 close_init_helper(int fd
)
687 return libc_close(fd
);
/* First-call trampoline for ioctl(): extracts the single pointer argument
 * from the varargs and forwards to the (now resolved) real ioctl.
 * NOTE(review): the return-type line, va_list declaration, init() call and
 * va_end are missing from this extraction — kept byte-identical. */
691 ioctl_init_helper(int fd
, unsigned long request
, ...)
696 va_start(args
, request
);
697 argp
= va_arg(args
, void *);
701 return libc_ioctl(fd
, request
, argp
);
/* First-call trampoline for munmap(): scans the whole BO table for an entry
 * whose map equals the address being unmapped and clears its user_mapped
 * flag, then forwards to the real munmap.
 * NOTE(review): the return-type line, init() call, a likely break after the
 * match and closing braces are missing from this extraction — kept
 * byte-identical. */
705 munmap_init_helper(void *addr
, size_t length
)
708 for (uint32_t i
= 0; i
< MAX_FD_COUNT
* MAX_BO_COUNT
; i
++) {
709 struct bo
*bo
= &bos
[i
];
710 if (bo
->map
== addr
) {
711 bo
->user_mapped
= false;
715 return libc_munmap(addr
, length
);
/* Library destructor: if a device was ever identified (devinfo.gen != 0),
 * free the strdup'ed output filename and finalize the AUB trace file.
 * NOTE(review): the function name line and the tail of the body (any
 * fclose/cleanup and closing braces) are missing from this extraction —
 * kept byte-identical. */
718 static void __attribute__ ((destructor
))
721 if (devinfo
.gen
!= 0) {
722 free(output_filename
);
723 aub_file_finish(&aub_file
);