intel/dump_gpu: add an option to capture a single frame
[mesa.git] / src / intel / tools / intel_dump_gpu.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <signal.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/mman.h>
39 #include <dlfcn.h>
40 #include "drm-uapi/i915_drm.h"
41 #include <inttypes.h>
42
43 #include "intel_aub.h"
44 #include "aub_write.h"
45
46 #include "dev/gen_debug.h"
47 #include "dev/gen_device_info.h"
48 #include "util/macros.h"
49
50 static int close_init_helper(int fd);
51 static int ioctl_init_helper(int fd, unsigned long request, ...);
52 static int munmap_init_helper(void *addr, size_t length);
53
54 static int (*libc_close)(int fd) = close_init_helper;
55 static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;
56 static int (*libc_munmap)(void *addr, size_t length) = munmap_init_helper;
57
58 static int drm_fd = -1;
59 static char *output_filename = NULL;
60 static FILE *output_file = NULL;
61 static int verbose = 0;
62 static bool device_override = false;
63 static bool capture_only = false;
64 static int64_t frame_id = -1;
65 static bool capture_finished = false;
66
67 #define MAX_FD_COUNT 64
68 #define MAX_BO_COUNT 64 * 1024
69
70 struct bo {
71 uint32_t size;
72 uint64_t offset;
73 void *map;
74 /* Whether the buffer has been positionned in the GTT already. */
75 bool gtt_mapped : 1;
76 /* Tracks userspace mmapping of the buffer */
77 bool user_mapped : 1;
78 /* Using the i915-gem mmapping ioctl & execbuffer ioctl, track whether a
79 * buffer has been updated.
80 */
81 bool dirty : 1;
82 };
83
84 static struct bo *bos;
85
86 #define DRM_MAJOR 226
87
88 /* We set bit 0 in the map pointer for userptr BOs so we know not to
89 * munmap them on DRM_IOCTL_GEM_CLOSE.
90 */
91 #define USERPTR_FLAG 1
92 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
93 #define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
94
/* Print an error (prefixed with the tool name) and trap into the
 * debugger when `cond` is non-zero; no-op otherwise.
 */
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
   if (!cond)
      return;

   fprintf(stderr, "intel_dump_gpu: ");

   va_list ap;
   va_start(ap, format);
   vfprintf(stderr, format, ap);
   va_end(ap);

   /* SIGTRAP rather than abort() so a attached debugger stops here. */
   raise(SIGTRAP);
}
110
111 static struct bo *
112 get_bo(unsigned fd, uint32_t handle)
113 {
114 struct bo *bo;
115
116 fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
117 fail_if(fd >= MAX_FD_COUNT, "bo fd too large\n");
118 bo = &bos[handle + fd * MAX_BO_COUNT];
119
120 return bo;
121 }
122
/* Round `v` up to the next multiple of `a`.  `a` must be a power of
 * two.
 */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   uint32_t mask = a - 1;
   return (v + mask) & ~mask;
}
128
129 static struct gen_device_info devinfo = {0};
130 static int device = 0;
131 static struct aub_file aub_file;
132
/* Lazily identify the device, filling in `device` (pci id) and
 * `devinfo`.  Called on the first intercepted ioctl rather than at
 * open time because the fd may not be authenticated yet.  With a
 * device override, `device` is already set from the config file and
 * only `devinfo` needs populating from the pci id.
 */
static void
ensure_device_info(int fd)
{
   /* We can't do this at open time as we're not yet authenticated. */
   if (device == 0) {
      /* No override: ask the kernel who we are talking to. */
      fail_if(!gen_get_device_info_from_fd(fd, &devinfo),
              "failed to identify chipset.\n");
      device = devinfo.chipset_id;
   } else if (devinfo.gen == 0) {
      /* Override set but devinfo not derived from it yet. */
      fail_if(!gen_get_device_info_from_pci_id(device, &devinfo),
              "failed to identify chipset.\n");
   }
}
146
/* Return a malloc()ed copy of `bo`'s contents with all of `obj`'s
 * relocation entries patched to the final GTT offsets of their target
 * buffers.  Ownership of the returned buffer transfers to the caller
 * (dump_execbuffer2 frees it when it differs from bo->map).
 */
static void *
relocate_bo(int fd, struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
            const struct drm_i915_gem_exec_object2 *obj)
{
   const struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   const struct drm_i915_gem_relocation_entry *relocs =
      (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
   void *relocated;
   int handle;

   relocated = malloc(bo->size);
   fail_if(relocated == NULL, "out of memory\n");
   /* GET_PTR strips the userptr tag bit before reading the mapping. */
   memcpy(relocated, GET_PTR(bo->map), bo->size);
   for (size_t i = 0; i < obj->relocation_count; i++) {
      fail_if(relocs[i].offset >= bo->size, "reloc outside bo\n");

      /* With HANDLE_LUT, target_handle indexes the exec object list
       * instead of being a GEM handle directly. */
      if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
         handle = exec_objects[relocs[i].target_handle].handle;
      else
         handle = relocs[i].target_handle;

      aub_write_reloc(&devinfo, ((char *)relocated) + relocs[i].offset,
                      get_bo(fd, handle)->offset + relocs[i].delta);
   }

   return relocated;
}
175
/* Issue an ioctl through the real libc entry point, retrying when it
 * is interrupted by a signal or transiently busy.
 */
static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   for (;;) {
      int ret = libc_ioctl(fd, request, argp);
      if (ret != -1 || (errno != EINTR && errno != EAGAIN))
         return ret;
   }
}
187
188 static void *
189 gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
190 {
191 struct drm_i915_gem_mmap mmap = {
192 .handle = handle,
193 .offset = offset,
194 .size = size
195 };
196
197 if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
198 return MAP_FAILED;
199
200 return (void *)(uintptr_t) mmap.addr_ptr;
201 }
202
203 static enum drm_i915_gem_engine_class
204 engine_class_from_ring_flag(uint32_t ring_flag)
205 {
206 switch (ring_flag) {
207 case I915_EXEC_DEFAULT:
208 case I915_EXEC_RENDER:
209 return I915_ENGINE_CLASS_RENDER;
210 case I915_EXEC_BSD:
211 return I915_ENGINE_CLASS_VIDEO;
212 case I915_EXEC_BLT:
213 return I915_ENGINE_CLASS_COPY;
214 case I915_EXEC_VEBOX:
215 return I915_ENGINE_CLASS_VIDEO_ENHANCE;
216 default:
217 return I915_ENGINE_CLASS_INVALID;
218 }
219 }
220
/* Serialize one execbuffer2 submission into the AUB file.
 *
 * Steps: lazily open the AUB stream, assign a GTT/PPGTT offset to every
 * buffer in the exec list (honoring EXEC_OBJECT_PINNED), mmap any
 * buffer whose contents we have not seen, then — unless single-frame
 * capture skips this submission — map the buffers into the PPGTT,
 * write dirty buffer contents (with relocations applied) into the AUB,
 * and emit the batch execution.  With device_override, signal fences
 * are also signaled here since the kernel never runs the batch.
 */
static void
dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
{
   struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
   uint32_t offset;
   struct drm_i915_gem_exec_object2 *obj;
   struct bo *bo, *batch_bo;
   int batch_index;
   void *data;

   ensure_device_info(fd);

   /* Single-frame capture already completed: drop everything after. */
   if (capture_finished)
      return;

   if (!aub_file.file) {
      aub_file_init(&aub_file, output_file,
                    verbose == 2 ? stdout : NULL,
                    device, program_invocation_short_name);
      aub_write_default_setup(&aub_file);

      if (verbose)
         printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
                output_filename, device, devinfo.gen);
   }

   if (aub_use_execlists(&aub_file))
      offset = 0x1000;
   else
      offset = aub_gtt_size(&aub_file);

   /* First pass: place every buffer and make sure we can read it. */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      /* If bo->size == 0, this means they passed us an invalid
       * buffer. The kernel will reject it and so should we.
       */
      if (bo->size == 0) {
         if (verbose)
            printf("BO #%d is invalid!\n", obj->handle);
         return;
      }

      if (obj->flags & EXEC_OBJECT_PINNED) {
         /* Softpinned: userspace chose the address, keep it. */
         bo->offset = obj->offset;
      } else {
         if (obj->alignment != 0)
            offset = align_u32(offset, obj->alignment);
         bo->offset = offset;
         offset = align_u32(offset + bo->size + 4095, 4096);
      }

      if (bo->map == NULL && bo->size > 0)
         bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
      fail_if(bo->map == MAP_FAILED, "bo mmap failed\n");
   }

   /* Locate the driver's debug identifier block (if any) to learn which
    * frame this submission belongs to. */
   uint64_t current_frame_id = 0;
   if (frame_id >= 0) {
      for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
         obj = &exec_objects[i];
         bo = get_bo(fd, obj->handle);

         /* Check against frame_id requirements. */
         if (memcmp(bo->map, intel_debug_identifier(),
                    intel_debug_identifier_size()) == 0) {
            const struct gen_debug_block_frame *frame_desc =
               intel_debug_get_identifier_block(bo->map, bo->size,
                                                GEN_DEBUG_BLOCK_TYPE_FRAME);

            current_frame_id = frame_desc ? frame_desc->frame_id : 0;
            break;
         }
      }
   }

   if (verbose)
      printf("Dumping execbuffer2 (frame_id=%"PRIu64", buffers=%u):\n",
             current_frame_id, execbuffer2->buffer_count);

   /* Check whether we can stop right now. */
   if (frame_id >= 0) {
      /* Not at the requested frame yet: skip this submission. */
      if (current_frame_id < frame_id)
         return;

      /* Past the requested frame: finalize the file, stop capturing. */
      if (current_frame_id > frame_id) {
         aub_file_finish(&aub_file);
         capture_finished = true;
         return;
      }
   }


   /* Map buffers into the PPGTT. */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (verbose) {
         printf("BO #%d (%dB) @ 0x%" PRIx64 "\n",
                obj->handle, bo->size, bo->offset);
      }

      if (aub_use_execlists(&aub_file) && !bo->gtt_mapped) {
         aub_map_ppgtt(&aub_file, bo->offset, bo->size);
         bo->gtt_mapped = true;
      }
   }

   /* Write the buffer content into the Aub. */
   batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
      execbuffer2->buffer_count - 1;
   batch_bo = get_bo(fd, exec_objects[batch_index].handle);
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (obj->relocation_count > 0)
         data = relocate_bo(fd, bo, execbuffer2, obj);
      else
         data = bo->map;

      /* In capture_only mode, only dump buffers flagged for capture. */
      bool write = !capture_only || (obj->flags & EXEC_OBJECT_CAPTURE);

      if (write && bo->dirty) {
         if (bo == batch_bo) {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_BATCH,
                                  GET_PTR(data), bo->size, bo->offset);
         } else {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_NOTYPE,
                                  GET_PTR(data), bo->size, bo->offset);
         }

         /* A user-mapped buffer can change behind our back; keep it
          * dirty so the next submission re-dumps it. */
         if (!bo->user_mapped)
            bo->dirty = false;
      }

      /* relocate_bo() returned a private copy; release it. */
      if (data != bo->map)
         free(data);
   }

   uint32_t ctx_id = execbuffer2->rsvd1;

   aub_write_exec(&aub_file, ctx_id,
                  batch_bo->offset + execbuffer2->batch_start_offset,
                  offset, engine_class_from_ring_flag(ring_flag));

   /* Nothing really executes under an override, so signal any fence
    * the application expects the kernel to signal. */
   if (device_override &&
       (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
      struct drm_i915_gem_exec_fence *fences =
         (void*)(uintptr_t)execbuffer2->cliprects_ptr;
      for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
         if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
            struct drm_syncobj_array arg = {
               .handles = (uintptr_t)&fences[i].handle,
               .count_handles = 1,
               .pad = 0,
            };
            libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
         }
      }
   }
}
387
388 static void
389 add_new_bo(unsigned fd, int handle, uint64_t size, void *map)
390 {
391 struct bo *bo = &bos[handle + fd * MAX_BO_COUNT];
392
393 fail_if(handle >= MAX_BO_COUNT, "bo handle out of range\n");
394 fail_if(fd >= MAX_FD_COUNT, "bo fd out of range\n");
395 fail_if(size == 0, "bo size is invalid\n");
396
397 bo->size = size;
398 bo->map = map;
399 bo->user_mapped = false;
400 bo->gtt_mapped = false;
401 }
402
403 static void
404 remove_bo(int fd, int handle)
405 {
406 struct bo *bo = get_bo(fd, handle);
407
408 if (bo->map && !IS_USERPTR(bo->map))
409 munmap(bo->map, bo->size);
410 memset(bo, 0, sizeof(*bo));
411 }
412
413 __attribute__ ((visibility ("default"))) int
414 close(int fd)
415 {
416 if (fd == drm_fd)
417 drm_fd = -1;
418
419 return libc_close(fd);
420 }
421
422 static int
423 get_pci_id(int fd, int *pci_id)
424 {
425 struct drm_i915_getparam gparam;
426
427 if (device_override) {
428 *pci_id = device;
429 return 0;
430 }
431
432 gparam.param = I915_PARAM_CHIPSET_ID;
433 gparam.value = pci_id;
434 return libc_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gparam);
435 }
436
437 static void
438 maybe_init(int fd)
439 {
440 static bool initialized = false;
441 FILE *config;
442 char *key, *value;
443
444 if (initialized)
445 return;
446
447 initialized = true;
448
449 config = fopen(getenv("INTEL_DUMP_GPU_CONFIG"), "r");
450 while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
451 if (!strcmp(key, "verbose")) {
452 if (!strcmp(value, "1")) {
453 verbose = 1;
454 } else if (!strcmp(value, "2")) {
455 verbose = 2;
456 }
457 } else if (!strcmp(key, "device")) {
458 fail_if(device != 0, "Device/Platform override specified multiple times.");
459 fail_if(sscanf(value, "%i", &device) != 1,
460 "failed to parse device id '%s'",
461 value);
462 device_override = true;
463 } else if (!strcmp(key, "platform")) {
464 fail_if(device != 0, "Device/Platform override specified multiple times.");
465 device = gen_device_name_to_pci_device_id(value);
466 fail_if(device == -1, "Unknown platform '%s'", value);
467 device_override = true;
468 } else if (!strcmp(key, "file")) {
469 output_filename = strdup(value);
470 output_file = fopen(output_filename, "w+");
471 fail_if(output_file == NULL,
472 "failed to open file '%s'\n",
473 output_filename);
474 } else if (!strcmp(key, "capture_only")) {
475 capture_only = atoi(value);
476 } else if (!strcmp(key, "frame")) {
477 frame_id = atol(value);
478 } else {
479 fprintf(stderr, "unknown option '%s'\n", key);
480 }
481
482 free(key);
483 free(value);
484 }
485 fclose(config);
486
487 bos = calloc(MAX_FD_COUNT * MAX_BO_COUNT, sizeof(bos[0]));
488 fail_if(bos == NULL, "out of memory\n");
489
490 int ret = get_pci_id(fd, &device);
491 assert(ret == 0);
492
493 aub_file_init(&aub_file, output_file,
494 verbose == 2 ? stdout : NULL,
495 device, program_invocation_short_name);
496 aub_write_default_setup(&aub_file);
497
498 if (verbose)
499 printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
500 output_filename, device, devinfo.gen);
501 }
502
/* Interposed ioctl(2).
 *
 * Detects the application's DRM fd, lazily initializes capture state,
 * and intercepts the i915/GEM ioctls needed to track buffer objects
 * and serialize execbuffers into the AUB file.  When a device/platform
 * override is configured, ioctls that only make sense against the real
 * hardware (waits, getparams, context creation, execbuffer submission)
 * are emulated in userspace instead of being forwarded.
 */
__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;
   int ret;
   struct stat buf;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   /* Recognize a DRM fd: a DRM ioctl issued on a character device with
    * the DRM major number. */
   if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
       drm_fd != fd && fstat(fd, &buf) == 0 &&
       (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
      drm_fd = fd;
      if (verbose)
         printf("[intercept drm ioctl on fd %d]\n", fd);
   }

   if (fd == drm_fd) {
      maybe_init(fd);

      switch (request) {
      case DRM_IOCTL_SYNCOBJ_WAIT:
      case DRM_IOCTL_I915_GEM_WAIT: {
         /* Nothing ever executes under an override, so waits complete
          * immediately. */
         if (device_override)
            return 0;
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GET_RESET_STATS: {
         /* No GPU, no hangs. */
         if (device_override) {
            struct drm_i915_reset_stats *stats = argp;

            stats->reset_count = 0;
            stats->batch_active = 0;
            stats->batch_pending = 0;
            return 0;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GETPARAM: {
         struct drm_i915_getparam *getparam = argp;

         ensure_device_info(fd);

         if (getparam->param == I915_PARAM_CHIPSET_ID)
            return get_pci_id(fd, getparam->value);

         if (device_override) {
            /* Answer the queries the driver needs from devinfo; claim
             * support for the features the capture path relies on. */
            switch (getparam->param) {
            case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
               *getparam->value = devinfo.timestamp_frequency;
               return 0;

            case I915_PARAM_HAS_WAIT_TIMEOUT:
            case I915_PARAM_HAS_EXECBUF2:
            case I915_PARAM_MMAP_VERSION:
            case I915_PARAM_HAS_EXEC_ASYNC:
            case I915_PARAM_HAS_EXEC_FENCE:
            case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
               *getparam->value = 1;
               return 0;

            case I915_PARAM_HAS_EXEC_SOFTPIN:
               *getparam->value = devinfo.gen >= 8 && !devinfo.is_cherryview;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM: {
         struct drm_i915_gem_context_param *getparam = argp;

         ensure_device_info(fd);

         if (device_override) {
            switch (getparam->param) {
            case I915_CONTEXT_PARAM_GTT_SIZE:
               /* Report the address space size of the overridden
                * platform, not the real one. */
               if (devinfo.is_elkhartlake)
                  getparam->value = 1ull << 36;
               else if (devinfo.gen >= 8 && !devinfo.is_cherryview)
                  getparam->value = 1ull << 48;
               else
                  getparam->value = 1ull << 31;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER: {
         /* Legacy (pre-execbuffer2) submission: warn once, pass
          * through uncaptured. */
         static bool once;
         if (!once) {
            fprintf(stderr,
                    "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
            once = true;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
         /* Capture the submission, then forward it (unless the device
          * is overridden, in which case the kernel never sees it). */
         dump_execbuffer2(fd, argp);
         if (device_override)
            return 0;

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         /* With ctx_id == NULL (override), the aub writer invents an
          * id; otherwise it records the kernel-assigned one. */
         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create_ext *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CREATE: {
         struct drm_i915_gem_create *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_USERPTR: {
         struct drm_i915_gem_userptr *userptr = argp;

         ret = libc_ioctl(fd, request, argp);
         /* Tag the map pointer so remove_bo() knows not to munmap
          * application-owned memory. */
         if (ret == 0)
            add_new_bo(fd, userptr->handle, userptr->user_size,
                       (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));

         return ret;
      }

      case DRM_IOCTL_GEM_CLOSE: {
         struct drm_gem_close *close = argp;

         remove_bo(fd, close->handle);

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_GEM_OPEN: {
         struct drm_gem_open *open = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, open->handle, open->size, NULL);

         return ret;
      }

      case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
         struct drm_prime_handle *prime = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            off_t size;

            /* A dmabuf fd's size is its seek end. */
            size = lseek(prime->fd, 0, SEEK_END);
            fail_if(size == -1, "failed to get prime bo size\n");
            add_new_bo(fd, prime->handle, size, NULL);

         }

         return ret;
      }

      case DRM_IOCTL_I915_GEM_MMAP: {
         ret = libc_ioctl(fd, request, argp);
         /* The application can now scribble on the buffer at any time,
          * so keep re-dumping it (see dump_execbuffer2). */
         if (ret == 0) {
            struct drm_i915_gem_mmap *mmap = argp;
            struct bo *bo = get_bo(fd, mmap->handle);
            bo->user_mapped = true;
            bo->dirty = true;
         }
         return ret;
      }

      default:
         return libc_ioctl(fd, request, argp);
      }
   } else {
      /* Not the DRM fd: plain pass-through. */
      return libc_ioctl(fd, request, argp);
   }
}
727
728 static void
729 init(void)
730 {
731 libc_close = dlsym(RTLD_NEXT, "close");
732 libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
733 libc_munmap = dlsym(RTLD_NEXT, "munmap");
734 fail_if(libc_close == NULL || libc_ioctl == NULL,
735 "failed to get libc ioctl or close\n");
736 }
737
/* First-call trampoline for close(): resolve the real libc entry
 * points, then forward.
 */
static int
close_init_helper(int fd)
{
   init();
   return libc_close(fd);
}
744
/* First-call trampoline for ioctl(): resolve the real libc entry
 * points, then forward with the (single) pointer argument.
 */
static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
   void *argp;
   va_list ap;

   va_start(ap, request);
   argp = va_arg(ap, void *);
   va_end(ap);

   init();
   return libc_ioctl(fd, request, argp);
}
758
759 static int
760 munmap_init_helper(void *addr, size_t length)
761 {
762 init();
763 for (uint32_t i = 0; i < MAX_FD_COUNT * MAX_BO_COUNT; i++) {
764 struct bo *bo = &bos[i];
765 if (bo->map == addr) {
766 bo->user_mapped = false;
767 break;
768 }
769 }
770 return libc_munmap(addr, length);
771 }
772
773 static void __attribute__ ((destructor))
774 fini(void)
775 {
776 if (devinfo.gen != 0) {
777 free(output_filename);
778 if (!capture_finished)
779 aub_file_finish(&aub_file);
780 free(bos);
781 }
782 }