050ec78e3b8c90bb303e6e4a099932f38c1de801
[mesa.git] / src / intel / tools / intel_dump_gpu.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <signal.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/mman.h>
39 #include <dlfcn.h>
40 #include "drm-uapi/i915_drm.h"
41 #include <inttypes.h>
42
43 #include "intel_aub.h"
44 #include "aub_write.h"
45
46 #include "dev/gen_device_info.h"
47 #include "util/macros.h"
48
49 static int close_init_helper(int fd);
50 static int ioctl_init_helper(int fd, unsigned long request, ...);
51 static int munmap_init_helper(void *addr, size_t length);
52
53 static int (*libc_close)(int fd) = close_init_helper;
54 static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;
55 static int (*libc_munmap)(void *addr, size_t length) = munmap_init_helper;
56
57 static int drm_fd = -1;
58 static char *output_filename = NULL;
59 static FILE *output_file = NULL;
60 static int verbose = 0;
61 static bool device_override = false;
62 static bool capture_only = false;
63
64 #define MAX_FD_COUNT 64
65 #define MAX_BO_COUNT 64 * 1024
66
/* Per-handle tracking record for a GEM buffer object.  One slot exists per
 * (fd, handle) pair in the global `bos` table.
 */
struct bo {
   /* Size in bytes as reported at creation (GEM_CREATE/USERPTR/OPEN/prime). */
   uint32_t size;
   /* GPU virtual address assigned when the BO is seen in an execbuffer. */
   uint64_t offset;
   /* CPU mapping used to read the BO contents; bit 0 is set for userptr
    * BOs (see USERPTR_FLAG below) so GET_PTR() must be used to dereference. */
   void *map;
   /* Tracks userspace mmapping of the buffer */
   bool user_mapped : 1;
   /* Using the i915-gem mmapping ioctl & execbuffer ioctl, track whether a
    * buffer has been updated.
    */
   bool dirty : 1;
};
78
79 static struct bo *bos;
80
81 #define DRM_MAJOR 226
82
83 /* We set bit 0 in the map pointer for userptr BOs so we know not to
84 * munmap them on DRM_IOCTL_GEM_CLOSE.
85 */
86 #define USERPTR_FLAG 1
87 #define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
88 #define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
89
/* Print a printf-style error message prefixed with "intel_dump_gpu: " and
 * trap (SIGTRAP) when `cond` is non-zero; no-op otherwise.
 */
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
   if (!cond)
      return;

   fprintf(stderr, "intel_dump_gpu: ");

   va_list ap;
   va_start(ap, format);
   vfprintf(stderr, format, ap);
   va_end(ap);

   /* SIGTRAP rather than abort() so a debugger stops right here. */
   raise(SIGTRAP);
}
105
106 static struct bo *
107 get_bo(unsigned fd, uint32_t handle)
108 {
109 struct bo *bo;
110
111 fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
112 fail_if(fd >= MAX_FD_COUNT, "bo fd too large\n");
113 bo = &bos[handle + fd * MAX_BO_COUNT];
114
115 return bo;
116 }
117
/* Round `v` up to the next multiple of `a`; `a` must be a power of two. */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   uint32_t mask = a - 1;

   return (v + mask) & ~mask;
}
123
124 static struct gen_device_info devinfo = {0};
125 static int device = 0;
126 static struct aub_file aub_file;
127
/* Lazily populate the global `devinfo`/`device` pair, either from the real
 * DRM fd or from a device/platform override parsed in maybe_init().
 */
static void
ensure_device_info(int fd)
{
   /* We can't do this at open time as we're not yet authenticated. */
   if (device == 0) {
      /* No override: query the kernel through the fd. */
      fail_if(!gen_get_device_info_from_fd(fd, &devinfo),
              "failed to identify chipset.\n");
      device = devinfo.chipset_id;
   } else if (devinfo.gen == 0) {
      /* An override set `device` but devinfo hasn't been resolved yet:
       * derive it from the PCI id alone. */
      fail_if(!gen_get_device_info_from_pci_id(device, &devinfo),
              "failed to identify chipset.\n");
   }
}
141
142 static void *
143 relocate_bo(int fd, struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
144 const struct drm_i915_gem_exec_object2 *obj)
145 {
146 const struct drm_i915_gem_exec_object2 *exec_objects =
147 (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
148 const struct drm_i915_gem_relocation_entry *relocs =
149 (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
150 void *relocated;
151 int handle;
152
153 relocated = malloc(bo->size);
154 fail_if(relocated == NULL, "out of memory\n");
155 memcpy(relocated, GET_PTR(bo->map), bo->size);
156 for (size_t i = 0; i < obj->relocation_count; i++) {
157 fail_if(relocs[i].offset >= bo->size, "reloc outside bo\n");
158
159 if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
160 handle = exec_objects[relocs[i].target_handle].handle;
161 else
162 handle = relocs[i].target_handle;
163
164 aub_write_reloc(&devinfo, ((char *)relocated) + relocs[i].offset,
165 get_bo(fd, handle)->offset + relocs[i].delta);
166 }
167
168 return relocated;
169 }
170
/* Issue an ioctl, transparently retrying on EINTR/EAGAIN. */
static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   int ret = libc_ioctl(fd, request, argp);

   while (ret == -1 && (errno == EINTR || errno == EAGAIN))
      ret = libc_ioctl(fd, request, argp);

   return ret;
}
182
183 static void *
184 gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
185 {
186 struct drm_i915_gem_mmap mmap = {
187 .handle = handle,
188 .offset = offset,
189 .size = size
190 };
191
192 if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
193 return MAP_FAILED;
194
195 return (void *)(uintptr_t) mmap.addr_ptr;
196 }
197
198 static enum drm_i915_gem_engine_class
199 engine_class_from_ring_flag(uint32_t ring_flag)
200 {
201 switch (ring_flag) {
202 case I915_EXEC_DEFAULT:
203 case I915_EXEC_RENDER:
204 return I915_ENGINE_CLASS_RENDER;
205 case I915_EXEC_BSD:
206 return I915_ENGINE_CLASS_VIDEO;
207 case I915_EXEC_BLT:
208 return I915_ENGINE_CLASS_COPY;
209 case I915_EXEC_VEBOX:
210 return I915_ENGINE_CLASS_VIDEO_ENHANCE;
211 default:
212 return I915_ENGINE_CLASS_INVALID;
213 }
214 }
215
/* Core interception point: record one execbuffer2 submission into the AUB
 * file.  Assigns GPU offsets to every BO, maps and writes out dirty BO
 * contents (with relocations applied), then emits the batch execution.
 * When a device override is active, also signals any out-fences so the
 * application does not block on a GPU that never runs.
 */
static void
dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
{
   struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
   uint32_t offset;
   struct drm_i915_gem_exec_object2 *obj;
   struct bo *bo, *batch_bo;
   int batch_index;
   void *data;

   ensure_device_info(fd);

   /* maybe_init() normally opens the AUB file, but guard here too in case
    * it could not (e.g. devinfo only became available now). */
   if (!aub_file.file) {
      aub_file_init(&aub_file, output_file,
                    verbose == 2 ? stdout : NULL,
                    device, program_invocation_short_name);
      aub_write_default_setup(&aub_file);

      if (verbose)
         printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
                output_filename, device, devinfo.gen);
   }

   /* Starting address for BOs we place ourselves: skip the first page on
    * execlist (ppgtt) setups, otherwise append after the current GTT end. */
   if (aub_use_execlists(&aub_file))
      offset = 0x1000;
   else
      offset = aub_gtt_size(&aub_file);

   if (verbose)
      printf("Dumping execbuffer2:\n");

   /* First pass: assign a GPU address to every BO and make sure we have a
    * CPU mapping of its contents. */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      /* If bo->size == 0, this means they passed us an invalid
       * buffer. The kernel will reject it and so should we.
       */
      if (bo->size == 0) {
         if (verbose)
            printf("BO #%d is invalid!\n", obj->handle);
         return;
      }

      if (obj->flags & EXEC_OBJECT_PINNED) {
         /* Softpinned: the application chose the address; honor it. */
         bo->offset = obj->offset;
         if (verbose)
            printf("BO #%d (%dB) pinned @ 0x%" PRIx64 "\n",
                   obj->handle, bo->size, bo->offset);
      } else {
         /* Relocatable: place it ourselves, respecting the requested
          * alignment, then advance past it page-aligned. */
         if (obj->alignment != 0)
            offset = align_u32(offset, obj->alignment);
         bo->offset = offset;
         if (verbose)
            printf("BO #%d (%dB) @ 0x%" PRIx64 "\n", obj->handle,
                   bo->size, bo->offset);
         offset = align_u32(offset + bo->size + 4095, 4096);
      }

      if (bo->map == NULL && bo->size > 0)
         bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
      fail_if(bo->map == MAP_FAILED, "bo mmap failed\n");

      if (aub_use_execlists(&aub_file))
         aub_map_ppgtt(&aub_file, bo->offset, bo->size);
   }

   /* The batch buffer is either the first or last exec object depending on
    * the BATCH_FIRST flag. */
   batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
      execbuffer2->buffer_count - 1;
   batch_bo = get_bo(fd, exec_objects[batch_index].handle);

   /* Second pass: write out the (relocated) contents of each dirty BO. */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (obj->relocation_count > 0)
         data = relocate_bo(fd, bo, execbuffer2, obj);
      else
         data = bo->map;

      /* In capture_only mode, only BOs flagged EXEC_OBJECT_CAPTURE are
       * written to the AUB. */
      bool write = !capture_only || (obj->flags & EXEC_OBJECT_CAPTURE);

      if (write && bo->dirty) {
         if (bo == batch_bo) {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_BATCH,
                                  GET_PTR(data), bo->size, bo->offset);
         } else {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_NOTYPE,
                                  GET_PTR(data), bo->size, bo->offset);
         }

         /* A BO still mapped by userspace can change again behind our
          * back, so it must stay dirty. */
         if (!bo->user_mapped)
            bo->dirty = false;
      }

      /* relocate_bo() returned a private copy; free it. */
      if (data != bo->map)
         free(data);
   }

   uint32_t ctx_id = execbuffer2->rsvd1;

   aub_write_exec(&aub_file, ctx_id,
                  batch_bo->offset + execbuffer2->batch_start_offset,
                  offset, engine_class_from_ring_flag(ring_flag));

   /* With FENCE_ARRAY, cliprects_ptr actually carries the fence array;
    * signal requested out-fences since no real GPU will. */
   if (device_override &&
       (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
      struct drm_i915_gem_exec_fence *fences =
         (void*)(uintptr_t)execbuffer2->cliprects_ptr;
      for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
         if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
            struct drm_syncobj_array arg = {
               .handles = (uintptr_t)&fences[i].handle,
               .count_handles = 1,
               .pad = 0,
            };
            libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
         }
      }
   }
}
338
339 static void
340 add_new_bo(unsigned fd, int handle, uint64_t size, void *map)
341 {
342 struct bo *bo = &bos[handle + fd * MAX_BO_COUNT];
343
344 fail_if(handle >= MAX_BO_COUNT, "bo handle out of range\n");
345 fail_if(fd >= MAX_FD_COUNT, "bo fd out of range\n");
346 fail_if(size == 0, "bo size is invalid\n");
347
348 bo->size = size;
349 bo->map = map;
350 bo->user_mapped = false;
351 }
352
353 static void
354 remove_bo(int fd, int handle)
355 {
356 struct bo *bo = get_bo(fd, handle);
357
358 if (bo->map && !IS_USERPTR(bo->map))
359 munmap(bo->map, bo->size);
360 bo->size = 0;
361 bo->map = NULL;
362 bo->user_mapped = false;
363 }
364
365 __attribute__ ((visibility ("default"))) int
366 close(int fd)
367 {
368 if (fd == drm_fd)
369 drm_fd = -1;
370
371 return libc_close(fd);
372 }
373
374 static int
375 get_pci_id(int fd, int *pci_id)
376 {
377 struct drm_i915_getparam gparam;
378
379 if (device_override) {
380 *pci_id = device;
381 return 0;
382 }
383
384 gparam.param = I915_PARAM_CHIPSET_ID;
385 gparam.value = pci_id;
386 return libc_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gparam);
387 }
388
389 static void
390 maybe_init(int fd)
391 {
392 static bool initialized = false;
393 FILE *config;
394 char *key, *value;
395
396 if (initialized)
397 return;
398
399 initialized = true;
400
401 config = fopen(getenv("INTEL_DUMP_GPU_CONFIG"), "r");
402 while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
403 if (!strcmp(key, "verbose")) {
404 if (!strcmp(value, "1")) {
405 verbose = 1;
406 } else if (!strcmp(value, "2")) {
407 verbose = 2;
408 }
409 } else if (!strcmp(key, "device")) {
410 fail_if(device != 0, "Device/Platform override specified multiple times.");
411 fail_if(sscanf(value, "%i", &device) != 1,
412 "failed to parse device id '%s'",
413 value);
414 device_override = true;
415 } else if (!strcmp(key, "platform")) {
416 fail_if(device != 0, "Device/Platform override specified multiple times.");
417 device = gen_device_name_to_pci_device_id(value);
418 fail_if(device == -1, "Unknown platform '%s'", value);
419 device_override = true;
420 } else if (!strcmp(key, "file")) {
421 output_filename = strdup(value);
422 output_file = fopen(output_filename, "w+");
423 fail_if(output_file == NULL,
424 "failed to open file '%s'\n",
425 output_filename);
426 } else if (!strcmp(key, "capture_only")) {
427 capture_only = atoi(value);
428 } else {
429 fprintf(stderr, "unknown option '%s'\n", key);
430 }
431
432 free(key);
433 free(value);
434 }
435 fclose(config);
436
437 bos = calloc(MAX_FD_COUNT * MAX_BO_COUNT, sizeof(bos[0]));
438 fail_if(bos == NULL, "out of memory\n");
439
440 int ret = get_pci_id(fd, &device);
441 assert(ret == 0);
442
443 aub_file_init(&aub_file, output_file,
444 verbose == 2 ? stdout : NULL,
445 device, program_invocation_short_name);
446 aub_write_default_setup(&aub_file);
447
448 if (verbose)
449 printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
450 output_filename, device, devinfo.gen);
451 }
452
/* Interposed ioctl(2).  Detects the i915 DRM fd by its character-device
 * major number, then intercepts the GEM calls needed to mirror the
 * application's submissions into an AUB file.  With a device override
 * active, GPU-dependent ioctls are answered locally so the app can run
 * against hardware it was not built for.  Everything else is forwarded
 * to libc.
 */
__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;
   int ret;
   struct stat buf;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   /* Identify a new DRM fd: any DRM ioctl on a character device with the
    * DRM major number. */
   if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
       drm_fd != fd && fstat(fd, &buf) == 0 &&
       (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
      drm_fd = fd;
      if (verbose)
         printf("[intercept drm ioctl on fd %d]\n", fd);
   }

   if (fd == drm_fd) {
      maybe_init(fd);

      switch (request) {
      /* With an override there is no GPU to wait on; report "done". */
      case DRM_IOCTL_SYNCOBJ_WAIT:
      case DRM_IOCTL_I915_GEM_WAIT: {
         if (device_override)
            return 0;
         return libc_ioctl(fd, request, argp);
      }

      /* Overridden device never hangs: report zeroed reset statistics. */
      case DRM_IOCTL_I915_GET_RESET_STATS: {
         if (device_override) {
            struct drm_i915_reset_stats *stats = argp;

            stats->reset_count = 0;
            stats->batch_active = 0;
            stats->batch_pending = 0;
            return 0;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GETPARAM: {
         struct drm_i915_getparam *getparam = argp;

         ensure_device_info(fd);

         if (getparam->param == I915_PARAM_CHIPSET_ID)
            return get_pci_id(fd, getparam->value);

         /* Answer capability queries from devinfo instead of the kernel. */
         if (device_override) {
            switch (getparam->param) {
            case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
               *getparam->value = devinfo.timestamp_frequency;
               return 0;

            case I915_PARAM_HAS_WAIT_TIMEOUT:
            case I915_PARAM_HAS_EXECBUF2:
            case I915_PARAM_MMAP_VERSION:
            case I915_PARAM_HAS_EXEC_ASYNC:
            case I915_PARAM_HAS_EXEC_FENCE:
            case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
               *getparam->value = 1;
               return 0;

            case I915_PARAM_HAS_EXEC_SOFTPIN:
               /* Softpin exists on gen8+ except Cherryview. */
               *getparam->value = devinfo.gen >= 8 && !devinfo.is_cherryview;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM: {
         struct drm_i915_gem_context_param *getparam = argp;

         ensure_device_info(fd);

         if (device_override) {
            switch (getparam->param) {
            /* GTT size by generation: EHL 36b, gen8+ (not CHV) 48b,
             * otherwise 2GB. */
            case I915_CONTEXT_PARAM_GTT_SIZE:
               if (devinfo.is_elkhartlake)
                  getparam->value = 1ull << 36;
               else if (devinfo.gen >= 8 && !devinfo.is_cherryview)
                  getparam->value = 1ull << 48;
               else
                  getparam->value = 1ull << 31;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      /* Legacy execbuffer is not captured; warn once and pass through. */
      case DRM_IOCTL_I915_GEM_EXECBUFFER: {
         static bool once;
         if (!once) {
            fprintf(stderr,
                    "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
            once = true;
         }
         return libc_ioctl(fd, request, argp);
      }

      /* The interesting path: record the submission, then (unless
       * overridden) let the real GPU run it too. */
      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
         dump_execbuffer2(fd, argp);
         if (device_override)
            return 0;

         return libc_ioctl(fd, request, argp);
      }

      /* Context creation: mirror into the AUB writer; with an override
       * the AUB writer allocates the id itself (ctx_id stays NULL). */
      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create_ext *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      /* BO lifecycle: track every creation path so execbuffer dumping can
       * find size/contents later. */
      case DRM_IOCTL_I915_GEM_CREATE: {
         struct drm_i915_gem_create *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_USERPTR: {
         struct drm_i915_gem_userptr *userptr = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            /* Tag the map pointer so we never munmap app-owned memory. */
            add_new_bo(fd, userptr->handle, userptr->user_size,
                       (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));

         return ret;
      }

      case DRM_IOCTL_GEM_CLOSE: {
         struct drm_gem_close *close = argp;

         remove_bo(fd, close->handle);

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_GEM_OPEN: {
         struct drm_gem_open *open = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, open->handle, open->size, NULL);

         return ret;
      }

      case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
         struct drm_prime_handle *prime = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            off_t size;

            /* Imported dma-buf: its size is only discoverable via lseek. */
            size = lseek(prime->fd, 0, SEEK_END);
            fail_if(size == -1, "failed to get prime bo size\n");
            add_new_bo(fd, prime->handle, size, NULL);

         }

         return ret;
      }

      /* A CPU mapping means the app may rewrite the BO at any time, so it
       * must be re-dumped on every execbuffer (user_mapped keeps dirty
       * sticky). */
      case DRM_IOCTL_I915_GEM_MMAP: {
         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            struct drm_i915_gem_mmap *mmap = argp;
            struct bo *bo = get_bo(fd, mmap->handle);
            bo->user_mapped = true;
            bo->dirty = true;
         }
         return ret;
      }

      default:
         return libc_ioctl(fd, request, argp);
      }
   } else {
      return libc_ioctl(fd, request, argp);
   }
}
677
678 static void
679 init(void)
680 {
681 libc_close = dlsym(RTLD_NEXT, "close");
682 libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
683 libc_munmap = dlsym(RTLD_NEXT, "munmap");
684 fail_if(libc_close == NULL || libc_ioctl == NULL,
685 "failed to get libc ioctl or close\n");
686 }
687
/* Initial value of libc_close: resolve the real symbols on first use,
 * then forward this call to the real close().
 */
static int
close_init_helper(int fd)
{
   init();

   return libc_close(fd);
}
694
/* Initial value of libc_ioctl: resolve the real symbols on first use,
 * then forward this call (with its single pointer argument) to the real
 * ioctl().
 */
static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
   va_list ap;
   void *arg;

   va_start(ap, request);
   arg = va_arg(ap, void *);
   va_end(ap);

   init();

   return libc_ioctl(fd, request, arg);
}
708
709 static int
710 munmap_init_helper(void *addr, size_t length)
711 {
712 init();
713 for (uint32_t i = 0; i < MAX_FD_COUNT * MAX_BO_COUNT; i++) {
714 struct bo *bo = &bos[i];
715 if (bo->map == addr) {
716 bo->user_mapped = false;
717 break;
718 }
719 }
720 return libc_munmap(addr, length);
721 }
722
723 static void __attribute__ ((destructor))
724 fini(void)
725 {
726 if (devinfo.gen != 0) {
727 free(output_filename);
728 aub_file_finish(&aub_file);
729 free(bos);
730 }
731 }