intel/dump_gpu: only map in GTT buffers not previously mapped
[mesa.git] / src / intel / tools / intel_dump_gpu.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <signal.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <unistd.h>
37 #include <errno.h>
38 #include <sys/mman.h>
39 #include <dlfcn.h>
40 #include "drm-uapi/i915_drm.h"
41 #include <inttypes.h>
42
43 #include "intel_aub.h"
44 #include "aub_write.h"
45
46 #include "dev/gen_device_info.h"
47 #include "util/macros.h"
48
/* Real libc entry points, resolved lazily via dlsym(RTLD_NEXT) in init().
 * Until then each pointer targets a *_init_helper trampoline that performs
 * the resolution on first use and then forwards the call.
 */
static int close_init_helper(int fd);
static int ioctl_init_helper(int fd, unsigned long request, ...);
static int munmap_init_helper(void *addr, size_t length);

static int (*libc_close)(int fd) = close_init_helper;
static int (*libc_ioctl)(int fd, unsigned long request, ...) = ioctl_init_helper;
static int (*libc_munmap)(void *addr, size_t length) = munmap_init_helper;
56
static int drm_fd = -1;              /* fd recognized as the DRM device; -1 until seen */
static char *output_filename = NULL; /* aub output path (config "file" key) */
static FILE *output_file = NULL;     /* stream the aub data is written to */
static int verbose = 0;              /* 0 quiet, 1 progress messages, 2 also dump aub to stdout */
static bool device_override = false; /* device/platform forced via config; fake ioctl results */
static bool capture_only = false;    /* only write BOs flagged EXEC_OBJECT_CAPTURE */

/* Bounds of the flat shadow-bo table, indexed as bos[handle + fd * MAX_BO_COUNT]. */
#define MAX_FD_COUNT 64
#define MAX_BO_COUNT 64 * 1024
66
/* Shadow state tracked for every GEM buffer object seen on the DRM fd. */
struct bo {
   uint32_t size;
   uint64_t offset;
   /* CPU mapping of the buffer contents. Bit 0 doubles as the userptr
    * marker (USERPTR_FLAG), so dereference through GET_PTR().
    */
   void *map;
   /* Whether the buffer has been positioned in the GTT already. */
   bool gtt_mapped : 1;
   /* Tracks userspace mmapping of the buffer */
   bool user_mapped : 1;
   /* Using the i915-gem mmapping ioctl & execbuffer ioctl, track whether a
    * buffer has been updated.
    */
   bool dirty : 1;
};
80
/* Flat shadow-bo table, allocated in maybe_init(); see MAX_*_COUNT above. */
static struct bo *bos;

/* Linux major device number for DRM character devices. */
#define DRM_MAJOR 226

/* We set bit 0 in the map pointer for userptr BOs so we know not to
 * munmap them on DRM_IOCTL_GEM_CLOSE.
 */
#define USERPTR_FLAG 1
#define IS_USERPTR(p) ((uintptr_t) (p) & USERPTR_FLAG)
#define GET_PTR(p) ( (void *) ((uintptr_t) p & ~(uintptr_t) 1) )
91
/* If cond is non-zero, print the formatted message to stderr prefixed with
 * "intel_dump_gpu: " and stop the process with SIGTRAP so a debugger can
 * take over. No-op when cond is zero.
 */
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
   if (cond) {
      va_list ap;

      va_start(ap, format);
      fprintf(stderr, "intel_dump_gpu: ");
      vfprintf(stderr, format, ap);
      va_end(ap);

      raise(SIGTRAP);
   }
}
107
108 static struct bo *
109 get_bo(unsigned fd, uint32_t handle)
110 {
111 struct bo *bo;
112
113 fail_if(handle >= MAX_BO_COUNT, "bo handle too large\n");
114 fail_if(fd >= MAX_FD_COUNT, "bo fd too large\n");
115 bo = &bos[handle + fd * MAX_BO_COUNT];
116
117 return bo;
118 }
119
/* Round v up to the next multiple of a; a must be a power of two. */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   uint32_t mask = a - 1;

   return (v + mask) & ~mask;
}
125
/* Identified device state, filled lazily by ensure_device_info() /
 * maybe_init(); aub_file holds the output writer state.
 */
static struct gen_device_info devinfo = {0};
static int device = 0;            /* PCI device id; 0 until identified or overridden */
static struct aub_file aub_file;
129
130 static void
131 ensure_device_info(int fd)
132 {
133 /* We can't do this at open time as we're not yet authenticated. */
134 if (device == 0) {
135 fail_if(!gen_get_device_info_from_fd(fd, &devinfo),
136 "failed to identify chipset.\n");
137 device = devinfo.chipset_id;
138 } else if (devinfo.gen == 0) {
139 fail_if(!gen_get_device_info_from_pci_id(device, &devinfo),
140 "failed to identify chipset.\n");
141 }
142 }
143
144 static void *
145 relocate_bo(int fd, struct bo *bo, const struct drm_i915_gem_execbuffer2 *execbuffer2,
146 const struct drm_i915_gem_exec_object2 *obj)
147 {
148 const struct drm_i915_gem_exec_object2 *exec_objects =
149 (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
150 const struct drm_i915_gem_relocation_entry *relocs =
151 (const struct drm_i915_gem_relocation_entry *) (uintptr_t) obj->relocs_ptr;
152 void *relocated;
153 int handle;
154
155 relocated = malloc(bo->size);
156 fail_if(relocated == NULL, "out of memory\n");
157 memcpy(relocated, GET_PTR(bo->map), bo->size);
158 for (size_t i = 0; i < obj->relocation_count; i++) {
159 fail_if(relocs[i].offset >= bo->size, "reloc outside bo\n");
160
161 if (execbuffer2->flags & I915_EXEC_HANDLE_LUT)
162 handle = exec_objects[relocs[i].target_handle].handle;
163 else
164 handle = relocs[i].target_handle;
165
166 aub_write_reloc(&devinfo, ((char *)relocated) + relocs[i].offset,
167 get_bo(fd, handle)->offset + relocs[i].delta);
168 }
169
170 return relocated;
171 }
172
/* Issue an ioctl on the real device, retrying as long as it fails with
 * EINTR or EAGAIN.
 */
static int
gem_ioctl(int fd, unsigned long request, void *argp)
{
   for (;;) {
      int ret = libc_ioctl(fd, request, argp);

      if (ret != -1 || (errno != EINTR && errno != EAGAIN))
         return ret;
   }
}
184
185 static void *
186 gem_mmap(int fd, uint32_t handle, uint64_t offset, uint64_t size)
187 {
188 struct drm_i915_gem_mmap mmap = {
189 .handle = handle,
190 .offset = offset,
191 .size = size
192 };
193
194 if (gem_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap) == -1)
195 return MAP_FAILED;
196
197 return (void *)(uintptr_t) mmap.addr_ptr;
198 }
199
200 static enum drm_i915_gem_engine_class
201 engine_class_from_ring_flag(uint32_t ring_flag)
202 {
203 switch (ring_flag) {
204 case I915_EXEC_DEFAULT:
205 case I915_EXEC_RENDER:
206 return I915_ENGINE_CLASS_RENDER;
207 case I915_EXEC_BSD:
208 return I915_ENGINE_CLASS_VIDEO;
209 case I915_EXEC_BLT:
210 return I915_ENGINE_CLASS_COPY;
211 case I915_EXEC_VEBOX:
212 return I915_ENGINE_CLASS_VIDEO_ENHANCE;
213 default:
214 return I915_ENGINE_CLASS_INVALID;
215 }
216 }
217
/* Core capture path. Intercepts one execbuffer2 submission: assigns a
 * GTT/PPGTT address to every referenced BO, writes the (relocated) contents
 * of dirty BOs into the aub file, then emits the batch execution record.
 */
static void
dump_execbuffer2(int fd, struct drm_i915_gem_execbuffer2 *execbuffer2)
{
   struct drm_i915_gem_exec_object2 *exec_objects =
      (struct drm_i915_gem_exec_object2 *) (uintptr_t) execbuffer2->buffers_ptr;
   uint32_t ring_flag = execbuffer2->flags & I915_EXEC_RING_MASK;
   uint32_t offset;
   struct drm_i915_gem_exec_object2 *obj;
   struct bo *bo, *batch_bo;
   int batch_index;
   void *data;

   ensure_device_info(fd);

   /* Fallback aub-file setup for the case where maybe_init() couldn't
    * complete it (NOTE(review): maybe_init() appears to always call
    * aub_file_init(); presumably aub_file.file stays NULL when no output
    * file was configured — confirm against aub_write.c).
    */
   if (!aub_file.file) {
      aub_file_init(&aub_file, output_file,
                    verbose == 2 ? stdout : NULL,
                    device, program_invocation_short_name);
      aub_write_default_setup(&aub_file);

      if (verbose)
         printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
                output_filename, device, devinfo.gen);
   }

   /* Address allocation starts past page 0 under execlists, otherwise it
    * continues from the writer's current GTT high-water mark.
    */
   if (aub_use_execlists(&aub_file))
      offset = 0x1000;
   else
      offset = aub_gtt_size(&aub_file);

   if (verbose)
      printf("Dumping execbuffer2:\n");

   /* First pass: place every BO, map its contents into our process, and
    * insert a PPGTT mapping only for BOs not previously mapped.
    */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      /* If bo->size == 0, this means they passed us an invalid
       * buffer. The kernel will reject it and so should we.
       */
      if (bo->size == 0) {
         if (verbose)
            printf("BO #%d is invalid!\n", obj->handle);
         return;
      }

      if (obj->flags & EXEC_OBJECT_PINNED) {
         /* Softpinned: keep the address userspace chose. */
         bo->offset = obj->offset;
         if (verbose)
            printf("BO #%d (%dB) pinned @ 0x%" PRIx64 "\n",
                   obj->handle, bo->size, bo->offset);
      } else {
         /* Allocate the next aligned slot and advance past this BO,
          * rounded up to a page boundary.
          */
         if (obj->alignment != 0)
            offset = align_u32(offset, obj->alignment);
         bo->offset = offset;
         if (verbose)
            printf("BO #%d (%dB) @ 0x%" PRIx64 "\n", obj->handle,
                   bo->size, bo->offset);
         offset = align_u32(offset + bo->size + 4095, 4096);
      }

      if (bo->map == NULL && bo->size > 0)
         bo->map = gem_mmap(fd, obj->handle, 0, bo->size);
      fail_if(bo->map == MAP_FAILED, "bo mmap failed\n");

      /* Only map into the GTT buffers not previously mapped. */
      if (aub_use_execlists(&aub_file) && !bo->gtt_mapped) {
         aub_map_ppgtt(&aub_file, bo->offset, bo->size);
         bo->gtt_mapped = true;
      }
   }

   /* The batch is the first exec object with I915_EXEC_BATCH_FIRST,
    * otherwise the last one.
    */
   batch_index = (execbuffer2->flags & I915_EXEC_BATCH_FIRST) ? 0 :
      execbuffer2->buffer_count - 1;
   batch_bo = get_bo(fd, exec_objects[batch_index].handle);

   /* Second pass: write dirty BO contents (relocated if needed) into the
    * aub file.
    */
   for (uint32_t i = 0; i < execbuffer2->buffer_count; i++) {
      obj = &exec_objects[i];
      bo = get_bo(fd, obj->handle);

      if (obj->relocation_count > 0)
         data = relocate_bo(fd, bo, execbuffer2, obj);
      else
         data = bo->map;

      /* In capture_only mode, only BOs flagged for capture are written. */
      bool write = !capture_only || (obj->flags & EXEC_OBJECT_CAPTURE);

      if (write && bo->dirty) {
         if (bo == batch_bo) {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_BATCH,
                                  GET_PTR(data), bo->size, bo->offset);
         } else {
            aub_write_trace_block(&aub_file, AUB_TRACE_TYPE_NOTYPE,
                                  GET_PTR(data), bo->size, bo->offset);
         }

         /* A BO the app still has mmapped can change behind our back, so
          * it must stay dirty for the next submission.
          */
         if (!bo->user_mapped)
            bo->dirty = false;
      }

      /* relocate_bo() returned a heap copy; bo->map is kept. */
      if (data != bo->map)
         free(data);
   }

   /* rsvd1 carries the context id in execbuffer2. */
   uint32_t ctx_id = execbuffer2->rsvd1;

   aub_write_exec(&aub_file, ctx_id,
                  batch_bo->offset + execbuffer2->batch_start_offset,
                  offset, engine_class_from_ring_flag(ring_flag));

   /* With a faked device nothing will ever signal the fences the app
    * attached, so signal them here to keep it from waiting forever.
    */
   if (device_override &&
       (execbuffer2->flags & I915_EXEC_FENCE_ARRAY) != 0) {
      struct drm_i915_gem_exec_fence *fences =
         (void*)(uintptr_t)execbuffer2->cliprects_ptr;
      for (uint32_t i = 0; i < execbuffer2->num_cliprects; i++) {
         if ((fences[i].flags & I915_EXEC_FENCE_SIGNAL) != 0) {
            struct drm_syncobj_array arg = {
               .handles = (uintptr_t)&fences[i].handle,
               .count_handles = 1,
               .pad = 0,
            };
            libc_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &arg);
         }
      }
   }
}
342
343 static void
344 add_new_bo(unsigned fd, int handle, uint64_t size, void *map)
345 {
346 struct bo *bo = &bos[handle + fd * MAX_BO_COUNT];
347
348 fail_if(handle >= MAX_BO_COUNT, "bo handle out of range\n");
349 fail_if(fd >= MAX_FD_COUNT, "bo fd out of range\n");
350 fail_if(size == 0, "bo size is invalid\n");
351
352 bo->size = size;
353 bo->map = map;
354 bo->user_mapped = false;
355 bo->gtt_mapped = false;
356 }
357
358 static void
359 remove_bo(int fd, int handle)
360 {
361 struct bo *bo = get_bo(fd, handle);
362
363 if (bo->map && !IS_USERPTR(bo->map))
364 munmap(bo->map, bo->size);
365 memset(bo, 0, sizeof(*bo));
366 }
367
368 __attribute__ ((visibility ("default"))) int
369 close(int fd)
370 {
371 if (fd == drm_fd)
372 drm_fd = -1;
373
374 return libc_close(fd);
375 }
376
377 static int
378 get_pci_id(int fd, int *pci_id)
379 {
380 struct drm_i915_getparam gparam;
381
382 if (device_override) {
383 *pci_id = device;
384 return 0;
385 }
386
387 gparam.param = I915_PARAM_CHIPSET_ID;
388 gparam.value = pci_id;
389 return libc_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gparam);
390 }
391
392 static void
393 maybe_init(int fd)
394 {
395 static bool initialized = false;
396 FILE *config;
397 char *key, *value;
398
399 if (initialized)
400 return;
401
402 initialized = true;
403
404 config = fopen(getenv("INTEL_DUMP_GPU_CONFIG"), "r");
405 while (fscanf(config, "%m[^=]=%m[^\n]\n", &key, &value) != EOF) {
406 if (!strcmp(key, "verbose")) {
407 if (!strcmp(value, "1")) {
408 verbose = 1;
409 } else if (!strcmp(value, "2")) {
410 verbose = 2;
411 }
412 } else if (!strcmp(key, "device")) {
413 fail_if(device != 0, "Device/Platform override specified multiple times.");
414 fail_if(sscanf(value, "%i", &device) != 1,
415 "failed to parse device id '%s'",
416 value);
417 device_override = true;
418 } else if (!strcmp(key, "platform")) {
419 fail_if(device != 0, "Device/Platform override specified multiple times.");
420 device = gen_device_name_to_pci_device_id(value);
421 fail_if(device == -1, "Unknown platform '%s'", value);
422 device_override = true;
423 } else if (!strcmp(key, "file")) {
424 output_filename = strdup(value);
425 output_file = fopen(output_filename, "w+");
426 fail_if(output_file == NULL,
427 "failed to open file '%s'\n",
428 output_filename);
429 } else if (!strcmp(key, "capture_only")) {
430 capture_only = atoi(value);
431 } else {
432 fprintf(stderr, "unknown option '%s'\n", key);
433 }
434
435 free(key);
436 free(value);
437 }
438 fclose(config);
439
440 bos = calloc(MAX_FD_COUNT * MAX_BO_COUNT, sizeof(bos[0]));
441 fail_if(bos == NULL, "out of memory\n");
442
443 int ret = get_pci_id(fd, &device);
444 assert(ret == 0);
445
446 aub_file_init(&aub_file, output_file,
447 verbose == 2 ? stdout : NULL,
448 device, program_invocation_short_name);
449 aub_write_default_setup(&aub_file);
450
451 if (verbose)
452 printf("[running, output file %s, chipset id 0x%04x, gen %d]\n",
453 output_filename, device, devinfo.gen);
454 }
455
/* Interposed ioctl(2). Detects the DRM device fd, then intercepts the
 * i915/DRM ioctls needed to track buffers and capture submissions; with
 * device_override set, many requests are answered locally so the app can
 * run against a device that isn't really there. Everything else is
 * forwarded to the real libc ioctl.
 */
__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   va_list args;
   void *argp;
   int ret;
   struct stat buf;

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   /* Recognize the DRM device: a DRM ioctl issued on a character device
    * whose major number is DRM_MAJOR.
    */
   if (_IOC_TYPE(request) == DRM_IOCTL_BASE &&
       drm_fd != fd && fstat(fd, &buf) == 0 &&
       (buf.st_mode & S_IFMT) == S_IFCHR && major(buf.st_rdev) == DRM_MAJOR) {
      drm_fd = fd;
      if (verbose)
         printf("[intercept drm ioctl on fd %d]\n", fd);
   }

   if (fd == drm_fd) {
      maybe_init(fd);

      switch (request) {
      case DRM_IOCTL_SYNCOBJ_WAIT:
      case DRM_IOCTL_I915_GEM_WAIT: {
         /* Nothing ever completes on a faked device; pretend waits
          * succeed immediately.
          */
         if (device_override)
            return 0;
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GET_RESET_STATS: {
         /* Faked device never resets. */
         if (device_override) {
            struct drm_i915_reset_stats *stats = argp;

            stats->reset_count = 0;
            stats->batch_active = 0;
            stats->batch_pending = 0;
            return 0;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GETPARAM: {
         struct drm_i915_getparam *getparam = argp;

         ensure_device_info(fd);

         /* Chipset id always goes through get_pci_id() so the override
          * is honored even without device_override fakery below.
          */
         if (getparam->param == I915_PARAM_CHIPSET_ID)
            return get_pci_id(fd, getparam->value);

         if (device_override) {
            switch (getparam->param) {
            case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
               *getparam->value = devinfo.timestamp_frequency;
               return 0;

            /* Capabilities the dumper relies on; claim support. */
            case I915_PARAM_HAS_WAIT_TIMEOUT:
            case I915_PARAM_HAS_EXECBUF2:
            case I915_PARAM_MMAP_VERSION:
            case I915_PARAM_HAS_EXEC_ASYNC:
            case I915_PARAM_HAS_EXEC_FENCE:
            case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
               *getparam->value = 1;
               return 0;

            case I915_PARAM_HAS_EXEC_SOFTPIN:
               *getparam->value = devinfo.gen >= 8 && !devinfo.is_cherryview;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM: {
         struct drm_i915_gem_context_param *getparam = argp;

         ensure_device_info(fd);

         if (device_override) {
            switch (getparam->param) {
            /* Report the GTT size matching the faked hardware generation. */
            case I915_CONTEXT_PARAM_GTT_SIZE:
               if (devinfo.is_elkhartlake)
                  getparam->value = 1ull << 36;
               else if (devinfo.gen >= 8 && !devinfo.is_cherryview)
                  getparam->value = 1ull << 48;
               else
                  getparam->value = 1ull << 31;
               return 0;

            default:
               return -1;
            }
         }

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER: {
         /* Legacy submission path is not captured; warn once and pass
          * through.
          */
         static bool once;
         if (!once) {
            fprintf(stderr,
                    "application uses DRM_IOCTL_I915_GEM_EXECBUFFER, not handled\n");
            once = true;
         }
         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR: {
         /* Capture the submission; with a faked device, don't forward. */
         dump_execbuffer2(fd, argp);
         if (device_override)
            return 0;

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE: {
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create *create = argp;
         ret = 0;
         /* With a real device, let the kernel pick the id and mirror it
          * into the aub stream; otherwise the aub writer allocates one.
          */
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT: {
         /* Same handling as CONTEXT_CREATE for the extended variant. */
         uint32_t *ctx_id = NULL;
         struct drm_i915_gem_context_create_ext *create = argp;
         ret = 0;
         if (!device_override) {
            ret = libc_ioctl(fd, request, argp);
            ctx_id = &create->ctx_id;
         }

         if (ret == 0)
            create->ctx_id = aub_write_context_create(&aub_file, ctx_id);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_CREATE: {
         struct drm_i915_gem_create *create = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, create->handle, create->size, NULL);

         return ret;
      }

      case DRM_IOCTL_I915_GEM_USERPTR: {
         struct drm_i915_gem_userptr *userptr = argp;

         ret = libc_ioctl(fd, request, argp);
         /* Tag the map pointer so GEM_CLOSE won't munmap app memory. */
         if (ret == 0)
            add_new_bo(fd, userptr->handle, userptr->user_size,
                       (void *) (uintptr_t) (userptr->user_ptr | USERPTR_FLAG));

         return ret;
      }

      case DRM_IOCTL_GEM_CLOSE: {
         struct drm_gem_close *close = argp;

         remove_bo(fd, close->handle);

         return libc_ioctl(fd, request, argp);
      }

      case DRM_IOCTL_GEM_OPEN: {
         struct drm_gem_open *open = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0)
            add_new_bo(fd, open->handle, open->size, NULL);

         return ret;
      }

      case DRM_IOCTL_PRIME_FD_TO_HANDLE: {
         struct drm_prime_handle *prime = argp;

         ret = libc_ioctl(fd, request, argp);
         if (ret == 0) {
            off_t size;

            /* Imported dma-buf size comes from seeking its fd. */
            size = lseek(prime->fd, 0, SEEK_END);
            fail_if(size == -1, "failed to get prime bo size\n");
            add_new_bo(fd, prime->handle, size, NULL);

         }

         return ret;
      }

      case DRM_IOCTL_I915_GEM_MMAP: {
         ret = libc_ioctl(fd, request, argp);
         /* The app can now write the BO behind our back: mark it both
          * user-mapped and dirty so its contents get (re)captured.
          */
         if (ret == 0) {
            struct drm_i915_gem_mmap *mmap = argp;
            struct bo *bo = get_bo(fd, mmap->handle);
            bo->user_mapped = true;
            bo->dirty = true;
         }
         return ret;
      }

      default:
         return libc_ioctl(fd, request, argp);
      }
   } else {
      return libc_ioctl(fd, request, argp);
   }
}
680
681 static void
682 init(void)
683 {
684 libc_close = dlsym(RTLD_NEXT, "close");
685 libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
686 libc_munmap = dlsym(RTLD_NEXT, "munmap");
687 fail_if(libc_close == NULL || libc_ioctl == NULL,
688 "failed to get libc ioctl or close\n");
689 }
690
/* First-use trampoline for close(): resolve the real libc symbols, then
 * call through.
 */
static int
close_init_helper(int fd)
{
   init();

   return libc_close(fd);
}
697
/* First-use trampoline for ioctl(): resolve the real libc symbols, then
 * forward the call along with its pointer argument.
 */
static int
ioctl_init_helper(int fd, unsigned long request, ...)
{
   va_list ap;
   void *argp;

   va_start(ap, request);
   argp = va_arg(ap, void *);
   va_end(ap);

   init();

   return libc_ioctl(fd, request, argp);
}
711
712 static int
713 munmap_init_helper(void *addr, size_t length)
714 {
715 init();
716 for (uint32_t i = 0; i < MAX_FD_COUNT * MAX_BO_COUNT; i++) {
717 struct bo *bo = &bos[i];
718 if (bo->map == addr) {
719 bo->user_mapped = false;
720 break;
721 }
722 }
723 return libc_munmap(addr, length);
724 }
725
726 static void __attribute__ ((destructor))
727 fini(void)
728 {
729 if (devinfo.gen != 0) {
730 free(output_filename);
731 aub_file_finish(&aub_file);
732 free(bos);
733 }
734 }