broadcom/vc4: Mark BOs as purgeable when they enter the BO cache
src/gallium/drivers/vc4/vc4_bufmgr.c
/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <errno.h>
#include <err.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/ralloc.h"

#include "vc4_context.h"
#include "vc4_screen.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

static bool dump_stats = false;

static void
vc4_bo_cache_free_all(struct vc4_bo_cache *cache);

void
vc4_bo_label(struct vc4_screen *screen, struct vc4_bo *bo, const char *fmt, ...)
{
        /* Perform BO labeling by default on debug builds (so that you get
         * whole-system allocation information), or if VC4_DEBUG=surf is set
         * (for debugging a single app's allocation).
         */
#ifndef DEBUG
        if (!(vc4_debug & VC4_DEBUG_SURFACE))
                return;
#endif
        va_list va;
        va_start(va, fmt);
        char *name = ralloc_vasprintf(NULL, fmt, va);
        va_end(va);

        struct drm_vc4_label_bo label = {
                .handle = bo->handle,
                .len = strlen(name),
                .name = (uintptr_t)name,
        };
        vc4_ioctl(screen->fd, DRM_IOCTL_VC4_LABEL_BO, &label);

        ralloc_free(name);
}

static void
vc4_bo_dump_stats(struct vc4_screen *screen)
{
        struct vc4_bo_cache *cache = &screen->bo_cache;

        fprintf(stderr, "  BOs allocated:   %d\n", screen->bo_count);
        fprintf(stderr, "  BOs size:        %dkb\n", screen->bo_size / 1024);
        fprintf(stderr, "  BOs cached:      %d\n", cache->bo_count);
        fprintf(stderr, "  BOs cached size: %dkb\n", cache->bo_size / 1024);

        if (!list_empty(&cache->time_list)) {
                struct vc4_bo *first = LIST_ENTRY(struct vc4_bo,
                                                  cache->time_list.next,
                                                  time_list);
                struct vc4_bo *last = LIST_ENTRY(struct vc4_bo,
                                                 cache->time_list.prev,
                                                 time_list);

                fprintf(stderr, "  oldest cache time: %ld\n",
                        (long)first->free_time);
                fprintf(stderr, "  newest cache time: %ld\n",
                        (long)last->free_time);

                struct timespec time;
                clock_gettime(CLOCK_MONOTONIC, &time);
                fprintf(stderr, "  now:               %ld\n",
                        (long)time.tv_sec);
        }
}

static void
vc4_bo_remove_from_cache(struct vc4_bo_cache *cache, struct vc4_bo *bo)
{
        list_del(&bo->time_list);
        list_del(&bo->size_list);
        cache->bo_count--;
        cache->bo_size -= bo->size;
}

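/* Mark a cached BO's backing storage as purgeable (VC4_MADV_DONTNEED) so the
 * kernel may reclaim it under memory pressure.  No-op when the kernel lacks
 * madvise support.
 */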
static void vc4_bo_purgeable(struct vc4_bo *bo)
{
        struct drm_vc4_gem_madvise arg = {
                .handle = bo->handle,
                .madv = VC4_MADV_DONTNEED,
        };

        if (bo->screen->has_madvise)
                vc4_ioctl(bo->screen->fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
}

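/* Mark a BO as needed again (VC4_MADV_WILLNEED) before handing it back out of
 * the cache.  Returns false if the kernel already purged its contents, true
 * otherwise (including when madvise is unsupported).
 */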
static bool vc4_bo_unpurgeable(struct vc4_bo *bo)
{
        struct drm_vc4_gem_madvise arg = {
                .handle = bo->handle,
                .madv = VC4_MADV_WILLNEED,
        };

        if (!bo->screen->has_madvise)
                return true;

        if (vc4_ioctl(bo->screen->fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg))
                return false;

        return arg.retained;
}

static void
vc4_bo_free(struct vc4_bo *bo)
{
        struct vc4_screen *screen = bo->screen;

        if (bo->map) {
                if (using_vc4_simulator && bo->name &&
                    strcmp(bo->name, "winsys") == 0) {
                        free(bo->map);
                } else {
                        munmap(bo->map, bo->size);
                        VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));
                }
        }

        struct drm_gem_close c;
        memset(&c, 0, sizeof(c));
        c.handle = bo->handle;
        int ret = vc4_ioctl(screen->fd, DRM_IOCTL_GEM_CLOSE, &c);
        if (ret != 0)
                fprintf(stderr, "close object %d: %s\n", bo->handle, strerror(errno));

        screen->bo_count--;
        screen->bo_size -= bo->size;

        if (dump_stats) {
                fprintf(stderr, "Freed %s%s%dkb:\n",
                        bo->name ? bo->name : "",
                        bo->name ? " " : "",
                        bo->size / 1024);
                vc4_bo_dump_stats(screen);
        }

        free(bo);
}

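/* Try to reuse an idle BO of the requested (page-aligned) size from the cache.
 * Cached BOs are purgeable, so any BO the kernel has already reclaimed is
 * freed here and the search continues.
 */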
static struct vc4_bo *
vc4_bo_from_cache(struct vc4_screen *screen, uint32_t size, const char *name)
{
        struct vc4_bo_cache *cache = &screen->bo_cache;
        uint32_t page_index = size / 4096 - 1;
        struct vc4_bo *iter, *tmp, *bo = NULL;

        if (cache->size_list_size <= page_index)
                return NULL;

        mtx_lock(&cache->lock);
        LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &cache->size_list[page_index],
                                 size_list) {
                /* Check that the BO has gone idle.  If not, then none of the
                 * other BOs (pushed to the list after later rendering) are
                 * likely to be idle, either.
                 */
                if (!vc4_bo_wait(iter, 0, NULL))
                        break;

                if (!vc4_bo_unpurgeable(iter)) {
                        /* The BO has been purged.  Free it and try to find
                         * another one in the cache.
                         */
                        vc4_bo_remove_from_cache(cache, iter);
                        vc4_bo_free(iter);
                        continue;
                }

                bo = iter;
                pipe_reference_init(&bo->reference, 1);
                vc4_bo_remove_from_cache(cache, bo);

                vc4_bo_label(screen, bo, "%s", name);
                bo->name = name;
                break;
        }
        mtx_unlock(&cache->lock);
        return bo;
}

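/* Allocate a page-aligned BO, preferring an idle one from the cache.  If the
 * CREATE_BO ioctl fails, the cache is flushed and the allocation is retried
 * once before giving up.
 */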
struct vc4_bo *
vc4_bo_alloc(struct vc4_screen *screen, uint32_t size, const char *name)
{
        bool cleared_and_retried = false;
        struct drm_vc4_create_bo create;
        struct vc4_bo *bo;
        int ret;

        size = align(size, 4096);

        bo = vc4_bo_from_cache(screen, size, name);
        if (bo) {
                if (dump_stats) {
                        fprintf(stderr, "Allocated %s %dkb from cache:\n",
                                name, size / 1024);
                        vc4_bo_dump_stats(screen);
                }
                return bo;
        }

        bo = CALLOC_STRUCT(vc4_bo);
        if (!bo)
                return NULL;

        pipe_reference_init(&bo->reference, 1);
        bo->screen = screen;
        bo->size = size;
        bo->name = name;
        bo->private = true;

retry:
        memset(&create, 0, sizeof(create));
        create.size = size;

        ret = vc4_ioctl(screen->fd, DRM_IOCTL_VC4_CREATE_BO, &create);
        bo->handle = create.handle;

        if (ret != 0) {
                if (!list_empty(&screen->bo_cache.time_list) &&
                    !cleared_and_retried) {
                        cleared_and_retried = true;
                        vc4_bo_cache_free_all(&screen->bo_cache);
                        goto retry;
                }

                free(bo);
                return NULL;
        }

        screen->bo_count++;
        screen->bo_size += bo->size;
        if (dump_stats) {
                fprintf(stderr, "Allocated %s %dkb:\n", name, size / 1024);
                vc4_bo_dump_stats(screen);
        }

        vc4_bo_label(screen, bo, "%s", name);

        return bo;
}

void
vc4_bo_last_unreference(struct vc4_bo *bo)
{
        struct vc4_screen *screen = bo->screen;

        struct timespec time;
        clock_gettime(CLOCK_MONOTONIC, &time);
        mtx_lock(&screen->bo_cache.lock);
        vc4_bo_last_unreference_locked_timed(bo, time.tv_sec);
        mtx_unlock(&screen->bo_cache.lock);
}

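/* Evict cached BOs whose free time is more than a couple of seconds in the
 * past, walking the time list from oldest to newest.
 */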
static void
free_stale_bos(struct vc4_screen *screen, time_t time)
{
        struct vc4_bo_cache *cache = &screen->bo_cache;
        bool freed_any = false;

        list_for_each_entry_safe(struct vc4_bo, bo, &cache->time_list,
                                 time_list) {
                if (dump_stats && !freed_any) {
                        fprintf(stderr, "Freeing stale BOs:\n");
                        vc4_bo_dump_stats(screen);
                        freed_any = true;
                }

                /* If it's more than a couple of seconds old, free it. */
                if (time - bo->free_time > 2) {
                        vc4_bo_remove_from_cache(cache, bo);
                        vc4_bo_free(bo);
                } else {
                        break;
                }
        }

        if (dump_stats && freed_any) {
                fprintf(stderr, "Freed stale BOs:\n");
                vc4_bo_dump_stats(screen);
        }
}

static void
vc4_bo_cache_free_all(struct vc4_bo_cache *cache)
{
        mtx_lock(&cache->lock);
        list_for_each_entry_safe(struct vc4_bo, bo, &cache->time_list,
                                 time_list) {
                vc4_bo_remove_from_cache(cache, bo);
                vc4_bo_free(bo);
        }
        mtx_unlock(&cache->lock);
}

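/* Called with the cache lock held when the last reference to a BO goes away.
 * Private BOs are timestamped, marked purgeable, and placed in their size
 * bucket for later reuse; shared BOs are freed immediately.
 */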
void
vc4_bo_last_unreference_locked_timed(struct vc4_bo *bo, time_t time)
{
        struct vc4_screen *screen = bo->screen;
        struct vc4_bo_cache *cache = &screen->bo_cache;
        uint32_t page_index = bo->size / 4096 - 1;

        if (!bo->private) {
                vc4_bo_free(bo);
                return;
        }

        if (cache->size_list_size <= page_index) {
                struct list_head *new_list =
                        ralloc_array(screen, struct list_head, page_index + 1);

                /* Move old list contents over (since the array has moved, and
                 * therefore the pointers to the list heads have to change).
                 */
                for (int i = 0; i < cache->size_list_size; i++)
                        list_replace(&cache->size_list[i], &new_list[i]);
                for (int i = cache->size_list_size; i < page_index + 1; i++)
                        list_inithead(&new_list[i]);

                cache->size_list = new_list;
                cache->size_list_size = page_index + 1;
        }

        vc4_bo_purgeable(bo);
        bo->free_time = time;
        list_addtail(&bo->size_list, &cache->size_list[page_index]);
        list_addtail(&bo->time_list, &cache->time_list);
        cache->bo_count++;
        cache->bo_size += bo->size;
        if (dump_stats) {
                fprintf(stderr, "Freed %s %dkb to cache:\n",
                        bo->name, bo->size / 1024);
                vc4_bo_dump_stats(screen);
        }
        bo->name = NULL;
        vc4_bo_label(screen, bo, "mesa cache");

        free_stale_bos(screen, time);
}

static struct vc4_bo *
vc4_bo_open_handle(struct vc4_screen *screen,
                   uint32_t winsys_stride,
                   uint32_t handle, uint32_t size)
{
        struct vc4_bo *bo;

        assert(size);

        mtx_lock(&screen->bo_handles_mutex);

        bo = util_hash_table_get(screen->bo_handles, (void*)(uintptr_t)handle);
        if (bo) {
                pipe_reference(NULL, &bo->reference);
                goto done;
        }

        bo = CALLOC_STRUCT(vc4_bo);
        if (!bo)
                goto done;

        pipe_reference_init(&bo->reference, 1);
        bo->screen = screen;
        bo->handle = handle;
        bo->size = size;
        bo->name = "winsys";
        bo->private = false;

#ifdef USE_VC4_SIMULATOR
        vc4_simulator_open_from_handle(screen->fd, winsys_stride,
                                       bo->handle, bo->size);
        bo->map = malloc(bo->size);
#endif

        util_hash_table_set(screen->bo_handles, (void *)(uintptr_t)handle, bo);

done:
        mtx_unlock(&screen->bo_handles_mutex);
        return bo;
}

struct vc4_bo *
vc4_bo_open_name(struct vc4_screen *screen, uint32_t name,
                 uint32_t winsys_stride)
{
        struct drm_gem_open o = {
                .name = name
        };
        int ret = vc4_ioctl(screen->fd, DRM_IOCTL_GEM_OPEN, &o);
        if (ret) {
                fprintf(stderr, "Failed to open bo %d: %s\n",
                        name, strerror(errno));
                return NULL;
        }

        return vc4_bo_open_handle(screen, winsys_stride, o.handle, o.size);
}

struct vc4_bo *
vc4_bo_open_dmabuf(struct vc4_screen *screen, int fd, uint32_t winsys_stride)
{
        uint32_t handle;
        int ret = drmPrimeFDToHandle(screen->fd, fd, &handle);
        int size;
        if (ret) {
                fprintf(stderr, "Failed to get vc4 handle for dmabuf %d\n", fd);
                return NULL;
        }

        /* Determine the size of the bo we were handed. */
        size = lseek(fd, 0, SEEK_END);
        if (size == -1) {
                fprintf(stderr, "Couldn't get size of dmabuf fd %d.\n", fd);
                return NULL;
        }

        return vc4_bo_open_handle(screen, winsys_stride, handle, size);
}

int
vc4_bo_get_dmabuf(struct vc4_bo *bo)
{
        int fd;
        int ret = drmPrimeHandleToFD(bo->screen->fd, bo->handle,
                                     O_CLOEXEC, &fd);
        if (ret != 0) {
                fprintf(stderr, "Failed to export gem bo %d to dmabuf\n",
                        bo->handle);
                return -1;
        }

        mtx_lock(&bo->screen->bo_handles_mutex);
        bo->private = false;
        util_hash_table_set(bo->screen->bo_handles, (void *)(uintptr_t)bo->handle, bo);
        mtx_unlock(&bo->screen->bo_handles_mutex);

        return fd;
}

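/* Allocate an immutable shader BO through the CREATE_SHADER_BO ioctl, which
 * validates the QPU code at creation time.  Shader BOs are never returned to
 * the BO cache (bo->private is false).
 */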
struct vc4_bo *
vc4_bo_alloc_shader(struct vc4_screen *screen, const void *data, uint32_t size)
{
        struct vc4_bo *bo;
        int ret;

        bo = CALLOC_STRUCT(vc4_bo);
        if (!bo)
                return NULL;

        pipe_reference_init(&bo->reference, 1);
        bo->screen = screen;
        bo->size = align(size, 4096);
        bo->name = "code";
        bo->private = false; /* Make sure it doesn't go back to the cache. */

        struct drm_vc4_create_shader_bo create = {
                .size = size,
                .data = (uintptr_t)data,
        };

        ret = vc4_ioctl(screen->fd, DRM_IOCTL_VC4_CREATE_SHADER_BO,
                        &create);
        bo->handle = create.handle;

        if (ret != 0) {
                fprintf(stderr, "create shader ioctl failure\n");
                abort();
        }

        screen->bo_count++;
        screen->bo_size += bo->size;
        if (dump_stats) {
                fprintf(stderr, "Allocated shader %dkb:\n", bo->size / 1024);
                vc4_bo_dump_stats(screen);
        }

        return bo;
}

bool
vc4_bo_flink(struct vc4_bo *bo, uint32_t *name)
{
        struct drm_gem_flink flink = {
                .handle = bo->handle,
        };
        int ret = vc4_ioctl(bo->screen->fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (ret) {
                fprintf(stderr, "Failed to flink bo %d: %s\n",
                        bo->handle, strerror(errno));
                free(bo);
                return false;
        }

        bo->private = false;
        *name = flink.name;

        return true;
}

static int vc4_wait_seqno_ioctl(int fd, uint64_t seqno, uint64_t timeout_ns)
{
        struct drm_vc4_wait_seqno wait = {
                .seqno = seqno,
                .timeout_ns = timeout_ns,
        };
        int ret = vc4_ioctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
        if (ret == -1)
                return -errno;
        else
                return 0;
}

bool
vc4_wait_seqno(struct vc4_screen *screen, uint64_t seqno, uint64_t timeout_ns,
               const char *reason)
{
        if (screen->finished_seqno >= seqno)
                return true;

        if (unlikely(vc4_debug & VC4_DEBUG_PERF) && timeout_ns && reason) {
                if (vc4_wait_seqno_ioctl(screen->fd, seqno, 0) == -ETIME) {
                        fprintf(stderr, "Blocking on seqno %lld for %s\n",
                                (long long)seqno, reason);
                }
        }

        int ret = vc4_wait_seqno_ioctl(screen->fd, seqno, timeout_ns);
        if (ret) {
                if (ret != -ETIME) {
                        fprintf(stderr, "wait failed: %d\n", ret);
                        abort();
                }

                return false;
        }

        screen->finished_seqno = seqno;
        return true;
}

static int vc4_wait_bo_ioctl(int fd, uint32_t handle, uint64_t timeout_ns)
{
        struct drm_vc4_wait_bo wait = {
                .handle = handle,
                .timeout_ns = timeout_ns,
        };
        int ret = vc4_ioctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
        if (ret == -1)
                return -errno;
        else
                return 0;
}

bool
vc4_bo_wait(struct vc4_bo *bo, uint64_t timeout_ns, const char *reason)
{
        struct vc4_screen *screen = bo->screen;

        if (unlikely(vc4_debug & VC4_DEBUG_PERF) && timeout_ns && reason) {
                if (vc4_wait_bo_ioctl(screen->fd, bo->handle, 0) == -ETIME) {
                        fprintf(stderr, "Blocking on %s BO for %s\n",
                                bo->name, reason);
                }
        }

        int ret = vc4_wait_bo_ioctl(screen->fd, bo->handle, timeout_ns);
        if (ret) {
                if (ret != -ETIME) {
                        fprintf(stderr, "wait failed: %d\n", ret);
                        abort();
                }

                return false;
        }

        return true;
}

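/* Map the BO without waiting for outstanding GPU access to finish.  The
 * mapping is cached on the BO and released in vc4_bo_free().
 */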
void *
vc4_bo_map_unsynchronized(struct vc4_bo *bo)
{
        uint64_t offset;
        int ret;

        if (bo->map)
                return bo->map;

        struct drm_vc4_mmap_bo map;
        memset(&map, 0, sizeof(map));
        map.handle = bo->handle;
        ret = vc4_ioctl(bo->screen->fd, DRM_IOCTL_VC4_MMAP_BO, &map);
        offset = map.offset;
        if (ret != 0) {
                fprintf(stderr, "map ioctl failure\n");
                abort();
        }

        bo->map = mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->screen->fd, offset);
        if (bo->map == MAP_FAILED) {
                fprintf(stderr, "mmap of bo %d (offset 0x%016llx, size %d) failed\n",
                        bo->handle, (long long)offset, bo->size);
                abort();
        }
        VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, false));

        return bo->map;
}

void *
vc4_bo_map(struct vc4_bo *bo)
{
        void *map = vc4_bo_map_unsynchronized(bo);

        bool ok = vc4_bo_wait(bo, PIPE_TIMEOUT_INFINITE, "bo map");
        if (!ok) {
                fprintf(stderr, "BO wait for map failed\n");
                abort();
        }

        return map;
}

void
vc4_bufmgr_destroy(struct pipe_screen *pscreen)
{
        struct vc4_screen *screen = vc4_screen(pscreen);
        struct vc4_bo_cache *cache = &screen->bo_cache;

        vc4_bo_cache_free_all(cache);

        if (dump_stats) {
                fprintf(stderr, "BO stats after screen destroy:\n");
                vc4_bo_dump_stats(screen);
        }
}