From 3fe3c7d749fbf146ae6c6f8c84c5cd847a1ce098 Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Thu, 25 Jan 2018 02:24:45 +0000
Subject: [PATCH] re PR other/68239 (libbacktrace allocation is sometimes very slow)

	PR other/68239
	* mmap.c (backtrace_free_locked): Don't put more than 16 entries
	on the free list.

From-SVN: r257039
---
 libbacktrace/ChangeLog |  6 ++++++
 libbacktrace/mmap.c    | 24 +++++++++++++++++++++++-
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/libbacktrace/ChangeLog b/libbacktrace/ChangeLog
index 1fcca776759..2d89ea1dd6c 100644
--- a/libbacktrace/ChangeLog
+++ b/libbacktrace/ChangeLog
@@ -1,3 +1,9 @@
+2018-01-24  Ian Lance Taylor
+
+	PR other/68239
+	* mmap.c (backtrace_free_locked): Don't put more than 16 entries
+	on the free list.
+
 2018-01-19  Tony Reix
 
 	* xcoff.c (xcoff_incl_compare): New function.
diff --git a/libbacktrace/mmap.c b/libbacktrace/mmap.c
index 41bbc71d463..32fcba62399 100644
--- a/libbacktrace/mmap.c
+++ b/libbacktrace/mmap.c
@@ -69,11 +69,33 @@ struct backtrace_freelist_struct
 static void
 backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
 {
-  /* Just leak small blocks.  We don't have to be perfect.  */
+  /* Just leak small blocks.  We don't have to be perfect.  Don't put
+     more than 16 entries on the free list, to avoid wasting time
+     searching when allocating a block.  If we have more than 16
+     entries, leak the smallest entry.  */
+
   if (size >= sizeof (struct backtrace_freelist_struct))
     {
+      size_t c;
+      struct backtrace_freelist_struct **ppsmall;
+      struct backtrace_freelist_struct **pp;
       struct backtrace_freelist_struct *p;
 
+      c = 0;
+      ppsmall = NULL;
+      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
+        {
+          if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
+            ppsmall = pp;
+          ++c;
+        }
+      if (c >= 16)
+        {
+          if (size <= (*ppsmall)->size)
+            return;
+          *ppsmall = (*ppsmall)->next;
+        }
+
       p = (struct backtrace_freelist_struct *) addr;
       p->next = state->freelist;
       p->size = size;
-- 
2.30.2
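
As a companion to the patch, the standalone sketch below shows the same bounded
free-list policy in isolation: keep at most 16 entries and, when the list is
full, leak whichever tracked block is smallest.  It is an illustration only,
not part of libbacktrace: the names MAX_FREELIST, freelist_entry, and
freelist_add are invented for the example, and the real allocator's locking,
backtrace_state, and mmap backing are omitted.

/* Simplified sketch of the bounded free-list policy from the patch above.
   Not the actual libbacktrace code: names and structure are invented for
   illustration, and there is no locking or mmap backing.  */

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define MAX_FREELIST 16   /* Cap matching the patch's limit of 16 entries.  */

struct freelist_entry
{
  struct freelist_entry *next;  /* Next block on the free list.  */
  size_t size;                  /* Size of this block, including header.  */
};

static struct freelist_entry *freelist;

/* Return a block to the free list.  If the list already holds
   MAX_FREELIST entries, leak the smallest block rather than letting the
   list grow without bound, mirroring backtrace_free_locked.  */

static void
freelist_add (void *addr, size_t size)
{
  size_t c;
  struct freelist_entry **ppsmall;
  struct freelist_entry **pp;
  struct freelist_entry *p;

  /* Too small to hold a list header; just leak it.  */
  if (size < sizeof (struct freelist_entry))
    return;

  /* Count the entries and remember the smallest one.  */
  c = 0;
  ppsmall = NULL;
  for (pp = &freelist; *pp != NULL; pp = &(*pp)->next)
    {
      if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
        ppsmall = pp;
      ++c;
    }

  if (c >= MAX_FREELIST)
    {
      /* The list is full.  If the incoming block is no larger than the
         smallest tracked block, leak the incoming block; otherwise drop
         the smallest tracked block to make room.  */
      if (size <= (*ppsmall)->size)
        return;
      *ppsmall = (*ppsmall)->next;
    }

  p = (struct freelist_entry *) addr;
  p->next = freelist;
  p->size = size;
  freelist = p;
}

int
main (void)
{
  size_t i, n;
  struct freelist_entry *p;
  void *b;

  /* Feed in 32 blocks with increasing claimed sizes; the list stays at
     16 entries and keeps the largest ones.  */
  for (i = 0; i < 32; i++)
    {
      b = malloc (64);
      if (b != NULL)
        freelist_add (b, 32 + i);
    }

  n = 0;
  for (p = freelist; p != NULL; p = p->next)
    ++n;
  printf ("free list holds %zu entries\n", n);  /* Prints 16.  */
  return 0;
}

Capping the list bounds the linear scan the allocator performs over the free
list when satisfying a request, which is the slow path reported in
PR other/68239.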