gallium/pb_cache: divide the cache into buckets for reducing cache misses
[mesa.git] / src / gallium / auxiliary / pipebuffer / pb_cache.c
/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pb_cache.h"
#include "util/u_memory.h"
#include "util/u_time.h"


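/*
 * The cache is divided into a fixed array of buckets (mgr->buckets); the
 * caller assigns each buffer a bucket_index (a driver might, for example,
 * use one bucket per memory heap), so a lookup only scans buffers of the
 * same kind instead of one long shared list. Within a bucket, entries are
 * kept in insertion order, which is also expiration order.
 */
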
/**
 * Actually destroy the buffer.
 */
static void
destroy_buffer_locked(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;

   assert(!pipe_is_referenced(&entry->buffer->reference));
   /* A non-NULL head.next means the entry is still linked into a bucket
    * and must be unaccounted from the cache before it is destroyed. */
   if (entry->head.next) {
      LIST_DEL(&entry->head);
      assert(mgr->num_buffers);
      --mgr->num_buffers;
      mgr->cache_size -= entry->buffer->size;
   }
   mgr->destroy_buffer(entry->buffer);
}

/**
 * Free expired buffers from the front of the list. Entries are stored in
 * submission order, so their expiration times increase monotonically and
 * we can stop at the first entry that has not expired yet.
 */
static void
release_expired_buffers_locked(struct list_head *cache)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *entry;
   int64_t now;

   now = os_time_get();

   curr = cache->next;
   next = curr->next;
   while (curr != cache) {
      entry = LIST_ENTRY(struct pb_cache_entry, curr, head);

      if (!os_time_timeout(entry->start, entry->end, now))
         break;

      destroy_buffer_locked(entry);

      curr = next;
      next = curr->next;
   }
}
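
/*
 * A worked example of the expiration check above, assuming the usual
 * os_time_timeout(start, end, now) semantics of returning true once "now"
 * falls outside [start, end): with usecs = 1000000, a buffer cached at
 * time T gets end = T + 1000000, so it survives release passes for one
 * second and is destroyed on the first pass after that.
 */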

/**
 * Add a buffer to the cache. This is typically done when the buffer is
 * being released.
 */
void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;
   struct list_head *cache = &mgr->buckets[entry->bucket_index];
   unsigned i;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&entry->buffer->reference));

   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
      release_expired_buffers_locked(&mgr->buckets[i]);

   /* Directly release any buffer that would push the cache over its
    * size limit. */
   if (mgr->cache_size + entry->buffer->size > mgr->max_cache_size) {
      mgr->destroy_buffer(entry->buffer);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   entry->start = os_time_get();
   entry->end = entry->start + mgr->usecs;
   LIST_ADDTAIL(&entry->head, cache);
   ++mgr->num_buffers;
   mgr->cache_size += entry->buffer->size;
   pipe_mutex_unlock(mgr->mutex);
}
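
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): instead of freeing a buffer whose refcount reached zero, an idle
 * buffer is handed to the cache, which either stores it in its bucket or
 * destroys it right away if the size limit would be exceeded:
 *
 *    static void driver_buffer_destroy(struct pb_buffer *buf)
 *    {
 *       struct driver_buffer *dbuf = driver_buffer(buf);   // assumed cast
 *
 *       // cache_entry was set up with pb_cache_init_entry() at creation
 *       pb_cache_add_buffer(&dbuf->cache_entry);
 *    }
 */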

/**
 * \return 1   if compatible and can be reclaimed
 *         0   if incompatible
 *         -1  if compatible and can't be reclaimed
 */
static int
pb_cache_is_buffer_compat(struct pb_cache_entry *entry,
                          pb_size size, unsigned alignment, unsigned usage)
{
   struct pb_cache *mgr = entry->mgr;
   struct pb_buffer *buf = entry->buffer;

   if (!pb_check_usage(usage, buf->usage))
      return 0;

   /* be lenient with size; cast to pb_size to avoid truncating the upper
    * bound for very large buffers */
   if (buf->size < size ||
       buf->size > (pb_size) (mgr->size_factor * size))
      return 0;

   if (usage & mgr->bypass_usage)
      return 0;

   if (!pb_check_alignment(alignment, buf->alignment))
      return 0;

   return mgr->can_reclaim(buf) ? 1 : -1;
}
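
/*
 * For example, with size_factor = 2.0, a request for a 1 MB buffer is a
 * hit for any idle cached buffer between 1 MB and 2 MB, trading some
 * wasted memory for a higher hit rate.
 */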

/**
 * Find a compatible buffer in the cache, return it, and remove it
 * from the cache.
 */
struct pb_buffer *
pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
                        unsigned alignment, unsigned usage,
                        unsigned bucket_index)
{
   struct pb_cache_entry *entry;
   struct pb_cache_entry *cur_entry;
   struct list_head *cur, *next;
   int64_t now;
   int ret = 0;
   struct list_head *cache = &mgr->buckets[bucket_index];

   pipe_mutex_lock(mgr->mutex);

   entry = NULL;
   cur = cache->next;
   next = cur->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while (cur != cache) {
      cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);

      /* "ret" must receive the full tri-state result (1, 0, -1), hence
       * the parentheses around the assignment. */
      if (!entry && (ret = pb_cache_is_buffer_compat(cur_entry, size,
                                                     alignment, usage)) > 0)
         entry = cur_entry;
      else if (os_time_timeout(cur_entry->start, cur_entry->end, now))
         destroy_buffer_locked(cur_entry);
      else
         /* This buffer (and all after it) is still hot in the cache */
         break;

      /* the buffer is busy (and probably all remaining ones too) */
      if (ret == -1)
         break;

      cur = next;
      next = cur->next;
   }

   /* keep searching in the hot buffers */
   if (!entry && ret != -1) {
      while (cur != cache) {
         cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
         ret = pb_cache_is_buffer_compat(cur_entry, size, alignment, usage);

         if (ret > 0) {
            entry = cur_entry;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         cur = next;
         next = cur->next;
      }
   }

   /* found a compatible buffer, return it */
   if (entry) {
      struct pb_buffer *buf = entry->buffer;

      mgr->cache_size -= buf->size;
      LIST_DEL(&entry->head);
      --mgr->num_buffers;
      pipe_mutex_unlock(mgr->mutex);
      /* A cached buffer has a refcount of 0; reset it to 1 for the new
       * owner. */
      pipe_reference_init(&buf->reference, 1);
      return buf;
   }

   pipe_mutex_unlock(mgr->mutex);
   return NULL;
}
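
/*
 * A minimal allocation-path sketch (hypothetical driver code; all
 * driver_* names are assumed): try to reclaim from the cache first and
 * allocate a fresh buffer only on a miss:
 *
 *    struct pb_buffer *driver_buffer_create(struct driver_winsys *ws,
 *                                           pb_size size, unsigned align,
 *                                           unsigned usage, unsigned bucket)
 *    {
 *       struct pb_buffer *buf =
 *          pb_cache_reclaim_buffer(&ws->bo_cache, size, align, usage, bucket);
 *       if (buf)
 *          return buf;   // cache hit; refcount already reset to 1
 *
 *       buf = driver_buffer_alloc(ws, size, align, usage);  // assumed helper
 *       if (buf)
 *          pb_cache_init_entry(&ws->bo_cache, driver_entry(buf), buf, bucket);
 *       return buf;
 *    }
 */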

/**
 * Empty the cache. Useful when there is not enough memory.
 */
void
pb_cache_release_all_buffers(struct pb_cache *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *buf;
   unsigned i;

   pipe_mutex_lock(mgr->mutex);
   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
      struct list_head *cache = &mgr->buckets[i];

      curr = cache->next;
      next = curr->next;
      while (curr != cache) {
         buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
         destroy_buffer_locked(buf);
         curr = next;
         next = curr->next;
      }
   }
   pipe_mutex_unlock(mgr->mutex);
}

/**
 * Initialize a cache entry, associating a buffer with its manager and
 * bucket. The entry can later be passed to pb_cache_add_buffer().
 */
void
pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
                    struct pb_buffer *buf, unsigned bucket_index)
{
   memset(entry, 0, sizeof(*entry));
   entry->buffer = buf;
   entry->mgr = mgr;
   entry->bucket_index = bucket_index;
}

/**
 * Initialize a caching buffer manager.
 *
 * @param mgr     The cache buffer manager
 * @param usecs   Unused buffers may be released from the cache after this
 *                time
 * @param size_factor  Declare buffers that are at most size_factor times
 *                bigger than the requested size as cache hits.
 * @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
 *                the cache is bypassed and a new buffer must be allocated.
 * @param maximum_cache_size  Maximum size of all unused buffers the cache
 *                can hold.
 * @param destroy_buffer  Function that destroys a buffer for good.
 * @param can_reclaim     Whether a buffer can be reclaimed (e.g. is not busy)
 */
void
pb_cache_init(struct pb_cache *mgr, uint usecs, float size_factor,
              unsigned bypass_usage, uint64_t maximum_cache_size,
              void (*destroy_buffer)(struct pb_buffer *buf),
              bool (*can_reclaim)(struct pb_buffer *buf))
{
   unsigned i;

   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
      LIST_INITHEAD(&mgr->buckets[i]);

   pipe_mutex_init(mgr->mutex);
   mgr->cache_size = 0;
   mgr->max_cache_size = maximum_cache_size;
   mgr->usecs = usecs;
   mgr->num_buffers = 0;
   mgr->bypass_usage = bypass_usage;
   mgr->size_factor = size_factor;
   mgr->destroy_buffer = destroy_buffer;
   mgr->can_reclaim = can_reclaim;
}
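
/*
 * Example initialization (the parameter values are illustrative only):
 * keep unused buffers for up to one second, accept cached buffers up to
 * twice the requested size, and cap the cache at 256 MB:
 *
 *    pb_cache_init(&ws->bo_cache, 1000000, 2.0f, 0,
 *                  256 * 1024 * 1024,
 *                  driver_destroy_buffer_for_good,   // assumed callbacks
 *                  driver_buffer_is_idle);
 */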

/**
 * Deinitialize the manager completely.
 */
void
pb_cache_deinit(struct pb_cache *mgr)
{
   pb_cache_release_all_buffers(mgr);
   pipe_mutex_destroy(mgr->mutex);
}