util: remove LIST_DEL macro
[mesa.git] / src/gallium/auxiliary/pipebuffer/pb_cache.c
/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pb_cache.h"
#include "util/u_memory.h"
#include "util/os_time.h"


/**
 * Actually destroy the buffer.
 */
static void
destroy_buffer_locked(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;
   struct pb_buffer *buf = entry->buffer;

   assert(!pipe_is_referenced(&buf->reference));
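   /* Only an entry that is still linked into a bucket (head.next != NULL)
    * participates in the cache accounting; an unlinked entry just has its
    * buffer destroyed. */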
   if (entry->head.next) {
      list_del(&entry->head);
      assert(mgr->num_buffers);
      --mgr->num_buffers;
      mgr->cache_size -= buf->size;
   }
   mgr->destroy_buffer(buf);
}

/**
 * Free expired buffers from the head of the list, stopping at the first
 * entry that has not yet timed out.
 */
static void
release_expired_buffers_locked(struct list_head *cache,
                               int64_t current_time)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *entry;

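   /* Buffers are appended with list_addtail(), so each bucket is ordered by
    * expiry time and the walk can stop at the first entry that has not yet
    * timed out.  `next` is saved up front because destroy_buffer_locked()
    * unlinks the current entry. */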
   curr = cache->next;
   next = curr->next;
   while (curr != cache) {
      entry = LIST_ENTRY(struct pb_cache_entry, curr, head);

      if (!os_time_timeout(entry->start, entry->end, current_time))
         break;

      destroy_buffer_locked(entry);

      curr = next;
      next = curr->next;
   }
}

/**
 * Add a buffer to the cache. This is typically done when the buffer is
 * being released.
 */
void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;
   struct list_head *cache = &mgr->buckets[entry->bucket_index];
   struct pb_buffer *buf = entry->buffer;
   unsigned i;

   mtx_lock(&mgr->mutex);
   assert(!pipe_is_referenced(&buf->reference));

   int64_t current_time = os_time_get();

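   /* Trim expired buffers from every bucket first, so that cache_size is up
    * to date for the limit check below. */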
   for (i = 0; i < mgr->num_heaps; i++)
      release_expired_buffers_locked(&mgr->buckets[i], current_time);

   /* Directly release the buffer if adding it would put the cache over its
    * size limit. */
   if (mgr->cache_size + buf->size > mgr->max_cache_size) {
      mgr->destroy_buffer(buf);
      mtx_unlock(&mgr->mutex);
      return;
   }

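   /* Record the window during which the buffer may stay cached;
    * release_expired_buffers_locked() frees it once `end` has passed. */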
   entry->start = os_time_get();
   entry->end = entry->start + mgr->usecs;
   list_addtail(&entry->head, cache);
   ++mgr->num_buffers;
   mgr->cache_size += buf->size;
   mtx_unlock(&mgr->mutex);
}

/**
 * \return 1   if compatible and can be reclaimed
 *         0   if incompatible
 *        -1   if compatible and can't be reclaimed
 */
static int
pb_cache_is_buffer_compat(struct pb_cache_entry *entry,
                          pb_size size, unsigned alignment, unsigned usage)
{
   struct pb_cache *mgr = entry->mgr;
   struct pb_buffer *buf = entry->buffer;

   if (!pb_check_usage(usage, buf->usage))
      return 0;

   /* be lenient with size */
   if (buf->size < size ||
       buf->size > (unsigned) (mgr->size_factor * size))
      return 0;

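   /* A request that carries any bypass_usage bit must never be served from
    * the cache. */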
   if (usage & mgr->bypass_usage)
      return 0;

   if (!pb_check_alignment(alignment, buf->alignment))
      return 0;

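   /* The buffer matches; whether it can be reclaimed right now depends on
    * whether the driver still considers it busy (e.g. referenced by the
    * GPU). */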
   return mgr->can_reclaim(buf) ? 1 : -1;
}

/**
 * Find a compatible buffer in the cache, return it, and remove it
 * from the cache.
 */
struct pb_buffer *
pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
                        unsigned alignment, unsigned usage,
                        unsigned bucket_index)
{
   struct pb_cache_entry *entry;
   struct pb_cache_entry *cur_entry;
   struct list_head *cur, *next;
   int64_t now;
   int ret = 0;

   assert(bucket_index < mgr->num_heaps);
   struct list_head *cache = &mgr->buckets[bucket_index];

   mtx_lock(&mgr->mutex);

   entry = NULL;
   cur = cache->next;
   next = cur->next;

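   /* The search happens in two passes: first over the expired portion at
    * the head of the bucket (destroying stale entries as it goes), then
    * over the still-hot remainder.  Both passes stop at the first
    * compatible-but-busy buffer, because later entries were cached even
    * more recently and are even less likely to be idle. */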
   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while (cur != cache) {
      cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);

      if (!entry && (ret = pb_cache_is_buffer_compat(cur_entry, size,
                                                     alignment, usage)) > 0)
         entry = cur_entry;
      else if (os_time_timeout(cur_entry->start, cur_entry->end, now))
         destroy_buffer_locked(cur_entry);
      else
         /* This buffer (and every one after it) is still hot in the cache */
         break;

      /* the buffer is busy (and probably all remaining ones too) */
      if (ret == -1)
         break;

      cur = next;
      next = cur->next;
   }

   /* keep searching in the hot buffers */
   if (!entry && ret != -1) {
      while (cur != cache) {
         cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
         ret = pb_cache_is_buffer_compat(cur_entry, size, alignment, usage);

         if (ret > 0) {
            entry = cur_entry;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         cur = next;
         next = cur->next;
      }
   }

   /* found a compatible buffer, return it */
   if (entry) {
      struct pb_buffer *buf = entry->buffer;

      mgr->cache_size -= buf->size;
      list_del(&entry->head);
      --mgr->num_buffers;
      mtx_unlock(&mgr->mutex);
      /* Hand the buffer back with a fresh reference count of 1 */
      pipe_reference_init(&buf->reference, 1);
      return buf;
   }

   mtx_unlock(&mgr->mutex);
   return NULL;
}

/**
 * Empty the cache. Useful when there is not enough memory.
 */
void
pb_cache_release_all_buffers(struct pb_cache *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *buf;
   unsigned i;

   mtx_lock(&mgr->mutex);
   for (i = 0; i < mgr->num_heaps; i++) {
      struct list_head *cache = &mgr->buckets[i];

      curr = cache->next;
      next = curr->next;
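      /* As in release_expired_buffers_locked(), `next` is saved first
       * because destroy_buffer_locked() unlinks the current entry. */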
      while (curr != cache) {
         buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
         destroy_buffer_locked(buf);
         curr = next;
         next = curr->next;
      }
   }
   mtx_unlock(&mgr->mutex);
}

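/**
 * Initialize a cache entry embedded in a driver buffer, so the buffer can
 * later be added to and reclaimed from the cache.
 */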
void
pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
                    struct pb_buffer *buf, unsigned bucket_index)
{
   assert(bucket_index < mgr->num_heaps);

   memset(entry, 0, sizeof(*entry));
   entry->buffer = buf;
   entry->mgr = mgr;
   entry->bucket_index = bucket_index;
}

/**
 * Initialize a caching buffer manager.
 *
 * @param mgr  The cache buffer manager
 * @param num_heaps  Number of separate caches/buckets indexed by bucket_index
 *                   for faster buffer matching (alternative to slower
 *                   "usage"-based matching).
 * @param usecs  Unused buffers may be released from the cache after this
 *               time
 * @param size_factor  Declare buffers that are size_factor times bigger than
 *                     the requested size as cache hits.
 * @param bypass_usage  Bitmask. If (requested usage & bypass_usage) != 0,
 *                      the request bypasses the cache and no buffer is
 *                      reclaimed.
 * @param maximum_cache_size  Maximum size of all unused buffers the cache
 *                            can hold.
 * @param destroy_buffer  Function that destroys a buffer for good.
 * @param can_reclaim     Whether a buffer can be reclaimed (e.g. is not busy)
 */
void
pb_cache_init(struct pb_cache *mgr, uint num_heaps,
              uint usecs, float size_factor,
              unsigned bypass_usage, uint64_t maximum_cache_size,
              void (*destroy_buffer)(struct pb_buffer *buf),
              bool (*can_reclaim)(struct pb_buffer *buf))
{
   unsigned i;

   mgr->buckets = CALLOC(num_heaps, sizeof(struct list_head));
   if (!mgr->buckets)
      return;

   for (i = 0; i < num_heaps; i++)
      list_inithead(&mgr->buckets[i]);

   (void) mtx_init(&mgr->mutex, mtx_plain);
   mgr->cache_size = 0;
   mgr->max_cache_size = maximum_cache_size;
   mgr->num_heaps = num_heaps;
   mgr->usecs = usecs;
   mgr->num_buffers = 0;
   mgr->bypass_usage = bypass_usage;
   mgr->size_factor = size_factor;
   mgr->destroy_buffer = destroy_buffer;
   mgr->can_reclaim = can_reclaim;
}
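
/* Illustrative sketch only (not part of this file's API): one way a driver
 * could wire the cache up.  The example_* names and the embedding of
 * pb_cache_entry in the driver's buffer struct are hypothetical; real users
 * (e.g. winsys buffer managers) supply their own types and callbacks.
 *
 *    struct example_buffer {
 *       struct pb_buffer base;
 *       struct pb_cache_entry cache_entry;
 *    };
 *
 *    static void example_destroy(struct pb_buffer *buf)
 *    {
 *       FREE(buf);  // free the underlying allocation for good
 *    }
 *
 *    static bool example_can_reclaim(struct pb_buffer *buf)
 *    {
 *       return true;  // e.g. check a fence: is the GPU done with it?
 *    }
 *
 *    // One bucket, 1 s expiry, accept cached buffers up to 2x the
 *    // requested size, no bypass bits, at most 256 MB of idle buffers.
 *    pb_cache_init(&cache, 1, 1000000, 2.0f, 0, 256 * 1024 * 1024,
 *                  example_destroy, example_can_reclaim);
 *
 *    // Allocation path: try to reclaim an idle buffer before allocating a
 *    // new one; a freshly allocated buffer gets its cache entry initialized.
 *    struct pb_buffer *buf =
 *       pb_cache_reclaim_buffer(&cache, size, alignment, usage, 0);
 *    if (!buf) {
 *       struct example_buffer *ebuf = CALLOC_STRUCT(example_buffer);
 *       pb_cache_init_entry(&cache, &ebuf->cache_entry, &ebuf->base, 0);
 *       buf = &ebuf->base;
 *    }
 *
 *    // Release path (reference count reached zero): park it in the cache.
 *    pb_cache_add_buffer(&ebuf->cache_entry);
 */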

/**
 * Deinitialize the manager completely.
 */
void
pb_cache_deinit(struct pb_cache *mgr)
{
   pb_cache_release_all_buffers(mgr);
   mtx_destroy(&mgr->mutex);
   FREE(mgr->buckets);
   mgr->buckets = NULL;
}