/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer cache.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */

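/*
 * Overview: rather than destroying a buffer as soon as its last reference
 * is gone, this manager parks it on a "delayed" list for a configurable
 * number of microseconds.  Allocation requests are first matched against
 * the buffers parked there; only on a cache miss is the request forwarded
 * to the underlying provider.  Expired buffers are freed lazily, whenever
 * the list is walked.
 */
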
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct pb_cache_manager;


/**
 * Wrapper around a pipe buffer which adds delayed destruction.
 */
struct pb_cache_buffer
{
   struct pb_buffer base;

   /** Underlying buffer obtained from the provider */
   struct pb_buffer *buffer;
   struct pb_cache_manager *mgr;

   /** Caching time interval: absolute timestamps, in microseconds */
   int64_t start, end;

   /** Node in pb_cache_manager::delayed */
   struct list_head head;
};


struct pb_cache_manager
{
   struct pb_manager base;

   /** Where cache misses are forwarded */
   struct pb_manager *provider;
   /** How long unused buffers stay cached, in microseconds */
   unsigned usecs;

   pipe_mutex mutex;

   /** Unused buffers, least recently released first */
   struct list_head delayed;
   pb_size numDelayed;
   /** A cached buffer up to size_factor times the requested size is a hit */
   float size_factor;
   /** Requests with any of these usage bits go straight to the provider */
   unsigned bypass_usage;
   uint64_t cache_size, max_cache_size;
};


static INLINE struct pb_cache_buffer *
pb_cache_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_cache_buffer *)buf;
}


static INLINE struct pb_cache_manager *
pb_cache_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_cache_manager *)mgr;
}


/**
 * Actually destroy the buffer.
 */
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   LIST_DEL(&buf->head);
   assert(mgr->numDelayed);
   --mgr->numDelayed;
   mgr->cache_size -= buf->base.size;
   assert(!pipe_is_referenced(&buf->base.reference));
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}


/**
 * Free expired buffers from the head of the delayed list, stopping at the
 * first buffer whose caching interval has not yet elapsed.
 */
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;
   int64_t now;

   now = os_time_get();

   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);

      if(!os_time_timeout(buf->start, buf->end, now))
         break;

      _pb_cache_buffer_destroy(buf);

      curr = next;
      next = curr->next;
   }
}


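/**
 * vtbl destroy hook: rather than freeing the buffer, stash it on the
 * manager's delayed list so a later allocation can recycle it.  Buffers
 * that would push the cache past max_cache_size are released immediately.
 */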
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.reference));

   _pb_cache_buffer_list_check_free(mgr);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + buf->base.size > mgr->max_cache_size) {
      pb_reference(&buf->buffer, NULL);
      FREE(buf);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   mgr->cache_size += buf->base.size;
   pipe_mutex_unlock(mgr->mutex);
}


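/*
 * The remaining buffer operations are thin wrappers that simply forward to
 * the underlying provider buffer.
 */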
static void *
pb_cache_buffer_map(struct pb_buffer *_buf,
                    unsigned flags, void *flush_ctx)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_map(buf->buffer, flags, flush_ctx);
}


static void
pb_cache_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_unmap(buf->buffer);
}


static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_validate(buf->buffer, vl, flags);
}


static void
pb_cache_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
}


const struct pb_vtbl
pb_cache_buffer_vtbl = {
   pb_cache_buffer_destroy,
   pb_cache_buffer_map,
   pb_cache_buffer_unmap,
   pb_cache_buffer_validate,
   pb_cache_buffer_fence,
   pb_cache_buffer_get_base_buffer
};


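/**
 * Check whether a cached buffer can satisfy an allocation request.
 *
 * Returns 1 if the buffer is compatible and idle, 0 if it is incompatible
 * (skip it and keep searching), and -1 if it matches but is still busy
 * (the caller stops searching, since later buffers were released even more
 * recently).
 */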
static INLINE int
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
                          pb_size size,
                          const struct pb_desc *desc)
{
   if (desc->usage & buf->mgr->bypass_usage)
      return 0;

   if(buf->base.size < size)
      return 0;

   /* be lenient with size */
   if(buf->base.size > (unsigned) (buf->mgr->size_factor * size))
      return 0;

   if(!pb_check_alignment(desc->alignment, buf->base.alignment))
      return 0;

   if(!pb_check_usage(desc->usage, buf->base.usage))
      return 0;

   if (buf->mgr->provider->is_buffer_busy) {
      if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
         return -1;
   } else {
      void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);

      if (!ptr)
         return -1;

      pb_unmap(buf->buffer);
   }

   return 1;
}


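/**
 * Allocation entry point: first try to recycle a compatible buffer from the
 * delayed list (freeing expired buffers along the way); only on a cache
 * miss, or when the first compatible buffer found is still busy, forward
 * the request to the provider.
 */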
static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   int64_t now;
   int ret = 0;

   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while(curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      /* Assign first, then compare, so that ret can carry -1 (busy). */
      if(!buf && (ret = pb_cache_is_buffer_compat(curr_buf, size, desc)) > 0)
         buf = curr_buf;
      else if(os_time_timeout(curr_buf->start, curr_buf->end, now))
         _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and all that follow it) is still hot in the cache */
         break;
      if (ret == -1)
         break;
      curr = next;
      next = curr->next;
   }

   /* keep searching among the still-hot buffers */
   if(!buf && ret != -1) {
      while(curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         ret = pb_cache_is_buffer_compat(curr_buf, size, desc);
         if (ret > 0) {
            buf = curr_buf;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }

   if(buf) {
      mgr->cache_size -= buf->base.size;
      LIST_DEL(&buf->head);
      --mgr->numDelayed;
      pipe_mutex_unlock(mgr->mutex);
      /* Bring the recycled buffer back to life with a single reference */
      pipe_reference_init(&buf->base.reference, 1);
      return &buf->base;
   }

   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if(!buf)
      return NULL;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);

   /* If the allocation failed, empty the cache and try again. */
   if (!buf->buffer) {
      mgr->base.flush(&mgr->base);
      buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   }

   if(!buf->buffer) {
      FREE(buf);
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
   assert(pb_check_usage(desc->usage & ~mgr->bypass_usage, buf->buffer->usage));
   assert(buf->buffer->size >= size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = buf->buffer->alignment;
   buf->base.usage = buf->buffer->usage;
   buf->base.size = buf->buffer->size;

   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;

   return &buf->base;
}


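/**
 * Flush hook: destroy every cached buffer, then forward the flush to the
 * provider.
 */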
static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      _pb_cache_buffer_destroy(buf);
      curr = next;
      next = curr->next;
   }
   pipe_mutex_unlock(mgr->mutex);

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_cache_manager_destroy(struct pb_manager *mgr)
{
   pb_cache_manager_flush(mgr);
   FREE(mgr);
}

/**
 * Create a caching buffer manager.
 *
 * @param provider The buffer manager to which cache-miss buffer requests
 * are forwarded.
 * @param usecs Unused buffers may be released from the cache after this
 * many microseconds.
 * @param size_factor A cached buffer that is at most size_factor times
 * bigger than the requested size counts as a cache hit.
 * @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
 * the allocation request is redirected to the provider.
 * @param maximum_cache_size Maximum total size of all unused buffers the
 * cache may hold.
 */
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
                        unsigned usecs,
                        float size_factor,
                        unsigned bypass_usage,
                        uint64_t maximum_cache_size)
{
   struct pb_cache_manager *mgr;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   mgr->size_factor = size_factor;
   mgr->bypass_usage = bypass_usage;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   mgr->max_cache_size = maximum_cache_size;
   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}
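

/*
 * Example (illustrative sketch, not part of this file): a winsys would
 * typically wrap its raw buffer manager like this; "raw_mgr" is a
 * hypothetical provider created elsewhere, and the parameter values are
 * only plausible choices:
 *
 *    struct pb_manager *cached_mgr =
 *       pb_cache_manager_create(raw_mgr,
 *                               1000000,     // keep idle buffers for 1 s
 *                               2.0f,        // accept up to 2x the size
 *                               0,           // no usage bits bypass the cache
 *                               256 * 1024 * 1024);  // 256 MiB cache cap
 *
 * Buffers allocated through cached_mgr and later released become eligible
 * for recycling by subsequent create_buffer() calls.
 */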