/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer cache.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstom-at-vmware-dot-com>
 */


#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct pb_cache_manager;


/**
 * Wrapper around a pipe buffer which adds delayed destruction.
 */
struct pb_cache_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;
   struct pb_cache_manager *mgr;

   /** Caching time interval */
   int64_t start, end;

   struct list_head head;
};


struct pb_cache_manager
{
   struct pb_manager base;

   struct pb_manager *provider;
   unsigned usecs;

   pipe_mutex mutex;

   struct list_head delayed;
   pb_size numDelayed;
   float size_factor;
   unsigned bypass_usage;
   uint64_t cache_size, max_cache_size;
};
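
/*
 * Note on ordering: released buffers are appended at the tail of the
 * delayed list with a fixed caching interval, so the list stays sorted
 * by expiration time; scans can stop at the first entry whose timeout
 * has not yet elapsed.
 */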


static INLINE struct pb_cache_buffer *
pb_cache_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_cache_buffer *)buf;
}


static INLINE struct pb_cache_manager *
pb_cache_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_cache_manager *)mgr;
}


/**
 * Actually destroy the buffer.
 */
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   LIST_DEL(&buf->head);
   assert(mgr->numDelayed);
   --mgr->numDelayed;
   mgr->cache_size -= buf->base.size;
   assert(!pipe_is_referenced(&buf->base.reference));
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}


/**
 * Free as many cache buffers from the list head as possible.
 */
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;
   int64_t now;

   now = os_time_get();

   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);

      if(!os_time_timeout(buf->start, buf->end, now))
         break;

      _pb_cache_buffer_destroy(buf);

      curr = next;
      next = curr->next;
   }
}
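

/*
 * vtbl destroy hook: instead of freeing the buffer immediately, stamp it
 * with an expiration time and park it on the cache's delayed list, unless
 * admitting it would push the cache past max_cache_size.
 */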
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.reference));

   _pb_cache_buffer_list_check_free(mgr);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + buf->base.size > mgr->max_cache_size) {
      pb_reference(&buf->buffer, NULL);
      FREE(buf);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   mgr->cache_size += buf->base.size;
   pipe_mutex_unlock(mgr->mutex);
}


static void *
pb_cache_buffer_map(struct pb_buffer *_buf,
                    unsigned flags, void *flush_ctx)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_map(buf->buffer, flags, flush_ctx);
}


static void
pb_cache_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_unmap(buf->buffer);
}


static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_validate(buf->buffer, vl, flags);
}


static void
pb_cache_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
}


const struct pb_vtbl
pb_cache_buffer_vtbl = {
   pb_cache_buffer_destroy,
   pb_cache_buffer_map,
   pb_cache_buffer_unmap,
   pb_cache_buffer_validate,
   pb_cache_buffer_fence,
   pb_cache_buffer_get_base_buffer
};
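

/*
 * Decide whether a cached buffer can back a new allocation request.
 * Returns 1 on a usable match, 0 on a mismatch, and -1 when the buffer
 * matches but is still busy on the GPU, in which case callers stop
 * searching (later entries are even more recently used).
 */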
static int
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
                          pb_size size,
                          const struct pb_desc *desc)
{
   if (desc->usage & buf->mgr->bypass_usage)
      return 0;

   if(buf->base.size < size)
      return 0;

   /* be lenient with size */
   if(buf->base.size > (unsigned) (buf->mgr->size_factor * size))
      return 0;

   if(!pb_check_alignment(desc->alignment, buf->base.alignment))
      return 0;

   if(!pb_check_usage(desc->usage, buf->base.usage))
      return 0;

   if (buf->mgr->provider->is_buffer_busy) {
      if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
         return -1;
   } else {
      void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);

      if (!ptr)
         return -1;

      pb_unmap(buf->buffer);
   }

   return 1;
}


static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   int64_t now;
   int ret = 0;

   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while(curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      if(!buf && (ret = pb_cache_is_buffer_compat(curr_buf, size, desc)) > 0)
         buf = curr_buf;
      else if(os_time_timeout(curr_buf->start, curr_buf->end, now))
         _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and all hereafter) are still hot in cache */
         break;
      if (ret == -1)
         break;
      curr = next;
      next = curr->next;
   }

   /* keep searching in the hot buffers */
   if(!buf && ret != -1) {
      while(curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         ret = pb_cache_is_buffer_compat(curr_buf, size, desc);
         if (ret > 0) {
            buf = curr_buf;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }

   if (buf) {
      mgr->cache_size -= buf->base.size;
      LIST_DEL(&buf->head);
      --mgr->numDelayed;
      pipe_mutex_unlock(mgr->mutex);
      /* Increase refcount */
      pipe_reference_init(&buf->base.reference, 1);
      return &buf->base;
   }

   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if (!buf)
      return NULL;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);

   /* Empty the cache and try again. */
   if (!buf->buffer) {
      mgr->base.flush(&mgr->base);
      buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   }

   if (!buf->buffer) {
      FREE(buf);
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
   assert(pb_check_usage(desc->usage & ~mgr->bypass_usage, buf->buffer->usage));
   assert(buf->buffer->size >= size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = buf->buffer->alignment;
   buf->base.usage = buf->buffer->usage;
   buf->base.size = buf->buffer->size;

   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;

   return &buf->base;
}


static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      _pb_cache_buffer_destroy(buf);
      curr = next;
      next = curr->next;
   }
   pipe_mutex_unlock(mgr->mutex);

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_cache_manager_destroy(struct pb_manager *mgr)
{
   pb_cache_manager_flush(mgr);
   FREE(mgr);
}


/**
 * Create a caching buffer manager.
 *
 * @param provider The buffer manager to which cache miss buffer requests
 * should be redirected.
 * @param usecs Unused buffers may be released from the cache after this
 * time.
 * @param size_factor Declare buffers that are size_factor times bigger than
 * the requested size as cache hits.
 * @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
 * buffer allocation requests are redirected to the provider.
 * @param maximum_cache_size Maximum size of all unused buffers the cache can
 * hold.
 */
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
                        unsigned usecs,
                        float size_factor,
                        unsigned bypass_usage,
                        uint64_t maximum_cache_size)
{
   struct pb_cache_manager *mgr;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   mgr->size_factor = size_factor;
   mgr->bypass_usage = bypass_usage;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   mgr->max_cache_size = maximum_cache_size;
   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}
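

/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * winsys typically stacks this manager on top of its real buffer manager
 * so that short-lived buffers are recycled instead of round-tripping to
 * the kernel. "my_winsys_create_manager" below is a hypothetical provider
 * constructor; the numeric parameters are example tuning values.
 *
 *    struct pb_manager *provider = my_winsys_create_manager(ws);
 *    struct pb_manager *cached =
 *       pb_cache_manager_create(provider,
 *                               1000 * 1000,  // keep unused buffers ~1 s
 *                               2.0f,         // reuse buffers up to 2x size
 *                               0,            // no usage bits bypass cache
 *                               64 * 1024 * 1024);  // 64 MiB cache limit
 *
 *    struct pb_desc desc;
 *    memset(&desc, 0, sizeof desc);
 *    desc.alignment = 4096;
 *    struct pb_buffer *bo = cached->create_buffer(cached, 65536, &desc);
 *    ...
 *    pb_reference(&bo, NULL);  // released buffers return to the cache
 *
 *    cached->destroy(cached);  // flushes cached buffers; the provider
 *                              // must be destroyed separately afterwards
 */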