/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer cache.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstom-at-vmware-dot-com>
 */


#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)
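
/*
 * Usage sketch: for any "derived" type that embeds its parent as a member
 * named "base", SUPER() upcasts to that member, e.g. for a
 * struct pb_cache_buffer *buf below, SUPER(buf) yields &buf->base.
 */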


struct pb_cache_manager;


/**
 * Wrapper around a pipe buffer which adds delayed destruction.
 */
struct pb_cache_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;
   struct pb_cache_manager *mgr;

   /** Caching time interval */
   int64_t start, end;

   struct list_head head;
};


struct pb_cache_manager
{
   struct pb_manager base;

   struct pb_manager *provider;
   unsigned usecs;
   float size_factor;

   pipe_mutex mutex;

   struct list_head delayed;
   size_t numDelayed;
   unsigned bypass_usage;
   uint64_t cache_size, max_cache_size;
};
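
/*
 * Lifecycle sketch (a summary of the code below, not an API contract):
 *
 *    create_buffer()  reuse a compatible idle buffer from "delayed" on a
 *                     cache hit, otherwise allocate from the provider
 *    destroy()        instead of freeing the buffer, timestamp it and
 *                     append it to "delayed" (unless the cache would then
 *                     exceed max_cache_size)
 *    flush()          destroy everything on "delayed" immediately
 */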


static inline struct pb_cache_buffer *
pb_cache_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_cache_buffer *)buf;
}


static inline struct pb_cache_manager *
pb_cache_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_cache_manager *)mgr;
}


static void
_pb_cache_manager_remove_buffer_locked(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   if (buf->head.next) {
      LIST_DEL(&buf->head);
      assert(mgr->numDelayed);
      --mgr->numDelayed;
      mgr->cache_size -= buf->base.size;
   }
   buf->mgr = NULL;
}


void
pb_cache_manager_remove_buffer(struct pb_buffer *pb_buf)
{
   struct pb_cache_buffer *buf = (struct pb_cache_buffer *)pb_buf;
   struct pb_cache_manager *mgr = buf->mgr;

   if (!mgr)
      return;

   pipe_mutex_lock(mgr->mutex);
   _pb_cache_manager_remove_buffer_locked(buf);
   pipe_mutex_unlock(mgr->mutex);
}


/**
 * Actually destroy the buffer.
 */
static void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   _pb_cache_manager_remove_buffer_locked(buf);
   assert(!pipe_is_referenced(&buf->base.reference));
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}


/**
 * Free as many cache buffers from the list head as possible.
 */
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;
   int64_t now;

   now = os_time_get();

   curr = mgr->delayed.next;
   next = curr->next;
   while (curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);

      /* Entries are ordered by release time, so stop at the first buffer
       * whose caching interval has not yet expired. */
      if (!os_time_timeout(buf->start, buf->end, now))
         break;

      _pb_cache_buffer_destroy(buf);

      curr = next;
      next = curr->next;
   }
}


static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   struct pb_cache_manager *mgr = buf->mgr;

   /* The buffer may already have been removed from the cache manager. */
   if (!mgr) {
      pb_reference(&buf->buffer, NULL);
      FREE(buf);
      return;
   }

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.reference));

   _pb_cache_buffer_list_check_free(mgr);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + buf->base.size > mgr->max_cache_size) {
      pb_reference(&buf->buffer, NULL);
      FREE(buf);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   mgr->cache_size += buf->base.size;
   pipe_mutex_unlock(mgr->mutex);
}


static void *
pb_cache_buffer_map(struct pb_buffer *_buf,
                    unsigned flags, void *flush_ctx)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_map(buf->buffer, flags, flush_ctx);
}


static void
pb_cache_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_unmap(buf->buffer);
}


static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_validate(buf->buffer, vl, flags);
}


static void
pb_cache_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
}


const struct pb_vtbl
pb_cache_buffer_vtbl = {
   pb_cache_buffer_destroy,
   pb_cache_buffer_map,
   pb_cache_buffer_unmap,
   pb_cache_buffer_validate,
   pb_cache_buffer_fence,
   pb_cache_buffer_get_base_buffer
};


/**
 * Check whether an idle cached buffer can back a new allocation request.
 * Returns 1 on a hit, 0 if the buffer is incompatible, and -1 if the
 * buffer is still busy (in which case the caller stops searching).
 */
static int
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
                          pb_size size,
                          const struct pb_desc *desc)
{
   if (desc->usage & buf->mgr->bypass_usage)
      return 0;

   if (buf->base.size < size)
      return 0;

   /* be lenient with size */
   if (buf->base.size > (unsigned) (buf->mgr->size_factor * size))
      return 0;

   if (!pb_check_alignment(desc->alignment, buf->base.alignment))
      return 0;

   if (!pb_check_usage(desc->usage, buf->base.usage))
      return 0;

   if (buf->mgr->provider->is_buffer_busy) {
      if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
         return -1;
   } else {
      void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);

      /* A failed non-blocking map means the buffer is still in use. */
      if (!ptr)
         return -1;

      pb_unmap(buf->buffer);
   }

   return 1;
}


static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   int64_t now;
   int ret = 0;

   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while (curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      if (!buf && (ret = pb_cache_is_buffer_compat(curr_buf, size, desc)) > 0)
         buf = curr_buf;
      else if (os_time_timeout(curr_buf->start, curr_buf->end, now))
         _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and all hereafter) are still hot in cache */
         break;
      if (ret == -1)
         break;
      curr = next;
      next = curr->next;
   }

   /* keep searching in the hot buffers */
   if (!buf && ret != -1) {
      while (curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         ret = pb_cache_is_buffer_compat(curr_buf, size, desc);
         if (ret > 0) {
            buf = curr_buf;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }

   if (buf) {
      mgr->cache_size -= buf->base.size;
      LIST_DEL(&buf->head);
      --mgr->numDelayed;
      pipe_mutex_unlock(mgr->mutex);
      /* Increase refcount */
      pipe_reference_init(&buf->base.reference, 1);
      return &buf->base;
   }

   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if (!buf)
      return NULL;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);

   /* Empty the cache and try again. */
   if (!buf->buffer) {
      mgr->base.flush(&mgr->base);
      buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   }

   if (!buf->buffer) {
      FREE(buf);
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
   assert(pb_check_usage(desc->usage & ~mgr->bypass_usage, buf->buffer->usage));
   assert(buf->buffer->size >= size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = buf->buffer->alignment;
   buf->base.usage = buf->buffer->usage;
   buf->base.size = buf->buffer->size;

   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;

   return &buf->base;
}


static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
   curr = mgr->delayed.next;
   next = curr->next;
   while (curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      _pb_cache_buffer_destroy(buf);
      curr = next;
      next = curr->next;
   }
   pipe_mutex_unlock(mgr->mutex);

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_cache_manager_destroy(struct pb_manager *mgr)
{
   pb_cache_manager_flush(mgr);
   FREE(mgr);
}


/**
 * Create a caching buffer manager.
 *
 * @param provider The buffer manager to which cache miss buffer requests
 * should be redirected.
 * @param usecs Unused buffers may be released from the cache after this
 * time.
 * @param size_factor Declare buffers that are size_factor times bigger than
 * the requested size as cache hits.
 * @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
 * buffer allocation requests are redirected to the provider.
 * @param maximum_cache_size Maximum size of all unused buffers the cache
 * can hold.
 */
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
                        unsigned usecs,
                        float size_factor,
                        unsigned bypass_usage,
                        uint64_t maximum_cache_size)
{
   struct pb_cache_manager *mgr;

   if (!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   mgr->size_factor = size_factor;
   mgr->bypass_usage = bypass_usage;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   mgr->max_cache_size = maximum_cache_size;
   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}
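

/*
 * Usage sketch (hypothetical driver code; the provider would typically be a
 * winsys or slab buffer manager, and the numbers are only examples):
 *
 *    struct pb_manager *cache_mgr =
 *       pb_cache_manager_create(provider,
 *                               1000000,           (usecs: keep idle ~1 s)
 *                               2.0f,              (size_factor)
 *                               0,                 (bypass_usage: none)
 *                               64 * 1024 * 1024); (max_cache_size: 64 MiB)
 *
 *    struct pb_desc desc = { .alignment = 4096, .usage = 0 };
 *    struct pb_buffer *pb =
 *       cache_mgr->create_buffer(cache_mgr, 128 * 1024, &desc);
 *    ...
 *    pb_reference(&pb, NULL);        (released into the cache, not freed)
 *    cache_mgr->destroy(cache_mgr);  (flushes the cache, frees the manager)
 */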