/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * \file
 * Buffer cache.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstom-at-vmware-dot-com>
 */

#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"

/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)

struct pb_cache_manager;

/**
 * Wrapper around a pipe buffer which adds delayed destruction.
 */
struct pb_cache_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;
   struct pb_cache_manager *mgr;

   /** Caching time interval */
   int64_t start, end;

   struct list_head head;
};
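
/*
 * Lifecycle sketch, inferred from the code below: when the last reference
 * to a pb_cache_buffer is released, the underlying buffer is not returned
 * to the provider right away; it is parked on the manager's "delayed" list
 * for mgr->usecs microseconds.  A later allocation of compatible size,
 * alignment and usage may resurrect it; otherwise it is destroyed once its
 * interval expires.
 */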

struct pb_cache_manager
{
   struct pb_manager base;

   struct pb_manager *provider;
   unsigned usecs;

   pipe_mutex mutex;

   struct list_head delayed;
   size_t numDelayed;
   float size_factor;
   unsigned bypass_usage;
};

static INLINE struct pb_cache_buffer *
pb_cache_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_cache_buffer *)buf;
}

static INLINE struct pb_cache_manager *
pb_cache_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_cache_manager *)mgr;
}

/**
 * Actually destroy the buffer.
 */
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   LIST_DEL(&buf->head);
   assert(mgr->numDelayed);
   --mgr->numDelayed;
   assert(!pipe_is_referenced(&buf->base.reference));
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}

/**
 * Free as many cache buffers from the list head as possible.
 */
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;
   int64_t now;

   now = os_time_get();

   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);

      /* The list is kept in expiration order, so stop at the first buffer
       * whose interval has not yet elapsed. */
      if(!os_time_timeout(buf->start, buf->end, now))
         break;

      _pb_cache_buffer_destroy(buf);

      curr = next;
      next = curr->next;
   }
}

static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.reference));

   _pb_cache_buffer_list_check_free(mgr);

   /* Do not free yet: park the buffer on the delayed list so it can be
    * reused within the next mgr->usecs microseconds. */
   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   pipe_mutex_unlock(mgr->mutex);
}

static void *
pb_cache_buffer_map(struct pb_buffer *_buf,
                    unsigned flags, void *flush_ctx)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_map(buf->buffer, flags, flush_ctx);
}

static void
pb_cache_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_unmap(buf->buffer);
}

static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_validate(buf->buffer, vl, flags);
}

static void
pb_cache_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_fence(buf->buffer, fence);
}

static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
}

const struct pb_vtbl
pb_cache_buffer_vtbl = {
      pb_cache_buffer_destroy,
      pb_cache_buffer_map,
      pb_cache_buffer_unmap,
      pb_cache_buffer_validate,
      pb_cache_buffer_fence,
      pb_cache_buffer_get_base_buffer
};

/**
 * Check whether a cached buffer can satisfy an allocation request.
 *
 * Returns 1 on a hit, 0 if the buffer is incompatible, and -1 if the
 * buffer would match but is still busy (callers stop probing on -1).
 */
static int
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
                          pb_size size,
                          const struct pb_desc *desc)
{
   if (desc->usage & buf->mgr->bypass_usage)
      return 0;

   if(buf->base.size < size)
      return 0;

   /* be lenient with size */
   if(buf->base.size > (unsigned) (buf->mgr->size_factor * size))
      return 0;

   if(!pb_check_alignment(desc->alignment, buf->base.alignment))
      return 0;

   if(!pb_check_usage(desc->usage, buf->base.usage))
      return 0;

   if (buf->mgr->provider->is_buffer_busy) {
      if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
         return -1;
   } else {
      /* No busy query available: a non-blocking map succeeds only if the
       * buffer is idle. */
      void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);

      if (!ptr)
         return -1;

      pb_unmap(buf->buffer);
   }

   return 1;
}

static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   int64_t now;
   int ret = 0;

   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while(curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      /* Parenthesize the assignment so that ret keeps the raw -1/0/1
       * result rather than the boolean of the comparison. */
      if(!buf && (ret = pb_cache_is_buffer_compat(curr_buf, size, desc)) > 0)
         buf = curr_buf;
      else if(os_time_timeout(curr_buf->start, curr_buf->end, now))
         _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and all hereafter) are still hot in cache */
         break;
      if (ret == -1)
         break;
      curr = next;
      next = curr->next;
   }

   /* keep searching in the hot buffers */
   if(!buf && ret != -1) {
      while(curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         ret = pb_cache_is_buffer_compat(curr_buf, size, desc);
         if (ret > 0) {
            buf = curr_buf;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }

   if(buf) {
      mgr->numDelayed--;
      LIST_DEL(&buf->head);
      pipe_mutex_unlock(mgr->mutex);
      /* Increase refcount */
      pipe_reference_init(&buf->base.reference, 1);
      return &buf->base;
   }

   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if(!buf)
      return NULL;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);

   /* Empty the cache and try again. */
   if(!buf->buffer) {
      mgr->base.flush(&mgr->base);
      buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   }

   if(!buf->buffer) {
      FREE(buf);
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
   assert(pb_check_usage(desc->usage & ~mgr->bypass_usage, buf->buffer->usage));
   assert(buf->buffer->size >= size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = buf->buffer->alignment;
   buf->base.usage = buf->buffer->usage;
   buf->base.size = buf->buffer->size;

   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;

   return &buf->base;
}

static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      _pb_cache_buffer_destroy(buf);
      curr = next;
      next = curr->next;
   }
   pipe_mutex_unlock(mgr->mutex);

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}

static void
pb_cache_manager_destroy(struct pb_manager *mgr)
{
   pb_cache_manager_flush(mgr);
   FREE(mgr);
}

/**
 * Create a caching buffer manager.
 *
 * @param provider The buffer manager to which cache miss buffer requests
 * should be redirected.
 * @param usecs Unused buffers may be released from the cache after this
 * time.
 * @param size_factor Declare buffers that are size_factor times bigger than
 * the requested size as cache hits.
 * @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
 * buffer allocation requests are redirected to the provider.
 */
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
                        unsigned usecs,
                        float size_factor,
                        unsigned bypass_usage)
{
   struct pb_cache_manager *mgr;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_cache_manager);
   if(!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   mgr->size_factor = size_factor;
   mgr->bypass_usage = bypass_usage;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}
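
/*
 * Usage sketch (assumptions flagged inline): "winsys_mgr" is a hypothetical
 * provider obtained from a driver's winsys, and the numeric arguments are
 * illustrative rather than recommended values.
 *
 *    struct pb_manager *winsys_mgr = ...;  // assumed provider, driver-specific
 *    struct pb_manager *cached;
 *    struct pb_desc desc;
 *    struct pb_buffer *pbuf;
 *
 *    // Keep freed buffers for up to 1 second, treat buffers up to 2x the
 *    // requested size as hits, and never cache CPU-read requests.
 *    cached = pb_cache_manager_create(winsys_mgr, 1000000, 2.0f,
 *                                     PB_USAGE_CPU_READ);
 *
 *    memset(&desc, 0, sizeof desc);
 *    desc.alignment = 4096;
 *    desc.usage = PB_USAGE_GPU_READ;
 *    pbuf = cached->create_buffer(cached, 64 * 1024, &desc);
 *
 *    pb_reference(&pbuf, NULL);   // lands on the delayed list, may be reused
 *    cached->destroy(cached);     // flushes the cache, then frees the manager
 */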