1 /**************************************************************************
3 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
32 * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
33 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37 #include "pipe/p_compiler.h"
38 #include "pipe/p_debug.h"
39 #include "pipe/p_winsys.h"
40 #include "pipe/p_thread.h"
41 #include "util/u_memory.h"
42 #include "util/u_double_list.h"
43 #include "util/u_time.h"
45 #include "pb_buffer.h"
46 #include "pb_bufmgr.h"
/**
 * Convenience macro (type safe): cast a derived object to a pointer to its
 * embedded base object.
 */
#define SUPER(__derived) (&(__derived)->base)
55 struct pb_cache_manager
;
59 * Wrapper around a pipe buffer which adds delayed destruction.
61 struct pb_cache_buffer
63 struct pb_buffer base
;
65 struct pb_buffer
*buffer
;
66 struct pb_cache_manager
*mgr
;
68 /** Caching time interval */
69 struct util_time start
, end
;
71 struct list_head head
;
75 struct pb_cache_manager
77 struct pb_manager base
;
79 struct pb_manager
*provider
;
84 struct list_head delayed
;
89 static INLINE
struct pb_cache_buffer
*
90 pb_cache_buffer(struct pb_buffer
*buf
)
93 return (struct pb_cache_buffer
*)buf
;
97 static INLINE
struct pb_cache_manager
*
98 pb_cache_manager(struct pb_manager
*mgr
)
101 return (struct pb_cache_manager
*)mgr
;
106 * Actually destroy the buffer.
109 _pb_cache_buffer_destroy(struct pb_cache_buffer
*buf
)
111 struct pb_cache_manager
*mgr
= buf
->mgr
;
113 LIST_DEL(&buf
->head
);
114 assert(mgr
->numDelayed
);
116 assert(!buf
->base
.base
.refcount
);
117 pb_reference(&buf
->buffer
, NULL
);
123 * Free as many cache buffers from the list head as possible.
126 _pb_cache_buffer_list_check_free(struct pb_cache_manager
*mgr
)
128 struct list_head
*curr
, *next
;
129 struct pb_cache_buffer
*buf
;
130 struct util_time now
;
134 curr
= mgr
->delayed
.next
;
136 while(curr
!= &mgr
->delayed
) {
137 buf
= LIST_ENTRY(struct pb_cache_buffer
, curr
, head
);
139 if(!util_time_timeout(&buf
->start
, &buf
->end
, &now
))
142 _pb_cache_buffer_destroy(buf
);
151 pb_cache_buffer_destroy(struct pb_buffer
*_buf
)
153 struct pb_cache_buffer
*buf
= pb_cache_buffer(_buf
);
154 struct pb_cache_manager
*mgr
= buf
->mgr
;
156 pipe_mutex_lock(mgr
->mutex
);
157 assert(buf
->base
.base
.refcount
== 0);
159 _pb_cache_buffer_list_check_free(mgr
);
161 util_time_get(&buf
->start
);
162 util_time_add(&buf
->start
, mgr
->usecs
, &buf
->end
);
163 LIST_ADDTAIL(&buf
->head
, &mgr
->delayed
);
165 pipe_mutex_unlock(mgr
->mutex
);
170 pb_cache_buffer_map(struct pb_buffer
*_buf
,
173 struct pb_cache_buffer
*buf
= pb_cache_buffer(_buf
);
174 return pb_map(buf
->buffer
, flags
);
179 pb_cache_buffer_unmap(struct pb_buffer
*_buf
)
181 struct pb_cache_buffer
*buf
= pb_cache_buffer(_buf
);
182 pb_unmap(buf
->buffer
);
187 pb_cache_buffer_get_base_buffer(struct pb_buffer
*_buf
,
188 struct pb_buffer
**base_buf
,
191 struct pb_cache_buffer
*buf
= pb_cache_buffer(_buf
);
192 pb_get_base_buffer(buf
->buffer
, base_buf
, offset
);
197 pb_cache_buffer_vtbl
= {
198 pb_cache_buffer_destroy
,
200 pb_cache_buffer_unmap
,
201 pb_cache_buffer_get_base_buffer
205 static INLINE boolean
206 pb_cache_is_buffer_compat(struct pb_cache_buffer
*buf
,
208 const struct pb_desc
*desc
)
210 if(buf
->base
.base
.size
< size
)
213 /* be lenient with size */
214 if(buf
->base
.base
.size
>= 2*size
)
217 if(!pb_check_alignment(desc
->alignment
, buf
->base
.base
.alignment
))
220 if(!pb_check_usage(desc
->usage
, buf
->base
.base
.usage
))
227 static struct pb_buffer
*
228 pb_cache_manager_create_buffer(struct pb_manager
*_mgr
,
230 const struct pb_desc
*desc
)
232 struct pb_cache_manager
*mgr
= pb_cache_manager(_mgr
);
233 struct pb_cache_buffer
*buf
;
234 struct pb_cache_buffer
*curr_buf
;
235 struct list_head
*curr
, *next
;
236 struct util_time now
;
238 pipe_mutex_lock(mgr
->mutex
);
241 curr
= mgr
->delayed
.next
;
244 /* search in the expired buffers, freeing them in the process */
246 while(curr
!= &mgr
->delayed
) {
247 curr_buf
= LIST_ENTRY(struct pb_cache_buffer
, curr
, head
);
248 if(!buf
&& pb_cache_is_buffer_compat(curr_buf
, size
, desc
))
250 else if(util_time_timeout(&curr_buf
->start
, &curr_buf
->end
, &now
))
251 _pb_cache_buffer_destroy(curr_buf
);
253 /* This buffer (and all hereafter) are still hot in cache */
259 /* keep searching in the hot buffers */
261 while(curr
!= &mgr
->delayed
) {
262 curr_buf
= LIST_ENTRY(struct pb_cache_buffer
, curr
, head
);
263 if(pb_cache_is_buffer_compat(curr_buf
, size
, desc
)) {
267 /* no need to check the timeout here */
274 LIST_DEL(&buf
->head
);
275 pipe_mutex_unlock(mgr
->mutex
);
276 ++buf
->base
.base
.refcount
;
280 pipe_mutex_unlock(mgr
->mutex
);
282 buf
= CALLOC_STRUCT(pb_cache_buffer
);
286 buf
->buffer
= mgr
->provider
->create_buffer(mgr
->provider
, size
, desc
);
292 assert(buf
->buffer
->base
.refcount
>= 1);
293 assert(pb_check_alignment(desc
->alignment
, buf
->buffer
->base
.alignment
));
294 assert(pb_check_usage(desc
->usage
, buf
->buffer
->base
.usage
));
295 assert(buf
->buffer
->base
.size
>= size
);
297 buf
->base
.base
.refcount
= 1;
298 buf
->base
.base
.alignment
= buf
->buffer
->base
.alignment
;
299 buf
->base
.base
.usage
= buf
->buffer
->base
.usage
;
300 buf
->base
.base
.size
= buf
->buffer
->base
.size
;
302 buf
->base
.vtbl
= &pb_cache_buffer_vtbl
;
310 pb_cache_flush(struct pb_manager
*_mgr
)
312 struct pb_cache_manager
*mgr
= pb_cache_manager(_mgr
);
313 struct list_head
*curr
, *next
;
314 struct pb_cache_buffer
*buf
;
316 pipe_mutex_lock(mgr
->mutex
);
317 curr
= mgr
->delayed
.next
;
319 while(curr
!= &mgr
->delayed
) {
320 buf
= LIST_ENTRY(struct pb_cache_buffer
, curr
, head
);
321 _pb_cache_buffer_destroy(buf
);
325 pipe_mutex_unlock(mgr
->mutex
);
/**
 * pb_manager::destroy callback: flush all cached buffers, then free the
 * manager itself.  Does not destroy the provider.
 */
static void
pb_cache_manager_destroy(struct pb_manager *mgr)
{
   pb_cache_flush(mgr);
   FREE(mgr);
}
338 pb_cache_manager_create(struct pb_manager
*provider
,
341 struct pb_cache_manager
*mgr
;
346 mgr
= CALLOC_STRUCT(pb_cache_manager
);
350 mgr
->base
.destroy
= pb_cache_manager_destroy
;
351 mgr
->base
.create_buffer
= pb_cache_manager_create_buffer
;
352 mgr
->provider
= provider
;
354 LIST_INITHEAD(&mgr
->delayed
);
356 pipe_mutex_init(mgr
->mutex
);