/**************************************************************************
 *
 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * \file
 * Buffer cache manager.
 *
 * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
37 #include "pipe/p_compiler.h"
38 #include "pipe/p_debug.h"
39 #include "pipe/p_winsys.h"
40 #include "pipe/p_thread.h"
41 #include "pipe/p_util.h"
42 #include "util/u_double_list.h"
43 #include "util/u_time.h"
45 #include "pb_buffer.h"
46 #include "pb_bufmgr.h"
/**
 * Convenience macro (type safe): upcast a derived object to a pointer to
 * its embedded `base` member.
 */
#define SUPER(__derived) (&(__derived)->base)
55 struct pb_cache_manager
;
59 * Wrapper around a pipe buffer which adds delayed destruction.
61 struct pb_cache_buffer
63 struct pb_buffer base
;
65 struct pb_buffer
*buffer
;
66 struct pb_cache_manager
*mgr
;
68 /** Caching time interval */
69 struct util_time start
, end
;
71 struct list_head head
;
75 struct pb_cache_manager
77 struct pb_manager base
;
79 struct pb_manager
*provider
;
82 _glthread_Mutex mutex
;
84 struct list_head delayed
;
89 static INLINE
struct pb_cache_buffer
*
90 pb_cache_buffer(struct pb_buffer
*buf
)
93 return (struct pb_cache_buffer
*)buf
;
97 static INLINE
struct pb_cache_manager
*
98 pb_cache_manager(struct pb_manager
*mgr
)
101 return (struct pb_cache_manager
*)mgr
;
106 * Actually destroy the buffer.
109 _pb_cache_buffer_destroy(struct pb_cache_buffer
*buf
)
111 struct pb_cache_manager
*mgr
= buf
->mgr
;
113 LIST_DEL(&buf
->head
);
114 assert(mgr
->numDelayed
);
116 assert(!buf
->base
.base
.refcount
);
117 pb_reference(&buf
->buffer
, NULL
);
123 * Free as many cache buffers from the list head as possible.
126 _pb_cache_buffer_list_check_free(struct pb_cache_manager
*mgr
)
128 struct list_head
*curr
, *next
;
129 struct pb_cache_buffer
*buf
;
130 struct util_time now
;
134 curr
= mgr
->delayed
.next
;
136 while(curr
!= &mgr
->delayed
) {
137 buf
= LIST_ENTRY(struct pb_cache_buffer
, curr
, head
);
139 if(!util_time_timeout(&buf
->start
, &buf
->end
, &now
))
142 _pb_cache_buffer_destroy(buf
);
151 pb_cache_buffer_destroy(struct pb_buffer
*_buf
)
153 struct pb_cache_buffer
*buf
= pb_cache_buffer(_buf
);
154 struct pb_cache_manager
*mgr
= buf
->mgr
;
156 _glthread_LOCK_MUTEX(mgr
->mutex
);
157 assert(buf
->base
.base
.refcount
== 0);
159 _pb_cache_buffer_list_check_free(mgr
);
161 util_time_get(&buf
->start
);
162 util_time_add(&buf
->start
, mgr
->usecs
, &buf
->end
);
163 LIST_ADDTAIL(&buf
->head
, &mgr
->delayed
);
165 _glthread_UNLOCK_MUTEX(mgr
->mutex
);
170 pb_cache_buffer_map(struct pb_buffer
*_buf
,
173 struct pb_cache_buffer
*buf
= pb_cache_buffer(_buf
);
174 return pb_map(buf
->buffer
, flags
);
179 pb_cache_buffer_unmap(struct pb_buffer
*_buf
)
181 struct pb_cache_buffer
*buf
= pb_cache_buffer(_buf
);
182 pb_unmap(buf
->buffer
);
187 pb_cache_buffer_get_base_buffer(struct pb_buffer
*_buf
,
188 struct pb_buffer
**base_buf
,
191 struct pb_cache_buffer
*buf
= pb_cache_buffer(_buf
);
192 pb_get_base_buffer(buf
->buffer
, base_buf
, offset
);
197 pb_cache_buffer_vtbl
= {
198 pb_cache_buffer_destroy
,
200 pb_cache_buffer_unmap
,
201 pb_cache_buffer_get_base_buffer
205 static INLINE boolean
206 pb_cache_is_buffer_compat(struct pb_cache_buffer
*buf
,
208 const struct pb_desc
*desc
)
210 /* TODO: be more lenient with size */
211 if(buf
->base
.base
.size
!= size
)
214 if(!pb_check_alignment(desc
->alignment
, buf
->base
.base
.alignment
))
217 /* XXX: check usage too? */
223 static struct pb_buffer
*
224 pb_cache_manager_create_buffer(struct pb_manager
*_mgr
,
226 const struct pb_desc
*desc
)
228 struct pb_cache_manager
*mgr
= pb_cache_manager(_mgr
);
229 struct pb_cache_buffer
*buf
;
230 struct pb_cache_buffer
*curr_buf
;
231 struct list_head
*curr
, *next
;
232 struct util_time now
;
234 _glthread_LOCK_MUTEX(mgr
->mutex
);
237 curr
= mgr
->delayed
.next
;
240 /* search in the expired buffers, freeing them in the process */
242 while(curr
!= &mgr
->delayed
) {
243 curr_buf
= LIST_ENTRY(struct pb_cache_buffer
, curr
, head
);
244 if(!buf
&& pb_cache_is_buffer_compat(curr_buf
, size
, desc
))
246 else if(util_time_timeout(&curr_buf
->start
, &curr_buf
->end
, &now
))
247 _pb_cache_buffer_destroy(curr_buf
);
252 /* keep searching in the hot buffers */
253 while(!buf
&& curr
!= &mgr
->delayed
) {
254 curr_buf
= LIST_ENTRY(struct pb_cache_buffer
, curr
, head
);
255 if(pb_cache_is_buffer_compat(curr_buf
, size
, desc
))
262 LIST_DEL(&buf
->head
);
263 _glthread_UNLOCK_MUTEX(mgr
->mutex
);
264 ++buf
->base
.base
.refcount
;
268 _glthread_UNLOCK_MUTEX(mgr
->mutex
);
270 buf
= CALLOC_STRUCT(pb_cache_buffer
);
274 buf
->buffer
= mgr
->provider
->create_buffer(mgr
->provider
, size
, desc
);
280 assert(buf
->buffer
->base
.refcount
>= 1);
281 assert(pb_check_alignment(desc
->alignment
, buf
->buffer
->base
.alignment
));
282 assert((buf
->buffer
->base
.usage
& desc
->usage
) == desc
->usage
);
283 assert(buf
->buffer
->base
.size
>= size
);
285 buf
->base
.base
.refcount
= 1;
286 buf
->base
.base
.alignment
= buf
->buffer
->base
.alignment
;
287 buf
->base
.base
.usage
= buf
->buffer
->base
.usage
;
288 buf
->base
.base
.size
= buf
->buffer
->base
.size
;
290 buf
->base
.vtbl
= &pb_cache_buffer_vtbl
;
298 pb_cache_manager_destroy(struct pb_manager
*_mgr
)
300 struct pb_cache_manager
*mgr
= pb_cache_manager(_mgr
);
301 struct list_head
*curr
, *next
;
302 struct pb_cache_buffer
*buf
;
304 _glthread_LOCK_MUTEX(mgr
->mutex
);
305 curr
= mgr
->delayed
.next
;
307 while(curr
!= &mgr
->delayed
) {
308 buf
= LIST_ENTRY(struct pb_cache_buffer
, curr
, head
);
309 _pb_cache_buffer_destroy(buf
);
313 _glthread_UNLOCK_MUTEX(mgr
->mutex
);
320 pb_cache_manager_create(struct pb_manager
*provider
,
323 struct pb_cache_manager
*mgr
;
325 mgr
= (struct pb_cache_manager
*)CALLOC(1, sizeof(*mgr
));
329 mgr
->base
.destroy
= pb_cache_manager_destroy
;
330 mgr
->base
.create_buffer
= pb_cache_manager_create_buffer
;
331 mgr
->provider
= provider
;
333 LIST_INITHEAD(&mgr
->delayed
);
335 _glthread_INIT_MUTEX(mgr
->mutex
);