src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
/**************************************************************************
 *
 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer cache.
 *
 * A manager that sits on top of another (the provider) and delays the
 * destruction of its buffers, so that later allocation requests of
 * compatible size and usage can be satisfied from the cache instead of
 * going back to the provider.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */


#include "pipe/p_compiler.h"
#include "pipe/p_debug.h"
#include "pipe/p_winsys.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)
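
/*
 * For illustration (hypothetical snippet; SUPER has no call sites in this
 * file): given a derived wrapper such as struct pb_cache_buffer below,
 *
 *    struct pb_buffer *base = SUPER(buf);   (same as &buf->base)
 *
 * the macro upcasts to the embedded base struct; unlike a plain cast it
 * fails to compile if the argument's type has no "base" member.
 */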


struct pb_cache_manager;


/**
 * Wrapper around a pipe buffer which adds delayed destruction.
 */
struct pb_cache_buffer
{
   struct pb_buffer base;

   /** The wrapped buffer, owned by this wrapper */
   struct pb_buffer *buffer;
   struct pb_cache_manager *mgr;

   /** Caching time interval */
   struct util_time start, end;

   struct list_head head;
};


struct pb_cache_manager
{
   struct pb_manager base;

   /** The manager that actually creates and destroys the buffers */
   struct pb_manager *provider;
   /** How long (in microseconds) unused buffers linger in the cache */
   unsigned usecs;

   pipe_mutex mutex;

   /** Unused buffers, ordered by increasing expiration time */
   struct list_head delayed;
   size_t numDelayed;
};


static INLINE struct pb_cache_buffer *
pb_cache_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_cache_buffer *)buf;
}


static INLINE struct pb_cache_manager *
pb_cache_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_cache_manager *)mgr;
}


/**
 * Actually destroy the buffer.
 */
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   LIST_DEL(&buf->head);
   assert(mgr->numDelayed);
   --mgr->numDelayed;
   assert(!buf->base.base.refcount);
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}


/**
 * Free as many cache buffers from the list head as possible.
 *
 * The delayed list is ordered by increasing expiration time, so scanning
 * can stop at the first buffer that has not expired yet.
 */
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;
   struct util_time now;

   util_time_get(&now);

   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);

      if(!util_time_timeout(&buf->start, &buf->end, &now))
         break;

      _pb_cache_buffer_destroy(buf);

      curr = next;
      next = curr->next;
   }
}
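
/*
 * Timing example for the check above (illustrative): with usecs = 1000000,
 * a buffer released at time t expires at t + 1s.  A scan at t + 0.5s frees
 * nothing and stops at that buffer; a scan at t + 1.5s destroys it along
 * with every buffer released before it.
 */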


static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(buf->base.base.refcount == 0);

   _pb_cache_buffer_list_check_free(mgr);

   /* Rather than destroying the buffer now, stamp it with an expiration
    * time and put it on the delayed list, where create_buffer() may find
    * and recycle it. */
   util_time_get(&buf->start);
   util_time_add(&buf->start, mgr->usecs, &buf->end);
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   pipe_mutex_unlock(mgr->mutex);
}


static void *
pb_cache_buffer_map(struct pb_buffer *_buf,
                    unsigned flags)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_map(buf->buffer, flags);
}


static void
pb_cache_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_unmap(buf->buffer);
}


static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_validate(buf->buffer, vl, flags);
}


static void
pb_cache_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                unsigned *offset)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
}


const struct pb_vtbl
pb_cache_buffer_vtbl = {
      pb_cache_buffer_destroy,
      pb_cache_buffer_map,
      pb_cache_buffer_unmap,
      pb_cache_buffer_validate,
      pb_cache_buffer_fence,
      pb_cache_buffer_get_base_buffer
};
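
/*
 * The positional initializer above must match the member order of struct
 * pb_vtbl in pb_buffer.h.  A sketch with C99 designated initializers
 * (assuming the members are named destroy, map, unmap, validate, fence and
 * get_base_buffer, as the wrappers above suggest) would make that pairing
 * explicit:
 *
 *    const struct pb_vtbl pb_cache_buffer_vtbl = {
 *       .destroy = pb_cache_buffer_destroy,
 *       .map = pb_cache_buffer_map,
 *       .unmap = pb_cache_buffer_unmap,
 *       .validate = pb_cache_buffer_validate,
 *       .fence = pb_cache_buffer_fence,
 *       .get_base_buffer = pb_cache_buffer_get_base_buffer
 *    };
 */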


static INLINE boolean
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
                          size_t size,
                          const struct pb_desc *desc)
{
   if(buf->base.base.size < size)
      return FALSE;

   /* Be lenient with size: a buffer up to twice the requested size is an
    * acceptable match; anything larger would waste too much memory. */
   if(buf->base.base.size >= 2*size)
      return FALSE;

   if(!pb_check_alignment(desc->alignment, buf->base.base.alignment))
      return FALSE;

   if(!pb_check_usage(desc->usage, buf->base.base.usage))
      return FALSE;

   return TRUE;
}
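
/*
 * Worked example of the size window above (illustrative): a request for
 * 100000 bytes matches any cached buffer of 100000..199999 bytes; a
 * 200000-byte buffer trips the 2*size check and is rejected.
 */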


static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                               size_t size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   struct util_time now;

   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;

   /* search in the expired buffers, freeing them in the process */
   util_time_get(&now);
   while(curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc))
         buf = curr_buf;
      else if(util_time_timeout(&curr_buf->start, &curr_buf->end, &now))
         _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and every one after it) is still hot in the cache */
         break;
      curr = next;
      next = curr->next;
   }

   /* keep searching in the hot buffers */
   if(!buf) {
      while(curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         if(pb_cache_is_buffer_compat(curr_buf, size, desc)) {
            buf = curr_buf;
            break;
         }
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }

   if(buf) {
      LIST_DEL(&buf->head);
      /* reused buffers leave the delayed list, so keep the count in sync */
      assert(mgr->numDelayed);
      --mgr->numDelayed;
      pipe_mutex_unlock(mgr->mutex);
      ++buf->base.base.refcount;
      return &buf->base;
   }

   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if(!buf)
      return NULL;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   if(!buf->buffer) {
      FREE(buf);
      return NULL;
   }

   assert(buf->buffer->base.refcount >= 1);
   assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment));
   assert(pb_check_usage(desc->usage, buf->buffer->base.usage));
   assert(buf->buffer->base.size >= size);

   buf->base.base.refcount = 1;
   buf->base.base.alignment = buf->buffer->base.alignment;
   buf->base.base.usage = buf->buffer->base.usage;
   buf->base.base.size = buf->buffer->base.size;

   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;

   return &buf->base;
}


static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      _pb_cache_buffer_destroy(buf);
      curr = next;
      next = curr->next;
   }
   pipe_mutex_unlock(mgr->mutex);

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_cache_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   pb_cache_manager_flush(_mgr);
   /* release the mutex created in pb_cache_manager_create() */
   pipe_mutex_destroy(mgr->mutex);
   FREE(mgr);
}


struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
                        unsigned usecs)
{
   struct pb_cache_manager *mgr;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}
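
/*
 * Usage sketch (illustrative only; "provider" stands for any underlying
 * pb_manager implementation obtained elsewhere):
 *
 *    struct pb_manager *cached;
 *    struct pb_desc desc;
 *    struct pb_buffer *buf;
 *
 *    // keep freed buffers for up to 1000000 us = 1 second
 *    cached = pb_cache_manager_create(provider, 1000000);
 *
 *    desc.alignment = 64;
 *    desc.usage = 0;
 *
 *    // the first allocation comes from the provider
 *    buf = cached->create_buffer(cached, 4096, &desc);
 *    // releasing the last reference sends the buffer to the cache
 *    pb_reference(&buf, NULL);
 *    // a compatible request shortly after recycles it instead
 *    buf = cached->create_buffer(cached, 4096, &desc);
 *    pb_reference(&buf, NULL);
 *
 *    // destroying the manager flushes the cache first
 *    cached->destroy(cached);
 */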