/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 *
 * Permission is hereby granted, FREE of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/**
 * @file
 * S-lab pool implementation.
 *
 * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */
#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
#include "pipe/p_debug.h"
#include "pipe/p_thread.h"
#include "pipe/p_defines.h"
#include "pipe/p_util.h"

#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"
54 struct pb_buffer base
;
57 struct list_head head
;
65 struct list_head head
;
66 struct list_head freeBuffers
;
69 struct pb_slab_buffer
*buffers
;
70 struct pb_slab_manager
*mgr
;
76 struct pb_slab_manager
78 struct pb_manager base
;
80 struct pb_manager
*provider
;
85 struct list_head slabs
;
86 struct list_head freeSlabs
;
88 _glthread_Mutex mutex
;
92 * The data of this structure remains constant after
93 * initialization and thus needs no mutex protection.
95 struct pb_slab_range_manager
97 struct pb_manager base
;
99 struct pb_manager
*provider
;
106 struct pb_manager
**buckets
;
110 static INLINE
struct pb_slab_buffer
*
111 pb_slab_buffer(struct pb_buffer
*buf
)
114 return (struct pb_slab_buffer
*)buf
;
118 static INLINE
struct pb_slab_manager
*
119 pb_slab_manager(struct pb_manager
*mgr
)
122 return (struct pb_slab_manager
*)mgr
;
126 static INLINE
struct pb_slab_range_manager
*
127 pb_slab_range_manager(struct pb_manager
*mgr
)
130 return (struct pb_slab_range_manager
*)mgr
;
135 * Delete a buffer from the slab delayed list and put
136 * it on the slab FREE list.
139 pb_slab_buffer_destroy(struct pb_buffer
*_buf
)
141 struct pb_slab_buffer
*buf
= pb_slab_buffer(_buf
);
142 struct pb_slab
*slab
= buf
->slab
;
143 struct pb_slab_manager
*mgr
= slab
->mgr
;
144 struct list_head
*list
= &buf
->head
;
146 _glthread_LOCK_MUTEX(mgr
->mutex
);
148 assert(buf
->base
.base
.refcount
== 0);
153 LIST_ADDTAIL(list
, &slab
->freeBuffers
);
156 if (slab
->head
.next
== &slab
->head
)
157 LIST_ADDTAIL(&slab
->head
, &mgr
->slabs
);
159 if (slab
->numFree
== slab
->numBuffers
) {
162 LIST_ADDTAIL(list
, &mgr
->freeSlabs
);
165 if (mgr
->slabs
.next
== &mgr
->slabs
|| slab
->numFree
166 != slab
->numBuffers
) {
168 struct list_head
*next
;
170 for (list
= mgr
->freeSlabs
.next
, next
= list
->next
; list
171 != &mgr
->freeSlabs
; list
= next
, next
= list
->next
) {
173 slab
= LIST_ENTRY(struct pb_slab
, list
, head
);
176 pb_reference(&slab
->bo
, NULL
);
182 _glthread_UNLOCK_MUTEX(mgr
->mutex
);
187 pb_slab_buffer_map(struct pb_buffer
*_buf
,
190 struct pb_slab_buffer
*buf
= pb_slab_buffer(_buf
);
193 return (void *) ((uint8_t *) buf
->slab
->virtual + buf
->start
);
198 pb_slab_buffer_unmap(struct pb_buffer
*_buf
)
200 struct pb_slab_buffer
*buf
= pb_slab_buffer(_buf
);
203 if (buf
->mapCount
== 0)
204 _glthread_COND_BROADCAST(buf
->event
);
209 pb_slab_buffer_get_base_buffer(struct pb_buffer
*_buf
,
210 struct pb_buffer
**base_buf
,
213 struct pb_slab_buffer
*buf
= pb_slab_buffer(_buf
);
214 pb_get_base_buffer(buf
->slab
->bo
, base_buf
, offset
);
215 *offset
+= buf
->start
;
219 static const struct pb_vtbl
220 pb_slab_buffer_vtbl
= {
221 pb_slab_buffer_destroy
,
223 pb_slab_buffer_unmap
,
224 pb_slab_buffer_get_base_buffer
228 static enum pipe_error
229 pb_slab_create(struct pb_slab_manager
*mgr
)
231 struct pb_slab
*slab
;
232 struct pb_slab_buffer
*buf
;
237 slab
= CALLOC_STRUCT(pb_slab
);
239 return PIPE_ERROR_OUT_OF_MEMORY
;
242 * FIXME: We should perhaps allow some variation in slabsize in order
243 * to efficiently reuse slabs.
246 slab
->bo
= mgr
->provider
->create_buffer(mgr
->provider
, mgr
->slabSize
, &mgr
->desc
);
248 ret
= PIPE_ERROR_OUT_OF_MEMORY
;
252 slab
->virtual = pb_map(slab
->bo
,
253 PIPE_BUFFER_USAGE_CPU_READ
|
254 PIPE_BUFFER_USAGE_CPU_WRITE
);
256 ret
= PIPE_ERROR_OUT_OF_MEMORY
;
262 numBuffers
= slab
->bo
->base
.size
/ mgr
->bufSize
;
264 slab
->buffers
= CALLOC(numBuffers
, sizeof(*slab
->buffers
));
265 if (!slab
->buffers
) {
266 ret
= PIPE_ERROR_OUT_OF_MEMORY
;
270 LIST_INITHEAD(&slab
->head
);
271 LIST_INITHEAD(&slab
->freeBuffers
);
272 slab
->numBuffers
= numBuffers
;
277 for (i
=0; i
< numBuffers
; ++i
) {
278 buf
->base
.base
.refcount
= 0;
279 buf
->base
.base
.size
= mgr
->bufSize
;
280 buf
->base
.base
.alignment
= 0;
281 buf
->base
.base
.usage
= 0;
282 buf
->base
.vtbl
= &pb_slab_buffer_vtbl
;
284 buf
->start
= i
* mgr
->bufSize
;
286 _glthread_INIT_COND(buf
->event
);
287 LIST_ADDTAIL(&buf
->head
, &slab
->freeBuffers
);
292 LIST_ADDTAIL(&slab
->head
, &mgr
->slabs
);
297 pb_reference(&slab
->bo
, NULL
);
304 static struct pb_buffer
*
305 pb_slab_manager_create_buffer(struct pb_manager
*_mgr
,
307 const struct pb_desc
*desc
)
309 struct pb_slab_manager
*mgr
= pb_slab_manager(_mgr
);
310 static struct pb_slab_buffer
*buf
;
311 struct pb_slab
*slab
;
312 struct list_head
*list
;
315 assert(size
<= mgr
->bufSize
);
316 if(size
> mgr
->bufSize
)
319 /* check if we can provide the requested alignment */
320 assert(pb_check_alignment(desc
->alignment
, mgr
->desc
.alignment
));
321 if(!pb_check_alignment(desc
->alignment
, mgr
->desc
.alignment
))
323 assert(pb_check_alignment(desc
->alignment
, mgr
->bufSize
));
324 if(!pb_check_alignment(desc
->alignment
, mgr
->bufSize
))
327 /* XXX: check for compatible buffer usage too? */
329 _glthread_LOCK_MUTEX(mgr
->mutex
);
330 if (mgr
->slabs
.next
== &mgr
->slabs
) {
331 (void) pb_slab_create(mgr
);
332 if (mgr
->slabs
.next
== &mgr
->slabs
) {
333 _glthread_UNLOCK_MUTEX(mgr
->mutex
);
337 list
= mgr
->slabs
.next
;
338 slab
= LIST_ENTRY(struct pb_slab
, list
, head
);
339 if (--slab
->numFree
== 0)
342 list
= slab
->freeBuffers
.next
;
345 _glthread_UNLOCK_MUTEX(mgr
->mutex
);
346 buf
= LIST_ENTRY(struct pb_slab_buffer
, list
, head
);
348 ++buf
->base
.base
.refcount
;
349 buf
->base
.base
.alignment
= desc
->alignment
;
350 buf
->base
.base
.usage
= desc
->usage
;
357 pb_slab_manager_destroy(struct pb_manager
*_mgr
)
359 struct pb_slab_manager
*mgr
= pb_slab_manager(_mgr
);
361 /* TODO: cleanup all allocated buffers */
367 pb_slab_manager_create(struct pb_manager
*provider
,
370 const struct pb_desc
*desc
)
372 struct pb_slab_manager
*mgr
;
374 mgr
= CALLOC_STRUCT(pb_slab_manager
);
378 mgr
->base
.destroy
= pb_slab_manager_destroy
;
379 mgr
->base
.create_buffer
= pb_slab_manager_create_buffer
;
381 mgr
->provider
= provider
;
382 mgr
->bufSize
= bufSize
;
383 mgr
->slabSize
= slabSize
;
386 LIST_INITHEAD(&mgr
->slabs
);
387 LIST_INITHEAD(&mgr
->freeSlabs
);
389 _glthread_INIT_MUTEX(mgr
->mutex
);
395 static struct pb_buffer
*
396 pb_slab_range_manager_create_buffer(struct pb_manager
*_mgr
,
398 const struct pb_desc
*desc
)
400 struct pb_slab_range_manager
*mgr
= pb_slab_range_manager(_mgr
);
404 bufSize
= mgr
->minBufSize
;
405 for (i
= 0; i
< mgr
->numBuckets
; ++i
) {
407 return mgr
->buckets
[i
]->create_buffer(mgr
->buckets
[i
], size
, desc
);
411 /* Fall back to allocate a buffer object directly from the provider. */
412 return mgr
->provider
->create_buffer(mgr
->provider
, size
, desc
);
417 pb_slab_range_manager_destroy(struct pb_manager
*_mgr
)
419 struct pb_slab_range_manager
*mgr
= pb_slab_range_manager(_mgr
);
422 for (i
= 0; i
< mgr
->numBuckets
; ++i
)
423 mgr
->buckets
[i
]->destroy(mgr
->buckets
[i
]);
425 FREE(mgr
->bucketSizes
);
431 pb_slab_range_manager_create(struct pb_manager
*provider
,
435 const struct pb_desc
*desc
)
437 struct pb_slab_range_manager
*mgr
;
441 mgr
= CALLOC_STRUCT(pb_slab_range_manager
);
445 mgr
->base
.destroy
= pb_slab_range_manager_destroy
;
446 mgr
->base
.create_buffer
= pb_slab_range_manager_create_buffer
;
448 mgr
->provider
= provider
;
449 mgr
->minBufSize
= minBufSize
;
450 mgr
->maxBufSize
= maxBufSize
;
453 bufSize
= minBufSize
;
454 while(bufSize
< maxBufSize
) {
459 mgr
->buckets
= CALLOC(mgr
->numBuckets
, sizeof(*mgr
->buckets
));
463 bufSize
= minBufSize
;
464 for (i
= 0; i
< mgr
->numBuckets
; ++i
) {
465 mgr
->buckets
[i
] = pb_slab_manager_create(provider
, bufSize
, slabSize
, desc
);
474 for (i
= 0; i
< mgr
->numBuckets
; ++i
)
476 mgr
->buckets
[i
]->destroy(mgr
->buckets
[i
]);