/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/**
 * @file
 * S-lab pool implementation.
 *
 * @sa http://en.wikipedia.org/wiki/Slab_allocation
 *
 * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */
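/*
 * Overview (illustrative note, not part of the original file): two managers
 * are implemented below.  pb_slab_manager hands out equally sized
 * sub-buffers carved out of large "slab" buffers obtained from an
 * underlying provider; pb_slab_range_manager covers a range of buffer sizes
 * by dispatching each request to one of several pb_slab_managers, one per
 * power-of-two bucket, and falls back to the provider for larger requests.
 */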
#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
#include "pipe/p_debug.h"
#include "pipe/p_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"
struct pb_slab;


/**
 * Buffer in a slab.
 *
 * Sub-allocation of a contiguous buffer.
 */
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;

   struct list_head head;

   unsigned mapCount;

   /** Offset relative to the start of the slab buffer. */
   size_t start;

   /** Used when validating, to signal that all mappings are finished. */
   /* TODO: Actually validation does not reach this stage yet. */
   pipe_condvar event;
};
/**
 * Slab -- a contiguous piece of memory.
 */
struct pb_slab
{
   struct list_head head;
   struct list_head freeBuffers;
   size_t numBuffers;
   size_t numFree;

   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   /** Buffer from the provider */
   struct pb_buffer *bo;

   void *virtual;
};
/**
 * Manager of equally sized buffers, sub-allocated from slabs.
 *
 * It adds/removes slabs as needed in order to meet the allocation/destruction
 * of individual buffers.  (See the sizing note after this struct.)
 */
struct pb_slab_manager
{
   struct pb_manager base;

   /** From where we get our buffers */
   struct pb_manager *provider;

   /** Size of the buffers we hand out downstream */
   size_t bufSize;

   /** Size of the buffers we request upstream */
   size_t slabSize;

   /**
    * Alignment and usage to be used when allocating the slab buffers.
    *
    * We can only provide buffers which are consistent (in alignment, usage)
    * with this description.
    */
   struct pb_desc desc;

   /**
    * Partial slabs.
    *
    * Full slabs are not stored in any list. Empty slabs are destroyed
    * immediately.
    */
   struct list_head slabs;

   pipe_mutex mutex;
};
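/*
 * Sizing note (illustrative, with assumed numbers): pb_slab_create() below
 * computes the per-slab buffer count as bo->base.size / bufSize, so e.g.
 * slabSize = 1024*1024 and bufSize = 64*1024 yield 16 sub-buffers per slab,
 * all placed on the slab's freeBuffers list until allocated.
 */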
/**
 * Wrapper around several slabs, therefore capable of handling buffers of
 * multiple sizes.
 *
 * This buffer manager just dispatches buffer allocations to the appropriate
 * slab manager, according to the requested buffer size, or bypasses the slab
 * managers altogether for even greater sizes (see the bucket example below).
 *
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   size_t minBufSize;
   size_t maxBufSize;

   /** @sa pb_slab_manager::desc */
   struct pb_desc desc;

   unsigned numBuckets;
   size_t *bucketSizes;

   /** Array of pb_slab_manager, one for each bucket size */
   struct pb_manager **buckets;
};
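/*
 * Bucket example (illustrative, with assumed numbers): with
 * minBufSize = 4096 and maxBufSize = 65536 the constructor creates
 * power-of-two buckets of 4, 8, 16, 32 and 64 KiB.  A request for
 * 10000 bytes is served by the first bucket whose size covers the
 * request, i.e. the 16 KiB slab manager; anything above 64 KiB goes
 * straight to the provider.
 */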
static INLINE struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   return (struct pb_slab_buffer *)buf;
}
static INLINE struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   return (struct pb_slab_manager *)mgr;
}
static INLINE struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   return (struct pb_slab_range_manager *)mgr;
}
/**
 * Delete a buffer from the slab delayed list and put
 * it on the slab FREE list.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   pipe_mutex_lock(mgr->mutex);

   assert(buf->base.base.refcount == 0);

   buf->mapCount = 0;

   /* Return the buffer to its slab's free list */
   LIST_DEL(list);
   LIST_ADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

   /* The slab was full; it is partial again, so put it back on the
    * partial slab list */
   if (slab->head.next == &slab->head)
      LIST_ADDTAIL(&slab->head, &mgr->slabs);

   /* If the slab becomes totally empty, free it */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DELINIT(list);
      pb_reference(&slab->bo, NULL);
      FREE(slab->buffers);
      FREE(slab);
   }

   pipe_mutex_unlock(mgr->mutex);
}
static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   /* All sub-buffers share the slab's persistent mapping */
   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}
static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   if (buf->mapCount == 0)
      pipe_condvar_broadcast(buf->event);
}
static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               unsigned *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}
static const struct pb_vtbl
pb_slab_buffer_vtbl = {
      pb_slab_buffer_destroy,
      pb_slab_buffer_map,
      pb_slab_buffer_unmap,
      pb_slab_buffer_get_base_buffer
};
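/*
 * Note (added for exposition): the vtbl above relies on positional
 * initialization, so the entry order must match the member order of
 * struct pb_vtbl in pb_buffer.h (here: destroy, map, unmap,
 * get_base_buffer).
 */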
/**
 * Create a new slab.
 *
 * Called when we run out of free slabs.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Get one large buffer from the provider; it is sub-divided below */
   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if (!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note down the slab virtual address. All mappings are accessed directly
    * through this address so it is required that the buffer is pinned. */
   slab->virtual = pb_map(slab->bo,
                          PIPE_BUFFER_USAGE_CPU_READ |
                          PIPE_BUFFER_USAGE_CPU_WRITE);
   if (!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }
   pb_unmap(slab->bo);

   numBuffers = slab->bo->base.size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   LIST_INITHEAD(&slab->head);
   LIST_INITHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   /* Initialize each sub-buffer and place it on the slab's free list */
   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      buf->base.base.refcount = 0;
      buf->base.base.size = mgr->bufSize;
      buf->base.base.alignment = 0;
      buf->base.base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      pipe_condvar_init(buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Add this slab to the list of partial slabs */
   LIST_ADDTAIL(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}
static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              size_t size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if (size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if (!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if (!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   /* check if the requested usage is compatible */
   assert(pb_check_usage(desc->usage, mgr->desc.usage));
   if (!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;

   pipe_mutex_lock(mgr->mutex);

   /* Create a new slab, if we run out of partial slabs */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         pipe_mutex_unlock(mgr->mutex);
         return NULL;
      }
   }

   /* Allocate the buffer from a partial (or just created) slab */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* If totally full remove from the partial slab list */
   if (--slab->numFree == 0)
      LIST_DELINIT(list);

   list = slab->freeBuffers.next;
   LIST_DELINIT(list);

   pipe_mutex_unlock(mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   ++buf->base.base.refcount;
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;

   return &buf->base;
}
static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}
static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}
struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       size_t bufSize,
                       size_t slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;
   mgr->base.flush = pb_slab_manager_flush;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   LIST_INITHEAD(&mgr->slabs);

   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}
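/*
 * Usage sketch (illustrative only; "provider" stands for whatever
 * underlying pb_manager the caller already has):
 *
 *    struct pb_desc desc;
 *    struct pb_manager *slab_mgr;
 *    struct pb_buffer *buf;
 *
 *    desc.alignment = 64;
 *    desc.usage = PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE;
 *
 *    slab_mgr = pb_slab_manager_create(provider, 16*1024, 1024*1024, &desc);
 *    buf = slab_mgr->create_buffer(slab_mgr, 16*1024, &desc);
 *    ...
 *    pb_reference(&buf, NULL);
 *    slab_mgr->destroy(slab_mgr);
 */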
static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    size_t size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   size_t bufSize;
   unsigned i;

   /* Find the smallest bucket whose buffer size covers the request */
   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if (bufSize >= size)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}
static void
pb_slab_range_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);

   /* Individual slabs don't hold any temporary buffers so no need to call them */

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}
static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}
struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             size_t minBufSize,
                             size_t maxBufSize,
                             size_t slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   size_t bufSize;
   unsigned i;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
   mgr->base.flush = pb_slab_range_manager_flush;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   /* One power-of-two bucket for each size between minBufSize and maxBufSize */
   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while (bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err;

   /* Create one slab manager per bucket, doubling the buffer size each time */
   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if (!mgr->buckets[i])
         goto out_err;
      bufSize *= 2;
   }

   return &mgr->base;

out_err:
   if (mgr->buckets) {
      for (i = 0; i < mgr->numBuckets; ++i)
         if (mgr->buckets[i])
            mgr->buckets[i]->destroy(mgr->buckets[i]);
      FREE(mgr->buckets);
   }
   FREE(mgr);
   return NULL;
}
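/*
 * Usage sketch (illustrative only; "provider" is assumed to come from
 * elsewhere): a range manager covering 4 KiB .. 64 KiB requests, with
 * 1 MiB slabs; larger requests fall through to the provider.
 *
 *    struct pb_desc desc;
 *    struct pb_manager *range_mgr;
 *    struct pb_buffer *buf;
 *
 *    desc.alignment = 64;
 *    desc.usage = PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE;
 *
 *    range_mgr = pb_slab_range_manager_create(provider, 4*1024, 64*1024,
 *                                             1024*1024, &desc);
 *    buf = range_mgr->create_buffer(range_mgr, 10000, &desc);
 *    ...
 *    pb_reference(&buf, NULL);
 *    range_mgr->destroy(range_mgr);
 */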