/**************************************************************************
 *
 * Copyright 2006-2008 VMware, Inc., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/**
 * @file
 * Slab pool implementation.
 *
 * @sa http://en.wikipedia.org/wiki/Slab_allocation
 *
 * @author Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
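
/*
 * Overview of the pieces defined below (a summary added for orientation,
 * derived from the code in this file):
 *
 *    pb_slab_range_manager -- dispatches each allocation, by size, to one of
 *       pb_slab_manager    -- which sub-allocates fixed-size buffers out of
 *          pb_slab         -- one large buffer obtained from the provider,
 *             pb_slab_buffer -- a single sub-allocation handed to the user.
 */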

#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"

struct pb_slab;


/**
 * Sub-allocation of a contiguous buffer.
 */
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;

   struct list_head head;

   unsigned mapCount;

   /** Offset relative to the start of the slab buffer. */
   pb_size start;

   /** Use when validating, to signal that all mappings are finished */
   /* TODO: Actually validation does not reach this stage yet */
   cnd_t event;
};

/**
 * Slab -- a contiguous piece of memory.
 */
struct pb_slab
{
   struct list_head head;
   struct list_head freeBuffers;
   pb_size numBuffers;
   pb_size numFree;

   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   /** Buffer from the provider */
   struct pb_buffer *bo;

   void *virtual;
};

/**
 * Slab manager: it adds/removes slabs as needed in order to meet the
 * allocation/destruction of individual buffers.
 */
struct pb_slab_manager
{
   struct pb_manager base;

   /** From where we get our buffers */
   struct pb_manager *provider;

   /** Size of the buffers we hand on downstream */
   pb_size bufSize;

   /** Size of the buffers we request upstream */
   pb_size slabSize;

   /**
    * Alignment, usage to be used to allocate the slab buffers.
    *
    * We can only provide buffers which are consistent (in alignment, usage)
    * with this description.
    */
   struct pb_desc desc;

   /**
    * Partial slabs.
    *
    * Full slabs are not stored in any list. Empty slabs are destroyed
    * immediately.
    */
   struct list_head slabs;

   mtx_t mutex;
};
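
/*
 * Usage sketch (illustrative values only, not taken from real callers):
 * given a filled-in struct pb_desc desc, a manager that hands out 4 KiB
 * buffers carved from 256 KiB slabs requested from "provider" would be
 * created roughly like this:
 *
 *    struct pb_manager *mgr =
 *       pb_slab_manager_create(provider, 4 * 1024, 256 * 1024, &desc);
 */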

/**
 * Wrapper around several slabs, therefore capable of handling buffers of
 * multiple sizes.
 *
 * This buffer manager just dispatches buffer allocations to the appropriate
 * slab manager, according to the requested buffer size, or bypasses the slab
 * managers altogether for even greater sizes.
 *
 * The data of this structure remains constant after initialization and thus
 * needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   pb_size minBufSize;
   pb_size maxBufSize;

   /** @sa pb_slab_manager::desc */
   struct pb_desc desc;

   unsigned numBuckets;
   pb_size *bucketSizes;

   /** Array of pb_slab_manager, one for each bucket size */
   struct pb_manager **buckets;
};

static inline struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   return (struct pb_slab_buffer *)buf;
}


static inline struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   return (struct pb_slab_manager *)mgr;
}


static inline struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   return (struct pb_slab_range_manager *)mgr;
}


/**
 * Return a buffer to its slab's free list and, if the slab becomes
 * completely free, destroy the slab itself.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   mtx_lock(&mgr->mutex);

   assert(!pipe_is_referenced(&buf->base.reference));

   buf->mapCount = 0;

   /* Move the buffer back onto its slab's free list */
   list_del(list);
   list_addtail(list, &slab->freeBuffers);
   slab->numFree++;

   /* A full slab is kept off the manager's list; re-add it now that it has
    * a free buffer again */
   if (slab->head.next == &slab->head)
      list_addtail(&slab->head, &mgr->slabs);

   /* If the slab becomes totally empty, free it */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      list_delinit(list);
      pb_reference(&slab->bo, NULL);
      FREE(slab->buffers);
      FREE(slab);
   }

   mtx_unlock(&mgr->mutex);
}


static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   enum pb_usage_flags flags,
                   void *flush_ctx)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   /* XXX: it will be necessary to remap here to propagate flush_ctx */

   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   /* Wake up any thread waiting for all mappings to be released */
   if (buf->mapCount == 0)
      cnd_broadcast(&buf->event);
}


static enum pipe_error
pb_slab_buffer_validate(struct pb_buffer *_buf,
                        struct pb_validate *vl,
                        enum pb_usage_flags flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   return pb_validate(buf->slab->bo, vl, flags);
}


static void
pb_slab_buffer_fence(struct pb_buffer *_buf,
                     struct pipe_fence_handle *fence)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_fence(buf->slab->bo, fence);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}


/* Virtual function table shared by all slab sub-allocated buffers */
static const struct pb_vtbl
pb_slab_buffer_vtbl = {
   pb_slab_buffer_destroy,
   pb_slab_buffer_map,
   pb_slab_buffer_unmap,
   pb_slab_buffer_validate,
   pb_slab_buffer_fence,
   pb_slab_buffer_get_base_buffer
};


/**
 * Create a new slab.
 *
 * Called when we have run out of free slabs.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Get the backing storage for the whole slab from the provider */
   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if (!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note down the slab virtual address. All mappings are accessed directly
    * through this address so it is required that the buffer is pinned. */
   slab->virtual = pb_map(slab->bo,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE, NULL);
   if (!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   /* E.g. a 256 KiB slab chopped into 4 KiB buffers yields 64 sub-buffers */
   numBuffers = slab->bo->size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   list_inithead(&slab->head);
   list_inithead(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   /* Initialize each sub-buffer and put it on the slab's free list */
   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      pipe_reference_init(&buf->base.reference, 0);
      buf->base.size = mgr->bufSize;
      buf->base.alignment = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      cnd_init(&buf->event);
      list_addtail(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Add this slab to the list of partial slabs */
   list_addtail(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}


static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              pb_size size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if (size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if (!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if (!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   assert(pb_check_usage(desc->usage, mgr->desc.usage));
   if (!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;

   mtx_lock(&mgr->mutex);

   /* Create a new slab, if we run out of partial slabs */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         mtx_unlock(&mgr->mutex);
         return NULL;
      }
   }

   /* Allocate the buffer from a partial (or just created) slab */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* If totally full remove from the partial slab list */
   if (--slab->numFree == 0)
      list_delinit(&slab->head);

   list = slab->freeBuffers.next;
   list_delinit(list);

   mtx_unlock(&mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;

   return &buf->base;
}


static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}


struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       pb_size bufSize,
                       pb_size slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;
   mgr->base.flush = pb_slab_manager_flush;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   list_inithead(&mgr->slabs);

   (void) mtx_init(&mgr->mutex, mtx_plain);

   return &mgr->base;
}


static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    pb_size size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   pb_size bufSize;
   pb_size reqSize = size;
   unsigned i;

   if (desc->alignment > reqSize)
      reqSize = desc->alignment;

   /* Find the smallest bucket whose buffer size covers the request */
   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if (bufSize >= reqSize)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}
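
/*
 * For instance (illustrative numbers, assuming minBufSize = 64): a request
 * for 300 bytes walks bucket sizes 64, 128, 256, 512 and is served from the
 * 512-byte bucket; a request larger than the biggest bucket falls through to
 * the provider.
 */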


static void
pb_slab_range_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);

   /* Individual slabs don't hold any temporary buffers so no need to call them */

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}


struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             pb_size minBufSize,
                             pb_size maxBufSize,
                             pb_size slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   pb_size bufSize;
   unsigned i;

   if (!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
   mgr->base.flush = pb_slab_range_manager_flush;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   /* Count the power-of-two buckets needed to cover [minBufSize, maxBufSize] */
   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while (bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out1;

   /* One slab manager per bucket size */
   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if (!mgr->buckets[i])
         goto out2;
      bufSize *= 2;
   }

   return &mgr->base;

out2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if (mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out1:
   FREE(mgr);
out:
   return NULL;
}
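
/*
 * Bucket arithmetic example (illustrative values): with minBufSize = 64 and
 * maxBufSize = 1024, the counting loop in pb_slab_range_manager_create above
 * yields numBuckets = 5, i.e. slab managers for 64, 128, 256, 512 and 1024
 * byte buffers.
 */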