/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "slab.h"

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#include "u_atomic.h"

#define ALIGN(value, align) (((value) + (align) - 1) & ~((align) - 1))
#define SLAB_MAGIC_ALLOCATED 0xcafe4321
#define SLAB_MAGIC_FREE 0x7ee01234

#ifndef NDEBUG
#define SET_MAGIC(element, value)   (element)->magic = (value)
#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
#else
#define SET_MAGIC(element, value)
#define CHECK_MAGIC(element, value)
#endif
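
/* With assertions enabled, the magic values act as canaries: slab_free()
 * checks for SLAB_MAGIC_ALLOCATED before rewriting the magic to
 * SLAB_MAGIC_FREE, so a double free of the same object trips the CHECK_MAGIC
 * assertion instead of silently corrupting the free list.
 */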
/* One array element within a big buffer. */
struct slab_element_header {
   /* The next element in the free or migrated list. */
   struct slab_element_header *next;

   /* This is either
    * - a pointer to the child pool to which this element belongs, or
    * - a pointer to the orphaned page of the element, with the least
    *   significant bit set to 1.
    */
   intptr_t owner;

#ifndef NDEBUG
   intptr_t magic;
#endif
};
/* The page is an array of allocations in one block. */
struct slab_page_header {
   union {
      /* Next page in the same child pool. */
      struct slab_page_header *next;

      /* Number of remaining, non-freed elements (for orphaned pages). */
      unsigned num_remaining;
   } u;

   /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure.
    */
};
static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header *)
          ((uint8_t *)&page[1] + (parent->element_size * index));
}
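
/* As implied by slab_add_new_page() and the &page[1] arithmetic above, a page
 * is laid out as the header followed immediately by the element array:
 *
 *    [slab_page_header][element 0][element 1] ... [element N-1]
 *
 * where N is parent->num_elements and each element occupies
 * parent->element_size bytes: a slab_element_header followed by the user's
 * item_size bytes, rounded up by ALIGN().
 */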
/* The given object/element belongs to an orphaned page (i.e. the owning child
 * pool has been destroyed). Mark the element as freed and free the whole page
 * when no elements are left in it.
 */
static void
slab_free_orphaned(struct slab_element_header *elt)
{
   struct slab_page_header *page;

   assert(elt->owner & 1);

   page = (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
   if (!p_atomic_dec_return(&page->u.num_remaining))
      free(page);
}
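
/* Example: if a child pool is destroyed while 3 of a page's elements are still
 * allocated, slab_destroy_child() resets u.num_remaining to num_elements and
 * immediately releases the other elements through this function, leaving the
 * count at 3; the page itself is freed when the third outstanding element is
 * freed and p_atomic_dec_return() reaches 0.
 */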
/**
 * Create a parent pool for the allocation of same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create_parent(struct slab_parent_pool *parent,
                   unsigned item_size,
                   unsigned num_items)
{
   mtx_init(&parent->mutex, mtx_plain);
   parent->element_size = ALIGN(sizeof(struct slab_element_header) + item_size,
                                sizeof(intptr_t));
   parent->num_elements = num_items;
}
void
slab_destroy_parent(struct slab_parent_pool *parent)
{
   mtx_destroy(&parent->mutex);
}
/**
 * Create a child pool linked to the given parent.
 */
void slab_create_child(struct slab_child_pool *pool,
                       struct slab_parent_pool *parent)
{
   pool->parent = parent;
   pool->pages = NULL;
   pool->free = NULL;
   pool->migrated = NULL;
}
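
/* A typical arrangement (not mandated here) is one child pool per thread or
 * per context, all sharing a single parent: allocation and freeing within a
 * child pool need no locking, and only cross-pool frees and child pool
 * destruction take the parent mutex.
 */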
/**
 * Destroy the child pool.
 *
 * Pages associated to the pool will be orphaned. They are eventually freed
 * when all objects in them are freed.
 */
void slab_destroy_child(struct slab_child_pool *pool)
{
   mtx_lock(&pool->parent->mutex);

   while (pool->pages) {
      struct slab_page_header *page = pool->pages;
      pool->pages = page->u.next;
      p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);

      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
         p_atomic_set(&elt->owner, (intptr_t)page | 1);
      }
   }

   while (pool->migrated) {
      struct slab_element_header *elt = pool->migrated;
      pool->migrated = elt->next;
      slab_free_orphaned(elt);
   }

   mtx_unlock(&pool->parent->mutex);

   while (pool->free) {
      struct slab_element_header *elt = pool->free;
      pool->free = elt->next;
      slab_free_orphaned(elt);
   }

   /* Guard against use-after-free. */
   pool->parent = NULL;
}
static bool
slab_add_new_page(struct slab_child_pool *pool)
{
   struct slab_page_header *page = malloc(sizeof(struct slab_page_header) +
      pool->parent->num_elements * pool->parent->element_size);

   if (!page)
      return false;

   for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
      elt->owner = (intptr_t)pool;
      assert(!(elt->owner & 1));

      elt->next = pool->free;
      pool->free = elt;
      SET_MAGIC(elt, SLAB_MAGIC_FREE);
   }

   page->u.next = pool->pages;
   pool->pages = page;

   return true;
}
/**
 * Allocate an object from the child pool. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 */
void *
slab_alloc(struct slab_child_pool *pool)
{
   struct slab_element_header *elt;

   if (!pool->free) {
      /* First, collect elements that belong to us but were freed from a
       * different child pool.
       */
      mtx_lock(&pool->parent->mutex);
      pool->free = pool->migrated;
      pool->migrated = NULL;
      mtx_unlock(&pool->parent->mutex);

      /* Now allocate a new page. */
      if (!pool->free && !slab_add_new_page(pool))
         return NULL;
   }

   elt = pool->free;
   pool->free = elt->next;

   CHECK_MAGIC(elt, SLAB_MAGIC_FREE);
   SET_MAGIC(elt, SLAB_MAGIC_ALLOCATED);

   return &elt[1];
}
/**
 * Free an object allocated from the slab. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 *
 * Freeing an object in a different child pool from the one where it was
 * allocated is allowed, as long as the pools belong to the same parent. No
 * additional locking is required in this case.
 */
void slab_free(struct slab_child_pool *pool, void *ptr)
{
   struct slab_element_header *elt = ((struct slab_element_header *)ptr - 1);
   intptr_t owner_int;

   CHECK_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
   SET_MAGIC(elt, SLAB_MAGIC_FREE);

   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
      /* This is the simple case: The caller guarantees that we can safely
       * access the free list.
       */
      elt->next = pool->free;
      pool->free = elt;
      return;
   }

   /* The slow case: migration or an orphaned page. */
   mtx_lock(&pool->parent->mutex);

   /* Note: we _must_ re-read elt->owner here because the owning child pool
    * may have been destroyed by another thread in the meantime.
    */
   owner_int = p_atomic_read(&elt->owner);

   if (!(owner_int & 1)) {
      struct slab_child_pool *owner = (struct slab_child_pool *)owner_int;
      elt->next = owner->migrated;
      owner->migrated = elt;
      mtx_unlock(&pool->parent->mutex);
   } else {
      mtx_unlock(&pool->parent->mutex);

      slab_free_orphaned(elt);
   }
}
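
/* Example of the slow path: thread A allocates an object from its child pool,
 * hands it to thread B, and thread B calls slab_free() on its own child pool.
 * The element's owner still points at A's pool, so it is pushed onto A's
 * migrated list under the parent mutex; A picks it up the next time its free
 * list runs empty in slab_alloc(). If A's pool has already been destroyed, the
 * owner has its low bit set and the element goes to slab_free_orphaned()
 * instead.
 */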
/**
 * Allocate an object from the slab. Single-threaded (no mutex).
 */
void *
slab_alloc_st(struct slab_mempool *pool)
{
   return slab_alloc(&pool->child);
}
/**
 * Free an object allocated from the slab. Single-threaded (no mutex).
 */
void
slab_free_st(struct slab_mempool *pool, void *ptr)
{
   slab_free(&pool->child, ptr);
}
void
slab_destroy(struct slab_mempool *pool)
{
   slab_destroy_child(&pool->child);
   slab_destroy_parent(&pool->parent);
}
/**
 * Create an allocator for same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create(struct slab_mempool *pool,
            unsigned item_size,
            unsigned num_items)
{
   slab_create_parent(&pool->parent, item_size, num_items);
   slab_create_child(&pool->child, &pool->parent);
}
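
/* A minimal usage sketch of the single-threaded slab_mempool wrappers above
 * (my_object and the sizes are illustrative, not part of this file):
 *
 *    struct my_object { int id; float data[4]; };
 *
 *    struct slab_mempool pool;
 *    slab_create(&pool, sizeof(struct my_object), 64);
 *
 *    struct my_object *obj = slab_alloc_st(&pool);
 *    if (obj) {
 *       obj->id = 1;
 *       slab_free_st(&pool, obj);
 *    }
 *
 *    slab_destroy(&pool);
 *
 * For multi-threaded use, call slab_create_parent() once and give each thread
 * its own slab_child_pool via slab_create_child(); slab_alloc()/slab_free()
 * then operate on the per-thread child pools as described above.
 */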