2 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
3 * Copyright 2016 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
/* Round "value" up to the next multiple of "align" (align must be a
 * power of two). */
#define ALIGN(value, align) (((value) + (align) - 1) & ~((align) - 1))

/* Magic values written into each element header in debug builds so that
 * double frees and frees of foreign pointers trip an assertion. */
#define SLAB_MAGIC_ALLOCATED 0xcafe4321
#define SLAB_MAGIC_FREE 0x7ee01234

#ifndef NDEBUG
#define SET_MAGIC(element, value)   (element)->magic = (value)
#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
#else
/* Release builds: magic bookkeeping compiles away entirely. */
#define SET_MAGIC(element, value)
#define CHECK_MAGIC(element, value)
#endif
/* One array element within a big buffer. */
struct slab_element_header {
   /* The next element in the free or migrated list. */
   struct slab_element_header *next;

   /* This is either
    * - a pointer to the child pool to which this element belongs, or
    * - a pointer to the orphaned page of the element, with the least
    *   significant bit set to 1.
    */
   intptr_t owner;

#ifndef NDEBUG
   /* SLAB_MAGIC_ALLOCATED or SLAB_MAGIC_FREE; maintained by SET_MAGIC()
    * and verified by CHECK_MAGIC(). */
   intptr_t magic;
#endif
};
/* The page is an array of allocations in one block. */
struct slab_page_header {
   union {
      /* Next page in the same child pool. */
      struct slab_page_header *next;

      /* Number of remaining, non-freed elements (for orphaned pages). */
      unsigned num_remaining;
   } u;

   /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure.
    */
};
76 static struct slab_element_header
*
77 slab_get_element(struct slab_parent_pool
*parent
,
78 struct slab_page_header
*page
, unsigned index
)
80 return (struct slab_element_header
*)
81 ((uint8_t*)&page
[1] + (parent
->element_size
* index
));
84 /* The given object/element belongs to an orphaned page (i.e. the owning child
85 * pool has been destroyed). Mark the element as freed and free the whole page
86 * when no elements are left in it.
89 slab_free_orphaned(struct slab_element_header
*elt
)
91 struct slab_page_header
*page
;
93 assert(elt
->owner
& 1);
95 page
= (struct slab_page_header
*)(elt
->owner
& ~(intptr_t)1);
96 if (!p_atomic_dec_return(&page
->u
.num_remaining
))
101 * Create a parent pool for the allocation of same-sized objects.
103 * \param item_size Size of one object.
104 * \param num_items Number of objects to allocate at once.
107 slab_create_parent(struct slab_parent_pool
*parent
,
111 mtx_init(&parent
->mutex
, mtx_plain
);
112 parent
->element_size
= ALIGN(sizeof(struct slab_element_header
) + item_size
,
114 parent
->num_elements
= num_items
;
118 slab_destroy_parent(struct slab_parent_pool
*parent
)
120 mtx_destroy(&parent
->mutex
);
124 * Create a child pool linked to the given parent.
126 void slab_create_child(struct slab_child_pool
*pool
,
127 struct slab_parent_pool
*parent
)
129 pool
->parent
= parent
;
132 pool
->migrated
= NULL
;
136 * Destroy the child pool.
138 * Pages associated to the pool will be orphaned. They are eventually freed
139 * when all objects in them are freed.
141 void slab_destroy_child(struct slab_child_pool
*pool
)
144 return; /* the slab probably wasn't even created */
146 mtx_lock(&pool
->parent
->mutex
);
148 while (pool
->pages
) {
149 struct slab_page_header
*page
= pool
->pages
;
150 pool
->pages
= page
->u
.next
;
151 p_atomic_set(&page
->u
.num_remaining
, pool
->parent
->num_elements
);
153 for (unsigned i
= 0; i
< pool
->parent
->num_elements
; ++i
) {
154 struct slab_element_header
*elt
= slab_get_element(pool
->parent
, page
, i
);
155 p_atomic_set(&elt
->owner
, (intptr_t)page
| 1);
159 while (pool
->migrated
) {
160 struct slab_element_header
*elt
= pool
->migrated
;
161 pool
->migrated
= elt
->next
;
162 slab_free_orphaned(elt
);
165 mtx_unlock(&pool
->parent
->mutex
);
168 struct slab_element_header
*elt
= pool
->free
;
169 pool
->free
= elt
->next
;
170 slab_free_orphaned(elt
);
173 /* Guard against use-after-free. */
178 slab_add_new_page(struct slab_child_pool
*pool
)
180 struct slab_page_header
*page
= malloc(sizeof(struct slab_page_header
) +
181 pool
->parent
->num_elements
* pool
->parent
->element_size
);
186 for (unsigned i
= 0; i
< pool
->parent
->num_elements
; ++i
) {
187 struct slab_element_header
*elt
= slab_get_element(pool
->parent
, page
, i
);
188 elt
->owner
= (intptr_t)pool
;
189 assert(!(elt
->owner
& 1));
191 elt
->next
= pool
->free
;
193 SET_MAGIC(elt
, SLAB_MAGIC_FREE
);
196 page
->u
.next
= pool
->pages
;
203 * Allocate an object from the child pool. Single-threaded (i.e. the caller
204 * must ensure that no operation happens on the same child pool in another
208 slab_alloc(struct slab_child_pool
*pool
)
210 struct slab_element_header
*elt
;
213 /* First, collect elements that belong to us but were freed from a
214 * different child pool.
216 mtx_lock(&pool
->parent
->mutex
);
217 pool
->free
= pool
->migrated
;
218 pool
->migrated
= NULL
;
219 mtx_unlock(&pool
->parent
->mutex
);
221 /* Now allocate a new page. */
222 if (!pool
->free
&& !slab_add_new_page(pool
))
227 pool
->free
= elt
->next
;
229 CHECK_MAGIC(elt
, SLAB_MAGIC_FREE
);
230 SET_MAGIC(elt
, SLAB_MAGIC_ALLOCATED
);
236 * Free an object allocated from the slab. Single-threaded (i.e. the caller
237 * must ensure that no operation happens on the same child pool in another
240 * Freeing an object in a different child pool from the one where it was
241 * allocated is allowed, as long the pool belong to the same parent. No
242 * additional locking is required in this case.
244 void slab_free(struct slab_child_pool
*pool
, void *ptr
)
246 struct slab_element_header
*elt
= ((struct slab_element_header
*)ptr
- 1);
249 CHECK_MAGIC(elt
, SLAB_MAGIC_ALLOCATED
);
250 SET_MAGIC(elt
, SLAB_MAGIC_FREE
);
252 if (p_atomic_read(&elt
->owner
) == (intptr_t)pool
) {
253 /* This is the simple case: The caller guarantees that we can safely
254 * access the free list.
256 elt
->next
= pool
->free
;
261 /* The slow case: migration or an orphaned page. */
262 mtx_lock(&pool
->parent
->mutex
);
264 /* Note: we _must_ re-read elt->owner here because the owning child pool
265 * may have been destroyed by another thread in the meantime.
267 owner_int
= p_atomic_read(&elt
->owner
);
269 if (!(owner_int
& 1)) {
270 struct slab_child_pool
*owner
= (struct slab_child_pool
*)owner_int
;
271 elt
->next
= owner
->migrated
;
272 owner
->migrated
= elt
;
273 mtx_unlock(&pool
->parent
->mutex
);
275 mtx_unlock(&pool
->parent
->mutex
);
277 slab_free_orphaned(elt
);
282 * Allocate an object from the slab. Single-threaded (no mutex).
285 slab_alloc_st(struct slab_mempool
*pool
)
287 return slab_alloc(&pool
->child
);
291 * Free an object allocated from the slab. Single-threaded (no mutex).
294 slab_free_st(struct slab_mempool
*pool
, void *ptr
)
296 slab_free(&pool
->child
, ptr
);
300 slab_destroy(struct slab_mempool
*pool
)
302 slab_destroy_child(&pool
->child
);
303 slab_destroy_parent(&pool
->parent
);
307 * Create an allocator for same-sized objects.
309 * \param item_size Size of one object.
310 * \param num_items Number of objects to allocate at once.
313 slab_create(struct slab_mempool
*pool
,
317 slab_create_parent(&pool
->parent
, item_size
, num_items
);
318 slab_create_child(&pool
->child
, &pool
->parent
);