/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "slab.h"
#include "macros.h"
#include "u_atomic.h"
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <assert.h>

#define SLAB_MAGIC_ALLOCATED 0xcafe4321
#define SLAB_MAGIC_FREE 0x7ee01234

#ifdef DEBUG
#define SET_MAGIC(element, value)   (element)->magic = (value)
#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
#else
#define SET_MAGIC(element, value)
#define CHECK_MAGIC(element, value)
#endif

/* One array element within a big buffer. */
struct slab_element_header {
   /* The next element in the free or migrated list. */
   struct slab_element_header *next;

   /* This is either
    * - a pointer to the child pool to which this element belongs, or
    * - a pointer to the orphaned page of the element, with the least
    *   significant bit set to 1.
    */
   intptr_t owner;

#ifdef DEBUG
   intptr_t magic;
#endif
};
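
/* Illustrative sketch (not part of the original file): how the tagged owner
 * field is decoded. Pools and pages both come from malloc and are at least
 * pointer-aligned, so the least significant bit of a real pointer is always
 * zero and can serve as the "orphaned" tag:
 *
 *    if (elt->owner & 1) {
 *       struct slab_page_header *page =
 *          (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
 *       // orphaned: see slab_free_orphaned()
 *    } else {
 *       struct slab_child_pool *pool = (struct slab_child_pool *)elt->owner;
 *       // live: the element can migrate back to pool->migrated
 *    }
 */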

/* The page is an array of allocations in one block. */
struct slab_page_header {
   union {
      /* Next page in the same child pool. */
      struct slab_page_header *next;

      /* Number of remaining, non-freed elements (for orphaned pages). */
      unsigned num_remaining;
   } u;
   /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure.
    */
};

static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header *)
          ((uint8_t *)&page[1] + (parent->element_size * index));
}

/* The given object/element belongs to an orphaned page (i.e. the owning child
 * pool has been destroyed). Mark the element as freed and free the whole page
 * when no elements are left in it.
 */
static void
slab_free_orphaned(struct slab_element_header *elt)
{
   struct slab_page_header *page;

   assert(elt->owner & 1);

   page = (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
   if (!p_atomic_dec_return(&page->u.num_remaining))
      free(page);
}

/**
 * Create a parent pool for the allocation of same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create_parent(struct slab_parent_pool *parent,
                   unsigned item_size,
                   unsigned num_items)
{
   mtx_init(&parent->mutex, mtx_plain);
   parent->element_size = ALIGN_POT(sizeof(struct slab_element_header) + item_size,
                                    sizeof(intptr_t));
   parent->num_elements = num_items;
}
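
/* Usage sketch (illustrative; struct my_object is hypothetical): size the
 * parent pool for one object type, allocating 64 objects per page:
 *
 *    struct slab_parent_pool parent;
 *    slab_create_parent(&parent, sizeof(struct my_object), 64);
 */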

void
slab_destroy_parent(struct slab_parent_pool *parent)
{
   mtx_destroy(&parent->mutex);
}

/**
 * Create a child pool linked to the given parent.
 */
void slab_create_child(struct slab_child_pool *pool,
                       struct slab_parent_pool *parent)
{
   pool->parent = parent;
   pool->pages = NULL;
   pool->free = NULL;
   pool->migrated = NULL;
}
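
/* Usage sketch (illustrative): the intended pattern is one child pool per
 * thread, all linked to the same parent, so the fast paths of slab_alloc()
 * and slab_free() never take the parent mutex:
 *
 *    struct slab_child_pool pool;        // one of these per thread
 *    slab_create_child(&pool, &parent);  // parent shared by all threads
 *    void *obj = slab_alloc(&pool);
 */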

/**
 * Destroy the child pool.
 *
 * Pages associated with the pool will be orphaned. They are eventually freed
 * when all objects in them are freed.
 */
void slab_destroy_child(struct slab_child_pool *pool)
{
   if (!pool->parent)
      return; /* the slab probably wasn't even created */

   mtx_lock(&pool->parent->mutex);

   while (pool->pages) {
      struct slab_page_header *page = pool->pages;
      pool->pages = page->u.next;
      p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);

      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
         p_atomic_set(&elt->owner, (intptr_t)page | 1);
      }
   }

   while (pool->migrated) {
      struct slab_element_header *elt = pool->migrated;
      pool->migrated = elt->next;
      slab_free_orphaned(elt);
   }

   mtx_unlock(&pool->parent->mutex);

   while (pool->free) {
      struct slab_element_header *elt = pool->free;
      pool->free = elt->next;
      slab_free_orphaned(elt);
   }

   /* Guard against use-after-free. */
   pool->parent = NULL;
}
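
/* Illustrative consequence of orphaning: objects that are still allocated
 * when their child pool is destroyed remain valid; each orphaned page is
 * freed once its last object is released through any child of the same
 * parent (other_pool below is hypothetical):
 *
 *    void *obj = slab_alloc(&pool);
 *    slab_destroy_child(&pool);     // obj stays usable
 *    slab_free(&other_pool, obj);   // last free releases the orphaned page
 */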

static bool
slab_add_new_page(struct slab_child_pool *pool)
{
   struct slab_page_header *page = malloc(sizeof(struct slab_page_header) +
      pool->parent->num_elements * pool->parent->element_size);

   if (!page)
      return false;

   for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
      elt->owner = (intptr_t)pool;
      assert(!(elt->owner & 1));

      elt->next = pool->free;
      pool->free = elt;
      SET_MAGIC(elt, SLAB_MAGIC_FREE);
   }

   page->u.next = pool->pages;
   pool->pages = page;

   return true;
}

/**
 * Allocate an object from the child pool. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 */
void *
slab_alloc(struct slab_child_pool *pool)
{
   struct slab_element_header *elt;

   if (!pool->free) {
      /* First, collect elements that belong to us but were freed from a
       * different child pool.
       */
      mtx_lock(&pool->parent->mutex);
      pool->free = pool->migrated;
      pool->migrated = NULL;
      mtx_unlock(&pool->parent->mutex);

      /* Now allocate a new page. */
      if (!pool->free && !slab_add_new_page(pool))
         return NULL;
   }

   elt = pool->free;
   pool->free = elt->next;

   CHECK_MAGIC(elt, SLAB_MAGIC_FREE);
   SET_MAGIC(elt, SLAB_MAGIC_ALLOCATED);

   return &elt[1];
}
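
/* Illustrative use (struct my_object is hypothetical): the returned pointer
 * points just past the element header, so it provides the item_size bytes
 * requested from slab_create_parent(), and it may be NULL if a new page
 * could not be allocated:
 *
 *    struct my_object *obj = slab_alloc(&pool);
 *    if (!obj)
 *       return false;  // out of memory
 */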

/**
 * Free an object allocated from the slab. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 *
 * Freeing an object in a different child pool from the one where it was
 * allocated is allowed, as long as the pools belong to the same parent. No
 * additional locking is required in this case.
 */
void slab_free(struct slab_child_pool *pool, void *ptr)
{
   struct slab_element_header *elt = ((struct slab_element_header *)ptr - 1);
   intptr_t owner_int;

   CHECK_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
   SET_MAGIC(elt, SLAB_MAGIC_FREE);

   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
      /* This is the simple case: The caller guarantees that we can safely
       * access the free list.
       */
      elt->next = pool->free;
      pool->free = elt;
      return;
   }

   /* The slow case: migration or an orphaned page. */
   mtx_lock(&pool->parent->mutex);

   /* Note: we _must_ re-read elt->owner here because the owning child pool
    * may have been destroyed by another thread in the meantime.
    */
   owner_int = p_atomic_read(&elt->owner);

   if (!(owner_int & 1)) {
      /* The owning child pool is still alive: park the element on its
       * migrated list.
       */
      struct slab_child_pool *owner = (struct slab_child_pool *)owner_int;
      elt->next = owner->migrated;
      owner->migrated = elt;
      mtx_unlock(&pool->parent->mutex);
   } else {
      mtx_unlock(&pool->parent->mutex);

      slab_free_orphaned(elt);
   }
}
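
/* Illustrative cross-thread free: thread A allocates from its own child
 * pool and thread B frees through its own child pool of the same parent.
 * The element is parked on A's migrated list and reclaimed by A's next
 * slab_alloc() once A's free list runs empty:
 *
 *    void *obj = slab_alloc(&pool_a);   // thread A
 *    slab_free(&pool_b, obj);           // thread B, same parent as pool_a
 */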

/**
 * Allocate an object from the slab. Single-threaded (no mutex).
 */
void *
slab_alloc_st(struct slab_mempool *mempool)
{
   return slab_alloc(&mempool->child);
}

/**
 * Free an object allocated from the slab. Single-threaded (no mutex).
 */
void
slab_free_st(struct slab_mempool *mempool, void *ptr)
{
   slab_free(&mempool->child, ptr);
}

void
slab_destroy(struct slab_mempool *mempool)
{
   slab_destroy_child(&mempool->child);
   slab_destroy_parent(&mempool->parent);
}

/**
 * Create an allocator for same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create(struct slab_mempool *mempool,
            unsigned item_size,
            unsigned num_items)
{
   slab_create_parent(&mempool->parent, item_size, num_items);
   slab_create_child(&mempool->child, &mempool->parent);
}
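
/* Usage sketch for the single-threaded convenience wrappers (illustrative;
 * struct my_object is hypothetical):
 *
 *    struct slab_mempool mempool;
 *    slab_create(&mempool, sizeof(struct my_object), 64);
 *
 *    struct my_object *obj = slab_alloc_st(&mempool);
 *    // ... use obj ...
 *    slab_free_st(&mempool, obj);
 *
 *    slab_destroy(&mempool);
 */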