util: import the slab allocator from gallium
[mesa.git] / src / util / slab.c
1 /*
2 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
3 * Copyright 2016 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
23
24 #include "slab.h"
25 #include "macros.h"
26 #include "simple_list.h"
27 #include <stdbool.h>
28 #include <string.h>
29
/* Round `value` up to the next multiple of `align`.
 * NOTE: only valid when `align` is a power of two (the mask trick relies
 * on it); callers here pass sizeof(intptr_t), which satisfies that.
 */
#define ALIGN(value, align) (((value) + (align) - 1) & ~((align) - 1))

/* In debug builds, every element header carries a magic number so that
 * frees of bogus pointers and use of corrupted elements trip an assert.
 * In release builds both macros expand to nothing.
 */
#ifdef DEBUG
#define SLAB_MAGIC 0xcafe4321
#define SET_MAGIC(element)   (element)->magic = SLAB_MAGIC
#define CHECK_MAGIC(element) assert((element)->magic == SLAB_MAGIC)
#else
#define SET_MAGIC(element)
#define CHECK_MAGIC(element)
#endif
40
41 /* One array element within a big buffer. */
/* One array element within a big buffer.
 * The header precedes the user-visible memory; slab_alloc_st returns
 * &element[1] and slab_free_st recovers the header by stepping back one.
 */
struct slab_element_header {
   /* The next free element; threads a singly-linked free list through
    * all unallocated elements across all pages.
    */
   struct slab_element_header *next_free;

#ifdef DEBUG
   /* Use intptr_t to keep the header aligned to a pointer size. */
   intptr_t magic;
#endif
};
51
52 static struct slab_element_header *
53 slab_get_element(struct slab_mempool *pool,
54 struct slab_page_header *page, unsigned index)
55 {
56 return (struct slab_element_header*)
57 ((uint8_t*)&page[1] + (pool->element_size * index));
58 }
59
60 static bool
61 slab_add_new_page(struct slab_mempool *pool)
62 {
63 struct slab_page_header *page;
64 struct slab_element_header *element;
65 unsigned i;
66
67 page = malloc(sizeof(struct slab_page_header) +
68 pool->num_elements * pool->element_size);
69 if (!page)
70 return false;
71
72 if (!pool->list.prev)
73 make_empty_list(&pool->list);
74
75 insert_at_tail(&pool->list, page);
76
77 /* Mark all elements as free. */
78 for (i = 0; i < pool->num_elements-1; i++) {
79 element = slab_get_element(pool, page, i);
80 element->next_free = slab_get_element(pool, page, i + 1);
81 SET_MAGIC(element);
82 }
83
84 element = slab_get_element(pool, page, pool->num_elements - 1);
85 element->next_free = pool->first_free;
86 SET_MAGIC(element);
87 pool->first_free = slab_get_element(pool, page, 0);
88 return true;
89 }
90
91 /**
92 * Allocate an object from the slab. Single-threaded (no mutex).
93 */
94 void *
95 slab_alloc_st(struct slab_mempool *pool)
96 {
97 struct slab_element_header *element;
98
99 /* Allocate a new page. */
100 if (!pool->first_free &&
101 !slab_add_new_page(pool))
102 return NULL;
103
104 element = pool->first_free;
105 CHECK_MAGIC(element);
106 pool->first_free = element->next_free;
107 return &element[1];
108 }
109
110 /**
111 * Free an object allocated from the slab. Single-threaded (no mutex).
112 */
113 void
114 slab_free_st(struct slab_mempool *pool, void *ptr)
115 {
116 struct slab_element_header *element =
117 ((struct slab_element_header*)ptr - 1);
118
119 CHECK_MAGIC(element);
120 element->next_free = pool->first_free;
121 pool->first_free = element;
122 }
123
124 /**
125 * Allocate an object from the slab. Thread-safe.
126 */
127 void *
128 slab_alloc_mt(struct slab_mempool *pool)
129 {
130 void *mem;
131
132 mtx_lock(&pool->mutex);
133 mem = slab_alloc_st(pool);
134 mtx_unlock(&pool->mutex);
135 return mem;
136 }
137
138 /**
139 * Free an object allocated from the slab. Thread-safe.
140 */
141 void
142 slab_free_mt(struct slab_mempool *pool, void *ptr)
143 {
144 mtx_lock(&pool->mutex);
145 slab_free_st(pool, ptr);
146 mtx_unlock(&pool->mutex);
147 }
148
149 void
150 slab_destroy(struct slab_mempool *pool)
151 {
152 struct slab_page_header *page, *temp;
153
154 if (pool->list.next) {
155 foreach_s(page, temp, &pool->list) {
156 remove_from_list(page);
157 free(page);
158 }
159 }
160
161 mtx_destroy(&pool->mutex);
162 }
163
164 /**
165 * Create an allocator for same-sized objects.
166 *
167 * \param item_size Size of one object.
168 * \param num_items Number of objects to allocate at once.
169 */
170 void
171 slab_create(struct slab_mempool *pool,
172 unsigned item_size,
173 unsigned num_items)
174 {
175 mtx_init(&pool->mutex, mtx_plain);
176 pool->element_size = ALIGN(sizeof(struct slab_element_header) + item_size,
177 sizeof(intptr_t));
178 pool->num_elements = num_items;
179 pool->first_free = NULL;
180 }