src/util/slab.c
/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#include "slab.h"
#include "macros.h"
#include "simple_list.h"
#include <assert.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

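/* Round `value` up to the next multiple of `align`, which must be a power
 * of two; e.g. ALIGN(13, 8) == 16. Used below to keep element headers
 * aligned to the pointer size.
 */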
#define ALIGN(value, align) (((value) + (align) - 1) & ~((align) - 1))

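/* In debug builds, tag every element header with a magic number so that
 * freeing a pointer that did not come from this allocator (or a corrupted
 * header) trips an assertion.
 */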
#ifdef DEBUG
#define SLAB_MAGIC 0xcafe4321
#define SET_MAGIC(element)   (element)->magic = SLAB_MAGIC
#define CHECK_MAGIC(element) assert((element)->magic == SLAB_MAGIC)
#else
#define SET_MAGIC(element)
#define CHECK_MAGIC(element)
#endif

/* One array element within a big buffer. */
struct slab_element_header {
   /* The next free element. */
   struct slab_element_header *next_free;

#ifdef DEBUG
   /* Use intptr_t to keep the header aligned to a pointer size. */
   intptr_t magic;
#endif
};
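
/*
 * Each page is a single malloc'd buffer: a slab_page_header (the per-pool
 * page-list node declared in slab.h) followed by num_elements fixed-size
 * slots, where every slot is a slab_element_header immediately followed by
 * the user's object:
 *
 *    [page header][hdr|object][hdr|object] ... [hdr|object]
 *
 * Free slots are chained through next_free into a per-pool freelist.
 */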

static struct slab_element_header *
slab_get_element(struct slab_mempool *pool,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header*)
          ((uint8_t*)&page[1] + (pool->element_size * index));
}
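
/*
 * For example (assuming a 64-bit, non-DEBUG build, where the header is a
 * single 8-byte pointer): with item_size == 20, slab_create() computes
 * element_size = ALIGN(8 + 20, 8) == 32, so element 2 of a page starts
 * 64 bytes past the page header.
 */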

static bool
slab_add_new_page(struct slab_mempool *pool)
{
   struct slab_page_header *page;
   struct slab_element_header *element;
   unsigned i;

   page = malloc(sizeof(struct slab_page_header) +
                 pool->num_elements * pool->element_size);
   if (!page)
      return false;

   if (!pool->list.prev)
      make_empty_list(&pool->list);

   insert_at_tail(&pool->list, page);

   /* Mark all elements as free. */
   for (i = 0; i < pool->num_elements - 1; i++) {
      element = slab_get_element(pool, page, i);
      element->next_free = slab_get_element(pool, page, i + 1);
      SET_MAGIC(element);
   }

   element = slab_get_element(pool, page, pool->num_elements - 1);
   element->next_free = pool->first_free;
   SET_MAGIC(element);
   pool->first_free = slab_get_element(pool, page, 0);
   return true;
}
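
/*
 * Note that a freshly added page's elements are pushed onto the front of
 * the freelist (its last slot points at the old first_free), so the new
 * page is handed out before previously freed elements.
 */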

/**
 * Allocate an object from the slab. Single-threaded (no mutex).
 */
void *
slab_alloc_st(struct slab_mempool *pool)
{
   struct slab_element_header *element;

   /* Allocate a new page if the freelist is empty. */
   if (!pool->first_free &&
       !slab_add_new_page(pool))
      return NULL;

   element = pool->first_free;
   CHECK_MAGIC(element);
   pool->first_free = element->next_free;
   return &element[1]; /* user memory starts right after the header */
}

/**
 * Free an object allocated from the slab. Single-threaded (no mutex).
 */
void
slab_free_st(struct slab_mempool *pool, void *ptr)
{
   /* Step back from the user pointer to its element header. */
   struct slab_element_header *element =
      ((struct slab_element_header*)ptr - 1);

   CHECK_MAGIC(element);
   element->next_free = pool->first_free;
   pool->first_free = element;
}
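
/*
 * Freed elements are only recycled onto the freelist; pages are never
 * returned to the system until slab_destroy().
 */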

/**
 * Allocate an object from the slab. Thread-safe.
 */
void *
slab_alloc_mt(struct slab_mempool *pool)
{
   void *mem;

   mtx_lock(&pool->mutex);
   mem = slab_alloc_st(pool);
   mtx_unlock(&pool->mutex);
   return mem;
}

/**
 * Free an object allocated from the slab. Thread-safe.
 */
void
slab_free_mt(struct slab_mempool *pool, void *ptr)
{
   mtx_lock(&pool->mutex);
   slab_free_st(pool, ptr);
   mtx_unlock(&pool->mutex);
}

void
slab_destroy(struct slab_mempool *pool)
{
   struct slab_page_header *page, *temp;

   if (pool->list.next) {
      foreach_s(page, temp, &pool->list) {
         remove_from_list(page);
         free(page);
      }
   }

   mtx_destroy(&pool->mutex);
}

/**
 * Create an allocator for same-sized objects.
 *
 * \param item_size   Size of one object.
 * \param num_items   Number of objects to allocate at once.
 */
void
slab_create(struct slab_mempool *pool,
            unsigned item_size,
            unsigned num_items)
{
   mtx_init(&pool->mutex, mtx_plain);
   pool->element_size = ALIGN(sizeof(struct slab_element_header) + item_size,
                              sizeof(intptr_t));
   pool->num_elements = num_items;
   pool->first_free = NULL;
   /* Set up the page list here so the pool also works when its memory was
    * not zero-initialized; slab_add_new_page only initializes it lazily
    * for zeroed pools.
    */
   make_empty_list(&pool->list);
}
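
/*
 * Example usage, as an illustrative sketch (struct my_object and the
 * element count are made up, not part of this file):
 *
 *    struct slab_mempool pool;
 *    slab_create(&pool, sizeof(struct my_object), 64);
 *
 *    struct my_object *obj = slab_alloc_st(&pool);
 *    if (obj) {
 *       ... use obj ...
 *       slab_free_st(&pool, obj);
 *    }
 *
 *    slab_destroy(&pool);   // releases every page at once
 *
 * Use slab_alloc_mt()/slab_free_mt() instead when multiple threads share
 * the pool.
 */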