/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pb_slab.h"

#include "util/u_math.h"
#include "util/u_memory.h"

/* All slab allocations from the same heap and with the same size belong
 * to the same group.
 */
struct pb_slab_group
{
   /* Slabs with allocation candidates. Typically, slabs in this list should
    * have some free entries.
    *
    * However, when the head becomes full we purposefully keep it around
    * until the next allocation attempt, at which time we try a reclaim.
    * The intention is to keep serving allocations from the same slab as long
    * as possible for better locality.
    *
    * Due to a race in new slab allocation, additional slabs in this list
    * can be fully allocated as well.
    */
   struct list_head slabs;
};

static void
pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   struct pb_slab *slab = entry->slab;

   LIST_DEL(&entry->head); /* remove from reclaim list */
   LIST_ADD(&entry->head, &slab->free);
   slab->num_free++;

   /* Add slab to the group's list if it isn't already linked. */
   if (!slab->head.next) {
      struct pb_slab_group *group = &slabs->groups[entry->group_index];
      LIST_ADDTAIL(&slab->head, &group->slabs);
   }

   if (slab->num_free >= slab->num_entries) {
      LIST_DEL(&slab->head);
      slabs->slab_free(slabs->priv, slab);
   }
}

static void
pb_slabs_reclaim_locked(struct pb_slabs *slabs)
{
   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);

      if (!slabs->can_reclaim(slabs->priv, entry))
         break;

      pb_slab_reclaim(slabs, entry);
   }
}

/* Allocate a slab entry of the given size from the given heap.
 *
 * This will try to re-use entries that have previously been freed. However,
 * if no entries are free (or all free entries are still "in flight" as
 * determined by the can_reclaim callback function), a new slab will be
 * requested via the slab_alloc callback.
 *
 * Note that slab_free can also be called by this function.
 */
struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
{
   unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
   unsigned group_index;
   struct pb_slab_group *group;
   struct pb_slab *slab;
   struct pb_slab_entry *entry;

   assert(order < slabs->min_order + slabs->num_orders);
   assert(heap < slabs->num_heaps);
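
   /* Worked example (the values are illustrative, not from this file): with
    * min_order = 6 (64-byte minimum entries) and num_orders = 5, a 200-byte
    * request rounds up to order 8, so an allocation from heap 1 lands in
    * group_index = 1 * 5 + (8 - 6) = 7.
    */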
   group_index = heap * slabs->num_orders + (order - slabs->min_order);
   group = &slabs->groups[group_index];

   mtx_lock(&slabs->mutex);

   /* If there is no candidate slab at all, or the first slab has no free
    * entries, try reclaiming entries.
    */
   if (LIST_IS_EMPTY(&group->slabs) ||
       LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
      pb_slabs_reclaim_locked(slabs);

   /* Remove slabs without free entries. */
   while (!LIST_IS_EMPTY(&group->slabs)) {
      slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
      if (!LIST_IS_EMPTY(&slab->free))
         break;

      LIST_DEL(&slab->head);
   }

   if (LIST_IS_EMPTY(&group->slabs)) {
      /* Drop the mutex temporarily to prevent a deadlock where the allocation
       * calls back into slab functions (most likely to happen for
       * pb_slab_reclaim if memory is low).
       *
       * There's a chance that racing threads will end up allocating multiple
       * slabs for the same group, but that doesn't hurt correctness.
       */
      mtx_unlock(&slabs->mutex);
      slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
      if (!slab)
         return NULL;
      mtx_lock(&slabs->mutex);

      LIST_ADD(&slab->head, &group->slabs);
   }

   entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
   LIST_DEL(&entry->head);
   slab->num_free--;
   mtx_unlock(&slabs->mutex);

   return entry;
}

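/* A minimal usage sketch (hypothetical caller code; "ws->bo_slabs" and
 * "struct my_slab_bo" are assumptions, not part of this file). The entry is
 * typically embedded in a driver object, recovered via pointer arithmetic:
 *
 *    struct pb_slab_entry *entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
 *    if (!entry)
 *       return NULL; // the slab_alloc callback failed, e.g. out of memory
 *    struct my_slab_bo *bo = (struct my_slab_bo *)
 *       ((char *)entry - offsetof(struct my_slab_bo, entry));
 */
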
/* Free the given slab entry.
 *
 * The entry may still be in use e.g. by in-flight command submissions. The
 * can_reclaim callback function will be called to determine whether the entry
 * can be handed out again by pb_slab_alloc.
 */
void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   mtx_lock(&slabs->mutex);
   LIST_ADDTAIL(&entry->head, &slabs->reclaim);
   mtx_unlock(&slabs->mutex);
}

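/* Sketch of the intended free pattern (hypothetical; the fence field and its
 * pairing with can_reclaim are assumptions): the caller records when the GPU
 * is done with the entry, frees it immediately, and lets can_reclaim gate
 * re-use:
 *
 *    bo->fence = last_submission_fence;   // consulted by can_reclaim
 *    pb_slab_free(&ws->bo_slabs, &bo->entry);
 */
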
/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
 *
 * This may end up freeing some slabs and is therefore useful to try to reclaim
 * some no longer used memory. However, calling this function is not strictly
 * required since pb_slab_alloc will eventually do the same thing.
 */
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
   mtx_lock(&slabs->mutex);
   pb_slabs_reclaim_locked(slabs);
   mtx_unlock(&slabs->mutex);
}

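/* Hypothetical usage: a driver could call this at a low-frequency point such
 * as the end of a frame to return idle memory promptly:
 *
 *    pb_slabs_reclaim(&ws->bo_slabs);
 */
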
/* Initialize the slabs manager.
 *
 * The minimum and maximum size of slab entries are 2^min_order and
 * 2^max_order, respectively.
 *
 * priv will be passed to the given callback functions.
 */
bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free)
{
   unsigned num_groups;
   unsigned i;

   assert(min_order <= max_order);
   assert(max_order < sizeof(unsigned) * 8 - 1);

   slabs->min_order = min_order;
   slabs->num_orders = max_order - min_order + 1;
   slabs->num_heaps = num_heaps;

   slabs->priv = priv;
   slabs->can_reclaim = can_reclaim;
   slabs->slab_alloc = slab_alloc;
   slabs->slab_free = slab_free;

   LIST_INITHEAD(&slabs->reclaim);

   num_groups = slabs->num_orders * slabs->num_heaps;
   slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
   if (!slabs->groups)
      return false;

   for (i = 0; i < num_groups; ++i) {
      struct pb_slab_group *group = &slabs->groups[i];
      LIST_INITHEAD(&group->slabs);
   }

   (void) mtx_init(&slabs->mutex, mtx_plain);

   return true;
}

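/* A minimal initialization sketch (all driver-side names here are
 * hypothetical assumptions, not part of this file):
 *
 *    static bool my_can_reclaim(void *priv, struct pb_slab_entry *entry)
 *    {
 *       // e.g. return true once the GPU has signaled the entry's fence
 *       return my_fence_is_signaled(priv, entry);
 *    }
 *
 *    if (!pb_slabs_init(&ws->bo_slabs,
 *                       6, 16,        // entry sizes from 64 bytes to 64 KiB
 *                       NUM_HEAPS, ws,
 *                       my_can_reclaim, my_slab_alloc, my_slab_free))
 *       goto fail;
 */
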
/* Shutdown the slab manager.
 *
 * This will free all allocated slabs and internal structures, even if some
 * of the slab entries are still in flight (i.e. if can_reclaim would return
 * false for those entries).
 */
void
pb_slabs_deinit(struct pb_slabs *slabs)
{
   /* Reclaim all slab entries (even those that are still in flight). This
    * implicitly calls slab_free for everything.
    */
   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
      pb_slab_reclaim(slabs, entry);
   }

   FREE(slabs->groups);
   mtx_destroy(&slabs->mutex);
}
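
/* Teardown sketch (hypothetical caller code): because still-in-flight entries
 * are freed unconditionally, the caller should idle the GPU first:
 *
 *    my_wait_gpu_idle(ws);            // assumption: driver-specific idle wait
 *    pb_slabs_deinit(&ws->bo_slabs);
 */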