/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdlib.h>

#include "util/u_math.h"
#include "util/vma.h"
29 struct util_vma_hole
{
30 struct list_head link
;
/* Walk every hole in the heap, high address to low.  The plain variant
 * must not add or remove holes while iterating.
 */
#define util_vma_foreach_hole(_hole, _heap) \
   list_for_each_entry(struct util_vma_hole, _hole, &(_heap)->holes, link)

/* Deletion-safe variant: the current hole may be unlinked and freed. */
#define util_vma_foreach_hole_safe(_hole, _heap) \
   list_for_each_entry_safe(struct util_vma_hole, _hole, &(_heap)->holes, link)

/* Deletion-safe variant that walks low address to high. */
#define util_vma_foreach_hole_safe_rev(_hole, _heap) \
   list_for_each_entry_safe_rev(struct util_vma_hole, _hole, &(_heap)->holes, link)
45 util_vma_heap_init(struct util_vma_heap
*heap
,
46 uint64_t start
, uint64_t size
)
48 list_inithead(&heap
->holes
);
49 util_vma_heap_free(heap
, start
, size
);
51 /* Default to using high addresses */
52 heap
->alloc_high
= true;
56 util_vma_heap_finish(struct util_vma_heap
*heap
)
58 util_vma_foreach_hole_safe(hole
, heap
)
64 util_vma_heap_validate(struct util_vma_heap
*heap
)
66 uint64_t prev_offset
= 0;
67 util_vma_foreach_hole(hole
, heap
) {
68 assert(hole
->offset
> 0);
69 assert(hole
->size
> 0);
71 if (&hole
->link
== heap
->holes
.next
) {
72 /* This must be the top-most hole. Assert that, if it overflows, it
73 * overflows to 0, i.e. 2^64.
75 assert(hole
->size
+ hole
->offset
== 0 ||
76 hole
->size
+ hole
->offset
> hole
->offset
);
78 /* This is not the top-most hole so it must not overflow and, in
79 * fact, must be strictly lower than the top-most hole. If
80 * hole->size + hole->offset == prev_offset, then we failed to join
81 * holes during a util_vma_heap_free.
83 assert(hole
->size
+ hole
->offset
> hole
->offset
&&
84 hole
->size
+ hole
->offset
< prev_offset
);
86 prev_offset
= hole
->offset
;
90 #define util_vma_heap_validate(heap)
94 util_vma_hole_alloc(struct util_vma_hole
*hole
,
95 uint64_t offset
, uint64_t size
)
97 assert(hole
->offset
<= offset
);
98 assert(hole
->size
>= offset
- hole
->offset
+ size
);
100 if (offset
== hole
->offset
&& size
== hole
->size
) {
101 /* Just get rid of the hole. */
102 list_del(&hole
->link
);
107 assert(offset
- hole
->offset
<= hole
->size
- size
);
108 uint64_t waste
= (hole
->size
- size
) - (offset
- hole
->offset
);
110 /* We allocated at the top. Shrink the hole down. */
115 if (offset
== hole
->offset
) {
116 /* We allocated at the bottom. Shrink the hole up. */
117 hole
->offset
+= size
;
122 /* We allocated in the middle. We need to split the old hole into two
123 * holes, one high and one low.
125 struct util_vma_hole
*high_hole
= calloc(1, sizeof(*hole
));
126 high_hole
->offset
= offset
+ size
;
127 high_hole
->size
= waste
;
129 /* Adjust the hole to be the amount of space left at he bottom of the
132 hole
->size
= offset
- hole
->offset
;
134 /* Place the new hole before the old hole so that the list is in order
137 list_addtail(&high_hole
->link
, &hole
->link
);
141 util_vma_heap_alloc(struct util_vma_heap
*heap
,
142 uint64_t size
, uint64_t alignment
)
144 /* The caller is expected to reject zero-size allocations */
146 assert(alignment
> 0);
148 util_vma_heap_validate(heap
);
150 if (heap
->alloc_high
) {
151 util_vma_foreach_hole_safe(hole
, heap
) {
152 if (size
> hole
->size
)
155 /* Compute the offset as the highest address where a chunk of the
156 * given size can be without going over the top of the hole.
158 * This calculation is known to not overflow because we know that
159 * hole->size + hole->offset can only overflow to 0 and size > 0.
161 uint64_t offset
= (hole
->size
- size
) + hole
->offset
;
163 /* Align the offset. We align down and not up because we are
164 * allocating from the top of the hole and not the bottom.
166 offset
= (offset
/ alignment
) * alignment
;
168 if (offset
< hole
->offset
)
171 util_vma_hole_alloc(hole
, offset
, size
);
172 util_vma_heap_validate(heap
);
176 util_vma_foreach_hole_safe_rev(hole
, heap
) {
177 if (size
> hole
->size
)
180 uint64_t offset
= hole
->offset
;
182 /* Align the offset */
183 uint64_t misalign
= offset
% alignment
;
185 uint64_t pad
= alignment
- misalign
;
186 if (pad
> hole
->size
- size
)
192 util_vma_hole_alloc(hole
, offset
, size
);
193 util_vma_heap_validate(heap
);
198 /* Failed to allocate */
203 util_vma_heap_alloc_addr(struct util_vma_heap
*heap
,
204 uint64_t offset
, uint64_t size
)
206 /* An offset of 0 is reserved for allocation failure. It is not a valid
207 * address and cannot be allocated.
211 /* Allocating something with a size of 0 is also not valid. */
214 /* It's possible for offset + size to wrap around if we touch the top of
215 * the 64-bit address space, but we cannot go any higher than 2^64.
217 assert(offset
+ size
== 0 || offset
+ size
> offset
);
219 /* Find the hole if one exists. */
220 util_vma_foreach_hole_safe(hole
, heap
) {
221 if (hole
->offset
> offset
)
224 /* Holes are ordered high-to-low so the first hole we find with
225 * hole->offset <= is our hole. If it's not big enough to contain the
226 * requested range, then the allocation fails.
228 assert(hole
->offset
<= offset
);
229 if (hole
->size
< offset
- hole
->offset
+ size
)
232 util_vma_hole_alloc(hole
, offset
, size
);
236 /* We didn't find a suitable hole */
241 util_vma_heap_free(struct util_vma_heap
*heap
,
242 uint64_t offset
, uint64_t size
)
244 /* An offset of 0 is reserved for allocation failure. It is not a valid
245 * address and cannot be freed.
249 /* Freeing something with a size of 0 is also not valid. */
252 /* It's possible for offset + size to wrap around if we touch the top of
253 * the 64-bit address space, but we cannot go any higher than 2^64.
255 assert(offset
+ size
== 0 || offset
+ size
> offset
);
257 util_vma_heap_validate(heap
);
259 /* Find immediately higher and lower holes if they exist. */
260 struct util_vma_hole
*high_hole
= NULL
, *low_hole
= NULL
;
261 util_vma_foreach_hole(hole
, heap
) {
262 if (hole
->offset
<= offset
) {
270 assert(offset
+ size
<= high_hole
->offset
);
271 bool high_adjacent
= high_hole
&& offset
+ size
== high_hole
->offset
;
274 assert(low_hole
->offset
+ low_hole
->size
> low_hole
->offset
);
275 assert(low_hole
->offset
+ low_hole
->size
<= offset
);
277 bool low_adjacent
= low_hole
&& low_hole
->offset
+ low_hole
->size
== offset
;
279 if (low_adjacent
&& high_adjacent
) {
280 /* Merge the two holes */
281 low_hole
->size
+= size
+ high_hole
->size
;
282 list_del(&high_hole
->link
);
284 } else if (low_adjacent
) {
285 /* Merge into the low hole */
286 low_hole
->size
+= size
;
287 } else if (high_adjacent
) {
288 /* Merge into the high hole */
289 high_hole
->offset
= offset
;
290 high_hole
->size
+= size
;
292 /* Neither hole is adjacent; make a new one */
293 struct util_vma_hole
*hole
= calloc(1, sizeof(*hole
));
295 hole
->offset
= offset
;
298 /* Add it after the high hole so we maintain high-to-low ordering */
300 list_add(&hole
->link
, &high_hole
->link
);
302 list_add(&hole
->link
, &heap
->holes
);
305 util_vma_heap_validate(heap
);