vulkan: add vk_x11_strict_image_count option
[mesa.git] / src / util / vma.c
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <stdlib.h>
25
26 #include "util/u_math.h"
27 #include "util/vma.h"
28
/* A hole is a currently-free range of address space: [offset, offset + size).
 * Holes live on heap->holes, sorted from highest offset to lowest (see the
 * ordering comments in util_vma_heap_alloc/free below).
 */
struct util_vma_hole {
   struct list_head link;
   uint64_t offset;
   uint64_t size;
};
34
/* Iterate over every hole in the heap, from highest offset to lowest. */
#define util_vma_foreach_hole(_hole, _heap) \
   list_for_each_entry(struct util_vma_hole, _hole, &(_heap)->holes, link)

/* Same iteration, but safe against deleting/freeing the current hole. */
#define util_vma_foreach_hole_safe(_hole, _heap) \
   list_for_each_entry_safe(struct util_vma_hole, _hole, &(_heap)->holes, link)
40
/* Initialize a heap managing the address range [start, start + size).
 *
 * Implemented by starting with an empty hole list and "freeing" the entire
 * range into it.  Note that util_vma_heap_free asserts start > 0 and
 * size > 0, so those constraints apply here as well.
 */
void
util_vma_heap_init(struct util_vma_heap *heap,
                   uint64_t start, uint64_t size)
{
   list_inithead(&heap->holes);
   util_vma_heap_free(heap, start, size);
}
48
/* Tear down the heap, releasing all hole bookkeeping.  Outstanding
 * allocations are simply forgotten; only the hole records themselves are
 * freed, so the safe iteration variant is required here.
 */
void
util_vma_heap_finish(struct util_vma_heap *heap)
{
   util_vma_foreach_hole_safe(hole, heap)
      free(hole);
}
55
#ifndef NDEBUG
/* Debug-only consistency check of the heap's invariants:
 *
 *  - every hole has a non-zero offset and non-zero size;
 *  - only the first (top-most) hole may have offset + size wrap, and then
 *    only exactly to 0, i.e. the hole ends at 2^64;
 *  - holes are sorted strictly high-to-low and are non-adjacent (adjacent
 *    holes should have been merged by util_vma_heap_free).
 *
 * Compiles away entirely when NDEBUG is defined.
 */
static void
util_vma_heap_validate(struct util_vma_heap *heap)
{
   uint64_t prev_offset = 0;
   util_vma_foreach_hole(hole, heap) {
      assert(hole->offset > 0);
      assert(hole->size > 0);

      if (&hole->link == heap->holes.next) {
         /* This must be the top-most hole.  Assert that, if it overflows, it
          * overflows to 0, i.e. 2^64.
          */
         assert(hole->size + hole->offset == 0 ||
                hole->size + hole->offset > hole->offset);
      } else {
         /* This is not the top-most hole so it must not overflow and, in
          * fact, must be strictly lower than the top-most hole.  If
          * hole->size + hole->offset == prev_offset, then we failed to join
          * holes during a util_vma_heap_free.
          */
         assert(hole->size + hole->offset > hole->offset &&
                hole->size + hole->offset < prev_offset);
      }
      prev_offset = hole->offset;
   }
}
#else
#define util_vma_heap_validate(heap)
#endif
86
87 uint64_t
88 util_vma_heap_alloc(struct util_vma_heap *heap,
89 uint64_t size, uint64_t alignment)
90 {
91 /* The caller is expected to reject zero-size allocations */
92 assert(size > 0);
93 assert(alignment > 0);
94
95 util_vma_heap_validate(heap);
96
97 util_vma_foreach_hole_safe(hole, heap) {
98 if (size > hole->size)
99 continue;
100
101 /* Compute the offset as the highest address where a chunk of the given
102 * size can be without going over the top of the hole.
103 *
104 * This calculation is known to not overflow because we know that
105 * hole->size + hole->offset can only overflow to 0 and size > 0.
106 */
107 uint64_t offset = (hole->size - size) + hole->offset;
108
109 /* Align the offset. We align down and not up because we are allocating
110 * from the top of the hole and not the bottom.
111 */
112 offset = (offset / alignment) * alignment;
113
114 if (offset < hole->offset)
115 continue;
116
117 if (offset == hole->offset && size == hole->size) {
118 /* Just get rid of the hole. */
119 list_del(&hole->link);
120 free(hole);
121 util_vma_heap_validate(heap);
122 return offset;
123 }
124
125 assert(offset - hole->offset <= hole->size - size);
126 uint64_t waste = (hole->size - size) - (offset - hole->offset);
127 if (waste == 0) {
128 /* We allocated at the top. Shrink the hole down. */
129 hole->size -= size;
130 util_vma_heap_validate(heap);
131 return offset;
132 }
133
134 if (offset == hole->offset) {
135 /* We allocated at the bottom. Shrink the hole up. */
136 hole->offset += size;
137 hole->size -= size;
138 util_vma_heap_validate(heap);
139 return offset;
140 }
141
142 /* We allocated in the middle. We need to split the old hole into two
143 * holes, one high and one low.
144 */
145 struct util_vma_hole *high_hole = calloc(1, sizeof(*hole));
146 high_hole->offset = offset + size;
147 high_hole->size = waste;
148
149 /* Adjust the hole to be the amount of space left at he bottom of the
150 * original hole.
151 */
152 hole->size = offset - hole->offset;
153
154 /* Place the new hole before the old hole so that the list is in order
155 * from high to low.
156 */
157 list_addtail(&high_hole->link, &hole->link);
158
159 util_vma_heap_validate(heap);
160
161 return offset;
162 }
163
164 /* Failed to allocate */
165 return 0;
166 }
167
168 void
169 util_vma_heap_free(struct util_vma_heap *heap,
170 uint64_t offset, uint64_t size)
171 {
172 /* An offset of 0 is reserved for allocation failure. It is not a valid
173 * address and cannot be freed.
174 */
175 assert(offset > 0);
176
177 /* Freeing something with a size of 0 is also not valid. */
178 assert(size > 0);
179
180 /* It's possible for offset + size to wrap around if we touch the top of
181 * the 64-bit address space, but we cannot go any higher than 2^64.
182 */
183 assert(offset + size == 0 || offset + size > offset);
184
185 util_vma_heap_validate(heap);
186
187 /* Find immediately higher and lower holes if they exist. */
188 struct util_vma_hole *high_hole = NULL, *low_hole = NULL;
189 util_vma_foreach_hole(hole, heap) {
190 if (hole->offset <= offset) {
191 low_hole = hole;
192 break;
193 }
194 high_hole = hole;
195 }
196
197 if (high_hole)
198 assert(offset + size <= high_hole->offset);
199 bool high_adjacent = high_hole && offset + size == high_hole->offset;
200
201 if (low_hole) {
202 assert(low_hole->offset + low_hole->size > low_hole->offset);
203 assert(low_hole->offset + low_hole->size <= offset);
204 }
205 bool low_adjacent = low_hole && low_hole->offset + low_hole->size == offset;
206
207 if (low_adjacent && high_adjacent) {
208 /* Merge the two holes */
209 low_hole->size += size + high_hole->size;
210 list_del(&high_hole->link);
211 free(high_hole);
212 } else if (low_adjacent) {
213 /* Merge into the low hole */
214 low_hole->size += size;
215 } else if (high_adjacent) {
216 /* Merge into the high hole */
217 high_hole->offset = offset;
218 high_hole->size += size;
219 } else {
220 /* Neither hole is adjacent; make a new one */
221 struct util_vma_hole *hole = calloc(1, sizeof(*hole));
222
223 hole->offset = offset;
224 hole->size = size;
225
226 /* Add it after the high hole so we maintain high-to-low ordering */
227 if (high_hole)
228 list_add(&hole->link, &high_hole->link);
229 else
230 list_add(&hole->link, &heap->holes);
231 }
232
233 util_vma_heap_validate(heap);
234 }