/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/* it is a test after all */
/* Size in bytes of one page of the simulated address space; all heap
 * addresses handed to/from util_vma_heap are multiples of this.
 * constexpr (not merely const): it is a pure compile-time constant and
 * the file already uses constexpr elsewhere.
 */
static constexpr uint64_t MEM_PAGE_SIZE = 4096;
50 struct allocation_less
{
51 bool operator()(const allocation
& lhs
, const allocation
& rhs
) const
53 assert(lhs
.start_page
+ lhs
.num_pages
> lhs
.start_page
);
54 return lhs
.start_page
+ lhs
.num_pages
<= rhs
.start_page
;
58 constexpr uint64_t allocation_end_page(const allocation
& a
) {
59 return a
.start_page
+ a
.num_pages
;
63 static const uint64_t MEM_START_PAGE
= 1;
64 static const uint64_t MEM_SIZE
= 0xfffffffffffff000;
65 static const uint64_t MEM_PAGES
= MEM_SIZE
/ MEM_PAGE_SIZE
;
67 random_test(uint_fast32_t seed
)
68 : heap_holes
{allocation
{MEM_START_PAGE
, MEM_PAGES
}}, rand
{seed
}
70 util_vma_heap_init(&heap
, MEM_START_PAGE
* MEM_PAGE_SIZE
, MEM_SIZE
);
73 void test(unsigned long count
)
75 std::uniform_int_distribution
<> one_to_thousand(1, 1000);
77 int action
= one_to_thousand(rand
);
78 if (action
== 1) fill();
79 else if (action
== 2) empty();
80 else if (action
< 374) dealloc();
85 bool alloc(uint64_t size_order
=52, uint64_t align_order
=52)
87 std::geometric_distribution
<> dist
;
90 align_order
= std::min(dist(rand
), 51);
91 uint64_t align_pages
= 1ULL << align_order
;
92 uint64_t align
= align_pages
* MEM_PAGE_SIZE
;
95 size_order
= std::min(dist(rand
), 51);
96 uint64_t size_pages
= 1ULL << size_order
;
97 uint64_t size
= size_pages
* MEM_PAGE_SIZE
;
99 uint64_t addr
= util_vma_heap_alloc(&heap
, size
, align
);
102 /* assert no gaps are present in the tracker that could satisfy this
105 for (const auto& hole
: heap_holes
) {
106 uint64_t hole_alignment_pages
=
107 (align_pages
- (hole
.start_page
% align_pages
)) % align_pages
;
108 assert(hole
.num_pages
< size_pages
+ hole_alignment_pages
);
112 assert(addr
% align
== 0);
113 uint64_t addr_page
= addr
/ MEM_PAGE_SIZE
;
114 allocation a
{addr_page
, size_pages
};
115 auto i
= heap_holes
.find(a
);
116 assert(i
!= end(heap_holes
));
117 allocation hole
= *i
;
119 assert(hole
.start_page
<= addr_page
);
120 assert(hole
.num_pages
>= size_pages
+ addr_page
- hole
.start_page
);
123 if (hole
.start_page
< a
.start_page
) {
124 heap_holes
.emplace(allocation
{hole
.start_page
,
125 a
.start_page
- hole
.start_page
});
127 if (allocation_end_page(hole
) > allocation_end_page(a
)) {
128 heap_holes
.emplace(allocation
{allocation_end_page(a
),
129 allocation_end_page(hole
) - allocation_end_page(a
)});
132 allocations
.push_back(a
);
139 if (allocations
.size() == 0)
142 std::uniform_int_distribution
<> dist(0, allocations
.size() - 1);
143 int to_dealloc
= dist(rand
);
145 std::swap(allocations
.at(to_dealloc
), allocations
.back());
146 allocation a
= allocations
.back();
147 allocations
.pop_back();
149 util_vma_heap_free(&heap
, a
.start_page
* MEM_PAGE_SIZE
,
150 a
.num_pages
* MEM_PAGE_SIZE
);
152 assert(heap_holes
.find(a
) == end(heap_holes
));
153 auto next
= heap_holes
.upper_bound(a
);
154 if (next
!= end(heap_holes
)) {
155 if (next
->start_page
== allocation_end_page(a
)) {
156 allocation x
{a
.start_page
, a
.num_pages
+ next
->num_pages
};
157 next
= heap_holes
.erase(next
);
158 next
= heap_holes
.insert(next
, x
);
160 if (next
!= begin(heap_holes
)) {
163 if (allocation_end_page(*prev
) == next
->start_page
) {
164 allocation x
{prev
->start_page
,
165 prev
->num_pages
+ next
->num_pages
};
167 heap_holes
.erase(prev
);
168 next
= heap_holes
.erase(next
);
169 heap_holes
.insert(next
, x
);
177 if (next
!= begin(heap_holes
)) {
180 if (allocation_end_page(*prev
) == a
.start_page
) {
181 allocation x
{prev
->start_page
, prev
->num_pages
+ a
.num_pages
};
182 next
= heap_holes
.erase(prev
);
183 heap_holes
.insert(next
, x
);
189 heap_holes
.emplace(a
);
194 for (int sz
= 51; sz
>= 0; sz
--) {
198 assert(heap_holes
.empty());
203 while (allocations
.size() != 0)
205 assert(heap_holes
.size() == 1);
206 auto& hole
= *begin(heap_holes
);
207 assert(hole
.start_page
== MEM_START_PAGE
&& hole
.num_pages
== MEM_PAGES
);
/* the allocator under test */
struct util_vma_heap heap;
/* mirror of the heap's free space, kept in sync by alloc()/dealloc() */
std::set<allocation, allocation_less> heap_holes;
/* deterministic PRNG driving every random choice in the test */
std::default_random_engine rand;
/* currently live allocations, in no particular order */
std::vector<allocation> allocations;
218 int main(int argc
, char **argv
)
220 unsigned long seed
, count
;
222 char *arg_end
= NULL
;
223 seed
= strtoul(argv
[1], &arg_end
, 0);
224 if (!arg_end
|| *arg_end
|| seed
== ULONG_MAX
)
225 errx(1, "invalid seed \"%s\"", argv
[1]);
228 count
= strtoul(argv
[2], &arg_end
, 0);
229 if (!arg_end
|| *arg_end
|| count
== ULONG_MAX
)
230 errx(1, "invalid count \"%s\"", argv
[2]);
231 } else if (argc
== 1) {
232 /* importantly chosen prime numbers. */
236 errx(1, "USAGE: %s seed iter_count\n", argv
[0]);
239 random_test r
{(uint_fast32_t)seed
};