2 * Copyright © 2019 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include "sparse_array.h"
/* Header for one node of the sparse-array tree.  The node's payload
 * (child pointers for interior nodes, elements for leaves) is allocated
 * in the same calloc() block, immediately after this header — see
 * _util_sparse_array_alloc_node() and _util_sparse_array_node_data().
 * NOTE(review): the member list is not visible in this chunk of the
 * file; only node->level is referenced by the visible code — confirm
 * the full definition upstream.
 */
struct util_sparse_array_node
{
33 util_sparse_array_init(struct util_sparse_array
*arr
,
34 size_t elem_size
, size_t node_size
)
36 memset(arr
, 0, sizeof(*arr
));
37 arr
->elem_size
= elem_size
;
38 arr
->node_size_log2
= util_logbase2_64(node_size
);
39 assert(node_size
>= 2 && node_size
== (1ull << arr
->node_size_log2
));
43 _util_sparse_array_node_data(struct util_sparse_array_node
*node
)
49 _util_sparse_array_node_finish(struct util_sparse_array
*arr
,
50 struct util_sparse_array_node
*node
)
52 if (node
->level
> 0) {
53 struct util_sparse_array_node
**children
=
54 _util_sparse_array_node_data(node
);
55 size_t node_size
= 1ull << arr
->node_size_log2
;
56 for (size_t i
= 0; i
< node_size
; i
++) {
57 if (children
[i
] != NULL
)
58 _util_sparse_array_node_finish(arr
, children
[i
]);
66 util_sparse_array_finish(struct util_sparse_array
*arr
)
69 _util_sparse_array_node_finish(arr
, arr
->root
);
72 static inline struct util_sparse_array_node
*
73 _util_sparse_array_alloc_node(struct util_sparse_array
*arr
,
76 size_t size
= sizeof(struct util_sparse_array_node
);
78 size
+= arr
->elem_size
<< arr
->node_size_log2
;
80 size
+= sizeof(struct util_sparse_array_node
*) << arr
->node_size_log2
;
83 struct util_sparse_array_node
*node
= calloc(1, size
);
/* Atomically publish @node at *node_ptr if it still holds @cmp_node.
 *
 * Returns whichever node ends up installed: ours on success, or the
 * winner's on a lost race — in which case our freshly allocated @node is
 * freed (it was never visible to other threads, so this is safe).  The
 * caller always continues with the return value.
 */
static inline struct util_sparse_array_node *
_util_sparse_array_set_or_free_node(struct util_sparse_array_node **node_ptr,
                                    struct util_sparse_array_node *cmp_node,
                                    struct util_sparse_array_node *node)
{
   struct util_sparse_array_node *prev_node =
      p_atomic_cmpxchg(node_ptr, cmp_node, node);

   if (prev_node == cmp_node)
      return node;

   /* We lost the race.  Free ours and adopt the one already set. */
   free(node);
   return prev_node;
}
109 util_sparse_array_get(struct util_sparse_array
*arr
, uint64_t idx
)
111 const unsigned node_size_log2
= arr
->node_size_log2
;
112 struct util_sparse_array_node
*root
= p_atomic_read(&arr
->root
);
113 if (unlikely(root
== NULL
)) {
114 unsigned root_level
= 0;
115 uint64_t idx_iter
= idx
>> node_size_log2
;
117 idx_iter
>>= node_size_log2
;
120 struct util_sparse_array_node
*new_root
=
121 _util_sparse_array_alloc_node(arr
, root_level
);
122 root
= _util_sparse_array_set_or_free_node(&arr
->root
, NULL
, new_root
);
126 uint64_t root_idx
= idx
>> (root
->level
* node_size_log2
);
127 if (likely(root_idx
< (1ull << node_size_log2
)))
130 /* In this case, we have a root but its level is low enough that the
131 * requested index is out-of-bounds.
133 struct util_sparse_array_node
*new_root
=
134 _util_sparse_array_alloc_node(arr
, root
->level
+ 1);
136 struct util_sparse_array_node
**new_root_children
=
137 _util_sparse_array_node_data(new_root
);
138 new_root_children
[0] = root
;
140 /* We only add one at a time instead of the whole tree because it's
141 * easier to ensure correctness of both the tree building and the
142 * clean-up path. Because we're only adding one node we never have to
143 * worry about trying to free multiple things without freeing the old
146 root
= _util_sparse_array_set_or_free_node(&arr
->root
, root
, new_root
);
149 struct util_sparse_array_node
*node
= root
;
150 while (node
->level
> 0) {
151 uint64_t child_idx
= (idx
>> (node
->level
* node_size_log2
)) &
152 ((1ull << node_size_log2
) - 1);
154 struct util_sparse_array_node
**children
=
155 _util_sparse_array_node_data(node
);
156 struct util_sparse_array_node
*child
=
157 p_atomic_read(&children
[child_idx
]);
159 if (unlikely(child
== NULL
)) {
160 child
= _util_sparse_array_alloc_node(arr
, node
->level
- 1);
161 child
= _util_sparse_array_set_or_free_node(&children
[child_idx
],
168 uint64_t elem_idx
= idx
& ((1ull << node_size_log2
) - 1);
169 return (void *)((char *)_util_sparse_array_node_data(node
) +
170 (elem_idx
* arr
->elem_size
));
174 validate_node_level(struct util_sparse_array
*arr
,
175 struct util_sparse_array_node
*node
,
178 assert(node
->level
== level
);
180 if (node
->level
> 0) {
181 struct util_sparse_array_node
**children
=
182 _util_sparse_array_node_data(node
);
183 size_t node_size
= 1ull << arr
->node_size_log2
;
184 for (size_t i
= 0; i
< node_size
; i
++) {
185 if (children
[i
] != NULL
)
186 validate_node_level(arr
, children
[i
], level
- 1);
192 util_sparse_array_validate(struct util_sparse_array
*arr
)
194 validate_node_level(arr
, arr
->root
, arr
->root
->level
);
198 util_sparse_array_free_list_init(struct util_sparse_array_free_list
*fl
,
199 struct util_sparse_array
*arr
,
201 uint32_t next_offset
)
205 fl
->sentinel
= sentinel
;
206 fl
->next_offset
= next_offset
;
/* Build a new packed head word from the previous one.
 *
 * The head packs a 32-bit ABA generation counter in the high half and
 * the head element's index in the low half; each update bumps the
 * counter (wrapping) so a cmpxchg can detect pop/push interleavings.
 */
static uint64_t
free_list_head(uint64_t old, uint32_t next)
{
   uint64_t counter = (old >> 32) + 1;
   return (counter << 32) | next;
}
216 util_sparse_array_free_list_push(struct util_sparse_array_free_list
*fl
,
217 uint32_t *items
, unsigned num_items
)
219 assert(num_items
> 0);
220 assert(items
[0] != fl
->sentinel
);
221 void *last_elem
= util_sparse_array_get(fl
->arr
, items
[0]);
222 uint32_t *last_next
= (uint32_t *)((char *)last_elem
+ fl
->next_offset
);
223 for (unsigned i
= 1; i
< num_items
; i
++) {
224 *last_next
= items
[i
];
225 assert(items
[i
] != fl
->sentinel
);
226 last_elem
= util_sparse_array_get(fl
->arr
, items
[i
]);
227 last_next
= (uint32_t *)((char *)last_elem
+ fl
->next_offset
);
230 uint64_t current_head
, old_head
;
231 old_head
= p_atomic_read(&fl
->head
);
233 current_head
= old_head
;
234 *last_next
= current_head
; /* Index is the bottom 32 bits */
235 uint64_t new_head
= free_list_head(current_head
, items
[0]);
236 old_head
= p_atomic_cmpxchg(&fl
->head
, current_head
, new_head
);
237 } while (old_head
!= current_head
);
241 util_sparse_array_free_list_pop_idx(struct util_sparse_array_free_list
*fl
)
243 uint64_t current_head
;
245 current_head
= p_atomic_read(&fl
->head
);
247 if ((uint32_t)current_head
== fl
->sentinel
)
250 uint32_t head_idx
= current_head
; /* Index is the bottom 32 bits */
251 void *head_elem
= util_sparse_array_get(fl
->arr
, head_idx
);
252 uint32_t *head_next
= (uint32_t *)((char *)head_elem
+ fl
->next_offset
);
253 uint32_t new_head
= free_list_head(current_head
, *head_next
);
254 uint64_t old_head
= p_atomic_cmpxchg(&fl
->head
, current_head
, new_head
);
255 if (old_head
== current_head
)
257 current_head
= old_head
;
262 util_sparse_array_free_list_pop_elem(struct util_sparse_array_free_list
*fl
)
264 uint64_t current_head
;
266 current_head
= p_atomic_read(&fl
->head
);
268 if ((uint32_t)current_head
== fl
->sentinel
)
271 uint32_t head_idx
= current_head
; /* Index is the bottom 32 bits */
272 void *head_elem
= util_sparse_array_get(fl
->arr
, head_idx
);
273 uint32_t *head_next
= (uint32_t *)((char *)head_elem
+ fl
->next_offset
);
274 uint32_t new_head
= free_list_head(current_head
, *head_next
);
275 uint64_t old_head
= p_atomic_cmpxchg(&fl
->head
, current_head
, new_head
);
276 if (old_head
== current_head
)
278 current_head
= old_head
;