{
struct pb_slab *slab = entry->slab;
- LIST_DEL(&entry->head); /* remove from reclaim list */
- LIST_ADD(&entry->head, &slab->free);
+ list_del(&entry->head); /* remove from reclaim list */
+ list_add(&entry->head, &slab->free);
slab->num_free++;
/* Add slab to the group's list if it isn't already linked. */
if (!slab->head.next) {
struct pb_slab_group *group = &slabs->groups[entry->group_index];
- LIST_ADDTAIL(&slab->head, &group->slabs);
+ list_addtail(&slab->head, &group->slabs);
}
if (slab->num_free >= slab->num_entries) {
- LIST_DEL(&slab->head);
+ list_del(&slab->head);
slabs->slab_free(slabs->priv, slab);
}
}
static void
pb_slabs_reclaim_locked(struct pb_slabs *slabs)
{
- while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+ while (!list_is_empty(&slabs->reclaim)) {
struct pb_slab_entry *entry =
LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
group_index = heap * slabs->num_orders + (order - slabs->min_order);
group = &slabs->groups[group_index];
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
/* If there is no candidate slab at all, or the first slab has no free
* entries, try reclaiming entries.
*/
- if (LIST_IS_EMPTY(&group->slabs) ||
- LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
+ if (list_is_empty(&group->slabs) ||
+ list_is_empty(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
pb_slabs_reclaim_locked(slabs);
/* Remove slabs without free entries. */
- while (!LIST_IS_EMPTY(&group->slabs)) {
+ while (!list_is_empty(&group->slabs)) {
slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
- if (!LIST_IS_EMPTY(&slab->free))
+ if (!list_is_empty(&slab->free))
break;
- LIST_DEL(&slab->head);
+ list_del(&slab->head);
}
- if (LIST_IS_EMPTY(&group->slabs)) {
+ if (list_is_empty(&group->slabs)) {
/* Drop the mutex temporarily to prevent a deadlock where the allocation
* calls back into slab functions (most likely to happen for
* pb_slab_reclaim if memory is low).
* There's a chance that racing threads will end up allocating multiple
* slabs for the same group, but that doesn't hurt correctness.
*/
- pipe_mutex_unlock(slabs->mutex);
+ mtx_unlock(&slabs->mutex);
slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
if (!slab)
return NULL;
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
- LIST_ADD(&slab->head, &group->slabs);
+ list_add(&slab->head, &group->slabs);
}
entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
slab->num_free--;
- pipe_mutex_unlock(slabs->mutex);
+ mtx_unlock(&slabs->mutex);
return entry;
}
/* Free an entry.
 *
 * The entry is not returned to its slab immediately: it is appended to the
 * manager's reclaim list under the mutex, and is actually recycled later by
 * the reclaim path (see pb_slabs_reclaim), which moves ready entries back
 * onto their slab's free list.
 *
 * \param slabs  slab manager the entry was allocated from
 * \param entry  entry previously handed out by the allocation path
 */
void
pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
{
   mtx_lock(&slabs->mutex);
   list_addtail(&entry->head, &slabs->reclaim);
   mtx_unlock(&slabs->mutex);
}
/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
- pipe_mutex_lock(slabs->mutex);
+ mtx_lock(&slabs->mutex);
pb_slabs_reclaim_locked(slabs);
- pipe_mutex_unlock(slabs->mutex);
+ mtx_unlock(&slabs->mutex);
}
/* Initialize the slabs manager.
slabs->slab_alloc = slab_alloc;
slabs->slab_free = slab_free;
- LIST_INITHEAD(&slabs->reclaim);
+ list_inithead(&slabs->reclaim);
num_groups = slabs->num_orders * slabs->num_heaps;
slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
for (i = 0; i < num_groups; ++i) {
struct pb_slab_group *group = &slabs->groups[i];
- LIST_INITHEAD(&group->slabs);
+ list_inithead(&group->slabs);
}
- pipe_mutex_init(slabs->mutex);
+ (void) mtx_init(&slabs->mutex, mtx_plain);
return true;
}
/* Reclaim all slab entries (even those that are still in flight). This
* implicitly calls slab_free for everything.
*/
- while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+ while (!list_is_empty(&slabs->reclaim)) {
struct pb_slab_entry *entry =
LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
pb_slab_reclaim(slabs, entry);
}
FREE(slabs->groups);
- pipe_mutex_destroy(slabs->mutex);
+ mtx_destroy(&slabs->mutex);
}