* There's a chance that racing threads will end up allocating multiple
* slabs for the same group, but that doesn't hurt correctness.
*/
- pipe_mutex_unlock(slabs->mutex);
+ mtx_unlock(&slabs->mutex);
slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
if (!slab)
return NULL;
LIST_DEL(&entry->head);
slab->num_free--;
- pipe_mutex_unlock(slabs->mutex);
+ mtx_unlock(&slabs->mutex);
return entry;
}
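
The code above drops the mutex around the driver's slab_alloc callback and re-takes it before touching the free lists, which is what makes the duplicate-slab race both possible and harmless. Below is a minimal sketch of the idea, assuming an illustrative grow_group() and a simplified list layout rather than Mesa's actual structures:

#include <stdlib.h>
#include <threads.h>

struct slab  { struct slab *next; };
struct group { struct slab *slabs; };

/* Sketch only: the caller holds *mutex on entry, mirroring pb_slab_alloc. */
static struct slab *grow_group(mtx_t *mutex, struct group *group)
{
   struct slab *slab;

   mtx_unlock(mutex);            /* don't hold the lock while allocating */
   slab = malloc(sizeof(*slab)); /* stands in for slabs->slab_alloc() */
   if (!slab)
      return NULL;

   mtx_lock(mutex);              /* re-acquire before publishing */
   slab->next = group->slabs;    /* a racing thread may have added one too;
                                  * both slabs just end up on the list */
   group->slabs = slab;
   mtx_unlock(mutex);
   return slab;
}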
void pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
mtx_lock(&slabs->mutex);
LIST_ADDTAIL(&entry->head, &slabs->reclaim);
- pipe_mutex_unlock(slabs->mutex);
+ mtx_unlock(&slabs->mutex);
}
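
For context on the mechanical change in this diff: pipe_mutex_unlock() was a Mesa wrapper (likely in os_thread.h) around the C11 mtx_unlock(), and mtx_unlock() takes a mtx_t pointer, which is why every converted call site gains an explicit &. A sketch of the relationship; the macro body here is an assumption about its shape, not copied from the tree:

#include <threads.h>

/* Assumed shape of the old wrapper: it took the mutex object and applied
 * the address-of itself, so direct call sites must now pass the pointer. */
#define pipe_mutex_unlock(mutex) \
   (void) mtx_unlock(&(mutex))

static void demo(mtx_t *m)      /* assumes m was set up with mtx_init() */
{
   mtx_lock(m);
   pipe_mutex_unlock(*m);       /* old style: object, & hidden in the macro */
   mtx_lock(m);
   mtx_unlock(m);               /* new style: direct C11 call on a pointer */
}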
/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
 */
void pb_slabs_reclaim(struct pb_slabs *slabs)
{
mtx_lock(&slabs->mutex);
pb_slabs_reclaim_locked(slabs);
- pipe_mutex_unlock(slabs->mutex);
+ mtx_unlock(&slabs->mutex);
}
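
pb_slabs_reclaim is only a locked wrapper; the actual walk happens in pb_slabs_reclaim_locked, whose body is not part of this hunk. Below is a sketch of the kind of work such a locked reclaim pass might do, assuming a FIFO reclaim queue and a driver-supplied can_reclaim callback; all names are illustrative:

#include <stdbool.h>

struct entry { struct entry *next; };

struct manager {
   struct entry *reclaim;       /* oldest freed entry first (FIFO) */
   bool (*can_reclaim)(void *priv, struct entry *e);
   void *priv;
};

/* Called with the manager's mutex held: pop entries until one is still
 * in flight. With a FIFO queue it is reasonable to stop there, since
 * every later entry was handed back more recently. */
static void reclaim_locked(struct manager *m)
{
   while (m->reclaim && m->can_reclaim(m->priv, m->reclaim)) {
      struct entry *e = m->reclaim;
      m->reclaim = e->next;
      /* here the entry would be returned to its slab's free list */
   }
}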
/* Initialize the slabs manager.