if (gr->current_value <
LIST_ENTRY(struct hud_graph, next, head)->current_value) {
LIST_DEL(&gr->head);
- LIST_ADD(&gr->head, &next->head);
+ list_add(&gr->head, &next->head);
}
}
}
struct pb_slab *slab = entry->slab;
LIST_DEL(&entry->head); /* remove from reclaim list */
- LIST_ADD(&entry->head, &slab->free);
+ list_add(&entry->head, &slab->free);
slab->num_free++;
/* Add slab to the group's list if it isn't already linked. */
return NULL;
mtx_lock(&slabs->mutex);
- LIST_ADD(&slab->head, &group->slabs);
+ list_add(&slab->head, &group->slabs);
}
entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
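
For context, LIST_ENTRY (used above and not touched by this rename) recovers the containing struct from an embedded list_head. A minimal sketch of the container_of-style definition it is assumed to expand to:

#include <stddef.h>  /* offsetof */

/* Cast an embedded list_head pointer back to its enclosing struct. */
#define LIST_ENTRY(__type, __item, __field) \
   ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
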
struct threaded_query *tq = threaded_query(p->query);
if (!tq->head_unflushed.next)
- LIST_ADD(&tq->head_unflushed, &p->tc->unflushed_queries);
+ list_add(&tq->head_unflushed, &p->tc->unflushed_queries);
pipe->end_query(pipe, p->query);
}
return false;
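
The !tq->head_unflushed.next check above is a membership test: a zero-initialized node, or one unlinked with list_del (assumed to NULL its pointers, see the list.h sketch further down), has next == NULL, so the query joins the unflushed list at most once. A small illustration with hypothetical names:

/* Hypothetical wrapper; 'not on any list' is encoded as next == NULL. */
struct tracked { struct list_head link; };

static void enqueue_once(struct tracked *t, struct list_head *queue)
{
   if (!t->link.next)   /* never linked, or removed via list_del() */
      list_add(&t->link, queue);
}
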
work->func = func;
work->data = data;
- LIST_ADD(&work->list, &fence->work);
+ list_add(&work->list, &fence->work);
p_atomic_inc(&fence->work_count);
if (fence->work_count > 64)
nouveau_fence_kick(fence);
slab->order = chunk_order;
slab->count = slab->free = size >> chunk_order;
- LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);
+ list_add(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);
cache->allocated += size;
slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
LIST_DEL(&slab->head);
- LIST_ADD(&slab->head, &bucket->used);
+ list_add(&slab->head, &bucket->used);
}
*offset = mm_slab_alloc(slab) << slab->order;
if (slab->free == 0) {
LIST_DEL(&slab->head);
- LIST_ADD(&slab->head, &bucket->full);
+ list_add(&slab->head, &bucket->full);
}
alloc->next = NULL;
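
The nouveau hunks above migrate slabs between three per-order lists as chunks are handed out. A sketch of the bucket shape they imply (the list names appear in the code; the struct layout is an assumption):

/* A slab sits on exactly one list per bucket:
 *   free - every chunk still available
 *   used - partially allocated (first allocation moves it here)
 *   full - no chunks left (moved here when slab->free hits 0)
 */
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
};
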
if (l1) {
LIST_DEL(&l1->list);
- LIST_ADD(&l1->list, &enc->cpb_slots);
+ list_add(&l1->list, &enc->cpb_slots);
}
if (l0) {
LIST_DEL(&l0->list);
- LIST_ADD(&l0->list, &enc->cpb_slots);
+ list_add(&l0->list, &enc->cpb_slots);
}
}
slot->pic_order_cnt = enc->pic.pic_order_cnt;
if (!enc->pic.not_referenced) {
LIST_DEL(&slot->list);
- LIST_ADD(&slot->list, &enc->cpb_slots);
+ list_add(&slot->list, &enc->cpb_slots);
}
}
if (l1) {
LIST_DEL(&l1->list);
- LIST_ADD(&l1->list, &enc->cpb_slots);
+ list_add(&l1->list, &enc->cpb_slots);
}
if (l0) {
LIST_DEL(&l0->list);
- LIST_ADD(&l0->list, &enc->cpb_slots);
+ list_add(&l0->list, &enc->cpb_slots);
}
}
slot->pic_order_cnt = enc->pic.pic_order_cnt;
if (!enc->pic.not_referenced) {
LIST_DEL(&slot->list);
- LIST_ADD(&slot->list, &enc->cpb_slots);
+ list_add(&slot->list, &enc->cpb_slots);
}
}
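
The encoder hunks above all repeat one idiom: an unlink followed by list_add, which inserts directly after the list head, i.e. a move-to-front that keeps recently referenced CPB slots first. A hypothetical helper condensing the pairs:

/* Move a slot to the front of the CPB list (helper name is invented;
 * list_add() inserts immediately after the given head). */
static inline void cpb_move_to_front(struct list_head *slot_list,
                                     struct list_head *cpb_slots)
{
   list_del(slot_list);
   list_add(slot_list, cpb_slots);
}
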
bufsurf->key = *key;
/* add the surface to the surface list */
- LIST_ADD(&bufsurf->list, &sbuf->surfaces);
+ list_add(&bufsurf->list, &sbuf->surfaces);
/* Set the new bind flags for this buffer resource */
sbuf->bind_flags = bind_flags;
LIST_DEL(&entry->head);
/* Add the cache entry (but not the surface!) to the empty list */
- LIST_ADD(&entry->head, &cache->empty);
+ list_add(&entry->head, &cache->empty);
/* update the cache size */
surf_size = surface_size(&entry->key);
LIST_DEL(&entry->bucket_head);
LIST_DEL(&entry->head);
- LIST_ADD(&entry->head, &cache->empty);
+ list_add(&entry->head, &cache->empty);
if (cache->total_size <= target_size) {
/* all done */
/* If we don't have gb objects, we don't need to invalidate. */
if (sws->have_gb_objects)
- LIST_ADD(&entry->head, &cache->validated);
+ list_add(&entry->head, &cache->validated);
else
- LIST_ADD(&entry->head, &cache->invalidated);
+ list_add(&entry->head, &cache->invalidated);
cache->total_size += surf_size;
}
sws->fence_reference(sws, &entry->fence, fence);
/* Add entry to the unused list */
- LIST_ADD(&entry->head, &cache->unused);
+ list_add(&entry->head, &cache->unused);
/* Add entry to the hash table bucket */
bucket = svga_screen_cache_bucket(&entry->key);
- LIST_ADD(&entry->bucket_head, &cache->bucket[bucket]);
+ list_add(&entry->bucket_head, &cache->bucket[bucket]);
}
curr = next;
}
/* add the entry to the invalidated list */
- LIST_ADD(&entry->head, &cache->invalidated);
+ list_add(&entry->head, &cache->invalidated);
}
curr = next;
}
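
The svga hunks above shuffle each cache entry across several lists through two embedded nodes. A sketch of the layout as read from the code (the lifecycle summary is an interpretation, and the struct name here is invented):

/* head        - lifecycle list: unused, validated, invalidated or empty
 * bucket_head - hash-bucket membership while the surface is reusable
 *
 * Roughly: unused + bucket[i] while cached; on release the entry goes
 * to validated (with gb objects) or invalidated, and finally to empty
 * once the surface itself has been freed. */
struct cache_entry_sketch {
   struct list_head head;
   struct list_head bucket_head;
   /* ... key, handle and fence omitted ... */
};
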
/* add to the context's winsys buffers list */
- LIST_ADD(&cur->head, &st->winsys_buffers);
+ list_add(&cur->head, &st->winsys_buffers);
st_framebuffer_reference(&stfb, cur);
}
assert(node->next->prev == node && node->prev->next == node);
}
-#define LIST_ADD(__item, __list) list_add(__item, __list)
#define LIST_REPLACE(__from, __to) list_replace(__from, __to)
#define LIST_DEL(__item) list_del(__item)
#define LIST_DELINIT(__item) list_delinit(__item)
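
For reference, LIST_ADD was a one-to-one shim, so this commit is a pure rename. The semantics relied on throughout, list_add inserting directly after the given head and list_del NULLing the unlinked node, are sketched below, assuming the classic intrusive list in util/list.h:

struct list_head { struct list_head *prev, *next; };

/* Insert item right after list, i.e. at the front. */
static inline void list_add(struct list_head *item, struct list_head *list)
{
   item->prev = list;
   item->next = list->next;
   list->next->prev = item;
   list->next = item;
}

/* Unlink item; clearing its pointers enables the
 * 'next == NULL means not on a list' checks seen above. */
static inline void list_del(struct list_head *item)
{
   item->prev->next = item->next;
   item->next->prev = item->prev;
   item->prev = NULL;
   item->next = NULL;
}
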
call_once(&atexit_once_flag, global_init);
mtx_lock(&exit_mutex);
- LIST_ADD(&queue->head, &queue_list);
+ list_add(&queue->head, &queue_list);
mtx_unlock(&exit_mutex);
}