} else {
if (bo->ws->debug_all_bos) {
pthread_mutex_lock(&bo->ws->global_bo_list_lock);
- LIST_DEL(&bo->global_list_item);
+ list_del(&bo->global_list_item);
bo->ws->num_buffers--;
pthread_mutex_unlock(&bo->ws->global_bo_list_lock);
}
*/
if (gr->current_value <
LIST_ENTRY(struct hud_graph, next, head)->current_value) {
- LIST_DEL(&gr->head);
+ list_del(&gr->head);
list_add(&gr->head, &next->head);
}
}
LIST_FOR_EACH_ENTRY_SAFE(pane, pane_tmp, &hud->pane_list, head) {
LIST_FOR_EACH_ENTRY_SAFE(graph, graph_tmp, &pane->graph_list, head) {
- LIST_DEL(&graph->head);
+ list_del(&graph->head);
hud_graph_destroy(graph, pipe);
}
- LIST_DEL(&pane->head);
+ list_del(&pane->head);
FREE(pane);
}
assert(!fenced_buf->fence);
assert(fenced_buf->head.prev);
assert(fenced_buf->head.next);
- LIST_DEL(&fenced_buf->head);
+ list_del(&fenced_buf->head);
assert(fenced_mgr->num_unfenced);
--fenced_mgr->num_unfenced;
p_atomic_inc(&fenced_buf->base.reference.count);
- LIST_DEL(&fenced_buf->head);
+ list_del(&fenced_buf->head);
assert(fenced_mgr->num_unfenced);
--fenced_mgr->num_unfenced;
list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
assert(fenced_buf->head.prev);
assert(fenced_buf->head.next);
- LIST_DEL(&fenced_buf->head);
+ list_del(&fenced_buf->head);
assert(fenced_mgr->num_fenced);
--fenced_mgr->num_fenced;
pb_debug_buffer_check(buf);
mtx_lock(&mgr->mutex);
- LIST_DEL(&buf->head);
+ list_del(&buf->head);
mtx_unlock(&mgr->mutex);
mtx_destroy(&buf->mutex);
buf->mapCount = 0;
- LIST_DEL(list);
+ list_del(list);
list_addtail(list, &slab->freeBuffers);
slab->numFree++;
assert(!pipe_is_referenced(&buf->reference));
if (entry->head.next) {
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
assert(mgr->num_buffers);
--mgr->num_buffers;
mgr->cache_size -= buf->size;
struct pb_buffer *buf = entry->buffer;
mgr->cache_size -= buf->size;
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
--mgr->num_buffers;
mtx_unlock(&mgr->mutex);
/* Increase refcount */
{
struct pb_slab *slab = entry->slab;
- LIST_DEL(&entry->head); /* remove from reclaim list */
+ list_del(&entry->head); /* remove from reclaim list */
list_add(&entry->head, &slab->free);
slab->num_free++;
}
if (slab->num_free >= slab->num_entries) {
- LIST_DEL(&slab->head);
+ list_del(&slab->head);
slabs->slab_free(slabs->priv, slab);
}
}
if (!LIST_IS_EMPTY(&slab->free))
break;
- LIST_DEL(&slab->head);
+ list_del(&slab->head);
}
if (LIST_IS_EMPTY(&group->slabs)) {
}
entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
slab->num_free--;
mtx_unlock(&slabs->mutex);
memset(ptr, DEBUG_FREED_BYTE, hdr->size);
#else
mtx_lock(&list_mutex);
- LIST_DEL(&hdr->head);
+ list_del(&hdr->head);
mtx_unlock(&list_mutex);
hdr->magic = 0;
ftr->magic = 0;
struct threaded_query *tq = threaded_query(payload->query);
if (tq->head_unflushed.next)
- LIST_DEL(&tq->head_unflushed);
+ list_del(&tq->head_unflushed);
pipe->destroy_query(pipe, payload->query);
}
tq->flushed = true;
if (tq->head_unflushed.next) {
/* This is safe because it can only happen after we sync'd. */
- LIST_DEL(&tq->head_unflushed);
+ list_del(&tq->head_unflushed);
}
}
return success;
{
struct threaded_query *tq, *tmp;
LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) {
- LIST_DEL(&tq->head_unflushed);
+ list_del(&tq->head_unflushed);
/* Memory release semantics: due to a possible race with
* tc_get_query_result, we must ensure that the linked list changes
LIST_FOR_EACH_ENTRY_SAFE(work, tmp, &fence->work, list) {
work->func(work->data);
- LIST_DEL(&work->list);
+ list_del(&work->list);
FREE(work);
}
}
}
slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
- LIST_DEL(&slab->head);
+ list_del(&slab->head);
list_add(&slab->head, &bucket->used);
}
nouveau_bo_ref(slab->bo, bo);
if (slab->free == 0) {
- LIST_DEL(&slab->head);
+ list_del(&slab->head);
list_add(&slab->head, &bucket->full);
}
mm_slab_free(slab, alloc->offset >> slab->order);
if (slab->free == slab->count) {
- LIST_DEL(&slab->head);
+ list_del(&slab->head);
list_addtail(&slab->head, &bucket->free);
} else
if (slab->free == 1) {
- LIST_DEL(&slab->head);
+ list_del(&slab->head);
list_addtail(&slab->head, &bucket->used);
}
struct mm_slab *slab, *next;
LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
- LIST_DEL(&slab->head);
+ list_del(&slab->head);
nouveau_bo_ref(NULL, &slab->bo);
FREE(slab);
}
while (ntfy[3] & 0xff000000) {
}
nouveau_heap_free(&qo->hw);
- LIST_DEL(&qo->list);
+ list_del(&qo->list);
FREE(qo);
}
}
for (i = 0; i < max_slots; ++i) {
slots[i] = result[i];
if (result[i]) {
- LIST_DEL(&result[i]->list);
+ list_del(&result[i]->list);
result[i]->last = 0;
list_addtail(&result[i]->list, &bc->cf_last->alu);
}
}
if (l1) {
- LIST_DEL(&l1->list);
+ list_del(&l1->list);
list_add(&l1->list, &enc->cpb_slots);
}
if (l0) {
- LIST_DEL(&l0->list);
+ list_del(&l0->list);
list_add(&l0->list, &enc->cpb_slots);
}
}
slot->frame_num = enc->pic.frame_num;
slot->pic_order_cnt = enc->pic.pic_order_cnt;
if (!enc->pic.not_referenced) {
- LIST_DEL(&slot->list);
+ list_del(&slot->list);
list_add(&slot->list, &enc->cpb_slots);
}
}
}
if (l1) {
- LIST_DEL(&l1->list);
+ list_del(&l1->list);
list_add(&l1->list, &enc->cpb_slots);
}
if (l0) {
- LIST_DEL(&l0->list);
+ list_del(&l0->list);
list_add(&l0->list, &enc->cpb_slots);
}
}
slot->frame_num = enc->pic.frame_num;
slot->pic_order_cnt = enc->pic.pic_order_cnt;
if (!enc->pic.not_referenced) {
- LIST_DEL(&slot->list);
+ list_del(&slot->list);
list_add(&slot->list, &enc->cpb_slots);
}
}
if (qbuf->list.prev == &sctx->shader_query_buffers)
continue; /* keep the oldest buffer for recycling */
- LIST_DEL(&qbuf->list);
+ list_del(&qbuf->list);
si_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
}
!si_rings_is_buffer_referenced(sctx, qbuf->buf->buf, RADEON_USAGE_READWRITE) &&
sctx->ws->buffer_wait(qbuf->buf->buf, 0, RADEON_USAGE_READWRITE)) {
/* Can immediately re-use the oldest buffer */
- LIST_DEL(&qbuf->list);
+ list_del(&qbuf->list);
} else {
qbuf = NULL;
}
struct gfx10_sh_query_buffer *qbuf =
list_first_entry(&sctx->shader_query_buffers,
struct gfx10_sh_query_buffer, list);
- LIST_DEL(&qbuf->list);
+ list_del(&qbuf->list);
assert(!qbuf->refcount);
si_resource_reference(&qbuf->buf, NULL);
si_pc_query_suspend(ctx, squery);
- LIST_DEL(&squery->active_list);
+ list_del(&squery->active_list);
ctx->num_cs_dw_queries_suspend -= squery->num_cs_dw_suspend;
return query->buffer.buf != NULL;
svga_screen_surface_destroy(svga_screen(sbuf->b.b.screen),
&bufsurf->key, &bufsurf->handle);
- LIST_DEL(&bufsurf->list);
+ list_del(&bufsurf->list);
FREE(bufsurf);
}
} else {
sbuf->map.num_ranges = 0;
assert(sbuf->head.prev && sbuf->head.next);
- LIST_DEL(&sbuf->head); /* remove from svga->dirty_buffers list */
+ list_del(&sbuf->head); /* remove from svga->dirty_buffers list */
#ifdef DEBUG
sbuf->head.next = sbuf->head.prev = NULL;
#endif
entry->handle = NULL;
/* Remove from hash table */
- LIST_DEL(&entry->bucket_head);
+ list_del(&entry->bucket_head);
/* remove from LRU list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
/* Add the cache entry (but not the surface!) to the empty list */
list_add(&entry->head, &cache->empty);
assert(entry->handle);
sws->surface_reference(sws, &entry->handle, NULL);
- LIST_DEL(&entry->bucket_head);
- LIST_DEL(&entry->head);
+ list_del(&entry->bucket_head);
+ list_del(&entry->head);
list_add(&entry->head, &cache->empty);
if (cache->total_size <= target_size) {
cache->empty.next, head);
/* Remove from LRU list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
}
else if (!LIST_IS_EMPTY(&cache->unused)) {
/* free the last used buffer and reuse its entry */
sws->surface_reference(sws, &entry->handle, NULL);
/* Remove from hash table */
- LIST_DEL(&entry->bucket_head);
+ list_del(&entry->bucket_head);
/* Remove from LRU list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
}
if (entry) {
if (sws->surface_is_flushed(sws, entry->handle)) {
/* remove entry from the invalidated list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
sws->fence_reference(sws, &entry->fence, fence);
if (sws->surface_is_flushed(sws, entry->handle)) {
/* remove entry from the validated list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
/* It is now safe to invalidate the surface content.
* It will be done using the current context.
*timestamp = result->timestamp;
--priv->codec_data.h265.dpb_num;
- LIST_DEL(&result->list);
+ list_del(&result->list);
FREE(result);
return buf;
return;
task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
- LIST_DEL(&task->list);
+ list_del(&task->list);
/* promote last frame to P frame */
priv->ref_idx_l0 = priv->ref_idx_l1;
if (stacked_num == priv->stacked_frames_num) {
struct encode_task *t;
t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
- LIST_DEL(&t->list);
+ list_del(&t->list);
list_addtail(&t->list, &inp->tasks);
}
priv->ref_idx_l1 = priv->frame_num++;
return;
task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
- LIST_DEL(&task->list);
+ list_del(&task->list);
/* promote last frame to P frame */
priv->ref_idx_l0 = priv->ref_idx_l1;
if (stacked_num == priv->stacked_frames_num) {
struct encode_task *t;
t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
- LIST_DEL(&t->list);
+ list_del(&t->list);
list_addtail(&t->list, &inp->tasks);
}
priv->ref_idx_l1 = priv->frame_num++;
*timestamp = result->timestamp;
--priv->codec_data.h264.dpb_num;
- LIST_DEL(&result->list);
+ list_del(&result->list);
FREE(result);
return buf;
#endif
task = LIST_ENTRY(struct encode_task, inp->tasks.next, list);
- LIST_DEL(&task->list);
+ list_del(&task->list);
list_addtail(&task->list, &priv->used_tasks);
if (!task->bitstream)
if (!LIST_IS_EMPTY(&priv->free_tasks)) {
task = LIST_ENTRY(struct encode_task, priv->free_tasks.next, list);
- LIST_DEL(&task->list);
+ list_del(&task->list);
return task;
}
if (ws->debug_all_bos) {
simple_mtx_lock(&ws->global_bo_list_lock);
- LIST_DEL(&bo->u.real.global_list_item);
+ list_del(&bo->u.real.global_list_item);
ws->num_buffers--;
simple_mtx_unlock(&ws->global_bo_list_lock);
}
assert(!fenced_buf->fence);
assert(fenced_buf->head.prev);
assert(fenced_buf->head.next);
- LIST_DEL(&fenced_buf->head);
+ list_del(&fenced_buf->head);
assert(fenced_mgr->num_unfenced);
--fenced_mgr->num_unfenced;
p_atomic_inc(&fenced_buf->base.reference.count);
- LIST_DEL(&fenced_buf->head);
+ list_del(&fenced_buf->head);
assert(fenced_mgr->num_unfenced);
--fenced_mgr->num_unfenced;
list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
assert(fenced_buf->head.prev);
assert(fenced_buf->head.next);
- LIST_DEL(&fenced_buf->head);
+ list_del(&fenced_buf->head);
assert(fenced_mgr->num_fenced);
--fenced_mgr->num_fenced;
virgl_resource_cache_entry_release(struct virgl_resource_cache *cache,
struct virgl_resource_cache_entry *entry)
{
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
cache->entry_release_func(entry, cache->user_data);
}
}
if (compat_entry)
- LIST_DEL(&compat_entry->head);
+ list_del(&compat_entry->head);
return compat_entry;
}
LIST_FOR_EACH_ENTRY_SAFE(entry, next,
&st->zombie_sampler_views.list.node, node) {
- LIST_DEL(&entry->node); // remove this entry from the list
+ list_del(&entry->node); // remove this entry from the list
assert(entry->view->context == st->pipe);
pipe_sampler_view_reference(&entry->view, NULL);
LIST_FOR_EACH_ENTRY_SAFE(entry, next,
&st->zombie_shaders.list.node, node) {
- LIST_DEL(&entry->node); // remove this entry from the list
+ list_del(&entry->node); // remove this entry from the list
switch (entry->type) {
case PIPE_SHADER_VERTEX:
* deleted.
*/
if (!st_framebuffer_iface_lookup(smapi, stfbi)) {
- LIST_DEL(&stfb->head);
+ list_del(&stfb->head);
st_framebuffer_reference(&stfb, NULL);
}
}
assert(node->next->prev == node && node->prev->next == node);
}
-#define LIST_DEL(__item) list_del(__item)
-
#define LIST_ENTRY(__type, __item, __field) \
((__type *)(((char *)(__item)) - offsetof(__type, __field)))
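For reference, every hunk above ends up calling the plain helper that the removed LIST_DEL() wrapper forwarded to. The following is a minimal sketch of the list node and the unlink semantics these call sites assume (field names follow the LIST_ENTRY macro kept above; this is an illustration, not a copy of util/list.h):

struct list_head {
   struct list_head *prev;
   struct list_head *next;
};

static inline void list_del(struct list_head *item)
{
   /* Unlink the node from its neighbours in the doubly linked list. */
   item->prev->next = item->next;
   item->next->prev = item->prev;
   /* In this sketch the node's own pointers are left untouched; callers
    * that want them cleared do so themselves, as the svga DEBUG block
    * above shows. */
}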
mtx_lock(&exit_mutex);
LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
if (iter == queue) {
- LIST_DEL(&iter->head);
+ list_del(&iter->head);
break;
}
}