if (bo->ws->debug_all_bos) {
pthread_mutex_lock(&ws->global_bo_list_lock);
- LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
+ list_addtail(&bo->global_list_item, &ws->global_bo_list);
ws->num_buffers++;
pthread_mutex_unlock(&ws->global_bo_list_lock);
}
gr->color[1] = colors[color][1];
gr->color[2] = colors[color][2];
gr->pane = pane;
- LIST_ADDTAIL(&gr->head, &pane->graph_list);
+ list_addtail(&gr->head, &pane->graph_list);
pane->num_graphs++;
pane->next_color++;
}
height = 100;
if (pane && pane->num_graphs) {
- LIST_ADDTAIL(&pane->head, &hud->pane_list);
+ list_addtail(&pane->head, &hud->pane_list);
pane = NULL;
}
break;
height = 100;
if (pane && pane->num_graphs) {
- LIST_ADDTAIL(&pane->head, &hud->pane_list);
+ list_addtail(&pane->head, &hud->pane_list);
pane = NULL;
}
if (pane) {
if (pane->num_graphs) {
- LIST_ADDTAIL(&pane->head, &hud->pane_list);
+ list_addtail(&pane->head, &hud->pane_list);
}
else {
FREE(pane);
LIST_DEL(&fenced_buf->head);
assert(fenced_mgr->num_unfenced);
--fenced_mgr->num_unfenced;
- LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
+ list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
++fenced_mgr->num_fenced;
}
assert(fenced_mgr->num_fenced);
--fenced_mgr->num_fenced;
- LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+ list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
++fenced_mgr->num_unfenced;
if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
assert(fenced_buf->buffer || fenced_buf->data);
- LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+ list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
++fenced_mgr->num_unfenced;
mtx_unlock(&fenced_mgr->mutex);
(void) mtx_init(&buf->mutex, mtx_plain);
mtx_lock(&mgr->mutex);
- LIST_ADDTAIL(&buf->head, &mgr->list);
+ list_addtail(&buf->head, &mgr->list);
mtx_unlock(&mgr->mutex);
return &buf->base;
buf->mapCount = 0;
LIST_DEL(list);
- LIST_ADDTAIL(list, &slab->freeBuffers);
+ list_addtail(list, &slab->freeBuffers);
slab->numFree++;
if (slab->head.next == &slab->head)
- LIST_ADDTAIL(&slab->head, &mgr->slabs);
+ list_addtail(&slab->head, &mgr->slabs);
/* If the slab becomes totally empty, free it */
if (slab->numFree == slab->numBuffers) {
buf->start = i * mgr->bufSize;
buf->mapCount = 0;
cnd_init(&buf->event);
- LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
+ list_addtail(&buf->head, &slab->freeBuffers);
slab->numFree++;
buf++;
}
/* Add this slab to the list of partial slabs */
- LIST_ADDTAIL(&slab->head, &mgr->slabs);
+ list_addtail(&slab->head, &mgr->slabs);
return PIPE_OK;
entry->start = os_time_get();
entry->end = entry->start + mgr->usecs;
- LIST_ADDTAIL(&entry->head, cache);
+ list_addtail(&entry->head, cache);
++mgr->num_buffers;
mgr->cache_size += buf->size;
mtx_unlock(&mgr->mutex);
/* Add slab to the group's list if it isn't already linked. */
if (!slab->head.next) {
struct pb_slab_group *group = &slabs->groups[entry->group_index];
- LIST_ADDTAIL(&slab->head, &group->slabs);
+ list_addtail(&slab->head, &group->slabs);
}
if (slab->num_free >= slab->num_entries) {
pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
{
mtx_lock(&slabs->mutex);
- LIST_ADDTAIL(&entry->head, &slabs->reclaim);
+ list_addtail(&entry->head, &slabs->reclaim);
mtx_unlock(&slabs->mutex);
}
ftr->magic = DEBUG_MEMORY_MAGIC;
mtx_lock(&list_mutex);
- LIST_ADDTAIL(&hdr->head, &list);
+ list_addtail(&hdr->head, &list);
mtx_unlock(&list_mutex);
return data_from_header(hdr);
util_dirty_surface_set_dirty(struct util_dirty_surfaces *dss, struct util_dirty_surface *ds)
{
if (LIST_IS_EMPTY(&ds->dirty_list))
- LIST_ADDTAIL(&ds->dirty_list, &dss->dirty_list);
+ list_addtail(&ds->dirty_list, &dss->dirty_list);
}
static inline void
if (slab->free == slab->count) {
LIST_DEL(&slab->head);
- LIST_ADDTAIL(&slab->head, &bucket->free);
+ list_addtail(&slab->head, &bucket->free);
} else
if (slab->free == 1) {
LIST_DEL(&slab->head);
- LIST_ADDTAIL(&slab->head, &bucket->used);
+ list_addtail(&slab->head, &bucket->used);
}
FREE(alloc);
nv30_query_object_del(screen, &oq);
}
- LIST_ADDTAIL(&qo->list, &screen->queries);
+ list_addtail(&qo->list, &screen->queries);
ntfy = nv30_ntfy(screen, qo);
ntfy[0] = 0x00000000;
view->tex_resource = &tmp->resource;
if (tmp->resource.gpu_address)
- LIST_ADDTAIL(&view->list, &rctx->texture_buffers);
+ list_addtail(&view->list, &rctx->texture_buffers);
return &view->base;
}
if (!cf)
return -ENOMEM;
- LIST_ADDTAIL(&cf->list, &bc->cf);
+ list_addtail(&cf->list, &bc->cf);
if (bc->cf_last) {
cf->id = bc->cf_last->id + 2;
if (bc->cf_last->eg_alu_extended) {
if (result[i]) {
LIST_DEL(&result[i]->list);
result[i]->last = 0;
- LIST_ADDTAIL(&result[i]->list, &bc->cf_last->alu);
+ list_addtail(&result[i]->list, &bc->cf_last->alu);
}
}
if (nalu->dst.sel >= bc->ngpr) {
bc->ngpr = nalu->dst.sel + 1;
}
- LIST_ADDTAIL(&nalu->list, &bc->cf_last->alu);
+ list_addtail(&nalu->list, &bc->cf_last->alu);
/* each alu uses 2 dwords */
bc->cf_last->ndw += 2;
bc->ndw += 2;
return -EINVAL;
}
}
- LIST_ADDTAIL(&nvtx->list, &bc->cf_last->vtx);
+ list_addtail(&nvtx->list, &bc->cf_last->vtx);
/* each fetch uses 4 dwords */
bc->cf_last->ndw += 4;
bc->ndw += 4;
if (ntex->dst_gpr >= bc->ngpr) {
bc->ngpr = ntex->dst_gpr + 1;
}
- LIST_ADDTAIL(&ntex->list, &bc->cf_last->tex);
+ list_addtail(&ntex->list, &bc->cf_last->tex);
/* each texture fetch uses 4 dwords */
bc->cf_last->ndw += 4;
bc->ndw += 4;
bc->cf_last->op = CF_OP_GDS;
}
- LIST_ADDTAIL(&ngds->list, &bc->cf_last->gds);
+ list_addtail(&ngds->list, &bc->cf_last->gds);
bc->cf_last->ndw += 4; /* each GDS uses 4 dwords */
if ((bc->cf_last->ndw / 4) >= r600_bytecode_num_tex_and_vtx_instructions(bc))
bc->force_add_cf = 1;
if (!query->buffer.buf)
return false;
- LIST_ADDTAIL(&query->list, &rctx->active_queries);
+ list_addtail(&query->list, &rctx->active_queries);
return true;
}
slot->picture_type = PIPE_H264_ENC_PICTURE_TYPE_SKIP;
slot->frame_num = 0;
slot->pic_order_cnt = 0;
- LIST_ADDTAIL(&slot->list, &enc->cpb_slots);
+ list_addtail(&slot->list, &enc->cpb_slots);
}
}
slot->picture_type = PIPE_H264_ENC_PICTURE_TYPE_SKIP;
slot->frame_num = 0;
slot->pic_order_cnt = 0;
- LIST_ADDTAIL(&slot->list, &enc->cpb_slots);
+ list_addtail(&slot->list, &enc->cpb_slots);
}
}
results[32 * i + 16] = 0;
}
- LIST_ADDTAIL(&qbuf->list, &sctx->shader_query_buffers);
+ list_addtail(&qbuf->list, &sctx->shader_query_buffers);
qbuf->head = 0;
qbuf->refcount = sctx->num_active_shader_queries;
si_query_buffer_reset(ctx, &query->buffer);
- LIST_ADDTAIL(&query->b.active_list, &ctx->active_queries);
+ list_addtail(&query->b.active_list, &ctx->active_queries);
ctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
si_pc_query_resume(ctx, squery);
if (!query->buffer.buf)
return false;
- LIST_ADDTAIL(&query->b.active_list, &sctx->active_queries);
+ list_addtail(&query->b.active_list, &sctx->active_queries);
sctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
return true;
}
if (ret == PIPE_OK) {
sbuf->dma.pending = TRUE;
assert(!sbuf->head.prev && !sbuf->head.next);
- LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
+ list_addtail(&sbuf->head, &svga->dirty_buffers);
}
}
else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
list_inithead(&cache->empty);
for (i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i)
- LIST_ADDTAIL(&cache->entries[i].head, &cache->empty);
+ list_addtail(&cache->entries[i].head, &cache->empty);
return PIPE_OK;
}
entry->timestamp = priv->timestamp;
entry->poc = get_poc(priv);
- LIST_ADDTAIL(&entry->list, &priv->codec_data.h265.dpb_list);
+ list_addtail(&entry->list, &priv->codec_data.h265.dpb_list);
++priv->codec_data.h265.dpb_num;
priv->target = NULL;
/* promote last frame to P frame */
priv->ref_idx_l0 = priv->ref_idx_l1;
enc_HandleTask(port, task, PIPE_H264_ENC_PICTURE_TYPE_P);
- LIST_ADDTAIL(&task->list, &inp->tasks);
+ list_addtail(&task->list, &inp->tasks);
priv->ref_idx_l1 = priv->frame_num++;
/* handle B frames */
if (picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) {
/* put frame at the tail of the queue */
- LIST_ADDTAIL(&task->list, &priv->b_frames);
+ list_addtail(&task->list, &priv->b_frames);
} else {
/* handle I or P frame */
priv->ref_idx_l0 = priv->ref_idx_l1;
enc_HandleTask(port, task, picture_type);
- LIST_ADDTAIL(&task->list, &priv->stacked_tasks);
+ list_addtail(&task->list, &priv->stacked_tasks);
LIST_FOR_EACH_ENTRY(task, &priv->stacked_tasks, list) {
++stacked_num;
}
struct encode_task *t;
t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
LIST_DEL(&t->list);
- LIST_ADDTAIL(&t->list, &inp->tasks);
+ list_addtail(&t->list, &inp->tasks);
}
priv->ref_idx_l1 = priv->frame_num++;
/* promote last frame to P frame */
priv->ref_idx_l0 = priv->ref_idx_l1;
enc_HandleTask(priv, task, PIPE_H264_ENC_PICTURE_TYPE_P);
- LIST_ADDTAIL(&task->list, &inp->tasks);
+ list_addtail(&task->list, &inp->tasks);
priv->ref_idx_l1 = priv->frame_num++;
/* handle B frames */
if (picture_type == PIPE_H264_ENC_PICTURE_TYPE_B) {
/* put frame at the tail of the queue */
- LIST_ADDTAIL(&task->list, &priv->b_frames);
+ list_addtail(&task->list, &priv->b_frames);
} else {
/* handle I or P frame */
priv->ref_idx_l0 = priv->ref_idx_l1;
enc_HandleTask(priv, task, picture_type);
- LIST_ADDTAIL(&task->list, &priv->stacked_tasks);
+ list_addtail(&task->list, &priv->stacked_tasks);
LIST_FOR_EACH_ENTRY(task, &priv->stacked_tasks, list) {
++stacked_num;
}
struct encode_task *t;
t = LIST_ENTRY(struct encode_task, priv->stacked_tasks.next, list);
LIST_DEL(&t->list);
- LIST_ADDTAIL(&t->list, &inp->tasks);
+ list_addtail(&t->list, &inp->tasks);
}
priv->ref_idx_l1 = priv->frame_num++;
entry->buffer = priv->target;
entry->timestamp = priv->timestamp;
entry->poc = MIN2(priv->picture.h264.field_order_cnt[0], priv->picture.h264.field_order_cnt[1]);
- LIST_ADDTAIL(&entry->list, &priv->codec_data.h264.dpb_list);
+ list_addtail(&entry->list, &priv->codec_data.h264.dpb_list);
++priv->codec_data.h264.dpb_num;
priv->target = NULL;
priv->picture.h264.field_order_cnt[0] = priv->picture.h264.field_order_cnt[1] = INT_MAX;
task = LIST_ENTRY(struct encode_task, inp->tasks.next, list);
LIST_DEL(&task->list);
- LIST_ADDTAIL(&task->list, &priv->used_tasks);
+ list_addtail(&task->list, &priv->used_tasks);
if (!task->bitstream)
return;
if (ws->debug_all_bos) {
simple_mtx_lock(&ws->global_bo_list_lock);
- LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
+ list_addtail(&bo->u.real.global_list_item, &ws->global_bo_list);
ws->num_buffers++;
simple_mtx_unlock(&ws->global_bo_list_lock);
}
assert(bo->u.slab.real->bo);
}
- LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
+ list_addtail(&bo->u.slab.entry.head, &slab->base.free);
}
return &slab->base;
bo->u.slab.entry.group_index = group_index;
bo->u.slab.real = slab->buffer;
- LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
+ list_addtail(&bo->u.slab.entry.head, &slab->base.free);
}
return &slab->base;
LIST_DEL(&fenced_buf->head);
assert(fenced_mgr->num_unfenced);
--fenced_mgr->num_unfenced;
- LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
+ list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
++fenced_mgr->num_fenced;
}
assert(fenced_mgr->num_fenced);
--fenced_mgr->num_fenced;
- LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+ list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
++fenced_mgr->num_unfenced;
if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
assert(fenced_buf->buffer);
- LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
+ list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
++fenced_mgr->num_unfenced;
mtx_unlock(&fenced_mgr->mutex);
list_inithead(&fence->ops_list);
} else {
p_atomic_set(&fence->signalled, 0);
- LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
+ list_addtail(&fence->ops_list, &ops->not_signaled);
}
mtx_unlock(&ops->mutex);
entry->timeout_start = now;
entry->timeout_end = entry->timeout_start + cache->timeout_usecs;
- LIST_ADDTAIL(&entry->head, &cache->resources);
+ list_addtail(&entry->head, &cache->resources);
}
struct virgl_resource_cache_entry *
* while free_zombie_resource_views() is called from another.
*/
simple_mtx_lock(&st->zombie_sampler_views.mutex);
- LIST_ADDTAIL(&entry->node, &st->zombie_sampler_views.list.node);
+ list_addtail(&entry->node, &st->zombie_sampler_views.list.node);
simple_mtx_unlock(&st->zombie_sampler_views.mutex);
}
* while free_zombie_shaders() is called from another.
*/
simple_mtx_lock(&st->zombie_shaders.mutex);
- LIST_ADDTAIL(&entry->node, &st->zombie_shaders.list.node);
+ list_addtail(&entry->node, &st->zombie_shaders.list.node);
simple_mtx_unlock(&st->zombie_shaders.mutex);
}
}
#define LIST_ADD(__item, __list) list_add(__item, __list)
-#define LIST_ADDTAIL(__item, __list) list_addtail(__item, __list)
#define LIST_REPLACE(__from, __to) list_replace(__from, __to)
#define LIST_DEL(__item) list_del(__item)
#define LIST_DELINIT(__item) list_delinit(__item)
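
For context, the hunks above mechanically replace the LIST_ADDTAIL() wrapper macro with a direct call to list_addtail(), and the final hunk drops the now-unused macro from the compatibility shims. Below is a minimal self-contained sketch of the intrusive doubly linked list pattern all of these call sites rely on. The struct layout and helper bodies mirror Mesa's util/list.h but are reimplemented here purely for illustration, and `struct node` and `NODE_ENTRY` are hypothetical stand-ins for real containers such as `fenced_buffer` or `encode_task`.

#include <stdio.h>
#include <stddef.h>

/* Minimal intrusive list, modeled on Mesa's util/list.h (illustrative
 * reimplementation, not the real header). A sentinel head links to itself
 * when the list is empty. */
struct list_head {
   struct list_head *prev;
   struct list_head *next;
};

static inline void list_inithead(struct list_head *item)
{
   item->prev = item;
   item->next = item;
}

/* list_addtail(): insert `item` just before the sentinel, i.e. at the tail.
 * This is what every converted LIST_ADDTAIL() call site above does. */
static inline void list_addtail(struct list_head *item, struct list_head *list)
{
   item->next = list;
   item->prev = list->prev;
   list->prev->next = item;
   list->prev = item;
}

static inline void list_del(struct list_head *item)
{
   item->prev->next = item->next;
   item->next->prev = item->prev;
   item->prev = item->next = NULL;
}

/* Hypothetical container with an embedded link, like the structs in the
 * hunks above; NODE_ENTRY recovers the container from its link. */
struct node {
   int id;
   struct list_head head;
};

#define NODE_ENTRY(ptr) ((struct node *)((char *)(ptr) - offsetof(struct node, head)))

int main(void)
{
   struct list_head queue;
   struct node a = { .id = 1 }, b = { .id = 2 };

   list_inithead(&queue);
   list_addtail(&a.head, &queue);  /* queue: a */
   list_addtail(&b.head, &queue);  /* queue: a, b */

   for (struct list_head *it = queue.next; it != &queue; it = it->next)
      printf("node %d\n", NODE_ENTRY(it)->id);

   list_del(&a.head);              /* unlink and NULL the pointers */
   return 0;
}

Because the links are embedded in the containers, insertion and removal are O(1) with no allocation, which is why the same two helpers serve buffer caches, query lists, and encoder task queues alike throughout these hunks.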