ws->use_local_bos = perftest_flags & RADV_PERFTEST_LOCAL_BOS;
ws->zero_all_vram_allocs = debug_flags & RADV_DEBUG_ZERO_VRAM;
ws->batchchain = !(perftest_flags & RADV_PERFTEST_NO_BATCHCHAIN);
- LIST_INITHEAD(&ws->global_bo_list);
+ list_inithead(&ws->global_bo_list);
pthread_mutex_init(&ws->global_bo_list_lock, NULL);
ws->base.query_info = radv_amdgpu_winsys_query_info;
ws->base.query_value = radv_amdgpu_winsys_query_value;
pane->sort_items = sort_items;
pane->initial_max_value = max_value;
hud_pane_set_max_value(pane, max_value);
- LIST_INITHEAD(&pane->graph_list);
+ list_inithead(&pane->graph_list);
return pane;
}
hud->constbuf.buffer_size = sizeof(hud->constants);
hud->constbuf.user_buffer = &hud->constants;
- LIST_INITHEAD(&hud->pane_list);
+ list_inithead(&hud->pane_list);
/* setup sig handler once for all hud contexts */
#ifdef PIPE_OS_UNIX
fenced_mgr->max_buffer_size = max_buffer_size;
fenced_mgr->max_cpu_total_size = max_cpu_total_size;
- LIST_INITHEAD(&fenced_mgr->fenced);
+ list_inithead(&fenced_mgr->fenced);
fenced_mgr->num_fenced = 0;
- LIST_INITHEAD(&fenced_mgr->unfenced);
+ list_inithead(&fenced_mgr->unfenced);
fenced_mgr->num_unfenced = 0;
(void) mtx_init(&fenced_mgr->mutex, mtx_plain);
mgr->overflow_size = overflow_size;
(void) mtx_init(&mgr->mutex, mtx_plain);
- LIST_INITHEAD(&mgr->list);
+ list_inithead(&mgr->list);
return &mgr->base;
}
goto out_err1;
}
- LIST_INITHEAD(&slab->head);
- LIST_INITHEAD(&slab->freeBuffers);
+ list_inithead(&slab->head);
+ list_inithead(&slab->freeBuffers);
slab->numBuffers = numBuffers;
slab->numFree = 0;
slab->mgr = mgr;
mgr->slabSize = slabSize;
mgr->desc = *desc;
- LIST_INITHEAD(&mgr->slabs);
+ list_inithead(&mgr->slabs);
(void) mtx_init(&mgr->mutex, mtx_plain);
return;
for (i = 0; i < num_heaps; i++)
- LIST_INITHEAD(&mgr->buckets[i]);
+ list_inithead(&mgr->buckets[i]);
(void) mtx_init(&mgr->mutex, mtx_plain);
mgr->cache_size = 0;
slabs->slab_alloc = slab_alloc;
slabs->slab_free = slab_free;
- LIST_INITHEAD(&slabs->reclaim);
+ list_inithead(&slabs->reclaim);
num_groups = slabs->num_orders * slabs->num_heaps;
slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
for (i = 0; i < num_groups; ++i) {
struct pb_slab_group *group = &slabs->groups[i];
- LIST_INITHEAD(&group->slabs);
+ list_inithead(&group->slabs);
}
(void) mtx_init(&slabs->mutex, mtx_plain);
static inline void
util_dirty_surfaces_init(struct util_dirty_surfaces *ds)
{
- LIST_INITHEAD(&ds->dirty_list);
+ list_inithead(&ds->dirty_list);
}
static inline void
static inline void
util_dirty_surface_init(struct util_dirty_surface *ds)
{
- LIST_INITHEAD(&ds->dirty_list);
+ list_inithead(&ds->dirty_list);
}
static inline boolean
util_queue_fence_init(&tc->batch_slots[i].fence);
}
- LIST_INITHEAD(&tc->unflushed_queries);
+ list_inithead(&tc->unflushed_queries);
slab_create_child(&tc->pool_transfers, parent_transfer_pool);
(*fence)->screen = screen;
(*fence)->ref = 1;
- LIST_INITHEAD(&(*fence)->work);
+ list_inithead(&(*fence)->work);
return true;
}
return PIPE_ERROR_OUT_OF_MEMORY;
}
- LIST_INITHEAD(&slab->head);
+ list_inithead(&slab->head);
slab->cache = cache;
slab->order = chunk_order;
cache->allocated = 0;
for (i = 0; i < MM_NUM_BUCKETS; ++i) {
- LIST_INITHEAD(&cache->bucket[i].free);
- LIST_INITHEAD(&cache->bucket[i].used);
- LIST_INITHEAD(&cache->bucket[i].full);
+ list_inithead(&cache->bucket[i].free);
+ list_inithead(&cache->bucket[i].used);
+ list_inithead(&cache->bucket[i].full);
}
return cache;
if (ret)
FAIL_SCREEN_INIT("error creating query heap: %d\n", ret);
- LIST_INITHEAD(&screen->queries);
+ list_inithead(&screen->queries);
/* Vertex program resources (code/data), currently 6 of the constant
* slots are reserved to implement user clipping planes
if (!cf)
return NULL;
- LIST_INITHEAD(&cf->list);
- LIST_INITHEAD(&cf->alu);
- LIST_INITHEAD(&cf->vtx);
- LIST_INITHEAD(&cf->tex);
- LIST_INITHEAD(&cf->gds);
+ list_inithead(&cf->list);
+ list_inithead(&cf->alu);
+ list_inithead(&cf->vtx);
+ list_inithead(&cf->tex);
+ list_inithead(&cf->gds);
return cf;
}
if (!alu)
return NULL;
- LIST_INITHEAD(&alu->list);
+ list_inithead(&alu->list);
return alu;
}
if (!vtx)
return NULL;
- LIST_INITHEAD(&vtx->list);
+ list_inithead(&vtx->list);
return vtx;
}
if (!tex)
return NULL;
- LIST_INITHEAD(&tex->list);
+ list_inithead(&tex->list);
return tex;
}
if (gds == NULL)
return NULL;
- LIST_INITHEAD(&gds->list);
+ list_inithead(&gds->list);
return gds;
}
bc->r6xx_nop_after_rel_dst = 0;
}
- LIST_INITHEAD(&bc->cf);
+ list_inithead(&bc->cf);
bc->chip_class = chip_class;
bc->family = family;
bc->has_compressed_msaa_texturing = has_compressed_msaa_texturing;
free(alu);
}
- LIST_INITHEAD(&cf->alu);
+ list_inithead(&cf->alu);
LIST_FOR_EACH_ENTRY_SAFE(tex, next_tex, &cf->tex, list) {
free(tex);
}
- LIST_INITHEAD(&cf->tex);
+ list_inithead(&cf->tex);
LIST_FOR_EACH_ENTRY_SAFE(vtx, next_vtx, &cf->vtx, list) {
free(vtx);
}
- LIST_INITHEAD(&cf->vtx);
+ list_inithead(&cf->vtx);
LIST_FOR_EACH_ENTRY_SAFE(gds, next_gds, &cf->gds, list) {
free(gds);
}
- LIST_INITHEAD(&cf->gds);
+ list_inithead(&cf->gds);
free(cf);
}
- LIST_INITHEAD(&cf->list);
+ list_inithead(&cf->list);
}
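The hunks above all repeat one teardown idiom: walk a sub-list with the _SAFE iterator so the current node can be freed mid-walk, then reinitialize the head so it is left as a valid empty list instead of dangling into freed memory. A generic sketch of the idiom, using a hypothetical struct node and free_all() helper (only list_inithead() and LIST_FOR_EACH_ENTRY_SAFE come from util/list.h):

#include <stdlib.h>
#include "util/list.h"

struct node {
   struct list_head link;
   /* payload ... */
};

static void free_all(struct list_head *head)
{
   struct node *n, *tmp;

   /* The _SAFE variant caches the next pointer before the body runs,
    * so free(n) cannot corrupt the walk. */
   LIST_FOR_EACH_ENTRY_SAFE(n, tmp, head, link) {
      free(n);
   }

   /* Restore the sentinel: an empty list points back at itself. */
   list_inithead(head);
}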
static int print_swizzle(unsigned swz)
goto fail;
rctx->screen = rscreen;
- LIST_INITHEAD(&rctx->texture_buffers);
+ list_inithead(&rctx->texture_buffers);
r600_init_blit_functions(rctx);
if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
rctx->b.render_condition = r600_render_condition;
- LIST_INITHEAD(&rctx->active_queries);
+ list_inithead(&rctx->active_queries);
}
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
unsigned i;
- LIST_INITHEAD(&enc->cpb_slots);
+ list_inithead(&enc->cpb_slots);
for (i = 0; i < enc->cpb_num; ++i) {
struct rvce_cpb_slot *slot = &enc->cpb_array[i];
slot->index = i;
{
unsigned i;
- LIST_INITHEAD(&enc->cpb_slots);
+ list_inithead(&enc->cpb_slots);
for (i = 0; i < enc->cpb_num; ++i) {
struct rvce_cpb_slot *slot = &enc->cpb_array[i];
slot->index = i;
void gfx10_init_query(struct si_context *sctx)
{
- LIST_INITHEAD(&sctx->shader_query_buffers);
+ list_inithead(&sctx->shader_query_buffers);
sctx->atoms.s.shader_query.emit = emit_shader_query;
}
sctx->b.render_condition = si_render_condition;
}
- LIST_INITHEAD(&sctx->active_queries);
+ list_inithead(&sctx->active_queries);
}
void si_init_screen_query_functions(struct si_screen *sscreen)
if (!svga)
goto done;
- LIST_INITHEAD(&svga->dirty_buffers);
+ list_inithead(&svga->dirty_buffers);
svga->pipe.screen = screen;
svga->pipe.priv = priv;
sbuf->b.b.screen = screen;
bind_flags = template->bind & ~PIPE_BIND_CUSTOM;
- LIST_INITHEAD(&sbuf->surfaces);
+ list_inithead(&sbuf->surfaces);
if (bind_flags & PIPE_BIND_CONSTANT_BUFFER) {
/* Constant buffers can only have the PIPE_BIND_CONSTANT_BUFFER
(void) mtx_init(&cache->mutex, mtx_plain);
for (i = 0; i < SVGA_HOST_SURFACE_CACHE_BUCKETS; ++i)
- LIST_INITHEAD(&cache->bucket[i]);
+ list_inithead(&cache->bucket[i]);
- LIST_INITHEAD(&cache->unused);
- LIST_INITHEAD(&cache->validated);
- LIST_INITHEAD(&cache->invalidated);
- LIST_INITHEAD(&cache->empty);
+ list_inithead(&cache->unused);
+ list_inithead(&cache->validated);
+ list_inithead(&cache->invalidated);
+ list_inithead(&cache->empty);
for (i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i)
LIST_ADDTAIL(&cache->entries[i].head, &cache->empty);
priv->EndFrame = vid_dec_h264_EndFrame;
priv->Flush = vid_dec_h264_Flush;
- LIST_INITHEAD(&priv->codec_data.h264.dpb_list);
+ list_inithead(&priv->codec_data.h264.dpb_list);
priv->picture.h264.field_order_cnt[0] = priv->picture.h264.field_order_cnt[1] = INT_MAX;
priv->first_buf_in_frame = true;
}
{
priv->picture.base.profile = PIPE_VIDEO_PROFILE_HEVC_MAIN;
- LIST_INITHEAD(&priv->codec_data.h265.dpb_list);
+ list_inithead(&priv->codec_data.h265.dpb_list);
priv->codec_data.h265.ref_pic_set_list = (struct ref_pic_set *)
CALLOC(MAX_NUM_REF_PICS, sizeof(struct ref_pic_set));
priv->scale.xWidth = OMX_VID_ENC_SCALING_WIDTH_DEFAULT;
priv->scale.xHeight = OMX_VID_ENC_SCALING_WIDTH_DEFAULT;
- LIST_INITHEAD(&priv->free_tasks);
- LIST_INITHEAD(&priv->used_tasks);
- LIST_INITHEAD(&priv->b_frames);
- LIST_INITHEAD(&priv->stacked_tasks);
+ list_inithead(&priv->free_tasks);
+ list_inithead(&priv->used_tasks);
+ list_inithead(&priv->b_frames);
+ list_inithead(&priv->stacked_tasks);
return OMX_ErrorNone;
}
return OMX_ErrorInsufficientResources;
}
- LIST_INITHEAD(&inp->tasks);
+ list_inithead(&inp->tasks);
FREE((*buf)->pBuffer);
r = enc_AllocateBackTexture(port, &inp->resource, &inp->transfer, &(*buf)->pBuffer);
return OMX_ErrorInsufficientResources;
}
- LIST_INITHEAD(&inp->tasks);
+ list_inithead(&inp->tasks);
return OMX_ErrorNone;
}
return OMX_ErrorInsufficientResources;
}
- LIST_INITHEAD(&priv->codec_data.h264.dpb_list);
+ list_inithead(&priv->codec_data.h264.dpb_list);
priv->video_buffer_map = util_hash_table_create(handle_hash, handle_compare);
return OMX_ErrorInsufficientResources;
}
- LIST_INITHEAD(&inp->tasks);
+ list_inithead(&inp->tasks);
r = enc_AllocateBackTexture(ap_hdl, idx, &inp->resource, &inp->transfer, &(*buf)->pBuffer);
return OMX_ErrorInsufficientResources;
}
- LIST_INITHEAD(&inp->tasks);
+ list_inithead(&inp->tasks);
return OMX_ErrorNone;
}
if (!priv->t_pipe)
return OMX_ErrorInsufficientResources;
- LIST_INITHEAD(&priv->free_tasks);
- LIST_INITHEAD(&priv->used_tasks);
- LIST_INITHEAD(&priv->b_frames);
- LIST_INITHEAD(&priv->stacked_tasks);
+ list_inithead(&priv->free_tasks);
+ list_inithead(&priv->used_tasks);
+ list_inithead(&priv->b_frames);
+ list_inithead(&priv->stacked_tasks);
return OMX_ErrorNone;
}
from->next->prev = to->prev;
from->prev->next = to;
to->prev = from->prev;
- LIST_INITHEAD(from);
+ list_inithead(from);
}
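The function above splices every node of one list onto the tail of another, and the final list_inithead(from) is what leaves the source head immediately reusable as an empty list. A standalone sketch of that splice under a hypothetical name, move_all() (the empty-source guard is my addition; splicing an empty list without it would corrupt the destination's links):

#include "util/list.h"

static void move_all(struct list_head *from, struct list_head *to)
{
   if (from->next == from)      /* `from` is empty: nothing to move */
      return;

   to->prev->next = from->next; /* old tail of `to` -> first node of `from` */
   from->next->prev = to->prev; /* first node of `from` -> old tail of `to` */
   from->prev->next = to;       /* last node of `from` -> sentinel of `to`  */
   to->prev = from->prev;       /* sentinel of `to` -> last node of `from`  */

   list_inithead(from);         /* reset `from` to a valid empty list */
}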
static void enc_GetPictureParamPreset(struct pipe_h264_enc_picture_desc *picture)
if (!slab->entries)
goto fail_buffer;
- LIST_INITHEAD(&slab->base.free);
+ list_inithead(&slab->base.free);
base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
if (!bo->u.sparse.commitments)
goto error_alloc_commitments;
- LIST_INITHEAD(&bo->u.sparse.backing);
+ list_inithead(&bo->u.sparse.backing);
/* For simplicity, we always map a multiple of the page size. */
map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
/* init reference */
pipe_reference_init(&aws->reference, 1);
- LIST_INITHEAD(&aws->global_bo_list);
+ list_inithead(&aws->global_bo_list);
aws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);
(void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
if (!slab->entries)
goto fail_buffer;
- LIST_INITHEAD(&slab->base.free);
+ list_inithead(&slab->base.free);
base_hash = __sync_fetch_and_add(&ws->next_bo_hash, slab->base.num_entries);
fenced_mgr->provider = provider;
fenced_mgr->ops = ops;
- LIST_INITHEAD(&fenced_mgr->fenced);
+ list_inithead(&fenced_mgr->fenced);
fenced_mgr->num_fenced = 0;
- LIST_INITHEAD(&fenced_mgr->unfenced);
+ list_inithead(&fenced_mgr->unfenced);
fenced_mgr->num_unfenced = 0;
(void) mtx_init(&fenced_mgr->mutex, mtx_plain);
if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
p_atomic_set(&fence->signalled, 1);
- LIST_INITHEAD(&fence->ops_list);
+ list_inithead(&fence->ops_list);
} else {
p_atomic_set(&fence->signalled, 0);
LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
return NULL;
(void) mtx_init(&ops->mutex, mtx_plain);
- LIST_INITHEAD(&ops->not_signaled);
+ list_inithead(&ops->not_signaled);
ops->base.destroy = &vmw_fence_ops_destroy;
ops->base.fence_reference = &vmw_fence_ops_fence_reference;
ops->base.fence_signalled = &vmw_fence_ops_fence_signalled;
virgl_resource_cache_entry_release_func destroy_func,
void *user_data)
{
- LIST_INITHEAD(&cache->resources);
+ list_inithead(&cache->resources);
cache->timeout_usecs = timeout_usecs;
cache->entry_is_busy_func = is_busy_func;
cache->entry_release_func = destroy_func;
st_init_driver_flags(st);
/* Initialize context's winsys buffers list */
- LIST_INITHEAD(&st->winsys_buffers);
- LIST_INITHEAD(&st->zombie_sampler_views.list.node);
+ list_inithead(&st->winsys_buffers);
+ list_inithead(&st->zombie_sampler_views.list.node);
simple_mtx_init(&st->zombie_sampler_views.mutex, mtx_plain);
- LIST_INITHEAD(&st->zombie_shaders.list.node);
+ list_inithead(&st->zombie_shaders.list.node);
simple_mtx_init(&st->zombie_shaders.mutex, mtx_plain);
return st;
assert(node->next->prev == node && node->prev->next == node);
}
-#define LIST_INITHEAD(__item) list_inithead(__item)
#define LIST_ADD(__item, __list) list_add(__item, __list)
#define LIST_ADDTAIL(__item, __list) list_addtail(__item, __list)
#define LIST_REPLACE(__from, __to) list_replace(__from, __to)
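For context, these wrappers (including the LIST_INITHEAD one being removed) are thin aliases for inline functions in src/util/list.h, and this series simply calls the functions directly. A minimal sketch of the underlying pieces, as I understand the header at the time of this change:

struct list_head {
   struct list_head *prev;
   struct list_head *next;
};

/* An "empty" list is a sentinel whose links point back at itself;
 * establishing that invariant is all any hunk in this series does. */
static inline void list_inithead(struct list_head *item)
{
   item->prev = item;
   item->next = item;
}

static inline void list_addtail(struct list_head *item, struct list_head *list)
{
   item->next = list;
   item->prev = list->prev;
   list->prev->next = item;
   list->prev = item;
}

The assert in the context above, node->next->prev == node && node->prev->next == node, checks exactly this linkage invariant for each node.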
static void
global_init(void)
{
- LIST_INITHEAD(&queue_list);
+ list_inithead(&queue_list);
atexit(atexit_handler);
}
data->flags = family_props->queueFlags;
data->timestamp_mask = (1ull << family_props->timestampValidBits) - 1;
data->family_index = family_index;
- LIST_INITHEAD(&data->running_command_buffer);
+ list_inithead(&data->running_command_buffer);
map_object(HKEY(data->queue), data);
/* Fence synchronizing access to queries on that queue. */