struct etna_bo_bucket *bucket = &cache->cache_bucket[i];
struct etna_bo *bo;
- while (!LIST_IS_EMPTY(&bucket->list)) {
+ while (!list_is_empty(&bucket->list)) {
bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);
/* keep things in cache for at least 1 second: */
pthread_mutex_lock(&etna_drm_table_lock);
- if (LIST_IS_EMPTY(&bucket->list))
+ if (list_is_empty(&bucket->list))
goto out_unlock;
LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &bucket->list, list) {
struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
struct fd_bo *bo;
- while (!LIST_IS_EMPTY(&bucket->list)) {
+ while (!list_is_empty(&bucket->list)) {
bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
/* keep things in cache for at least 1 second: */
* (MRU, since likely to be in GPU cache), rather than head (LRU)..
*/
pthread_mutex_lock(&table_lock);
- if (!LIST_IS_EMPTY(&bucket->list)) {
+ if (!list_is_empty(&bucket->list)) {
bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
/* TODO check for compatible flags? */
if (is_idle(bo)) {
env += num;
strip_hyphens(s);
- if (added && !LIST_IS_EMPTY(&pane->graph_list)) {
+ if (added && !list_is_empty(&pane->graph_list)) {
struct hud_graph *graph;
graph = LIST_ENTRY(struct hud_graph, pane->graph_list.prev, head);
strncpy(graph->name, s, sizeof(graph->name)-1);
#if 0
mtx_lock(&mgr->mutex);
debug_printf("%s: failed to create buffer\n", __FUNCTION__);
- if(!LIST_IS_EMPTY(&mgr->list))
+ if(!list_is_empty(&mgr->list))
pb_debug_manager_dump_locked(mgr);
mtx_unlock(&mgr->mutex);
#endif
struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
mtx_lock(&mgr->mutex);
- if(!LIST_IS_EMPTY(&mgr->list)) {
+ if(!list_is_empty(&mgr->list)) {
debug_printf("%s: unfreed buffers\n", __FUNCTION__);
pb_debug_manager_dump_locked(mgr);
}
static void
pb_slabs_reclaim_locked(struct pb_slabs *slabs)
{
- while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+ while (!list_is_empty(&slabs->reclaim)) {
struct pb_slab_entry *entry =
LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
/* If there is no candidate slab at all, or the first slab has no free
* entries, try reclaiming entries.
*/
- if (LIST_IS_EMPTY(&group->slabs) ||
- LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
+ if (list_is_empty(&group->slabs) ||
+ list_is_empty(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
pb_slabs_reclaim_locked(slabs);
/* Remove slabs without free entries. */
- while (!LIST_IS_EMPTY(&group->slabs)) {
+ while (!list_is_empty(&group->slabs)) {
slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
- if (!LIST_IS_EMPTY(&slab->free))
+ if (!list_is_empty(&slab->free))
break;
list_del(&slab->head);
}
- if (LIST_IS_EMPTY(&group->slabs)) {
+ if (list_is_empty(&group->slabs)) {
/* Drop the mutex temporarily to prevent a deadlock where the allocation
* calls back into slab functions (most likely to happen for
* pb_slab_reclaim if memory is low).
/* Reclaim all slab entries (even those that are still in flight). This
* implicitly calls slab_free for everything.
*/
- while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+ while (!list_is_empty(&slabs->reclaim)) {
struct pb_slab_entry *entry =
LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
pb_slab_reclaim(slabs, entry);
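
Most of the converted call sites follow one of two shapes: guard a single first-element fetch with a list_is_empty() check, or drain a list in a while loop as above. A minimal sketch of the drain shape (reclaim_one() is a hypothetical per-entry callback, not part of this patch):

    while (!list_is_empty(&slabs->reclaim)) {
       struct pb_slab_entry *entry =
          LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
       /* reclaim_one() must unlink 'entry', or this loop never terminates */
       reclaim_one(entry);
    }

The emptiness check has to come first: the list head is a bare sentinel embedded in no entry, so applying LIST_ENTRY() to it would produce a bogus entry pointer.
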
static inline void
util_dirty_surfaces_use_for_sampling_with(struct pipe_context *pipe, struct util_dirty_surfaces *dss, struct pipe_sampler_view *psv, struct pipe_sampler_state *pss, util_dirty_surface_flush_t flush)
{
- if(!LIST_IS_EMPTY(&dss->dirty_list))
+ if(!list_is_empty(&dss->dirty_list))
util_dirty_surfaces_use_levels_for_sampling(pipe, dss, (unsigned)pss->min_lod + psv->u.tex.first_level,
MIN2((unsigned)ceilf(pss->max_lod) + psv->u.tex.first_level, psv->u.tex.last_level), flush);
}
static inline boolean
util_dirty_surface_is_dirty(struct util_dirty_surface *ds)
{
- return !LIST_IS_EMPTY(&ds->dirty_list);
+ return !list_is_empty(&ds->dirty_list);
}
static inline void
util_dirty_surface_set_dirty(struct util_dirty_surfaces *dss, struct util_dirty_surface *ds)
{
- if(LIST_IS_EMPTY(&ds->dirty_list))
+ if(list_is_empty(&ds->dirty_list))
list_addtail(&ds->dirty_list, &dss->dirty_list);
}
static inline void
util_dirty_surface_set_clean(struct util_dirty_surfaces *dss, struct util_dirty_surface *ds)
{
- if(!LIST_IS_EMPTY(&ds->dirty_list))
+ if(!list_is_empty(&ds->dirty_list))
list_delinit(&ds->dirty_list);
}
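
These u_dirty_surfaces helpers rely on a util/list.h convention: a node initialized with list_inithead() links to itself, so list_is_empty() on the node doubles as an "is this surface on a dirty list?" test. A minimal sketch of the idiom, assuming the list_inithead()/list_addtail()/list_delinit() helpers from util/list.h:

    struct util_dirty_surfaces dss;
    struct util_dirty_surface ds;
    list_inithead(&dss.dirty_list);
    list_inithead(&ds.dirty_list);          /* node points at itself */
    assert(list_is_empty(&ds.dirty_list));  /* i.e. not on any dirty list */
    list_addtail(&ds.dirty_list, &dss.dirty_list);
    assert(!list_is_empty(&ds.dirty_list)); /* now linked */
    list_delinit(&ds.dirty_list);           /* unlink and re-link to self */
    assert(list_is_empty(&ds.dirty_list));

This is why util_dirty_surface_set_dirty() tests list_is_empty() before list_addtail() (no double insertion) and util_dirty_surface_set_clean() uses list_delinit() rather than a plain list_del() (the membership flag stays meaningful afterwards).
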
struct etna_resource *rsc = etna_resource(hq->prsc);
const struct etna_hw_sample_provider *p = hq->provider;
- assert(LIST_IS_EMPTY(&hq->node));
+ assert(list_is_empty(&hq->node));
if (!wait) {
int ret;
DBG("%p: wait=%d, active=%d", q, wait, q->active);
- assert(LIST_IS_EMPTY(&aq->node));
+ assert(list_is_empty(&aq->node));
/* if !wait, then check the last sample (the one most likely to
* not be ready yet) and bail if it is not ready:
DBG("%p: wait=%d, active=%d", q, wait, q->active);
- if (LIST_IS_EMPTY(&hq->periods))
+ if (list_is_empty(&hq->periods))
return true;
- assert(LIST_IS_EMPTY(&hq->list));
+ assert(list_is_empty(&hq->list));
assert(!hq->period);
/* if !wait, then check the last sample (the one most likely to
}
}
- if (!LIST_IS_EMPTY(&fence->work)) {
+ if (!list_is_empty(&fence->work)) {
debug_printf("WARNING: deleting fence with work still pending !\n");
nouveau_fence_trigger_work(fence);
}
return NULL;
}
- if (!LIST_IS_EMPTY(&bucket->used)) {
+ if (!list_is_empty(&bucket->used)) {
slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
} else {
- if (LIST_IS_EMPTY(&bucket->free)) {
+ if (list_is_empty(&bucket->free)) {
mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
}
slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
return;
for (i = 0; i < MM_NUM_BUCKETS; ++i) {
- if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
- !LIST_IS_EMPTY(&cache->bucket[i].full))
+ if (!list_is_empty(&cache->bucket[i].used) ||
+ !list_is_empty(&cache->bucket[i].full))
debug_printf("WARNING: destroying GPU memory cache "
"with some buffers still in use\n");
void r600_preflush_suspend_features(struct r600_common_context *ctx)
{
/* suspend queries */
- if (!LIST_IS_EMPTY(&ctx->active_queries))
+ if (!list_is_empty(&ctx->active_queries))
r600_suspend_queries(ctx);
ctx->streamout.suspended = false;
}
/* resume queries */
- if (!LIST_IS_EMPTY(&ctx->active_queries))
+ if (!list_is_empty(&ctx->active_queries))
r600_resume_queries(ctx);
}
static void emit_shader_query(struct si_context *sctx)
{
- assert(!LIST_IS_EMPTY(&sctx->shader_query_buffers));
+ assert(!list_is_empty(&sctx->shader_query_buffers));
struct gfx10_sh_query_buffer *qbuf = list_last_entry(&sctx->shader_query_buffers,
struct gfx10_sh_query_buffer, list);
struct gfx10_sh_query_buffer *qbuf = NULL;
- if (!LIST_IS_EMPTY(&sctx->shader_query_buffers)) {
+ if (!list_is_empty(&sctx->shader_query_buffers)) {
qbuf = list_last_entry(&sctx->shader_query_buffers,
struct gfx10_sh_query_buffer, list);
if (qbuf->head + sizeof(struct gfx10_sh_query_buffer_mem) <= qbuf->buf->b.b.width0)
void gfx10_destroy_query(struct si_context *sctx)
{
- while (!LIST_IS_EMPTY(&sctx->shader_query_buffers)) {
+ while (!list_is_empty(&sctx->shader_query_buffers)) {
struct gfx10_sh_query_buffer *qbuf =
list_first_entry(&sctx->shader_query_buffers,
struct gfx10_sh_query_buffer, list);
}
if (ctx->has_graphics) {
- if (!LIST_IS_EMPTY(&ctx->active_queries))
+ if (!list_is_empty(&ctx->active_queries))
si_suspend_queries(ctx);
ctx->streamout.suspended = false;
si_streamout_buffers_dirty(ctx);
}
- if (!LIST_IS_EMPTY(&ctx->active_queries))
+ if (!list_is_empty(&ctx->active_queries))
si_resume_queries(ctx);
assert(!ctx->gfx_cs->prev_dw);
}
}
- if (!LIST_IS_EMPTY(&cache->empty)) {
+ if (!list_is_empty(&cache->empty)) {
/* An empty entry has no surface associated with it.
* Use the first empty entry.
*/
/* Remove from LRU list */
list_del(&entry->head);
}
- else if (!LIST_IS_EMPTY(&cache->unused)) {
+ else if (!list_is_empty(&cache->unused)) {
/* free the last used buffer and reuse its entry */
entry = LIST_ENTRY(struct svga_host_surface_cache_entry,
cache->unused.prev, head);
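
Several of these caches lean on list ordering. Assuming entries are appended with list_addtail(), the element at list.next is the oldest and the one at list.prev the most recently added, which is what lets a cache take recently freed buffers from the tail (MRU, likely still warm in the GPU cache) while eviction walks from the other end. A small sketch of that assumption ('struct entry' and 'lru_list' are hypothetical):

    /* With list_addtail(), 'next' of the head is the oldest entry
     * and 'prev' the newest. */
    list_addtail(&a.head, &lru_list);   /* a is now the oldest */
    list_addtail(&b.head, &lru_list);   /* b is the newest */
    struct entry *oldest = LIST_ENTRY(struct entry, lru_list.next, head); /* &a */
    struct entry *newest = LIST_ENTRY(struct entry, lru_list.prev, head); /* &b */
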
0 : This->base.info.last_level;
This->managed.lod = MIN2(LODNew, max_level);
- if (This->managed.lod != old && This->bind_count && LIST_IS_EMPTY(&This->list))
+ if (This->managed.lod != old && This->bind_count && list_is_empty(&This->list))
list_add(&This->list, &This->base.base.device->update_textures);
return old;
struct NineBaseTexture9 *old = *slot;
if (tex) {
- if ((tex->managed.dirty | tex->dirty_mip) && LIST_IS_EMPTY(&tex->list))
+ if ((tex->managed.dirty | tex->dirty_mip) && list_is_empty(&tex->list))
list_add(&tex->list, &device->update_textures);
tex->bind_count++;
#define BASETEX_REGISTER_UPDATE(t) do { \
if (((t)->managed.dirty | ((t)->dirty_mip)) && (t)->bind_count) \
- if (LIST_IS_EMPTY(&(t)->list)) \
+ if (list_is_empty(&(t)->list)) \
list_add(&(t)->list, &(t)->base.base.device->update_textures); \
} while(0)
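
BASETEX_REGISTER_UPDATE wraps its body in do { ... } while(0), the standard trick for making a multi-statement macro behave as a single statement; note that BASEBUF_REGISTER_UPDATE below uses a bare block instead. A minimal sketch of why the bare-block form can bite (register_one() is hypothetical):

    #define REGISTER_BLOCK(x) { register_one(x); }           /* bare block */
    #define REGISTER_SAFE(x)  do { register_one(x); } while (0)

    if (cond)
       REGISTER_BLOCK(t);   /* the trailing ';' terminates the 'if'... */
    else                    /* ...so this 'else' no longer parses */
       other();

    if (cond)
       REGISTER_SAFE(t);    /* fine: expands to a single statement */
    else
       other();
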
/* Tests on Win: READONLY doesn't wait for the upload */
if (!(Flags & D3DLOCK_READONLY)) {
if (!This->managed.dirty) {
- assert(LIST_IS_EMPTY(&This->managed.list));
+ assert(list_is_empty(&This->managed.list));
This->managed.dirty = TRUE;
This->managed.dirty_box = box;
if (p_atomic_read(&This->managed.pending_upload))
struct NineBuffer9 *old = *slot;
if (buf) {
- if ((buf->managed.dirty) && LIST_IS_EMPTY(&buf->managed.list))
+ if ((buf->managed.dirty) && list_is_empty(&buf->managed.list))
list_add(&buf->managed.list, &device->update_buffers);
buf->bind_count++;
}
#define BASEBUF_REGISTER_UPDATE(b) { \
if ((b)->managed.dirty && (b)->bind_count) \
- if (LIST_IS_EMPTY(&(b)->managed.list)) \
+ if (list_is_empty(&(b)->managed.list)) \
list_add(&(b)->managed.list, &(b)->base.base.device->update_buffers); \
}
vid_enc_PrivateType *priv = comp->pComponentPrivate;
struct encode_task *task;
- if (LIST_IS_EMPTY(&priv->b_frames))
+ if (list_is_empty(&priv->b_frames))
return;
task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
enc_MoveTasks(&priv->b_frames, &inp->tasks);
}
- if (LIST_IS_EMPTY(&inp->tasks))
+ if (list_is_empty(&inp->tasks))
return port->ReturnBufferFunction(port, buf);
else
return base_port_SendBufferFunction(port, buf);
{
struct encode_task *task;
- if (LIST_IS_EMPTY(&priv->b_frames))
+ if (list_is_empty(&priv->b_frames))
return;
task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
enc_MoveTasks(&priv->b_frames, &inp->tasks);
}
- if (LIST_IS_EMPTY(&inp->tasks)) {
+ if (list_is_empty(&inp->tasks)) {
return h264e_buffer_emptied(priv, in_buf);
} else {
return h264e_manage_buffers(priv);
unsigned size;
#if ENABLE_ST_OMX_BELLAGIO
- if (!inp || LIST_IS_EMPTY(&inp->tasks)) {
+ if (!inp || list_is_empty(&inp->tasks)) {
input->nFilledLen = 0; /* mark buffer as empty */
enc_MoveTasks(&priv->used_tasks, &inp->tasks);
return;
struct pipe_video_buffer templat = {};
struct encode_task *task;
- if (!LIST_IS_EMPTY(&priv->free_tasks)) {
+ if (!list_is_empty(&priv->free_tasks)) {
task = LIST_ENTRY(struct encode_task, priv->free_tasks.next, list);
list_del(&task->list);
return task;
if ((va + size) == heap->start) {
heap->start = va;
/* Delete uppermost hole if it reaches the new top */
- if (!LIST_IS_EMPTY(&heap->holes)) {
+ if (!list_is_empty(&heap->holes)) {
hole = container_of(heap->holes.next, hole, list);
if ((hole->offset + hole->size) == va) {
heap->start = hole->offset;
{
struct st_zombie_sampler_view_node *entry, *next;
- if (LIST_IS_EMPTY(&st->zombie_sampler_views.list.node)) {
+ if (list_is_empty(&st->zombie_sampler_views.list.node)) {
return;
}
free(entry);
}
- assert(LIST_IS_EMPTY(&st->zombie_sampler_views.list.node));
+ assert(list_is_empty(&st->zombie_sampler_views.list.node));
simple_mtx_unlock(&st->zombie_sampler_views.mutex);
}
{
struct st_zombie_shader_node *entry, *next;
- if (LIST_IS_EMPTY(&st->zombie_shaders.list.node)) {
+ if (list_is_empty(&st->zombie_shaders.list.node)) {
return;
}
free(entry);
}
- assert(LIST_IS_EMPTY(&st->zombie_shaders.list.node));
+ assert(list_is_empty(&st->zombie_shaders.list.node));
simple_mtx_unlock(&st->zombie_shaders.mutex);
}
#define LIST_ENTRY(__type, __item, __field) \
((__type *)(((char *)(__item)) - offsetof(__type, __field)))
-#define LIST_IS_EMPTY(__list) \
- ((__list)->next == (__list))
-
/**
* Cast from a pointer to a member of a struct back to the containing struct.
*
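
For reference, the macro removed above already has an inline replacement in util/list.h; it is equivalent to the following sketch (struct list_head as defined in Mesa):

    #include <stdbool.h>

    struct list_head {
       struct list_head *prev;
       struct list_head *next;
    };

    /* A list is empty when its sentinel head points back at itself,
     * exactly what the deleted LIST_IS_EMPTY() macro tested. */
    static inline bool list_is_empty(const struct list_head *list)
    {
       return list->next == list;
    }

One practical benefit of the function form is type checking: it only accepts a struct list_head pointer, whereas the macro compiled against anything with a ->next member.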