for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
- if (!list_empty(&pool->free_cmd_buffers)) {
+ if (!list_is_empty(&pool->free_cmd_buffers)) {
struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
list_del(&cmd_buffer->pool_link);
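This first hunk shows the test-then-pop idiom the rename touches over and over: guard with list_is_empty(), fetch the head with list_first_entry(), then unlink it with list_del(). A minimal sketch of the idiom (cached_item and pop_cached are hypothetical names, not part of this change):
struct cached_item {
   struct list_head pool_link;
};

static struct cached_item *
pop_cached(struct list_head *free_list)
{
   if (list_is_empty(free_list))
      return NULL;   /* cache empty; caller allocates a fresh one */
   struct cached_item *item =
      list_first_entry(free_list, struct cached_item, pool_link);
   list_del(&item->pool_link);   /* the caller now owns the entry */
   return item;
}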
struct qreg result)
{
struct qinst *last_inst = NULL;
- if (!list_empty(&c->cur_block->instructions))
+ if (!list_is_empty(&c->cur_block->instructions))
last_inst = (struct qinst *)c->cur_block->instructions.prev;
assert((result.file == QFILE_TEMP &&
const struct v3d_device_info *devinfo = c->devinfo;
uint32_t time = 0;
- while (!list_empty(&scoreboard->dag->heads)) {
+ while (!list_is_empty(&scoreboard->dag->heads)) {
struct schedule_node *chosen =
choose_instruction_to_schedule(devinfo,
scoreboard,
list_inithead(&setup_list);
/* Wrap each instruction in a scheduler structure. */
- while (!list_empty(&block->instructions)) {
+ while (!list_is_empty(&block->instructions)) {
struct qinst *qinst = (struct qinst *)block->instructions.next;
struct schedule_node *n =
rzalloc(mem_ctx, struct schedule_node);
c->cursor.link = NULL;
vir_for_each_block(block, c) {
- while (!list_empty(&block->instructions)) {
+ while (!list_is_empty(&block->instructions)) {
struct qinst *qinst =
list_first_entry(&block->instructions,
struct qinst, link);
{
if (dest->is_ssa) {
/* We can only overwrite an SSA destination if it has no uses. */
- assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
+ assert(list_is_empty(&dest->ssa.uses) && list_is_empty(&dest->ssa.if_uses));
} else {
list_del(&dest->reg.def_link);
if (dest->reg.indirect)
}
}
- if (!list_empty(&def->if_uses))
+ if (!list_is_empty(&def->if_uses))
read_mask |= 1;
return read_mask;
list_for_each_entry_safe(nir_src, use_src, &old_if_uses, use_link)
nir_if_rewrite_condition(use_src->parent_if, new_src);
- if (list_empty(&old_def->uses) && list_empty(&old_def->if_uses)) {
+ if (list_is_empty(&old_def->uses) && list_is_empty(&old_def->if_uses)) {
iter = nir_instr_remove(instr);
} else {
iter = nir_after_instr(instr);
list_addtail(&src->src.use_link, &src->src.reg.reg->uses);
}
}
- assert(list_empty(&state->phi_srcs));
+ assert(list_is_empty(&state->phi_srcs));
}
void
clone_reg_list(state, &nfi->registers, &fi->registers);
nfi->reg_alloc = fi->reg_alloc;
- assert(list_empty(&state->phi_srcs));
+ assert(list_is_empty(&state->phi_srcs));
clone_cf_list(state, &nfi->body, &fi->body);
for (nir_deref_instr *d = instr; d; d = nir_deref_instr_parent(d)) {
/* If anyone is using this deref, leave it alone */
assert(d->dest.is_ssa);
- if (!list_empty(&d->dest.ssa.uses))
+ if (!list_is_empty(&d->dest.ssa.uses))
break;
nir_instr_remove(&d->instr);
}
/* if_uses of a cast would be a bit crazy */
- assert(list_empty(&cast->dest.ssa.if_uses));
+ assert(list_is_empty(&cast->dest.ssa.if_uses));
nir_deref_instr_remove_if_unused(cast);
return progress;
}
nir_ssa_def_rewrite_uses(def, nir_src_for_reg(reg));
- assert(list_empty(&def->uses) && list_empty(&def->if_uses));
+ assert(list_is_empty(&def->uses) && list_is_empty(&def->if_uses));
if (def->parent_instr->type == nir_instr_type_ssa_undef) {
/* If it's an ssa_undef instruction, remove it since we know we just got
}
}
- if (!list_empty(&def->if_uses))
+ if (!list_is_empty(&def->if_uses))
return false;
return true;
* one deref which could break our list walking since we walk the list
* backwards.
*/
- assert(list_empty(&deref->dest.ssa.if_uses));
- if (list_empty(&deref->dest.ssa.uses)) {
+ assert(list_is_empty(&deref->dest.ssa.if_uses));
+ if (list_is_empty(&deref->dest.ssa.uses)) {
nir_instr_remove(&deref->instr);
return;
}
nir_foreach_register_safe(reg, &impl->registers) {
if (state.values[reg->index]) {
- assert(list_empty(&reg->uses));
- assert(list_empty(&reg->if_uses));
- assert(list_empty(&reg->defs));
+ assert(list_is_empty(&reg->uses));
+ assert(list_is_empty(&reg->if_uses));
+ assert(list_is_empty(&reg->defs));
exec_node_remove(&reg->node);
}
}
alu->src[i].swizzle[j] = parent->src[0].swizzle[alu->src[i].swizzle[j]];
}
- if (list_empty(&parent->dest.dest.ssa.uses) &&
- list_empty(&parent->dest.dest.ssa.if_uses))
+ if (list_is_empty(&parent->dest.dest.ssa.uses) &&
+ list_is_empty(&parent->dest.dest.ssa.if_uses))
nir_instr_remove(&parent->instr);
progress = true;
if (!(options & nir_lower_float_source_mods))
continue;
- if (!list_empty(&alu->dest.dest.ssa.if_uses))
+ if (!list_is_empty(&alu->dest.dest.ssa.if_uses))
continue;
bool all_children_are_sat = true;
return 0;
}
- if (!list_empty(&vec->src[start_idx].src.ssa->if_uses))
+ if (!list_is_empty(&vec->src[start_idx].src.ssa->if_uses))
return 0;
if (vec->src[start_idx].src.ssa->parent_instr->type != nir_instr_type_alu)
alloc_combined_store(struct combine_stores_state *state)
{
struct combined_store *result;
- if (list_empty(&state->freelist)) {
+ if (list_is_empty(&state->freelist)) {
result = linear_zalloc_child(state->lin_ctx, sizeof(*result));
} else {
result = list_first_entry(&state->freelist,
static bool
wrapper_unroll(nir_loop *loop)
{
- if (!list_empty(&loop->info->loop_terminator_list)) {
+ if (!list_is_empty(&loop->info->loop_terminator_list)) {
/* Unrolling a loop with a large number of exits can result in a
* large increase in register pressure. For now we just skip
return false;
/* It cannot have any if-uses */
- if (!list_empty(&mov->dest.dest.ssa.if_uses))
+ if (!list_is_empty(&mov->dest.dest.ssa.if_uses))
return false;
/* The only uses of this definition must be phis in the successor */
nir_if_rewrite_condition(src->parent_if, nir_src_for_ssa(new_alu1));
}
- assert(list_empty(&alu1->dest.dest.ssa.uses));
- assert(list_empty(&alu1->dest.dest.ssa.if_uses));
+ assert(list_is_empty(&alu1->dest.dest.ssa.uses));
+ assert(list_is_empty(&alu1->dest.dest.ssa.if_uses));
nir_foreach_use_safe(src, &alu2->dest.dest.ssa) {
if (src->parent_instr->type == nir_instr_type_alu) {
nir_if_rewrite_condition(src->parent_if, nir_src_for_ssa(new_alu2));
}
- assert(list_empty(&alu2->dest.dest.ssa.uses));
- assert(list_empty(&alu2->dest.dest.ssa.if_uses));
+ assert(list_is_empty(&alu2->dest.dest.ssa.uses));
+ assert(list_is_empty(&alu2->dest.dest.ssa.if_uses));
nir_instr_remove(instr1);
nir_instr_remove(instr2);
static inline bool
is_used_once(nir_alu_instr *instr)
{
- bool zero_if_use = list_empty(&instr->dest.dest.ssa.if_uses);
- bool zero_use = list_empty(&instr->dest.dest.ssa.uses);
+ bool zero_if_use = list_is_empty(&instr->dest.dest.ssa.if_uses);
+ bool zero_use = list_is_empty(&instr->dest.dest.ssa.uses);
if (zero_if_use && zero_use)
return false;
static inline bool
is_used_by_if(nir_alu_instr *instr)
{
- return !list_empty(&instr->dest.dest.ssa.if_uses);
+ return !list_is_empty(&instr->dest.dest.ssa.if_uses);
}
static inline bool
is_not_used_by_if(nir_alu_instr *instr)
{
- return list_empty(&instr->dest.dest.ssa.if_uses);
+ return list_is_empty(&instr->dest.dest.ssa.if_uses);
}
static inline bool
list_addtail(&src->src.use_link, &src->src.ssa->uses);
}
- assert(list_empty(&ctx->phi_srcs));
+ assert(list_is_empty(&ctx->phi_srcs));
}
static void
* conditions expect well-formed Booleans. If you want to compare with
* NULL, an explicit comparison operation should be used.
*/
- validate_assert(state, list_empty(&instr->dest.ssa.if_uses));
+ validate_assert(state, list_is_empty(&instr->dest.ssa.if_uses));
/* Only certain modes can be used as sources for phi instructions. */
nir_foreach_use(use, &instr->dest.ssa) {
}
static inline bool
-nir_instr_worklist_empty(nir_instr_worklist *wl)
+nir_instr_worklist_is_empty(nir_instr_worklist *wl)
{
return nir_instr_worklist_length(wl) == 0;
}
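A typical drain loop over this worklist, as a sketch; nir_instr_worklist_pop_head is assumed to live in the same header, and handle_instr is a hypothetical callback:
while (!nir_instr_worklist_is_empty(wl)) {
   nir_instr *instr = nir_instr_worklist_pop_head(wl);
   handle_instr(instr);   /* may push more instructions onto wl */
}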
vtn_emit_cf_list(b, &vtn_loop->body, NULL, NULL, handler);
- if (!list_empty(&vtn_loop->cont_body)) {
+ if (!list_is_empty(&vtn_loop->cont_body)) {
/* If we have a non-trivial continue body then we need to put
* it at the beginning of the loop with a flag to ensure that
* it doesn't get executed in the first iteration.
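The lowering that comment describes, sketched as plain C pseudocode (an assumed shape for illustration, not the actual vtn implementation):
/* Hoist the continue body to the top of the loop, guarded by a flag
 * that is false only on the first iteration. */
bool ran_once = false;
while (true) {
   if (ran_once)
      continue_body();
   ran_once = true;
   loop_body();   /* may break out of the loop */
}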
/* at this point, we should have a single empty block,
* into which we emit the 'end' instruction.
*/
- compile_assert(ctx, list_empty(&ctx->block->instr_list));
+ compile_assert(ctx, list_is_empty(&ctx->block->instr_list));
/* If stream-out (aka transform-feedback) is enabled, emit the
* stream-out instructions, followed by a new empty block (into
}
/* need to be able to set (ss) on first instruction: */
- if (list_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
+ if (list_is_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
ir3_NOP(block);
- if (is_nop(n) && !list_empty(&block->instr_list)) {
+ if (is_nop(n) && !list_is_empty(&block->instr_list)) {
struct ir3_instruction *last = list_last_entry(&block->instr_list,
struct ir3_instruction, node);
if (is_nop(last) && (last->repeat < 5)) {
* (2) (block-is-empty || only-instr-is-jump)
*/
if (block->successors[1] == NULL) {
- if (list_empty(&block->instr_list)) {
+ if (list_is_empty(&block->instr_list)) {
return block->successors[0];
} else if (list_length(&block->instr_list) == 1) {
struct ir3_instruction *instr = list_first_entry(
}
}
- while (!list_empty(&ctx->depth_list)) {
+ while (!list_is_empty(&ctx->depth_list)) {
struct ir3_sched_notes notes = {0};
struct ir3_instruction *instr;
for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
- if (!list_empty(&pool->free_cmd_buffers)) {
+ if (!list_is_empty(&pool->free_cmd_buffers)) {
struct tu_cmd_buffer *cmd_buffer = list_first_entry(
&pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
mtx_destroy(&dctx->mutex);
cnd_destroy(&dctx->cond);
- assert(list_empty(&dctx->records));
+ assert(list_is_empty(&dctx->records));
if (pipe->set_log_context) {
pipe->set_log_context(pipe, NULL);
if (dctx->api_stalled)
cnd_signal(&dctx->cond);
- if (list_empty(&records)) {
+ if (list_is_empty(&records)) {
if (dctx->kill_thread)
break;
dctx->api_stalled = false;
}
- if (list_empty(&dctx->records))
+ if (list_is_empty(&dctx->records))
cnd_signal(&dctx->cond);
list_addtail(&record->list, &dctx->records);
p->start(hq, ctx);
/* add to active list */
- assert(list_empty(&hq->node));
+ assert(list_is_empty(&hq->node));
list_addtail(&hq->node, &ctx->active_hw_queries);
return true;
p->resume(aq, batch);
/* add to active list: */
- assert(list_empty(&aq->node));
+ assert(list_is_empty(&aq->node));
list_addtail(&aq->node, &ctx->acc_active_queries);
return true;
resume_query(batch, hq, batch->draw);
/* add to active list: */
- assert(list_empty(&hq->list));
+ assert(list_is_empty(&hq->list));
list_addtail(&hq->list, &ctx->hw_active_queries);
return true;
static inline bool gpir_node_is_root(gpir_node *node)
{
- return list_empty(&node->succ_list);
+ return list_is_empty(&node->succ_list);
}
static inline bool gpir_node_is_leaf(gpir_node *node)
{
- return list_empty(&node->pred_list);
+ return list_is_empty(&node->pred_list);
}
#define gpir_node_to_alu(node) ((gpir_alu_node *)(node))
static void schedule_ready_list(gpir_block *block, struct list_head *ready_list)
{
- if (list_empty(ready_list))
+ if (list_is_empty(ready_list))
return;
gpir_node *node = list_first_entry(ready_list, gpir_node, list);
}
list_inithead(&block->node_list);
- while (!list_empty(&ctx.ready_list)) {
+ while (!list_is_empty(&ctx.ready_list)) {
if (!schedule_one_instr(&ctx))
return false;
}
}
target = branch->target;
- while (list_empty(&target->instr_list)) {
+ while (list_is_empty(&target->instr_list)) {
if (!target->list.next)
break;
target = LIST_ENTRY(ppir_block, target->list.next, list);
}
- assert(!list_empty(&target->instr_list));
+ assert(!list_is_empty(&target->instr_list));
target_instr = list_first_entry(&target->instr_list, ppir_instr, list);
b->branch.target = target_instr->offset - node->instr->offset;
static inline bool ppir_node_is_root(ppir_node *node)
{
- return list_empty(&node->succ_list);
+ return list_is_empty(&node->succ_list);
}
static inline bool ppir_node_is_leaf(ppir_node *node)
{
- return list_empty(&node->pred_list);
+ return list_is_empty(&node->pred_list);
}
static inline bool ppir_node_has_single_succ(ppir_node *node)
static inline bool ppir_instr_is_root(ppir_instr *instr)
{
- return list_empty(&instr->succ_list);
+ return list_is_empty(&instr->succ_list);
}
static inline bool ppir_instr_is_leaf(ppir_instr *instr)
{
- return list_empty(&instr->pred_list);
+ return list_is_empty(&instr->pred_list);
}
bool ppir_lower_prog(ppir_compiler *comp);
ppir_regalloc_update_reglist_ssa(comp);
/* No registers? The shader probably consists of a discard instruction */
- if (list_empty(&comp->reg_list))
+ if (list_is_empty(&comp->reg_list))
return true;
/* this will most likely succeed in the first
static void ppir_schedule_ready_list(ppir_block *block,
struct list_head *ready_list)
{
- if (list_empty(ready_list))
+ if (list_is_empty(ready_list))
return;
ppir_instr *instr = list_first_entry(ready_list, ppir_instr, list);
while (!pool->shutdown) {
struct lp_cs_tpool_task *task;
- while (list_empty(&pool->workqueue) && !pool->shutdown)
+ while (list_is_empty(&pool->workqueue) && !pool->shutdown)
cnd_wait(&pool->new_work, &pool->m);
if (pool->shutdown)
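The hunk above is the canonical condition-variable consumer: cnd_wait() can wake spuriously, so the emptiness test is re-checked in a loop with the mutex held. A fuller sketch of the worker side (C11 threads; the task's list-link member name is an assumption):
mtx_lock(&pool->m);
while (!pool->shutdown) {
   /* cnd_wait() atomically releases the mutex while sleeping and
    * reacquires it before returning. */
   while (list_is_empty(&pool->workqueue) && !pool->shutdown)
      cnd_wait(&pool->new_work, &pool->m);
   if (pool->shutdown)
      break;
   struct lp_cs_tpool_task *task =
      list_first_entry(&pool->workqueue, struct lp_cs_tpool_task, list);
   list_del(&task->list);
   /* ... run the task, typically with the mutex dropped ... */
}
mtx_unlock(&pool->m);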
fprintf(stderr, " BOs cached: %d\n", cache_count);
fprintf(stderr, " BOs cached size: %dkb\n", cache_size / 1024);
- if (!list_empty(&cache->time_list)) {
+ if (!list_is_empty(&cache->time_list)) {
struct v3d_bo *first = list_first_entry(&cache->time_list,
struct v3d_bo,
time_list);
struct v3d_bo *bo = NULL;
mtx_lock(&cache->lock);
- if (!list_empty(&cache->size_list[page_index])) {
+ if (!list_is_empty(&cache->size_list[page_index])) {
bo = list_first_entry(&cache->size_list[page_index],
struct v3d_bo, size_list);
bo->offset = create.offset;
if (ret != 0) {
- if (!list_empty(&screen->bo_cache.time_list) &&
+ if (!list_is_empty(&screen->bo_cache.time_list) &&
!cleared_and_retried) {
cleared_and_retried = true;
v3d_bo_cache_free_all(&screen->bo_cache);
*/
for (int i = 0; i < cache->size_list_size; i++) {
struct list_head *old_head = &cache->size_list[i];
- if (list_empty(old_head))
+ if (list_is_empty(old_head))
list_inithead(&new_list[i]);
else {
new_list[i].next = old_head->next;
fprintf(stderr, " BOs cached: %d\n", cache->bo_count);
fprintf(stderr, " BOs cached size: %dkb\n", cache->bo_size / 1024);
- if (!list_empty(&cache->time_list)) {
+ if (!list_is_empty(&cache->time_list)) {
struct vc4_bo *first = LIST_ENTRY(struct vc4_bo,
cache->time_list.next,
time_list);
bo->handle = create.handle;
if (ret != 0) {
- if (!list_empty(&screen->bo_cache.time_list) &&
+ if (!list_is_empty(&screen->bo_cache.time_list) &&
!cleared_and_retried) {
cleared_and_retried = true;
vc4_bo_cache_free_all(&screen->bo_cache);
struct qreg result)
{
struct qinst *last_inst = NULL;
- if (!list_empty(&c->cur_block->instructions))
+ if (!list_is_empty(&c->cur_block->instructions))
last_inst = (struct qinst *)c->cur_block->instructions.prev;
assert(result.file == QFILE_UNIF ||
if (!src->is_ssa)
return false;
- if (!list_empty(&src->ssa->if_uses))
+ if (!list_is_empty(&src->ssa->if_uses))
return false;
return (src->ssa->uses.next == &src->use_link &&
qir_compile_destroy(struct vc4_compile *c)
{
qir_for_each_block(block, c) {
- while (!list_empty(&block->instructions)) {
+ while (!list_is_empty(&block->instructions)) {
struct qinst *qinst =
list_first_entry(&block->instructions,
struct qinst, link);
{
struct qinst *last_inst = NULL;
- if (!list_empty(&c->cur_block->instructions))
+ if (!list_is_empty(&c->cur_block->instructions))
last_inst = (struct qinst *)c->cur_block->instructions.prev;
/* We don't have any way to guess which kind of MOV is implied. */
}
state->time = 0;
- while (!list_empty(&state->dag->heads)) {
+ while (!list_is_empty(&state->dag->heads)) {
struct schedule_node *chosen = choose_instruction(state);
struct qinst *inst = chosen->inst;
{
uint32_t time = 0;
- while (!list_empty(&scoreboard->dag->heads)) {
+ while (!list_is_empty(&scoreboard->dag->heads)) {
struct schedule_node *chosen =
choose_instruction_to_schedule(scoreboard,
schedule_list,
/* Wrap each instruction in a scheduler structure. */
uint32_t next_sched_uniform = *next_uniform;
- while (!list_empty(&block->qpu_inst_list)) {
+ while (!list_is_empty(&block->qpu_inst_list)) {
struct queued_qpu_inst *inst =
(struct queued_qpu_inst *)block->qpu_inst_list.next;
struct schedule_node *n = rzalloc(scoreboard->dag,
fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
}
- while (!list_empty(&bo->u.sparse.backing)) {
+ while (!list_is_empty(&bo->u.sparse.backing)) {
struct amdgpu_sparse_backing *dummy = NULL;
sparse_free_backing_buffer(bo,
container_of(bo->u.sparse.backing.next,
static void
get_current_pos(struct gen_aux_map_context *ctx, uint64_t *gpu, uint64_t **map)
{
- assert(!list_empty(&ctx->buffers));
+ assert(!list_is_empty(&ctx->buffers));
struct aux_map_buffer *tail =
list_last_entry(&ctx->buffers, struct aux_map_buffer, link);
if (gpu)
static inline bool
are_all_uses_fadd(nir_ssa_def *def)
{
- if (!list_empty(&def->if_uses))
+ if (!list_is_empty(&def->if_uses))
return false;
nir_foreach_use(use_src, def) {
nir_instr_as_load_const (srcs[i].src.ssa->parent_instr);
if (list_is_singular(&load_const->def.uses) &&
- list_empty(&load_const->def.if_uses)) {
+ list_is_empty(&load_const->def.if_uses)) {
return true;
}
}
nir_src_for_ssa(&ffma->dest.dest.ssa));
nir_builder_instr_insert(b, &ffma->instr);
- assert(list_empty(&add->dest.dest.ssa.uses));
+ assert(list_is_empty(&add->dest.dest.ssa.uses));
nir_instr_remove(&add->instr);
progress = true;
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
/* Delete all but the first batch bo */
- assert(!list_empty(&cmd_buffer->batch_bos));
+ assert(!list_is_empty(&cmd_buffer->batch_bos));
while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
list_del(&bbo->link);
anv_batch_bo_destroy(bbo, cmd_buffer);
}
- assert(!list_empty(&cmd_buffer->batch_bos));
+ assert(!list_is_empty(&cmd_buffer->batch_bos));
anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
&cmd_buffer->batch,
char str[MAX_DEBUG_MESSAGE_LENGTH];
struct anv_device *device = (struct anv_device *)data;
- if (list_empty(&device->instance->debug_report_callbacks.callbacks))
+ if (list_is_empty(&device->instance->debug_report_callbacks.callbacks))
return;
va_list args;
/* Get a buffer out of the cache if available */
retry:
alloc_from_cache = false;
- if (bucket != NULL && !list_empty(&bucket->head)) {
+ if (bucket != NULL && !list_is_empty(&bucket->head)) {
if (busy && !zeroed) {
/* Allocate new render-target BOs from the tail (MRU)
* of the list, as it will likely be hot in the GPU
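A sketch of the MRU-versus-LRU choice that comment makes, using the same list_first_entry/list_last_entry helpers seen elsewhere in this change (cached_bo and its link member are hypothetical):
/* LRU end: the entry idle the longest, safest when reuse must not stall. */
struct cached_bo *lru =
   list_first_entry(&bucket->head, struct cached_bo, link);
/* MRU end: the most recently freed entry, likely still hot in GPU caches;
 * preferred when the old contents will be fully overwritten anyway. */
struct cached_bo *mru =
   list_last_entry(&bucket->head, struct cached_bo, link);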
list->prev = item;
}
-static inline bool list_empty(const struct list_head *list);
+static inline bool list_is_empty(const struct list_head *list);
static inline void list_replace(struct list_head *from, struct list_head *to)
{
- if (list_empty(from)) {
+ if (list_is_empty(from)) {
list_inithead(to);
} else {
to->prev = from->prev;
item->prev = item;
}
-static inline bool list_empty(const struct list_head *list)
+static inline bool list_is_empty(const struct list_head *list)
{
return list->next == list;
}
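The definition above is the heart of the rename: list_head is a circular doubly-linked list whose head doubles as a sentinel, so a list is empty exactly when its next pointer refers back to the head itself. A minimal sketch of that invariant, using only helpers that appear in this change:
struct node {
   struct list_head link;
};

struct list_head head;
list_inithead(&head);           /* head.next == head.prev == &head */
assert(list_is_empty(&head));

struct node n;
list_addtail(&n.link, &head);   /* insert just before the sentinel */
assert(!list_is_empty(&head));

list_del(&n.link);              /* unlink; head points at itself again */
assert(list_is_empty(&head));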
static inline void list_splice(struct list_head *src, struct list_head *dst)
{
- if (list_empty(src))
+ if (list_is_empty(src))
return;
src->next->prev = dst;
static inline void list_splicetail(struct list_head *src, struct list_head *dst)
{
- if (list_empty(src))
+ if (list_is_empty(src))
return;
src->prev->next = dst;
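Both splice variants appear here only partially. Under the usual util/list contract (an assumption worth checking against the full header), they move every entry of src into dst and leave src's links stale, so callers reinitialize src before reusing it:
list_splicetail(&pending, &queue);   /* append all of pending to queue */
list_inithead(&pending);             /* src links are stale after a splice */
assert(list_is_empty(&pending));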
struct overlay_draw *get_overlay_draw(struct swapchain_data *data)
{
struct device_data *device_data = data->device;
- struct overlay_draw *draw = list_empty(&data->draws) ?
+ struct overlay_draw *draw = list_is_empty(&data->draws) ?
NULL : list_first_entry(&data->draws, struct overlay_draw, link);
VkSemaphoreCreateInfo sem_info = {};
!cmd_buffer_data->timestamp_query_pool)
continue;
- if (list_empty(&cmd_buffer_data->link)) {
+ if (list_is_empty(&cmd_buffer_data->link)) {
list_addtail(&cmd_buffer_data->link,
&queue_data->running_command_buffer);
} else {
const char *pMessage)
{
/* Allow NULL for convenience; return if no callbacks are registered. */
- if (!instance || list_empty(&instance->callbacks))
+ if (!instance || list_is_empty(&instance->callbacks))
return;
pthread_mutex_lock(&instance->callbacks_mutex);