Addr::Lib* pLib = Lib::GetLib(hLib);
BOOL_32 enabled = FALSE;
- MAYBE_UNUSED ADDR_E_RETURNCODE returnCode = ADDR_OK;
+ ASSERTED ADDR_E_RETURNCODE returnCode = ADDR_OK;
if (pLib != NULL)
{
// Post validation
if (ret == ADDR_OK)
{
- MAYBE_UNUSED Dim2d microBlockDim = Block256_2d[elementBytesLog2];
+ ASSERTED Dim2d microBlockDim = Block256_2d[elementBytesLog2];
ADDR_ASSERT((2u << GetMaxValidChannelIndex(pEquation->addr, 8, 0)) ==
(microBlockDim.w * (1 << elementBytesLog2)));
ADDR_ASSERT((2u << GetMaxValidChannelIndex(pEquation->addr, 8, 1)) == microBlockDim.h);
const UINT_32 numBankBits = GetBankXorBits(blkSizeLog2);
const UINT_32 bppLog2 = Log2(pIn->bpp >> 3);
const UINT_32 maxYCoordBlock256 = Log2(Block256_2d[bppLog2].h) - 1;
- MAYBE_UNUSED const ADDR_EQUATION *pEqToCheck = &m_equationTable[eqIndex];
+ ASSERTED const ADDR_EQUATION *pEqToCheck = &m_equationTable[eqIndex];
ADDR_ASSERT(maxYCoordBlock256 ==
GetMaxValidChannelIndex(&pEqToCheck->addr[0], GetBlockSizeLog2(ADDR_SW_256B), 1));
) const
{
BOOL_32 valid = TRUE;
- MAYBE_UNUSED UINT_32 numPipes = HwlGetPipes(pTileInfo);
+ ASSERTED UINT_32 numPipes = HwlGetPipes(pTileInfo);
switch (pTileInfo->banks)
{
) const
{
UINT_64 logicalSliceSize;
- MAYBE_UNUSED UINT_64 physicalSliceSize;
+ ASSERTED UINT_64 physicalSliceSize;
UINT_32 pitch = *pPitch;
UINT_32 height = *pHeight;
ac_to_float(ctx, src0),
};
- MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
+ ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
ac_get_elem_bits(ctx, result_type));
assert(length < sizeof(name));
return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
ac_to_float(ctx, src1),
};
- MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
+ ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
ac_get_elem_bits(ctx, result_type));
assert(length < sizeof(name));
return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
ac_to_float(ctx, src2),
};
- MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
+ ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
ac_get_elem_bits(ctx, result_type));
assert(length < sizeof(name));
return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
int count;
- MAYBE_UNUSED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
+ ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
const char *atomic_name;
char intrinsic_name[64];
enum ac_atomic_op atomic_subop;
- MAYBE_UNUSED int length;
+ ASSERTED int length;
enum glsl_sampler_dim dim;
bool is_unsigned = false;
}
case nir_intrinsic_load_interpolated_input: {
/* We assume any indirect loads have been lowered away */
- MAYBE_UNUSED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
+ ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
assert(offset);
assert(offset[0].i32 == 0);
}
case nir_intrinsic_load_input: {
/* We only lower inputs for fragment shaders ATM */
- MAYBE_UNUSED nir_const_value *offset = nir_src_as_const_value(instr->src[0]);
+ ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[0]);
assert(offset);
assert(offset[0].i32 == 0);
struct radv_attachment_info *att = &framebuffer->attachments[idx];
struct radv_image *image = att->attachment->image;
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
- MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
+ ASSERTED uint32_t queue_mask = radv_image_queue_family_mask(image,
cmd_buffer->queue_family_index,
cmd_buffer->queue_family_index);
/* We currently don't support writing decompressed HTILE */
if (flush_indirect_descriptors)
radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs,
MAX_SETS * MESA_SHADER_STAGES * 4);
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += offset;
- MAYBE_UNUSED unsigned cdw_max =
+ ASSERTED unsigned cdw_max =
radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, MESA_SHADER_STAGES * 4);
* because it is invalid, according to Vulkan spec.
*/
for (int i = 0; i < descriptorWriteCount; i++) {
- MAYBE_UNUSED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
+ ASSERTED const VkWriteDescriptorSet *writeset = &pDescriptorWrites[i];
assert(writeset->descriptorType != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
}
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
- MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
+ ASSERTED const uint32_t total_count = firstViewport + viewportCount;
assert(firstViewport < MAX_VIEWPORTS);
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
- MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
+ ASSERTED const uint32_t total_count = firstScissor + scissorCount;
assert(firstScissor < MAX_SCISSORS);
assert(total_count >= 1 && total_count <= MAX_SCISSORS);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
struct radv_cmd_state *state = &cmd_buffer->state;
- MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
+ ASSERTED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
struct radv_cmd_state *state = &cmd_buffer->state;
struct radv_subpass *subpass = &state->pass->subpasses[subpass_id];
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);
radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
- MAYBE_UNUSED unsigned cdw_max =
+ ASSERTED unsigned cdw_max =
radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 4096);
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
AC_UD_CS_GRID_SIZE);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
+ ASSERTED unsigned cdw_max = radeon_check_space(ws, cs, 25);
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 21);
/* Flags that only require a top-of-pipe event. */
VkPipelineStageFlags top_of_pipe_flags =
unsigned query = firstQuery + i;
uint64_t local_src_va = va + query * pool->stride;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 19);
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);
+ ASSERTED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 28 * num_queries);
for (unsigned i = 0; i < num_queries; i++) {
switch(pipelineStage) {
bo->initial_domain = RADEON_DOMAIN_GTT;
bo->priority = priority;
- MAYBE_UNUSED int r = amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &bo->bo_handle);
+ ASSERTED int r = amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_kms, &bo->bo_handle);
assert(!r);
p_atomic_add(&ws->allocated_gtt,
void *buf;
uint8_t *text_data = NULL;
uint32_t text_offset = 0, text_length = 0;
- MAYBE_UNUSED uint32_t total_length;
+ ASSERTED uint32_t total_length;
for (int i = 0; i < ARRAY_SIZE(genxml_files_table); i++) {
if (i != 0) {
case nir_op_sge:
case nir_op_slt: {
enum v3d_qpu_cond cond;
- MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
+ ASSERTED bool ok = ntq_emit_comparison(c, instr, &cond);
assert(ok);
result = vir_MOV(c, vir_SEL(c, cond,
vir_uniform_f(c, 1.0),
case nir_op_ilt32:
case nir_op_ult32: {
enum v3d_qpu_cond cond;
- MAYBE_UNUSED bool ok = ntq_emit_comparison(c, instr, &cond);
+ ASSERTED bool ok = ntq_emit_comparison(c, instr, &cond);
assert(ok);
result = vir_MOV(c, vir_SEL(c, cond,
vir_uniform_ui(c, ~0),
reads_uniform(const struct v3d_device_info *devinfo, uint64_t instruction)
{
struct v3d_qpu_instr qpu;
- MAYBE_UNUSED bool ok = v3d_qpu_instr_unpack(devinfo, instruction, &qpu);
+ ASSERTED bool ok = v3d_qpu_instr_unpack(devinfo, instruction, &qpu);
assert(ok);
if (qpu.sig.ldunif ||
} else if (mux == V3D_QPU_MUX_B) {
if (instr->sig.small_imm) {
uint32_t val;
- MAYBE_UNUSED bool ok =
+ ASSERTED bool ok =
v3d_qpu_small_imm_unpack(disasm->devinfo,
instr->raddr_b,
&val);
}
static bool
-validate_point_mode(MAYBE_UNUSED const ast_type_qualifier &qualifier,
- MAYBE_UNUSED const ast_type_qualifier &new_qualifier)
+validate_point_mode(ASSERTED const ast_type_qualifier &qualifier,
+ ASSERTED const ast_type_qualifier &new_qualifier)
{
/* Point mode can only be true if the flag is set. */
assert (!qualifier.flags.q.point_mode || !new_qualifier.flags.q.point_mode
_mesa_set_add(ir_set, ir);
}
-MAYBE_UNUSED static void
+ASSERTED static void
check_node_type(ir_instruction *ir, void *data)
{
(void) data;
* layouts at HIR generation time, but we don't do that for shared
* variables, which are always column-major
*/
- MAYBE_UNUSED ir_variable *var = deref->variable_referenced();
+ ASSERTED ir_variable *var = deref->variable_referenced();
assert((var->is_in_buffer_block() && !matrix) ||
var->data.mode == ir_var_shader_shared);
return false;
static bool
assert_ssa_def_is_not_int(nir_ssa_def *def, void *arg)
{
- MAYBE_UNUSED BITSET_WORD *int_types = arg;
+ ASSERTED BITSET_WORD *int_types = arg;
assert(!BITSET_TEST(int_types, def->index));
return true;
}
nir_foreach_phi_src(src, phi) {
assert(src->src.is_ssa);
size_t blob_offset = blob_reserve_intptr(ctx->blob);
- MAYBE_UNUSED size_t blob_offset2 = blob_reserve_intptr(ctx->blob);
+ ASSERTED size_t blob_offset2 = blob_reserve_intptr(ctx->blob);
assert(blob_offset + sizeof(uintptr_t) == blob_offset2);
write_phi_fixup fixup = {
.blob_offset = blob_offset,
static int
_eglRefreshDeviceList(void)
{
- MAYBE_UNUSED _EGLDevice *dev;
+ ASSERTED _EGLDevice *dev;
int count = 0;
dev = _eglGlobal.DeviceList;
void
trace_context_check(const struct pipe_context *pipe)
{
- MAYBE_UNUSED struct trace_context *tr_ctx = (struct trace_context *) pipe;
+ ASSERTED struct trace_context *tr_ctx = (struct trace_context *) pipe;
assert(tr_ctx->base.destroy == trace_context_destroy);
}
assert(fenced_buf->validation_flags);
if (fenced_buf->fence) {
- MAYBE_UNUSED boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
+ ASSERTED boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
assert(!destroyed);
}
if (fence) {
struct pipe_surface *dst,
int dstX0, int dstY0,
int dstX1, int dstY1,
- MAYBE_UNUSED float z,
+ ASSERTED float z,
enum pipe_tex_filter filter,
uint writemask)
{
blitter->pipe->set_active_query_state(blitter->pipe, true);
}
-static void blitter_check_saved_vertex_states(MAYBE_UNUSED struct blitter_context_priv *ctx)
+static void blitter_check_saved_vertex_states(ASSERTED struct blitter_context_priv *ctx)
{
assert(ctx->base.saved_vs != INVALID_PTR);
assert(!ctx->has_geometry_shader || ctx->base.saved_gs != INVALID_PTR);
ctx->base.saved_rs_state = INVALID_PTR;
}
-static void blitter_check_saved_fragment_states(MAYBE_UNUSED struct blitter_context_priv *ctx)
+static void blitter_check_saved_fragment_states(ASSERTED struct blitter_context_priv *ctx)
{
assert(ctx->base.saved_fs != INVALID_PTR);
assert(ctx->base.saved_dsa_state != INVALID_PTR);
}
}
-static void blitter_check_saved_fb_state(MAYBE_UNUSED struct blitter_context_priv *ctx)
+static void blitter_check_saved_fb_state(ASSERTED struct blitter_context_priv *ctx)
{
assert(ctx->base.saved_fb_state.nr_cbufs != (ubyte) ~0);
}
util_unreference_framebuffer_state(&ctx->base.saved_fb_state);
}
-static void blitter_check_saved_textures(MAYBE_UNUSED struct blitter_context_priv *ctx)
+static void blitter_check_saved_textures(ASSERTED struct blitter_context_priv *ctx)
{
assert(ctx->base.saved_num_sampler_states != ~0u);
assert(ctx->base.saved_num_sampler_views != ~0u);
void
util_format_r8g8_b8g8_unorm_fetch_rgba_float(float *dst, const uint8_t *src,
- unsigned i, MAYBE_UNUSED unsigned j)
+ unsigned i, ASSERTED unsigned j)
{
assert(i < 2);
assert(j < 1);
void
util_format_g8r8_g8b8_unorm_fetch_rgba_float(float *dst, const uint8_t *src,
- unsigned i, MAYBE_UNUSED unsigned j)
+ unsigned i, ASSERTED unsigned j)
{
assert(i < 2);
assert(j < 1);
void
util_format_uyvy_fetch_rgba_float(float *dst, const uint8_t *src,
- unsigned i, MAYBE_UNUSED unsigned j)
+ unsigned i, ASSERTED unsigned j)
{
uint8_t y, u, v;
void
util_format_yuyv_fetch_rgba_float(float *dst, const uint8_t *src,
- unsigned i, MAYBE_UNUSED unsigned j)
+ unsigned i, ASSERTED unsigned j)
{
uint8_t y, u, v;
if (dst != src) {
/* bump the src.count first */
if (src) {
- MAYBE_UNUSED int count = p_atomic_inc_return(&src->count);
+ ASSERTED int count = p_atomic_inc_return(&src->count);
assert(count != 1); /* src had to be referenced */
debug_reference(src, get_desc, 1);
}
tc_call_generate_mipmap(struct pipe_context *pipe, union tc_payload *payload)
{
struct tc_generate_mipmap *p = (struct tc_generate_mipmap *)payload;
- MAYBE_UNUSED bool result = pipe->generate_mipmap(pipe, p->res, p->format,
+ ASSERTED bool result = pipe->generate_mipmap(pipe, p->res, p->format,
p->base_level,
p->last_level,
p->first_layer,
etna_compile_parse_declarations(struct etna_compile *c)
{
struct tgsi_parse_context ctx = { };
- MAYBE_UNUSED unsigned status = tgsi_parse_init(&ctx, c->tokens);
+ ASSERTED unsigned status = tgsi_parse_init(&ctx, c->tokens);
assert(status == TGSI_PARSE_OK);
while (!tgsi_parse_end_of_tokens(&ctx)) {
etna_compile_pass_check_usage(struct etna_compile *c)
{
struct tgsi_parse_context ctx = { };
- MAYBE_UNUSED unsigned status = tgsi_parse_init(&ctx, c->tokens);
+ ASSERTED unsigned status = tgsi_parse_init(&ctx, c->tokens);
assert(status == TGSI_PARSE_OK);
for (int idx = 0; idx < c->total_decls; ++idx) {
{
struct tgsi_parse_context ctx = { };
int inst_idx = 0;
- MAYBE_UNUSED unsigned status = tgsi_parse_init(&ctx, c->tokens);
+ ASSERTED unsigned status = tgsi_parse_init(&ctx, c->tokens);
assert(status == TGSI_PARSE_OK);
while (!tgsi_parse_end_of_tokens(&ctx)) {
etna_compile_pass_generate_code(struct etna_compile *c)
{
struct tgsi_parse_context ctx = { };
- MAYBE_UNUSED unsigned status = tgsi_parse_init(&ctx, c->tokens);
+ ASSERTED unsigned status = tgsi_parse_init(&ctx, c->tokens);
assert(status == TGSI_PARSE_OK);
int inst_idx = 0;
gpir_node_add_dep(&load->node, &store->node, GPIR_DEP_READ_AFTER_WRITE);
gpir_debug("spilling use %d of node %d to load node %d\n",
use->index, node->index, load->node.index);
- MAYBE_UNUSED bool result = _try_place_node(ctx, use->sched.instr, &load->node);
+ ASSERTED bool result = _try_place_node(ctx, use->sched.instr, &load->node);
assert(result);
}
}
if (best_node) {
gpir_debug("scheduling %d (score = %d)%s\n", best_node->index,
best_score, best_node->sched.max_node ? " (max)" : "");
- MAYBE_UNUSED int score = schedule_try_node(ctx, best_node, false);
+ ASSERTED int score = schedule_try_node(ctx, best_node, false);
assert(score != INT_MIN);
return true;
}
gpir_node_replace_child(succ, move, node);
}
}
- MAYBE_UNUSED int score = schedule_try_node(ctx, move, false);
+ ASSERTED int score = schedule_try_node(ctx, move, false);
assert(score != INT_MIN);
}
if (!create_new_instr(block, move))
return false;
- MAYBE_UNUSED bool insert_result =
+ ASSERTED bool insert_result =
ppir_instr_insert_node(move->instr, node);
assert(insert_result);
ushort min_index,
ushort max_index )
{
- MAYBE_UNUSED struct lp_setup_context *setup = lp_setup_context(vbr);
+ ASSERTED struct lp_setup_context *setup = lp_setup_context(vbr);
assert( setup->vertex_buffer_size >= (max_index+1) * setup->vertex_size );
/* do nothing */
}
CodeEmitterNV50::emitLOAD(const Instruction *i)
{
DataFile sf = i->src(0).getFile();
- MAYBE_UNUSED int32_t offset = i->getSrc(0)->reg.data.offset;
+ ASSERTED int32_t offset = i->getSrc(0)->reg.data.offset;
switch (sf) {
case FILE_SHADER_INPUT:
ImmediateValue val;
// getImmediate() has side-effects on the argument so this *shouldn't*
// be folded into the assert()
- MAYBE_UNUSED bool ret = def->src(0).getImmediate(val);
+ ASSERTED bool ret = def->src(0).getImmediate(val);
assert(ret);
if (i->getSrc(1)->reg.data.id & 1)
val.reg.data.u32 >>= 16;
union pipe_desc desc;
unsigned vp_caps, is_ref;
- MAYBE_UNUSED unsigned ret; /* used in debug checks */
+ ASSERTED unsigned ret; /* used in debug checks */
struct nouveau_vp3_video_buffer *refs[16] = {};
desc.base = picture;
{
struct nouveau_vp3_decoder *dec = (struct nouveau_vp3_decoder *)decoder;
uint32_t comm_seq = ++dec->fence_seq;
- MAYBE_UNUSED unsigned ret = 0; /* used in debug checks */
+ ASSERTED unsigned ret = 0; /* used in debug checks */
assert(dec);
assert(target);
{
struct nouveau_vp3_decoder *dec = (struct nouveau_vp3_decoder *)decoder;
uint32_t comm_seq = dec->fence_seq;
- MAYBE_UNUSED unsigned ret = 0; /* used in debug checks */
+ ASSERTED unsigned ret = 0; /* used in debug checks */
assert(decoder);
union pipe_desc desc;
unsigned vp_caps, is_ref;
- MAYBE_UNUSED unsigned ret; /* used in debug checks */
+ ASSERTED unsigned ret; /* used in debug checks */
struct nouveau_vp3_video_buffer *refs[16] = {};
desc.base = picture;
{
struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
- MAYBE_UNUSED int ret;
+ ASSERTED int ret;
unsigned gem_handle;
ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
panfrost_drm_query_gpu_version(struct panfrost_screen *screen)
{
struct drm_panfrost_get_param get_param = {0,};
- MAYBE_UNUSED int ret;
+ ASSERTED int ret;
get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
new_start_in_dw, new_start_in_dw * 4);
if (pool->item_list != item->link.prev) {
- MAYBE_UNUSED struct compute_memory_item *prev;
+ ASSERTED struct compute_memory_item *prev;
prev = container_of(item->link.prev, item, link);
assert(prev->start_in_dw + prev->size_in_dw <= new_start_in_dw);
}
struct si_context *sctx = (struct si_context*)ctx;
struct si_texture *src = (struct si_texture*)info->src.resource;
struct si_texture *dst = (struct si_texture*)info->dst.resource;
- MAYBE_UNUSED struct si_texture *stmp;
+ ASSERTED struct si_texture *stmp;
unsigned dst_width = u_minify(info->dst.resource->width0, info->dst.level);
unsigned dst_height = u_minify(info->dst.resource->height0, info->dst.level);
enum pipe_format format = info->src.format;
if (!size)
return;
- MAYBE_UNUSED unsigned clear_alignment = MIN2(clear_value_size, 4);
+ ASSERTED unsigned clear_alignment = MIN2(clear_value_size, 4);
assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */
assert(offset % clear_alignment == 0);
/* The compute IB is always chained, but we need to call cs_check_space to add more space. */
struct radeon_cmdbuf *cs = sctx->prim_discard_compute_cs;
- MAYBE_UNUSED bool compute_has_space = sctx->ws->cs_check_space(cs, need_compute_dw, false);
+ ASSERTED bool compute_has_space = sctx->ws->cs_check_space(cs, need_compute_dw, false);
assert(compute_has_space);
assert(si_check_ring_space(sctx, out_indexbuf_size));
return SI_PRIM_DISCARD_ENABLED;
short shader_userdata_rel_index,
unsigned num_elements)
{
- MAYBE_UNUSED unsigned desc_slot;
+ ASSERTED unsigned desc_slot;
si_init_descriptors(desc, shader_userdata_rel_index, 16, num_elements);
sctx->bindless_descriptors.num_active_slots = num_elements;
enum si_arg_regfile regfile, LLVMTypeRef type,
LLVMValueRef *assign, unsigned idx)
{
- MAYBE_UNUSED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
+ ASSERTED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
assert(actual == idx);
}
LLVMTypeRef function_type;
unsigned num_first_params;
unsigned num_out, initial_num_out;
- MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
- MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
+ ASSERTED unsigned num_out_sgpr; /* used in debug checks */
+ ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
unsigned num_sgprs, num_vgprs;
unsigned gprs;
if (!sp_tex->dt) {
/* regular texture - setup array of mipmap level offsets */
- MAYBE_UNUSED struct pipe_resource *res = view->texture;
+ ASSERTED struct pipe_resource *res = view->texture;
int j;
if (view->target != PIPE_BUFFER) {
softpipe_delete_compute_state(struct pipe_context *pipe,
void *cs)
{
- MAYBE_UNUSED struct softpipe_context *softpipe = softpipe_context(pipe);
+ ASSERTED struct softpipe_context *softpipe = softpipe_context(pipe);
struct sp_compute_shader *state = (struct sp_compute_shader *)cs;
assert(softpipe->cs != state);
assert(range->indexWidth == range->indexArray.stride);
if (ib) {
- MAYBE_UNUSED unsigned size = ib->width0;
- MAYBE_UNUSED unsigned offset = range->indexArray.offset;
- MAYBE_UNUSED unsigned stride = range->indexArray.stride;
- MAYBE_UNUSED unsigned count;
+ ASSERTED unsigned size = ib->width0;
+ ASSERTED unsigned offset = range->indexArray.offset;
+ ASSERTED unsigned stride = range->indexArray.stride;
+ ASSERTED unsigned count;
assert(size);
assert(offset < size);
* It will be done using the current context.
*/
if (SVGA3D_InvalidateGBSurface(svga->swc, entry->handle) != PIPE_OK) {
- MAYBE_UNUSED enum pipe_error ret;
+ ASSERTED enum pipe_error ret;
/* Even though surface invalidation here is done after the command
* buffer is flushed, it is still possible that it will
struct pipe_transfer *ib_transfer = NULL;
struct pipe_transfer *cb_transfer[SVGA_MAX_CONST_BUFS] = { 0 };
struct draw_context *draw = svga->swtnl.draw;
- MAYBE_UNUSED unsigned old_num_vertex_buffers;
+ ASSERTED unsigned old_num_vertex_buffers;
unsigned i;
const void *map;
enum pipe_error ret;
static void
set_last_dst_pack(struct qblock *block, struct qinst *inst)
{
- MAYBE_UNUSED bool had_pm = *last_inst(block) & QPU_PM;
- MAYBE_UNUSED bool had_ws = *last_inst(block) & QPU_WS;
- MAYBE_UNUSED uint32_t unpack = QPU_GET_FIELD(*last_inst(block), QPU_UNPACK);
+ ASSERTED bool had_pm = *last_inst(block) & QPU_PM;
+ ASSERTED bool had_ws = *last_inst(block) & QPU_WS;
+ ASSERTED uint32_t unpack = QPU_GET_FIELD(*last_inst(block), QPU_UNPACK);
if (!inst->dst.pack)
return;
break;
}
- MAYBE_UNUSED bool handled_qinst_cond = false;
+ ASSERTED bool handled_qinst_cond = false;
switch (qinst->op) {
case QOP_RCP:
/* Queries use internally created buffers and do not go through transfers.
* Index buffers are not bindable. They are not tracked.
*/
- MAYBE_UNUSED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
+ ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
PIPE_BIND_CONSTANT_BUFFER |
PIPE_BIND_SHADER_BUFFER |
PIPE_BIND_SHADER_IMAGE);
XvMCContextPrivate *context_priv;
XvMCSurfacePrivate *target_surface_priv;
- MAYBE_UNUSED XvMCSurfacePrivate *past_surface_priv;
- MAYBE_UNUSED XvMCSurfacePrivate *future_surface_priv;
+ ASSERTED XvMCSurfacePrivate *past_surface_priv;
+ ASSERTED XvMCSurfacePrivate *future_surface_priv;
XvMCMacroBlock *xvmc_mb;
XVMC_MSG(XVMC_TRACE, "[XvMC] Rendering to surface %p, with past %p and future %p\n",
struct pb_buffer *pb_buf,
unsigned flags)
{
- MAYBE_UNUSED enum pipe_error ret;
+ ASSERTED enum pipe_error ret;
unsigned translated_flags;
boolean already_present;
const uint32_t width_rgba_px = width_cl;
const uint32_t height_rgba_px = height_cl * 4;
- MAYBE_UNUSED bool ok =
+ ASSERTED bool ok =
isl_surf_init(batch->blorp->isl_dev, ¶ms.dst.surf,
.dim = ISL_SURF_DIM_2D,
.format = ISL_FORMAT_R32G32B32A32_UINT,
void *buf;
uint8_t *text_data = NULL;
uint32_t text_offset = 0, text_length = 0;
- MAYBE_UNUSED uint32_t total_length;
+ ASSERTED uint32_t total_length;
uint32_t gen_10 = devinfo_to_gen(devinfo, true);
for (int i = 0; i < ARRAY_SIZE(genxml_files_table); i++) {
* '2^n - 1' for some n.
*/
static inline bool
-brw_stage_has_packed_dispatch(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_stage_has_packed_dispatch(ASSERTED const struct gen_device_info *devinfo,
gl_shader_stage stage,
const struct brw_stage_prog_data *prog_data)
{
}
static inline bool
-brw_message_desc_header_present(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_message_desc_header_present(ASSERTED const struct gen_device_info *devinfo,
uint32_t desc)
{
assert(devinfo->gen >= 5);
}
static inline unsigned
-brw_sampler_desc_return_format(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_sampler_desc_return_format(ASSERTED const struct gen_device_info *devinfo,
uint32_t desc)
{
assert(devinfo->gen == 4 && !devinfo->is_g4x);
static inline uint32_t
brw_dp_a64_untyped_atomic_desc(const struct gen_device_info *devinfo,
- MAYBE_UNUSED unsigned exec_size, /**< 0 for SIMD4x2 */
+ ASSERTED unsigned exec_size, /**< 0 for SIMD4x2 */
unsigned bit_size,
unsigned atomic_op,
bool response_expected)
static inline uint32_t
brw_dp_a64_untyped_atomic_float_desc(const struct gen_device_info *devinfo,
- MAYBE_UNUSED unsigned exec_size,
+ ASSERTED unsigned exec_size,
unsigned atomic_op,
bool response_expected)
{
schedule_instructions(SCHEDULE_POST);
if (last_scratch > 0) {
- MAYBE_UNUSED unsigned max_scratch_size = 2 * 1024 * 1024;
+ ASSERTED unsigned max_scratch_size = 2 * 1024 * 1024;
prog_data->total_scratch = brw_get_scratch_size(last_scratch);
* @{
*/
static inline uint16_t
-brw_inst_3src_a1_src0_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_3src_a1_src0_imm(ASSERTED const struct gen_device_info *devinfo,
const brw_inst *insn)
{
assert(devinfo->gen >= 10);
}
static inline uint16_t
-brw_inst_3src_a1_src2_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_3src_a1_src2_imm(ASSERTED const struct gen_device_info *devinfo,
const brw_inst *insn)
{
assert(devinfo->gen >= 10);
}
static inline void
-brw_inst_set_3src_a1_src0_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_set_3src_a1_src0_imm(ASSERTED const struct gen_device_info *devinfo,
brw_inst *insn, uint16_t value)
{
assert(devinfo->gen >= 10);
}
static inline void
-brw_inst_set_3src_a1_src2_imm(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_set_3src_a1_src2_imm(ASSERTED const struct gen_device_info *devinfo,
brw_inst *insn, uint16_t value)
{
assert(devinfo->gen >= 10);
}
static inline uint64_t
-brw_inst_imm_uq(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_inst_imm_uq(ASSERTED const struct gen_device_info *devinfo,
const brw_inst *insn)
{
assert(devinfo->gen >= 8);
static int
try_immediate_source(const nir_alu_instr *instr, src_reg *op,
bool try_src0_also,
- MAYBE_UNUSED const gen_device_info *devinfo)
+ ASSERTED const gen_device_info *devinfo)
{
unsigned idx;
* S3TC workaround that requires us to do reinterpretation. So assert
* that they're at least the same bpb and block size.
*/
- MAYBE_UNUSED const struct isl_format_layout *surf_fmtl =
+ ASSERTED const struct isl_format_layout *surf_fmtl =
isl_format_get_layout(info->surf->format);
- MAYBE_UNUSED const struct isl_format_layout *view_fmtl =
+ ASSERTED const struct isl_format_layout *view_fmtl =
isl_format_get_layout(info->view->format);
assert(surf_fmtl->bpb == view_fmtl->bpb);
assert(surf_fmtl->bw == view_fmtl->bw);
new_mem->phys_addr = phys_addr;
new_mem->fd_offset = mem->mem_fd_len;
- MAYBE_UNUSED int ftruncate_res = ftruncate(mem->mem_fd, mem->mem_fd_len += 4096);
+ ASSERTED int ftruncate_res = ftruncate(mem->mem_fd, mem->mem_fd_len += 4096);
assert(ftruncate_res == 0);
new_mem->data = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
continue;
uint32_t map_offset = i->virt_addr - address;
- MAYBE_UNUSED void *res =
+ ASSERTED void *res =
mmap((uint8_t *)bo.map + map_offset, 4096, PROT_READ,
MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
assert(res != MAP_FAILED);
for (uint64_t page = address; page < end; page += 4096) {
struct phys_mem *phys_mem = ppgtt_walk(mem, mem->pml4, page);
- MAYBE_UNUSED void *res =
+ ASSERTED void *res =
mmap((uint8_t *)bo.map + (page - bo.addr), 4096, PROT_READ,
MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
assert(res != MAP_FAILED);
aub_read_command(struct aub_read *read, const void *data, uint32_t data_len)
{
const uint32_t *p = data, *next;
- MAYBE_UNUSED const uint32_t *end = data + data_len;
+ ASSERTED const uint32_t *end = data + data_len;
uint32_t h, header_length, bias;
assert(data_len >= 4);
setup_pager();
if (S_ISDIR(st.st_mode)) {
- MAYBE_UNUSED int ret;
+ ASSERTED int ret;
char *filename;
ret = asprintf(&filename, "%s/i915_error_state", path);
{
const uint32_t bb_start_offset =
prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
- MAYBE_UNUSED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;
+ ASSERTED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;
/* Make sure we're looking at a MI_BATCH_BUFFER_START */
assert(((*bb_start >> 29) & 0x07) == 0);
{
#ifdef ENABLE_SHADER_CACHE
char renderer[10];
- MAYBE_UNUSED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
+ ASSERTED int len = snprintf(renderer, sizeof(renderer), "anv_%04x",
device->chipset_id);
assert(len == sizeof(renderer) - 2);
util_vma_heap_free(&device->vma_lo, addr_48b, bo->size);
device->vma_lo_available += bo->size;
} else {
- MAYBE_UNUSED const struct anv_physical_device *physical_device =
+ ASSERTED const struct anv_physical_device *physical_device =
&device->instance->physicalDevice;
assert(addr_48b >= physical_device->memory.heaps[0].vma_start &&
addr_48b < (physical_device->memory.heaps[0].vma_start +
uint32_t width, uint32_t height, const char *filename)
{
VkDevice vk_device = anv_device_to_handle(device);
- MAYBE_UNUSED VkResult result;
+ ASSERTED VkResult result;
image->filename = filename;
image->extent = (VkExtent2D) { width, height };
dump_image_write_to_ppm(struct anv_device *device, struct dump_image *image)
{
VkDevice vk_device = anv_device_to_handle(device);
- MAYBE_UNUSED VkResult result;
+ ASSERTED VkResult result;
VkMemoryRequirements reqs;
anv_GetImageMemoryRequirements(vk_device, image->image, &reqs);
const char *filename)
{
VkDevice vk_device = anv_device_to_handle(device);
- MAYBE_UNUSED VkResult result;
+ ASSERTED VkResult result;
PFN_vkBeginCommandBuffer BeginCommandBuffer =
(void *)anv_GetDeviceProcAddr(anv_device_to_handle(device),
.tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
};
- MAYBE_UNUSED int ret;
+ ASSERTED int ret;
ret = pthread_cond_timedwait(&device->queue_submit,
&device->mutex, &abstime);
assert(ret != EINVAL);
cp = (struct brw_program *) brw->programs[MESA_SHADER_COMPUTE];
cp->id = key.base.program_string_id;
- MAYBE_UNUSED bool success = brw_codegen_cs_prog(brw, cp, &key);
+ ASSERTED bool success = brw_codegen_cs_prog(brw, cp, &key);
assert(success);
}
/* array length: print length + null char + 1 extra to verify it is unused */
char renderer[11];
- MAYBE_UNUSED int len = snprintf(renderer, sizeof(renderer), "i965_%04x",
+ ASSERTED int len = snprintf(renderer, sizeof(renderer), "i965_%04x",
screen->deviceID);
assert(len == sizeof(renderer) - 2);
gp = (struct brw_program *) brw->programs[MESA_SHADER_GEOMETRY];
gp->id = key.base.program_string_id;
- MAYBE_UNUSED bool success = brw_codegen_gs_prog(brw, gp, &key);
+ ASSERTED bool success = brw_codegen_gs_prog(brw, gp, &key);
assert(success);
}
void
gen7_emit_vs_workaround_flush(struct brw_context *brw)
{
- MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;
+ ASSERTED const struct gen_device_info *devinfo = &brw->screen->devinfo;
assert(devinfo->gen == 7);
brw_emit_pipe_control_write(brw,
break;
switch ((enum driver_cache_blob_part)part_type) {
case GEN_PART: {
- MAYBE_UNUSED uint32_t gen_size = blob_read_uint32(&reader);
+ ASSERTED uint32_t gen_size = blob_read_uint32(&reader);
assert(!reader.overrun &&
(uintptr_t)(reader.end - reader.current) > gen_size);
deserialize_gen_program(&reader, ctx, prog, stage);
break;
}
case NIR_PART: {
- MAYBE_UNUSED uint32_t nir_size = blob_read_uint32(&reader);
+ ASSERTED uint32_t nir_size = blob_read_uint32(&reader);
assert(!reader.overrun &&
(uintptr_t)(reader.end - reader.current) > nir_size);
const struct nir_shader_compiler_options *options =
/* BRW_NEW_TESS_PROGRAMS */
struct brw_program *tcp =
(struct brw_program *) brw->programs[MESA_SHADER_TESS_CTRL];
- MAYBE_UNUSED struct brw_program *tep =
+ ASSERTED struct brw_program *tep =
(struct brw_program *) brw->programs[MESA_SHADER_TESS_EVAL];
assert(tep);
if (tcp)
tcp->id = key.base.program_string_id;
- MAYBE_UNUSED bool success = brw_codegen_tcs_prog(brw, tcp, tep, &key);
+ ASSERTED bool success = brw_codegen_tcs_prog(brw, tcp, tep, &key);
assert(success);
}
tep = (struct brw_program *) brw->programs[MESA_SHADER_TESS_EVAL];
tep->id = key.base.program_string_id;
- MAYBE_UNUSED bool success = brw_codegen_tes_prog(brw, tep, &key);
+ ASSERTED bool success = brw_codegen_tes_prog(brw, tep, &key);
assert(success);
}
vp = (struct brw_program *) brw->programs[MESA_SHADER_VERTEX];
vp->id = key.base.program_string_id;
- MAYBE_UNUSED bool success = brw_codegen_vs_prog(brw, vp, &key);
+ ASSERTED bool success = brw_codegen_vs_prog(brw, vp, &key);
assert(success);
}
fp = (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];
fp->id = key.base.program_string_id;
- MAYBE_UNUSED bool success = brw_codegen_wm_prog(brw, fp, &key,
+ ASSERTED bool success = brw_codegen_wm_prog(brw, fp, &key,
&brw->vue_map_geom_out);
assert(success);
}
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
struct brw_context *brw = brw_context(ctx);
- MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;
+ ASSERTED const struct gen_device_info *devinfo = &brw->screen->devinfo;
switch (internalFormat) {
default:
*/
GLboolean
_mesa_test_proxy_teximage(struct gl_context *ctx, GLenum target,
- GLuint numLevels, MAYBE_UNUSED GLint level,
+ GLuint numLevels, ASSERTED GLint level,
mesa_format format, GLuint numSamples,
GLint width, GLint height, GLint depth)
{
/* XXX if the bitmap is larger than the max texture size, break
* it up into chunks.
*/
- GLuint MAYBE_UNUSED maxSize =
+ ASSERTED GLuint maxSize =
pipe->screen->get_param(pipe->screen, PIPE_CAP_MAX_TEXTURE_2D_SIZE);
assert(width <= (GLsizei) maxSize);
assert(height <= (GLsizei) maxSize);
unpack);
}
else {
- bool MAYBE_UNUSED success;
+ ASSERTED bool success;
success = _mesa_texstore(ctx, 2, /* dims */
baseInternalFormat, /* baseInternalFormat */
mformat, /* mesa_format */
const unsigned fb_width = _mesa_geometric_width(ctx->DrawBuffer);
const unsigned fb_height = _mesa_geometric_height(ctx->DrawBuffer);
GLfloat x0, y0, x1, y1;
- GLsizei MAYBE_UNUSED maxSize;
+ ASSERTED GLsizei maxSize;
boolean normalized = sv[0]->texture->target == PIPE_TEXTURE_2D;
unsigned cso_state_mask;
{
/* debug checks */
{
- const struct gl_texture_image MAYBE_UNUSED *dstImage =
+ ASSERTED const struct gl_texture_image *dstImage =
stObj->base.Image[stImage->base.Face][dstLevel];
assert(dstImage);
assert(dstImage->Width == stImage->base.Width);
*
* \param stObj the st texture object,
*/
-MAYBE_UNUSED static boolean
+ASSERTED static boolean
check_sampler_swizzle(const struct st_context *st,
const struct st_texture_object *stObj,
const struct pipe_sampler_view *sv,
stObj->base._BufferObjectFormat)
== view->format);
assert(view->target == PIPE_BUFFER);
- unsigned base = stObj->base.BufferOffset;
- MAYBE_UNUSED unsigned size = MIN2(buf->width0 - base,
+ ASSERTED unsigned base = stObj->base.BufferOffset;
+ ASSERTED unsigned size = MIN2(buf->width0 - base,
(unsigned) stObj->base.BufferSize);
assert(view->u.buf.offset == base);
assert(view->u.buf.size == size);
{
const int m = val & 0x3ff;
const int e = (val >> 10) & 0x1f;
- MAYBE_UNUSED const int s = (val >> 15) & 0x1;
+ ASSERTED const int s = (val >> 15) & 0x1;
/* v = round_to_nearest(1.mmmmmmmmmm * 2^(e-15) * 255)
* = round_to_nearest((1.mmmmmmmmmm * 255) * 2^(e-15))
# endif
#endif
+/**
+ * UNUSED marks variables (or sometimes functions) that have to be defined,
+ * but are sometimes (or always) unused beyond that. A common case is for
+ * a function parameter to be used in some build configurations but not others.
+ * Another case is fallback vfuncs that don't do anything with their params.
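+ *
+ * For example, a sketch of a no-op fallback vfunc (hypothetical name, not
+ * from this tree):
+ *
+ *    static void noop_flush(UNUSED struct pipe_context *ctx)
+ *    {
+ *       // intentionally ignores its parameter
+ *    }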
+ *
+ * Note that this should not be used for identifiers used in `assert()`;
+ * see ASSERTED below.
+ */
#ifdef HAVE_FUNC_ATTRIBUTE_UNUSED
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
-#define MAYBE_UNUSED UNUSED
+/**
+ * Use ASSERTED to indicate that an identifier is unused outside of an `assert()`,
+ * so that assert-free builds don't get "unused variable" warnings.
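+ *
+ * Typical pattern (illustrative names):
+ *
+ *    ASSERTED bool ok = do_the_thing();
+ *    assert(ok);  // only use of 'ok'; compiled out when NDEBUG is set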
+ */
+#ifdef NDEBUG
+#define ASSERTED UNUSED
+#else
+#define ASSERTED
+#endif
#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
#define MUST_CHECK __attribute__((warn_unused_result))
*/
#define ASSERT_BITFIELD_SIZE(STRUCT, FIELD, MAXVAL) \
do { \
- MAYBE_UNUSED STRUCT s; \
+ ASSERTED STRUCT s; \
s.FIELD = (MAXVAL); \
assert((int) s.FIELD == (MAXVAL) && "Insufficient bitfield size!"); \
} while (0)
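+/* Usage sketch (hypothetical struct, for illustration only):
+ *
+ *    struct pkt { unsigned opcode:6; };
+ *    ASSERT_BITFIELD_SIZE(struct pkt, opcode, 0x3f);
+ *
+ * This asserts that the 6-bit field can represent the maximum value 0x3f.
+ */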
#define _SIMPLE_MTX_INITIALIZER_NP { 0 }
static inline void
-simple_mtx_init(simple_mtx_t *mtx, MAYBE_UNUSED int type)
+simple_mtx_init(simple_mtx_t *mtx, ASSERTED int type)
{
assert(type == mtx_plain);