From b7ef60d846d14471507f198b0a994d6ac42dd2a2 Mon Sep 17 00:00:00 2001 From: Jason Ekstrand Date: Wed, 16 Aug 2017 17:38:13 -0700 Subject: [PATCH] spirv: Replace assert with vtn_assert MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Reviewed-by: Tapani Pälli Reviewed-by: Ian Romanick --- src/compiler/spirv/spirv_to_nir.c | 209 ++++++++++++++-------------- src/compiler/spirv/vtn_alu.c | 26 ++-- src/compiler/spirv/vtn_cfg.c | 51 +++---- src/compiler/spirv/vtn_glsl450.c | 4 +- src/compiler/spirv/vtn_variables.c | 210 +++++++++++++++-------------- 5 files changed, 251 insertions(+), 249 deletions(-) diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c index e26775ba1a6..297db87d575 100644 --- a/src/compiler/spirv/spirv_to_nir.c +++ b/src/compiler/spirv/spirv_to_nir.c @@ -260,7 +260,7 @@ vtn_ssa_value(struct vtn_builder *b, uint32_t value_id) return val->ssa; case vtn_value_type_pointer: - assert(val->pointer->ptr_type && val->pointer->ptr_type->type); + vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type); struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, val->pointer->ptr_type->type); ssa->def = vtn_pointer_to_ssa(b, val->pointer); @@ -296,7 +296,7 @@ vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start, while (w < end) { SpvOp opcode = w[0] & SpvOpCodeMask; unsigned count = w[0] >> SpvWordCountShift; - assert(count >= 1 && w + count <= end); + vtn_assert(count >= 1 && w + count <= end); b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv; @@ -352,8 +352,7 @@ vtn_handle_extension(struct vtn_builder *b, SpvOp opcode, case SpvOpExtInst: { struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension); bool handled = val->ext_handler(b, w[4], w, count); - (void)handled; - assert(handled); + vtn_assert(handled); break; } @@ -374,7 +373,7 @@ _foreach_decoration_helper(struct vtn_builder *b, if (dec->scope == VTN_DEC_DECORATION) { member = parent_member; } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) { - assert(parent_member == -1); + vtn_assert(parent_member == -1); member = dec->scope - VTN_DEC_STRUCT_MEMBER0; } else { /* Not a decoration */ @@ -382,7 +381,7 @@ _foreach_decoration_helper(struct vtn_builder *b, } if (dec->group) { - assert(dec->group->value_type == vtn_value_type_decoration_group); + vtn_assert(dec->group->value_type == vtn_value_type_decoration_group); _foreach_decoration_helper(b, base_value, member, dec->group, cb, data); } else { @@ -412,7 +411,7 @@ vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value, if (dec->scope != VTN_DEC_EXECUTION_MODE) continue; - assert(dec->group == NULL); + vtn_assert(dec->group == NULL); cb(b, value, dec, data); } } @@ -543,7 +542,7 @@ mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member) type = type->array_element; } - assert(glsl_type_is_matrix(type->type)); + vtn_assert(glsl_type_is_matrix(type->type)); return type; } @@ -558,7 +557,7 @@ struct_member_decoration_cb(struct vtn_builder *b, if (member < 0) return; - assert(member < ctx->num_fields); + vtn_assert(member < ctx->num_fields); switch (dec->decoration) { case SpvDecorationNonWritable: @@ -582,7 +581,7 @@ struct_member_decoration_cb(struct vtn_builder *b, break; case SpvDecorationStream: /* Vulkan only allows one GS stream */ - assert(dec->literals[0] == 0); + vtn_assert(dec->literals[0] == 0); break; case SpvDecorationLocation: ctx->fields[member].location = dec->literals[0]; @@ -661,7 +660,7 @@ 
struct_member_matrix_stride_cb(struct vtn_builder *b, { if (dec->decoration != SpvDecorationMatrixStride) return; - assert(member >= 0); + vtn_assert(member >= 0); struct member_decoration_ctx *ctx = void_ctx; @@ -671,7 +670,7 @@ struct_member_matrix_stride_cb(struct vtn_builder *b, mat_type->stride = mat_type->array_element->stride; mat_type->array_element->stride = dec->literals[0]; } else { - assert(mat_type->array_element->stride > 0); + vtn_assert(mat_type->array_element->stride > 0); mat_type->stride = dec->literals[0]; } } @@ -688,17 +687,17 @@ type_decoration_cb(struct vtn_builder *b, switch (dec->decoration) { case SpvDecorationArrayStride: - assert(type->base_type == vtn_base_type_matrix || - type->base_type == vtn_base_type_array || - type->base_type == vtn_base_type_pointer); + vtn_assert(type->base_type == vtn_base_type_matrix || + type->base_type == vtn_base_type_array || + type->base_type == vtn_base_type_pointer); type->stride = dec->literals[0]; break; case SpvDecorationBlock: - assert(type->base_type == vtn_base_type_struct); + vtn_assert(type->base_type == vtn_base_type_struct); type->block = true; break; case SpvDecorationBufferBlock: - assert(type->base_type == vtn_base_type_struct); + vtn_assert(type->base_type == vtn_base_type_struct); type->buffer_block = true; break; case SpvDecorationGLSLShared: @@ -850,7 +849,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type; unsigned elems = w[3]; - assert(glsl_type_is_scalar(base->type)); + vtn_assert(glsl_type_is_scalar(base->type)); val->type->base_type = vtn_base_type_vector; val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems); val->type->stride = glsl_get_bit_size(base->type) / 8; @@ -862,12 +861,12 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type; unsigned columns = w[3]; - assert(glsl_type_is_vector(base->type)); + vtn_assert(glsl_type_is_vector(base->type)); val->type->base_type = vtn_base_type_matrix; val->type->type = glsl_matrix_type(glsl_get_base_type(base->type), glsl_get_vector_elements(base->type), columns); - assert(!glsl_type_is_error(val->type->type)); + vtn_assert(!glsl_type_is_error(val->type->type)); val->type->length = columns; val->type->array_element = base; val->type->row_major = false; @@ -969,7 +968,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, const struct glsl_type *sampled_type = vtn_value(b, w[2], vtn_value_type_type)->type->type; - assert(glsl_type_is_vector_or_scalar(sampled_type)); + vtn_assert(glsl_type_is_vector_or_scalar(sampled_type)); enum glsl_sampler_dim dim; switch ((SpvDim)w[3]) { @@ -1011,7 +1010,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, val->type->type = glsl_sampler_type(dim, is_shadow, is_array, glsl_get_base_type(sampled_type)); } else if (sampled == 2) { - assert(!is_shadow); + vtn_assert(!is_shadow); val->type->sampled = false; val->type->type = glsl_image_type(dim, is_array, glsl_get_base_type(sampled_type)); @@ -1071,7 +1070,7 @@ vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type) break; case GLSL_TYPE_ARRAY: - assert(glsl_get_length(type) > 0); + vtn_assert(glsl_get_length(type) > 0); c->num_elements = glsl_get_length(type); c->elements = ralloc_array(b, nir_constant *, c->num_elements); @@ -1101,7 +1100,7 @@ spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v, int member, const struct vtn_decoration *dec, void *data) { - assert(member == 
-1); + vtn_assert(member == -1); if (dec->decoration != SpvDecorationSpecId) return; @@ -1147,12 +1146,12 @@ handle_workgroup_size_decoration_cb(struct vtn_builder *b, const struct vtn_decoration *dec, void *data) { - assert(member == -1); + vtn_assert(member == -1); if (dec->decoration != SpvDecorationBuiltIn || dec->literals[0] != SpvBuiltInWorkgroupSize) return; - assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3)); + vtn_assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3)); b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0]; b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1]; @@ -1168,17 +1167,17 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, val->constant = rzalloc(b, nir_constant); switch (opcode) { case SpvOpConstantTrue: - assert(val->const_type == glsl_bool_type()); + vtn_assert(val->const_type == glsl_bool_type()); val->constant->values[0].u32[0] = NIR_TRUE; break; case SpvOpConstantFalse: - assert(val->const_type == glsl_bool_type()); + vtn_assert(val->const_type == glsl_bool_type()); val->constant->values[0].u32[0] = NIR_FALSE; break; case SpvOpSpecConstantTrue: case SpvOpSpecConstantFalse: { - assert(val->const_type == glsl_bool_type()); + vtn_assert(val->const_type == glsl_bool_type()); uint32_t int_val = get_specialization(b, val, (opcode == SpvOpSpecConstantTrue)); val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE; @@ -1186,19 +1185,19 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, } case SpvOpConstant: { - assert(glsl_type_is_scalar(val->const_type)); + vtn_assert(glsl_type_is_scalar(val->const_type)); int bit_size = glsl_get_bit_size(val->const_type); if (bit_size == 64) { val->constant->values->u32[0] = w[3]; val->constant->values->u32[1] = w[4]; } else { - assert(bit_size == 32); + vtn_assert(bit_size == 32); val->constant->values->u32[0] = w[3]; } break; } case SpvOpSpecConstant: { - assert(glsl_type_is_scalar(val->const_type)); + vtn_assert(glsl_type_is_scalar(val->const_type)); val->constant->values[0].u32[0] = get_specialization(b, val, w[3]); int bit_size = glsl_get_bit_size(val->const_type); if (bit_size == 64) @@ -1225,17 +1224,17 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, case GLSL_TYPE_DOUBLE: { int bit_size = glsl_get_bit_size(val->const_type); if (glsl_type_is_matrix(val->const_type)) { - assert(glsl_get_matrix_columns(val->const_type) == elem_count); + vtn_assert(glsl_get_matrix_columns(val->const_type) == elem_count); for (unsigned i = 0; i < elem_count; i++) val->constant->values[i] = elems[i]->values[0]; } else { - assert(glsl_type_is_vector(val->const_type)); - assert(glsl_get_vector_elements(val->const_type) == elem_count); + vtn_assert(glsl_type_is_vector(val->const_type)); + vtn_assert(glsl_get_vector_elements(val->const_type) == elem_count); for (unsigned i = 0; i < elem_count; i++) { if (bit_size == 64) { val->constant->values[0].u64[i] = elems[i]->values[0].u64[0]; } else { - assert(bit_size == 32); + vtn_assert(bit_size == 32); val->constant->values[0].u32[i] = elems[i]->values[0].u32[0]; } } @@ -1263,10 +1262,10 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, struct vtn_value *v0 = &b->values[w[4]]; struct vtn_value *v1 = &b->values[w[5]]; - assert(v0->value_type == vtn_value_type_constant || - v0->value_type == vtn_value_type_undef); - assert(v1->value_type == vtn_value_type_constant || - v1->value_type == vtn_value_type_undef); + vtn_assert(v0->value_type == vtn_value_type_constant || + v0->value_type == 
vtn_value_type_undef); + vtn_assert(v1->value_type == vtn_value_type_constant || + v1->value_type == vtn_value_type_undef); unsigned len0 = v0->value_type == vtn_value_type_constant ? glsl_get_vector_elements(v0->const_type) : @@ -1275,7 +1274,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, glsl_get_vector_elements(v1->const_type) : glsl_get_vector_elements(v1->type->type); - assert(len0 + len1 < 16); + vtn_assert(len0 + len1 < 16); unsigned bit_size = glsl_get_bit_size(val->const_type); unsigned bit_size0 = v0->value_type == vtn_value_type_constant ? @@ -1285,7 +1284,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, glsl_get_bit_size(v1->const_type) : glsl_get_bit_size(v1->type->type); - assert(bit_size == bit_size0 && bit_size == bit_size1); + vtn_assert(bit_size == bit_size0 && bit_size == bit_size1); (void)bit_size0; (void)bit_size1; if (bit_size == 64) { @@ -1365,12 +1364,12 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, case GLSL_TYPE_BOOL: /* If we hit this granularity, we're picking off an element */ if (glsl_type_is_matrix(type)) { - assert(col == 0 && elem == -1); + vtn_assert(col == 0 && elem == -1); col = w[i]; elem = 0; type = glsl_get_column_type(type); } else { - assert(elem <= 0 && glsl_type_is_vector(type)); + vtn_assert(elem <= 0 && glsl_type_is_vector(type)); elem = w[i]; type = glsl_scalar_type(glsl_get_base_type(type)); } @@ -1401,14 +1400,14 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, if (bit_size == 64) { val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i]; } else { - assert(bit_size == 32); + vtn_assert(bit_size == 32); val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i]; } } } else { struct vtn_value *insert = vtn_value(b, w[4], vtn_value_type_constant); - assert(insert->const_type == type); + vtn_assert(insert->const_type == type); if (elem == -1) { *c = insert->constant; } else { @@ -1418,7 +1417,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, if (bit_size == 64) { (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i]; } else { - assert(bit_size == 32); + vtn_assert(bit_size == 32); (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i]; } } @@ -1437,13 +1436,13 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, glsl_get_bit_size(val->const_type); nir_const_value src[4]; - assert(count <= 7); + vtn_assert(count <= 7); for (unsigned i = 0; i < count - 4; i++) { nir_constant *c = vtn_value(b, w[4 + i], vtn_value_type_constant)->constant; unsigned j = swap ? 
1 - i : i; - assert(bit_size == 32); + vtn_assert(bit_size == 32); src[j] = c->values[0]; } @@ -1503,7 +1502,7 @@ vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode, } nir_variable *out_tmp = NULL; - assert(res_type->type == callee->return_type); + vtn_assert(res_type->type == callee->return_type); if (!glsl_type_is_void(callee->return_type)) { out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type, "out_tmp"); @@ -1588,7 +1587,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, if (src_val->value_type == vtn_value_type_sampled_image) { val->pointer = src_val->sampled_image->image; } else { - assert(src_val->value_type == vtn_value_type_pointer); + vtn_assert(src_val->value_type == vtn_value_type_pointer); val->pointer = src_val->pointer; } return; @@ -1602,7 +1601,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, if (sampled_val->value_type == vtn_value_type_sampled_image) { sampled = *sampled_val->sampled_image; } else { - assert(sampled_val->value_type == vtn_value_type_pointer); + vtn_assert(sampled_val->value_type == vtn_value_type_pointer); sampled.type = sampled_val->pointer->type; sampled.image = NULL; sampled.sampler = sampled_val->pointer; @@ -1766,19 +1765,19 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, uint32_t operands = w[idx++]; if (operands & SpvImageOperandsBiasMask) { - assert(texop == nir_texop_tex); + vtn_assert(texop == nir_texop_tex); texop = nir_texop_txb; (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias); } if (operands & SpvImageOperandsLodMask) { - assert(texop == nir_texop_txl || texop == nir_texop_txf || - texop == nir_texop_txs); + vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf || + texop == nir_texop_txs); (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod); } if (operands & SpvImageOperandsGradMask) { - assert(texop == nir_texop_txl); + vtn_assert(texop == nir_texop_txl); texop = nir_texop_txd; (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx); (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy); @@ -1794,13 +1793,13 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, } if (operands & SpvImageOperandsSampleMask) { - assert(texop == nir_texop_txf_ms); + vtn_assert(texop == nir_texop_txf_ms); texop = nir_texop_txf_ms; (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index); } } /* We should have now consumed exactly all of the arguments */ - assert(idx == count); + vtn_assert(idx == count); nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs); instr->op = texop; @@ -1861,14 +1860,14 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, nir_ssa_dest_init(&instr->instr, &instr->dest, nir_tex_instr_dest_size(instr), 32, NULL); - assert(glsl_get_vector_elements(ret_type->type) == - nir_tex_instr_dest_size(instr)); + vtn_assert(glsl_get_vector_elements(ret_type->type) == + nir_tex_instr_dest_size(instr)); nir_ssa_def *def; nir_instr *instruction; if (gather_offsets) { - assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY); - assert(glsl_get_length(gather_offsets->type) == 4); + vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY); + vtn_assert(glsl_get_length(gather_offsets->type) == 4); nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL}; /* Copy the current instruction 4x */ @@ -2031,7 +2030,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode, image.coord = get_image_coord(b, w[4]); if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) { - assert(w[5] == SpvImageOperandsSampleMask); + vtn_assert(w[5] == 
SpvImageOperandsSampleMask); image.sample = vtn_ssa_value(b, w[6])->def; } else { image.sample = nir_ssa_undef(&b->nb, 1, 32); @@ -2045,7 +2044,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode, /* texel = w[3] */ if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) { - assert(w[4] == SpvImageOperandsSampleMask); + vtn_assert(w[4] == SpvImageOperandsSampleMask); image.sample = vtn_ssa_value(b, w[5])->def; } else { image.sample = nir_ssa_undef(&b->nb, 1, 32); @@ -2286,7 +2285,7 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode, } } else { - assert(ptr->mode == vtn_variable_mode_ssbo); + vtn_assert(ptr->mode == vtn_variable_mode_ssbo); nir_ssa_def *offset, *index; offset = vtn_pointer_to_offset(b, ptr, &index, NULL); @@ -2493,12 +2492,12 @@ vtn_vector_construct(struct vtn_builder *b, unsigned num_components, * "When constructing a vector, there must be at least two Constituent * operands." */ - assert(num_srcs >= 2); + vtn_assert(num_srcs >= 2); unsigned dest_idx = 0; for (unsigned i = 0; i < num_srcs; i++) { nir_ssa_def *src = srcs[i]; - assert(dest_idx + src->num_components <= num_components); + vtn_assert(dest_idx + src->num_components <= num_components); for (unsigned j = 0; j < src->num_components; j++) { vec->src[dest_idx].src = nir_src_for_ssa(src); vec->src[dest_idx].swizzle[0] = j; @@ -2511,7 +2510,7 @@ vtn_vector_construct(struct vtn_builder *b, unsigned num_components, * "When constructing a vector, the total number of components in all * the operands must equal the number of components in Result Type." */ - assert(dest_idx == num_components); + vtn_assert(dest_idx == num_components); nir_builder_instr_insert(&b->nb, &vec->instr); @@ -2571,7 +2570,7 @@ vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src, struct vtn_ssa_value *cur = src; for (unsigned i = 0; i < num_indices; i++) { if (glsl_type_is_vector_or_scalar(cur->type)) { - assert(i == num_indices - 1); + vtn_assert(i == num_indices - 1); /* According to the SPIR-V spec, OpCompositeExtract may work down to * the component granularity. The last index will be the index of the * vector to extract. 
@@ -2904,9 +2903,9 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, break; case SpvOpMemoryModel: - assert(w[1] == SpvAddressingModelLogical); - assert(w[2] == SpvMemoryModelSimple || - w[2] == SpvMemoryModelGLSL450); + vtn_assert(w[1] == SpvAddressingModelLogical); + vtn_assert(w[2] == SpvMemoryModelSimple || + w[2] == SpvMemoryModelGLSL450); break; case SpvOpEntryPoint: { @@ -2919,7 +2918,7 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, stage_for_execution_model(w[1]) != b->entry_point_stage) break; - assert(b->entry_point == NULL); + vtn_assert(b->entry_point == NULL); b->entry_point = entry_point; break; } @@ -2957,7 +2956,7 @@ static void vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, const struct vtn_decoration *mode, void *data) { - assert(b->entry_point == entry_point); + vtn_assert(b->entry_point == entry_point); switch(mode->exec_mode) { case SpvExecutionModeOriginUpperLeft: @@ -2967,34 +2966,34 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, break; case SpvExecutionModeEarlyFragmentTests: - assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); + vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); b->shader->info.fs.early_fragment_tests = true; break; case SpvExecutionModeInvocations: - assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); + vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); b->shader->info.gs.invocations = MAX2(1, mode->literals[0]); break; case SpvExecutionModeDepthReplacing: - assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); + vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY; break; case SpvExecutionModeDepthGreater: - assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); + vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER; break; case SpvExecutionModeDepthLess: - assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); + vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS; break; case SpvExecutionModeDepthUnchanged: - assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); + vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED; break; case SpvExecutionModeLocalSize: - assert(b->shader->info.stage == MESA_SHADER_COMPUTE); + vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE); b->shader->info.cs.local_size[0] = mode->literals[0]; b->shader->info.cs.local_size[1] = mode->literals[1]; b->shader->info.cs.local_size[2] = mode->literals[2]; @@ -3007,7 +3006,7 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, b->shader->info.stage == MESA_SHADER_TESS_EVAL) { b->shader->info.tess.tcs_vertices_out = mode->literals[0]; } else { - assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); + vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); b->shader->info.gs.vertices_out = mode->literals[0]; } break; @@ -3024,7 +3023,7 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, b->shader->info.tess.primitive_mode = gl_primitive_from_spv_execution_mode(mode->exec_mode); } else { - assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); + vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); b->shader->info.gs.vertices_in = vertices_in_from_spv_execution_mode(mode->exec_mode); } @@ -3033,39 +3032,39 @@ 
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, case SpvExecutionModeOutputPoints: case SpvExecutionModeOutputLineStrip: case SpvExecutionModeOutputTriangleStrip: - assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); + vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); b->shader->info.gs.output_primitive = gl_primitive_from_spv_execution_mode(mode->exec_mode); break; case SpvExecutionModeSpacingEqual: - assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || - b->shader->info.stage == MESA_SHADER_TESS_EVAL); + vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || + b->shader->info.stage == MESA_SHADER_TESS_EVAL); b->shader->info.tess.spacing = TESS_SPACING_EQUAL; break; case SpvExecutionModeSpacingFractionalEven: - assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || - b->shader->info.stage == MESA_SHADER_TESS_EVAL); + vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || + b->shader->info.stage == MESA_SHADER_TESS_EVAL); b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN; break; case SpvExecutionModeSpacingFractionalOdd: - assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || - b->shader->info.stage == MESA_SHADER_TESS_EVAL); + vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || + b->shader->info.stage == MESA_SHADER_TESS_EVAL); b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD; break; case SpvExecutionModeVertexOrderCw: - assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || - b->shader->info.stage == MESA_SHADER_TESS_EVAL); + vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || + b->shader->info.stage == MESA_SHADER_TESS_EVAL); b->shader->info.tess.ccw = false; break; case SpvExecutionModeVertexOrderCcw: - assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || - b->shader->info.stage == MESA_SHADER_TESS_EVAL); + vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || + b->shader->info.stage == MESA_SHADER_TESS_EVAL); b->shader->info.tess.ccw = true; break; case SpvExecutionModePointMode: - assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || - b->shader->info.stage == MESA_SHADER_TESS_EVAL); + vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || + b->shader->info.stage == MESA_SHADER_TESS_EVAL); b->shader->info.tess.point_mode = true; break; @@ -3231,7 +3230,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, if (image->mode == vtn_variable_mode_image) { vtn_handle_image(b, opcode, w, count); } else { - assert(image->mode == vtn_variable_mode_sampler); + vtn_assert(image->mode == vtn_variable_mode_sampler); vtn_handle_texture(b, opcode, w, count); } break; @@ -3256,7 +3255,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, if (pointer->value_type == vtn_value_type_image_pointer) { vtn_handle_image(b, opcode, w, count); } else { - assert(pointer->value_type == vtn_value_type_pointer); + vtn_assert(pointer->value_type == vtn_value_type_pointer); vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count); } break; @@ -3267,7 +3266,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, if (pointer->value_type == vtn_value_type_image_pointer) { vtn_handle_image(b, opcode, w, count); } else { - assert(pointer->value_type == vtn_value_type_pointer); + vtn_assert(pointer->value_type == vtn_value_type_pointer); vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count); } break; @@ -3441,13 +3440,13 @@ spirv_to_nir(const uint32_t *words, size_t word_count, const uint32_t *word_end = words + word_count; /* Handle the SPIR-V header 
(first 4 dwords) */ - assert(word_count > 5); + vtn_assert(word_count > 5); - assert(words[0] == SpvMagicNumber); - assert(words[1] >= 0x10000); + vtn_assert(words[0] == SpvMagicNumber); + vtn_assert(words[1] >= 0x10000); /* words[2] == generator magic */ unsigned value_id_bound = words[3]; - assert(words[4] == 0); + vtn_assert(words[4] == 0); words+= 5; @@ -3499,9 +3498,9 @@ spirv_to_nir(const uint32_t *words, size_t word_count, } } while (progress); - assert(b->entry_point->value_type == vtn_value_type_function); + vtn_assert(b->entry_point->value_type == vtn_value_type_function); nir_function *entry_point = b->entry_point->func->impl->function; - assert(entry_point); + vtn_assert(entry_point); /* Unparent the shader from the vtn_builder before we delete the builder */ ralloc_steal(NULL, b->shader); diff --git a/src/compiler/spirv/vtn_alu.c b/src/compiler/spirv/vtn_alu.c index ecf9cbc34d6..5fa695bea19 100644 --- a/src/compiler/spirv/vtn_alu.c +++ b/src/compiler/spirv/vtn_alu.c @@ -243,29 +243,29 @@ vtn_handle_bitcast(struct vtn_builder *b, struct vtn_ssa_value *dest, unsigned dest_bit_size = glsl_get_bit_size(dest->type); unsigned src_components = src->num_components; unsigned dest_components = glsl_get_vector_elements(dest->type); - assert(src_bit_size * src_components == dest_bit_size * dest_components); + vtn_assert(src_bit_size * src_components == dest_bit_size * dest_components); nir_ssa_def *dest_chan[4]; if (src_bit_size > dest_bit_size) { - assert(src_bit_size % dest_bit_size == 0); + vtn_assert(src_bit_size % dest_bit_size == 0); unsigned divisor = src_bit_size / dest_bit_size; for (unsigned comp = 0; comp < src_components; comp++) { - assert(src_bit_size == 64); - assert(dest_bit_size == 32); + vtn_assert(src_bit_size == 64); + vtn_assert(dest_bit_size == 32); nir_ssa_def *split = nir_unpack_64_2x32(&b->nb, nir_channel(&b->nb, src, comp)); for (unsigned i = 0; i < divisor; i++) dest_chan[divisor * comp + i] = nir_channel(&b->nb, split, i); } } else { - assert(dest_bit_size % src_bit_size == 0); + vtn_assert(dest_bit_size % src_bit_size == 0); unsigned divisor = dest_bit_size / src_bit_size; for (unsigned comp = 0; comp < dest_components; comp++) { unsigned channels = ((1 << divisor) - 1) << (comp * divisor); nir_ssa_def *src_chan = nir_channels(&b->nb, src, channels); - assert(dest_bit_size == 64); - assert(src_bit_size == 32); + vtn_assert(dest_bit_size == 64); + vtn_assert(src_bit_size == 32); dest_chan[comp] = nir_pack_64_2x32(&b->nb, src_chan); } } @@ -374,7 +374,7 @@ static void handle_no_contraction(struct vtn_builder *b, struct vtn_value *val, int member, const struct vtn_decoration *dec, void *_void) { - assert(dec->scope == VTN_DEC_DECORATION); + vtn_assert(dec->scope == VTN_DEC_DECORATION); if (dec->decoration != SpvDecorationNoContraction) return; @@ -407,7 +407,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, val->ssa = vtn_create_ssa_value(b, type); nir_ssa_def *src[4] = { NULL, }; for (unsigned i = 0; i < num_inputs; i++) { - assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type)); + vtn_assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type)); src[i] = vtn_src[i]->def; } @@ -459,25 +459,25 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, break; case SpvOpIAddCarry: - assert(glsl_type_is_struct(val->ssa->type)); + vtn_assert(glsl_type_is_struct(val->ssa->type)); val->ssa->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]); val->ssa->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]); break; case SpvOpISubBorrow: - 
assert(glsl_type_is_struct(val->ssa->type)); + vtn_assert(glsl_type_is_struct(val->ssa->type)); val->ssa->elems[0]->def = nir_isub(&b->nb, src[0], src[1]); val->ssa->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]); break; case SpvOpUMulExtended: - assert(glsl_type_is_struct(val->ssa->type)); + vtn_assert(glsl_type_is_struct(val->ssa->type)); val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]); val->ssa->elems[1]->def = nir_umul_high(&b->nb, src[0], src[1]); break; case SpvOpSMulExtended: - assert(glsl_type_is_struct(val->ssa->type)); + vtn_assert(glsl_type_is_struct(val->ssa->type)); val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]); val->ssa->elems[1]->def = nir_imul_high(&b->nb, src[0], src[1]); break; diff --git a/src/compiler/spirv/vtn_cfg.c b/src/compiler/spirv/vtn_cfg.c index 70bbccb7cdd..a8dff09b7f6 100644 --- a/src/compiler/spirv/vtn_cfg.c +++ b/src/compiler/spirv/vtn_cfg.c @@ -30,7 +30,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode, { switch (opcode) { case SpvOpFunction: { - assert(b->func == NULL); + vtn_assert(b->func == NULL); b->func = rzalloc(b, struct vtn_function); list_inithead(&b->func->body); @@ -44,7 +44,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode, const struct vtn_type *func_type = vtn_value(b, w[4], vtn_value_type_type)->type; - assert(func_type->return_type->type == result_type); + vtn_assert(func_type->return_type->type == result_type); nir_function *func = nir_function_create(b->shader, ralloc_strdup(b->shader, val->name)); @@ -80,7 +80,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpFunctionParameter: { struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type; - assert(b->func_param_idx < b->func->impl->num_params); + vtn_assert(b->func_param_idx < b->func->impl->num_params); nir_variable *param = b->func->impl->params[b->func_param_idx++]; if (type->base_type == vtn_base_type_pointer && type->type == NULL) { @@ -88,7 +88,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode, vtn_var->type = type->deref; vtn_var->var = param; - assert(vtn_var->type->type == param->type); + vtn_assert(vtn_var->type->type == param->type); struct vtn_type *without_array = vtn_var->type; while(glsl_type_is_array(without_array->type)) @@ -124,7 +124,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode, } case SpvOpLabel: { - assert(b->block == NULL); + vtn_assert(b->block == NULL); b->block = rzalloc(b, struct vtn_block); b->block->node.type = vtn_cf_node_type_block; b->block->label = w; @@ -143,7 +143,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpSelectionMerge: case SpvOpLoopMerge: - assert(b->block && b->block->merge == NULL); + vtn_assert(b->block && b->block->merge == NULL); b->block->merge = w; break; @@ -154,7 +154,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpReturn: case SpvOpReturnValue: case SpvOpUnreachable: - assert(b->block && b->block->branch == NULL); + vtn_assert(b->block && b->block->branch == NULL); b->block->branch = w; b->block = NULL; break; @@ -231,14 +231,15 @@ vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse) } static enum vtn_branch_type -vtn_get_branch_type(struct vtn_block *block, +vtn_get_branch_type(struct vtn_builder *b, + struct vtn_block *block, struct vtn_case *swcase, struct vtn_block *switch_break, struct vtn_block *loop_break, struct vtn_block *loop_cont) { if 
(block->switch_case) { /* This branch is actually a fallthrough */ - assert(swcase->fallthrough == NULL || - swcase->fallthrough == block->switch_case); + vtn_assert(swcase->fallthrough == NULL || + swcase->fallthrough == block->switch_case); swcase->fallthrough = block->switch_case; return vtn_branch_type_switch_fallthrough; } else if (block == loop_break) { @@ -301,7 +302,7 @@ vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list, continue; } - assert(block->node.link.next == NULL); + vtn_assert(block->node.link.next == NULL); list_addtail(&block->node.link, cf_list); switch (*block->branch & SpvOpCodeMask) { @@ -309,7 +310,7 @@ vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list, struct vtn_block *branch_block = vtn_value(b, block->branch[1], vtn_value_type_block)->block; - block->branch_type = vtn_get_branch_type(branch_block, + block->branch_type = vtn_get_branch_type(b, branch_block, switch_case, switch_break, loop_break, loop_cont); @@ -349,10 +350,10 @@ vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list, if_stmt->control = block->merge[2]; } - if_stmt->then_type = vtn_get_branch_type(then_block, + if_stmt->then_type = vtn_get_branch_type(b, then_block, switch_case, switch_break, loop_break, loop_cont); - if_stmt->else_type = vtn_get_branch_type(else_block, + if_stmt->else_type = vtn_get_branch_type(b, else_block, switch_case, switch_break, loop_break, loop_cont); @@ -367,7 +368,7 @@ vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list, } else if (if_stmt->then_type == vtn_branch_type_none && if_stmt->else_type == vtn_branch_type_none) { /* Neither side of the if is something we can short-circuit. */ - assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge); + vtn_assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge); struct vtn_block *merge_block = vtn_value(b, block->merge[1], vtn_value_type_block)->block; @@ -379,7 +380,7 @@ vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list, loop_break, loop_cont, merge_block); enum vtn_branch_type merge_type = - vtn_get_branch_type(merge_block, switch_case, switch_break, + vtn_get_branch_type(b, merge_block, switch_case, switch_break, loop_break, loop_cont); if (merge_type == vtn_branch_type_none) { block = merge_block; @@ -408,7 +409,7 @@ vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list, } case SpvOpSwitch: { - assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge); + vtn_assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge); struct vtn_block *break_block = vtn_value(b, block->merge[1], vtn_value_type_block)->block; @@ -433,7 +434,7 @@ vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list, * information. 
*/ list_for_each_entry(struct vtn_case, cse, &swtch->cases, link) { - assert(cse->start_block != break_block); + vtn_assert(cse->start_block != break_block); vtn_cfg_walk_blocks(b, &cse->body, cse->start_block, cse, break_block, loop_break, loop_cont, NULL); } @@ -448,13 +449,13 @@ vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list, if (case_block == break_block) continue; - assert(case_block->switch_case); + vtn_assert(case_block->switch_case); vtn_order_case(swtch, case_block->switch_case); } enum vtn_branch_type branch_type = - vtn_get_branch_type(break_block, switch_case, NULL, + vtn_get_branch_type(b, break_block, switch_case, NULL, loop_break, loop_cont); if (branch_type != vtn_branch_type_none) { @@ -462,7 +463,7 @@ vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list, * for the containing loop. In this case, we need to bail and let * the loop parsing code handle the continue properly. */ - assert(branch_type == vtn_branch_type_loop_continue); + vtn_assert(branch_type == vtn_branch_type_loop_continue); return; } @@ -532,7 +533,7 @@ vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode, return true; struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w); - assert(phi_entry); + vtn_assert(phi_entry); nir_variable *phi_var = phi_entry->data; for (unsigned i = 3; i < count; i += 2) { @@ -728,7 +729,7 @@ vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list, any = any ? nir_ior(&b->nb, any, cond) : cond; conditions[i++] = cond; } - assert(i == num_cases); + vtn_assert(i == num_cases); /* Now we can walk the list of cases and actually emit code */ i = 0; @@ -736,7 +737,7 @@ vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list, /* Figure out the condition */ nir_ssa_def *cond = conditions[i++]; if (cse->is_default) { - assert(cond == NULL); + vtn_assert(cond == NULL); cond = nir_inot(&b->nb, any); } /* Take fallthrough into account */ @@ -751,7 +752,7 @@ vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list, nir_pop_if(&b->nb, case_if); } - assert(i == num_cases); + vtn_assert(i == num_cases); break; } diff --git a/src/compiler/spirv/vtn_glsl450.c b/src/compiler/spirv/vtn_glsl450.c index c30dcc74add..051e03482e9 100644 --- a/src/compiler/spirv/vtn_glsl450.c +++ b/src/compiler/spirv/vtn_glsl450.c @@ -515,7 +515,7 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint, case GLSLstd450ModfStruct: { nir_ssa_def *sign = nir_fsign(nb, src[0]); nir_ssa_def *abs = nir_fabs(nb, src[0]); - assert(glsl_type_is_struct(val->ssa->type)); + vtn_assert(glsl_type_is_struct(val->ssa->type)); val->ssa->elems[0]->def = nir_fmul(nb, sign, nir_ffract(nb, abs)); val->ssa->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs)); return; @@ -690,7 +690,7 @@ handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint, } case GLSLstd450FrexpStruct: { - assert(glsl_type_is_struct(val->ssa->type)); + vtn_assert(glsl_type_is_struct(val->ssa->type)); val->ssa->elems[0]->def = build_frexp(nb, src[0], &val->ssa->elems[1]->def); return; diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c index c57f5541319..3190b21f0c7 100644 --- a/src/compiler/spirv/vtn_variables.c +++ b/src/compiler/spirv/vtn_variables.c @@ -71,14 +71,14 @@ vtn_access_chain_pointer_dereference(struct vtn_builder *b, * pointers. For everything else, the client is expected to just pass us * the right access chain. 
*/ - assert(!deref_chain->ptr_as_array); + vtn_assert(!deref_chain->ptr_as_array); unsigned start = base->chain ? base->chain->length : 0; for (unsigned i = 0; i < deref_chain->length; i++) { chain->link[start + i] = deref_chain->link[i]; if (glsl_type_is_struct(type->type)) { - assert(deref_chain->link[i].mode == vtn_access_mode_literal); + vtn_assert(deref_chain->link[i].mode == vtn_access_mode_literal); type = type->members[deref_chain->link[i].id]; } else { type = type->array_element; @@ -98,7 +98,7 @@ static nir_ssa_def * vtn_access_link_as_ssa(struct vtn_builder *b, struct vtn_access_link link, unsigned stride) { - assert(stride > 0); + vtn_assert(stride > 0); if (link.mode == vtn_access_mode_literal) { return nir_imm_int(&b->nb, link.id * stride); } else if (stride == 1) { @@ -119,7 +119,7 @@ vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var, nir_ssa_def *desc_array_index) { if (!desc_array_index) { - assert(glsl_type_is_struct(var->type->type)); + vtn_assert(glsl_type_is_struct(var->type->type)); desc_array_index = nir_imm_int(&b->nb, 0); } @@ -148,11 +148,11 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b, unsigned idx = 0; if (deref_chain->ptr_as_array) { /* We need ptr_type for the stride */ - assert(base->ptr_type); + vtn_assert(base->ptr_type); /* This must be a pointer to an actual element somewhere */ - assert(block_index && offset); + vtn_assert(block_index && offset); /* We need at least one element in the chain */ - assert(deref_chain->length >= 1); + vtn_assert(deref_chain->length >= 1); nir_ssa_def *elem_offset = vtn_access_link_as_ssa(b, deref_chain->link[idx], @@ -162,10 +162,10 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b, } if (!block_index) { - assert(base->var); + vtn_assert(base->var); if (glsl_type_is_array(type->type)) { /* We need at least one element in the chain */ - assert(deref_chain->length >= 1); + vtn_assert(deref_chain->length >= 1); nir_ssa_def *desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1); @@ -177,10 +177,10 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b, } /* This is the first access chain so we also need an offset */ - assert(!offset); + vtn_assert(!offset); offset = nir_imm_int(&b->nb, 0); } - assert(offset); + vtn_assert(offset); for (; idx < deref_chain->length; idx++) { switch (glsl_get_base_type(type->type)) { @@ -200,7 +200,7 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b, } case GLSL_TYPE_STRUCT: { - assert(deref_chain->link[idx].mode == vtn_access_mode_literal); + vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal); unsigned member = deref_chain->link[idx].id; nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]); offset = nir_iadd(&b->nb, offset, mem_offset); @@ -240,13 +240,14 @@ vtn_pointer_dereference(struct vtn_builder *b, * tail_type. This is useful for split structures. 
*/ static void -rewrite_deref_types(nir_deref *deref, const struct glsl_type *type) +rewrite_deref_types(struct vtn_builder *b, nir_deref *deref, + const struct glsl_type *type) { deref->type = type; if (deref->child) { - assert(deref->child->deref_type == nir_deref_type_array); - assert(glsl_type_is_array(deref->type)); - rewrite_deref_types(deref->child, glsl_get_array_element(type)); + vtn_assert(deref->child->deref_type == nir_deref_type_array); + vtn_assert(glsl_type_is_array(deref->type)); + rewrite_deref_types(b, deref->child, glsl_get_array_element(type)); } } @@ -258,8 +259,8 @@ vtn_pointer_for_variable(struct vtn_builder *b, pointer->mode = var->mode; pointer->type = var->type; - assert(ptr_type->base_type == vtn_base_type_pointer); - assert(ptr_type->deref->type == var->type->type); + vtn_assert(ptr_type->base_type == vtn_base_type_pointer); + vtn_assert(ptr_type->deref->type == var->type->type); pointer->ptr_type = ptr_type; pointer->var = var; @@ -280,14 +281,14 @@ vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr) if (!ptr->chain) return deref_var; } else { - assert(ptr->var->members); + vtn_assert(ptr->var->members); /* Create the deref_var manually. It will get filled out later. */ deref_var = rzalloc(b, nir_deref_var); deref_var->deref.deref_type = nir_deref_type_var; } struct vtn_access_chain *chain = ptr->chain; - assert(chain); + vtn_assert(chain); struct vtn_type *deref_type = ptr->var->type; nir_deref *tail = &deref_var->deref; @@ -313,7 +314,7 @@ vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr) deref_arr->deref_array_type = nir_deref_array_type_direct; deref_arr->base_offset = chain->link[i].id; } else { - assert(chain->link[i].mode == vtn_access_mode_id); + vtn_assert(chain->link[i].mode == vtn_access_mode_id); deref_arr->deref_array_type = nir_deref_array_type_indirect; deref_arr->base_offset = 0; deref_arr->indirect = @@ -325,14 +326,14 @@ vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr) } case GLSL_TYPE_STRUCT: { - assert(chain->link[i].mode == vtn_access_mode_literal); + vtn_assert(chain->link[i].mode == vtn_access_mode_literal); unsigned idx = chain->link[i].id; deref_type = deref_type->members[idx]; if (members) { /* This is a pre-split structure. 
*/ deref_var->var = members[idx]; - rewrite_deref_types(&deref_var->deref, members[idx]->type); - assert(tail->type == deref_type->type); + rewrite_deref_types(b, &deref_var->deref, members[idx]->type); + vtn_assert(tail->type == deref_type->type); members = NULL; } else { nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx); @@ -347,7 +348,7 @@ vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr) } } - assert(members == NULL); + vtn_assert(members == NULL); return deref_var; } @@ -398,7 +399,7 @@ _vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref, _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]); } } else { - assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT); + vtn_assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT); unsigned elems = glsl_get_length(tail->type); nir_deref_struct *deref_struct = nir_deref_struct_create(b, 0); tail->child = &deref_struct->deref; @@ -443,7 +444,7 @@ vtn_local_load(struct vtn_builder *b, nir_deref_var *src) if (src_tail->child) { nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child); - assert(vec_deref->deref.child == NULL); + vtn_assert(vec_deref->deref.child == NULL); val->type = vec_deref->deref.type; if (vec_deref->deref_array_type == nir_deref_array_type_direct) val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset); @@ -465,7 +466,7 @@ vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src, struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type); _vtn_local_load_store(b, true, dest, dest_tail, val); nir_deref_array *deref = nir_deref_as_array(dest_tail->child); - assert(deref->deref.child == NULL); + vtn_assert(deref->deref.child == NULL); if (deref->deref_array_type == nir_deref_array_type_direct) val->def = vtn_vector_insert(b, val->def, src->def, deref->base_offset); @@ -490,7 +491,7 @@ get_vulkan_resource_index(struct vtn_builder *b, struct vtn_pointer *ptr, } if (glsl_type_is_array(ptr->var->type->type)) { - assert(ptr->chain->length > 0); + vtn_assert(ptr->chain->length > 0); nir_ssa_def *desc_array_index = vtn_access_link_as_ssa(b, ptr->chain->link[0], 1); *chain_idx = 1; @@ -508,7 +509,7 @@ vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr, nir_ssa_def **index_out, unsigned *end_idx_out) { if (ptr->offset) { - assert(ptr->block_index); + vtn_assert(ptr->block_index); *index_out = ptr->block_index; return ptr->offset; } @@ -537,7 +538,7 @@ vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr, break; case GLSL_TYPE_STRUCT: { - assert(ptr->chain->link[idx].mode == vtn_access_mode_literal); + vtn_assert(ptr->chain->link[idx].mode == vtn_access_mode_literal); unsigned member = ptr->chain->link[idx].id; offset = nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[member])); @@ -550,7 +551,7 @@ vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr, } } - assert(type == ptr->type); + vtn_assert(type == ptr->type); if (end_idx_out) *end_idx_out = idx; @@ -561,7 +562,7 @@ vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr, * offsets that are provided to us in the SPIR-V source. */ static unsigned -vtn_type_block_size(struct vtn_type *type) +vtn_type_block_size(struct vtn_builder *b, struct vtn_type *type) { enum glsl_base_type base_type = glsl_get_base_type(type->type); switch (base_type) { @@ -575,7 +576,7 @@ vtn_type_block_size(struct vtn_type *type) unsigned cols = type->row_major ? 
glsl_get_vector_elements(type->type) : glsl_get_matrix_columns(type->type); if (cols > 1) { - assert(type->stride > 0); + vtn_assert(type->stride > 0); return type->stride * cols; } else if (base_type == GLSL_TYPE_DOUBLE || base_type == GLSL_TYPE_UINT64 || @@ -592,15 +593,15 @@ vtn_type_block_size(struct vtn_type *type) unsigned num_fields = glsl_get_length(type->type); for (unsigned f = 0; f < num_fields; f++) { unsigned field_end = type->offsets[f] + - vtn_type_block_size(type->members[f]); + vtn_type_block_size(b, type->members[f]); size = MAX2(size, field_end); } return size; } case GLSL_TYPE_ARRAY: - assert(type->stride > 0); - assert(glsl_get_length(type->type) > 0); + vtn_assert(type->stride > 0); + vtn_assert(glsl_get_length(type->type) > 0); return type->stride * glsl_get_length(type->type); default: @@ -610,7 +611,8 @@ vtn_type_block_size(struct vtn_type *type) } static void -vtn_access_chain_get_offset_size(struct vtn_access_chain *chain, +vtn_access_chain_get_offset_size(struct vtn_builder *b, + struct vtn_access_chain *chain, struct vtn_type *type, unsigned *access_offset, unsigned *access_size) @@ -630,7 +632,7 @@ vtn_access_chain_get_offset_size(struct vtn_access_chain *chain, } } - *access_size = vtn_type_block_size(type); + *access_size = vtn_type_block_size(b, type); } static void @@ -649,7 +651,7 @@ _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load, } if (op == nir_intrinsic_load_push_constant) { - assert(access_offset % 4 == 0); + vtn_assert(access_offset % 4 == 0); nir_intrinsic_set_base(instr, access_offset); nir_intrinsic_set_range(instr, access_size); @@ -744,7 +746,7 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load, unsigned type_size = glsl_get_bit_size(type->type) / 8; if (elems == 1 || type->stride == type_size) { /* This is a tightly-packed normal scalar or vector load */ - assert(glsl_type_is_vector_or_scalar(type->type)); + vtn_assert(glsl_type_is_vector_or_scalar(type->type)); _vtn_load_store_tail(b, op, load, index, offset, access_offset, access_size, inout, type->type); @@ -752,8 +754,8 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load, /* This is a strided load. We have to load N things separately. * This is the single column of a row-major matrix case. 
*/ - assert(type->stride > type_size); - assert(type->stride % type_size == 0); + vtn_assert(type->stride > type_size); + vtn_assert(type->stride % type_size == 0); nir_ssa_def *per_comp[4]; for (unsigned i = 0; i < elems; i++) { @@ -826,7 +828,7 @@ vtn_block_load(struct vtn_builder *b, struct vtn_pointer *src) break; case vtn_variable_mode_push_constant: op = nir_intrinsic_load_push_constant; - vtn_access_chain_get_offset_size(src->chain, src->var->type, + vtn_access_chain_get_offset_size(b, src->chain, src->var->type, &access_offset, &access_size); break; default: @@ -895,7 +897,7 @@ _vtn_variable_load_store(struct vtn_builder *b, bool load, case GLSL_TYPE_STRUCT: { unsigned elems = glsl_get_length(ptr->type->type); if (load) { - assert(*inout == NULL); + vtn_assert(*inout == NULL); *inout = rzalloc(b, struct vtn_ssa_value); (*inout)->type = ptr->type->type; (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems); @@ -937,7 +939,7 @@ vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src, struct vtn_pointer *dest) { if (vtn_pointer_is_external_block(dest)) { - assert(dest->mode == vtn_variable_mode_ssbo); + vtn_assert(dest->mode == vtn_variable_mode_ssbo); vtn_block_store(b, src, dest); } else { _vtn_variable_load_store(b, false, dest, &src); @@ -948,7 +950,7 @@ static void _vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest, struct vtn_pointer *src) { - assert(src->type->type == dest->type->type); + vtn_assert(src->type->type == dest->type->type); enum glsl_base_type base_type = glsl_get_base_type(src->type->type); switch (base_type) { case GLSL_TYPE_UINT: @@ -1004,9 +1006,9 @@ vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest, } static void -set_mode_system_value(nir_variable_mode *mode) +set_mode_system_value(struct vtn_builder *b, nir_variable_mode *mode) { - assert(*mode == nir_var_system_value || *mode == nir_var_shader_in); + vtn_assert(*mode == nir_var_system_value || *mode == nir_var_shader_in); *mode = nir_var_system_value; } @@ -1030,37 +1032,37 @@ vtn_get_builtin_location(struct vtn_builder *b, break; case SpvBuiltInVertexIndex: *location = SYSTEM_VALUE_VERTEX_ID; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInVertexId: /* Vulkan defines VertexID to be zero-based and reserves the new * builtin keyword VertexIndex to indicate the non-zero-based value. 
*/ *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInInstanceIndex: *location = SYSTEM_VALUE_INSTANCE_INDEX; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInInstanceId: *location = SYSTEM_VALUE_INSTANCE_ID; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInPrimitiveId: if (b->shader->info.stage == MESA_SHADER_FRAGMENT) { - assert(*mode == nir_var_shader_in); + vtn_assert(*mode == nir_var_shader_in); *location = VARYING_SLOT_PRIMITIVE_ID; } else if (*mode == nir_var_shader_out) { *location = VARYING_SLOT_PRIMITIVE_ID; } else { *location = SYSTEM_VALUE_PRIMITIVE_ID; - set_mode_system_value(mode); + set_mode_system_value(b, mode); } break; case SpvBuiltInInvocationId: *location = SYSTEM_VALUE_INVOCATION_ID; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInLayer: *location = VARYING_SLOT_LAYER; @@ -1088,51 +1090,51 @@ vtn_get_builtin_location(struct vtn_builder *b, break; case SpvBuiltInTessCoord: *location = SYSTEM_VALUE_TESS_COORD; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInPatchVertices: *location = SYSTEM_VALUE_VERTICES_IN; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInFragCoord: *location = VARYING_SLOT_POS; - assert(*mode == nir_var_shader_in); + vtn_assert(*mode == nir_var_shader_in); break; case SpvBuiltInPointCoord: *location = VARYING_SLOT_PNTC; - assert(*mode == nir_var_shader_in); + vtn_assert(*mode == nir_var_shader_in); break; case SpvBuiltInFrontFacing: *location = SYSTEM_VALUE_FRONT_FACE; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInSampleId: *location = SYSTEM_VALUE_SAMPLE_ID; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInSamplePosition: *location = SYSTEM_VALUE_SAMPLE_POS; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInSampleMask: if (*mode == nir_var_shader_out) { *location = FRAG_RESULT_SAMPLE_MASK; } else { *location = SYSTEM_VALUE_SAMPLE_MASK_IN; - set_mode_system_value(mode); + set_mode_system_value(b, mode); } break; case SpvBuiltInFragDepth: *location = FRAG_RESULT_DEPTH; - assert(*mode == nir_var_shader_out); + vtn_assert(*mode == nir_var_shader_out); break; case SpvBuiltInHelperInvocation: *location = SYSTEM_VALUE_HELPER_INVOCATION; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInNumWorkgroups: *location = SYSTEM_VALUE_NUM_WORK_GROUPS; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInWorkgroupSize: /* This should already be handled */ @@ -1140,35 +1142,35 @@ vtn_get_builtin_location(struct vtn_builder *b, break; case SpvBuiltInWorkgroupId: *location = SYSTEM_VALUE_WORK_GROUP_ID; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInLocalInvocationId: *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInLocalInvocationIndex: *location = SYSTEM_VALUE_LOCAL_INVOCATION_INDEX; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInGlobalInvocationId: *location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInBaseVertex: *location = SYSTEM_VALUE_BASE_VERTEX; - 
set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInBaseInstance: *location = SYSTEM_VALUE_BASE_INSTANCE; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInDrawIndex: *location = SYSTEM_VALUE_DRAW_ID; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; case SpvBuiltInViewIndex: *location = SYSTEM_VALUE_VIEW_INDEX; - set_mode_system_value(mode); + set_mode_system_value(b, mode); break; default: unreachable("unsupported builtin"); @@ -1198,7 +1200,7 @@ apply_var_decoration(struct vtn_builder *b, nir_variable *nir_var, nir_var->data.invariant = true; break; case SpvDecorationConstant: - assert(nir_var->constant_initializer != NULL); + vtn_assert(nir_var->constant_initializer != NULL); nir_var->data.read_only = true; break; case SpvDecorationNonReadable: @@ -1341,11 +1343,11 @@ var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member, } if (val->value_type == vtn_value_type_pointer) { - assert(val->pointer->var == void_var); - assert(val->pointer->chain == NULL); - assert(member == -1); + vtn_assert(val->pointer->var == void_var); + vtn_assert(val->pointer->chain == NULL); + vtn_assert(member == -1); } else { - assert(val->value_type == vtn_value_type_type); + vtn_assert(val->value_type == vtn_value_type_type); } /* Location is odd. If applied to a split structure, we have to walk the @@ -1377,7 +1379,7 @@ var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member, vtn_var->var->data.location = location; } else { /* This handles the structure member case */ - assert(vtn_var->members); + vtn_assert(vtn_var->members); unsigned length = glsl_get_length(glsl_without_array(vtn_var->type->type)); for (unsigned i = 0; i < length; i++) { @@ -1390,11 +1392,11 @@ var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member, return; } else { if (vtn_var->var) { - assert(member <= 0); + vtn_assert(member <= 0); apply_var_decoration(b, vtn_var->var, dec); } else if (vtn_var->members) { if (member >= 0) { - assert(vtn_var->members); + vtn_assert(vtn_var->members); apply_var_decoration(b, vtn_var->members[member], dec); } else { unsigned length = @@ -1407,9 +1409,9 @@ var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member, * nir_variables associated with them. Fortunately, all decorations * we care about for those variables are on the type only. */ - assert(vtn_var->mode == vtn_variable_mode_ubo || - vtn_var->mode == vtn_variable_mode_ssbo || - vtn_var->mode == vtn_variable_mode_push_constant); + vtn_assert(vtn_var->mode == vtn_variable_mode_ubo || + vtn_var->mode == vtn_variable_mode_ssbo || + vtn_var->mode == vtn_variable_mode_push_constant); } } } @@ -1489,8 +1491,8 @@ nir_ssa_def * vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr) { /* This pointer needs to have a pointer type with actual storage */ - assert(ptr->ptr_type); - assert(ptr->ptr_type->type); + vtn_assert(ptr->ptr_type); + vtn_assert(ptr->ptr_type->type); if (ptr->offset && ptr->block_index) { return nir_vec2(&b->nb, ptr->block_index, ptr->offset); @@ -1498,13 +1500,13 @@ vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr) /* If we don't have an offset or block index, then we must be a pointer * to the variable itself. 
*/ - assert(!ptr->offset && !ptr->block_index); + vtn_assert(!ptr->offset && !ptr->block_index); /* We can't handle a pointer to an array of descriptors because we have * no way of knowing later on that we need to add to update the block * index when dereferencing. */ - assert(ptr->var && ptr->var->type->base_type == vtn_base_type_struct); + vtn_assert(ptr->var && ptr->var->type->base_type == vtn_base_type_struct); return nir_vec2(&b->nb, vtn_variable_resource_index(b, ptr->var, NULL), nir_imm_int(&b->nb, 0)); @@ -1515,11 +1517,11 @@ struct vtn_pointer * vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa, struct vtn_type *ptr_type) { - assert(ssa->num_components == 2 && ssa->bit_size == 32); - assert(ptr_type->base_type == vtn_base_type_pointer); - assert(ptr_type->deref->base_type != vtn_base_type_pointer); + vtn_assert(ssa->num_components == 2 && ssa->bit_size == 32); + vtn_assert(ptr_type->base_type == vtn_base_type_pointer); + vtn_assert(ptr_type->deref->base_type != vtn_base_type_pointer); /* This pointer type needs to have actual storage */ - assert(ptr_type->type); + vtn_assert(ptr_type->type); struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer); ptr->mode = vtn_storage_class_to_mode(ptr_type->storage_class, @@ -1555,7 +1557,7 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val, struct vtn_type *ptr_type, SpvStorageClass storage_class, nir_constant *initializer) { - assert(ptr_type->base_type == vtn_base_type_pointer); + vtn_assert(ptr_type->base_type == vtn_base_type_pointer); struct vtn_type *type = ptr_type->deref; struct vtn_type *without_array = type; @@ -1580,7 +1582,7 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val, b->shader->info.num_textures++; break; case vtn_variable_mode_push_constant: - b->shader->num_uniforms = vtn_type_block_size(type); + b->shader->num_uniforms = vtn_type_block_size(b, type); break; default: /* No tallying is needed */ @@ -1591,7 +1593,7 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val, var->type = type; var->mode = mode; - assert(val->value_type == vtn_value_type_pointer); + vtn_assert(val->value_type == vtn_value_type_pointer); val->pointer = vtn_pointer_for_variable(b, var, ptr_type); switch (var->mode) { @@ -1730,20 +1732,20 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val, } if (var->mode == vtn_variable_mode_local) { - assert(var->members == NULL && var->var != NULL); + vtn_assert(var->members == NULL && var->var != NULL); nir_function_impl_add_variable(b->nb.impl, var->var); } else if (var->var) { nir_shader_add_variable(b->shader, var->var); } else if (var->members) { unsigned count = glsl_get_length(without_array->type); for (unsigned i = 0; i < count; i++) { - assert(var->members[i]->data.mode != nir_var_local); + vtn_assert(var->members[i]->data.mode != nir_var_local); nir_shader_add_variable(b->shader, var->members[i]); } } else { - assert(var->mode == vtn_variable_mode_ubo || - var->mode == vtn_variable_mode_ssbo || - var->mode == vtn_variable_mode_push_constant); + vtn_assert(var->mode == vtn_variable_mode_ubo || + var->mode == vtn_variable_mode_ssbo || + var->mode == vtn_variable_mode_push_constant); } } @@ -1810,7 +1812,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, vtn_pointer_dereference(b, base_val->sampled_image->image, chain); val->sampled_image->sampler = base_val->sampled_image->sampler; } else { - assert(base_val->value_type == vtn_value_type_pointer); + vtn_assert(base_val->value_type == vtn_value_type_pointer); struct 
vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer); val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain); @@ -1850,7 +1852,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode, if (glsl_type_is_sampler(dest->type->type)) { vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy " "propagation to workaround the problem."); - assert(dest->var->copy_prop_sampler == NULL); + vtn_assert(dest->var->copy_prop_sampler == NULL); dest->var->copy_prop_sampler = vtn_value(b, w[2], vtn_value_type_pointer)->pointer; break; -- 2.30.2
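
Note on the macro itself: vtn_assert() is not defined anywhere in this diff
(it lives in vtn_private.h).  The point of the series is that an assertion
failure while parsing a SPIR-V module should be reportable through the vtn
error path rather than abort()ing the whole process, which is also why
several static helpers above grow a struct vtn_builder *b parameter
(vtn_get_branch_type, rewrite_deref_types, vtn_type_block_size,
vtn_access_chain_get_offset_size, set_mode_system_value): the macro needs a
builder in scope.  As a rough sketch only -- assuming Mesa's likely() from
util/macros.h and a vtn_fail() helper that records the message and unwinds
back to spirv_to_nir(), not necessarily the verbatim upstream definition --
the macro has this shape:

    /* Sketch, not the exact vtn_private.h definition.  Assumes
     * vtn_fail() expands to something like
     *    _vtn_fail(b, __FILE__, __LINE__, ...)
     * which logs the failure (including b->spirv_offset) and then
     * unwinds back to spirv_to_nir(), e.g. via longjmp, so a
     * 'struct vtn_builder *b' must be in scope at every use site.
     */
    #define vtn_assert(expr)                        \
       do {                                         \
          if (!likely(expr))                        \
             vtn_fail("%s", #expr);                 \
       } while (0)

With that shape, a malformed module fails shader compilation with a useful
message instead of abort()ing the process, while every converted call site
keeps the exact semantics it had with plain assert() when the condition
holds.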