}
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
}
break;
case SpvOpDecorate:
+ case SpvOpDecorateId:
case SpvOpMemberDecorate:
- case SpvOpDecorateStringGOOGLE:
- case SpvOpMemberDecorateStringGOOGLE:
+ case SpvOpDecorateString:
+ case SpvOpMemberDecorateString:
case SpvOpExecutionMode:
case SpvOpExecutionModeId: {
struct vtn_value *val = vtn_untyped_value(b, target);
struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
switch (opcode) {
case SpvOpDecorate:
- case SpvOpDecorateStringGOOGLE:
+ case SpvOpDecorateId:
+ case SpvOpDecorateString:
dec->scope = VTN_DEC_DECORATION;
break;
case SpvOpMemberDecorate:
- case SpvOpMemberDecorateStringGOOGLE:
+ case SpvOpMemberDecorateString:
dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
"Member argument of OpMemberDecorate too large");
unreachable("Invalid decoration opcode");
}
dec->decoration = *(w++);
- dec->literals = w;
+ dec->operands = w;
/* Link into the list */
dec->next = val->decoration;
vtn_fail("Invalid base type");
}
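+/* Strips all levels of array from a type: e.g. for an array of arrays of
+ * vec4 this returns the vec4 type itself.
+ */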
+struct vtn_type *
+vtn_type_without_array(struct vtn_type *type)
+{
+ while (type->base_type == vtn_base_type_array)
+ type = type->array_element;
+ return type;
+}
+
/* does a shallow copy of a vtn_type */
static struct vtn_type *
struct vtn_type *type = val->type;
if (dec->decoration == SpvDecorationArrayStride) {
- vtn_fail_if(dec->literals[0] == 0, "ArrayStride must be non-zero");
- type->stride = dec->literals[0];
+ vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
+ type->stride = dec->operands[0];
}
}
break;
case SpvDecorationStream:
/* Vulkan only allows one GS stream */
- vtn_assert(dec->literals[0] == 0);
+ vtn_assert(dec->operands[0] == 0);
break;
case SpvDecorationLocation:
- ctx->fields[member].location = dec->literals[0];
+ ctx->fields[member].location = dec->operands[0];
break;
case SpvDecorationComponent:
break; /* FIXME: What should we do with these? */
case SpvDecorationBuiltIn:
ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
ctx->type->members[member]->is_builtin = true;
- ctx->type->members[member]->builtin = dec->literals[0];
+ ctx->type->members[member]->builtin = dec->operands[0];
ctx->type->builtin_block = true;
break;
case SpvDecorationOffset:
- ctx->type->offsets[member] = dec->literals[0];
- ctx->fields[member].offset = dec->literals[0];
+ ctx->type->offsets[member] = dec->operands[0];
+ ctx->fields[member].offset = dec->operands[0];
break;
case SpvDecorationMatrixStride:
/* Handled as a second pass */
}
break;
- case SpvDecorationHlslSemanticGOOGLE:
- /* HLSL semantic decorations can safely be ignored by the driver. */
+ case SpvDecorationUserSemantic:
+ /* User semantic decorations can safely be ignored by the driver. */
break;
default:
- vtn_fail("Unhandled decoration");
+ vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
}
}
vtn_fail_if(member < 0,
"The MatrixStride decoration is only allowed on members "
"of OpTypeStruct");
- vtn_fail_if(dec->literals[0] == 0, "MatrixStride must be non-zero");
+ vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");
struct member_decoration_ctx *ctx = void_ctx;
if (mat_type->row_major) {
mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
mat_type->stride = mat_type->array_element->stride;
- mat_type->array_element->stride = dec->literals[0];
+ mat_type->array_element->stride = dec->operands[0];
mat_type->type = glsl_explicit_matrix_type(mat_type->type,
- dec->literals[0], true);
+ dec->operands[0], true);
mat_type->array_element->type = glsl_get_column_type(mat_type->type);
} else {
vtn_assert(mat_type->array_element->stride > 0);
- mat_type->stride = dec->literals[0];
+ mat_type->stride = dec->operands[0];
mat_type->type = glsl_explicit_matrix_type(mat_type->type,
- dec->literals[0], false);
+ dec->operands[0], false);
}
/* Now that we've replaced the glsl_type with a properly strided matrix
case SpvDecorationOffset:
case SpvDecorationXfbBuffer:
case SpvDecorationXfbStride:
- case SpvDecorationHlslSemanticGOOGLE:
+ case SpvDecorationUserSemantic:
vtn_warn("Decoration only allowed for struct members: %s",
spirv_decoration_to_string(dec->decoration));
break;
break;
default:
- vtn_fail("Unhandled decoration");
+ vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
}
}
case SpvImageFormatR16ui: return 0x8234; /* GL_R16UI */
case SpvImageFormatR8ui: return 0x8232; /* GL_R8UI */
default:
- vtn_fail("Invalid image format");
+ vtn_fail("Invalid image format: %s (%u)",
+ spirv_imageformat_to_string(format), format);
}
}
val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
break;
default:
- vtn_fail("Invalid int bit size");
+ vtn_fail("Invalid int bit size: %u", bit_size);
}
val->type->length = 1;
break;
val->type->type = glsl_double_type();
break;
default:
- vtn_fail("Invalid float bit size");
+ vtn_fail("Invalid float bit size: %u", bit_size);
}
val->type->length = 1;
break;
/* These can actually be stored to nir_variables and used as SSA
* values so they need a real glsl_type.
*/
- switch (storage_class) {
- case SpvStorageClassUniform:
- val->type->type = b->options->ubo_ptr_type;
- break;
- case SpvStorageClassStorageBuffer:
- val->type->type = b->options->ssbo_ptr_type;
- break;
- case SpvStorageClassPhysicalStorageBufferEXT:
- val->type->type = b->options->phys_ssbo_ptr_type;
- break;
- case SpvStorageClassPushConstant:
- val->type->type = b->options->push_const_ptr_type;
- break;
- case SpvStorageClassWorkgroup:
- val->type->type = b->options->shared_ptr_type;
- break;
- case SpvStorageClassCrossWorkgroup:
- val->type->type = b->options->global_ptr_type;
- break;
- case SpvStorageClassFunction:
- if (b->physical_ptrs)
- val->type->type = b->options->temp_ptr_type;
- break;
- default:
- /* In this case, no variable pointers are allowed so all deref
- * chains are complete back to the variable and it doesn't matter
- * what type gets used so we leave it NULL.
- */
- break;
- }
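+ /* Derive the pointer's glsl_type from the address format of the
+  * variable mode rather than hard-coding one glsl_type per storage
+  * class.
+  */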
+ enum vtn_variable_mode mode = vtn_storage_class_to_mode(
+ b, storage_class, NULL, NULL);
+ val->type->type = nir_address_format_to_glsl_type(
+ vtn_mode_to_address_format(b, mode));
} else {
vtn_fail_if(val->type->storage_class != storage_class,
"The storage classes of an OpTypePointer and any "
case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
default:
- vtn_fail("Invalid SPIR-V image dimensionality");
+ vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
+ spirv_dim_to_string((SpvDim)w[3]), w[3]);
}
/* w[4]: as per Vulkan spec "Validation Rules within a Module",
case SpvOpTypeQueue:
case SpvOpTypePipe:
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
}
static nir_constant *
-vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
+vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
nir_constant *c = rzalloc(b, nir_constant);
- /* For pointers and other typeless things, we have to return something but
- * it doesn't matter what.
- */
- if (!type)
- return c;
-
- switch (glsl_get_base_type(type)) {
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_INT16:
- case GLSL_TYPE_UINT16:
- case GLSL_TYPE_UINT8:
- case GLSL_TYPE_INT8:
- case GLSL_TYPE_INT64:
- case GLSL_TYPE_UINT64:
- case GLSL_TYPE_BOOL:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_FLOAT16:
- case GLSL_TYPE_DOUBLE:
+ switch (type->base_type) {
+ case vtn_base_type_scalar:
+ case vtn_base_type_vector:
/* Nothing to do here. It's already initialized to zero */
break;
- case GLSL_TYPE_ARRAY:
- vtn_assert(glsl_get_length(type) > 0);
- c->num_elements = glsl_get_length(type);
+ case vtn_base_type_pointer: {
+ enum vtn_variable_mode mode = vtn_storage_class_to_mode(
+ b, type->storage_class, type->deref, NULL);
+ nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
+
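+ /* Each address format defines its own canonical null value, which need
+  * not be all zeros, so copy it in explicitly.
+  */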
+ const nir_const_value *null_value = nir_address_format_null_value(addr_format);
+ memcpy(c->values[0], null_value,
+ sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
+ break;
+ }
+
+ case vtn_base_type_void:
+ case vtn_base_type_image:
+ case vtn_base_type_sampler:
+ case vtn_base_type_sampled_image:
+ case vtn_base_type_function:
+ /* For these, we have to return something, but it doesn't matter what. */
+ break;
+
+ case vtn_base_type_matrix:
+ case vtn_base_type_array:
+ vtn_assert(type->length > 0);
+ c->num_elements = type->length;
c->elements = ralloc_array(b, nir_constant *, c->num_elements);
- c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
+ c->elements[0] = vtn_null_constant(b, type->array_element);
for (unsigned i = 1; i < c->num_elements; i++)
c->elements[i] = c->elements[0];
break;
- case GLSL_TYPE_STRUCT:
- c->num_elements = glsl_get_length(type);
+ case vtn_base_type_struct:
+ c->num_elements = type->length;
c->elements = ralloc_array(b, nir_constant *, c->num_elements);
-
- for (unsigned i = 0; i < c->num_elements; i++) {
- c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
- }
+ for (unsigned i = 0; i < c->num_elements; i++)
+ c->elements[i] = vtn_null_constant(b, type->members[i]);
break;
default:
struct spec_constant_value *const_value = data;
for (unsigned i = 0; i < b->num_specializations; i++) {
- if (b->specializations[i].id == dec->literals[0]) {
+ if (b->specializations[i].id == dec->operands[0]) {
if (const_value->is_double)
const_value->data64 = b->specializations[i].data64;
else
{
vtn_assert(member == -1);
if (dec->decoration != SpvDecorationBuiltIn ||
- dec->literals[0] != SpvBuiltInWorkgroupSize)
+ dec->operands[0] != SpvBuiltInWorkgroupSize)
return;
vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
val->constant->values[0][0].u8 = w[3];
break;
default:
- vtn_fail("Unsupported SpvOpConstant bit size");
+ vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
}
break;
}
"only constants or undefs allowed for "
"SpvOpConstantComposite");
/* to make it easier, just insert a NULL constant for now */
- elems[i] = vtn_null_constant(b, val->type->type);
+ elems[i] = vtn_null_constant(b, val->type);
}
}
switch (opcode) {
case SpvOpSConvert:
case SpvOpFConvert:
+ case SpvOpUConvert:
/* We have a source in a conversion */
src_alu_type =
nir_get_nir_type_for_glsl_type(
}
case SpvOpConstantNull:
- val->constant = vtn_null_constant(b, val->type->type);
+ val->constant = vtn_null_constant(b, val->type);
break;
case SpvOpConstantSampler:
break;
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
/* Now that we have the value, update the workgroup size if needed */
break;
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
nir_tex_src srcs[10]; /* 10 should be enough */
case nir_texop_samples_identical:
/* These don't */
break;
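+ /* These texops are only created by driver-specific NIR lowering passes;
+  * they should never reach SPIR-V translation.
+  */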
+ case nir_texop_txf_ms_fb:
+ vtn_fail("unexpected nir_texop_txf_ms_fb");
+ break;
case nir_texop_txf_ms_mcs:
vtn_fail("unexpected nir_texop_txf_ms_mcs");
}
case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
default:
- vtn_fail("Unsupported bit size");
+ vtn_fail("Unsupported bit size: %u", bit_size);
}
}
}
break;
case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
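+ /* The Weak form is deprecated in SPIR-V and, for our purposes, has the
+  * same semantics as the strong compare-exchange.
+  */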
src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
break;
break;
default:
- vtn_fail("Invalid SPIR-V atomic");
+ vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
}
for (unsigned i = 0; i < 4; i++)
swizzle[i] = MIN2(i, dim - 1);
- return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
+ return nir_swizzle(&b->nb, coord->def, swizzle, 4);
}
static nir_ssa_def *
unsigned swiz[4];
for (unsigned i = 0; i < 4; i++)
swiz[i] = i < value->num_components ? i : 0;
- return nir_swizzle(b, value, swiz, 4, false);
+ return nir_swizzle(b, value, swiz, 4);
}
static void
break;
default:
- vtn_fail("Invalid image opcode");
+ vtn_fail_with_opcode("Invalid image opcode", opcode);
}
nir_intrinsic_op op;
switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
- OP(ImageQuerySize, size)
- OP(ImageRead, load)
- OP(ImageWrite, store)
- OP(AtomicLoad, load)
- OP(AtomicStore, store)
- OP(AtomicExchange, atomic_exchange)
- OP(AtomicCompareExchange, atomic_comp_swap)
- OP(AtomicIIncrement, atomic_add)
- OP(AtomicIDecrement, atomic_add)
- OP(AtomicIAdd, atomic_add)
- OP(AtomicISub, atomic_add)
- OP(AtomicSMin, atomic_min)
- OP(AtomicUMin, atomic_min)
- OP(AtomicSMax, atomic_max)
- OP(AtomicUMax, atomic_max)
- OP(AtomicAnd, atomic_and)
- OP(AtomicOr, atomic_or)
- OP(AtomicXor, atomic_xor)
+ OP(ImageQuerySize, size)
+ OP(ImageRead, load)
+ OP(ImageWrite, store)
+ OP(AtomicLoad, load)
+ OP(AtomicStore, store)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicCompareExchangeWeak, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_min)
+ OP(AtomicUMin, atomic_min)
+ OP(AtomicSMax, atomic_max)
+ OP(AtomicUMax, atomic_max)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
#undef OP
default:
- vtn_fail("Invalid image opcode");
+ vtn_fail_with_opcode("Invalid image opcode", opcode);
}
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
}
case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
case SpvOpAtomicIIncrement:
case SpvOpAtomicIDecrement:
case SpvOpAtomicExchange:
break;
default:
- vtn_fail("Invalid image opcode");
+ vtn_fail_with_opcode("Invalid image opcode", opcode);
}
if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
switch (opcode) {
- case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
- case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
+ case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
+ case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
- OP(AtomicExchange, atomic_exchange)
- OP(AtomicCompareExchange, atomic_comp_swap)
- OP(AtomicIIncrement, atomic_add)
- OP(AtomicIDecrement, atomic_add)
- OP(AtomicIAdd, atomic_add)
- OP(AtomicISub, atomic_add)
- OP(AtomicSMin, atomic_imin)
- OP(AtomicUMin, atomic_umin)
- OP(AtomicSMax, atomic_imax)
- OP(AtomicUMax, atomic_umax)
- OP(AtomicAnd, atomic_and)
- OP(AtomicOr, atomic_or)
- OP(AtomicXor, atomic_xor)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicCompareExchangeWeak, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_imin)
+ OP(AtomicUMin, atomic_umin)
+ OP(AtomicSMax, atomic_imax)
+ OP(AtomicUMax, atomic_umax)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
#undef OP
default:
- vtn_fail("Invalid SSBO atomic");
+ vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
}
}
{
switch (opcode) {
#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
- OP(AtomicLoad, read_deref)
- OP(AtomicExchange, exchange)
- OP(AtomicCompareExchange, comp_swap)
- OP(AtomicIIncrement, inc_deref)
- OP(AtomicIDecrement, post_dec_deref)
- OP(AtomicIAdd, add_deref)
- OP(AtomicISub, add_deref)
- OP(AtomicUMin, min_deref)
- OP(AtomicUMax, max_deref)
- OP(AtomicAnd, and_deref)
- OP(AtomicOr, or_deref)
- OP(AtomicXor, xor_deref)
+ OP(AtomicLoad, read_deref)
+ OP(AtomicExchange, exchange)
+ OP(AtomicCompareExchange, comp_swap)
+ OP(AtomicCompareExchangeWeak, comp_swap)
+ OP(AtomicIIncrement, inc_deref)
+ OP(AtomicIDecrement, post_dec_deref)
+ OP(AtomicIAdd, add_deref)
+ OP(AtomicISub, add_deref)
+ OP(AtomicUMin, min_deref)
+ OP(AtomicUMax, max_deref)
+ OP(AtomicAnd, and_deref)
+ OP(AtomicOr, or_deref)
+ OP(AtomicXor, xor_deref)
#undef OP
default:
/* We left the following out: AtomicStore, AtomicSMin and
get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
switch (opcode) {
- case SpvOpAtomicLoad: return nir_intrinsic_load_shared;
- case SpvOpAtomicStore: return nir_intrinsic_store_shared;
+ case SpvOpAtomicLoad: return nir_intrinsic_load_shared;
+ case SpvOpAtomicStore: return nir_intrinsic_store_shared;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
- OP(AtomicExchange, atomic_exchange)
- OP(AtomicCompareExchange, atomic_comp_swap)
- OP(AtomicIIncrement, atomic_add)
- OP(AtomicIDecrement, atomic_add)
- OP(AtomicIAdd, atomic_add)
- OP(AtomicISub, atomic_add)
- OP(AtomicSMin, atomic_imin)
- OP(AtomicUMin, atomic_umin)
- OP(AtomicSMax, atomic_imax)
- OP(AtomicUMax, atomic_umax)
- OP(AtomicAnd, atomic_and)
- OP(AtomicOr, atomic_or)
- OP(AtomicXor, atomic_xor)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicCompareExchangeWeak, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_imin)
+ OP(AtomicUMin, atomic_umin)
+ OP(AtomicSMax, atomic_imax)
+ OP(AtomicUMax, atomic_umax)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
#undef OP
default:
- vtn_fail("Invalid shared atomic");
+ vtn_fail_with_opcode("Invalid shared atomic", opcode);
}
}
get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
switch (opcode) {
- case SpvOpAtomicLoad: return nir_intrinsic_load_deref;
- case SpvOpAtomicStore: return nir_intrinsic_store_deref;
+ case SpvOpAtomicLoad: return nir_intrinsic_load_deref;
+ case SpvOpAtomicStore: return nir_intrinsic_store_deref;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
- OP(AtomicExchange, atomic_exchange)
- OP(AtomicCompareExchange, atomic_comp_swap)
- OP(AtomicIIncrement, atomic_add)
- OP(AtomicIDecrement, atomic_add)
- OP(AtomicIAdd, atomic_add)
- OP(AtomicISub, atomic_add)
- OP(AtomicSMin, atomic_imin)
- OP(AtomicUMin, atomic_umin)
- OP(AtomicSMax, atomic_imax)
- OP(AtomicUMax, atomic_umax)
- OP(AtomicAnd, atomic_and)
- OP(AtomicOr, atomic_or)
- OP(AtomicXor, atomic_xor)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicCompareExchangeWeak, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_imin)
+ OP(AtomicUMin, atomic_umin)
+ OP(AtomicSMax, atomic_imax)
+ OP(AtomicUMax, atomic_umax)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
#undef OP
default:
- vtn_fail("Invalid shared atomic");
+ vtn_fail_with_opcode("Invalid shared atomic", opcode);
}
}
break;
default:
- vtn_fail("Invalid SPIR-V atomic");
+ vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
/*
break;
default:
- vtn_fail("Invalid SPIR-V atomic");
+ vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
} else {
nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
break;
default:
- vtn_fail("Invalid SPIR-V atomic");
+ vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
}
static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
- nir_op op;
- switch (num_components) {
- case 1: op = nir_op_imov; break;
- case 2: op = nir_op_vec2; break;
- case 3: op = nir_op_vec3; break;
- case 4: op = nir_op_vec4; break;
- default: vtn_fail("bad vector size");
- }
-
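+ /* nir_op_vec() maps the component count to the matching vecN (or
+  * single-component move) opcode.
+  */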
+ nir_op op = nir_op_vec(num_components);
nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
bit_size, NULL);
break;
default:
- vtn_fail("unknown composite operation");
+ vtn_fail_with_opcode("unknown composite operation", opcode);
}
}
case SpvExecutionModeOutputTriangleStrip:
return 5; /* GL_TRIANGLE_STRIP */
default:
- vtn_fail("Invalid primitive type");
+ vtn_fail("Invalid primitive type: %s (%u)",
+ spirv_executionmode_to_string(mode), mode);
}
}
case SpvExecutionModeInputTrianglesAdjacency:
return 6;
default:
- vtn_fail("Invalid GS input mode");
+ vtn_fail("Invalid GS input mode: %s (%u)",
+ spirv_executionmode_to_string(mode), mode);
}
}
case SpvExecutionModelKernel:
return MESA_SHADER_KERNEL;
default:
- vtn_fail("Unsupported execution model");
+ vtn_fail("Unsupported execution model: %s (%u)",
+ spirv_executionmodel_to_string(model), model);
}
}
-#define spv_check_supported(name, cap) do { \
- if (!(b->options && b->options->caps.name)) \
- vtn_warn("Unsupported SPIR-V capability: %s", \
- spirv_capability_to_string(cap)); \
+#define spv_check_supported(name, cap) do { \
+ if (!(b->options && b->options->caps.name)) \
+ vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
+ spirv_capability_to_string(cap), cap); \
} while(0)
case SpvCapabilityLinkage:
case SpvCapabilityVector16:
case SpvCapabilityFloat16Buffer:
- case SpvCapabilityFloat16:
case SpvCapabilitySparseResidency:
vtn_warn("Unsupported SPIR-V capability: %s",
spirv_capability_to_string(cap));
case SpvCapabilityInt16:
spv_check_supported(int16, cap);
break;
+ case SpvCapabilityInt8:
+ spv_check_supported(int8, cap);
+ break;
case SpvCapabilityTransformFeedback:
spv_check_supported(transform_feedback, cap);
spv_check_supported(int64_atomics, cap);
break;
- case SpvCapabilityInt8:
- spv_check_supported(int8, cap);
- break;
-
case SpvCapabilityStorageImageMultisample:
spv_check_supported(storage_image_ms, cap);
break;
spv_check_supported(storage_8bit, cap);
break;
+ case SpvCapabilityShaderNonUniformEXT:
+ spv_check_supported(descriptor_indexing, cap);
+ break;
+
case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
spv_check_supported(descriptor_array_dynamic_indexing, cap);
break;
+ case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
+ case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
+ case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
+ case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
+ case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
+ case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
+ case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
+ spv_check_supported(descriptor_array_non_uniform_indexing, cap);
+ break;
+
case SpvCapabilityRuntimeDescriptorArrayEXT:
spv_check_supported(runtime_descriptor_array, cap);
break;
spv_check_supported(derivative_group, cap);
break;
+ case SpvCapabilityFloat16:
+ spv_check_supported(float16, cap);
+ break;
+
default:
- vtn_fail("Unhandled capability");
+ vtn_fail("Unhandled capability: %s (%u)",
+ spirv_capability_to_string(cap), cap);
}
break;
}
"AddressingModelPhysical32 only supported for kernels");
b->shader->info.cs.ptr_size = 32;
b->physical_ptrs = true;
- b->options->shared_ptr_type = glsl_uint_type();
- b->options->global_ptr_type = glsl_uint_type();
- b->options->temp_ptr_type = glsl_uint_type();
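+ /* With physical addressing, shared, global and temp pointers all share
+  * a flat global address format of the selected pointer width.
+  */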
+ b->options->shared_addr_format = nir_address_format_32bit_global;
+ b->options->global_addr_format = nir_address_format_32bit_global;
+ b->options->temp_addr_format = nir_address_format_32bit_global;
break;
case SpvAddressingModelPhysical64:
vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
"AddressingModelPhysical64 only supported for kernels");
b->shader->info.cs.ptr_size = 64;
b->physical_ptrs = true;
- b->options->shared_ptr_type = glsl_uint64_t_type();
- b->options->global_ptr_type = glsl_uint64_t_type();
- b->options->temp_ptr_type = glsl_uint64_t_type();
+ b->options->shared_addr_format = nir_address_format_64bit_global;
+ b->options->global_addr_format = nir_address_format_64bit_global;
+ b->options->temp_addr_format = nir_address_format_64bit_global;
break;
case SpvAddressingModelLogical:
vtn_fail_if(b->shader->info.stage >= MESA_SHADER_STAGES,
"AddressingModelPhysicalStorageBuffer64EXT not supported");
break;
default:
- vtn_fail("Unknown addressing model");
+ vtn_fail("Unknown addressing model: %s (%u)",
+ spirv_addressingmodel_to_string(w[1]), w[1]);
break;
}
case SpvOpExecutionModeId:
case SpvOpDecorationGroup:
case SpvOpDecorate:
+ case SpvOpDecorateId:
case SpvOpMemberDecorate:
case SpvOpGroupDecorate:
case SpvOpGroupMemberDecorate:
- case SpvOpDecorateStringGOOGLE:
- case SpvOpMemberDecorateStringGOOGLE:
+ case SpvOpDecorateString:
+ case SpvOpMemberDecorateString:
vtn_handle_decoration(b, opcode, w, count);
break;
case SpvExecutionModeInvocations:
vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
+ b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
break;
case SpvExecutionModeDepthReplacing:
case SpvExecutionModeLocalSize:
vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
- b->shader->info.cs.local_size[0] = mode->literals[0];
- b->shader->info.cs.local_size[1] = mode->literals[1];
- b->shader->info.cs.local_size[2] = mode->literals[2];
+ b->shader->info.cs.local_size[0] = mode->operands[0];
+ b->shader->info.cs.local_size[1] = mode->operands[1];
+ b->shader->info.cs.local_size[2] = mode->operands[2];
break;
case SpvExecutionModeLocalSizeId:
- b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->literals[0]);
- b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->literals[1]);
- b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->literals[2]);
+ b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
+ b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
+ b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
break;
case SpvExecutionModeLocalSizeHint:
case SpvExecutionModeOutputVertices:
if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
- b->shader->info.tess.tcs_vertices_out = mode->literals[0];
+ b->shader->info.tess.tcs_vertices_out = mode->operands[0];
} else {
vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.vertices_out = mode->literals[0];
+ b->shader->info.gs.vertices_out = mode->operands[0];
}
break;
break;
default:
- vtn_fail("Unhandled execution mode");
+ vtn_fail("Unhandled execution mode: %s (%u)",
+ spirv_executionmode_to_string(mode->exec_mode),
+ mode->exec_mode);
}
}
case SpvOpMemberName:
case SpvOpDecorationGroup:
case SpvOpDecorate:
+ case SpvOpDecorateId:
case SpvOpMemberDecorate:
case SpvOpGroupDecorate:
case SpvOpGroupMemberDecorate:
- case SpvOpDecorateStringGOOGLE:
- case SpvOpMemberDecorateStringGOOGLE:
+ case SpvOpDecorateString:
+ case SpvOpMemberDecorateString:
vtn_fail("Invalid opcode types and variables section");
break;
return true;
}
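+/* Handles OpPtrEqual, OpPtrNotEqual and OpPtrDiff by lowering them to
+ * integer operations on the pointers' address-format representation.
+ */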
+static void
+vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_type *type1 = vtn_untyped_value(b, w[3])->type;
+ struct vtn_type *type2 = vtn_untyped_value(b, w[4])->type;
+ vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
+ type2->base_type != vtn_base_type_pointer,
+ "%s operands must have pointer types",
+ spirv_op_to_string(opcode));
+ vtn_fail_if(type1->storage_class != type2->storage_class,
+ "%s operands must have the same storage class",
+ spirv_op_to_string(opcode));
+
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+
+ nir_address_format addr_format = vtn_mode_to_address_format(
+ b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));
+
+ nir_ssa_def *def;
+
+ switch (opcode) {
+ case SpvOpPtrDiff: {
+ /* OpPtrDiff returns the difference in number of elements (not byte offset). */
+ unsigned elem_size, elem_align;
+ glsl_get_natural_size_align_bytes(type1->deref->type,
+ &elem_size, &elem_align);
+
+ def = nir_build_addr_isub(&b->nb,
+ vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ addr_format);
+ def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
+ def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
+ break;
+ }
+
+ case SpvOpPtrEqual:
+ case SpvOpPtrNotEqual: {
+ def = nir_build_addr_ieq(&b->nb,
+ vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ addr_format);
+ if (opcode == SpvOpPtrNotEqual)
+ def = nir_inot(&b->nb, def);
+ break;
+ }
+
+ default:
+ unreachable("Invalid ptr operation");
+ }
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = vtn_create_ssa_value(b, type);
+ val->ssa->def = def;
+}
+
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
case SpvOpQuantizeToF16:
case SpvOpPtrCastToGeneric:
case SpvOpGenericCastToPtr:
- case SpvOpBitcast:
case SpvOpIsNan:
case SpvOpIsInf:
case SpvOpIsFinite:
vtn_handle_alu(b, opcode, w, count);
break;
+ case SpvOpBitcast:
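+ /* OpBitcast may convert between pointers and integers, so it gets a
+  * dedicated handler instead of the generic ALU path.
+  */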
+ vtn_handle_bitcast(b, w, count);
+ break;
+
case SpvOpVectorExtractDynamic:
case SpvOpVectorInsertDynamic:
case SpvOpVectorShuffle:
vtn_handle_subgroup(b, opcode, w, count);
break;
+ case SpvOpPtrDiff:
+ case SpvOpPtrEqual:
+ case SpvOpPtrNotEqual:
+ vtn_handle_ptr(b, opcode, w, count);
+ break;
+
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
return true;
return main_entry_point;
}
-nir_function *
+nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
struct nir_spirv_specialization *spec, unsigned num_spec,
gl_shader_stage stage, const char *entry_point_name,
/* Unparent the shader from the vtn_builder before we delete the builder */
ralloc_steal(NULL, b->shader);
+ nir_shader *shader = b->shader;
ralloc_free(b);
- return entry_point;
+ return shader;
}