return vtn_undef_ssa_value(b, val->type->type);
case vtn_value_type_constant:
- return vtn_const_ssa_value(b, val->constant, val->const_type);
+ return vtn_const_ssa_value(b, val->constant, val->type->type);
case vtn_value_type_ssa:
return val->ssa;
case vtn_base_type_pointer:
case vtn_base_type_image:
case vtn_base_type_sampler:
+ case vtn_base_type_sampled_image:
/* Nothing more to do */
break;
case SpvOpTypeBool:
val->type->base_type = vtn_base_type_scalar;
val->type->type = glsl_bool_type();
+ val->type->length = 1;
break;
case SpvOpTypeInt: {
int bit_size = w[2];
default:
vtn_fail("Invalid int bit size");
}
+ val->type->length = 1;
break;
}
default:
vtn_fail("Invalid float bit size");
}
+ val->type->length = 1;
break;
}
struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
unsigned elems = w[3];
- vtn_assert(glsl_type_is_scalar(base->type));
+ vtn_fail_if(base->base_type != vtn_base_type_scalar,
+ "Base type for OpTypeVector must be a scalar");
+ vtn_fail_if(elems < 2 || elems > 4,
+ "Invalid component count for OpTypeVector");
+
val->type->base_type = vtn_base_type_vector;
val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
+ val->type->length = elems;
val->type->stride = glsl_get_bit_size(base->type) / 8;
val->type->array_element = base;
break;
struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
unsigned columns = w[3];
- vtn_assert(glsl_type_is_vector(base->type));
+ vtn_fail_if(base->base_type != vtn_base_type_vector,
+ "Base type for OpTypeMatrix must be a vector");
+ vtn_fail_if(columns < 2 || columns > 4,
+ "Invalid column count for OpTypeMatrix");
+
val->type->base_type = vtn_base_type_matrix;
val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
glsl_get_vector_elements(base->type),
columns);
- vtn_assert(!glsl_type_is_error(val->type->type));
+      vtn_fail_if(glsl_type_is_error(val->type->type),
+                  "Unsupported base type for OpTypeMatrix");
val->type->length = columns;
val->type->array_element = base;
val->type->row_major = false;
case SpvOpTypeImage: {
val->type->base_type = vtn_base_type_image;
- const struct glsl_type *sampled_type =
- vtn_value(b, w[2], vtn_value_type_type)->type->type;
+ const struct vtn_type *sampled_type =
+ vtn_value(b, w[2], vtn_value_type_type)->type;
- vtn_assert(glsl_type_is_vector_or_scalar(sampled_type));
+ vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
+ glsl_get_bit_size(sampled_type->type) != 32,
+ "Sampled type of OpTypeImage must be a 32-bit scalar");
enum glsl_sampler_dim dim;
switch ((SpvDim)w[3]) {
case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
default:
- vtn_fail("Invalid SPIR-V Sampler dimension");
+ vtn_fail("Invalid SPIR-V image dimensionality");
}
bool is_shadow = w[4];
val->type->image_format = translate_image_format(b, format);
+ enum glsl_base_type sampled_base_type =
+ glsl_get_base_type(sampled_type->type);
if (sampled == 1) {
val->type->sampled = true;
val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
- glsl_get_base_type(sampled_type));
+ sampled_base_type);
} else if (sampled == 2) {
vtn_assert(!is_shadow);
val->type->sampled = false;
- val->type->type = glsl_image_type(dim, is_array,
- glsl_get_base_type(sampled_type));
+ val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
} else {
vtn_fail("We need to know if the image will be sampled");
}
}
case SpvOpTypeSampledImage:
- val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
+ val->type->base_type = vtn_base_type_sampled_image;
+ val->type->image = vtn_value(b, w[2], vtn_value_type_type)->type;
+ val->type->type = val->type->image->type;
break;
case SpvOpTypeSampler:
dec->literals[0] != SpvBuiltInWorkgroupSize)
return;
- vtn_assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));
+ vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
const uint32_t *w, unsigned count)
{
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
- val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
val->constant = rzalloc(b, nir_constant);
switch (opcode) {
case SpvOpConstantTrue:
- vtn_assert(val->const_type == glsl_bool_type());
- val->constant->values[0].u32[0] = NIR_TRUE;
- break;
case SpvOpConstantFalse:
- vtn_assert(val->const_type == glsl_bool_type());
- val->constant->values[0].u32[0] = NIR_FALSE;
- break;
-
case SpvOpSpecConstantTrue:
case SpvOpSpecConstantFalse: {
- vtn_assert(val->const_type == glsl_bool_type());
- uint32_t int_val =
- get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
+ vtn_fail_if(val->type->type != glsl_bool_type(),
+ "Result type of %s must be OpTypeBool",
+ spirv_op_to_string(opcode));
+
+ uint32_t int_val = (opcode == SpvOpConstantTrue ||
+ opcode == SpvOpSpecConstantTrue);
+
+ if (opcode == SpvOpSpecConstantTrue ||
+ opcode == SpvOpSpecConstantFalse)
+ int_val = get_specialization(b, val, int_val);
+
val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
break;
}
case SpvOpConstant: {
- vtn_assert(glsl_type_is_scalar(val->const_type));
- int bit_size = glsl_get_bit_size(val->const_type);
+ vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
+ "Result type of %s must be a scalar",
+ spirv_op_to_string(opcode));
+ int bit_size = glsl_get_bit_size(val->type->type);
switch (bit_size) {
case 64:
val->constant->values->u64[0] = vtn_u64_literal(&w[3]);
}
break;
}
+
case SpvOpSpecConstant: {
- vtn_assert(glsl_type_is_scalar(val->const_type));
- val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
- int bit_size = glsl_get_bit_size(val->const_type);
+ vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
+ "Result type of %s must be a scalar",
+ spirv_op_to_string(opcode));
+ int bit_size = glsl_get_bit_size(val->type->type);
switch (bit_size) {
case 64:
val->constant->values[0].u64[0] =
}
break;
}
+
case SpvOpSpecConstantComposite:
case SpvOpConstantComposite: {
unsigned elem_count = count - 3;
+ vtn_fail_if(elem_count != val->type->length,
+ "%s has %u constituents, expected %u",
+ spirv_op_to_string(opcode), elem_count, val->type->length);
+
nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
for (unsigned i = 0; i < elem_count; i++)
elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
- switch (glsl_get_base_type(val->const_type)) {
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT16:
- case GLSL_TYPE_INT16:
- case GLSL_TYPE_UINT64:
- case GLSL_TYPE_INT64:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_FLOAT16:
- case GLSL_TYPE_BOOL:
- case GLSL_TYPE_DOUBLE: {
- int bit_size = glsl_get_bit_size(val->const_type);
- if (glsl_type_is_matrix(val->const_type)) {
- vtn_assert(glsl_get_matrix_columns(val->const_type) == elem_count);
- for (unsigned i = 0; i < elem_count; i++)
- val->constant->values[i] = elems[i]->values[0];
- } else {
- vtn_assert(glsl_type_is_vector(val->const_type));
- vtn_assert(glsl_get_vector_elements(val->const_type) == elem_count);
- for (unsigned i = 0; i < elem_count; i++) {
- switch (bit_size) {
- case 64:
- val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
- break;
- case 32:
- val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
- break;
- case 16:
- val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
- break;
- default:
- vtn_fail("Invalid SpvOpConstantComposite bit size");
- }
+ switch (val->type->base_type) {
+ case vtn_base_type_vector: {
+ assert(glsl_type_is_vector(val->type->type));
+ int bit_size = glsl_get_bit_size(val->type->type);
+ for (unsigned i = 0; i < elem_count; i++) {
+ switch (bit_size) {
+ case 64:
+ val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
+ break;
+ case 32:
+ val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+ break;
+ case 16:
+ val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
+ break;
+ default:
+ vtn_fail("Invalid SpvOpConstantComposite bit size");
}
}
- ralloc_free(elems);
break;
}
- case GLSL_TYPE_STRUCT:
- case GLSL_TYPE_ARRAY:
+
+ case vtn_base_type_matrix:
+ assert(glsl_type_is_matrix(val->type->type));
+ for (unsigned i = 0; i < elem_count; i++)
+ val->constant->values[i] = elems[i]->values[0];
+ break;
+
+ case vtn_base_type_struct:
+ case vtn_base_type_array:
ralloc_steal(val->constant, elems);
val->constant->num_elements = elem_count;
val->constant->elements = elems;
break;
default:
- vtn_fail("Unsupported type for constants");
+ vtn_fail("Result type of %s must be a composite type",
+ spirv_op_to_string(opcode));
}
break;
}
vtn_assert(v1->value_type == vtn_value_type_constant ||
v1->value_type == vtn_value_type_undef);
- unsigned len0 = v0->value_type == vtn_value_type_constant ?
- glsl_get_vector_elements(v0->const_type) :
- glsl_get_vector_elements(v0->type->type);
- unsigned len1 = v1->value_type == vtn_value_type_constant ?
- glsl_get_vector_elements(v1->const_type) :
- glsl_get_vector_elements(v1->type->type);
+ unsigned len0 = glsl_get_vector_elements(v0->type->type);
+ unsigned len1 = glsl_get_vector_elements(v1->type->type);
vtn_assert(len0 + len1 < 16);
- unsigned bit_size = glsl_get_bit_size(val->const_type);
- unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
- glsl_get_bit_size(v0->const_type) :
- glsl_get_bit_size(v0->type->type);
- unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
- glsl_get_bit_size(v1->const_type) :
- glsl_get_bit_size(v1->type->type);
+ unsigned bit_size = glsl_get_bit_size(val->type->type);
+ unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
+ unsigned bit_size1 = glsl_get_bit_size(v1->type->type);
vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
(void)bit_size0; (void)bit_size1;
int elem = -1;
int col = 0;
- const struct glsl_type *type = comp->const_type;
+ const struct vtn_type *type = comp->type;
for (unsigned i = deref_start; i < count; i++) {
- switch (glsl_get_base_type(type)) {
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT16:
- case GLSL_TYPE_INT16:
- case GLSL_TYPE_UINT64:
- case GLSL_TYPE_INT64:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_FLOAT16:
- case GLSL_TYPE_DOUBLE:
- case GLSL_TYPE_BOOL:
- /* If we hit this granularity, we're picking off an element */
- if (glsl_type_is_matrix(type)) {
- vtn_assert(col == 0 && elem == -1);
- col = w[i];
- elem = 0;
- type = glsl_get_column_type(type);
- } else {
- vtn_assert(elem <= 0 && glsl_type_is_vector(type));
- elem = w[i];
- type = glsl_scalar_type(glsl_get_base_type(type));
- }
- continue;
-
- case GLSL_TYPE_ARRAY:
+      vtn_fail_if(w[i] >= type->length,
+                  "%uth index of %s is %u but the type has only "
+                  "%u elements", i - deref_start,
+                  spirv_op_to_string(opcode), w[i], type->length);
+
+ switch (type->base_type) {
+ case vtn_base_type_vector:
+ elem = w[i];
+ type = type->array_element;
+ break;
+
+ case vtn_base_type_matrix:
+ assert(col == 0 && elem == -1);
+ col = w[i];
+ elem = 0;
+ type = type->array_element;
+ break;
+
+ case vtn_base_type_array:
c = &(*c)->elements[w[i]];
- type = glsl_get_array_element(type);
- continue;
+ type = type->array_element;
+ break;
- case GLSL_TYPE_STRUCT:
+ case vtn_base_type_struct:
c = &(*c)->elements[w[i]];
- type = glsl_get_struct_field(type, w[i]);
- continue;
+ type = type->members[w[i]];
+ break;
default:
- vtn_fail("Invalid constant type");
+ vtn_fail("%s must only index into composite types",
+ spirv_op_to_string(opcode));
}
}
if (elem == -1) {
val->constant = *c;
} else {
- unsigned num_components = glsl_get_vector_elements(type);
- unsigned bit_size = glsl_get_bit_size(type);
+ unsigned num_components = type->length;
+ unsigned bit_size = glsl_get_bit_size(type->type);
for (unsigned i = 0; i < num_components; i++)
switch(bit_size) {
case 64:
} else {
struct vtn_value *insert =
vtn_value(b, w[4], vtn_value_type_constant);
- vtn_assert(insert->const_type == type);
+ vtn_assert(insert->type == type);
if (elem == -1) {
*c = insert->constant;
} else {
- unsigned num_components = glsl_get_vector_elements(type);
- unsigned bit_size = glsl_get_bit_size(type);
+ unsigned num_components = type->length;
+ unsigned bit_size = glsl_get_bit_size(type->type);
for (unsigned i = 0; i < num_components; i++)
switch (bit_size) {
case 64:
default: {
bool swap;
- nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
+ nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
nir_alu_type src_alu_type = dst_alu_type;
+ unsigned num_components = glsl_get_vector_elements(val->type->type);
+ unsigned bit_size;
+
+ vtn_assert(count <= 7);
+
+ switch (opcode) {
+ case SpvOpSConvert:
+ case SpvOpFConvert:
+ /* We have a source in a conversion */
+ src_alu_type =
+ nir_get_nir_type_for_glsl_type(
+ vtn_value(b, w[4], vtn_value_type_constant)->type->type);
+ /* We use the bitsize of the conversion source to evaluate the opcode later */
+ bit_size = glsl_get_bit_size(
+ vtn_value(b, w[4], vtn_value_type_constant)->type->type);
+ break;
+ default:
+ bit_size = glsl_get_bit_size(val->type->type);
+ };
+
nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
src_alu_type,
dst_alu_type);
-
- unsigned num_components = glsl_get_vector_elements(val->const_type);
- unsigned bit_size =
- glsl_get_bit_size(val->const_type);
-
nir_const_value src[4];
- vtn_assert(count <= 7);
+
for (unsigned i = 0; i < count - 4; i++) {
nir_constant *c =
vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;
unsigned j = swap ? 1 - i : i;
- vtn_assert(bit_size == 32);
src[j] = c->values[0];
}
}
case SpvOpConstantNull:
- val->constant = vtn_null_constant(b, val->const_type);
+ val->constant = vtn_null_constant(b, val->type->type);
break;
case SpvOpConstantSampler:
vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
{
+ vtn_set_instruction_result_type(b, opcode, w, count);
+
switch (opcode) {
case SpvOpSource:
case SpvOpSourceContinued:
/* Handle OpSelect up-front here because it needs to be able to handle
* pointers and not just regular vectors and scalars.
*/
+ struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
+ struct vtn_value *sel_val = vtn_untyped_value(b, w[3]);
+ struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
+ struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);
+
+ const struct glsl_type *sel_type;
+ switch (res_val->type->base_type) {
+ case vtn_base_type_scalar:
+ sel_type = glsl_bool_type();
+ break;
+ case vtn_base_type_vector:
+ sel_type = glsl_vector_type(GLSL_TYPE_BOOL, res_val->type->length);
+ break;
+ case vtn_base_type_pointer:
+ /* We need to have actual storage for pointer types */
+ vtn_fail_if(res_val->type->type == NULL,
+ "Invalid pointer result type for OpSelect");
+ sel_type = glsl_bool_type();
+ break;
+ default:
+ vtn_fail("Result type of OpSelect must be a scalar, vector, or pointer");
+ }
+
+ if (unlikely(sel_val->type->type != sel_type)) {
+ if (sel_val->type->type == glsl_bool_type()) {
+ /* This case is illegal but some older versions of GLSLang produce
+ * it. The GLSLang issue was fixed on March 30, 2017:
+ *
+ * https://github.com/KhronosGroup/glslang/issues/809
+ *
+ * Unfortunately, there are applications in the wild which are
+ * shipping with this bug so it isn't nice to fail on them so we
+ * throw a warning instead. It's not actually a problem for us as
+ * nir_builder will just splat the condition out which is most
+ * likely what the client wanted anyway.
+ */
+ vtn_warn("Condition type of OpSelect must have the same number "
+ "of components as Result Type");
+ } else {
+ vtn_fail("Condition type of OpSelect must be a scalar or vector "
+ "of Boolean type. It must have the same number of "
+ "components as Result Type");
+ }
+ }
+
+ vtn_fail_if(obj1_val->type != res_val->type ||
+ obj2_val->type != res_val->type,
+ "Object types must match the result type in OpSelect");
+
struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, res_type->type);
ssa->def = nir_bcsel(&b->nb, vtn_ssa_value(b, w[3])->def,
words = vtn_foreach_instruction(b, words, word_end,
vtn_handle_variable_or_type_instruction);
+ /* Set types on all vtn_values */
+ vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);
+
vtn_build_cfg(b, words, word_end);
assert(b->entry_point->value_type == vtn_value_type_function);