#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"
+struct spec_constant_value {
+ bool is_double;
+ union {
+ uint32_t data32;
+ uint64_t data64;
+ };
+};
+
void
_vtn_warn(const char *file, int line, const char *msg, ...)
{
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_DOUBLE: {
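+ /* Use the type's real bit size so 64-bit ints and doubles get 64-bit
+  * load_const instructions instead of the old hard-coded 32 bits.
+  */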
+ int bit_size = glsl_get_bit_size(type);
if (glsl_type_is_vector_or_scalar(type)) {
unsigned num_components = glsl_get_vector_elements(val->type);
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, num_components, 32);
+ nir_load_const_instr_create(b->shader, num_components, bit_size);
load->value = constant->values[0];
struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
col_val->type = glsl_get_column_type(val->type);
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, rows, 32);
+ nir_load_const_instr_create(b->shader, rows, bit_size);
load->value = constant->values[i];
}
}
break;
+ }
case GLSL_TYPE_ARRAY: {
unsigned elems = glsl_get_length(val->type);
case vtn_value_type_ssa:
return val->ssa;
- case vtn_value_type_access_chain:
- /* This is needed for function parameters */
- return vtn_variable_load(b, val->access_chain);
-
default:
unreachable("Invalid type for an SSA value");
}
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
struct vtn_type *dest = ralloc(b, struct vtn_type);
- dest->type = src->type;
- dest->is_builtin = src->is_builtin;
- if (src->is_builtin)
- dest->builtin = src->builtin;
-
- if (!glsl_type_is_scalar(src->type)) {
- switch (glsl_get_base_type(src->type)) {
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_BOOL:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_DOUBLE:
- case GLSL_TYPE_ARRAY:
- dest->row_major = src->row_major;
- dest->stride = src->stride;
- dest->array_element = src->array_element;
- break;
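+ /* Shallow-copy the whole struct first; the switch below then deep-copies
+  * any ralloc'ed arrays so dest doesn't alias src's member storage.
+  */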
+ *dest = *src;
- case GLSL_TYPE_STRUCT: {
- unsigned elems = glsl_get_length(src->type);
+ switch (src->base_type) {
+ case vtn_base_type_void:
+ case vtn_base_type_scalar:
+ case vtn_base_type_vector:
+ case vtn_base_type_matrix:
+ case vtn_base_type_array:
+ case vtn_base_type_pointer:
+ case vtn_base_type_image:
+ case vtn_base_type_sampler:
+ /* Nothing more to do */
+ break;
- dest->members = ralloc_array(b, struct vtn_type *, elems);
- memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));
+ case vtn_base_type_struct:
+ dest->members = ralloc_array(b, struct vtn_type *, src->length);
+ memcpy(dest->members, src->members,
+ src->length * sizeof(src->members[0]));
- dest->offsets = ralloc_array(b, unsigned, elems);
- memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
- break;
- }
+ dest->offsets = ralloc_array(b, unsigned, src->length);
+ memcpy(dest->offsets, src->offsets,
+ src->length * sizeof(src->offsets[0]));
+ break;
- default:
- unreachable("unhandled type");
- }
+ case vtn_base_type_function:
+ dest->params = ralloc_array(b, struct vtn_type *, src->length);
+ memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
+ break;
}
return dest;
ctx->type->offsets[member] = dec->literals[0];
break;
case SpvDecorationMatrixStride:
- mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
+ /* Handled as a second pass */
break;
case SpvDecorationColMajor:
break; /* Nothing to do here. Column-major is the default. */
break;
case SpvDecorationPatch:
- vtn_warn("Tessellation not yet supported");
break;
case SpvDecorationSpecId:
case SpvDecorationFPRoundingMode:
case SpvDecorationFPFastMathMode:
case SpvDecorationAlignment:
- vtn_warn("Decoraiton only allowed for CL-style kernels: %s",
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
spirv_decoration_to_string(dec->decoration));
break;
+
+ default:
+ unreachable("Unhandled decoration");
+ }
+}
+
+/* Matrix strides are handled as a separate pass because we need to know
+ * whether the matrix is row-major or not first.
+ */
+static void
+struct_member_matrix_stride_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec,
+ void *void_ctx)
+{
+ if (dec->decoration != SpvDecorationMatrixStride)
+ return;
+ assert(member >= 0);
+
+ struct member_decoration_ctx *ctx = void_ctx;
+
+ struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
+ if (mat_type->row_major) {
+ mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
+ mat_type->stride = mat_type->array_element->stride;
+ mat_type->array_element->stride = dec->literals[0];
+ } else {
+ assert(mat_type->array_element->stride > 0);
+ mat_type->stride = dec->literals[0];
}
}
case SpvDecorationOffset:
case SpvDecorationXfbBuffer:
case SpvDecorationXfbStride:
- vtn_warn("Decoraiton only allowed for struct members: %s",
+ vtn_warn("Decoration only allowed for struct members: %s",
spirv_decoration_to_string(dec->decoration));
break;
case SpvDecorationLinkageAttributes:
case SpvDecorationNoContraction:
case SpvDecorationInputAttachmentIndex:
- vtn_warn("Decoraiton not allowed on types: %s",
+ vtn_warn("Decoration not allowed on types: %s",
spirv_decoration_to_string(dec->decoration));
break;
case SpvDecorationFPRoundingMode:
case SpvDecorationFPFastMathMode:
case SpvDecorationAlignment:
- vtn_warn("Decoraiton only allowed for CL-style kernels: %s",
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
spirv_decoration_to_string(dec->decoration));
break;
+
+ default:
+ unreachable("Unhandled decoration");
}
}
struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
val->type = rzalloc(b, struct vtn_type);
- val->type->is_builtin = false;
val->type->val = val;
switch (opcode) {
case SpvOpTypeVoid:
+ val->type->base_type = vtn_base_type_void;
val->type->type = glsl_void_type();
break;
case SpvOpTypeBool:
+ val->type->base_type = vtn_base_type_scalar;
val->type->type = glsl_bool_type();
break;
case SpvOpTypeInt: {
+ int bit_size = w[2];
const bool signedness = w[3];
- val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
+ val->type->base_type = vtn_base_type_scalar;
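+ /* OpTypeInt's width operand (w[2]) selects the 64-bit GLSL types; any
+  * other width falls through to the 32-bit int/uint types.
+  */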
+ if (bit_size == 64)
+ val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
+ else
+ val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
break;
}
case SpvOpTypeFloat: {
int bit_size = w[2];
+ val->type->base_type = vtn_base_type_scalar;
val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
break;
}
unsigned elems = w[3];
assert(glsl_type_is_scalar(base->type));
+ val->type->base_type = vtn_base_type_vector;
val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
-
- /* Vectors implicitly have sizeof(base_type) stride. For now, this
- * is always 4 bytes. This will have to change if we want to start
- * supporting doubles or half-floats.
- */
- val->type->stride = 4;
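+ /* Vector components are tightly packed, so the stride is the scalar size
+  * in bytes: 4 for 32-bit components, 8 for 64-bit ones.
+  */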
+ val->type->stride = glsl_get_bit_size(base->type) / 8;
val->type->array_element = base;
break;
}
unsigned columns = w[3];
assert(glsl_type_is_vector(base->type));
+ val->type->base_type = vtn_base_type_matrix;
val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
glsl_get_vector_elements(base->type),
columns);
assert(!glsl_type_is_error(val->type->type));
+ val->type->length = columns;
val->type->array_element = base;
val->type->row_major = false;
val->type->stride = 0;
struct vtn_type *array_element =
vtn_value(b, w[2], vtn_value_type_type)->type;
- unsigned length;
if (opcode == SpvOpTypeRuntimeArray) {
/* A length of 0 is used to denote unsized arrays */
- length = 0;
+ val->type->length = 0;
} else {
- length =
+ val->type->length =
vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
}
- val->type->type = glsl_array_type(array_element->type, length);
+ val->type->base_type = vtn_base_type_array;
+ val->type->type = glsl_array_type(array_element->type, val->type->length);
val->type->array_element = array_element;
val->type->stride = 0;
break;
case SpvOpTypeStruct: {
unsigned num_fields = count - 2;
+ val->type->base_type = vtn_base_type_struct;
+ val->type->length = num_fields;
val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
val->type->offsets = ralloc_array(b, unsigned, num_fields);
};
vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
+ vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);
const char *name = val->name ? val->name : "struct";
}
case SpvOpTypeFunction: {
- const struct glsl_type *return_type =
- vtn_value(b, w[2], vtn_value_type_type)->type->type;
- NIR_VLA(struct glsl_function_param, params, count - 3);
- for (unsigned i = 0; i < count - 3; i++) {
- params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;
+ val->type->base_type = vtn_base_type_function;
+ val->type->type = NULL;
- /* FIXME: */
- params[i].in = true;
- params[i].out = true;
+ val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;
+
+ const unsigned num_params = count - 3;
+ val->type->length = num_params;
+ val->type->params = ralloc_array(b, struct vtn_type *, num_params);
+ for (unsigned i = 0; i < num_params; i++) {
+ val->type->params[i] =
+ vtn_value(b, w[i + 3], vtn_value_type_type)->type;
}
- val->type->type = glsl_function_type(return_type, params, count - 3);
break;
}
- case SpvOpTypePointer:
- /* FIXME: For now, we'll just do the really lame thing and return
- * the same type. The validator should ensure that the proper number
- * of dereferences happen
- */
- val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
+ case SpvOpTypePointer: {
+ SpvStorageClass storage_class = w[2];
+ struct vtn_type *deref_type =
+ vtn_value(b, w[3], vtn_value_type_type)->type;
+
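+ /* Pointers get their own base type now: remember the storage class and
+  * the dereferenced type rather than aliasing the pointed-to type.
+  */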
+ val->type->base_type = vtn_base_type_pointer;
+ val->type->type = NULL;
+ val->type->storage_class = storage_class;
+ val->type->deref = deref_type;
break;
+ }
case SpvOpTypeImage: {
+ val->type->base_type = vtn_base_type_image;
+
const struct glsl_type *sampled_type =
vtn_value(b, w[2], vtn_value_type_type)->type->type;
val->type->access_qualifier = SpvAccessQualifierReadWrite;
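+ /* SPIR-V keeps multisampling as a flag that's separate from the image
+  * dim, so fold it into the GLSL dim here.
+  */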
if (multisampled) {
- assert(dim == GLSL_SAMPLER_DIM_2D);
- dim = GLSL_SAMPLER_DIM_MS;
+ if (dim == GLSL_SAMPLER_DIM_2D)
+ dim = GLSL_SAMPLER_DIM_MS;
+ else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
+ dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
+ else
+ assert(!"Unsupported multisampled image type");
}
val->type->image_format = translate_image_format(format);
if (sampled == 1) {
+ val->type->sampled = true;
val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
glsl_get_base_type(sampled_type));
} else if (sampled == 2) {
- assert((dim == GLSL_SAMPLER_DIM_SUBPASS) || format);
assert(!is_shadow);
+ val->type->sampled = false;
val->type->type = glsl_image_type(dim, is_array,
glsl_get_base_type(sampled_type));
} else {
* matters is that it's a sampler type as opposed to an integer type
* so the backend knows what to do.
*/
+ val->type->base_type = vtn_base_type_sampler;
val->type->type = glsl_bare_sampler_type();
break;
{
nir_constant *c = rzalloc(b, nir_constant);
+ /* For pointers and other typeless things, we have to return something but
+ * it doesn't matter what.
+ */
+ if (!type)
+ return c;
+
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_DOUBLE:
if (dec->decoration != SpvDecorationSpecId)
return;
- uint32_t *const_value = data;
+ struct spec_constant_value *const_value = data;
for (unsigned i = 0; i < b->num_specializations; i++) {
if (b->specializations[i].id == dec->literals[0]) {
- *const_value = b->specializations[i].data;
+ if (const_value->is_double)
+ const_value->data64 = b->specializations[i].data64;
+ else
+ const_value->data32 = b->specializations[i].data32;
return;
}
}
get_specialization(struct vtn_builder *b, struct vtn_value *val,
uint32_t const_value)
{
- vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &const_value);
- return const_value;
+ struct spec_constant_value data;
+ data.is_double = false;
+ data.data32 = const_value;
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+ return data.data32;
+}
+
+static uint64_t
+get_specialization64(struct vtn_builder *b, struct vtn_value *val,
+ uint64_t const_value)
+{
+ struct spec_constant_value data;
+ data.is_double = true;
+ data.data64 = const_value;
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+ return data.data64;
}
static void
assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));
- b->shader->info->cs.local_size[0] = val->constant->values[0].u32[0];
- b->shader->info->cs.local_size[1] = val->constant->values[0].u32[1];
- b->shader->info->cs.local_size[2] = val->constant->values[0].u32[2];
+ b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
+ b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
+ b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}
static void
break;
}
- case SpvOpConstant:
+ case SpvOpConstant: {
assert(glsl_type_is_scalar(val->const_type));
- val->constant->values[0].u32[0] = w[3];
+ int bit_size = glsl_get_bit_size(val->const_type);
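+ /* SPIR-V encodes a 64-bit literal as two 32-bit words with the low-order
+  * word first, so copy both words of the literal.
+  */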
+ if (bit_size == 64) {
+ val->constant->values->u32[0] = w[3];
+ val->constant->values->u32[1] = w[4];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values->u32[0] = w[3];
+ }
break;
- case SpvOpSpecConstant:
+ }
+ case SpvOpSpecConstant: {
assert(glsl_type_is_scalar(val->const_type));
- val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
+ int bit_size = glsl_get_bit_size(val->const_type);
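+ /* 64-bit spec constants take two literal words; vtn_u64_literal()
+  * assembles them before the specialization lookup.
+  */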
+ if (bit_size == 64)
+ val->constant->values[0].u64[0] =
+ get_specialization64(b, val, vtn_u64_literal(&w[3]));
+ else
+ val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
break;
+ }
case SpvOpSpecConstantComposite:
case SpvOpConstantComposite: {
unsigned elem_count = count - 3;
switch (glsl_get_base_type(val->const_type)) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_DOUBLE: {
+ int bit_size = glsl_get_bit_size(val->const_type);
if (glsl_type_is_matrix(val->const_type)) {
assert(glsl_get_matrix_columns(val->const_type) == elem_count);
for (unsigned i = 0; i < elem_count; i++)
} else {
assert(glsl_type_is_vector(val->const_type));
assert(glsl_get_vector_elements(val->const_type) == elem_count);
- for (unsigned i = 0; i < elem_count; i++)
- val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+ for (unsigned i = 0; i < elem_count; i++) {
+ if (bit_size == 64) {
+ val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+ }
+ }
}
ralloc_free(elems);
break;
-
+ }
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_ARRAY:
ralloc_steal(val->constant, elems);
SpvOp opcode = get_specialization(b, val, w[3]);
switch (opcode) {
case SpvOpVectorShuffle: {
- struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
- struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
- unsigned len0 = glsl_get_vector_elements(v0->const_type);
- unsigned len1 = glsl_get_vector_elements(v1->const_type);
-
- uint32_t u[8];
- for (unsigned i = 0; i < len0; i++)
- u[i] = v0->constant->values[0].u32[i];
- for (unsigned i = 0; i < len1; i++)
- u[len0 + i] = v1->constant->values[0].u32[i];
-
- for (unsigned i = 0; i < count - 6; i++) {
- uint32_t comp = w[i + 6];
- if (comp == (uint32_t)-1) {
- val->constant->values[0].u32[i] = 0xdeadbeef;
- } else {
- val->constant->values[0].u32[i] = u[comp];
+ struct vtn_value *v0 = &b->values[w[4]];
+ struct vtn_value *v1 = &b->values[w[5]];
+
+ assert(v0->value_type == vtn_value_type_constant ||
+ v0->value_type == vtn_value_type_undef);
+ assert(v1->value_type == vtn_value_type_constant ||
+ v1->value_type == vtn_value_type_undef);
+
+ unsigned len0 = v0->value_type == vtn_value_type_constant ?
+ glsl_get_vector_elements(v0->const_type) :
+ glsl_get_vector_elements(v0->type->type);
+ unsigned len1 = v1->value_type == vtn_value_type_constant ?
+ glsl_get_vector_elements(v1->const_type) :
+ glsl_get_vector_elements(v1->type->type);
+
+ assert(len0 + len1 <= 8);
+
+ unsigned bit_size = glsl_get_bit_size(val->const_type);
+ unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
+ glsl_get_bit_size(v0->const_type) :
+ glsl_get_bit_size(v0->type->type);
+ unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
+ glsl_get_bit_size(v1->const_type) :
+ glsl_get_bit_size(v1->type->type);
+
+ assert(bit_size == bit_size0 && bit_size == bit_size1);
+ (void)bit_size0; (void)bit_size1;
+
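+ /* Undef sources contribute no data; components selected from an undef
+  * source produce undefined values, which SPIR-V permits.
+  */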
+ if (bit_size == 64) {
+ uint64_t u64[8];
+ if (v0->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len0; i++)
+ u64[i] = v0->constant->values[0].u64[i];
+ }
+ if (v1->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len1; i++)
+ u64[len0 + i] = v1->constant->values[0].u64[i];
+ }
+
+ for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+ uint32_t comp = w[i + 6];
+ /* If component is not used, set the value to a known constant
+ * to detect if it is wrongly used.
+ */
+ if (comp == (uint32_t)-1)
+ val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
+ else
+ val->constant->values[0].u64[j] = u64[comp];
+ }
+ } else {
+ uint32_t u32[8];
+ if (v0->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len0; i++)
+ u32[i] = v0->constant->values[0].u32[i];
+ }
+ if (v1->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len1; i++)
+ u32[len0 + i] = v1->constant->values[0].u32[i];
+ }
+
+ for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+ uint32_t comp = w[i + 6];
+ /* If component is not used, set the value to a known constant
+ * to detect if it is wrongly used.
+ */
+ if (comp == (uint32_t)-1)
+ val->constant->values[0].u32[j] = 0xdeadbeef;
+ else
+ val->constant->values[0].u32[j] = u32[comp];
}
}
break;
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_BOOL:
/* If we hit this granularity, we're picking off an element */
if (glsl_type_is_matrix(type)) {
val->constant = *c;
} else {
unsigned num_components = glsl_get_vector_elements(type);
+ unsigned bit_size = glsl_get_bit_size(type);
for (unsigned i = 0; i < num_components; i++)
- val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
+ if (bit_size == 64) {
+ val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
+ }
}
} else {
struct vtn_value *insert =
*c = insert->constant;
} else {
unsigned num_components = glsl_get_vector_elements(type);
+ unsigned bit_size = glsl_get_bit_size(type);
for (unsigned i = 0; i < num_components; i++)
- (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
+ if (bit_size == 64) {
+ (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
+ } else {
+ assert(bit_size == 32);
+ (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
+ }
}
}
break;
default: {
bool swap;
- nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
+ nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
+ nir_alu_type src_alu_type = dst_alu_type;
+ nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap,
+ src_alu_type, dst_alu_type);
unsigned num_components = glsl_get_vector_elements(val->const_type);
unsigned bit_size =
for (unsigned i = 0; i < call->num_params; i++) {
unsigned arg_id = w[4 + i];
struct vtn_value *arg = vtn_untyped_value(b, arg_id);
- if (arg->value_type == vtn_value_type_access_chain) {
- nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
+ if (arg->value_type == vtn_value_type_pointer) {
+ nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
call->params[i] = nir_deref_var_clone(d, call);
} else {
struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_DOUBLE:
vtn_push_value(b, w[2], vtn_value_type_sampled_image);
val->sampled_image = ralloc(b, struct vtn_sampled_image);
val->sampled_image->image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
val->sampled_image->sampler =
- vtn_value(b, w[4], vtn_value_type_access_chain)->access_chain;
+ vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
return;
} else if (opcode == SpvOpImage) {
- struct vtn_value *val =
- vtn_push_value(b, w[2], vtn_value_type_access_chain);
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
if (src_val->value_type == vtn_value_type_sampled_image) {
- val->access_chain = src_val->sampled_image->image;
+ val->pointer = src_val->sampled_image->image;
} else {
- assert(src_val->value_type == vtn_value_type_access_chain);
- val->access_chain = src_val->access_chain;
+ assert(src_val->value_type == vtn_value_type_pointer);
+ val->pointer = src_val->pointer;
}
return;
}
if (sampled_val->value_type == vtn_value_type_sampled_image) {
sampled = *sampled_val->sampled_image;
} else {
- assert(sampled_val->value_type == vtn_value_type_access_chain);
+ assert(sampled_val->value_type == vtn_value_type_pointer);
sampled.image = NULL;
- sampled.sampler = sampled_val->access_chain;
+ sampled.sampler = sampled_val->pointer;
}
const struct glsl_type *image_type;
coord_components++;
coord = vtn_ssa_value(b, w[idx++])->def;
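+ /* The coordinate may carry more components than the texture op uses;
+  * nir_channels() trims it down to just the first coord_components.
+  */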
- p->src = nir_src_for_ssa(coord);
+ p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
+ (1 << coord_components) - 1));
p->src_type = nir_tex_src_coord;
p++;
break;
unreachable("Invalid base type for sampler result");
}
- nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
+ nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
nir_deref_var *texture;
if (sampled.image) {
- nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
+ nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
texture = image;
} else {
texture = sampler;
vtn_push_value(b, w[2], vtn_value_type_image_pointer);
val->image = ralloc(b, struct vtn_image_pointer);
- val->image->image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
val->image->coord = get_image_coord(b, w[4]);
val->image->sample = vtn_ssa_value(b, w[5])->def;
return;
break;
case SpvOpImageQuerySize:
- image.image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
image.coord = NULL;
image.sample = NULL;
break;
case SpvOpImageRead:
- image.image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
image.coord = get_image_coord(b, w[4]);
if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
break;
case SpvOpImageWrite:
- image.image =
- vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
+ image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
image.coord = get_image_coord(b, w[2]);
/* texel = w[3] */
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
- nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
+ nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
/* ImageQuerySize doesn't take any extra parameters */
intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
break;
+ case SpvOpAtomicCompareExchange:
case SpvOpAtomicIIncrement:
case SpvOpAtomicIDecrement:
case SpvOpAtomicExchange:
if (opcode != SpvOpImageWrite) {
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
- nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);
+
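+ /* Most image intrinsics have a fixed destination size, but image_size
+  * returns exactly as many components as the result type wants, so the
+  * old trimming movs are no longer needed.
+  */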
+ unsigned dest_components =
+ nir_intrinsic_infos[intrin->intrinsic].dest_components;
+ if (intrin->intrinsic == nir_intrinsic_image_size) {
+ dest_components = intrin->num_components =
+ glsl_get_vector_elements(type->type);
+ }
+
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest,
+ dest_components, 32, NULL);
nir_builder_instr_insert(&b->nb, &intrin->instr);
- /* The image intrinsics always return 4 channels but we may not want
- * that many. Emit a mov to trim it down.
- */
- unsigned swiz[4] = {0, 1, 2, 3};
val->ssa = vtn_create_ssa_value(b, type->type);
- val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
- glsl_get_vector_elements(type->type), false);
+ val->ssa->def = &intrin->dest.ssa;
} else {
nir_builder_instr_insert(&b->nb, &intrin->instr);
}
vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
{
- struct vtn_access_chain *chain;
+ struct vtn_pointer *ptr;
nir_intrinsic_instr *atomic;
switch (opcode) {
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
- chain =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
break;
case SpvOpAtomicStore:
- chain =
- vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
+ ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
break;
default:
SpvMemorySemanticsMask semantics = w[5];
*/
- if (chain->var->mode == vtn_variable_mode_workgroup) {
- struct vtn_type *type = chain->var->type;
- nir_deref_var *deref = vtn_access_chain_to_deref(b, chain);
+ if (ptr->mode == vtn_variable_mode_workgroup) {
+ nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
+ const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
atomic->variables[0] = nir_deref_var_clone(deref, atomic);
switch (opcode) {
case SpvOpAtomicLoad:
- atomic->num_components = glsl_get_vector_elements(type->type);
+ atomic->num_components = glsl_get_vector_elements(deref_type);
break;
case SpvOpAtomicStore:
- atomic->num_components = glsl_get_vector_elements(type->type);
+ atomic->num_components = glsl_get_vector_elements(deref_type);
nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
break;
}
} else {
- assert(chain->var->mode == vtn_variable_mode_ssbo);
- struct vtn_type *type;
+ assert(ptr->mode == vtn_variable_mode_ssbo);
nir_ssa_def *offset, *index;
- offset = vtn_access_chain_to_offset(b, chain, &index, &type, NULL, false);
+ offset = vtn_pointer_to_offset(b, ptr, &index, NULL);
nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);
switch (opcode) {
case SpvOpAtomicLoad:
- atomic->num_components = glsl_get_vector_elements(type->type);
+ atomic->num_components = glsl_get_vector_elements(ptr->type->type);
atomic->src[0] = nir_src_for_ssa(index);
atomic->src[1] = nir_src_for_ssa(offset);
break;
case SpvOpAtomicStore:
- atomic->num_components = glsl_get_vector_elements(type->type);
+ atomic->num_components = glsl_get_vector_elements(ptr->type->type);
nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
atomic->src[1] = nir_src_for_ssa(index);
nir_alu_instr *vec = create_vec(b->shader, num_components,
srcs[0]->bit_size);
+ /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+ *
+ * "When constructing a vector, there must be at least two Constituent
+ * operands."
+ */
+ assert(num_srcs >= 2);
+
unsigned dest_idx = 0;
for (unsigned i = 0; i < num_srcs; i++) {
nir_ssa_def *src = srcs[i];
+ assert(dest_idx + src->num_components <= num_components);
for (unsigned j = 0; j < src->num_components; j++) {
vec->src[dest_idx].src = nir_src_for_ssa(src);
vec->src[dest_idx].swizzle[0] = j;
}
}
+ /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+ *
+ * "When constructing a vector, the total number of components in all
+ * the operands must equal the number of components in Result Type."
+ */
+ assert(dest_idx == num_components);
+
nir_builder_instr_insert(&b->nb, &vec->instr);
return &vec->dest.dest.ssa;
break;
case SpvCapabilityGeometryStreams:
- case SpvCapabilityTessellation:
- case SpvCapabilityTessellationPointSize:
case SpvCapabilityLinkage:
case SpvCapabilityVector16:
case SpvCapabilityFloat16Buffer:
case SpvCapabilityFloat16:
- case SpvCapabilityFloat64:
- case SpvCapabilityInt64:
case SpvCapabilityInt64Atomics:
case SpvCapabilityAtomicStorage:
case SpvCapabilityInt16:
case SpvCapabilitySparseResidency:
case SpvCapabilityMinLod:
case SpvCapabilityTransformFeedback:
- case SpvCapabilityStorageImageReadWithoutFormat:
- case SpvCapabilityStorageImageWriteWithoutFormat:
vtn_warn("Unsupported SPIR-V capability: %s",
spirv_capability_to_string(cap));
break;
+ case SpvCapabilityFloat64:
+ spv_check_supported(float64, cap);
+ break;
+ case SpvCapabilityInt64:
+ spv_check_supported(int64, cap);
+ break;
+
case SpvCapabilityAddresses:
case SpvCapabilityKernel:
case SpvCapabilityImageBasic:
case SpvCapabilityImageMSArray:
spv_check_supported(image_ms_array, cap);
break;
+
+ case SpvCapabilityTessellation:
+ case SpvCapabilityTessellationPointSize:
+ spv_check_supported(tessellation, cap);
+ break;
+
+ case SpvCapabilityDrawParameters:
+ spv_check_supported(draw_parameters, cap);
+ break;
+
+ case SpvCapabilityStorageImageReadWithoutFormat:
+ spv_check_supported(image_read_without_format, cap);
+ break;
+
+ case SpvCapabilityStorageImageWriteWithoutFormat:
+ spv_check_supported(image_write_without_format, cap);
+ break;
+
+ case SpvCapabilityMultiView:
+ spv_check_supported(multiview, cap);
+ break;
+
+ default:
+ unreachable("Unhandled capability");
}
break;
}
case SpvExecutionModeEarlyFragmentTests:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.early_fragment_tests = true;
+ b->shader->info.fs.early_fragment_tests = true;
break;
case SpvExecutionModeInvocations:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info->gs.invocations = MAX2(1, mode->literals[0]);
+ b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
break;
case SpvExecutionModeDepthReplacing:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
break;
case SpvExecutionModeDepthGreater:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
break;
case SpvExecutionModeDepthLess:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
break;
case SpvExecutionModeDepthUnchanged:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
break;
case SpvExecutionModeLocalSize:
assert(b->shader->stage == MESA_SHADER_COMPUTE);
- b->shader->info->cs.local_size[0] = mode->literals[0];
- b->shader->info->cs.local_size[1] = mode->literals[1];
- b->shader->info->cs.local_size[2] = mode->literals[2];
+ b->shader->info.cs.local_size[0] = mode->literals[0];
+ b->shader->info.cs.local_size[1] = mode->literals[1];
+ b->shader->info.cs.local_size[2] = mode->literals[2];
break;
case SpvExecutionModeLocalSizeHint:
break; /* Nothing to do with this */
case SpvExecutionModeOutputVertices:
- assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info->gs.vertices_out = mode->literals[0];
+ if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL) {
+ b->shader->info.tess.tcs_vertices_out = mode->literals[0];
+ } else {
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info.gs.vertices_out = mode->literals[0];
+ }
break;
case SpvExecutionModeInputPoints:
case SpvExecutionModeInputTrianglesAdjacency:
case SpvExecutionModeQuads:
case SpvExecutionModeIsolines:
- if (b->shader->stage == MESA_SHADER_GEOMETRY) {
- b->shader->info->gs.vertices_in =
- vertices_in_from_spv_execution_mode(mode->exec_mode);
+ if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL) {
+ b->shader->info.tess.primitive_mode =
+ gl_primitive_from_spv_execution_mode(mode->exec_mode);
} else {
- assert(!"Tesselation shaders not yet supported");
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info.gs.vertices_in =
+ vertices_in_from_spv_execution_mode(mode->exec_mode);
}
break;
case SpvExecutionModeOutputLineStrip:
case SpvExecutionModeOutputTriangleStrip:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info->gs.output_primitive =
+ b->shader->info.gs.output_primitive =
gl_primitive_from_spv_execution_mode(mode->exec_mode);
break;
case SpvExecutionModeSpacingEqual:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
+ break;
case SpvExecutionModeSpacingFractionalEven:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
+ break;
case SpvExecutionModeSpacingFractionalOdd:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
+ break;
case SpvExecutionModeVertexOrderCw:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ /* Vulkan's notion of CCW seems to match the hardware backends,
+ * but be the opposite of OpenGL. Currently NIR follows GL semantics,
+ * so we set it backwards here.
+ */
+ b->shader->info.tess.ccw = true;
+ break;
case SpvExecutionModeVertexOrderCcw:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ /* Backwards; see above */
+ b->shader->info.tess.ccw = false;
+ break;
case SpvExecutionModePointMode:
- assert(!"TODO: Add tessellation metadata");
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info.tess.point_mode = true;
break;
case SpvExecutionModePixelCenterInteger:
case SpvExecutionModeVecTypeHint:
case SpvExecutionModeContractionOff:
break; /* OpenCL */
+
+ default:
+ unreachable("Unhandled execution mode");
}
}
vtn_handle_constant(b, opcode, w, count);
break;
+ case SpvOpUndef:
case SpvOpVariable:
vtn_handle_variables(b, opcode, w, count);
break;
break;
case SpvOpImageQuerySize: {
- struct vtn_access_chain *image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
- if (glsl_type_is_image(image->var->var->interface_type)) {
+ struct vtn_pointer *image =
+ vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+ if (image->mode == vtn_variable_mode_image) {
vtn_handle_image(b, opcode, w, count);
} else {
+ assert(image->mode == vtn_variable_mode_sampler);
vtn_handle_texture(b, opcode, w, count);
}
break;
if (pointer->value_type == vtn_value_type_image_pointer) {
vtn_handle_image(b, opcode, w, count);
} else {
- assert(pointer->value_type == vtn_value_type_access_chain);
+ assert(pointer->value_type == vtn_value_type_pointer);
vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
}
break;
if (pointer->value_type == vtn_value_type_image_pointer) {
vtn_handle_image(b, opcode, w, count);
} else {
- assert(pointer->value_type == vtn_value_type_access_chain);
+ assert(pointer->value_type == vtn_value_type_pointer);
vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
}
break;
b->shader = nir_shader_create(NULL, stage, options, NULL);
/* Set shader info defaults */
- b->shader->info->gs.invocations = 1;
+ b->shader->info.gs.invocations = 1;
/* Parse execution modes */
vtn_foreach_execution_mode(b, b->entry_point,