#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
+#include "spirv_info.h"
+
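+/* Carries a scalar specialization value between the get_specialization*()
+ * helpers and spec_constant_decoration_cb(); is_double selects which union
+ * member the callback writes.
+ */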
+struct spec_constant_value {
+ bool is_double;
+ union {
+ uint32_t data32;
+ uint64_t data64;
+ };
+};
+
+void
+_vtn_warn(const char *file, int line, const char *msg, ...)
+{
+ char *formatted;
+ va_list args;
+
+ va_start(args, msg);
+ formatted = ralloc_vasprintf(NULL, msg, args);
+ va_end(args);
+
+ fprintf(stderr, "%s:%d WARNING: %s\n", file, line, formatted);
+
+ ralloc_free(formatted);
+}
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
if (glsl_type_is_vector_or_scalar(type)) {
unsigned num_components = glsl_get_vector_elements(val->type);
- unsigned bit_size = glsl_get_bit_size(glsl_get_base_type(val->type));
+ unsigned bit_size = glsl_get_bit_size(val->type);
val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
} else {
unsigned elems = glsl_get_length(val->type);
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_DOUBLE: {
+ int bit_size = glsl_get_bit_size(type);
if (glsl_type_is_vector_or_scalar(type)) {
unsigned num_components = glsl_get_vector_elements(val->type);
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, num_components, 32);
+ nir_load_const_instr_create(b->shader, num_components, bit_size);
- for (unsigned i = 0; i < num_components; i++)
- load->value.u32[i] = constant->value.u[i];
+ load->value = constant->values[0];
nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
val->def = &load->def;
struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
col_val->type = glsl_get_column_type(val->type);
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, rows, 32);
+ nir_load_const_instr_create(b->shader, rows, bit_size);
- for (unsigned j = 0; j < rows; j++)
- load->value.u32[j] = constant->value.u[rows * i + j];
+ load->value = constant->values[i];
nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
col_val->def = &load->def;
}
}
break;
+ }
case GLSL_TYPE_ARRAY: {
unsigned elems = glsl_get_length(val->type);
case vtn_value_type_ssa:
return val->ssa;
- case vtn_value_type_access_chain:
- /* This is needed for function parameters */
- return vtn_variable_load(b, val->access_chain);
-
default:
unreachable("Invalid type for an SSA value");
}
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
{
struct vtn_type *dest = ralloc(b, struct vtn_type);
- dest->type = src->type;
- dest->is_builtin = src->is_builtin;
- if (src->is_builtin)
- dest->builtin = src->builtin;
-
- if (!glsl_type_is_scalar(src->type)) {
- switch (glsl_get_base_type(src->type)) {
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_BOOL:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_DOUBLE:
- case GLSL_TYPE_ARRAY:
- dest->row_major = src->row_major;
- dest->stride = src->stride;
- dest->array_element = src->array_element;
- break;
+ *dest = *src;
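+
+ /* The shallow copy above covers every scalar field; only the ralloc'd
+ * arrays below need duplicating so the copy can be mutated independently.
+ */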
- case GLSL_TYPE_STRUCT: {
- unsigned elems = glsl_get_length(src->type);
+ switch (src->base_type) {
+ case vtn_base_type_void:
+ case vtn_base_type_scalar:
+ case vtn_base_type_vector:
+ case vtn_base_type_matrix:
+ case vtn_base_type_array:
+ case vtn_base_type_pointer:
+ case vtn_base_type_image:
+ case vtn_base_type_sampler:
+ /* Nothing more to do */
+ break;
- dest->members = ralloc_array(b, struct vtn_type *, elems);
- memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));
+ case vtn_base_type_struct:
+ dest->members = ralloc_array(b, struct vtn_type *, src->length);
+ memcpy(dest->members, src->members,
+ src->length * sizeof(src->members[0]));
- dest->offsets = ralloc_array(b, unsigned, elems);
- memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
- break;
- }
+ dest->offsets = ralloc_array(b, unsigned, src->length);
+ memcpy(dest->offsets, src->offsets,
+ src->length * sizeof(src->offsets[0]));
+ break;
- default:
- unreachable("unhandled type");
- }
+ case vtn_base_type_function:
+ dest->params = ralloc_array(b, struct vtn_type *, src->length);
+ memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
+ break;
}
return dest;
assert(member < ctx->num_fields);
switch (dec->decoration) {
+ case SpvDecorationNonWritable:
+ case SpvDecorationNonReadable:
case SpvDecorationRelaxedPrecision:
+ case SpvDecorationVolatile:
+ case SpvDecorationCoherent:
+ case SpvDecorationUniform:
break; /* FIXME: Do nothing with this for now. */
case SpvDecorationNoPerspective:
- ctx->fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+ ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
break;
case SpvDecorationFlat:
- ctx->fields[member].interpolation = INTERP_QUALIFIER_FLAT;
+ ctx->fields[member].interpolation = INTERP_MODE_FLAT;
break;
case SpvDecorationCentroid:
ctx->fields[member].centroid = true;
case SpvDecorationSample:
ctx->fields[member].sample = true;
break;
+ case SpvDecorationStream:
+ /* Vulkan only allows one GS stream */
+ assert(dec->literals[0] == 0);
+ break;
case SpvDecorationLocation:
ctx->fields[member].location = dec->literals[0];
break;
+ case SpvDecorationComponent:
+ break; /* FIXME: What should we do with these? */
case SpvDecorationBuiltIn:
ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
ctx->type->members[member]->is_builtin = true;
ctx->type->offsets[member] = dec->literals[0];
break;
case SpvDecorationMatrixStride:
- mutable_matrix_member(b, ctx->type, member)->stride = dec->literals[0];
+ /* Handled as a second pass */
break;
case SpvDecorationColMajor:
break; /* Nothing to do here. Column-major is the default. */
case SpvDecorationRowMajor:
mutable_matrix_member(b, ctx->type, member)->row_major = true;
break;
+
+ case SpvDecorationPatch:
+ break;
+
+ case SpvDecorationSpecId:
+ case SpvDecorationBlock:
+ case SpvDecorationBufferBlock:
+ case SpvDecorationArrayStride:
+ case SpvDecorationGLSLShared:
+ case SpvDecorationGLSLPacked:
+ case SpvDecorationInvariant:
+ case SpvDecorationRestrict:
+ case SpvDecorationAliased:
+ case SpvDecorationConstant:
+ case SpvDecorationIndex:
+ case SpvDecorationBinding:
+ case SpvDecorationDescriptorSet:
+ case SpvDecorationLinkageAttributes:
+ case SpvDecorationNoContraction:
+ case SpvDecorationInputAttachmentIndex:
+ vtn_warn("Decoration not allowed on struct members: %s",
+ spirv_decoration_to_string(dec->decoration));
+ break;
+
+ case SpvDecorationXfbBuffer:
+ case SpvDecorationXfbStride:
+ vtn_warn("Vulkan does not have transform feedback");
+ break;
+
+ case SpvDecorationCPacked:
+ case SpvDecorationSaturatedConversion:
+ case SpvDecorationFuncParamAttr:
+ case SpvDecorationFPRoundingMode:
+ case SpvDecorationFPFastMathMode:
+ case SpvDecorationAlignment:
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
+ spirv_decoration_to_string(dec->decoration));
+ break;
+
default:
- unreachable("Unhandled member decoration");
+ unreachable("Unhandled decoration");
+ }
+}
+
+/* Matrix strides are handled as a separate pass because we need to know
+ * whether the matrix is row-major or not first.
+ */
+static void
+struct_member_matrix_stride_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec,
+ void *void_ctx)
+{
+ if (dec->decoration != SpvDecorationMatrixStride)
+ return;
+ assert(member >= 0);
+
+ struct member_decoration_ctx *ctx = void_ctx;
+
+ struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
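+
+ /* MatrixStride gives the stride between columns of a column-major matrix
+ * or between rows of a row-major one. In the row-major case the decorated
+ * stride is stored on the (copied) vector type, and the vector's previous
+ * stride becomes the matrix stride.
+ */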
+ if (mat_type->row_major) {
+ mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
+ mat_type->stride = mat_type->array_element->stride;
+ mat_type->array_element->stride = dec->literals[0];
+ } else {
+ assert(mat_type->array_element->stride > 0);
+ mat_type->stride = dec->literals[0];
}
}
/* Ignore these, since we get explicit offsets anyways */
break;
+ case SpvDecorationRowMajor:
+ case SpvDecorationColMajor:
+ case SpvDecorationMatrixStride:
+ case SpvDecorationBuiltIn:
+ case SpvDecorationNoPerspective:
+ case SpvDecorationFlat:
+ case SpvDecorationPatch:
+ case SpvDecorationCentroid:
+ case SpvDecorationSample:
+ case SpvDecorationVolatile:
+ case SpvDecorationCoherent:
+ case SpvDecorationNonWritable:
+ case SpvDecorationNonReadable:
+ case SpvDecorationUniform:
case SpvDecorationStream:
- assert(dec->literals[0] == 0);
+ case SpvDecorationLocation:
+ case SpvDecorationComponent:
+ case SpvDecorationOffset:
+ case SpvDecorationXfbBuffer:
+ case SpvDecorationXfbStride:
+ vtn_warn("Decoration only allowed for struct members: %s",
+ spirv_decoration_to_string(dec->decoration));
+ break;
+
+ case SpvDecorationRelaxedPrecision:
+ case SpvDecorationSpecId:
+ case SpvDecorationInvariant:
+ case SpvDecorationRestrict:
+ case SpvDecorationAliased:
+ case SpvDecorationConstant:
+ case SpvDecorationIndex:
+ case SpvDecorationBinding:
+ case SpvDecorationDescriptorSet:
+ case SpvDecorationLinkageAttributes:
+ case SpvDecorationNoContraction:
+ case SpvDecorationInputAttachmentIndex:
+ vtn_warn("Decoration not allowed on types: %s",
+ spirv_decoration_to_string(dec->decoration));
+ break;
+
+ case SpvDecorationCPacked:
+ case SpvDecorationSaturatedConversion:
+ case SpvDecorationFuncParamAttr:
+ case SpvDecorationFPRoundingMode:
+ case SpvDecorationFPFastMathMode:
+ case SpvDecorationAlignment:
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
+ spirv_decoration_to_string(dec->decoration));
break;
default:
- unreachable("Unhandled type decoration");
+ unreachable("Unhandled decoration");
}
}
struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
val->type = rzalloc(b, struct vtn_type);
- val->type->is_builtin = false;
val->type->val = val;
switch (opcode) {
case SpvOpTypeVoid:
+ val->type->base_type = vtn_base_type_void;
val->type->type = glsl_void_type();
break;
case SpvOpTypeBool:
+ val->type->base_type = vtn_base_type_scalar;
val->type->type = glsl_bool_type();
break;
case SpvOpTypeInt: {
+ int bit_size = w[2];
const bool signedness = w[3];
- val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
+ val->type->base_type = vtn_base_type_scalar;
+ if (bit_size == 64)
+ val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
+ else
+ val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
break;
}
- case SpvOpTypeFloat:
- val->type->type = glsl_float_type();
+ case SpvOpTypeFloat: {
+ int bit_size = w[2];
+ val->type->base_type = vtn_base_type_scalar;
+ val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
break;
+ }
case SpvOpTypeVector: {
struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
unsigned elems = w[3];
assert(glsl_type_is_scalar(base->type));
+ val->type->base_type = vtn_base_type_vector;
val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
-
- /* Vectors implicitly have sizeof(base_type) stride. For now, this
- * is always 4 bytes. This will have to change if we want to start
- * supporting doubles or half-floats.
- */
- val->type->stride = 4;
+ val->type->stride = glsl_get_bit_size(base->type) / 8;
val->type->array_element = base;
break;
}
unsigned columns = w[3];
assert(glsl_type_is_vector(base->type));
+ val->type->base_type = vtn_base_type_matrix;
val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
glsl_get_vector_elements(base->type),
columns);
assert(!glsl_type_is_error(val->type->type));
+ val->type->length = columns;
val->type->array_element = base;
val->type->row_major = false;
val->type->stride = 0;
struct vtn_type *array_element =
vtn_value(b, w[2], vtn_value_type_type)->type;
- unsigned length;
if (opcode == SpvOpTypeRuntimeArray) {
/* A length of 0 is used to denote unsized arrays */
- length = 0;
+ val->type->length = 0;
} else {
- length =
- vtn_value(b, w[3], vtn_value_type_constant)->constant->value.u[0];
+ val->type->length =
+ vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
}
- val->type->type = glsl_array_type(array_element->type, length);
+ val->type->base_type = vtn_base_type_array;
+ val->type->type = glsl_array_type(array_element->type, val->type->length);
val->type->array_element = array_element;
val->type->stride = 0;
break;
case SpvOpTypeStruct: {
unsigned num_fields = count - 2;
+ val->type->base_type = vtn_base_type_struct;
+ val->type->length = num_fields;
val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
val->type->offsets = ralloc_array(b, unsigned, num_fields);
};
vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
+ vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);
const char *name = val->name ? val->name : "struct";
}
case SpvOpTypeFunction: {
- const struct glsl_type *return_type =
- vtn_value(b, w[2], vtn_value_type_type)->type->type;
- NIR_VLA(struct glsl_function_param, params, count - 3);
- for (unsigned i = 0; i < count - 3; i++) {
- params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;
+ val->type->base_type = vtn_base_type_function;
+ val->type->type = NULL;
- /* FIXME: */
- params[i].in = true;
- params[i].out = true;
+ val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;
+
+ const unsigned num_params = count - 3;
+ val->type->length = num_params;
+ val->type->params = ralloc_array(b, struct vtn_type *, num_params);
+ for (unsigned i = 0; i < count - 3; i++) {
+ val->type->params[i] =
+ vtn_value(b, w[i + 3], vtn_value_type_type)->type;
}
- val->type->type = glsl_function_type(return_type, params, count - 3);
break;
}
- case SpvOpTypePointer:
- /* FIXME: For now, we'll just do the really lame thing and return
- * the same type. The validator should ensure that the proper number
- * of dereferences happen
- */
- val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
+ case SpvOpTypePointer: {
+ SpvStorageClass storage_class = w[2];
+ struct vtn_type *deref_type =
+ vtn_value(b, w[3], vtn_value_type_type)->type;
+
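+ /* Pointers have no GLSL analogue, so type->type stays NULL; the storage
+ * class and pointee type are enough to lower the pointer later.
+ */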
+ val->type->base_type = vtn_base_type_pointer;
+ val->type->type = NULL;
+ val->type->storage_class = storage_class;
+ val->type->deref = deref_type;
break;
+ }
case SpvOpTypeImage: {
+ val->type->base_type = vtn_base_type_image;
+
const struct glsl_type *sampled_type =
vtn_value(b, w[2], vtn_value_type_type)->type->type;
case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
+ case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
default:
unreachable("Invalid SPIR-V Sampler dimension");
}
val->type->access_qualifier = SpvAccessQualifierReadWrite;
if (multisampled) {
- assert(dim == GLSL_SAMPLER_DIM_2D);
- dim = GLSL_SAMPLER_DIM_MS;
+ if (dim == GLSL_SAMPLER_DIM_2D)
+ dim = GLSL_SAMPLER_DIM_MS;
+ else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
+ dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
+ else
+ assert(!"Unsupported multisampled image type");
}
val->type->image_format = translate_image_format(format);
if (sampled == 1) {
+ val->type->sampled = true;
val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
glsl_get_base_type(sampled_type));
} else if (sampled == 2) {
- assert(format);
assert(!is_shadow);
+ val->type->sampled = false;
val->type->type = glsl_image_type(dim, is_array,
glsl_get_base_type(sampled_type));
} else {
* matters is that it's a sampler type as opposed to an integer type
* so the backend knows what to do.
*/
+ val->type->base_type = vtn_base_type_sampler;
val->type->type = glsl_bare_sampler_type();
break;
{
nir_constant *c = rzalloc(b, nir_constant);
+ /* For pointers and other typeless things, we have to return something but
+ * it doesn't matter what.
+ */
+ if (!type)
+ return c;
+
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_DOUBLE:
}
static void
-spec_constant_deocoration_cb(struct vtn_builder *b, struct vtn_value *v,
+spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
int member, const struct vtn_decoration *dec,
void *data)
{
if (dec->decoration != SpvDecorationSpecId)
return;
- uint32_t *const_value = data;
+ struct spec_constant_value *const_value = data;
for (unsigned i = 0; i < b->num_specializations; i++) {
if (b->specializations[i].id == dec->literals[0]) {
- *const_value = b->specializations[i].data;
+ if (const_value->is_double)
+ const_value->data64 = b->specializations[i].data64;
+ else
+ const_value->data32 = b->specializations[i].data32;
return;
}
}
get_specialization(struct vtn_builder *b, struct vtn_value *val,
uint32_t const_value)
{
- vtn_foreach_decoration(b, val, spec_constant_deocoration_cb, &const_value);
- return const_value;
+ struct spec_constant_value data;
+ data.is_double = false;
+ data.data32 = const_value;
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+ return data.data32;
+}
+
+static uint64_t
+get_specialization64(struct vtn_builder *b, struct vtn_value *val,
+ uint64_t const_value)
+{
+ struct spec_constant_value data;
+ data.is_double = true;
+ data.data64 = const_value;
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+ return data.data64;
+}
+
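+/* Per the SPIR-V spec, a constant decorated as the WorkgroupSize builtin
+ * overrides the LocalSize execution mode, so propagate its value into the
+ * shader info once it is known.
+ */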
+static void
+handle_workgroup_size_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val,
+ int member,
+ const struct vtn_decoration *dec,
+ void *data)
+{
+ assert(member == -1);
+ if (dec->decoration != SpvDecorationBuiltIn ||
+ dec->literals[0] != SpvBuiltInWorkgroupSize)
+ return;
+
+ assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));
+
+ b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
+ b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
+ b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
}
static void
switch (opcode) {
case SpvOpConstantTrue:
assert(val->const_type == glsl_bool_type());
- val->constant->value.u[0] = NIR_TRUE;
+ val->constant->values[0].u32[0] = NIR_TRUE;
break;
case SpvOpConstantFalse:
assert(val->const_type == glsl_bool_type());
- val->constant->value.u[0] = NIR_FALSE;
+ val->constant->values[0].u32[0] = NIR_FALSE;
break;
case SpvOpSpecConstantTrue:
assert(val->const_type == glsl_bool_type());
uint32_t int_val =
get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
- val->constant->value.u[0] = int_val ? NIR_TRUE : NIR_FALSE;
+ val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
break;
}
- case SpvOpConstant:
+ case SpvOpConstant: {
assert(glsl_type_is_scalar(val->const_type));
- val->constant->value.u[0] = w[3];
+ int bit_size = glsl_get_bit_size(val->const_type);
+ if (bit_size == 64) {
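+ /* 64-bit literals span two SPIR-V words, low-order word first */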
+ val->constant->values->u32[0] = w[3];
+ val->constant->values->u32[1] = w[4];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values->u32[0] = w[3];
+ }
break;
- case SpvOpSpecConstant:
+ }
+ case SpvOpSpecConstant: {
assert(glsl_type_is_scalar(val->const_type));
- val->constant->value.u[0] = get_specialization(b, val, w[3]);
+ int bit_size = glsl_get_bit_size(val->const_type);
+ if (bit_size == 64)
+ val->constant->values[0].u64[0] =
+ get_specialization64(b, val, vtn_u64_literal(&w[3]));
+ else
+ val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
break;
+ }
case SpvOpSpecConstantComposite:
case SpvOpConstantComposite: {
unsigned elem_count = count - 3;
switch (glsl_get_base_type(val->const_type)) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_DOUBLE: {
+ int bit_size = glsl_get_bit_size(val->const_type);
if (glsl_type_is_matrix(val->const_type)) {
- unsigned rows = glsl_get_vector_elements(val->const_type);
assert(glsl_get_matrix_columns(val->const_type) == elem_count);
for (unsigned i = 0; i < elem_count; i++)
- for (unsigned j = 0; j < rows; j++)
- val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
+ val->constant->values[i] = elems[i]->values[0];
} else {
assert(glsl_type_is_vector(val->const_type));
assert(glsl_get_vector_elements(val->const_type) == elem_count);
- for (unsigned i = 0; i < elem_count; i++)
- val->constant->value.u[i] = elems[i]->value.u[0];
+ for (unsigned i = 0; i < elem_count; i++) {
+ if (bit_size == 64) {
+ val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+ }
+ }
}
ralloc_free(elems);
break;
-
+ }
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_ARRAY:
ralloc_steal(val->constant, elems);
SpvOp opcode = get_specialization(b, val, w[3]);
switch (opcode) {
case SpvOpVectorShuffle: {
- struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
- struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
- unsigned len0 = glsl_get_vector_elements(v0->const_type);
- unsigned len1 = glsl_get_vector_elements(v1->const_type);
-
- uint32_t u[8];
- for (unsigned i = 0; i < len0; i++)
- u[i] = v0->constant->value.u[i];
- for (unsigned i = 0; i < len1; i++)
- u[len0 + i] = v1->constant->value.u[i];
-
- for (unsigned i = 0; i < count - 6; i++) {
- uint32_t comp = w[i + 6];
- if (comp == (uint32_t)-1) {
- val->constant->value.u[i] = 0xdeadbeef;
- } else {
- val->constant->value.u[i] = u[comp];
+ struct vtn_value *v0 = &b->values[w[4]];
+ struct vtn_value *v1 = &b->values[w[5]];
+
+ assert(v0->value_type == vtn_value_type_constant ||
+ v0->value_type == vtn_value_type_undef);
+ assert(v1->value_type == vtn_value_type_constant ||
+ v1->value_type == vtn_value_type_undef);
+
+ unsigned len0 = v0->value_type == vtn_value_type_constant ?
+ glsl_get_vector_elements(v0->const_type) :
+ glsl_get_vector_elements(v0->type->type);
+ unsigned len1 = v1->value_type == vtn_value_type_constant ?
+ glsl_get_vector_elements(v1->const_type) :
+ glsl_get_vector_elements(v1->type->type);
+
+ assert(len0 + len1 < 16);
+
+ unsigned bit_size = glsl_get_bit_size(val->const_type);
+ unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
+ glsl_get_bit_size(v0->const_type) :
+ glsl_get_bit_size(v0->type->type);
+ unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
+ glsl_get_bit_size(v1->const_type) :
+ glsl_get_bit_size(v1->type->type);
+
+ assert(bit_size == bit_size0 && bit_size == bit_size1);
+ (void)bit_size0; (void)bit_size1;
+
+ if (bit_size == 64) {
+ uint64_t u64[8];
+ if (v0->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len0; i++)
+ u64[i] = v0->constant->values[0].u64[i];
+ }
+ if (v1->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len1; i++)
+ u64[len0 + i] = v1->constant->values[0].u64[i];
+ }
+
+ for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+ uint32_t comp = w[i + 6];
+ /* If the component is not used, set the value to a known constant
+ * so any accidental use of it is easy to spot.
+ */
+ if (comp == (uint32_t)-1)
+ val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
+ else
+ val->constant->values[0].u64[j] = u64[comp];
+ }
+ } else {
+ uint32_t u32[8];
+ if (v0->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len0; i++)
+ u32[i] = v0->constant->values[0].u32[i];
+ }
+ if (v1->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len1; i++)
+ u32[len0 + i] = v1->constant->values[0].u32[i];
+ }
+
+ for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+ uint32_t comp = w[i + 6];
+ /* If the component is not used, set the value to a known constant
+ * so any accidental use of it is easy to spot.
+ */
+ if (comp == (uint32_t)-1)
+ val->constant->values[0].u32[j] = 0xdeadbeef;
+ else
+ val->constant->values[0].u32[j] = u32[comp];
}
}
- return;
+ break;
}
case SpvOpCompositeExtract:
}
int elem = -1;
+ int col = 0;
const struct glsl_type *type = comp->const_type;
for (unsigned i = deref_start; i < count; i++) {
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_BOOL:
/* If we hit this granularity, we're picking off an element */
- if (elem < 0)
- elem = 0;
-
if (glsl_type_is_matrix(type)) {
- elem += w[i] * glsl_get_vector_elements(type);
+ assert(col == 0 && elem == -1);
+ col = w[i];
+ elem = 0;
type = glsl_get_column_type(type);
} else {
- assert(glsl_type_is_vector(type));
- elem += w[i];
+ assert(elem <= 0 && glsl_type_is_vector(type));
+ elem = w[i];
type = glsl_scalar_type(glsl_get_base_type(type));
}
continue;
val->constant = *c;
} else {
unsigned num_components = glsl_get_vector_elements(type);
+ unsigned bit_size = glsl_get_bit_size(type);
for (unsigned i = 0; i < num_components; i++)
- val->constant->value.u[i] = (*c)->value.u[elem + i];
+ if (bit_size == 64) {
+ val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
+ }
}
} else {
struct vtn_value *insert =
*c = insert->constant;
} else {
unsigned num_components = glsl_get_vector_elements(type);
+ unsigned bit_size = glsl_get_bit_size(type);
for (unsigned i = 0; i < num_components; i++)
- (*c)->value.u[elem + i] = insert->constant->value.u[i];
+ if (bit_size == 64) {
+ (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
+ } else {
+ assert(bit_size == 32);
+ (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
+ }
}
}
- return;
+ break;
}
default: {
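+ /* The remaining OpSpecConstantOp opcodes all map onto NIR ALU ops, so
+ * fold them now by running nir_eval_const_opcode() on the constant
+ * sources.
+ */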
bool swap;
- nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
+ nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
+ nir_alu_type src_alu_type = dst_alu_type;
+ nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);
unsigned num_components = glsl_get_vector_elements(val->const_type);
unsigned bit_size =
- glsl_get_bit_size(glsl_get_base_type(val->const_type));
+ glsl_get_bit_size(val->const_type);
- nir_const_value src[3];
+ nir_const_value src[4];
assert(count <= 7);
for (unsigned i = 0; i < count - 4; i++) {
nir_constant *c =
unsigned j = swap ? 1 - i : i;
assert(bit_size == 32);
- for (unsigned k = 0; k < num_components; k++)
- src[j].u32[k] = c->value.u[k];
+ src[j] = c->values[0];
}
- nir_const_value res = nir_eval_const_opcode(op, num_components,
- bit_size, src);
-
- for (unsigned k = 0; k < num_components; k++)
- val->constant->value.u[k] = res.u32[k];
-
- return;
+ val->constant->values[0] =
+ nir_eval_const_opcode(op, num_components, bit_size, src);
+ break;
} /* default */
}
+ break;
}
case SpvOpConstantNull:
default:
unreachable("Unhandled opcode");
}
+
+ /* Now that we have the value, update the workgroup size if needed */
+ vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
static void
for (unsigned i = 0; i < call->num_params; i++) {
unsigned arg_id = w[4 + i];
struct vtn_value *arg = vtn_untyped_value(b, arg_id);
- if (arg->value_type == vtn_value_type_access_chain) {
- nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
- call->params[i] = nir_deref_as_var(nir_copy_deref(call, &d->deref));
+ if (arg->value_type == vtn_value_type_pointer) {
+ nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
+ call->params[i] = nir_deref_var_clone(d, call);
} else {
struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_DOUBLE:
vtn_push_value(b, w[2], vtn_value_type_sampled_image);
val->sampled_image = ralloc(b, struct vtn_sampled_image);
val->sampled_image->image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
val->sampled_image->sampler =
- vtn_value(b, w[4], vtn_value_type_access_chain)->access_chain;
+ vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
return;
} else if (opcode == SpvOpImage) {
- struct vtn_value *val =
- vtn_push_value(b, w[2], vtn_value_type_access_chain);
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
if (src_val->value_type == vtn_value_type_sampled_image) {
- val->access_chain = src_val->sampled_image->image;
+ val->pointer = src_val->sampled_image->image;
} else {
- assert(src_val->value_type == vtn_value_type_access_chain);
- val->access_chain = src_val->access_chain;
+ assert(src_val->value_type == vtn_value_type_pointer);
+ val->pointer = src_val->pointer;
}
return;
}
if (sampled_val->value_type == vtn_value_type_sampled_image) {
sampled = *sampled_val->sampled_image;
} else {
- assert(sampled_val->value_type == vtn_value_type_access_chain);
+ assert(sampled_val->value_type == vtn_value_type_pointer);
sampled.image = NULL;
- sampled.sampler = sampled_val->access_chain;
+ sampled.sampler = sampled_val->pointer;
}
const struct glsl_type *image_type;
} else {
image_type = sampled.sampler->var->var->interface_type;
}
-
- nir_tex_src srcs[8]; /* 8 should be enough */
- nir_tex_src *p = srcs;
-
- unsigned idx = 4;
-
- bool has_coord = false;
- switch (opcode) {
- case SpvOpImageSampleImplicitLod:
- case SpvOpImageSampleExplicitLod:
- case SpvOpImageSampleDrefImplicitLod:
- case SpvOpImageSampleDrefExplicitLod:
- case SpvOpImageSampleProjImplicitLod:
- case SpvOpImageSampleProjExplicitLod:
- case SpvOpImageSampleProjDrefImplicitLod:
- case SpvOpImageSampleProjDrefExplicitLod:
- case SpvOpImageFetch:
- case SpvOpImageGather:
- case SpvOpImageDrefGather:
- case SpvOpImageQueryLod: {
- /* All these types have the coordinate as their first real argument */
- struct vtn_ssa_value *coord = vtn_ssa_value(b, w[idx++]);
- has_coord = true;
- p->src = nir_src_for_ssa(coord->def);
- p->src_type = nir_tex_src_coord;
- p++;
- break;
- }
-
- default:
- break;
- }
-
- /* These all have an explicit depth value as their next source */
- switch (opcode) {
- case SpvOpImageSampleDrefImplicitLod:
- case SpvOpImageSampleDrefExplicitLod:
- case SpvOpImageSampleProjDrefImplicitLod:
- case SpvOpImageSampleProjDrefExplicitLod:
- (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparitor);
- break;
- default:
- break;
- }
-
- /* For OpImageQuerySizeLod, we always have an LOD */
- if (opcode == SpvOpImageQuerySizeLod)
- (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
+ const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
+ const bool is_array = glsl_sampler_type_is_array(image_type);
+ const bool is_shadow = glsl_sampler_type_is_shadow(image_type);
/* Figure out the base texture operation */
nir_texop texop;
break;
case SpvOpImageQuerySamples:
+ texop = nir_texop_texture_samples;
+ break;
+
default:
unreachable("Unhandled opcode");
}
+ nir_tex_src srcs[8]; /* 8 should be enough */
+ nir_tex_src *p = srcs;
+
+ unsigned idx = 4;
+
+ struct nir_ssa_def *coord;
+ unsigned coord_components;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageFetch:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ case SpvOpImageQueryLod: {
+ /* All these types have the coordinate as their first real argument */
+ switch (sampler_dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ case GLSL_SAMPLER_DIM_BUF:
+ coord_components = 1;
+ break;
+ case GLSL_SAMPLER_DIM_2D:
+ case GLSL_SAMPLER_DIM_RECT:
+ case GLSL_SAMPLER_DIM_MS:
+ coord_components = 2;
+ break;
+ case GLSL_SAMPLER_DIM_3D:
+ case GLSL_SAMPLER_DIM_CUBE:
+ coord_components = 3;
+ break;
+ default:
+ unreachable("Invalid sampler type");
+ }
+
+ if (is_array && texop != nir_texop_lod)
+ coord_components++;
+
+ coord = vtn_ssa_value(b, w[idx++])->def;
+ p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
+ (1 << coord_components) - 1));
+ p->src_type = nir_tex_src_coord;
+ p++;
+ break;
+ }
+
+ default:
+ coord = NULL;
+ coord_components = 0;
+ break;
+ }
+
+ switch (opcode) {
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ /* These have the projector as the last coordinate component */
+ p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
+ p->src_type = nir_tex_src_projector;
+ p++;
+ break;
+
+ default:
+ break;
+ }
+
+ unsigned gather_component = 0;
+ switch (opcode) {
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageDrefGather:
+ /* These all have an explicit depth value as their next source */
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
+ break;
+
+ case SpvOpImageGather:
+ /* This has a component as its next source */
+ gather_component =
+ vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
+ break;
+
+ default:
+ break;
+ }
+
+ /* For OpImageQuerySizeLod, we always have an LOD */
+ if (opcode == SpvOpImageQuerySizeLod)
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
+
/* Now we need to handle some number of optional arguments */
+ const struct vtn_ssa_value *gather_offsets = NULL;
if (idx < count) {
uint32_t operands = w[idx++];
if (operands & SpvImageOperandsLodMask) {
assert(texop == nir_texop_txl || texop == nir_texop_txf ||
- texop == nir_texop_txf_ms || texop == nir_texop_txs);
+ texop == nir_texop_txs);
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
}
if (operands & SpvImageOperandsGradMask) {
- assert(texop == nir_texop_tex);
+ assert(texop == nir_texop_txl);
texop = nir_texop_txd;
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
operands & SpvImageOperandsConstOffsetMask)
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
- if (operands & SpvImageOperandsConstOffsetsMask)
- assert(!"Constant offsets to texture gather not yet implemented");
+ if (operands & SpvImageOperandsConstOffsetsMask) {
+ gather_offsets = vtn_ssa_value(b, w[idx++]);
+ (*p++) = (nir_tex_src){};
+ }
if (operands & SpvImageOperandsSampleMask) {
assert(texop == nir_texop_txf_ms);
memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
- instr->sampler_dim = glsl_get_sampler_dim(image_type);
- instr->is_array = glsl_sampler_type_is_array(image_type);
- instr->is_shadow = glsl_sampler_type_is_shadow(image_type);
- instr->is_new_style_shadow = instr->is_shadow;
-
- if (has_coord) {
- switch (instr->sampler_dim) {
- case GLSL_SAMPLER_DIM_1D:
- case GLSL_SAMPLER_DIM_BUF:
- instr->coord_components = 1;
- break;
- case GLSL_SAMPLER_DIM_2D:
- case GLSL_SAMPLER_DIM_RECT:
- case GLSL_SAMPLER_DIM_MS:
- instr->coord_components = 2;
- break;
- case GLSL_SAMPLER_DIM_3D:
- case GLSL_SAMPLER_DIM_CUBE:
- instr->coord_components = 3;
- break;
- default:
- assert("Invalid sampler type");
- }
-
- if (instr->is_array)
- instr->coord_components++;
- } else {
- instr->coord_components = 0;
- }
+ instr->coord_components = coord_components;
+ instr->sampler_dim = sampler_dim;
+ instr->is_array = is_array;
+ instr->is_shadow = is_shadow;
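+ /* A "new style" shadow op returns a single scalar rather than the legacy
+ * vec4, which is what a one-component result type indicates.
+ */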
+ instr->is_new_style_shadow =
+ is_shadow && glsl_get_components(ret_type->type) == 1;
+ instr->component = gather_component;
switch (glsl_get_sampler_result_type(image_type)) {
case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
unreachable("Invalid base type for sampler result");
}
- nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
+ nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
+ nir_deref_var *texture;
if (sampled.image) {
- nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
- instr->texture = nir_deref_as_var(nir_copy_deref(instr, &image->deref));
+ nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
+ texture = image;
} else {
- instr->texture = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ texture = sampler;
}
+ instr->texture = nir_deref_var_clone(texture, instr);
+
switch (instr->op) {
case nir_texop_tex:
case nir_texop_txb:
case nir_texop_txl:
case nir_texop_txd:
/* These operations require a sampler */
- instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ instr->sampler = nir_deref_var_clone(sampler, instr);
break;
case nir_texop_txf:
case nir_texop_txf_ms:
/* These don't */
instr->sampler = NULL;
break;
+ case nir_texop_txf_ms_mcs:
+ unreachable("unexpected nir_texop_txf_ms_mcs");
}
nir_ssa_dest_init(&instr->instr, &instr->dest,
assert(glsl_get_vector_elements(ret_type->type) ==
nir_tex_instr_dest_size(instr));
+ nir_ssa_def *def;
+ nir_instr *instruction;
+ if (gather_offsets) {
+ assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
+ assert(glsl_get_length(gather_offsets->type) == 4);
+ nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};
+
+ /* Copy the current instruction 4x */
+ for (uint32_t i = 1; i < 4; i++) {
+ instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
+ instrs[i]->op = instr->op;
+ instrs[i]->coord_components = instr->coord_components;
+ instrs[i]->sampler_dim = instr->sampler_dim;
+ instrs[i]->is_array = instr->is_array;
+ instrs[i]->is_shadow = instr->is_shadow;
+ instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
+ instrs[i]->component = instr->component;
+ instrs[i]->dest_type = instr->dest_type;
+ instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
+ instrs[i]->sampler = NULL;
+
+ memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
+
+ nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
+ nir_tex_instr_dest_size(instr), 32, NULL);
+ }
+
+ /* Fill in the last argument with the offset from the passed in offsets
+ * and insert the instruction into the stream.
+ */
+ for (uint32_t i = 0; i < 4; i++) {
+ nir_tex_src src;
+ src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
+ src.src_type = nir_tex_src_offset;
+ instrs[i]->src[instrs[i]->num_srcs - 1] = src;
+ nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
+ }
+
+ /* Combine the results of the 4 instructions by taking their .w
+ * components
+ */
+ nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
+ nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
+ vec4->dest.write_mask = 0xf;
+ for (uint32_t i = 0; i < 4; i++) {
+ vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
+ vec4->src[i].swizzle[0] = 3;
+ }
+ def = &vec4->dest.dest.ssa;
+ instruction = &vec4->instr;
+ } else {
+ def = &instr->dest.ssa;
+ instruction = &instr->instr;
+ }
+
val->ssa = vtn_create_ssa_value(b, ret_type->type);
- val->ssa->def = &instr->dest.ssa;
+ val->ssa->def = def;
+
+ nir_builder_instr_insert(&b->nb, instruction);
+}
- nir_builder_instr_insert(&b->nb, &instr->instr);
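+
+/* NIR has no dedicated inc/dec/sub atomics: IIncrement, IDecrement, and
+ * ISub become adds of 1, -1, and the negated operand respectively; the
+ * remaining opcodes pass their value operand(s) straight through.
+ */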
+static void
+fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, nir_src *src)
+{
+ switch (opcode) {
+ case SpvOpAtomicIIncrement:
+ src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
+ break;
+
+ case SpvOpAtomicIDecrement:
+ src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
+ break;
+
+ case SpvOpAtomicISub:
+ src[0] =
+ nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
+ break;
+
+ case SpvOpAtomicCompareExchange:
+ src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
+ src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
+ break;
+
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor:
+ src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+ break;
+
+ default:
+ unreachable("Invalid SPIR-V atomic");
+ }
}
static nir_ssa_def *
vtn_push_value(b, w[2], vtn_value_type_image_pointer);
val->image = ralloc(b, struct vtn_image_pointer);
- val->image->image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
val->image->coord = get_image_coord(b, w[4]);
val->image->sample = vtn_ssa_value(b, w[5])->def;
return;
case SpvOpAtomicIDecrement:
case SpvOpAtomicIAdd:
case SpvOpAtomicISub:
+ case SpvOpAtomicLoad:
case SpvOpAtomicSMin:
case SpvOpAtomicUMin:
case SpvOpAtomicSMax:
image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
break;
+ case SpvOpAtomicStore:
+ image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
+ break;
+
case SpvOpImageQuerySize:
- image.image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
image.coord = NULL;
image.sample = NULL;
break;
case SpvOpImageRead:
- image.image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
+ image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
image.coord = get_image_coord(b, w[4]);
if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
break;
case SpvOpImageWrite:
- image.image =
- vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
+ image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
image.coord = get_image_coord(b, w[2]);
/* texel = w[3] */
OP(ImageQuerySize, size)
OP(ImageRead, load)
OP(ImageWrite, store)
+ OP(AtomicLoad, load)
+ OP(AtomicStore, store)
OP(AtomicExchange, atomic_exchange)
OP(AtomicCompareExchange, atomic_comp_swap)
OP(AtomicIIncrement, atomic_add)
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
- nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
- intrin->variables[0] =
- nir_deref_as_var(nir_copy_deref(&intrin->instr, &image_deref->deref));
+ nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
+ intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
/* ImageQuerySize doesn't take any extra parameters */
if (opcode != SpvOpImageQuerySize) {
}
switch (opcode) {
+ case SpvOpAtomicLoad:
case SpvOpImageQuerySize:
case SpvOpImageRead:
break;
+ case SpvOpAtomicStore:
+ intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+ break;
case SpvOpImageWrite:
intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
break;
+
+ case SpvOpAtomicCompareExchange:
case SpvOpAtomicIIncrement:
- intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
- break;
case SpvOpAtomicIDecrement:
- intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
- break;
-
case SpvOpAtomicExchange:
case SpvOpAtomicIAdd:
case SpvOpAtomicSMin:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
- intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
- break;
-
- case SpvOpAtomicCompareExchange:
- intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
- intrin->src[3] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
- break;
-
- case SpvOpAtomicISub:
- intrin->src[2] = nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
+ fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
break;
default:
if (opcode != SpvOpImageWrite) {
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
- nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);
+
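+ /* Most image intrinsics have a fixed destination size; image_size is the
+ * exception and returns as many components as the result type requests.
+ */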
+ unsigned dest_components =
+ nir_intrinsic_infos[intrin->intrinsic].dest_components;
+ if (intrin->intrinsic == nir_intrinsic_image_size) {
+ dest_components = intrin->num_components =
+ glsl_get_vector_elements(type->type);
+ }
+
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest,
+ dest_components, 32, NULL);
nir_builder_instr_insert(&b->nb, &intrin->instr);
- /* The image intrinsics always return 4 channels but we may not want
- * that many. Emit a mov to trim it down.
- */
- unsigned swiz[4] = {0, 1, 2, 3};
val->ssa = vtn_create_ssa_value(b, type->type);
- val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
- glsl_get_vector_elements(type->type), false);
+ val->ssa->def = &intrin->dest.ssa;
} else {
nir_builder_instr_insert(&b->nb, &intrin->instr);
}
get_ssbo_nir_atomic_op(SpvOp opcode)
{
switch (opcode) {
+ case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
+ case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
OP(AtomicExchange, atomic_exchange)
OP(AtomicCompareExchange, atomic_comp_swap)
get_shared_nir_atomic_op(SpvOp opcode)
{
switch (opcode) {
+ case SpvOpAtomicLoad: return nir_intrinsic_load_var;
+ case SpvOpAtomicStore: return nir_intrinsic_store_var;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
OP(AtomicExchange, atomic_exchange)
OP(AtomicCompareExchange, atomic_comp_swap)
}
static void
-fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
- const uint32_t *w, nir_src *src)
+vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
{
+ struct vtn_pointer *ptr;
+ nir_intrinsic_instr *atomic;
+
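+ /* Shared (workgroup) atomics are emitted as variable-deref intrinsics
+ * while SSBO atomics take an explicit index and offset, so the pointer
+ * is lowered along two separate paths below.
+ */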
switch (opcode) {
+ case SpvOpAtomicLoad:
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
case SpvOpAtomicIIncrement:
- src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
- break;
-
case SpvOpAtomicIDecrement:
- src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
- break;
-
- case SpvOpAtomicISub:
- src[0] =
- nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
- break;
-
- case SpvOpAtomicCompareExchange:
- src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
- src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
- break;
- /* Fall through */
-
- case SpvOpAtomicExchange:
case SpvOpAtomicIAdd:
+ case SpvOpAtomicISub:
case SpvOpAtomicSMin:
case SpvOpAtomicUMin:
case SpvOpAtomicSMax:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
- src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+ ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+ break;
+
+ case SpvOpAtomicStore:
+ ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
break;
default:
unreachable("Invalid SPIR-V atomic");
}
-}
-
-static void
-vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
- const uint32_t *w, unsigned count)
-{
- struct vtn_access_chain *chain =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
- nir_intrinsic_instr *atomic;
/*
SpvScope scope = w[4];
SpvMemorySemanticsMask semantics = w[5];
*/
- if (chain->var->mode == vtn_variable_mode_workgroup) {
- nir_deref *deref = &vtn_access_chain_to_deref(b, chain)->deref;
+ if (ptr->mode == vtn_variable_mode_workgroup) {
+ nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
+ const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
- atomic->variables[0] = nir_deref_as_var(nir_copy_deref(atomic, deref));
- fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
+ atomic->variables[0] = nir_deref_var_clone(deref, atomic);
+
+ switch (opcode) {
+ case SpvOpAtomicLoad:
+ atomic->num_components = glsl_get_vector_elements(deref_type);
+ break;
+
+ case SpvOpAtomicStore:
+ atomic->num_components = glsl_get_vector_elements(deref_type);
+ nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
+ atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+ break;
+
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
+ case SpvOpAtomicIIncrement:
+ case SpvOpAtomicIDecrement:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicISub:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor:
+ fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
+ break;
+
+ default:
+ unreachable("Invalid SPIR-V atomic");
+ }
} else {
- assert(chain->var->mode == vtn_variable_mode_ssbo);
- struct vtn_type *type;
+ assert(ptr->mode == vtn_variable_mode_ssbo);
nir_ssa_def *offset, *index;
- offset = vtn_access_chain_to_offset(b, chain, &index, &type, NULL, false);
+ offset = vtn_pointer_to_offset(b, ptr, &index, NULL);
nir_intrinsic_op op = get_ssbo_nir_atomic_op(opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
- atomic->src[0] = nir_src_for_ssa(index);
- atomic->src[1] = nir_src_for_ssa(offset);
- fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
+
+ switch (opcode) {
+ case SpvOpAtomicLoad:
+ atomic->num_components = glsl_get_vector_elements(ptr->type->type);
+ atomic->src[0] = nir_src_for_ssa(index);
+ atomic->src[1] = nir_src_for_ssa(offset);
+ break;
+
+ case SpvOpAtomicStore:
+ atomic->num_components = glsl_get_vector_elements(ptr->type->type);
+ nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
+ atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+ atomic->src[1] = nir_src_for_ssa(index);
+ atomic->src[2] = nir_src_for_ssa(offset);
+ break;
+
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
+ case SpvOpAtomicIIncrement:
+ case SpvOpAtomicIDecrement:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicISub:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor:
+ atomic->src[0] = nir_src_for_ssa(index);
+ atomic->src[1] = nir_src_for_ssa(offset);
+ fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
+ break;
+
+ default:
+ unreachable("Invalid SPIR-V atomic");
+ }
}
- nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, 32, NULL);
+ if (opcode != SpvOpAtomicStore) {
+ struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
- struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->ssa = rzalloc(b, struct vtn_ssa_value);
- val->ssa->def = &atomic->dest.ssa;
- val->ssa->type = type->type;
+ nir_ssa_dest_init(&atomic->instr, &atomic->dest,
+ glsl_get_vector_elements(type->type),
+ glsl_get_bit_size(type->type), NULL);
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = rzalloc(b, struct vtn_ssa_value);
+ val->ssa->def = &atomic->dest.ssa;
+ val->ssa->type = type->type;
+ }
nir_builder_instr_insert(&b->nb, &atomic->instr);
}
for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
nir_alu_instr *vec = create_vec(b->shader,
glsl_get_matrix_columns(src->type),
- glsl_get_bit_size(glsl_get_base_type(src->type)));
+ glsl_get_bit_size(src->type));
if (glsl_type_is_vector_or_scalar(src->type)) {
vec->src[0].src = nir_src_for_ssa(src->def);
vec->src[0].swizzle[0] = i;
nir_alu_instr *vec = create_vec(b->shader, num_components,
srcs[0]->bit_size);
+ /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+ *
+ * "When constructing a vector, there must be at least two Constituent
+ * operands."
+ */
+ assert(num_srcs >= 2);
+
unsigned dest_idx = 0;
for (unsigned i = 0; i < num_srcs; i++) {
nir_ssa_def *src = srcs[i];
+ assert(dest_idx + src->num_components <= num_components);
for (unsigned j = 0; j < src->num_components; j++) {
vec->src[dest_idx].src = nir_src_for_ssa(src);
vec->src[dest_idx].swizzle[0] = j;
}
}
+ /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+ *
+ * "When constructing a vector, the total number of components in all
+ * the operands must equal the number of components in Result Type."
+ */
+ assert(dest_idx == num_components);
+
nir_builder_instr_insert(&b->nb, &vec->instr);
return &vec->dest.dest.ssa;
}
}
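+
+/* Warn rather than fail when the consumer has not advertised support for an
+ * optional capability; b->ext is the caller-supplied table of supported
+ * features.
+ */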
+#define spv_check_supported(name, cap) do { \
+ if (!(b->ext && b->ext->name)) \
+ vtn_warn("Unsupported SPIR-V capability: %s", \
+ spirv_capability_to_string(cap)); \
+ } while (0)
+
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
case SpvCapabilityMatrix:
case SpvCapabilityShader:
case SpvCapabilityGeometry:
- case SpvCapabilityTessellationPointSize:
case SpvCapabilityGeometryPointSize:
case SpvCapabilityUniformBufferArrayDynamicIndexing:
case SpvCapabilitySampledImageArrayDynamicIndexing:
case SpvCapabilitySampledBuffer:
case SpvCapabilityImageBuffer:
case SpvCapabilityImageQuery:
- break;
+ case SpvCapabilityDerivativeControl:
+ case SpvCapabilityInterpolationFunction:
+ case SpvCapabilityMultiViewport:
+ case SpvCapabilitySampleRateShading:
case SpvCapabilityClipDistance:
case SpvCapabilityCullDistance:
+ case SpvCapabilityInputAttachment:
+ case SpvCapabilityImageGatherExtended:
+ case SpvCapabilityStorageImageExtendedFormats:
+ break;
+
case SpvCapabilityGeometryStreams:
- fprintf(stderr, "WARNING: Unsupported SPIR-V Capability\n");
+ case SpvCapabilityLinkage:
+ case SpvCapabilityVector16:
+ case SpvCapabilityFloat16Buffer:
+ case SpvCapabilityFloat16:
+ case SpvCapabilityInt64Atomics:
+ case SpvCapabilityAtomicStorage:
+ case SpvCapabilityInt16:
+ case SpvCapabilityStorageImageMultisample:
+ case SpvCapabilityImageCubeArray:
+ case SpvCapabilityInt8:
+ case SpvCapabilitySparseResidency:
+ case SpvCapabilityMinLod:
+ case SpvCapabilityTransformFeedback:
+ vtn_warn("Unsupported SPIR-V capability: %s",
+ spirv_capability_to_string(cap));
+ break;
+
+ case SpvCapabilityFloat64:
+ spv_check_supported(float64, cap);
+ break;
+ case SpvCapabilityInt64:
+ spv_check_supported(int64, cap);
+ break;
+
+ case SpvCapabilityAddresses:
+ case SpvCapabilityKernel:
+ case SpvCapabilityImageBasic:
+ case SpvCapabilityImageReadWrite:
+ case SpvCapabilityImageMipmap:
+ case SpvCapabilityPipes:
+ case SpvCapabilityGroups:
+ case SpvCapabilityDeviceEnqueue:
+ case SpvCapabilityLiteralSampler:
+ case SpvCapabilityGenericPointer:
+ vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
+ spirv_capability_to_string(cap));
+ break;
+
+ case SpvCapabilityImageMSArray:
+ spv_check_supported(image_ms_array, cap);
+ break;
+
+ case SpvCapabilityTessellation:
+ case SpvCapabilityTessellationPointSize:
+ spv_check_supported(tessellation, cap);
+ break;
+
+ case SpvCapabilityDrawParameters:
+ spv_check_supported(draw_parameters, cap);
+ break;
+
+ case SpvCapabilityStorageImageReadWithoutFormat:
+ spv_check_supported(image_read_without_format, cap);
+ break;
+
+ case SpvCapabilityStorageImageWriteWithoutFormat:
+ spv_check_supported(image_write_without_format, cap);
+ break;
+
+ case SpvCapabilityMultiView:
+ spv_check_supported(multiview, cap);
break;
+
default:
- assert(!"Unsupported capability");
+ unreachable("Unhandled capability");
}
break;
}
b->shader->info.cs.local_size[2] = mode->literals[2];
break;
case SpvExecutionModeLocalSizeHint:
- break; /* Nothing do do with this */
+ break; /* Nothing to do with this */
case SpvExecutionModeOutputVertices:
- assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.vertices_out = mode->literals[0];
+ if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL) {
+ b->shader->info.tess.tcs_vertices_out = mode->literals[0];
+ } else {
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info.gs.vertices_out = mode->literals[0];
+ }
break;
case SpvExecutionModeInputPoints:
case SpvExecutionModeInputTrianglesAdjacency:
case SpvExecutionModeQuads:
case SpvExecutionModeIsolines:
- if (b->shader->stage == MESA_SHADER_GEOMETRY) {
+ if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL) {
+ b->shader->info.tess.primitive_mode =
+ gl_primitive_from_spv_execution_mode(mode->exec_mode);
+ } else {
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
b->shader->info.gs.vertices_in =
vertices_in_from_spv_execution_mode(mode->exec_mode);
- } else {
- assert(!"Tesselation shaders not yet supported");
}
break;
break;
case SpvExecutionModeSpacingEqual:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
+ break;
case SpvExecutionModeSpacingFractionalEven:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
+ break;
case SpvExecutionModeSpacingFractionalOdd:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
+ break;
case SpvExecutionModeVertexOrderCw:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ /* Vulkan's notion of CCW seems to match the hardware backends, but to be
+ * the opposite of OpenGL. Currently NIR follows GL semantics, so we set
+ * it backwards here.
+ */
+ b->shader->info.tess.ccw = true;
+ break;
case SpvExecutionModeVertexOrderCcw:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ /* Backwards; see above */
+ b->shader->info.tess.ccw = false;
+ break;
case SpvExecutionModePointMode:
- assert(!"TODO: Add tessellation metadata");
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info.tess.point_mode = true;
break;
case SpvExecutionModePixelCenterInteger:
+ b->pixel_center_integer = true;
+ break;
+
case SpvExecutionModeXfb:
assert(!"Unhandled execution mode");
break;
case SpvExecutionModeVecTypeHint:
case SpvExecutionModeContractionOff:
break; /* OpenCL */
+
+ default:
+ unreachable("Unhandled execution mode");
}
}
vtn_handle_constant(b, opcode, w, count);
break;
+ case SpvOpUndef:
case SpvOpVariable:
vtn_handle_variables(b, opcode, w, count);
break;
break;
case SpvOpImageQuerySize: {
- struct vtn_access_chain *image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
- if (glsl_type_is_image(image->var->var->interface_type)) {
+ struct vtn_pointer *image =
+ vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+ if (image->mode == vtn_variable_mode_image) {
vtn_handle_image(b, opcode, w, count);
} else {
+ assert(image->mode == vtn_variable_mode_sampler);
vtn_handle_texture(b, opcode, w, count);
}
break;
}
+ case SpvOpAtomicLoad:
case SpvOpAtomicExchange:
case SpvOpAtomicCompareExchange:
case SpvOpAtomicCompareExchangeWeak:
if (pointer->value_type == vtn_value_type_image_pointer) {
vtn_handle_image(b, opcode, w, count);
} else {
- assert(pointer->value_type == vtn_value_type_access_chain);
+ assert(pointer->value_type == vtn_value_type_pointer);
+ vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
+ }
+ break;
+ }
+
+ case SpvOpAtomicStore: {
+ struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
+ if (pointer->value_type == vtn_value_type_image_pointer) {
+ vtn_handle_image(b, opcode, w, count);
+ } else {
+ assert(pointer->value_type == vtn_value_type_pointer);
vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
}
break;
spirv_to_nir(const uint32_t *words, size_t word_count,
struct nir_spirv_specialization *spec, unsigned num_spec,
gl_shader_stage stage, const char *entry_point_name,
+ const struct nir_spirv_supported_extensions *ext,
const nir_shader_compiler_options *options)
{
const uint32_t *word_end = words + word_count;
exec_list_make_empty(&b->functions);
b->entry_point_stage = stage;
b->entry_point_name = entry_point_name;
+ b->ext = ext;
/* Handle all the preamble instructions */
words = vtn_foreach_instruction(b, words, word_end,
return NULL;
}
- b->shader = nir_shader_create(NULL, stage, options);
+ b->shader = nir_shader_create(NULL, stage, options, NULL);
/* Set shader info defaults */
b->shader->info.gs.invocations = 1;