#include "nir/nir_deref.h"
#include "spirv_info.h"
+#include "util/u_math.h"
+
#include <stdio.h>
void
nir_load_const_instr *load =
nir_load_const_instr_create(b->shader, num_components, bit_size);
- load->value = constant->values[0];
+ memcpy(load->value, constant->values[0],
+ sizeof(nir_const_value) * load->def.num_components);
nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
val->def = &load->def;
nir_load_const_instr *load =
nir_load_const_instr_create(b->shader, rows, bit_size);
- load->value = constant->values[i];
+ memcpy(load->value, constant->values[i],
+ sizeof(nir_const_value) * load->def.num_components);
nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
col_val->def = &load->def;
} else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
&& (b->options && b->options->caps.trinary_minmax)) {
val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
+ } else if (strcmp(ext, "OpenCL.std") == 0) {
+ val->ext_handler = vtn_handle_opencl_instruction;
} else {
vtn_fail("Unsupported extension: %s", ext);
}
}
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
}
break;
case SpvOpDecorate:
+ case SpvOpDecorateId:
case SpvOpMemberDecorate:
- case SpvOpExecutionMode: {
+ case SpvOpDecorateString:
+ case SpvOpMemberDecorateString:
+ case SpvOpExecutionMode:
+ case SpvOpExecutionModeId: {
struct vtn_value *val = vtn_untyped_value(b, target);
struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
switch (opcode) {
case SpvOpDecorate:
+ case SpvOpDecorateId:
+ case SpvOpDecorateString:
dec->scope = VTN_DEC_DECORATION;
break;
case SpvOpMemberDecorate:
+ case SpvOpMemberDecorateString:
dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
"Member argument of OpMemberDecorate too large");
break;
case SpvOpExecutionMode:
+ case SpvOpExecutionModeId:
dec->scope = VTN_DEC_EXECUTION_MODE;
break;
default:
unreachable("Invalid decoration opcode");
}
dec->decoration = *(w++);
- dec->literals = w;
+ dec->operands = w;
/* Link into the list */
dec->next = val->decoration;
struct vtn_type *type;
};
+/**
+ * Returns true if the given type contains a struct decorated with Block
+ * or BufferBlock.
+ */
+bool
+vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
+{
+ switch (type->base_type) {
+ case vtn_base_type_array:
+ return vtn_type_contains_block(b, type->array_element);
+ case vtn_base_type_struct:
+ if (type->block || type->buffer_block)
+ return true;
+ for (unsigned i = 0; i < type->length; i++) {
+ if (vtn_type_contains_block(b, type->members[i]))
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
* OpStore, or OpCopyMemory between them without breaking anything.
* Technically, the SPIR-V rules require the exact same type ID but this lets
vtn_fail("Invalid base type");
}
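+/* Walks past any array dimensions on a type and returns the innermost
+ * element type.
+ */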
+struct vtn_type *
+vtn_type_without_array(struct vtn_type *type)
+{
+ while (type->base_type == vtn_base_type_array)
+ type = type->array_element;
+ return type;
+}
+
/* does a shallow copy of a vtn_type */
static struct vtn_type *
return type;
}
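+/* Copies the member's type before tagging it so the access flags don't
+ * leak to other users of the (possibly shared) vtn_type.
+ */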
+static void
+vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
+ int member, enum gl_access_qualifier access)
+{
+ type->members[member] = vtn_type_copy(b, type->members[member]);
+ type = type->members[member];
+
+ type->access |= access;
+}
+
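+/* Decoration callback that records an ArrayStride decoration on a type;
+ * the SPIR-V spec requires the stride to be non-zero.
+ */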
+static void
+array_stride_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *void_ctx)
+{
+ struct vtn_type *type = val->type;
+
+ if (dec->decoration == SpvDecorationArrayStride) {
+ vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
+ type->stride = dec->operands[0];
+ }
+}
+
static void
struct_member_decoration_cb(struct vtn_builder *b,
struct vtn_value *val, int member,
assert(member < ctx->num_fields);
switch (dec->decoration) {
+ case SpvDecorationRelaxedPrecision:
+ case SpvDecorationUniform:
+ break; /* FIXME: Do nothing with this for now. */
case SpvDecorationNonWritable:
+ vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
+ break;
case SpvDecorationNonReadable:
- case SpvDecorationRelaxedPrecision:
+ vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
+ break;
case SpvDecorationVolatile:
+ vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
+ break;
case SpvDecorationCoherent:
- case SpvDecorationUniform:
- break; /* FIXME: Do nothing with this for now. */
+ vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
+ break;
case SpvDecorationNoPerspective:
ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
break;
break;
case SpvDecorationStream:
/* Vulkan only allows one GS stream */
- vtn_assert(dec->literals[0] == 0);
+ vtn_assert(dec->operands[0] == 0);
break;
case SpvDecorationLocation:
- ctx->fields[member].location = dec->literals[0];
+ ctx->fields[member].location = dec->operands[0];
break;
case SpvDecorationComponent:
break; /* FIXME: What should we do with these? */
case SpvDecorationBuiltIn:
ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
ctx->type->members[member]->is_builtin = true;
- ctx->type->members[member]->builtin = dec->literals[0];
+ ctx->type->members[member]->builtin = dec->operands[0];
ctx->type->builtin_block = true;
break;
case SpvDecorationOffset:
- ctx->type->offsets[member] = dec->literals[0];
+ ctx->type->offsets[member] = dec->operands[0];
+ ctx->fields[member].offset = dec->operands[0];
break;
case SpvDecorationMatrixStride:
/* Handled as a second pass */
break;
case SpvDecorationCPacked:
+ if (b->shader->info.stage != MESA_SHADER_KERNEL)
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
+ spirv_decoration_to_string(dec->decoration));
+ else
+ ctx->type->packed = true;
+ break;
+
case SpvDecorationSaturatedConversion:
case SpvDecorationFuncParamAttr:
case SpvDecorationFPRoundingMode:
case SpvDecorationFPFastMathMode:
case SpvDecorationAlignment:
- vtn_warn("Decoration only allowed for CL-style kernels: %s",
- spirv_decoration_to_string(dec->decoration));
+ if (b->shader->info.stage != MESA_SHADER_KERNEL) {
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
+ spirv_decoration_to_string(dec->decoration));
+ }
+ break;
+
+ case SpvDecorationUserSemantic:
+ /* User semantic decorations can safely be ignored by the driver. */
break;
default:
- vtn_fail("Unhandled decoration");
+ vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
}
}
+/** Chases the array type all the way down to the tail and rewrites the
+ * glsl_types to be based off the tail's glsl_type.
+ */
+static void
+vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
+{
+ if (type->base_type != vtn_base_type_array)
+ return;
+
+ vtn_array_type_rewrite_glsl_type(type->array_element);
+
+ type->type = glsl_array_type(type->array_element->type,
+ type->length, type->stride);
+}
+
/* Matrix strides are handled as a separate pass because we need to know
* whether the matrix is row-major or not first.
*/
vtn_fail_if(member < 0,
"The MatrixStride decoration is only allowed on members "
"of OpTypeStruct");
+ vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");
struct member_decoration_ctx *ctx = void_ctx;
if (mat_type->row_major) {
mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
mat_type->stride = mat_type->array_element->stride;
- mat_type->array_element->stride = dec->literals[0];
+ mat_type->array_element->stride = dec->operands[0];
+
+ mat_type->type = glsl_explicit_matrix_type(mat_type->type,
+ dec->operands[0], true);
+ mat_type->array_element->type = glsl_get_column_type(mat_type->type);
} else {
vtn_assert(mat_type->array_element->stride > 0);
- mat_type->stride = dec->literals[0];
+ mat_type->stride = dec->operands[0];
+
+ mat_type->type = glsl_explicit_matrix_type(mat_type->type,
+ dec->operands[0], false);
}
+
+ /* Now that we've replaced the glsl_type with a properly strided matrix
+ * type, rewrite the member type so that it's an array of the proper kind
+ * of glsl_type.
+ */
+ vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
+ ctx->fields[member].type = ctx->type->members[member]->type;
+}
+
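+/* Decoration callback that flags a whole struct type as Block or
+ * BufferBlock; member decorations are ignored here.
+ */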
+static void
+struct_block_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *ctx)
+{
+ if (member != -1)
+ return;
+
+ struct vtn_type *type = val->type;
+ if (dec->decoration == SpvDecorationBlock)
+ type->block = true;
+ else if (dec->decoration == SpvDecorationBufferBlock)
+ type->buffer_block = true;
}
static void
switch (dec->decoration) {
case SpvDecorationArrayStride:
- vtn_assert(type->base_type == vtn_base_type_matrix ||
- type->base_type == vtn_base_type_array ||
+ vtn_assert(type->base_type == vtn_base_type_array ||
type->base_type == vtn_base_type_pointer);
- type->stride = dec->literals[0];
break;
case SpvDecorationBlock:
vtn_assert(type->base_type == vtn_base_type_struct);
- type->block = true;
+ vtn_assert(type->block);
break;
case SpvDecorationBufferBlock:
vtn_assert(type->base_type == vtn_base_type_struct);
- type->buffer_block = true;
+ vtn_assert(type->buffer_block);
break;
case SpvDecorationGLSLShared:
case SpvDecorationGLSLPacked:
case SpvDecorationNonWritable:
case SpvDecorationNonReadable:
case SpvDecorationUniform:
- case SpvDecorationStream:
case SpvDecorationLocation:
case SpvDecorationComponent:
case SpvDecorationOffset:
case SpvDecorationXfbBuffer:
case SpvDecorationXfbStride:
+ case SpvDecorationUserSemantic:
vtn_warn("Decoration only allowed for struct members: %s",
spirv_decoration_to_string(dec->decoration));
break;
+ case SpvDecorationStream:
+      /* We don't need to do anything here, as the stream is filled in when
+       * applying the decoration to a variable. If the decoration is not on
+       * a struct member, just check that the type is in fact a struct.
+       */
+ vtn_assert(type->base_type == vtn_base_type_struct);
+ break;
+
case SpvDecorationRelaxedPrecision:
case SpvDecorationSpecId:
case SpvDecorationInvariant:
break;
case SpvDecorationCPacked:
+ if (b->shader->info.stage != MESA_SHADER_KERNEL)
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
+ spirv_decoration_to_string(dec->decoration));
+ else
+ type->packed = true;
+ break;
+
case SpvDecorationSaturatedConversion:
case SpvDecorationFuncParamAttr:
case SpvDecorationFPRoundingMode:
break;
default:
- vtn_fail("Unhandled decoration");
+ vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
}
}
case SpvImageFormatR16ui: return 0x8234; /* GL_R16UI */
case SpvImageFormatR8ui: return 0x8232; /* GL_R8UI */
default:
- vtn_fail("Invalid image format");
+ vtn_fail("Invalid image format: %s (%u)",
+ spirv_imageformat_to_string(format), format);
}
}
{
switch (type->base_type) {
case vtn_base_type_scalar: {
- uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
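+      /* Booleans are represented as 32-bit values in memory, so give them
+       * a full 4 bytes.
+       */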
+ uint32_t comp_size = glsl_type_is_boolean(type->type)
+ ? 4 : glsl_get_bit_size(type->type) / 8;
*size_out = comp_size;
*align_out = comp_size;
return type;
}
case vtn_base_type_vector: {
- uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
+ uint32_t comp_size = glsl_type_is_boolean(type->type)
+ ? 4 : glsl_get_bit_size(type->type) / 8;
unsigned align_comps = type->length == 3 ? 4 : type->length;
*size_out = comp_size * type->length;
*align_out = comp_size * align_comps;
vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
{
- struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
+ struct vtn_value *val = NULL;
- val->type = rzalloc(b, struct vtn_type);
- val->type->id = w[1];
+ /* In order to properly handle forward declarations, we have to defer
+ * allocation for pointer types.
+ */
+ if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
+ val = vtn_push_value(b, w[1], vtn_value_type_type);
+ vtn_fail_if(val->type != NULL,
+ "Only pointers can have forward declarations");
+ val->type = rzalloc(b, struct vtn_type);
+ val->type->id = w[1];
+ }
switch (opcode) {
case SpvOpTypeVoid:
val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
break;
default:
- vtn_fail("Invalid int bit size");
+ vtn_fail("Invalid int bit size: %u", bit_size);
}
val->type->length = 1;
break;
val->type->type = glsl_double_type();
break;
default:
- vtn_fail("Invalid float bit size");
+ vtn_fail("Invalid float bit size: %u", bit_size);
}
val->type->length = 1;
break;
val->type->base_type = vtn_base_type_vector;
val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
val->type->length = elems;
- val->type->stride = glsl_get_bit_size(base->type) / 8;
+ val->type->stride = glsl_type_is_boolean(val->type->type)
+ ? 4 : glsl_get_bit_size(base->type) / 8;
val->type->array_element = base;
break;
}
val->type->length = 0;
} else {
val->type->length =
- vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
+ vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0][0].u32;
}
val->type->base_type = vtn_base_type_array;
- val->type->type = glsl_array_type(array_element->type, val->type->length);
val->type->array_element = array_element;
- val->type->stride = 0;
+ if (b->shader->info.stage == MESA_SHADER_KERNEL)
+ val->type->stride = glsl_get_cl_size(array_element->type);
+
+ vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
+ val->type->type = glsl_array_type(array_element->type, val->type->length,
+ val->type->stride);
break;
}
val->type->length = num_fields;
val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
val->type->offsets = ralloc_array(b, unsigned, num_fields);
+ val->type->packed = false;
NIR_VLA(struct glsl_struct_field, fields, count);
for (unsigned i = 0; i < num_fields; i++) {
.type = val->type->members[i]->type,
.name = ralloc_asprintf(b, "field%d", i),
.location = -1,
+ .offset = -1,
};
}
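+   /* For kernels, assign field offsets up front using OpenCL's size and
+    * alignment rules.
+    */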
+ if (b->shader->info.stage == MESA_SHADER_KERNEL) {
+ unsigned offset = 0;
+ for (unsigned i = 0; i < num_fields; i++) {
+ offset = align(offset, glsl_get_cl_alignment(fields[i].type));
+ fields[i].offset = offset;
+ offset += glsl_get_cl_size(fields[i].type);
+ }
+ }
+
struct member_decoration_ctx ctx = {
.num_fields = num_fields,
.fields = fields,
vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);
- const char *name = val->name ? val->name : "struct";
+ vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);
- val->type->type = glsl_struct_type(fields, num_fields, name);
+ const char *name = val->name;
+
+ if (val->type->block || val->type->buffer_block) {
+ /* Packing will be ignored since types coming from SPIR-V are
+ * explicitly laid out.
+ */
+ val->type->type = glsl_interface_type(fields, num_fields,
+ /* packing */ 0, false,
+ name ? name : "block");
+ } else {
+ val->type->type = glsl_struct_type(fields, num_fields,
+ name ? name : "struct", false);
+ }
break;
}
break;
}
- case SpvOpTypePointer: {
+ case SpvOpTypePointer:
+ case SpvOpTypeForwardPointer: {
+ /* We can't blindly push the value because it might be a forward
+ * declaration.
+ */
+ val = vtn_untyped_value(b, w[1]);
+
SpvStorageClass storage_class = w[2];
- struct vtn_type *deref_type =
- vtn_value(b, w[3], vtn_value_type_type)->type;
- val->type->base_type = vtn_base_type_pointer;
- val->type->storage_class = storage_class;
- val->type->deref = deref_type;
+ if (val->value_type == vtn_value_type_invalid) {
+ val->value_type = vtn_value_type_type;
+ val->type = rzalloc(b, struct vtn_type);
+ val->type->id = w[1];
+ val->type->base_type = vtn_base_type_pointer;
+ val->type->storage_class = storage_class;
- if (storage_class == SpvStorageClassUniform ||
- storage_class == SpvStorageClassStorageBuffer) {
/* These can actually be stored to nir_variables and used as SSA
* values so they need a real glsl_type.
*/
- val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
+ enum vtn_variable_mode mode = vtn_storage_class_to_mode(
+ b, storage_class, NULL, NULL);
+ val->type->type = nir_address_format_to_glsl_type(
+ vtn_mode_to_address_format(b, mode));
+ } else {
+ vtn_fail_if(val->type->storage_class != storage_class,
+ "The storage classes of an OpTypePointer and any "
+ "OpTypeForwardPointers that provide forward "
+ "declarations of it must match.");
}
- if (storage_class == SpvStorageClassPushConstant) {
- /* These can actually be stored to nir_variables and used as SSA
- * values so they need a real glsl_type.
- */
- val->type->type = glsl_uint_type();
- }
+ if (opcode == SpvOpTypePointer) {
+ vtn_fail_if(val->type->deref != NULL,
+ "While OpTypeForwardPointer can be used to provide a "
+ "forward declaration of a pointer, OpTypePointer can "
+ "only be used once for a given id.");
- if (storage_class == SpvStorageClassWorkgroup &&
- b->options->lower_workgroup_access_to_offsets) {
- uint32_t size, align;
- val->type->deref = vtn_type_layout_std430(b, val->type->deref,
- &size, &align);
- val->type->length = size;
- val->type->align = align;
- /* These can actually be stored to nir_variables and used as SSA
- * values so they need a real glsl_type.
- */
- val->type->type = glsl_uint_type();
+ val->type->deref = vtn_value(b, w[3], vtn_value_type_type)->type;
+
+ vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
+
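+      /* For physical pointers, the stride is the dereferenced type's
+       * OpenCL size rounded up to its OpenCL alignment.
+       */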
+ if (b->physical_ptrs) {
+ switch (storage_class) {
+ case SpvStorageClassFunction:
+ case SpvStorageClassWorkgroup:
+ case SpvStorageClassCrossWorkgroup:
+ val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
+ glsl_get_cl_alignment(val->type->deref->type));
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (storage_class == SpvStorageClassWorkgroup &&
+ b->options->lower_workgroup_access_to_offsets) {
+ uint32_t size, align;
+ val->type->deref = vtn_type_layout_std430(b, val->type->deref,
+ &size, &align);
+ val->type->length = size;
+ val->type->align = align;
+ }
}
break;
}
case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
default:
- vtn_fail("Invalid SPIR-V image dimensionality");
+ vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
+ spirv_dim_to_string((SpvDim)w[3]), w[3]);
}
- bool is_shadow = w[4];
+   /* w[4]: as per the Vulkan spec "Validation Rules within a Module",
+    * the “Depth” operand of OpTypeImage is ignored.
+    */
bool is_array = w[5];
bool multisampled = w[6];
unsigned sampled = w[7];
glsl_get_base_type(sampled_type->type);
if (sampled == 1) {
val->type->sampled = true;
- val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
+ val->type->type = glsl_sampler_type(dim, false, is_array,
sampled_base_type);
} else if (sampled == 2) {
- vtn_assert(!is_shadow);
val->type->sampled = false;
val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
} else {
case SpvOpTypeQueue:
case SpvOpTypePipe:
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
+
+ if (val->type->base_type == vtn_base_type_struct &&
+ (val->type->block || val->type->buffer_block)) {
+ for (unsigned i = 0; i < val->type->length; i++) {
+ vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
+ "Block and BufferBlock decorations cannot decorate a "
+ "structure type that is nested at any level inside "
+ "another structure type decorated with Block or "
+ "BufferBlock.");
+ }
+ }
}
static nir_constant *
-vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
+vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
{
nir_constant *c = rzalloc(b, nir_constant);
- /* For pointers and other typeless things, we have to return something but
- * it doesn't matter what.
- */
- if (!type)
- return c;
-
- switch (glsl_get_base_type(type)) {
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_INT16:
- case GLSL_TYPE_UINT16:
- case GLSL_TYPE_UINT8:
- case GLSL_TYPE_INT8:
- case GLSL_TYPE_INT64:
- case GLSL_TYPE_UINT64:
- case GLSL_TYPE_BOOL:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_FLOAT16:
- case GLSL_TYPE_DOUBLE:
+ switch (type->base_type) {
+ case vtn_base_type_scalar:
+ case vtn_base_type_vector:
/* Nothing to do here. It's already initialized to zero */
break;
- case GLSL_TYPE_ARRAY:
- vtn_assert(glsl_get_length(type) > 0);
- c->num_elements = glsl_get_length(type);
+ case vtn_base_type_pointer: {
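+      /* What a null pointer looks like depends on the address format used
+       * for this pointer's storage class.
+       */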
+ enum vtn_variable_mode mode = vtn_storage_class_to_mode(
+ b, type->storage_class, type->deref, NULL);
+ nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
+
+ const nir_const_value *null_value = nir_address_format_null_value(addr_format);
+ memcpy(c->values[0], null_value,
+ sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
+ break;
+ }
+
+ case vtn_base_type_void:
+ case vtn_base_type_image:
+ case vtn_base_type_sampler:
+ case vtn_base_type_sampled_image:
+ case vtn_base_type_function:
+ /* For those we have to return something but it doesn't matter what. */
+ break;
+
+ case vtn_base_type_matrix:
+ case vtn_base_type_array:
+ vtn_assert(type->length > 0);
+ c->num_elements = type->length;
c->elements = ralloc_array(b, nir_constant *, c->num_elements);
- c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
+ c->elements[0] = vtn_null_constant(b, type->array_element);
for (unsigned i = 1; i < c->num_elements; i++)
c->elements[i] = c->elements[0];
break;
- case GLSL_TYPE_STRUCT:
- c->num_elements = glsl_get_length(type);
+ case vtn_base_type_struct:
+ c->num_elements = type->length;
c->elements = ralloc_array(b, nir_constant *, c->num_elements);
-
- for (unsigned i = 0; i < c->num_elements; i++) {
- c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));
- }
+ for (unsigned i = 0; i < c->num_elements; i++)
+ c->elements[i] = vtn_null_constant(b, type->members[i]);
break;
default:
struct spec_constant_value *const_value = data;
for (unsigned i = 0; i < b->num_specializations; i++) {
- if (b->specializations[i].id == dec->literals[0]) {
+ if (b->specializations[i].id == dec->operands[0]) {
if (const_value->is_double)
const_value->data64 = b->specializations[i].data64;
else
{
vtn_assert(member == -1);
if (dec->decoration != SpvDecorationBuiltIn ||
- dec->literals[0] != SpvBuiltInWorkgroupSize)
+ dec->operands[0] != SpvBuiltInWorkgroupSize)
return;
vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
-
- b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
- b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
- b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];
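+   /* Just stash the value for now; the workgroup size is applied to the
+    * shader later, once the rest of the module has been handled.
+    */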
+ b->workgroup_size_builtin = val;
}
static void
opcode == SpvOpSpecConstantFalse)
int_val = get_specialization(b, val, int_val);
- val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
+ val->constant->values[0][0].b = int_val != 0;
break;
}
int bit_size = glsl_get_bit_size(val->type->type);
switch (bit_size) {
case 64:
- val->constant->values->u64[0] = vtn_u64_literal(&w[3]);
+ val->constant->values[0][0].u64 = vtn_u64_literal(&w[3]);
break;
case 32:
- val->constant->values->u32[0] = w[3];
+ val->constant->values[0][0].u32 = w[3];
break;
case 16:
- val->constant->values->u16[0] = w[3];
+ val->constant->values[0][0].u16 = w[3];
break;
case 8:
- val->constant->values->u8[0] = w[3];
+ val->constant->values[0][0].u8 = w[3];
break;
default:
- vtn_fail("Unsupported SpvOpConstant bit size");
+ vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
}
break;
}
int bit_size = glsl_get_bit_size(val->type->type);
switch (bit_size) {
case 64:
- val->constant->values[0].u64[0] =
+ val->constant->values[0][0].u64 =
get_specialization64(b, val, vtn_u64_literal(&w[3]));
break;
case 32:
- val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
+ val->constant->values[0][0].u32 = get_specialization(b, val, w[3]);
break;
case 16:
- val->constant->values[0].u16[0] = get_specialization(b, val, w[3]);
+ val->constant->values[0][0].u16 = get_specialization(b, val, w[3]);
break;
case 8:
- val->constant->values[0].u8[0] = get_specialization(b, val, w[3]);
+ val->constant->values[0][0].u8 = get_specialization(b, val, w[3]);
break;
default:
vtn_fail("Unsupported SpvOpSpecConstant bit size");
"only constants or undefs allowed for "
"SpvOpConstantComposite");
/* to make it easier, just insert a NULL constant for now */
- elems[i] = vtn_null_constant(b, val->type->type);
+ elems[i] = vtn_null_constant(b, val->type);
}
}
switch (val->type->base_type) {
case vtn_base_type_vector: {
assert(glsl_type_is_vector(val->type->type));
- int bit_size = glsl_get_bit_size(val->type->type);
- for (unsigned i = 0; i < elem_count; i++) {
- switch (bit_size) {
- case 64:
- val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
- break;
- case 32:
- val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
- break;
- case 16:
- val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
- break;
- case 8:
- val->constant->values[0].u8[i] = elems[i]->values[0].u8[0];
- break;
- default:
- vtn_fail("Invalid SpvOpConstantComposite bit size");
- }
- }
+ for (unsigned i = 0; i < elem_count; i++)
+ val->constant->values[0][i] = elems[i]->values[0][0];
break;
}
case vtn_base_type_matrix:
assert(glsl_type_is_matrix(val->type->type));
- for (unsigned i = 0; i < elem_count; i++)
- val->constant->values[i] = elems[i]->values[0];
+ for (unsigned i = 0; i < elem_count; i++) {
+ unsigned components =
+ glsl_get_components(glsl_get_column_type(val->type->type));
+ memcpy(val->constant->values[i], elems[i]->values,
+ sizeof(nir_const_value) * components);
+ }
break;
case vtn_base_type_struct:
uint64_t u64[8];
if (v0->value_type == vtn_value_type_constant) {
for (unsigned i = 0; i < len0; i++)
- u64[i] = v0->constant->values[0].u64[i];
+ u64[i] = v0->constant->values[0][i].u64;
}
if (v1->value_type == vtn_value_type_constant) {
for (unsigned i = 0; i < len1; i++)
- u64[len0 + i] = v1->constant->values[0].u64[i];
+ u64[len0 + i] = v1->constant->values[0][i].u64;
}
for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
* to detect if it is wrongly used.
*/
if (comp == (uint32_t)-1)
- val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
+ val->constant->values[0][j].u64 = 0xdeadbeefdeadbeef;
else
- val->constant->values[0].u64[j] = u64[comp];
+ val->constant->values[0][j].u64 = u64[comp];
}
} else {
/* This is for both 32-bit and 16-bit values */
uint32_t u32[8];
if (v0->value_type == vtn_value_type_constant) {
for (unsigned i = 0; i < len0; i++)
- u32[i] = v0->constant->values[0].u32[i];
+ u32[i] = v0->constant->values[0][i].u32;
}
if (v1->value_type == vtn_value_type_constant) {
for (unsigned i = 0; i < len1; i++)
- u32[len0 + i] = v1->constant->values[0].u32[i];
+ u32[len0 + i] = v1->constant->values[0][i].u32;
}
for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
* to detect if it is wrongly used.
*/
if (comp == (uint32_t)-1)
- val->constant->values[0].u32[j] = 0xdeadbeef;
+ val->constant->values[0][j].u32 = 0xdeadbeef;
else
- val->constant->values[0].u32[j] = u32[comp];
+ val->constant->values[0][j].u32 = u32[comp];
}
}
break;
val->constant = *c;
} else {
unsigned num_components = type->length;
- unsigned bit_size = glsl_get_bit_size(type->type);
for (unsigned i = 0; i < num_components; i++)
- switch(bit_size) {
- case 64:
- val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
- break;
- case 32:
- val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
- break;
- case 16:
- val->constant->values[0].u16[i] = (*c)->values[col].u16[elem + i];
- break;
- case 8:
- val->constant->values[0].u8[i] = (*c)->values[col].u8[elem + i];
- break;
- default:
- vtn_fail("Invalid SpvOpCompositeExtract bit size");
- }
+ val->constant->values[0][i] = (*c)->values[col][elem + i];
}
} else {
struct vtn_value *insert =
*c = insert->constant;
} else {
unsigned num_components = type->length;
- unsigned bit_size = glsl_get_bit_size(type->type);
for (unsigned i = 0; i < num_components; i++)
- switch (bit_size) {
- case 64:
- (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
- break;
- case 32:
- (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
- break;
- case 16:
- (*c)->values[col].u16[elem + i] = insert->constant->values[0].u16[i];
- break;
- case 8:
- (*c)->values[col].u8[elem + i] = insert->constant->values[0].u8[i];
- break;
- default:
- vtn_fail("Invalid SpvOpCompositeInsert bit size");
- }
+ (*c)->values[col][elem + i] = insert->constant->values[0][i];
}
}
break;
switch (opcode) {
case SpvOpSConvert:
case SpvOpFConvert:
+ case SpvOpUConvert:
/* We have a source in a conversion */
src_alu_type =
nir_get_nir_type_for_glsl_type(
nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
nir_alu_type_get_type_size(src_alu_type),
nir_alu_type_get_type_size(dst_alu_type));
- nir_const_value src[4];
+ nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < count - 4; i++) {
- nir_constant *c =
- vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;
+ struct vtn_value *src_val =
+ vtn_value(b, w[4 + i], vtn_value_type_constant);
+
+ /* If this is an unsized source, pull the bit size from the
+ * source; otherwise, we'll use the bit size from the destination.
+ */
+ if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
+ bit_size = glsl_get_bit_size(src_val->type->type);
unsigned j = swap ? 1 - i : i;
- src[j] = c->values[0];
+ memcpy(src[j], src_val->constant->values[0], sizeof(src[j]));
}
- val->constant->values[0] =
- nir_eval_const_opcode(op, num_components, bit_size, src);
+   /* Fix up sources with fixed bit sizes: NIR shift opcodes always take a
+    * 32-bit shift count, so fold other sizes down to 32 bits.
+    */
+ switch (op) {
+ case nir_op_ishl:
+ case nir_op_ishr:
+ case nir_op_ushr: {
+ if (bit_size == 32)
+ break;
+ for (unsigned i = 0; i < num_components; ++i) {
+ switch (bit_size) {
+ case 64: src[1][i].u32 = src[1][i].u64; break;
+ case 16: src[1][i].u32 = src[1][i].u16; break;
+ case 8: src[1][i].u32 = src[1][i].u8; break;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ nir_const_value *srcs[3] = {
+ src[0], src[1], src[2],
+ };
+   nir_eval_const_opcode(op, val->constant->values[0], num_components,
+                         bit_size, srcs);
break;
} /* default */
}
}
case SpvOpConstantNull:
- val->constant = vtn_null_constant(b, val->type->type);
+ val->constant = vtn_null_constant(b, val->type);
break;
case SpvOpConstantSampler:
break;
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
/* Now that we have the value, update the workgroup size if needed */
vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
}
-static void
-vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
- const uint32_t *w, unsigned count)
-{
- struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
- struct vtn_function *vtn_callee =
- vtn_value(b, w[3], vtn_value_type_function)->func;
- struct nir_function *callee = vtn_callee->impl->function;
-
- vtn_callee->referenced = true;
-
- nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
-
- unsigned param_idx = 0;
-
- nir_deref_instr *ret_deref = NULL;
- struct vtn_type *ret_type = vtn_callee->type->return_type;
- if (ret_type->base_type != vtn_base_type_void) {
- nir_variable *ret_tmp =
- nir_local_variable_create(b->nb.impl, ret_type->type, "return_tmp");
- ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
- call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
- }
-
- for (unsigned i = 0; i < vtn_callee->type->length; i++) {
- struct vtn_type *arg_type = vtn_callee->type->params[i];
- unsigned arg_id = w[4 + i];
-
- if (arg_type->base_type == vtn_base_type_sampled_image) {
- struct vtn_sampled_image *sampled_image =
- vtn_value(b, arg_id, vtn_value_type_sampled_image)->sampled_image;
-
- call->params[param_idx++] =
- nir_src_for_ssa(&sampled_image->image->deref->dest.ssa);
- call->params[param_idx++] =
- nir_src_for_ssa(&sampled_image->sampler->deref->dest.ssa);
- } else if (arg_type->base_type == vtn_base_type_pointer ||
- arg_type->base_type == vtn_base_type_image ||
- arg_type->base_type == vtn_base_type_sampler) {
- struct vtn_pointer *pointer =
- vtn_value(b, arg_id, vtn_value_type_pointer)->pointer;
- call->params[param_idx++] =
- nir_src_for_ssa(vtn_pointer_to_ssa(b, pointer));
- } else {
- /* This is a regular SSA value and we need a temporary */
- nir_variable *tmp =
- nir_local_variable_create(b->nb.impl, arg_type->type, "arg_tmp");
- nir_deref_instr *tmp_deref = nir_build_deref_var(&b->nb, tmp);
- vtn_local_store(b, vtn_ssa_value(b, arg_id), tmp_deref);
- call->params[param_idx++] = nir_src_for_ssa(&tmp_deref->dest.ssa);
- }
- }
- assert(param_idx == call->num_params);
-
- nir_builder_instr_insert(&b->nb, &call->instr);
-
- if (ret_type->base_type == vtn_base_type_void) {
- vtn_push_value(b, w[2], vtn_value_type_undef);
- } else {
- vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, ret_deref));
- }
-}
-
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
child_type = glsl_get_array_element(type);
break;
case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE:
child_type = glsl_get_struct_field(type, i);
break;
default:
break;
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
nir_tex_src srcs[10]; /* 10 should be enough */
case nir_texop_txl:
case nir_texop_txd:
case nir_texop_tg4:
+ case nir_texop_lod:
/* These operations require a sampler */
p->src = nir_src_for_ssa(&sampler->dest.ssa);
p->src_type = nir_tex_src_sampler_deref;
case nir_texop_txf:
case nir_texop_txf_ms:
case nir_texop_txs:
- case nir_texop_lod:
case nir_texop_query_levels:
case nir_texop_texture_samples:
case nir_texop_samples_identical:
/* These don't */
break;
+ case nir_texop_txf_ms_fb:
+ vtn_fail("unexpected nir_texop_txf_ms_fb");
+ break;
case nir_texop_txf_ms_mcs:
vtn_fail("unexpected nir_texop_txf_ms_mcs");
}
case SpvOpImageGather:
/* This has a component as its next source */
gather_component =
- vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
+ vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0][0].u32;
break;
default:
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
/* Now we need to handle some number of optional arguments */
- const struct vtn_ssa_value *gather_offsets = NULL;
+ struct vtn_value *gather_offsets = NULL;
if (idx < count) {
uint32_t operands = w[idx++];
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
if (operands & SpvImageOperandsConstOffsetsMask) {
- nir_tex_src none = {0};
- gather_offsets = vtn_ssa_value(b, w[idx++]);
- (*p++) = none;
+ vtn_assert(texop == nir_texop_tg4);
+ gather_offsets = vtn_value(b, w[idx++], vtn_value_type_constant);
}
if (operands & SpvImageOperandsSampleMask) {
texop = nir_texop_txf_ms;
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
}
+
+ if (operands & SpvImageOperandsMinLodMask) {
+ vtn_assert(texop == nir_texop_tex ||
+ texop == nir_texop_txb ||
+ texop == nir_texop_txd);
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_min_lod);
+ }
}
/* We should have now consumed exactly all of the arguments */
vtn_assert(idx == count);
is_shadow && glsl_get_components(ret_type->type) == 1;
instr->component = gather_component;
+ if (sampled.image && (sampled.image->access & ACCESS_NON_UNIFORM))
+ instr->texture_non_uniform = true;
+
+ if (sampled.sampler && (sampled.sampler->access & ACCESS_NON_UNIFORM))
+ instr->sampler_non_uniform = true;
+
switch (glsl_get_sampler_result_type(image_type)) {
case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
vtn_assert(glsl_get_vector_elements(ret_type->type) ==
nir_tex_instr_dest_size(instr));
- nir_ssa_def *def;
- nir_instr *instruction;
if (gather_offsets) {
- vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
- vtn_assert(glsl_get_length(gather_offsets->type) == 4);
- nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};
-
- /* Copy the current instruction 4x */
- for (uint32_t i = 1; i < 4; i++) {
- instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
- instrs[i]->op = instr->op;
- instrs[i]->coord_components = instr->coord_components;
- instrs[i]->sampler_dim = instr->sampler_dim;
- instrs[i]->is_array = instr->is_array;
- instrs[i]->is_shadow = instr->is_shadow;
- instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
- instrs[i]->component = instr->component;
- instrs[i]->dest_type = instr->dest_type;
-
- memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
-
- nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
- nir_tex_instr_dest_size(instr), 32, NULL);
- }
-
- /* Fill in the last argument with the offset from the passed in offsets
- * and insert the instruction into the stream.
- */
+ vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
+ gather_offsets->type->length != 4,
+ "ConstOffsets must be an array of size four of vectors "
+ "of two integer components");
+
+ struct vtn_type *vec_type = gather_offsets->type->array_element;
+ vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
+ vec_type->length != 2 ||
+ !glsl_type_is_integer(vec_type->type),
+ "ConstOffsets must be an array of size four of vectors "
+ "of two integer components");
+
+ unsigned bit_size = glsl_get_bit_size(vec_type->type);
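+   /* Bake the constant offsets directly into the instruction's
+    * tg4_offsets array.
+    */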
for (uint32_t i = 0; i < 4; i++) {
- nir_tex_src src;
- src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
- src.src_type = nir_tex_src_offset;
- instrs[i]->src[instrs[i]->num_srcs - 1] = src;
- nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
- }
-
- /* Combine the results of the 4 instructions by taking their .w
- * components
- */
- nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
- nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
- vec4->dest.write_mask = 0xf;
- for (uint32_t i = 0; i < 4; i++) {
- vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
- vec4->src[i].swizzle[0] = 3;
+ const nir_const_value *cvec =
+ gather_offsets->constant->elements[i]->values[0];
+ for (uint32_t j = 0; j < 2; j++) {
+ switch (bit_size) {
+ case 8: instr->tg4_offsets[i][j] = cvec[j].i8; break;
+ case 16: instr->tg4_offsets[i][j] = cvec[j].i16; break;
+ case 32: instr->tg4_offsets[i][j] = cvec[j].i32; break;
+ case 64: instr->tg4_offsets[i][j] = cvec[j].i64; break;
+ default:
+ vtn_fail("Unsupported bit size: %u", bit_size);
+ }
+ }
}
- def = &vec4->dest.dest.ssa;
- instruction = &vec4->instr;
- } else {
- def = &instr->dest.ssa;
- instruction = &instr->instr;
}
val->ssa = vtn_create_ssa_value(b, ret_type->type);
- val->ssa->def = def;
+ val->ssa->def = &instr->dest.ssa;
- nir_builder_instr_insert(&b->nb, instruction);
+ nir_builder_instr_insert(&b->nb, &instr->instr);
}
static void
break;
case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
break;
break;
default:
- vtn_fail("Invalid SPIR-V atomic");
+ vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
}
for (unsigned i = 0; i < 4; i++)
swizzle[i] = MIN2(i, dim - 1);
- return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
+ return nir_swizzle(&b->nb, coord->def, swizzle, 4);
}
static nir_ssa_def *
unsigned swiz[4];
for (unsigned i = 0; i < 4; i++)
swiz[i] = i < value->num_components ? i : 0;
- return nir_swizzle(b, value, swiz, 4, false);
+ return nir_swizzle(b, value, swiz, 4);
}
static void
break;
default:
- vtn_fail("Invalid image opcode");
+ vtn_fail_with_opcode("Invalid image opcode", opcode);
}
nir_intrinsic_op op;
switch (opcode) {
#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
- OP(ImageQuerySize, size)
- OP(ImageRead, load)
- OP(ImageWrite, store)
- OP(AtomicLoad, load)
- OP(AtomicStore, store)
- OP(AtomicExchange, atomic_exchange)
- OP(AtomicCompareExchange, atomic_comp_swap)
- OP(AtomicIIncrement, atomic_add)
- OP(AtomicIDecrement, atomic_add)
- OP(AtomicIAdd, atomic_add)
- OP(AtomicISub, atomic_add)
- OP(AtomicSMin, atomic_min)
- OP(AtomicUMin, atomic_min)
- OP(AtomicSMax, atomic_max)
- OP(AtomicUMax, atomic_max)
- OP(AtomicAnd, atomic_and)
- OP(AtomicOr, atomic_or)
- OP(AtomicXor, atomic_xor)
+ OP(ImageQuerySize, size)
+ OP(ImageRead, load)
+ OP(ImageWrite, store)
+ OP(AtomicLoad, load)
+ OP(AtomicStore, store)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicCompareExchangeWeak, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_min)
+ OP(AtomicUMin, atomic_min)
+ OP(AtomicSMax, atomic_max)
+ OP(AtomicUMax, atomic_max)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
#undef OP
default:
- vtn_fail("Invalid image opcode");
+ vtn_fail_with_opcode("Invalid image opcode", opcode);
}
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
nir_ssa_def *value = vtn_ssa_value(b, value_id)->def;
/* nir_intrinsic_image_deref_store always takes a vec4 value */
+ assert(op == nir_intrinsic_image_deref_store);
+ intrin->num_components = 4;
intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
break;
}
case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
case SpvOpAtomicIIncrement:
case SpvOpAtomicIDecrement:
case SpvOpAtomicExchange:
break;
default:
- vtn_fail("Invalid image opcode");
+ vtn_fail_with_opcode("Invalid image opcode", opcode);
}
if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
switch (opcode) {
- case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
- case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
+ case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
+ case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
- OP(AtomicExchange, atomic_exchange)
- OP(AtomicCompareExchange, atomic_comp_swap)
- OP(AtomicIIncrement, atomic_add)
- OP(AtomicIDecrement, atomic_add)
- OP(AtomicIAdd, atomic_add)
- OP(AtomicISub, atomic_add)
- OP(AtomicSMin, atomic_imin)
- OP(AtomicUMin, atomic_umin)
- OP(AtomicSMax, atomic_imax)
- OP(AtomicUMax, atomic_umax)
- OP(AtomicAnd, atomic_and)
- OP(AtomicOr, atomic_or)
- OP(AtomicXor, atomic_xor)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicCompareExchangeWeak, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_imin)
+ OP(AtomicUMin, atomic_umin)
+ OP(AtomicSMax, atomic_imax)
+ OP(AtomicUMax, atomic_umax)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
#undef OP
default:
- vtn_fail("Invalid SSBO atomic");
+ vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
}
}
{
switch (opcode) {
#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
- OP(AtomicLoad, read_deref)
- OP(AtomicExchange, exchange)
- OP(AtomicCompareExchange, comp_swap)
- OP(AtomicIIncrement, inc_deref)
- OP(AtomicIDecrement, post_dec_deref)
- OP(AtomicIAdd, add_deref)
- OP(AtomicISub, add_deref)
- OP(AtomicUMin, min_deref)
- OP(AtomicUMax, max_deref)
- OP(AtomicAnd, and_deref)
- OP(AtomicOr, or_deref)
- OP(AtomicXor, xor_deref)
+ OP(AtomicLoad, read_deref)
+ OP(AtomicExchange, exchange)
+ OP(AtomicCompareExchange, comp_swap)
+ OP(AtomicCompareExchangeWeak, comp_swap)
+ OP(AtomicIIncrement, inc_deref)
+ OP(AtomicIDecrement, post_dec_deref)
+ OP(AtomicIAdd, add_deref)
+ OP(AtomicISub, add_deref)
+ OP(AtomicUMin, min_deref)
+ OP(AtomicUMax, max_deref)
+ OP(AtomicAnd, and_deref)
+ OP(AtomicOr, or_deref)
+ OP(AtomicXor, xor_deref)
#undef OP
default:
/* We left the following out: AtomicStore, AtomicSMin and
get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
switch (opcode) {
- case SpvOpAtomicLoad: return nir_intrinsic_load_shared;
- case SpvOpAtomicStore: return nir_intrinsic_store_shared;
+ case SpvOpAtomicLoad: return nir_intrinsic_load_shared;
+ case SpvOpAtomicStore: return nir_intrinsic_store_shared;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
- OP(AtomicExchange, atomic_exchange)
- OP(AtomicCompareExchange, atomic_comp_swap)
- OP(AtomicIIncrement, atomic_add)
- OP(AtomicIDecrement, atomic_add)
- OP(AtomicIAdd, atomic_add)
- OP(AtomicISub, atomic_add)
- OP(AtomicSMin, atomic_imin)
- OP(AtomicUMin, atomic_umin)
- OP(AtomicSMax, atomic_imax)
- OP(AtomicUMax, atomic_umax)
- OP(AtomicAnd, atomic_and)
- OP(AtomicOr, atomic_or)
- OP(AtomicXor, atomic_xor)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicCompareExchangeWeak, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_imin)
+ OP(AtomicUMin, atomic_umin)
+ OP(AtomicSMax, atomic_imax)
+ OP(AtomicUMax, atomic_umax)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
#undef OP
default:
- vtn_fail("Invalid shared atomic");
+ vtn_fail_with_opcode("Invalid shared atomic", opcode);
}
}
get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
switch (opcode) {
- case SpvOpAtomicLoad: return nir_intrinsic_load_deref;
- case SpvOpAtomicStore: return nir_intrinsic_store_deref;
+ case SpvOpAtomicLoad: return nir_intrinsic_load_deref;
+ case SpvOpAtomicStore: return nir_intrinsic_store_deref;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
- OP(AtomicExchange, atomic_exchange)
- OP(AtomicCompareExchange, atomic_comp_swap)
- OP(AtomicIIncrement, atomic_add)
- OP(AtomicIDecrement, atomic_add)
- OP(AtomicIAdd, atomic_add)
- OP(AtomicISub, atomic_add)
- OP(AtomicSMin, atomic_imin)
- OP(AtomicUMin, atomic_umin)
- OP(AtomicSMax, atomic_imax)
- OP(AtomicUMax, atomic_umax)
- OP(AtomicAnd, atomic_and)
- OP(AtomicOr, atomic_or)
- OP(AtomicXor, atomic_xor)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicCompareExchangeWeak, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_imin)
+ OP(AtomicUMin, atomic_umin)
+ OP(AtomicSMax, atomic_imax)
+ OP(AtomicUMax, atomic_umax)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
#undef OP
default:
- vtn_fail("Invalid shared atomic");
+ vtn_fail_with_opcode("Invalid shared atomic", opcode);
}
}
break;
default:
- vtn_fail("Invalid SPIR-V atomic");
+ vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
/*
unreachable("Invalid SPIR-V atomic");
}
- } else if (ptr->mode == vtn_variable_mode_workgroup &&
- !b->options->lower_workgroup_access_to_offsets) {
- nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
- const struct glsl_type *deref_type = deref->type;
- nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
+ } else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
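+      /* These atomics take an explicit byte offset (plus a block index for
+       * SSBOs) instead of a deref.
+       */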
+ nir_ssa_def *offset, *index;
+ offset = vtn_pointer_to_offset(b, ptr, &index);
+
+ nir_intrinsic_op op;
+ if (ptr->mode == vtn_variable_mode_ssbo) {
+ op = get_ssbo_nir_atomic_op(b, opcode);
+ } else {
+ vtn_assert(ptr->mode == vtn_variable_mode_workgroup &&
+ b->options->lower_workgroup_access_to_offsets);
+ op = get_shared_nir_atomic_op(b, opcode);
+ }
+
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
- atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ int src = 0;
switch (opcode) {
case SpvOpAtomicLoad:
- atomic->num_components = glsl_get_vector_elements(deref_type);
+ atomic->num_components = glsl_get_vector_elements(ptr->type->type);
+ nir_intrinsic_set_align(atomic, 4, 0);
+ if (ptr->mode == vtn_variable_mode_ssbo)
+ atomic->src[src++] = nir_src_for_ssa(index);
+ atomic->src[src++] = nir_src_for_ssa(offset);
break;
case SpvOpAtomicStore:
- atomic->num_components = glsl_get_vector_elements(deref_type);
+ atomic->num_components = glsl_get_vector_elements(ptr->type->type);
nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
- atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+ nir_intrinsic_set_align(atomic, 4, 0);
+ atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+ if (ptr->mode == vtn_variable_mode_ssbo)
+ atomic->src[src++] = nir_src_for_ssa(index);
+ atomic->src[src++] = nir_src_for_ssa(offset);
break;
case SpvOpAtomicExchange:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
- fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
+ if (ptr->mode == vtn_variable_mode_ssbo)
+ atomic->src[src++] = nir_src_for_ssa(index);
+ atomic->src[src++] = nir_src_for_ssa(offset);
+ fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
break;
default:
- vtn_fail("Invalid SPIR-V atomic");
-
+ vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
} else {
- nir_ssa_def *offset, *index;
- offset = vtn_pointer_to_offset(b, ptr, &index);
-
- nir_intrinsic_op op;
- if (ptr->mode == vtn_variable_mode_ssbo) {
- op = get_ssbo_nir_atomic_op(b, opcode);
- } else {
- vtn_assert(ptr->mode == vtn_variable_mode_workgroup &&
- b->options->lower_workgroup_access_to_offsets);
- op = get_shared_nir_atomic_op(b, opcode);
- }
-
+ nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
+ const struct glsl_type *deref_type = deref->type;
+ nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
+ atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
- int src = 0;
switch (opcode) {
case SpvOpAtomicLoad:
- atomic->num_components = glsl_get_vector_elements(ptr->type->type);
- if (ptr->mode == vtn_variable_mode_ssbo)
- atomic->src[src++] = nir_src_for_ssa(index);
- atomic->src[src++] = nir_src_for_ssa(offset);
+ atomic->num_components = glsl_get_vector_elements(deref_type);
break;
case SpvOpAtomicStore:
- atomic->num_components = glsl_get_vector_elements(ptr->type->type);
+ atomic->num_components = glsl_get_vector_elements(deref_type);
nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
- atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
- if (ptr->mode == vtn_variable_mode_ssbo)
- atomic->src[src++] = nir_src_for_ssa(index);
- atomic->src[src++] = nir_src_for_ssa(offset);
+ atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
break;
case SpvOpAtomicExchange:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
- if (ptr->mode == vtn_variable_mode_ssbo)
- atomic->src[src++] = nir_src_for_ssa(index);
- atomic->src[src++] = nir_src_for_ssa(offset);
- fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
+ fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
break;
default:
- vtn_fail("Invalid SPIR-V atomic");
+ vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
}
static nir_alu_instr *
create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
{
- nir_op op;
- switch (num_components) {
- case 1: op = nir_op_imov; break;
- case 2: op = nir_op_vec2; break;
- case 3: op = nir_op_vec3; break;
- case 4: op = nir_op_vec4; break;
- default: vtn_fail("bad vector size");
- }
-
+ nir_op op = nir_op_vec(num_components);
nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
bit_size, NULL);
return &vec->dest.dest.ssa;
}
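+/* Helper that compares x against an immediate constant of matching bit
+ * size.
+ */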
+static nir_ssa_def *
+nir_ieq_imm(nir_builder *b, nir_ssa_def *x, uint64_t i)
+{
+ return nir_ieq(b, x, nir_imm_intN_t(b, i, x->bit_size));
+}
+
nir_ssa_def *
vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
nir_ssa_def *index)
{
- nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
- for (unsigned i = 1; i < src->num_components; i++)
- dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
- vtn_vector_extract(b, src, i), dest);
-
- return dest;
+ return nir_vector_extract(&b->nb, src, nir_i2i(&b->nb, index, 32));
}
nir_ssa_def *
{
nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
for (unsigned i = 1; i < src->num_components; i++)
- dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
+ dest = nir_bcsel(&b->nb, nir_ieq_imm(&b->nb, index, i),
vtn_vector_insert(b, src, insert, i), dest);
return dest;
unsigned elems = count - 3;
assume(elems >= 1);
if (glsl_type_is_vector_or_scalar(type)) {
- nir_ssa_def *srcs[4];
+ nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < elems; i++)
srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
val->ssa->def =
break;
default:
- vtn_fail("unknown composite operation");
+ vtn_fail_with_opcode("unknown composite operation", opcode);
}
}
switch (opcode) {
case SpvOpEmitStreamVertex:
- case SpvOpEndStreamPrimitive:
- nir_intrinsic_set_stream_id(intrin, w[1]);
+ case SpvOpEndStreamPrimitive: {
+ unsigned stream = vtn_constant_uint(b, w[1]);
+ nir_intrinsic_set_stream_id(intrin, stream);
break;
+ }
+
default:
break;
}
}
case SpvOpMemoryBarrier: {
- SpvScope scope = vtn_constant_value(b, w[1])->values[0].u32[0];
- SpvMemorySemanticsMask semantics =
- vtn_constant_value(b, w[2])->values[0].u32[0];
+ SpvScope scope = vtn_constant_uint(b, w[1]);
+ SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
vtn_emit_memory_barrier(b, scope, semantics);
return;
}
case SpvOpControlBarrier: {
- SpvScope execution_scope =
- vtn_constant_value(b, w[1])->values[0].u32[0];
+ SpvScope execution_scope = vtn_constant_uint(b, w[1]);
if (execution_scope == SpvScopeWorkgroup)
vtn_emit_barrier(b, nir_intrinsic_barrier);
- SpvScope memory_scope =
- vtn_constant_value(b, w[2])->values[0].u32[0];
- SpvMemorySemanticsMask memory_semantics =
- vtn_constant_value(b, w[3])->values[0].u32[0];
+ SpvScope memory_scope = vtn_constant_uint(b, w[2]);
+ SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);
vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
break;
}
case SpvExecutionModeOutputTriangleStrip:
return 5; /* GL_TRIANGLE_STRIP */
default:
- vtn_fail("Invalid primitive type");
+ vtn_fail("Invalid primitive type: %s (%u)",
+ spirv_executionmode_to_string(mode), mode);
}
}
case SpvExecutionModeInputTrianglesAdjacency:
return 6;
default:
- vtn_fail("Invalid GS input mode");
+ vtn_fail("Invalid GS input mode: %s (%u)",
+ spirv_executionmode_to_string(mode), mode);
}
}
return MESA_SHADER_FRAGMENT;
case SpvExecutionModelGLCompute:
return MESA_SHADER_COMPUTE;
+ case SpvExecutionModelKernel:
+ return MESA_SHADER_KERNEL;
default:
- vtn_fail("Unsupported execution model");
+ vtn_fail("Unsupported execution model: %s (%u)",
+ spirv_executionmodel_to_string(model), model);
}
}
-#define spv_check_supported(name, cap) do { \
- if (!(b->options && b->options->caps.name)) \
- vtn_warn("Unsupported SPIR-V capability: %s", \
- spirv_capability_to_string(cap)); \
+#define spv_check_supported(name, cap) do { \
+ if (!(b->options && b->options->caps.name)) \
+ vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
+ spirv_capability_to_string(cap), cap); \
} while(0)
case SpvCapabilityStorageImageExtendedFormats:
break;
- case SpvCapabilityGeometryStreams:
case SpvCapabilityLinkage:
case SpvCapabilityVector16:
case SpvCapabilityFloat16Buffer:
- case SpvCapabilityFloat16:
- case SpvCapabilityInt64Atomics:
- case SpvCapabilityStorageImageMultisample:
- case SpvCapabilityInt8:
case SpvCapabilitySparseResidency:
- case SpvCapabilityMinLod:
- case SpvCapabilityTransformFeedback:
vtn_warn("Unsupported SPIR-V capability: %s",
spirv_capability_to_string(cap));
break;
+ case SpvCapabilityMinLod:
+ spv_check_supported(min_lod, cap);
+ break;
+
case SpvCapabilityAtomicStorage:
spv_check_supported(atomic_storage, cap);
break;
case SpvCapabilityInt16:
spv_check_supported(int16, cap);
break;
+ case SpvCapabilityInt8:
+ spv_check_supported(int8, cap);
+ break;
+
+ case SpvCapabilityTransformFeedback:
+ spv_check_supported(transform_feedback, cap);
+ break;
+
+ case SpvCapabilityGeometryStreams:
+ spv_check_supported(geometry_streams, cap);
+ break;
+
+ case SpvCapabilityInt64Atomics:
+ spv_check_supported(int64_atomics, cap);
+ break;
+
+ case SpvCapabilityStorageImageMultisample:
+ spv_check_supported(storage_image_ms, cap);
+ break;
case SpvCapabilityAddresses:
+ spv_check_supported(address, cap);
+ break;
+
case SpvCapabilityKernel:
+ spv_check_supported(kernel, cap);
+ break;
+
case SpvCapabilityImageBasic:
case SpvCapabilityImageReadWrite:
case SpvCapabilityImageMipmap:
case SpvCapabilityVariablePointersStorageBuffer:
case SpvCapabilityVariablePointers:
spv_check_supported(variable_pointers, cap);
+ b->variable_pointers = true;
break;
case SpvCapabilityStorageUniformBufferBlock16:
spv_check_supported(storage_16bit, cap);
break;
+ case SpvCapabilityShaderNonUniformEXT:
+ spv_check_supported(descriptor_indexing, cap);
+ break;
+
case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
spv_check_supported(descriptor_array_dynamic_indexing, cap);
break;
+ case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
+ case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
+ case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
+ case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
+ case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
+ case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
+ case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
+ spv_check_supported(descriptor_array_non_uniform_indexing, cap);
+ break;
+
case SpvCapabilityRuntimeDescriptorArrayEXT:
spv_check_supported(runtime_descriptor_array, cap);
break;
spv_check_supported(stencil_export, cap);
break;
+ case SpvCapabilitySampleMaskPostDepthCoverage:
+ spv_check_supported(post_depth_coverage, cap);
+ break;
+
+ case SpvCapabilityPhysicalStorageBufferAddressesEXT:
+ spv_check_supported(physical_storage_buffer_address, cap);
+ break;
+
+ case SpvCapabilityComputeDerivativeGroupQuadsNV:
+ case SpvCapabilityComputeDerivativeGroupLinearNV:
+ spv_check_supported(derivative_group, cap);
+ break;
+
+ case SpvCapabilityFloat16:
+ spv_check_supported(float16, cap);
+ break;
+
default:
- vtn_fail("Unhandled capability");
+ vtn_fail("Unhandled capability: %s (%u)",
+ spirv_capability_to_string(cap), cap);
}
break;
}
break;
case SpvOpMemoryModel:
- vtn_assert(w[1] == SpvAddressingModelLogical);
+ switch (w[1]) {
+ case SpvAddressingModelPhysical32:
+ vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
+ "AddressingModelPhysical32 only supported for kernels");
+ b->shader->info.cs.ptr_size = 32;
+ b->physical_ptrs = true;
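+ /* CL-style kernels address shared and function-temporary memory with the
+ * same flat format used for global memory.
+ */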
+ b->options->shared_addr_format = nir_address_format_32bit_global;
+ b->options->global_addr_format = nir_address_format_32bit_global;
+ b->options->temp_addr_format = nir_address_format_32bit_global;
+ break;
+ case SpvAddressingModelPhysical64:
+ vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
+ "AddressingModelPhysical64 only supported for kernels");
+ b->shader->info.cs.ptr_size = 64;
+ b->physical_ptrs = true;
+ b->options->shared_addr_format = nir_address_format_64bit_global;
+ b->options->global_addr_format = nir_address_format_64bit_global;
+ b->options->temp_addr_format = nir_address_format_64bit_global;
+ break;
+ case SpvAddressingModelLogical:
+ vtn_fail_if(b->shader->info.stage >= MESA_SHADER_STAGES,
+ "AddressingModelLogical only supported for shaders");
+ b->shader->info.cs.ptr_size = 0;
+ b->physical_ptrs = false;
+ break;
+ case SpvAddressingModelPhysicalStorageBuffer64EXT:
+ vtn_fail_if(!b->options ||
+ !b->options->caps.physical_storage_buffer_address,
+ "AddressingModelPhysicalStorageBuffer64EXT not supported");
+ break;
+ default:
+ vtn_fail("Unknown addressing model: %s (%u)",
+ spirv_addressingmodel_to_string(w[1]), w[1]);
+ break;
+ }
+
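+ /* Simple, GLSL450, and OpenCL are the only memory models we accept. */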
vtn_assert(w[2] == SpvMemoryModelSimple ||
- w[2] == SpvMemoryModelGLSL450);
+ w[2] == SpvMemoryModelGLSL450 ||
+ w[2] == SpvMemoryModelOpenCL);
break;
case SpvOpEntryPoint:
break;
case SpvOpExecutionMode:
+ case SpvOpExecutionModeId:
case SpvOpDecorationGroup:
case SpvOpDecorate:
+ case SpvOpDecorateId:
case SpvOpMemberDecorate:
case SpvOpGroupDecorate:
case SpvOpGroupMemberDecorate:
+ case SpvOpDecorateString:
+ case SpvOpMemberDecorateString:
vtn_handle_decoration(b, opcode, w, count);
break;
switch(mode->exec_mode) {
case SpvExecutionModeOriginUpperLeft:
case SpvExecutionModeOriginLowerLeft:
- b->origin_upper_left =
+ vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.origin_upper_left =
(mode->exec_mode == SpvExecutionModeOriginUpperLeft);
break;
b->shader->info.fs.early_fragment_tests = true;
break;
+ case SpvExecutionModePostDepthCoverage:
+ vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.post_depth_coverage = true;
+ break;
+
case SpvExecutionModeInvocations:
vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
+ b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
break;
case SpvExecutionModeDepthReplacing:
break;
case SpvExecutionModeLocalSize:
- vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
- b->shader->info.cs.local_size[0] = mode->literals[0];
- b->shader->info.cs.local_size[1] = mode->literals[1];
- b->shader->info.cs.local_size[2] = mode->literals[2];
+ vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
+ b->shader->info.cs.local_size[0] = mode->operands[0];
+ b->shader->info.cs.local_size[1] = mode->operands[1];
+ b->shader->info.cs.local_size[2] = mode->operands[2];
break;
+
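+ /* LocalSizeId is the same as LocalSize except the dimensions arrive as
+ * constant <id>s rather than immediate literals.
+ */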
+ case SpvExecutionModeLocalSizeId:
+ b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
+ b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
+ b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
+ break;
+
case SpvExecutionModeLocalSizeHint:
+ case SpvExecutionModeLocalSizeHintId:
break; /* Nothing to do with this */
case SpvExecutionModeOutputVertices:
if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
- b->shader->info.tess.tcs_vertices_out = mode->literals[0];
+ b->shader->info.tess.tcs_vertices_out = mode->operands[0];
} else {
vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.vertices_out = mode->literals[0];
+ b->shader->info.gs.vertices_out = mode->operands[0];
}
break;
vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
b->shader->info.gs.vertices_in =
vertices_in_from_spv_execution_mode(b, mode->exec_mode);
+ b->shader->info.gs.input_primitive =
+ gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
}
break;
break;
case SpvExecutionModePixelCenterInteger:
- b->pixel_center_integer = true;
+ vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.pixel_center_integer = true;
break;
case SpvExecutionModeXfb:
- vtn_fail("Unhandled execution mode");
+ b->shader->info.has_transform_feedback_varyings = true;
break;
case SpvExecutionModeVecTypeHint:
- case SpvExecutionModeContractionOff:
break; /* OpenCL */
+ case SpvExecutionModeContractionOff:
+ if (b->shader->info.stage != MESA_SHADER_KERNEL)
+ vtn_warn("ExectionMode only allowed for CL-style kernels: %s",
+ spirv_executionmode_to_string(mode->exec_mode));
+ else
+ b->exact = true;
+ break;
+
case SpvExecutionModeStencilRefReplacingEXT:
vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
break;
+ case SpvExecutionModeDerivativeGroupQuadsNV:
+ vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
+ b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
+ break;
+
+ case SpvExecutionModeDerivativeGroupLinearNV:
+ vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
+ b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
+ break;
+
default:
- vtn_fail("Unhandled execution mode");
+ vtn_fail("Unhandled execution mode: %s (%u)",
+ spirv_executionmode_to_string(mode->exec_mode),
+ mode->exec_mode);
}
}
case SpvOpMemberName:
case SpvOpDecorationGroup:
case SpvOpDecorate:
+ case SpvOpDecorateId:
case SpvOpMemberDecorate:
case SpvOpGroupDecorate:
case SpvOpGroupMemberDecorate:
+ case SpvOpDecorateString:
+ case SpvOpMemberDecorateString:
vtn_fail("Invalid opcode types and variables section");
break;
case SpvOpTypeStruct:
case SpvOpTypeOpaque:
case SpvOpTypePointer:
+ case SpvOpTypeForwardPointer:
case SpvOpTypeFunction:
case SpvOpTypeEvent:
case SpvOpTypeDeviceEvent:
return true;
}
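+/* Handles OpPtrEqual, OpPtrNotEqual, and OpPtrDiff by operating on the
+ * pointers' address representation: equality lowers to an integer compare
+ * and PtrDiff to a subtraction scaled down to whole elements.
+ */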
+static void
+vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_type *type1 = vtn_untyped_value(b, w[3])->type;
+ struct vtn_type *type2 = vtn_untyped_value(b, w[4])->type;
+ vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
+ type2->base_type != vtn_base_type_pointer,
+ "%s operands must have pointer types",
+ spirv_op_to_string(opcode));
+ vtn_fail_if(type1->storage_class != type2->storage_class,
+ "%s operands must have the same storage class",
+ spirv_op_to_string(opcode));
+
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+
+ nir_address_format addr_format = vtn_mode_to_address_format(
+ b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));
+
+ nir_ssa_def *def;
+
+ switch (opcode) {
+ case SpvOpPtrDiff: {
+ /* OpPtrDiff returns the difference in the number of elements,
+ * not the byte offset.
+ */
+ unsigned elem_size, elem_align;
+ glsl_get_natural_size_align_bytes(type1->deref->type,
+ &elem_size, &elem_align);
+
+ def = nir_build_addr_isub(&b->nb,
+ vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ addr_format);
+ def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
+ def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
+ break;
+ }
+
+ case SpvOpPtrEqual:
+ case SpvOpPtrNotEqual: {
+ def = nir_build_addr_ieq(&b->nb,
+ vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ addr_format);
+ if (opcode == SpvOpPtrNotEqual)
+ def = nir_inot(&b->nb, def);
+ break;
+ }
+
+ default:
+ unreachable("Invalid ptr operation");
+ }
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = vtn_create_ssa_value(b, type);
+ val->ssa->def = def;
+}
+
static bool
vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
case SpvOpAccessChain:
case SpvOpPtrAccessChain:
case SpvOpInBoundsAccessChain:
+ case SpvOpInBoundsPtrAccessChain:
case SpvOpArrayLength:
+ case SpvOpConvertPtrToU:
+ case SpvOpConvertUToPtr:
vtn_handle_variables(b, opcode, w, count);
break;
case SpvOpSConvert:
case SpvOpFConvert:
case SpvOpQuantizeToF16:
- case SpvOpConvertPtrToU:
- case SpvOpConvertUToPtr:
case SpvOpPtrCastToGeneric:
case SpvOpGenericCastToPtr:
- case SpvOpBitcast:
case SpvOpIsNan:
case SpvOpIsInf:
case SpvOpIsFinite:
vtn_handle_alu(b, opcode, w, count);
break;
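+ /* OpBitcast is handled separately from the other conversions because it
+ * may cast between pointers and scalar integers, which needs knowledge of
+ * the address format.
+ */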
+ case SpvOpBitcast:
+ vtn_handle_bitcast(b, w, count);
+ break;
+
case SpvOpVectorExtractDynamic:
case SpvOpVectorInsertDynamic:
case SpvOpVectorShuffle:
vtn_handle_subgroup(b, opcode, w, count);
break;
+ case SpvOpPtrDiff:
+ case SpvOpPtrEqual:
+ case SpvOpPtrNotEqual:
+ vtn_handle_ptr(b, opcode, w, count);
+ break;
+
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
return true;
{
/* Initialize the vtn_builder object */
struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
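+
+ /* Keep a builder-owned copy of the options; the OpMemoryModel handler may
+ * rewrite the address formats, and the caller's struct must stay intact.
+ */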
+ struct spirv_to_nir_options *dup_options =
+ ralloc(b, struct spirv_to_nir_options);
+ *dup_options = *options;
+
b->spirv = words;
b->spirv_word_count = word_count;
b->file = NULL;
exec_list_make_empty(&b->functions);
b->entry_point_stage = stage;
b->entry_point_name = entry_point_name;
- b->options = options;
+ b->options = dup_options;
/*
* Handle the SPIR-V header (first 5 dwords).
goto fail;
}
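+ /* words[2] holds the generator's magic number: the tool ID lives in the
+ * top 16 bits and a tool-specific version in the bottom 16.
+ */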
+ uint16_t generator_id = words[2] >> 16;
+ uint16_t generator_version = words[2];
+
+ /* The first GLSLang version bump actually came 1.5 years after #179 was
+ * fixed, but this should at least let us shut the workaround off for
+ * modern versions of GLSLang.
+ */
+ b->wa_glslang_179 = (generator_id == 8 && generator_version == 1);
+
/* words[2] == generator magic */
unsigned value_id_bound = words[3];
if (words[4] != 0) {
return NULL;
}
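+/* CL kernel entry points may take parameters directly. Wrap the real entry
+ * point in a generated main that loads each parameter from a shader input
+ * variable and calls through; pointers to Function storage are treated as
+ * by-value parameters and copied into function-local memory first.
+ */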
-nir_function *
+static nir_function *
+vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
+ nir_function *entry_point)
+{
+ vtn_assert(entry_point == b->entry_point->func->impl->function);
+ vtn_fail_if(!entry_point->name, "entry points are required to have a name");
+ const char *func_name =
+ ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);
+
+ /* we shouldn't have any inputs yet */
+ vtn_assert(!entry_point->shader->num_inputs);
+ vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);
+
+ nir_function *main_entry_point = nir_function_create(b->shader, func_name);
+ main_entry_point->impl = nir_function_impl_create(main_entry_point);
+ nir_builder_init(&b->nb, main_entry_point->impl);
+ b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
+ b->func_param_idx = 0;
+
+ nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);
+
+ for (unsigned i = 0; i < entry_point->num_params; ++i) {
+ struct vtn_type *param_type = b->entry_point->func->type->params[i];
+
+ /* consider all pointers to function memory to be parameters passed
+ * by value
+ */
+ bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
+ param_type->storage_class == SpvStorageClassFunction;
+
+ /* input variable */
+ nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
+ in_var->data.mode = nir_var_shader_in;
+ in_var->data.read_only = true;
+ in_var->data.location = i;
+
+ if (is_by_val)
+ in_var->type = param_type->deref->type;
+ else
+ in_var->type = param_type->type;
+
+ nir_shader_add_variable(b->nb.shader, in_var);
+ b->nb.shader->num_inputs++;
+
+ /* we have to copy the entire variable into function memory */
+ if (is_by_val) {
+ nir_variable *copy_var =
+ nir_local_variable_create(main_entry_point->impl, in_var->type,
+ "copy_in");
+ nir_copy_var(&b->nb, copy_var, in_var);
+ call->params[i] =
+ nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
+ } else {
+ call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &call->instr);
+
+ return main_entry_point;
+}
+
+nir_shader *
spirv_to_nir(const uint32_t *words, size_t word_count,
struct nir_spirv_specialization *spec, unsigned num_spec,
gl_shader_stage stage, const char *entry_point_name,
/* Skip the SPIR-V header, handled at vtn_create_builder */
words+= 5;
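+
+ /* The preamble handlers (capabilities, OpMemoryModel) write b->shader->info,
+ * so the shader must be created before those instructions are processed.
+ */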
+ b->shader = nir_shader_create(b, stage, nir_options, NULL);
+
/* Handle all the preamble instructions */
words = vtn_foreach_instruction(b, words, word_end,
vtn_handle_preamble_instruction);
return NULL;
}
- b->shader = nir_shader_create(b, stage, nir_options, NULL);
-
/* Set shader info defaults */
b->shader->info.gs.invocations = 1;
- /* Parse execution modes */
- vtn_foreach_execution_mode(b, b->entry_point,
- vtn_handle_execution_mode, NULL);
-
b->specializations = spec;
b->num_specializations = num_spec;
words = vtn_foreach_instruction(b, words, word_end,
vtn_handle_variable_or_type_instruction);
+ /* Parse execution modes. This happens after the constants have been
+ * handled so that modes like LocalSizeId can look up their operands.
+ */
+ vtn_foreach_execution_mode(b, b->entry_point,
+ vtn_handle_execution_mode, NULL);
+
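+ /* Per the SPIR-V spec, a constant decorated with the WorkgroupSize builtin
+ * takes precedence over any LocalSize execution mode.
+ */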
+ if (b->workgroup_size_builtin) {
+ vtn_assert(b->workgroup_size_builtin->type->type ==
+ glsl_vector_type(GLSL_TYPE_UINT, 3));
+
+ nir_const_value *const_size =
+ b->workgroup_size_builtin->constant->values[0];
+
+ b->shader->info.cs.local_size[0] = const_size[0].u32;
+ b->shader->info.cs.local_size[1] = const_size[1].u32;
+ b->shader->info.cs.local_size[2] = const_size[2].u32;
+ }
+
/* Set types on all vtn_values */
vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);
progress = false;
foreach_list_typed(struct vtn_function, func, node, &b->functions) {
if (func->referenced && !func->emitted) {
- b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ b->const_table = _mesa_pointer_hash_table_create(b);
vtn_function_emit(b, func, vtn_handle_body_instruction);
progress = true;
}
} while (progress);
+ vtn_assert(b->entry_point->value_type == vtn_value_type_function);
+ nir_function *entry_point = b->entry_point->func->impl->function;
+ vtn_assert(entry_point);
+
+ /* Post-process entry points with input params. */
+ if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
+ entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);
+
+ entry_point->is_entrypoint = true;
+
+ /* When multiple shader stages exist in the same SPIR-V module, we
+ * generate input and output variables for every stage, in the same
+ * NIR program. These dead variables can be invalid NIR. For example,
+ * TCS outputs must be per-vertex arrays (or decorated 'patch'), while
+ * VS output variables wouldn't be.
+ *
+ * To ensure we have valid NIR, we eliminate any dead inputs and outputs
+ * right away. In order to do so, we must lower any constant initializers
+ * on outputs so nir_remove_dead_variables sees that they're written to.
+ */
+ nir_lower_constant_initializers(b->shader, nir_var_shader_out);
+ nir_remove_dead_variables(b->shader,
+ nir_var_shader_in | nir_var_shader_out);
+
/* We sometimes generate bogus derefs that, while never used, give the
* validator a bit of heartburn. Run dead code to get rid of them.
*/
nir_opt_dce(b->shader);
- vtn_assert(b->entry_point->value_type == vtn_value_type_function);
- nir_function *entry_point = b->entry_point->func->impl->function;
- vtn_assert(entry_point);
-
/* Unparent the shader from the vtn_builder before we delete the builder */
ralloc_steal(NULL, b->shader);
+ nir_shader *shader = b->shader;
ralloc_free(b);
- return entry_point;
+ return shader;
}