X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fcompiler%2Fspirv%2Fspirv_to_nir.c;h=7dc6bc914d6f9374440702959f9d0f54e5758fb1;hb=44227453ec03f5462f1cff5760909a9dba95c61a;hp=b639dcdc939cf623897ef8b5710d3e8f51128709;hpb=936f49268e1a3906130d213fe859b13e85fe0c53;p=mesa.git

diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index b639dcdc939..7dc6bc914d6 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -29,8 +29,11 @@
 #include "nir/nir_vla.h"
 #include "nir/nir_control_flow.h"
 #include "nir/nir_constant_expressions.h"
+#include "nir/nir_deref.h"
 #include "spirv_info.h"
 
+#include <stdio.h>
+
 void
 vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *message)
@@ -94,6 +97,27 @@ vtn_log_err(struct vtn_builder *b,
    ralloc_free(msg);
 }
 
+static void
+vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
+{
+   static int idx = 0;
+
+   char filename[1024];
+   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
+                      path, prefix, idx++);
+   if (len < 0 || len >= sizeof(filename))
+      return;
+
+   FILE *f = fopen(filename, "w");
+   if (f == NULL)
+      return;
+
+   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
+   fclose(f);
+
+   vtn_info("SPIR-V shader dumped to %s", filename);
+}
+
 void
 _vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
           const char *fmt, ...)
@@ -106,6 +130,18 @@ _vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
    va_end(args);
 }
 
+void
+_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
+         const char *fmt, ...)
+{
+   va_list args;
+
+   va_start(args, fmt);
+   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
+               file, line, fmt, args);
+   va_end(args);
+}
+
 void
 _vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
           const char *fmt, ...)
@@ -117,6 +153,10 @@ _vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
                file, line, fmt, args);
    va_end(args);
 
+   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
+   if (dump_path)
+      vtn_dump_shader(b, dump_path, "fail");
+
    longjmp(b->fail_jump, 1);
 }
 
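A quick usage sketch for the new dump hook (the application name and target directory are hypothetical; the directory must already exist, since vtn_dump_shader only fopen()s the file and never creates directories):

/*
 *    $ mkdir -p /tmp/spv
 *    $ MESA_SPIRV_FAIL_DUMP_PATH=/tmp/spv ./my-vulkan-app
 *
 * Each vtn_fail() then writes the raw word stream to /tmp/spv/fail-<N>.spirv,
 * which a disassembler such as spirv-dis can read directly.
 */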
of OpMemberDecorate too large"); break; case SpvOpExecutionMode: dec->scope = VTN_DEC_EXECUTION_MODE; break; default: - vtn_fail("Invalid decoration opcode"); + unreachable("Invalid decoration opcode"); } dec->decoration = *(w++); dec->literals = w; @@ -474,6 +541,8 @@ vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode, dec->scope = VTN_DEC_DECORATION; } else { dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w); + vtn_fail_if(dec->scope < 0, /* Check for overflow */ + "Member argument of OpGroupMemberDecorate too large"); } /* Link into the list */ @@ -484,7 +553,7 @@ vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode, } default: - vtn_fail("Unhandled opcode"); + unreachable("Unhandled opcode"); } } @@ -494,6 +563,58 @@ struct member_decoration_ctx { struct vtn_type *type; }; +/** Returns true if two types are "compatible", i.e. you can do an OpLoad, + * OpStore, or OpCopyMemory between them without breaking anything. + * Technically, the SPIR-V rules require the exact same type ID but this lets + * us internally be a bit looser. + */ +bool +vtn_types_compatible(struct vtn_builder *b, + struct vtn_type *t1, struct vtn_type *t2) +{ + if (t1->id == t2->id) + return true; + + if (t1->base_type != t2->base_type) + return false; + + switch (t1->base_type) { + case vtn_base_type_void: + case vtn_base_type_scalar: + case vtn_base_type_vector: + case vtn_base_type_matrix: + case vtn_base_type_image: + case vtn_base_type_sampler: + case vtn_base_type_sampled_image: + return t1->type == t2->type; + + case vtn_base_type_array: + return t1->length == t2->length && + vtn_types_compatible(b, t1->array_element, t2->array_element); + + case vtn_base_type_pointer: + return vtn_types_compatible(b, t1->deref, t2->deref); + + case vtn_base_type_struct: + if (t1->length != t2->length) + return false; + + for (unsigned i = 0; i < t1->length; i++) { + if (!vtn_types_compatible(b, t1->members[i], t2->members[i])) + return false; + } + return true; + + case vtn_base_type_function: + /* This case shouldn't get hit since you can't copy around function + * types. Just require them to be identical. + */ + return false; + } + + vtn_fail("Invalid base type"); +} + /* does a shallow copy of a vtn_type */ static struct vtn_type * @@ -551,6 +672,16 @@ mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member) return type; } +static void +vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type, + int member, enum gl_access_qualifier access) +{ + type->members[member] = vtn_type_copy(b, type->members[member]); + type = type->members[member]; + + type->access |= access; +} + static void struct_member_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member, @@ -561,16 +692,24 @@ struct_member_decoration_cb(struct vtn_builder *b, if (member < 0) return; - vtn_assert(member < ctx->num_fields); + assert(member < ctx->num_fields); switch (dec->decoration) { + case SpvDecorationRelaxedPrecision: + case SpvDecorationUniform: + break; /* FIXME: Do nothing with this for now. */ case SpvDecorationNonWritable: + vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE); + break; case SpvDecorationNonReadable: - case SpvDecorationRelaxedPrecision: + vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE); + break; case SpvDecorationVolatile: + vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE); + break; case SpvDecorationCoherent: - case SpvDecorationUniform: - break; /* FIXME: Do nothing with this for now. 
+      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
+      break;
    case SpvDecorationNoPerspective:
       ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
       break;
@@ -648,6 +787,10 @@ struct_member_decoration_cb(struct vtn_builder *b,
               spirv_decoration_to_string(dec->decoration));
       break;
 
+   case SpvDecorationHlslSemanticGOOGLE:
+      /* HLSL semantic decorations can safely be ignored by the driver. */
+      break;
+
    default:
       vtn_fail("Unhandled decoration");
    }
@@ -664,7 +807,10 @@ struct_member_matrix_stride_cb(struct vtn_builder *b,
 {
    if (dec->decoration != SpvDecorationMatrixStride)
       return;
-   vtn_assert(member >= 0);
+
+   vtn_fail_if(member < 0,
+               "The MatrixStride decoration is only allowed on members "
+               "of OpTypeStruct");
 
    struct member_decoration_ctx *ctx = void_ctx;
@@ -686,8 +832,12 @@ type_decoration_cb(struct vtn_builder *b,
 {
    struct vtn_type *type = val->type;
 
-   if (member != -1)
+   if (member != -1) {
+      /* This should have been handled by OpTypeStruct */
+      assert(val->type->base_type == vtn_base_type_struct);
+      assert(member >= 0 && member < val->type->length);
       return;
+   }
 
    switch (dec->decoration) {
    case SpvDecorationArrayStride:
@@ -723,16 +873,24 @@ type_decoration_cb(struct vtn_builder *b,
    case SpvDecorationNonWritable:
    case SpvDecorationNonReadable:
    case SpvDecorationUniform:
-   case SpvDecorationStream:
    case SpvDecorationLocation:
    case SpvDecorationComponent:
    case SpvDecorationOffset:
    case SpvDecorationXfbBuffer:
    case SpvDecorationXfbStride:
+   case SpvDecorationHlslSemanticGOOGLE:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
      break;
 
+   case SpvDecorationStream:
+      /* We don't need to do anything here, as the stream is filled in when
+       * applying the decoration to a variable; just check that, when it is
+       * not on a struct member, it is on a struct.
+       */
+      vtn_assert(type->base_type == vtn_base_type_struct);
+      break;
+
    case SpvDecorationRelaxedPrecision:
    case SpvDecorationSpecId:
    case SpvDecorationInvariant:
@@ -827,7 +985,6 @@ vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type,
 
    case vtn_base_type_vector: {
       uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
-      assert(type->length > 0 && type->length <= 4);
       unsigned align_comps = type->length == 3 ? 4 : type->length;
       *size_out = comp_size * type->length,
      *align_out = comp_size * align_comps;
@@ -878,7 +1035,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
 
    struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
    val->type = rzalloc(b, struct vtn_type);
-   val->type->val = val;
+   val->type->id = w[1];
 
    switch (opcode) {
    case SpvOpTypeVoid:
@@ -904,6 +1061,9 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
       case 16:
          val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         break;
+      case 8:
+         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
+         break;
       default:
          vtn_fail("Invalid int bit size");
       }
@@ -937,7 +1097,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
       vtn_fail_if(base->base_type != vtn_base_type_scalar,
                   "Base type for OpTypeVector must be a scalar");
-      vtn_fail_if(elems < 2 || elems > 4,
+      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                   "Invalid component count for OpTypeVector");
 
       val->type->base_type = vtn_base_type_vector;
@@ -1057,28 +1217,38 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
          val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
       }
 
-      if (storage_class == SpvStorageClassWorkgroup &&
-          b->options->lower_workgroup_access_to_offsets) {
-         uint32_t size, align;
-         val->type->deref = vtn_type_layout_std430(b, val->type->deref,
-                                                   &size, &align);
-         val->type->length = size;
-         val->type->align = align;
+      if (storage_class == SpvStorageClassPushConstant) {
          /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
          val->type->type = glsl_uint_type();
       }
+
+      if (storage_class == SpvStorageClassWorkgroup) {
+         /* These can actually be stored to nir_variables and used as SSA
+          * values so they need a real glsl_type.
+          */
+         val->type->type = glsl_uint_type();
+
+         if (b->options->lower_workgroup_access_to_offsets) {
+            uint32_t size, align;
+            val->type->deref = vtn_type_layout_std430(b, val->type->deref,
+                                                      &size, &align);
+            val->type->length = size;
+            val->type->align = align;
+         }
+      }
       break;
    }
 
    case SpvOpTypeImage: {
       val->type->base_type = vtn_base_type_image;
 
-      const struct glsl_type *sampled_type =
-         vtn_value(b, w[2], vtn_value_type_type)->type->type;
+      const struct vtn_type *sampled_type =
+         vtn_value(b, w[2], vtn_value_type_type)->type;
 
-      vtn_assert(glsl_type_is_vector_or_scalar(sampled_type));
+      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
+                  glsl_get_bit_size(sampled_type->type) != 32,
+                  "Sampled type of OpTypeImage must be a 32-bit scalar");
 
       enum glsl_sampler_dim dim;
       switch ((SpvDim)w[3]) {
@@ -1090,7 +1260,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
       case SpvDimBuffer:      dim = GLSL_SAMPLER_DIM_BUF;     break;
       case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
       default:
-         vtn_fail("Invalid SPIR-V Sampler dimension");
+         vtn_fail("Invalid SPIR-V image dimensionality");
       }
 
       bool is_shadow = w[4];
@@ -1115,15 +1285,16 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
 
       val->type->image_format = translate_image_format(b, format);
 
+      enum glsl_base_type sampled_base_type =
+         glsl_get_base_type(sampled_type->type);
       if (sampled == 1) {
          val->type->sampled = true;
          val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
-                                             glsl_get_base_type(sampled_type));
+                                             sampled_base_type);
       } else if (sampled == 2) {
          vtn_assert(!is_shadow);
         val->type->sampled = false;
-         val->type->type = glsl_image_type(dim, is_array,
-                                           glsl_get_base_type(sampled_type));
+         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
       } else {
          vtn_fail("We need to know if the image will be sampled");
       }
@@ -1175,6 +1346,8 @@ vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
    case GLSL_TYPE_UINT:
    case GLSL_TYPE_INT16:
    case GLSL_TYPE_UINT16:
+   case GLSL_TYPE_UINT8:
+   case GLSL_TYPE_INT8:
    case GLSL_TYPE_INT64:
    case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
@@ -1295,7 +1468,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
           opcode == SpvOpSpecConstantFalse)
         int_val = get_specialization(b, val, int_val);
 
-      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
+      val->constant->values[0].b[0] = int_val != 0;
       break;
    }
 
@@ -1314,6 +1487,9 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
       case 16:
          val->constant->values->u16[0] = w[3];
         break;
+      case 8:
+         val->constant->values->u8[0] = w[3];
+         break;
       default:
          vtn_fail("Unsupported SpvOpConstant bit size");
       }
@@ -1336,6 +1512,9 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
       case 16:
          val->constant->values[0].u16[0] = get_specialization(b, val, w[3]);
         break;
+      case 8:
+         val->constant->values[0].u8[0] = get_specialization(b, val, w[3]);
+         break;
       default:
          vtn_fail("Unsupported SpvOpSpecConstant bit size");
       }
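All of the new 8-bit and 1-bit cases follow one pattern: nir_const_value is a union of per-bit-size lane arrays, and only the array matching the value's bit size is meaningful. A minimal sketch, not taken from the patch itself:

/*
 *    nir_const_value u8_val = {0}, bool_val = {0};
 *    u8_val.u8[0]  = 0x2a;  // an 8-bit lane, cf. values->u8[0] = w[3] above
 *    bool_val.b[0] = true;  // a 1-bit boolean lane, cf. values[0].b[0] above
 */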
vtn_fail("%s must only index into composite types", + spirv_op_to_string(opcode)); } } @@ -1530,8 +1721,8 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, if (elem == -1) { val->constant = *c; } else { - unsigned num_components = glsl_get_vector_elements(type); - unsigned bit_size = glsl_get_bit_size(type); + unsigned num_components = type->length; + unsigned bit_size = glsl_get_bit_size(type->type); for (unsigned i = 0; i < num_components; i++) switch(bit_size) { case 64: @@ -1543,6 +1734,12 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, case 16: val->constant->values[0].u16[i] = (*c)->values[col].u16[elem + i]; break; + case 8: + val->constant->values[0].u8[i] = (*c)->values[col].u8[elem + i]; + break; + case 1: + val->constant->values[0].b[i] = (*c)->values[col].b[elem + i]; + break; default: vtn_fail("Invalid SpvOpCompositeExtract bit size"); } @@ -1550,12 +1747,12 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, } else { struct vtn_value *insert = vtn_value(b, w[4], vtn_value_type_constant); - vtn_assert(insert->type->type == type); + vtn_assert(insert->type == type); if (elem == -1) { *c = insert->constant; } else { - unsigned num_components = glsl_get_vector_elements(type); - unsigned bit_size = glsl_get_bit_size(type); + unsigned num_components = type->length; + unsigned bit_size = glsl_get_bit_size(type->type); for (unsigned i = 0; i < num_components; i++) switch (bit_size) { case 64: @@ -1567,6 +1764,12 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, case 16: (*c)->values[col].u16[elem + i] = insert->constant->values[0].u16[i]; break; + case 8: + (*c)->values[col].u8[elem + i] = insert->constant->values[0].u8[i]; + break; + case 1: + (*c)->values[col].b[elem + i] = insert->constant->values[0].b[i]; + break; default: vtn_fail("Invalid SpvOpCompositeInsert bit size"); } @@ -1600,16 +1803,42 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, }; nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap, - src_alu_type, - dst_alu_type); + nir_alu_type_get_type_size(src_alu_type), + nir_alu_type_get_type_size(dst_alu_type)); nir_const_value src[4]; for (unsigned i = 0; i < count - 4; i++) { - nir_constant *c = - vtn_value(b, w[4 + i], vtn_value_type_constant)->constant; + struct vtn_value *src_val = + vtn_value(b, w[4 + i], vtn_value_type_constant); + + /* If this is an unsized source, pull the bit size from the + * source; otherwise, we'll use the bit size from the destination. + */ + if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i])) + bit_size = glsl_get_bit_size(src_val->type->type); unsigned j = swap ? 
 
       val->constant->values[0] =
@@ -1636,54 +1865,6 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
    vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
 }
 
-static void
-vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
-                         const uint32_t *w, unsigned count)
-{
-   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
-   struct vtn_function *vtn_callee =
-      vtn_value(b, w[3], vtn_value_type_function)->func;
-   struct nir_function *callee = vtn_callee->impl->function;
-
-   vtn_callee->referenced = true;
-
-   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
-   for (unsigned i = 0; i < call->num_params; i++) {
-      unsigned arg_id = w[4 + i];
-      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
-      if (arg->value_type == vtn_value_type_pointer &&
-          arg->pointer->ptr_type->type == NULL) {
-         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
-         call->params[i] = nir_deref_var_clone(d, call);
-      } else {
-         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
-
-         /* Make a temporary to store the argument in */
-         nir_variable *tmp =
-            nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
-         call->params[i] = nir_deref_var_create(call, tmp);
-
-         vtn_local_store(b, arg_ssa, call->params[i]);
-      }
-   }
-
-   nir_variable *out_tmp = NULL;
-   vtn_assert(res_type->type == callee->return_type);
-   if (!glsl_type_is_void(callee->return_type)) {
-      out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
-                                          "out_tmp");
-      call->return_deref = nir_deref_var_create(call, out_tmp);
-   }
-
-   nir_builder_instr_insert(&b->nb, &call->instr);
-
-   if (glsl_type_is_void(callee->return_type)) {
-      vtn_push_value(b, w[2], vtn_value_type_undef);
-   } else {
-      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
-   }
-}
-
 struct vtn_ssa_value *
 vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
 {
@@ -1701,6 +1882,8 @@ vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
    case GLSL_TYPE_UINT:
    case GLSL_TYPE_INT16:
    case GLSL_TYPE_UINT16:
+   case GLSL_TYPE_UINT8:
+   case GLSL_TYPE_INT8:
    case GLSL_TYPE_INT64:
    case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
@@ -1779,7 +1962,6 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
    const struct glsl_type *image_type = sampled.type->type;
    const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
    const bool is_array = glsl_sampler_type_is_array(image_type);
-   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);
 
    /* Figure out the base texture operation */
    nir_texop texop;
@@ -1832,9 +2014,41 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
       vtn_fail("Unhandled opcode");
    }
 
-   nir_tex_src srcs[8]; /* 8 should be enough */
+   nir_tex_src srcs[10]; /* 10 should be enough */
    nir_tex_src *p = srcs;
 
+   nir_deref_instr *sampler = vtn_pointer_to_deref(b, sampled.sampler);
+   nir_deref_instr *texture =
+      sampled.image ? vtn_pointer_to_deref(b, sampled.image) : sampler;
+
+   p->src = nir_src_for_ssa(&texture->dest.ssa);
+   p->src_type = nir_tex_src_texture_deref;
+   p++;
+
+   switch (texop) {
+   case nir_texop_tex:
+   case nir_texop_txb:
+   case nir_texop_txl:
+   case nir_texop_txd:
+   case nir_texop_tg4:
+      /* These operations require a sampler */
+      p->src = nir_src_for_ssa(&sampler->dest.ssa);
+      p->src_type = nir_tex_src_sampler_deref;
+      p++;
+      break;
+   case nir_texop_txf:
+   case nir_texop_txf_ms:
+   case nir_texop_txs:
+   case nir_texop_lod:
+   case nir_texop_query_levels:
+   case nir_texop_texture_samples:
+   case nir_texop_samples_identical:
+      /* These don't */
+      break;
+   case nir_texop_txf_ms_mcs:
+      vtn_fail("unexpected nir_texop_txf_ms_mcs");
+   }
+
    unsigned idx = 4;
 
    struct nir_ssa_def *coord;
@@ -1903,6 +2117,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
       break;
    }
 
+   bool is_shadow = false;
    unsigned gather_component = 0;
    switch (opcode) {
    case SpvOpImageSampleDrefImplicitLod:
@@ -1911,6 +2126,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
    case SpvOpImageSampleProjDrefExplicitLod:
    case SpvOpImageDrefGather:
       /* These all have an explicit depth value as their next source */
+      is_shadow = true;
       (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
       break;
 
@@ -1957,8 +2173,9 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
          (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
 
       if (operands & SpvImageOperandsConstOffsetsMask) {
+         nir_tex_src none = {0};
          gather_offsets = vtn_ssa_value(b, w[idx++]);
-         (*p++) = (nir_tex_src){};
+         (*p++) = none;
       }
 
       if (operands & SpvImageOperandsSampleMask) {
@@ -1966,6 +2183,13 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
          texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
       }
+
+      if (operands & SpvImageOperandsMinLodMask) {
+         vtn_assert(texop == nir_texop_tex ||
+                    texop == nir_texop_txb ||
+                    texop == nir_texop_txd);
+         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_min_lod);
+      }
    }
 
    /* We should have now consumed exactly all of the arguments */
    vtn_assert(idx == count);
@@ -1992,40 +2216,6 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
       vtn_fail("Invalid base type for sampler result");
    }
 
-   nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
-   nir_deref_var *texture;
-   if (sampled.image) {
-      nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
-      texture = image;
-   } else {
-      texture = sampler;
-   }
-
-   instr->texture = nir_deref_var_clone(texture, instr);
-
-   switch (instr->op) {
-   case nir_texop_tex:
-   case nir_texop_txb:
-   case nir_texop_txl:
-   case nir_texop_txd:
-   case nir_texop_tg4:
-      /* These operations require a sampler */
-      instr->sampler = nir_deref_var_clone(sampler, instr);
-      break;
-   case nir_texop_txf:
-   case nir_texop_txf_ms:
-   case nir_texop_txs:
-   case nir_texop_lod:
-   case nir_texop_query_levels:
-   case nir_texop_texture_samples:
-   case nir_texop_samples_identical:
-      /* These don't */
-      instr->sampler = NULL;
-      break;
-   case nir_texop_txf_ms_mcs:
-      vtn_fail("unexpected nir_texop_txf_ms_mcs");
-   }
-
    nir_ssa_dest_init(&instr->instr, &instr->dest,
                      nir_tex_instr_dest_size(instr), 32, NULL);
 
@@ -2050,8 +2240,6 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
          instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
         instrs[i]->component = instr->component;
         instrs[i]->dest_type = instr->dest_type;
-         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
-         instrs[i]->sampler = NULL;
 
         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
@@ -2147,6 +2335,18 @@ get_image_coord(struct vtn_builder *b, uint32_t value)
    return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
 }
 
+static nir_ssa_def *
+expand_to_vec4(nir_builder *b, nir_ssa_def *value)
+{
+   if (value->num_components == 4)
+      return value;
+
+   unsigned swiz[4];
+   for (unsigned i = 0; i < 4; i++)
+      swiz[i] = i < value->num_components ? i : 0;
+   return nir_swizzle(b, value, swiz, 4, false);
+}
+
 static void
 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
                  const uint32_t *w, unsigned count)
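/* expand_to_vec4 pads a short vector by repeating component 0; that is
 * harmless because the consuming image intrinsics only read the components
 * implied by the image type.  The swizzles it builds, spelled out:
 *    1 component:  (x)       -> (x, x, x, x)   swiz = {0, 0, 0, 0}
 *    2 components: (x, y)    -> (x, y, x, x)   swiz = {0, 1, 0, 0}
 *    3 components: (x, y, z) -> (x, y, z, x)   swiz = {0, 1, 2, 0}
 *    4 components: returned unchanged
 */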
@@ -2226,7 +2426,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
 
    nir_intrinsic_op op;
    switch (opcode) {
-#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
+#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
    OP(ImageQuerySize, size)
    OP(ImageRead,      load)
    OP(ImageWrite,     store)
@@ -2252,20 +2452,16 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
 
    nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
 
-   nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
-   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
+   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
+   intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
 
    /* ImageQuerySize doesn't take any extra parameters */
    if (opcode != SpvOpImageQuerySize) {
       /* The image coordinate is always 4 components but we may not have that
        * many.  Swizzle to compensate.
        */
-      unsigned swiz[4];
-      for (unsigned i = 0; i < 4; i++)
-         swiz[i] = i < image.coord->num_components ? i : 0;
-      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
-                                                   swiz, 4, false));
-      intrin->src[1] = nir_src_for_ssa(image.sample);
+      intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
+      intrin->src[2] = nir_src_for_ssa(image.sample);
    }
 
    switch (opcode) {
@@ -2274,11 +2470,15 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
    case SpvOpImageRead:
       break;
    case SpvOpAtomicStore:
-      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
-      break;
-   case SpvOpImageWrite:
-      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
+   case SpvOpImageWrite: {
+      const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
+      nir_ssa_def *value = vtn_ssa_value(b, value_id)->def;
+      /* nir_intrinsic_image_deref_store always takes a vec4 value */
+      assert(op == nir_intrinsic_image_deref_store);
+      intrin->num_components = 4;
+      intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
       break;
+   }
 
    case SpvOpAtomicCompareExchange:
    case SpvOpAtomicIIncrement:
@@ -2293,31 +2493,33 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
    case SpvOpAtomicAnd:
    case SpvOpAtomicOr:
    case SpvOpAtomicXor:
-      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
+      fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
       break;
 
    default:
       vtn_fail("Invalid image opcode");
    }
 
-   if (opcode != SpvOpImageWrite) {
+   if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
       struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
       struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
 
-      unsigned dest_components =
-         nir_intrinsic_infos[intrin->intrinsic].dest_components;
-      if (intrin->intrinsic == nir_intrinsic_image_size) {
-         dest_components = intrin->num_components =
-            glsl_get_vector_elements(type->type);
-      }
+      unsigned dest_components = glsl_get_vector_elements(type->type);
+      intrin->num_components = nir_intrinsic_infos[op].dest_components;
+      if (intrin->num_components == 0)
+         intrin->num_components = dest_components;
 
       nir_ssa_dest_init(&intrin->instr, &intrin->dest,
-                        dest_components, 32, NULL);
+                        intrin->num_components, 32, NULL);
 
       nir_builder_instr_insert(&b->nb, &intrin->instr);
 
+      nir_ssa_def *result = &intrin->dest.ssa;
+      if (intrin->num_components != dest_components)
+         result = nir_channels(&b->nb, result, (1 << dest_components) - 1);
+
       val->ssa = vtn_create_ssa_value(b, type->type);
-      val->ssa->def = &intrin->dest.ssa;
+      val->ssa->def = result;
    } else {
       nir_builder_instr_insert(&b->nb, &intrin->instr);
    }
@@ -2349,6 +2551,35 @@ get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
    }
 }
 
+static nir_intrinsic_op
+get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
+{
+   switch (opcode) {
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
+   OP(AtomicLoad,            read_deref)
+   OP(AtomicExchange,        exchange)
+   OP(AtomicCompareExchange, comp_swap)
+   OP(AtomicIIncrement,      inc_deref)
+   OP(AtomicIDecrement,      post_dec_deref)
+   OP(AtomicIAdd,            add_deref)
+   OP(AtomicISub,            add_deref)
+   OP(AtomicUMin,            min_deref)
+   OP(AtomicUMax,            max_deref)
+   OP(AtomicAnd,             and_deref)
+   OP(AtomicOr,              or_deref)
+   OP(AtomicXor,             xor_deref)
+#undef OP
+   default:
+      /* We left out AtomicStore, AtomicSMin and AtomicSMax: right now there
+       * are no NIR intrinsics for them.  At the moment, atomic counter
+       * support is only needed for ARB_gl_spirv, so we only need to support
+       * GLSL atomic counters, which are uints and don't allow direct
+       * storage.
+       */
+      unreachable("Invalid uniform atomic");
+   }
+}
 
 static nir_intrinsic_op
 get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
 {
@@ -2376,12 +2607,12 @@ get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
 }
 
 static nir_intrinsic_op
-get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
+get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
 {
    switch (opcode) {
-   case SpvOpAtomicLoad:  return nir_intrinsic_load_var;
-   case SpvOpAtomicStore: return nir_intrinsic_store_var;
-#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
+   case SpvOpAtomicLoad:  return nir_intrinsic_load_deref;
+   case SpvOpAtomicStore: return nir_intrinsic_store_deref;
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
    OP(AtomicExchange,        atomic_exchange)
    OP(AtomicCompareExchange, atomic_comp_swap)
    OP(AtomicIIncrement,      atomic_add)
@@ -2401,9 +2632,12 @@ get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
    }
 }
 
+/*
+ * Handles shared atomics, ssbo atomics and atomic counters.
+ */
 static void
-vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
-                                 const uint32_t *w, unsigned count)
+vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
+                   const uint32_t *w, unsigned count)
 {
    struct vtn_pointer *ptr;
    nir_intrinsic_instr *atomic;
@@ -2440,13 +2674,18 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
    SpvMemorySemanticsMask semantics = w[5];
    */
 
-   if (ptr->mode == vtn_variable_mode_workgroup &&
-       !b->options->lower_workgroup_access_to_offsets) {
-      nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
-      const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
-      nir_intrinsic_op op = get_var_nir_atomic_op(b, opcode);
+   /* uniform as "atomic counter uniform" */
+   if (ptr->mode == vtn_variable_mode_uniform) {
+      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
+      const struct glsl_type *deref_type = deref->type;
+      nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
       atomic = nir_intrinsic_instr_create(b->nb.shader, op);
-      atomic->variables[0] = nir_deref_var_clone(deref, atomic);
+      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+
+      /* SSBO atomics need to set up an index/offset; here we don't, as that
+       * info is already stored on the ptr->var->var nir_variable (see
+       * vtn_create_variable).
+       */
 
       switch (opcode) {
       case SpvOpAtomicLoad:
@@ -2456,7 +2695,6 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
       case SpvOpAtomicStore:
          atomic->num_components = glsl_get_vector_elements(deref_type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
-         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         break;
 
       case SpvOpAtomicExchange:
@@ -2473,7 +2711,49 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
       case SpvOpAtomicAnd:
       case SpvOpAtomicOr:
       case SpvOpAtomicXor:
-         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
+         /* Nothing: we don't need to call fill_common_atomic_sources here, as
+          * atomic counter uniforms don't have sources.
+          */
+         break;
+
+      default:
+         unreachable("Invalid SPIR-V atomic");
+
+      }
+   } else if (ptr->mode == vtn_variable_mode_workgroup &&
+              !b->options->lower_workgroup_access_to_offsets) {
+      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
+      const struct glsl_type *deref_type = deref->type;
+      nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
+      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
+      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+
+      switch (opcode) {
+      case SpvOpAtomicLoad:
+         atomic->num_components = glsl_get_vector_elements(deref_type);
+         break;
+
+      case SpvOpAtomicStore:
+         atomic->num_components = glsl_get_vector_elements(deref_type);
+         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
+         atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+         break;
+
+      case SpvOpAtomicExchange:
+      case SpvOpAtomicCompareExchange:
+      case SpvOpAtomicCompareExchangeWeak:
+      case SpvOpAtomicIIncrement:
+      case SpvOpAtomicIDecrement:
+      case SpvOpAtomicIAdd:
+      case SpvOpAtomicISub:
+      case SpvOpAtomicSMin:
+      case SpvOpAtomicUMin:
+      case SpvOpAtomicSMax:
+      case SpvOpAtomicUMax:
+      case SpvOpAtomicAnd:
+      case SpvOpAtomicOr:
+      case SpvOpAtomicXor:
+         fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
         break;
 
       default:
@@ -2482,7 +2762,7 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
       }
    } else {
       nir_ssa_def *offset, *index;
-      offset = vtn_pointer_to_offset(b, ptr, &index, NULL);
+      offset = vtn_pointer_to_offset(b, ptr, &index);
 
       nir_intrinsic_op op;
       if (ptr->mode == vtn_variable_mode_ssbo) {
@@ -2499,6 +2779,7 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
       switch (opcode) {
       case SpvOpAtomicLoad:
          atomic->num_components = glsl_get_vector_elements(ptr->type->type);
+         nir_intrinsic_set_align(atomic, 4, 0);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
         atomic->src[src++] = nir_src_for_ssa(offset);
@@ -2507,6 +2788,7 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
       case SpvOpAtomicStore:
          atomic->num_components = glsl_get_vector_elements(ptr->type->type);
         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
+         nir_intrinsic_set_align(atomic, 4, 0);
         atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
         if (ptr->mode == vtn_variable_mode_ssbo)
            atomic->src[src++] = nir_src_for_ssa(index);
@@ -2559,7 +2841,7 @@ create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
 {
    nir_op op;
    switch (num_components) {
-   case 1: op = nir_op_fmov; break;
+   case 1: op = nir_op_imov; break;
    case 2: op = nir_op_vec2; break;
    case 3: op = nir_op_vec3; break;
   case 4: op = nir_op_vec4; break;
@@ -2607,8 +2889,7 @@ vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
 nir_ssa_def *
 vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
 {
-   unsigned swiz[4] = { index };
-   return nir_swizzle(&b->nb, src, swiz, 1, true);
+   return nir_channel(&b->nb, src, index);
 }
 
 nir_ssa_def *
@@ -2822,8 +3103,9 @@ vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
 
    case SpvOpCompositeConstruct: {
       unsigned elems = count - 3;
+      assume(elems >= 1);
       if (glsl_type_is_vector_or_scalar(type)) {
-         nir_ssa_def *srcs[4];
+         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
         for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
         val->ssa->def =
@@ -2856,37 +3138,136 @@ vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
    }
 }
 
+static void
+vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
+{
+   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
+   nir_builder_instr_insert(&b->nb, &intrin->instr);
+}
+
+static void
+vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
+                        SpvMemorySemanticsMask semantics)
+{
+   static const SpvMemorySemanticsMask all_memory_semantics =
+      SpvMemorySemanticsUniformMemoryMask |
+      SpvMemorySemanticsWorkgroupMemoryMask |
+      SpvMemorySemanticsAtomicCounterMemoryMask |
+      SpvMemorySemanticsImageMemoryMask;
+
+   /* If we're not actually doing a memory barrier, bail */
+   if (!(semantics & all_memory_semantics))
+      return;
+
+   /* GL and Vulkan don't have these */
+   vtn_assert(scope != SpvScopeCrossDevice);
+
+   if (scope == SpvScopeSubgroup)
+      return; /* Nothing to do here */
+
+   if (scope == SpvScopeWorkgroup) {
+      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
+      return;
+   }
+
+   /* There are only two scopes left */
+   vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);
+
+   if ((semantics & all_memory_semantics) == all_memory_semantics) {
+      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
+      return;
+   }
+
+   /* Issue a bunch of more specific barriers */
+   uint32_t bits = semantics;
+   while (bits) {
+      SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
+      switch (semantic) {
+      case SpvMemorySemanticsUniformMemoryMask:
+         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
+         break;
+      case SpvMemorySemanticsWorkgroupMemoryMask:
+         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
+         break;
+      case SpvMemorySemanticsAtomicCounterMemoryMask:
+         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
+         break;
+      case SpvMemorySemanticsImageMemoryMask:
+         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
+         break;
+      default:
+         break;
+      }
+   }
+}
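Condensed, the mapping vtn_emit_memory_barrier implements (a summary of the code above, not an independent specification):

/*    scope Subgroup                        -> nothing (early return)
 *    scope Workgroup                       -> group_memory_barrier
 *    Invocation/Device, all semantics set  -> memory_barrier
 *    Invocation/Device, partial semantics  -> memory_barrier_buffer / _shared /
 *                                             _atomic_counter / _image,
 *                                             one intrinsic per set bit
 */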
+
 static void
 vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
 {
-   nir_intrinsic_op intrinsic_op;
    switch (opcode) {
    case SpvOpEmitVertex:
    case SpvOpEmitStreamVertex:
-      intrinsic_op = nir_intrinsic_emit_vertex;
-      break;
    case SpvOpEndPrimitive:
-   case SpvOpEndStreamPrimitive:
-      intrinsic_op = nir_intrinsic_end_primitive;
-      break;
-   case SpvOpMemoryBarrier:
-      intrinsic_op = nir_intrinsic_memory_barrier;
-      break;
-   case SpvOpControlBarrier:
-      intrinsic_op = nir_intrinsic_barrier;
+   case SpvOpEndStreamPrimitive: {
+      nir_intrinsic_op intrinsic_op;
+      switch (opcode) {
+      case SpvOpEmitVertex:
+      case SpvOpEmitStreamVertex:
+         intrinsic_op = nir_intrinsic_emit_vertex;
+         break;
+      case SpvOpEndPrimitive:
+      case SpvOpEndStreamPrimitive:
+         intrinsic_op = nir_intrinsic_end_primitive;
+         break;
+      default:
+         unreachable("Invalid opcode");
+      }
+
+      nir_intrinsic_instr *intrin =
+         nir_intrinsic_instr_create(b->shader, intrinsic_op);
+
+      switch (opcode) {
+      case SpvOpEmitStreamVertex:
+      case SpvOpEndStreamPrimitive: {
+         unsigned stream = vtn_constant_value(b, w[1])->values[0].u32[0];
+         nir_intrinsic_set_stream_id(intrin, stream);
+         break;
+      }
+
+      default:
+         break;
+      }
+
+      nir_builder_instr_insert(&b->nb, &intrin->instr);
       break;
-   default:
-      vtn_fail("unknown barrier instruction");
    }
 
-   nir_intrinsic_instr *intrin =
-      nir_intrinsic_instr_create(b->shader, intrinsic_op);
+   case SpvOpMemoryBarrier: {
+      SpvScope scope = vtn_constant_value(b, w[1])->values[0].u32[0];
+      SpvMemorySemanticsMask semantics =
+         vtn_constant_value(b, w[2])->values[0].u32[0];
+      vtn_emit_memory_barrier(b, scope, semantics);
+      return;
+   }
 
-   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
-      nir_intrinsic_set_stream_id(intrin, w[1]);
+   case SpvOpControlBarrier: {
+      SpvScope execution_scope =
+         vtn_constant_value(b, w[1])->values[0].u32[0];
+      if (execution_scope == SpvScopeWorkgroup)
+         vtn_emit_barrier(b, nir_intrinsic_barrier);
 
-   nir_builder_instr_insert(&b->nb, &intrin->instr);
+      SpvScope memory_scope =
+         vtn_constant_value(b, w[2])->values[0].u32[0];
+      SpvMemorySemanticsMask memory_semantics =
+         vtn_constant_value(b, w[3])->values[0].u32[0];
+      vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
+      break;
+   }
+
+   default:
+      unreachable("unknown barrier instruction");
+   }
 }
 
 static unsigned
@@ -2965,6 +3346,24 @@ stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
       spirv_capability_to_string(cap));                        \
    } while(0)
 
+
+void
+vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
+                       unsigned count)
+{
+   struct vtn_value *entry_point = &b->values[w[2]];
+   /* Let this be a name label regardless */
+   unsigned name_words;
+   entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
+
+   if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
+       stage_for_execution_model(b, w[1]) != b->entry_point_stage)
+      return;
+
+   vtn_assert(b->entry_point == NULL);
+   b->entry_point = entry_point;
+}
+
 static bool
 vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
@@ -2994,6 +3393,7 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
    case SpvOpSourceExtension:
    case SpvOpSourceContinued:
    case SpvOpExtension:
+   case SpvOpModuleProcessed:
       /* Unhandled, but these are for debug so that's ok. */
       break;
 
@@ -3028,29 +3428,46 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
       case SpvCapabilityStorageImageExtendedFormats:
          break;
 
-      case SpvCapabilityGeometryStreams:
       case SpvCapabilityLinkage:
       case SpvCapabilityVector16:
       case SpvCapabilityFloat16Buffer:
       case SpvCapabilityFloat16:
-      case SpvCapabilityInt64Atomics:
-      case SpvCapabilityAtomicStorage:
-      case SpvCapabilityInt16:
       case SpvCapabilityStorageImageMultisample:
       case SpvCapabilityInt8:
       case SpvCapabilitySparseResidency:
-      case SpvCapabilityMinLod:
-      case SpvCapabilityTransformFeedback:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;
 
+      case SpvCapabilityMinLod:
+         spv_check_supported(min_lod, cap);
+         break;
+
+      case SpvCapabilityAtomicStorage:
+         spv_check_supported(atomic_storage, cap);
+         break;
+
       case SpvCapabilityFloat64:
          spv_check_supported(float64, cap);
         break;
       case SpvCapabilityInt64:
          spv_check_supported(int64, cap);
         break;
+      case SpvCapabilityInt16:
+         spv_check_supported(int16, cap);
+         break;
+
+      case SpvCapabilityTransformFeedback:
+         spv_check_supported(transform_feedback, cap);
+         break;
+
+      case SpvCapabilityGeometryStreams:
+         spv_check_supported(geometry_streams, cap);
+         break;
+
+      case SpvCapabilityInt64Atomics:
+         spv_check_supported(int64_atomics, cap);
+         break;
 
       case SpvCapabilityAddresses:
       case SpvCapabilityKernel:
@@ -3087,10 +3504,41 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
          spv_check_supported(image_write_without_format, cap);
         break;
 
+      case SpvCapabilityDeviceGroup:
+         spv_check_supported(device_group, cap);
+         break;
+
       case SpvCapabilityMultiView:
          spv_check_supported(multiview, cap);
         break;
 
+      case SpvCapabilityGroupNonUniform:
+         spv_check_supported(subgroup_basic, cap);
+         break;
+
+      case SpvCapabilityGroupNonUniformVote:
+         spv_check_supported(subgroup_vote, cap);
+         break;
+
+      case SpvCapabilitySubgroupBallotKHR:
+      case SpvCapabilityGroupNonUniformBallot:
+         spv_check_supported(subgroup_ballot, cap);
+         break;
+
+      case SpvCapabilityGroupNonUniformShuffle:
+      case SpvCapabilityGroupNonUniformShuffleRelative:
+         spv_check_supported(subgroup_shuffle, cap);
+         break;
+
+      case SpvCapabilityGroupNonUniformQuad:
+         spv_check_supported(subgroup_quad, cap);
+         break;
+
+      case SpvCapabilityGroupNonUniformArithmetic:
+      case SpvCapabilityGroupNonUniformClustered:
+         spv_check_supported(subgroup_arithmetic, cap);
+         break;
+
       case SpvCapabilityVariablePointersStorageBuffer:
       case SpvCapabilityVariablePointers:
          spv_check_supported(variable_pointers, cap);
@@ -3103,6 +3551,34 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
          spv_check_supported(storage_16bit, cap);
         break;
 
+      case SpvCapabilityShaderViewportIndexLayerEXT:
+         spv_check_supported(shader_viewport_index_layer, cap);
+         break;
+
+      case SpvCapabilityStorageBuffer8BitAccess:
+      case SpvCapabilityUniformAndStorageBuffer8BitAccess:
+      case SpvCapabilityStoragePushConstant8:
+         spv_check_supported(storage_8bit, cap);
+         break;
+
+      case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
+      case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
+      case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
+         spv_check_supported(descriptor_array_dynamic_indexing, cap);
+         break;
+
+      case SpvCapabilityRuntimeDescriptorArrayEXT:
+         spv_check_supported(runtime_descriptor_array, cap);
+         break;
+
+      case SpvCapabilityStencilExportEXT:
+         spv_check_supported(stencil_export, cap);
+         break;
+
+      case SpvCapabilitySampleMaskPostDepthCoverage:
+         spv_check_supported(post_depth_coverage, cap);
+         break;
+
       default:
          vtn_fail("Unhandled capability");
       }
@@ -3119,20 +3595,9 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                  w[2] == SpvMemoryModelGLSL450);
       break;
 
-   case SpvOpEntryPoint: {
-      struct vtn_value *entry_point = &b->values[w[2]];
-      /* Let this be a name label regardless */
-      unsigned name_words;
-      entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
-
-      if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
-          stage_for_execution_model(b, w[1]) != b->entry_point_stage)
-         break;
-
-      vtn_assert(b->entry_point == NULL);
-      b->entry_point = entry_point;
+   case SpvOpEntryPoint:
+      vtn_handle_entry_point(b, w, count);
       break;
-   }
 
    case SpvOpString:
       vtn_push_value(b, w[1], vtn_value_type_string)->str =
@@ -3153,6 +3618,8 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
    case SpvOpMemberDecorate:
    case SpvOpGroupDecorate:
    case SpvOpGroupMemberDecorate:
+   case SpvOpDecorateStringGOOGLE:
+   case SpvOpMemberDecorateStringGOOGLE:
       vtn_handle_decoration(b, opcode, w, count);
       break;
 
@@ -3181,6 +3648,11 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
       b->shader->info.fs.early_fragment_tests = true;
       break;
 
+   case SpvExecutionModePostDepthCoverage:
+      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
+      b->shader->info.fs.post_depth_coverage = true;
+      break;
+
    case SpvExecutionModeInvocations:
       vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
       b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
      break;
@@ -3237,6 +3709,8 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
          vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
         b->shader->info.gs.vertices_in =
            vertices_in_from_spv_execution_mode(b, mode->exec_mode);
+         b->shader->info.gs.input_primitive =
+            gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
       }
       break;
 
@@ -3284,13 +3758,17 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
       break;
 
    case SpvExecutionModeXfb:
-      vtn_fail("Unhandled execution mode");
+      b->shader->info.has_transform_feedback_varyings = true;
       break;
 
    case SpvExecutionModeVecTypeHint:
    case SpvExecutionModeContractionOff:
       break; /* OpenCL */
 
+   case SpvExecutionModeStencilRefReplacingEXT:
+      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
+      break;
+
    default:
       vtn_fail("Unhandled execution mode");
    }
@@ -3320,6 +3798,8 @@ vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
    case SpvOpMemberDecorate:
    case SpvOpGroupDecorate:
    case SpvOpGroupMemberDecorate:
+   case SpvOpDecorateStringGOOGLE:
+   case SpvOpMemberDecorateStringGOOGLE:
       vtn_fail("Invalid opcode types and variables section");
       break;
 
@@ -3440,10 +3920,10 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
    case SpvOpImageQuerySize: {
       struct vtn_pointer *image =
          vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
-      if (image->mode == vtn_variable_mode_image) {
+      if (glsl_type_is_image(image->type->type)) {
          vtn_handle_image(b, opcode, w, count);
       } else {
-         vtn_assert(image->mode == vtn_variable_mode_sampler);
+         vtn_assert(glsl_type_is_sampler(image->type->type));
          vtn_handle_texture(b, opcode, w, count);
       }
       break;
   }
@@ -3469,7 +3949,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
          vtn_handle_image(b, opcode, w, count);
       } else {
          vtn_assert(pointer->value_type == vtn_value_type_pointer);
-         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
+         vtn_handle_atomics(b, opcode, w, count);
       }
       break;
   }
@@ -3480,7 +3960,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
          vtn_handle_image(b, opcode, w, count);
       } else {
          vtn_assert(pointer->value_type == vtn_value_type_pointer);
-         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
+         vtn_handle_atomics(b, opcode, w, count);
       }
       break;
   }
@@ -3668,6 +4148,43 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
       vtn_handle_barrier(b, opcode, w, count);
       break;
 
+   case SpvOpGroupNonUniformElect:
+   case SpvOpGroupNonUniformAll:
+   case SpvOpGroupNonUniformAny:
+   case SpvOpGroupNonUniformAllEqual:
+   case SpvOpGroupNonUniformBroadcast:
+   case SpvOpGroupNonUniformBroadcastFirst:
+   case SpvOpGroupNonUniformBallot:
+   case SpvOpGroupNonUniformInverseBallot:
+   case SpvOpGroupNonUniformBallotBitExtract:
+   case SpvOpGroupNonUniformBallotBitCount:
+   case SpvOpGroupNonUniformBallotFindLSB:
+   case SpvOpGroupNonUniformBallotFindMSB:
+   case SpvOpGroupNonUniformShuffle:
+   case SpvOpGroupNonUniformShuffleXor:
+   case SpvOpGroupNonUniformShuffleUp:
+   case SpvOpGroupNonUniformShuffleDown:
+   case SpvOpGroupNonUniformIAdd:
+   case SpvOpGroupNonUniformFAdd:
+   case SpvOpGroupNonUniformIMul:
+   case SpvOpGroupNonUniformFMul:
+   case SpvOpGroupNonUniformSMin:
+   case SpvOpGroupNonUniformUMin:
+   case SpvOpGroupNonUniformFMin:
+   case SpvOpGroupNonUniformSMax:
+   case SpvOpGroupNonUniformUMax:
+   case SpvOpGroupNonUniformFMax:
+   case SpvOpGroupNonUniformBitwiseAnd:
+   case SpvOpGroupNonUniformBitwiseOr:
+   case SpvOpGroupNonUniformBitwiseXor:
+   case SpvOpGroupNonUniformLogicalAnd:
+   case SpvOpGroupNonUniformLogicalOr:
+   case SpvOpGroupNonUniformLogicalXor:
+   case SpvOpGroupNonUniformQuadBroadcast:
+   case SpvOpGroupNonUniformQuadSwap:
+      vtn_handle_subgroup(b, opcode, w, count);
+      break;
+
    default:
       vtn_fail("Unhandled opcode");
    }
@@ -3675,16 +4192,15 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
    return true;
 }
 
-nir_function *
-spirv_to_nir(const uint32_t *words, size_t word_count,
-             struct nir_spirv_specialization *spec, unsigned num_spec,
-             gl_shader_stage stage, const char *entry_point_name,
-             const struct spirv_to_nir_options *options,
-             const nir_shader_compiler_options *nir_options)
+struct vtn_builder*
+vtn_create_builder(const uint32_t *words, size_t word_count,
+                   gl_shader_stage stage, const char *entry_point_name,
+                   const struct spirv_to_nir_options *options)
 {
-   /* Initialize the stn_builder object */
+   /* Initialize the vtn_builder object */
    struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
    b->spirv = words;
+   b->spirv_word_count = word_count;
    b->file = NULL;
    b->line = -1;
    b->col = -1;
@@ -3693,28 +4209,64 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
    b->entry_point_name = entry_point_name;
    b->options = options;
 
-   /* See also _vtn_fail() */
-   if (setjmp(b->fail_jump)) {
-      ralloc_free(b);
-      return NULL;
-   }
-
-   const uint32_t *word_end = words + word_count;
+   /*
+    * Handle the SPIR-V header (first 5 dwords).
+    * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
+    */
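/* For reference, the five words validated below form the standard SPIR-V
 * module header:
 *    words[0]   magic number; must equal SpvMagicNumber (0x07230203)
 *    words[1]   version; >= 0x10000 means SPIR-V 1.0 or later
 *    words[2]   generator magic (informational, deliberately unchecked)
 *    words[3]   ID bound; every result ID in the module is < this
 *    words[4]   reserved schema; must be 0
 */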
+   if (word_count <= 5)
+      goto fail;
 
-   /* Handle the SPIR-V header (first 4 dwords) */
-   vtn_assert(word_count > 5);
+   if (words[0] != SpvMagicNumber) {
+      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
+      goto fail;
+   }
+   if (words[1] < 0x10000) {
+      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
+      goto fail;
+   }
 
-   vtn_assert(words[0] == SpvMagicNumber);
-   vtn_assert(words[1] >= 0x10000);
    /* words[2] == generator magic */
    unsigned value_id_bound = words[3];
-   vtn_assert(words[4] == 0);
-
-   words+= 5;
+   if (words[4] != 0) {
+      vtn_err("words[4] was %u, want 0", words[4]);
+      goto fail;
+   }
 
    b->value_id_bound = value_id_bound;
    b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
 
+   return b;
+ fail:
+   ralloc_free(b);
+   return NULL;
+}
+
+nir_function *
+spirv_to_nir(const uint32_t *words, size_t word_count,
+             struct nir_spirv_specialization *spec, unsigned num_spec,
+             gl_shader_stage stage, const char *entry_point_name,
+             const struct spirv_to_nir_options *options,
+             const nir_shader_compiler_options *nir_options)
+
+{
+   const uint32_t *word_end = words + word_count;
+
+   struct vtn_builder *b = vtn_create_builder(words, word_count,
+                                              stage, entry_point_name,
+                                              options);
+
+   if (b == NULL)
+      return NULL;
+
+   /* See also _vtn_fail() */
+   if (setjmp(b->fail_jump)) {
+      ralloc_free(b);
+      return NULL;
+   }
+
+   /* Skip the SPIR-V header, handled at vtn_create_builder */
+   words += 5;
+
    /* Handle all the preamble instructions */
    words = vtn_foreach_instruction(b, words, word_end,
                                    vtn_handle_preamble_instruction);
@@ -3763,6 +4315,11 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
       }
    } while (progress);
 
+   /* We sometimes generate bogus derefs that, while never used, give the
+    * validator a bit of heartburn.  Run dead-code elimination to get rid of
+    * them.
+    */
+   nir_opt_dce(b->shader);
+
    vtn_assert(b->entry_point->value_type == vtn_value_type_function);
    nir_function *entry_point = b->entry_point->func->impl->function;
    vtn_assert(entry_point);