X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fcompiler%2Fspirv%2Fspirv_to_nir.c;h=77f49b505630e5d9391182334a0423c3635cac0e;hb=9d0ae777dd68dad682dcc7768726996639ae2684;hp=42a559122a664284c09f1d806405b331a67986b4;hpb=b617bfcccfd906c638ef6c6eb5adab857e1938e5;p=mesa.git diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c index 42a559122a6..77f49b50563 100644 --- a/src/compiler/spirv/spirv_to_nir.c +++ b/src/compiler/spirv/spirv_to_nir.c @@ -29,8 +29,11 @@ #include "nir/nir_vla.h" #include "nir/nir_control_flow.h" #include "nir/nir_constant_expressions.h" +#include "nir/nir_deref.h" #include "spirv_info.h" +#include "util/u_math.h" + #include void @@ -129,6 +132,18 @@ _vtn_warn(struct vtn_builder *b, const char *file, unsigned line, va_end(args); } +void +_vtn_err(struct vtn_builder *b, const char *file, unsigned line, + const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n", + file, line, fmt, args); + va_end(args); +} + void _vtn_fail(struct vtn_builder *b, const char *file, unsigned line, const char *fmt, ...) @@ -370,16 +385,22 @@ static void vtn_handle_extension(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { + const char *ext = (const char *)&w[2]; switch (opcode) { case SpvOpExtInstImport: { struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension); - if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) { + if (strcmp(ext, "GLSL.std.450") == 0) { val->ext_handler = vtn_handle_glsl450_instruction; - } else if ((strcmp((const char *)&w[2], "SPV_AMD_gcn_shader") == 0) - && (b->options && b->options->exts.AMD_gcn_shader)) { + } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0) + && (b->options && b->options->caps.gcn_shader)) { val->ext_handler = vtn_handle_amd_gcn_shader_instruction; + } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0) + && (b->options && b->options->caps.trinary_minmax)) { + val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction; + } else if (strcmp(ext, "OpenCL.std") == 0) { + val->ext_handler = vtn_handle_opencl_instruction; } else { - vtn_fail("Unsupported extension"); + vtn_fail("Unsupported extension: %s", ext); } break; } @@ -463,7 +484,7 @@ vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value, } } -static void +void vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { @@ -478,20 +499,26 @@ vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode, case SpvOpDecorate: case SpvOpMemberDecorate: - case SpvOpExecutionMode: { + case SpvOpDecorateStringGOOGLE: + case SpvOpMemberDecorateStringGOOGLE: + case SpvOpExecutionMode: + case SpvOpExecutionModeId: { struct vtn_value *val = vtn_untyped_value(b, target); struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration); switch (opcode) { case SpvOpDecorate: + case SpvOpDecorateStringGOOGLE: dec->scope = VTN_DEC_DECORATION; break; case SpvOpMemberDecorate: + case SpvOpMemberDecorateStringGOOGLE: dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++); vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */ "Member argument of OpMemberDecorate too large"); break; case SpvOpExecutionMode: + case SpvOpExecutionModeId: dec->scope = VTN_DEC_EXECUTION_MODE; break; default: @@ -542,6 +569,29 @@ struct member_decoration_ctx { struct vtn_type *type; }; +/** + * Returns true if the given type contains a struct decorated Block or + * BufferBlock + */ +bool 
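/* The helper below recurses through array element types and struct members.
 * Later in this patch, vtn_handle_type() uses it once all type decorations
 * have been applied, to reject a Block- or BufferBlock-decorated struct that
 * is nested anywhere inside another Block/BufferBlock struct, roughly:
 *
 *    vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
 *                "Block and BufferBlock decorations cannot decorate a "
 *                "structure type that is nested ...");
 */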
+vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type) +{ + switch (type->base_type) { + case vtn_base_type_array: + return vtn_type_contains_block(b, type->array_element); + case vtn_base_type_struct: + if (type->block || type->buffer_block) + return true; + for (unsigned i = 0; i < type->length; i++) { + if (vtn_type_contains_block(b, type->members[i])) + return true; + } + return false; + default: + return false; + } +} + /** Returns true if two types are "compatible", i.e. you can do an OpLoad, * OpStore, or OpCopyMemory between them without breaking anything. * Technically, the SPIR-V rules require the exact same type ID but this lets @@ -651,6 +701,29 @@ mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member) return type; } +static void +vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type, + int member, enum gl_access_qualifier access) +{ + type->members[member] = vtn_type_copy(b, type->members[member]); + type = type->members[member]; + + type->access |= access; +} + +static void +array_stride_decoration_cb(struct vtn_builder *b, + struct vtn_value *val, int member, + const struct vtn_decoration *dec, void *void_ctx) +{ + struct vtn_type *type = val->type; + + if (dec->decoration == SpvDecorationArrayStride) { + vtn_fail_if(dec->literals[0] == 0, "ArrayStride must be non-zero"); + type->stride = dec->literals[0]; + } +} + static void struct_member_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member, @@ -664,13 +737,21 @@ struct_member_decoration_cb(struct vtn_builder *b, assert(member < ctx->num_fields); switch (dec->decoration) { + case SpvDecorationRelaxedPrecision: + case SpvDecorationUniform: + break; /* FIXME: Do nothing with this for now. */ case SpvDecorationNonWritable: + vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE); + break; case SpvDecorationNonReadable: - case SpvDecorationRelaxedPrecision: + vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE); + break; case SpvDecorationVolatile: + vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE); + break; case SpvDecorationCoherent: - case SpvDecorationUniform: - break; /* FIXME: Do nothing with this for now. 
*/ + vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT); + break; case SpvDecorationNoPerspective: ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE; break; @@ -700,6 +781,7 @@ struct_member_decoration_cb(struct vtn_builder *b, break; case SpvDecorationOffset: ctx->type->offsets[member] = dec->literals[0]; + ctx->fields[member].offset = dec->literals[0]; break; case SpvDecorationMatrixStride: /* Handled as a second pass */ @@ -739,13 +821,26 @@ struct_member_decoration_cb(struct vtn_builder *b, break; case SpvDecorationCPacked: + if (b->shader->info.stage != MESA_SHADER_KERNEL) + vtn_warn("Decoration only allowed for CL-style kernels: %s", + spirv_decoration_to_string(dec->decoration)); + else + ctx->type->packed = true; + break; + case SpvDecorationSaturatedConversion: case SpvDecorationFuncParamAttr: case SpvDecorationFPRoundingMode: case SpvDecorationFPFastMathMode: case SpvDecorationAlignment: - vtn_warn("Decoration only allowed for CL-style kernels: %s", - spirv_decoration_to_string(dec->decoration)); + if (b->shader->info.stage != MESA_SHADER_KERNEL) { + vtn_warn("Decoration only allowed for CL-style kernels: %s", + spirv_decoration_to_string(dec->decoration)); + } + break; + + case SpvDecorationHlslSemanticGOOGLE: + /* HLSL semantic decorations can safely be ignored by the driver. */ break; default: @@ -753,6 +848,21 @@ struct_member_decoration_cb(struct vtn_builder *b, } } +/** Chases the array type all the way down to the tail and rewrites the + * glsl_types to be based off the tail's glsl_type. + */ +static void +vtn_array_type_rewrite_glsl_type(struct vtn_type *type) +{ + if (type->base_type != vtn_base_type_array) + return; + + vtn_array_type_rewrite_glsl_type(type->array_element); + + type->type = glsl_array_type(type->array_element->type, + type->length, type->stride); +} + /* Matrix strides are handled as a separate pass because we need to know * whether the matrix is row-major or not first. */ @@ -768,6 +878,7 @@ struct_member_matrix_stride_cb(struct vtn_builder *b, vtn_fail_if(member < 0, "The MatrixStride decoration is only allowed on members " "of OpTypeStruct"); + vtn_fail_if(dec->literals[0] == 0, "MatrixStride must be non-zero"); struct member_decoration_ctx *ctx = void_ctx; @@ -776,10 +887,39 @@ struct_member_matrix_stride_cb(struct vtn_builder *b, mat_type->array_element = vtn_type_copy(b, mat_type->array_element); mat_type->stride = mat_type->array_element->stride; mat_type->array_element->stride = dec->literals[0]; + + mat_type->type = glsl_explicit_matrix_type(mat_type->type, + dec->literals[0], true); + mat_type->array_element->type = glsl_get_column_type(mat_type->type); } else { vtn_assert(mat_type->array_element->stride > 0); mat_type->stride = dec->literals[0]; + + mat_type->type = glsl_explicit_matrix_type(mat_type->type, + dec->literals[0], false); } + + /* Now that we've replaced the glsl_type with a properly strided matrix + * type, rewrite the member type so that it's an array of the proper kind + * of glsl_type. 
+ */ + vtn_array_type_rewrite_glsl_type(ctx->type->members[member]); + ctx->fields[member].type = ctx->type->members[member]->type; +} + +static void +struct_block_decoration_cb(struct vtn_builder *b, + struct vtn_value *val, int member, + const struct vtn_decoration *dec, void *ctx) +{ + if (member != -1) + return; + + struct vtn_type *type = val->type; + if (dec->decoration == SpvDecorationBlock) + type->block = true; + else if (dec->decoration == SpvDecorationBufferBlock) + type->buffer_block = true; } static void @@ -798,18 +938,16 @@ type_decoration_cb(struct vtn_builder *b, switch (dec->decoration) { case SpvDecorationArrayStride: - vtn_assert(type->base_type == vtn_base_type_matrix || - type->base_type == vtn_base_type_array || + vtn_assert(type->base_type == vtn_base_type_array || type->base_type == vtn_base_type_pointer); - type->stride = dec->literals[0]; break; case SpvDecorationBlock: vtn_assert(type->base_type == vtn_base_type_struct); - type->block = true; + vtn_assert(type->block); break; case SpvDecorationBufferBlock: vtn_assert(type->base_type == vtn_base_type_struct); - type->buffer_block = true; + vtn_assert(type->buffer_block); break; case SpvDecorationGLSLShared: case SpvDecorationGLSLPacked: @@ -830,16 +968,24 @@ type_decoration_cb(struct vtn_builder *b, case SpvDecorationNonWritable: case SpvDecorationNonReadable: case SpvDecorationUniform: - case SpvDecorationStream: case SpvDecorationLocation: case SpvDecorationComponent: case SpvDecorationOffset: case SpvDecorationXfbBuffer: case SpvDecorationXfbStride: + case SpvDecorationHlslSemanticGOOGLE: vtn_warn("Decoration only allowed for struct members: %s", spirv_decoration_to_string(dec->decoration)); break; + case SpvDecorationStream: + /* We don't need to do anything here, as stream is filled up when + * aplying the decoration to a variable, just check that if it is not a + * struct member, it should be a struct. + */ + vtn_assert(type->base_type == vtn_base_type_struct); + break; + case SpvDecorationRelaxedPrecision: case SpvDecorationSpecId: case SpvDecorationInvariant: @@ -857,6 +1003,13 @@ type_decoration_cb(struct vtn_builder *b, break; case SpvDecorationCPacked: + if (b->shader->info.stage != MESA_SHADER_KERNEL) + vtn_warn("Decoration only allowed for CL-style kernels: %s", + spirv_decoration_to_string(dec->decoration)); + else + type->packed = true; + break; + case SpvDecorationSaturatedConversion: case SpvDecorationFuncParamAttr: case SpvDecorationFPRoundingMode: @@ -926,15 +1079,16 @@ vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type, { switch (type->base_type) { case vtn_base_type_scalar: { - uint32_t comp_size = glsl_get_bit_size(type->type) / 8; + uint32_t comp_size = glsl_type_is_boolean(type->type) + ? 4 : glsl_get_bit_size(type->type) / 8; *size_out = comp_size; *align_out = comp_size; return type; } case vtn_base_type_vector: { - uint32_t comp_size = glsl_get_bit_size(type->type) / 8; - assert(type->length > 0 && type->length <= 4); + uint32_t comp_size = glsl_type_is_boolean(type->type) + ? 4 : glsl_get_bit_size(type->type) / 8; unsigned align_comps = type->length == 3 ? 
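/* std430 rule: a three-component vector is aligned like a four-component
 * one, so for example a float vec3 here ends up with size 12 bytes and
 * alignment 16 bytes. */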
4 : type->length; *size_out = comp_size * type->length, *align_out = comp_size * align_comps; @@ -982,10 +1136,18 @@ static void vtn_handle_type(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) { - struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type); + struct vtn_value *val = NULL; - val->type = rzalloc(b, struct vtn_type); - val->type->id = w[1]; + /* In order to properly handle forward declarations, we have to defer + * allocation for pointer types. + */ + if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) { + val = vtn_push_value(b, w[1], vtn_value_type_type); + vtn_fail_if(val->type != NULL, + "Only pointers can have forward declarations"); + val->type = rzalloc(b, struct vtn_type); + val->type->id = w[1]; + } switch (opcode) { case SpvOpTypeVoid: @@ -1047,13 +1209,14 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, vtn_fail_if(base->base_type != vtn_base_type_scalar, "Base type for OpTypeVector must be a scalar"); - vtn_fail_if(elems < 2 || elems > 4, + vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16), "Invalid component count for OpTypeVector"); val->type->base_type = vtn_base_type_vector; val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems); val->type->length = elems; - val->type->stride = glsl_get_bit_size(base->type) / 8; + val->type->stride = glsl_type_is_boolean(val->type->type) + ? 4 : glsl_get_bit_size(base->type) / 8; val->type->array_element = base; break; } @@ -1095,9 +1258,13 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, } val->type->base_type = vtn_base_type_array; - val->type->type = glsl_array_type(array_element->type, val->type->length); val->type->array_element = array_element; - val->type->stride = 0; + if (b->shader->info.stage == MESA_SHADER_KERNEL) + val->type->stride = glsl_get_cl_size(array_element->type); + + vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL); + val->type->type = glsl_array_type(array_element->type, val->type->length, + val->type->stride); break; } @@ -1107,6 +1274,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, val->type->length = num_fields; val->type->members = ralloc_array(b, struct vtn_type *, num_fields); val->type->offsets = ralloc_array(b, unsigned, num_fields); + val->type->packed = false; NIR_VLA(struct glsl_struct_field, fields, count); for (unsigned i = 0; i < num_fields; i++) { @@ -1116,9 +1284,19 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, .type = val->type->members[i]->type, .name = ralloc_asprintf(b, "field%d", i), .location = -1, + .offset = -1, }; } + if (b->shader->info.stage == MESA_SHADER_KERNEL) { + unsigned offset = 0; + for (unsigned i = 0; i < num_fields; i++) { + offset = align(offset, glsl_get_cl_alignment(fields[i].type)); + fields[i].offset = offset; + offset += glsl_get_cl_size(fields[i].type); + } + } + struct member_decoration_ctx ctx = { .num_fields = num_fields, .fields = fields, @@ -1128,9 +1306,21 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx); vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx); - const char *name = val->name ? val->name : "struct"; + vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL); + + const char *name = val->name; - val->type->type = glsl_struct_type(fields, num_fields, name); + if (val->type->block || val->type->buffer_block) { + /* Packing will be ignored since types coming from SPIR-V are + * explicitly laid out. 
+ */ + val->type->type = glsl_interface_type(fields, num_fields, + /* packing */ 0, false, + name ? name : "block"); + } else { + val->type->type = glsl_struct_type(fields, num_fields, + name ? name : "struct", false); + } break; } @@ -1150,34 +1340,93 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, break; } - case SpvOpTypePointer: { + case SpvOpTypePointer: + case SpvOpTypeForwardPointer: { + /* We can't blindly push the value because it might be a forward + * declaration. + */ + val = vtn_untyped_value(b, w[1]); + SpvStorageClass storage_class = w[2]; - struct vtn_type *deref_type = - vtn_value(b, w[3], vtn_value_type_type)->type; - val->type->base_type = vtn_base_type_pointer; - val->type->storage_class = storage_class; - val->type->deref = deref_type; + if (val->value_type == vtn_value_type_invalid) { + val->value_type = vtn_value_type_type; + val->type = rzalloc(b, struct vtn_type); + val->type->id = w[1]; + val->type->base_type = vtn_base_type_pointer; + val->type->storage_class = storage_class; - if (storage_class == SpvStorageClassUniform || - storage_class == SpvStorageClassStorageBuffer) { /* These can actually be stored to nir_variables and used as SSA * values so they need a real glsl_type. */ - val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2); + switch (storage_class) { + case SpvStorageClassUniform: + val->type->type = b->options->ubo_ptr_type; + break; + case SpvStorageClassStorageBuffer: + val->type->type = b->options->ssbo_ptr_type; + break; + case SpvStorageClassPhysicalStorageBufferEXT: + val->type->type = b->options->phys_ssbo_ptr_type; + break; + case SpvStorageClassPushConstant: + val->type->type = b->options->push_const_ptr_type; + break; + case SpvStorageClassWorkgroup: + val->type->type = b->options->shared_ptr_type; + break; + case SpvStorageClassCrossWorkgroup: + val->type->type = b->options->global_ptr_type; + break; + case SpvStorageClassFunction: + if (b->physical_ptrs) + val->type->type = b->options->temp_ptr_type; + break; + default: + /* In this case, no variable pointers are allowed so all deref + * chains are complete back to the variable and it doesn't matter + * what type gets used so we leave it NULL. + */ + break; + } + } else { + vtn_fail_if(val->type->storage_class != storage_class, + "The storage classes of an OpTypePointer and any " + "OpTypeForwardPointers that provide forward " + "declarations of it must match."); } - if (storage_class == SpvStorageClassWorkgroup && - b->options->lower_workgroup_access_to_offsets) { - uint32_t size, align; - val->type->deref = vtn_type_layout_std430(b, val->type->deref, - &size, &align); - val->type->length = size; - val->type->align = align; - /* These can actually be stored to nir_variables and used as SSA - * values so they need a real glsl_type. 
- */ - val->type->type = glsl_uint_type(); + if (opcode == SpvOpTypePointer) { + vtn_fail_if(val->type->deref != NULL, + "While OpTypeForwardPointer can be used to provide a " + "forward declaration of a pointer, OpTypePointer can " + "only be used once for a given id."); + + val->type->deref = vtn_value(b, w[3], vtn_value_type_type)->type; + + vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL); + + if (b->physical_ptrs) { + switch (storage_class) { + case SpvStorageClassFunction: + case SpvStorageClassWorkgroup: + case SpvStorageClassCrossWorkgroup: + val->type->stride = align(glsl_get_cl_size(val->type->deref->type), + glsl_get_cl_alignment(val->type->deref->type)); + break; + default: + break; + } + } + + if (storage_class == SpvStorageClassWorkgroup && + b->options->lower_workgroup_access_to_offsets) { + uint32_t size, align; + val->type->deref = vtn_type_layout_std430(b, val->type->deref, + &size, &align); + val->type->length = size; + val->type->align = align; + } } break; } @@ -1205,7 +1454,9 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, vtn_fail("Invalid SPIR-V image dimensionality"); } - bool is_shadow = w[4]; + /* w[4]: as per Vulkan spec "Validation Rules within a Module", + * The “Depth” operand of OpTypeImage is ignored. + */ bool is_array = w[5]; bool multisampled = w[6]; unsigned sampled = w[7]; @@ -1231,10 +1482,9 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, glsl_get_base_type(sampled_type->type); if (sampled == 1) { val->type->sampled = true; - val->type->type = glsl_sampler_type(dim, is_shadow, is_array, + val->type->type = glsl_sampler_type(dim, false, is_array, sampled_base_type); } else if (sampled == 2) { - vtn_assert(!is_shadow); val->type->sampled = false; val->type->type = glsl_image_type(dim, is_array, sampled_base_type); } else { @@ -1270,6 +1520,17 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode, } vtn_foreach_decoration(b, val, type_decoration_cb, NULL); + + if (val->type->base_type == vtn_base_type_struct && + (val->type->block || val->type->buffer_block)) { + for (unsigned i = 0; i < val->type->length; i++) { + vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]), + "Block and BufferBlock decorations cannot decorate a " + "structure type that is nested at any level inside " + "another structure type decorated with Block or " + "BufferBlock."); + } + } } static nir_constant * @@ -1382,10 +1643,7 @@ handle_workgroup_size_decoration_cb(struct vtn_builder *b, return; vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3)); - - b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0]; - b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1]; - b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2]; + b->workgroup_size_builtin = val; } static void @@ -1410,7 +1668,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, opcode == SpvOpSpecConstantFalse) int_val = get_specialization(b, val, int_val); - val->constant->values[0].u32[0] = int_val ? 
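/* Boolean constants are now stored in the 1-bit .b[] channel of
 * nir_const_value (e.g. "c->values[0].b[0] = int_val != 0;") rather than
 * packing NIR_TRUE/NIR_FALSE into .u32[]; the matching "case 1:" handling
 * is added to OpConstantComposite, OpCompositeExtract and OpCompositeInsert
 * below. */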
NIR_TRUE : NIR_FALSE; + val->constant->values[0].b[0] = int_val != 0; break; } @@ -1471,8 +1729,19 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, spirv_op_to_string(opcode), elem_count, val->type->length); nir_constant **elems = ralloc_array(b, nir_constant *, elem_count); - for (unsigned i = 0; i < elem_count; i++) - elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant; + for (unsigned i = 0; i < elem_count; i++) { + struct vtn_value *val = vtn_untyped_value(b, w[i + 3]); + + if (val->value_type == vtn_value_type_constant) { + elems[i] = val->constant; + } else { + vtn_fail_if(val->value_type != vtn_value_type_undef, + "only constants or undefs allowed for " + "SpvOpConstantComposite"); + /* to make it easier, just insert a NULL constant for now */ + elems[i] = vtn_null_constant(b, val->type->type); + } + } switch (val->type->base_type) { case vtn_base_type_vector: { @@ -1492,6 +1761,9 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, case 8: val->constant->values[0].u8[i] = elems[i]->values[0].u8[0]; break; + case 1: + val->constant->values[0].b[i] = elems[i]->values[0].b[0]; + break; default: vtn_fail("Invalid SpvOpConstantComposite bit size"); } @@ -1665,6 +1937,9 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, case 8: val->constant->values[0].u8[i] = (*c)->values[col].u8[elem + i]; break; + case 1: + val->constant->values[0].b[i] = (*c)->values[col].b[elem + i]; + break; default: vtn_fail("Invalid SpvOpCompositeExtract bit size"); } @@ -1692,6 +1967,9 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, case 8: (*c)->values[col].u8[elem + i] = insert->constant->values[0].u8[i]; break; + case 1: + (*c)->values[col].b[elem + i] = insert->constant->values[0].b[i]; + break; default: vtn_fail("Invalid SpvOpCompositeInsert bit size"); } @@ -1725,16 +2003,42 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, }; nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap, - src_alu_type, - dst_alu_type); + nir_alu_type_get_type_size(src_alu_type), + nir_alu_type_get_type_size(dst_alu_type)); nir_const_value src[4]; for (unsigned i = 0; i < count - 4; i++) { - nir_constant *c = - vtn_value(b, w[4 + i], vtn_value_type_constant)->constant; + struct vtn_value *src_val = + vtn_value(b, w[4 + i], vtn_value_type_constant); + + /* If this is an unsized source, pull the bit size from the + * source; otherwise, we'll use the bit size from the destination. + */ + if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i])) + bit_size = glsl_get_bit_size(src_val->type->type); unsigned j = swap ? 
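/* "swap" means the chosen NIR opcode takes its two sources in the opposite
 * order from the SPIR-V instruction, so source i is written into slot 1 - i.
 * The fixup that follows copies constant shift counts into the .u32[]
 * channels because NIR shift opcodes always take a 32-bit shift amount in
 * src[1]. */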
1 - i : i; - src[j] = c->values[0]; + src[j] = src_val->constant->values[0]; + } + + /* fix up fixed size sources */ + switch (op) { + case nir_op_ishl: + case nir_op_ishr: + case nir_op_ushr: { + if (bit_size == 32) + break; + for (unsigned i = 0; i < num_components; ++i) { + switch (bit_size) { + case 64: src[1].u32[i] = src[1].u64[i]; break; + case 16: src[1].u32[i] = src[1].u16[i]; break; + case 8: src[1].u32[i] = src[1].u8[i]; break; + } + } + break; + } + default: + break; } val->constant->values[0] = @@ -1761,54 +2065,6 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode, vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL); } -static void -vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode, - const uint32_t *w, unsigned count) -{ - struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type; - struct vtn_function *vtn_callee = - vtn_value(b, w[3], vtn_value_type_function)->func; - struct nir_function *callee = vtn_callee->impl->function; - - vtn_callee->referenced = true; - - nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee); - for (unsigned i = 0; i < call->num_params; i++) { - unsigned arg_id = w[4 + i]; - struct vtn_value *arg = vtn_untyped_value(b, arg_id); - if (arg->value_type == vtn_value_type_pointer && - arg->pointer->ptr_type->type == NULL) { - nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer); - call->params[i] = nir_deref_var_clone(d, call); - } else { - struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id); - - /* Make a temporary to store the argument in */ - nir_variable *tmp = - nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp"); - call->params[i] = nir_deref_var_create(call, tmp); - - vtn_local_store(b, arg_ssa, call->params[i]); - } - } - - nir_variable *out_tmp = NULL; - vtn_assert(res_type->type == callee->return_type); - if (!glsl_type_is_void(callee->return_type)) { - out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type, - "out_tmp"); - call->return_deref = nir_deref_var_create(call, out_tmp); - } - - nir_builder_instr_insert(&b->nb, &call->instr); - - if (glsl_type_is_void(callee->return_type)) { - vtn_push_value(b, w[2], vtn_value_type_undef); - } else { - vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref)); - } -} - struct vtn_ssa_value * vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type) { @@ -1840,6 +2096,7 @@ vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type) child_type = glsl_get_array_element(type); break; case GLSL_TYPE_STRUCT: + case GLSL_TYPE_INTERFACE: child_type = glsl_get_struct_field(type, i); break; default: @@ -1906,7 +2163,6 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, const struct glsl_type *image_type = sampled.type->type; const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type); const bool is_array = glsl_sampler_type_is_array(image_type); - const bool is_shadow = glsl_sampler_type_is_shadow(image_type); /* Figure out the base texture operation */ nir_texop texop; @@ -1959,9 +2215,41 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, vtn_fail("Unhandled opcode"); } - nir_tex_src srcs[8]; /* 8 should be enough */ + nir_tex_src srcs[10]; /* 10 should be enough */ nir_tex_src *p = srcs; + nir_deref_instr *sampler = vtn_pointer_to_deref(b, sampled.sampler); + nir_deref_instr *texture = + sampled.image ? 
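/* With OpSampledImage the image and the sampler come from two different
 * pointers, so the texture deref is taken from sampled.image; for a
 * combined image/sampler both tex sources refer to the same deref. Only the
 * sampling opcodes (tex, txb, txl, txd, tg4, lod) also get a sampler_deref
 * source; fetches and queries do not. */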
vtn_pointer_to_deref(b, sampled.image) : sampler; + + p->src = nir_src_for_ssa(&texture->dest.ssa); + p->src_type = nir_tex_src_texture_deref; + p++; + + switch (texop) { + case nir_texop_tex: + case nir_texop_txb: + case nir_texop_txl: + case nir_texop_txd: + case nir_texop_tg4: + case nir_texop_lod: + /* These operations require a sampler */ + p->src = nir_src_for_ssa(&sampler->dest.ssa); + p->src_type = nir_tex_src_sampler_deref; + p++; + break; + case nir_texop_txf: + case nir_texop_txf_ms: + case nir_texop_txs: + case nir_texop_query_levels: + case nir_texop_texture_samples: + case nir_texop_samples_identical: + /* These don't */ + break; + case nir_texop_txf_ms_mcs: + vtn_fail("unexpected nir_texop_txf_ms_mcs"); + } + unsigned idx = 4; struct nir_ssa_def *coord; @@ -2030,6 +2318,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, break; } + bool is_shadow = false; unsigned gather_component = 0; switch (opcode) { case SpvOpImageSampleDrefImplicitLod: @@ -2038,6 +2327,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, case SpvOpImageSampleProjDrefExplicitLod: case SpvOpImageDrefGather: /* These all have an explicit depth value as their next source */ + is_shadow = true; (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator); break; @@ -2056,7 +2346,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod); /* Now we need to handle some number of optional arguments */ - const struct vtn_ssa_value *gather_offsets = NULL; + struct vtn_value *gather_offsets = NULL; if (idx < count) { uint32_t operands = w[idx++]; @@ -2084,8 +2374,8 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset); if (operands & SpvImageOperandsConstOffsetsMask) { - gather_offsets = vtn_ssa_value(b, w[idx++]); - (*p++) = (nir_tex_src){}; + vtn_assert(texop == nir_texop_tg4); + gather_offsets = vtn_value(b, w[idx++], vtn_value_type_constant); } if (operands & SpvImageOperandsSampleMask) { @@ -2093,6 +2383,13 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, texop = nir_texop_txf_ms; (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index); } + + if (operands & SpvImageOperandsMinLodMask) { + vtn_assert(texop == nir_texop_tex || + texop == nir_texop_txb || + texop == nir_texop_txd); + (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_min_lod); + } } /* We should have now consumed exactly all of the arguments */ vtn_assert(idx == count); @@ -2119,105 +2416,46 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode, vtn_fail("Invalid base type for sampler result"); } - nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler); - nir_deref_var *texture; - if (sampled.image) { - nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image); - texture = image; - } else { - texture = sampler; - } - - instr->texture = nir_deref_var_clone(texture, instr); - - switch (instr->op) { - case nir_texop_tex: - case nir_texop_txb: - case nir_texop_txl: - case nir_texop_txd: - case nir_texop_tg4: - /* These operations require a sampler */ - instr->sampler = nir_deref_var_clone(sampler, instr); - break; - case nir_texop_txf: - case nir_texop_txf_ms: - case nir_texop_txs: - case nir_texop_lod: - case nir_texop_query_levels: - case nir_texop_texture_samples: - case nir_texop_samples_identical: - /* These don't */ - instr->sampler = NULL; - break; - case nir_texop_txf_ms_mcs: - vtn_fail("unexpected nir_texop_txf_ms_mcs"); - } - nir_ssa_dest_init(&instr->instr, &instr->dest, 
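/* ConstOffsets (used with OpImageGather/OpImageDrefGather) must be a
 * constant array of four integer 2-vectors; the values are now copied
 * straight into instr->tg4_offsets instead of emitting four separate gather
 * instructions and recombining their .w components, as the removed code
 * below used to do. */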
nir_tex_instr_dest_size(instr), 32, NULL); vtn_assert(glsl_get_vector_elements(ret_type->type) == nir_tex_instr_dest_size(instr)); - nir_ssa_def *def; - nir_instr *instruction; if (gather_offsets) { - vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY); - vtn_assert(glsl_get_length(gather_offsets->type) == 4); - nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL}; - - /* Copy the current instruction 4x */ - for (uint32_t i = 1; i < 4; i++) { - instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs); - instrs[i]->op = instr->op; - instrs[i]->coord_components = instr->coord_components; - instrs[i]->sampler_dim = instr->sampler_dim; - instrs[i]->is_array = instr->is_array; - instrs[i]->is_shadow = instr->is_shadow; - instrs[i]->is_new_style_shadow = instr->is_new_style_shadow; - instrs[i]->component = instr->component; - instrs[i]->dest_type = instr->dest_type; - instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]); - instrs[i]->sampler = NULL; - - memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src)); - - nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest, - nir_tex_instr_dest_size(instr), 32, NULL); - } - - /* Fill in the last argument with the offset from the passed in offsets - * and insert the instruction into the stream. - */ + vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array || + gather_offsets->type->length != 4, + "ConstOffsets must be an array of size four of vectors " + "of two integer components"); + + struct vtn_type *vec_type = gather_offsets->type->array_element; + vtn_fail_if(vec_type->base_type != vtn_base_type_vector || + vec_type->length != 2 || + !glsl_type_is_integer(vec_type->type), + "ConstOffsets must be an array of size four of vectors " + "of two integer components"); + + unsigned bit_size = glsl_get_bit_size(vec_type->type); for (uint32_t i = 0; i < 4; i++) { - nir_tex_src src; - src.src = nir_src_for_ssa(gather_offsets->elems[i]->def); - src.src_type = nir_tex_src_offset; - instrs[i]->src[instrs[i]->num_srcs - 1] = src; - nir_builder_instr_insert(&b->nb, &instrs[i]->instr); - } - - /* Combine the results of the 4 instructions by taking their .w - * components - */ - nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4); - nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL); - vec4->dest.write_mask = 0xf; - for (uint32_t i = 0; i < 4; i++) { - vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa); - vec4->src[i].swizzle[0] = 3; + const nir_const_value *cvec = + &gather_offsets->constant->elements[i]->values[0]; + for (uint32_t j = 0; j < 2; j++) { + switch (bit_size) { + case 8: instr->tg4_offsets[i][j] = cvec->i8[j]; break; + case 16: instr->tg4_offsets[i][j] = cvec->i16[j]; break; + case 32: instr->tg4_offsets[i][j] = cvec->i32[j]; break; + case 64: instr->tg4_offsets[i][j] = cvec->i64[j]; break; + default: + vtn_fail("Unsupported bit size"); + } + } } - def = &vec4->dest.dest.ssa; - instruction = &vec4->instr; - } else { - def = &instr->dest.ssa; - instruction = &instr->instr; } val->ssa = vtn_create_ssa_value(b, ret_type->type); - val->ssa->def = def; + val->ssa->def = &instr->dest.ssa; - nir_builder_instr_insert(&b->nb, instruction); + nir_builder_instr_insert(&b->nb, &instr->instr); } static void @@ -2274,6 +2512,18 @@ get_image_coord(struct vtn_builder *b, uint32_t value) return nir_swizzle(&b->nb, coord->def, swizzle, 4, false); } +static nir_ssa_def * +expand_to_vec4(nir_builder *b, nir_ssa_def *value) +{ + if (value->num_components == 4) + return value; + + 
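/* Pad the value out to four components by repeating component 0 in the
 * unused slots; NIR's image_deref intrinsics always take vec4 coordinates
 * and store values. */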
unsigned swiz[4]; + for (unsigned i = 0; i < 4; i++) + swiz[i] = i < value->num_components ? i : 0; + return nir_swizzle(b, value, swiz, 4, false); +} + static void vtn_handle_image(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) @@ -2353,7 +2603,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode, nir_intrinsic_op op; switch (opcode) { -#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break; +#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break; OP(ImageQuerySize, size) OP(ImageRead, load) OP(ImageWrite, store) @@ -2379,20 +2629,16 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode, nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op); - nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image); - intrin->variables[0] = nir_deref_var_clone(image_deref, intrin); + nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image); + intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa); /* ImageQuerySize doesn't take any extra parameters */ if (opcode != SpvOpImageQuerySize) { /* The image coordinate is always 4 components but we may not have that * many. Swizzle to compensate. */ - unsigned swiz[4]; - for (unsigned i = 0; i < 4; i++) - swiz[i] = i < image.coord->num_components ? i : 0; - intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord, - swiz, 4, false)); - intrin->src[1] = nir_src_for_ssa(image.sample); + intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord)); + intrin->src[2] = nir_src_for_ssa(image.sample); } switch (opcode) { @@ -2401,11 +2647,15 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode, case SpvOpImageRead: break; case SpvOpAtomicStore: - intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def); - break; - case SpvOpImageWrite: - intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def); + case SpvOpImageWrite: { + const uint32_t value_id = opcode == SpvOpAtomicStore ? 
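/* The value operand lives at w[4] for OpAtomicStore and at w[3] for
 * OpImageWrite; either way it is expanded to a vec4 first, since
 * nir_intrinsic_image_deref_store always takes a four-component value. */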
w[4] : w[3]; + nir_ssa_def *value = vtn_ssa_value(b, value_id)->def; + /* nir_intrinsic_image_deref_store always takes a vec4 value */ + assert(op == nir_intrinsic_image_deref_store); + intrin->num_components = 4; + intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value)); break; + } case SpvOpAtomicCompareExchange: case SpvOpAtomicIIncrement: @@ -2420,31 +2670,33 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode, case SpvOpAtomicAnd: case SpvOpAtomicOr: case SpvOpAtomicXor: - fill_common_atomic_sources(b, opcode, w, &intrin->src[2]); + fill_common_atomic_sources(b, opcode, w, &intrin->src[3]); break; default: vtn_fail("Invalid image opcode"); } - if (opcode != SpvOpImageWrite) { + if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) { struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa); struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type; - unsigned dest_components = - nir_intrinsic_infos[intrin->intrinsic].dest_components; - if (intrin->intrinsic == nir_intrinsic_image_size) { - dest_components = intrin->num_components = - glsl_get_vector_elements(type->type); - } + unsigned dest_components = glsl_get_vector_elements(type->type); + intrin->num_components = nir_intrinsic_infos[op].dest_components; + if (intrin->num_components == 0) + intrin->num_components = dest_components; nir_ssa_dest_init(&intrin->instr, &intrin->dest, - dest_components, 32, NULL); + intrin->num_components, 32, NULL); nir_builder_instr_insert(&b->nb, &intrin->instr); + nir_ssa_def *result = &intrin->dest.ssa; + if (intrin->num_components != dest_components) + result = nir_channels(&b->nb, result, (1 << dest_components) - 1); + val->ssa = vtn_create_ssa_value(b, type->type); - val->ssa->def = &intrin->dest.ssa; + val->ssa->def = result; } else { nir_builder_instr_insert(&b->nb, &intrin->instr); } @@ -2476,6 +2728,35 @@ get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode) } } +static nir_intrinsic_op +get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode) +{ + switch (opcode) { +#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N; + OP(AtomicLoad, read_deref) + OP(AtomicExchange, exchange) + OP(AtomicCompareExchange, comp_swap) + OP(AtomicIIncrement, inc_deref) + OP(AtomicIDecrement, post_dec_deref) + OP(AtomicIAdd, add_deref) + OP(AtomicISub, add_deref) + OP(AtomicUMin, min_deref) + OP(AtomicUMax, max_deref) + OP(AtomicAnd, and_deref) + OP(AtomicOr, or_deref) + OP(AtomicXor, xor_deref) +#undef OP + default: + /* We left the following out: AtomicStore, AtomicSMin and + * AtomicSmax. Right now there are not nir intrinsics for them. At this + * moment Atomic Counter support is needed for ARB_spirv support, so is + * only need to support GLSL Atomic Counters that are uints and don't + * allow direct storage. 
+ */ + unreachable("Invalid uniform atomic"); + } +} + static nir_intrinsic_op get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode) { @@ -2503,12 +2784,12 @@ get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode) } static nir_intrinsic_op -get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode) +get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode) { switch (opcode) { - case SpvOpAtomicLoad: return nir_intrinsic_load_var; - case SpvOpAtomicStore: return nir_intrinsic_store_var; -#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N; + case SpvOpAtomicLoad: return nir_intrinsic_load_deref; + case SpvOpAtomicStore: return nir_intrinsic_store_deref; +#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N; OP(AtomicExchange, atomic_exchange) OP(AtomicCompareExchange, atomic_comp_swap) OP(AtomicIIncrement, atomic_add) @@ -2528,9 +2809,12 @@ get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode) } } +/* + * Handles shared atomics, ssbo atomics and atomic counters. + */ static void -vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode, - const uint32_t *w, unsigned count) +vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode, + const uint32_t *w, unsigned count) { struct vtn_pointer *ptr; nir_intrinsic_instr *atomic; @@ -2567,13 +2851,18 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode, SpvMemorySemanticsMask semantics = w[5]; */ - if (ptr->mode == vtn_variable_mode_workgroup && - !b->options->lower_workgroup_access_to_offsets) { - nir_deref_var *deref = vtn_pointer_to_deref(b, ptr); - const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type; - nir_intrinsic_op op = get_var_nir_atomic_op(b, opcode); + /* uniform as "atomic counter uniform" */ + if (ptr->mode == vtn_variable_mode_uniform) { + nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr); + const struct glsl_type *deref_type = deref->type; + nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode); atomic = nir_intrinsic_instr_create(b->nb.shader, op); - atomic->variables[0] = nir_deref_var_clone(deref, atomic); + atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa); + + /* SSBO needs to initialize index/offset. 
In this case we don't need to, + * as that info is already stored on the ptr->var->var nir_variable (see + * vtn_create_variable) + */ switch (opcode) { case SpvOpAtomicLoad: @@ -2583,7 +2872,6 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode, case SpvOpAtomicStore: atomic->num_components = glsl_get_vector_elements(deref_type); nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1); - atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def); break; case SpvOpAtomicExchange: @@ -2600,16 +2888,18 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode, case SpvOpAtomicAnd: case SpvOpAtomicOr: case SpvOpAtomicXor: - fill_common_atomic_sources(b, opcode, w, &atomic->src[0]); + /* Nothing: we don't need to call fill_common_atomic_sources here, as + * atomic counter uniforms doesn't have sources + */ break; default: - vtn_fail("Invalid SPIR-V atomic"); + unreachable("Invalid SPIR-V atomic"); } - } else { + } else if (vtn_pointer_uses_ssa_offset(b, ptr)) { nir_ssa_def *offset, *index; - offset = vtn_pointer_to_offset(b, ptr, &index, NULL); + offset = vtn_pointer_to_offset(b, ptr, &index); nir_intrinsic_op op; if (ptr->mode == vtn_variable_mode_ssbo) { @@ -2626,6 +2916,7 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode, switch (opcode) { case SpvOpAtomicLoad: atomic->num_components = glsl_get_vector_elements(ptr->type->type); + nir_intrinsic_set_align(atomic, 4, 0); if (ptr->mode == vtn_variable_mode_ssbo) atomic->src[src++] = nir_src_for_ssa(index); atomic->src[src++] = nir_src_for_ssa(offset); @@ -2634,6 +2925,7 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode, case SpvOpAtomicStore: atomic->num_components = glsl_get_vector_elements(ptr->type->type); nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1); + nir_intrinsic_set_align(atomic, 4, 0); atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def); if (ptr->mode == vtn_variable_mode_ssbo) atomic->src[src++] = nir_src_for_ssa(index); @@ -2660,6 +2952,44 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode, fill_common_atomic_sources(b, opcode, w, &atomic->src[src]); break; + default: + vtn_fail("Invalid SPIR-V atomic"); + } + } else { + nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr); + const struct glsl_type *deref_type = deref->type; + nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode); + atomic = nir_intrinsic_instr_create(b->nb.shader, op); + atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa); + + switch (opcode) { + case SpvOpAtomicLoad: + atomic->num_components = glsl_get_vector_elements(deref_type); + break; + + case SpvOpAtomicStore: + atomic->num_components = glsl_get_vector_elements(deref_type); + nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1); + atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def); + break; + + case SpvOpAtomicExchange: + case SpvOpAtomicCompareExchange: + case SpvOpAtomicCompareExchangeWeak: + case SpvOpAtomicIIncrement: + case SpvOpAtomicIDecrement: + case SpvOpAtomicIAdd: + case SpvOpAtomicISub: + case SpvOpAtomicSMin: + case SpvOpAtomicUMin: + case SpvOpAtomicSMax: + case SpvOpAtomicUMax: + case SpvOpAtomicAnd: + case SpvOpAtomicOr: + case SpvOpAtomicXor: + fill_common_atomic_sources(b, opcode, w, &atomic->src[1]); + break; + default: vtn_fail("Invalid SPIR-V atomic"); } @@ -2686,7 +3016,7 @@ create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size) { nir_op op; switch (num_components) { - case 1: 
op = nir_op_fmov; break; + case 1: op = nir_op_imov; break; case 2: op = nir_op_vec2; break; case 3: op = nir_op_vec3; break; case 4: op = nir_op_vec4; break; @@ -2734,8 +3064,7 @@ vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src) nir_ssa_def * vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index) { - unsigned swiz[4] = { index }; - return nir_swizzle(&b->nb, src, swiz, 1, true); + return nir_channel(&b->nb, src, index); } nir_ssa_def * @@ -2759,16 +3088,17 @@ vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert, return &vec->dest.dest.ssa; } +static nir_ssa_def * +nir_ieq_imm(nir_builder *b, nir_ssa_def *x, uint64_t i) +{ + return nir_ieq(b, x, nir_imm_intN_t(b, i, x->bit_size)); +} + nir_ssa_def * vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *index) { - nir_ssa_def *dest = vtn_vector_extract(b, src, 0); - for (unsigned i = 1; i < src->num_components; i++) - dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)), - vtn_vector_extract(b, src, i), dest); - - return dest; + return nir_vector_extract(&b->nb, src, nir_i2i(&b->nb, index, 32)); } nir_ssa_def * @@ -2777,7 +3107,7 @@ vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src, { nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0); for (unsigned i = 1; i < src->num_components; i++) - dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)), + dest = nir_bcsel(&b->nb, nir_ieq_imm(&b->nb, index, i), vtn_vector_insert(b, src, insert, i), dest); return dest; @@ -2951,7 +3281,7 @@ vtn_handle_composite(struct vtn_builder *b, SpvOp opcode, unsigned elems = count - 3; assume(elems >= 1); if (glsl_type_is_vector_or_scalar(type)) { - nir_ssa_def *srcs[4]; + nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS]; for (unsigned i = 0; i < elems; i++) srcs[i] = vtn_ssa_value(b, w[3 + i])->def; val->ssa->def = @@ -3075,9 +3405,12 @@ vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode, switch (opcode) { case SpvOpEmitStreamVertex: - case SpvOpEndStreamPrimitive: - nir_intrinsic_set_stream_id(intrin, w[1]); + case SpvOpEndStreamPrimitive: { + unsigned stream = vtn_constant_uint(b, w[1]); + nir_intrinsic_set_stream_id(intrin, stream); break; + } + default: break; } @@ -3087,23 +3420,19 @@ vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode, } case SpvOpMemoryBarrier: { - SpvScope scope = vtn_constant_value(b, w[1])->values[0].u32[0]; - SpvMemorySemanticsMask semantics = - vtn_constant_value(b, w[2])->values[0].u32[0]; + SpvScope scope = vtn_constant_uint(b, w[1]); + SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]); vtn_emit_memory_barrier(b, scope, semantics); return; } case SpvOpControlBarrier: { - SpvScope execution_scope = - vtn_constant_value(b, w[1])->values[0].u32[0]; + SpvScope execution_scope = vtn_constant_uint(b, w[1]); if (execution_scope == SpvScopeWorkgroup) vtn_emit_barrier(b, nir_intrinsic_barrier); - SpvScope memory_scope = - vtn_constant_value(b, w[2])->values[0].u32[0]; - SpvMemorySemanticsMask memory_semantics = - vtn_constant_value(b, w[3])->values[0].u32[0]; + SpvScope memory_scope = vtn_constant_uint(b, w[2]); + SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]); vtn_emit_memory_barrier(b, memory_scope, memory_semantics); break; } @@ -3178,6 +3507,8 @@ stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model) return MESA_SHADER_FRAGMENT; case SpvExecutionModelGLCompute: return MESA_SHADER_COMPUTE; + case SpvExecutionModelKernel: + return 
MESA_SHADER_KERNEL; default: vtn_fail("Unsupported execution model"); } @@ -3189,6 +3520,24 @@ stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model) spirv_capability_to_string(cap)); \ } while(0) + +void +vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w, + unsigned count) +{ + struct vtn_value *entry_point = &b->values[w[2]]; + /* Let this be a name label regardless */ + unsigned name_words; + entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words); + + if (strcmp(entry_point->name, b->entry_point_name) != 0 || + stage_for_execution_model(b, w[1]) != b->entry_point_stage) + return; + + vtn_assert(b->entry_point == NULL); + b->entry_point = entry_point; +} + static bool vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, const uint32_t *w, unsigned count) @@ -3253,32 +3602,61 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, case SpvCapabilityStorageImageExtendedFormats: break; - case SpvCapabilityGeometryStreams: case SpvCapabilityLinkage: case SpvCapabilityVector16: case SpvCapabilityFloat16Buffer: case SpvCapabilityFloat16: - case SpvCapabilityInt64Atomics: - case SpvCapabilityAtomicStorage: - case SpvCapabilityInt16: - case SpvCapabilityStorageImageMultisample: - case SpvCapabilityInt8: case SpvCapabilitySparseResidency: - case SpvCapabilityMinLod: - case SpvCapabilityTransformFeedback: vtn_warn("Unsupported SPIR-V capability: %s", spirv_capability_to_string(cap)); break; + case SpvCapabilityMinLod: + spv_check_supported(min_lod, cap); + break; + + case SpvCapabilityAtomicStorage: + spv_check_supported(atomic_storage, cap); + break; + case SpvCapabilityFloat64: spv_check_supported(float64, cap); break; case SpvCapabilityInt64: spv_check_supported(int64, cap); break; + case SpvCapabilityInt16: + spv_check_supported(int16, cap); + break; + + case SpvCapabilityTransformFeedback: + spv_check_supported(transform_feedback, cap); + break; + + case SpvCapabilityGeometryStreams: + spv_check_supported(geometry_streams, cap); + break; + + case SpvCapabilityInt64Atomics: + spv_check_supported(int64_atomics, cap); + break; + + case SpvCapabilityInt8: + spv_check_supported(int8, cap); + break; + + case SpvCapabilityStorageImageMultisample: + spv_check_supported(storage_image_ms, cap); + break; case SpvCapabilityAddresses: + spv_check_supported(address, cap); + break; + case SpvCapabilityKernel: + spv_check_supported(kernel, cap); + break; + case SpvCapabilityImageBasic: case SpvCapabilityImageReadWrite: case SpvCapabilityImageMipmap: @@ -3340,14 +3718,17 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, case SpvCapabilityGroupNonUniformQuad: spv_check_supported(subgroup_quad, cap); + break; case SpvCapabilityGroupNonUniformArithmetic: case SpvCapabilityGroupNonUniformClustered: spv_check_supported(subgroup_arithmetic, cap); + break; case SpvCapabilityVariablePointersStorageBuffer: case SpvCapabilityVariablePointers: spv_check_supported(variable_pointers, cap); + b->variable_pointers = true; break; case SpvCapabilityStorageUniformBufferBlock16: @@ -3361,6 +3742,34 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, spv_check_supported(shader_viewport_index_layer, cap); break; + case SpvCapabilityStorageBuffer8BitAccess: + case SpvCapabilityUniformAndStorageBuffer8BitAccess: + case SpvCapabilityStoragePushConstant8: + spv_check_supported(storage_8bit, cap); + break; + + case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT: + case 
SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT: + case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT: + spv_check_supported(descriptor_array_dynamic_indexing, cap); + break; + + case SpvCapabilityRuntimeDescriptorArrayEXT: + spv_check_supported(runtime_descriptor_array, cap); + break; + + case SpvCapabilityStencilExportEXT: + spv_check_supported(stencil_export, cap); + break; + + case SpvCapabilitySampleMaskPostDepthCoverage: + spv_check_supported(post_depth_coverage, cap); + break; + + case SpvCapabilityPhysicalStorageBufferAddressesEXT: + spv_check_supported(physical_storage_buffer_address, cap); + break; + default: vtn_fail("Unhandled capability"); } @@ -3372,25 +3781,49 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, break; case SpvOpMemoryModel: - vtn_assert(w[1] == SpvAddressingModelLogical); + switch (w[1]) { + case SpvAddressingModelPhysical32: + vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL, + "AddressingModelPhysical32 only supported for kernels"); + b->shader->info.cs.ptr_size = 32; + b->physical_ptrs = true; + b->options->shared_ptr_type = glsl_uint_type(); + b->options->global_ptr_type = glsl_uint_type(); + b->options->temp_ptr_type = glsl_uint_type(); + break; + case SpvAddressingModelPhysical64: + vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL, + "AddressingModelPhysical64 only supported for kernels"); + b->shader->info.cs.ptr_size = 64; + b->physical_ptrs = true; + b->options->shared_ptr_type = glsl_uint64_t_type(); + b->options->global_ptr_type = glsl_uint64_t_type(); + b->options->temp_ptr_type = glsl_uint64_t_type(); + break; + case SpvAddressingModelLogical: + vtn_fail_if(b->shader->info.stage >= MESA_SHADER_STAGES, + "AddressingModelLogical only supported for shaders"); + b->shader->info.cs.ptr_size = 0; + b->physical_ptrs = false; + break; + case SpvAddressingModelPhysicalStorageBuffer64EXT: + vtn_fail_if(!b->options || + !b->options->caps.physical_storage_buffer_address, + "AddressingModelPhysicalStorageBuffer64EXT not supported"); + break; + default: + vtn_fail("Unknown addressing model"); + break; + } + vtn_assert(w[2] == SpvMemoryModelSimple || - w[2] == SpvMemoryModelGLSL450); + w[2] == SpvMemoryModelGLSL450 || + w[2] == SpvMemoryModelOpenCL); break; - case SpvOpEntryPoint: { - struct vtn_value *entry_point = &b->values[w[2]]; - /* Let this be a name label regardless */ - unsigned name_words; - entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words); - - if (strcmp(entry_point->name, b->entry_point_name) != 0 || - stage_for_execution_model(b, w[1]) != b->entry_point_stage) - break; - - vtn_assert(b->entry_point == NULL); - b->entry_point = entry_point; + case SpvOpEntryPoint: + vtn_handle_entry_point(b, w, count); break; - } case SpvOpString: vtn_push_value(b, w[1], vtn_value_type_string)->str = @@ -3406,11 +3839,14 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode, break; case SpvOpExecutionMode: + case SpvOpExecutionModeId: case SpvOpDecorationGroup: case SpvOpDecorate: case SpvOpMemberDecorate: case SpvOpGroupDecorate: case SpvOpGroupMemberDecorate: + case SpvOpDecorateStringGOOGLE: + case SpvOpMemberDecorateStringGOOGLE: vtn_handle_decoration(b, opcode, w, count); break; @@ -3430,7 +3866,8 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, switch(mode->exec_mode) { case SpvExecutionModeOriginUpperLeft: case SpvExecutionModeOriginLowerLeft: - b->origin_upper_left = + vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); + 
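/* Fragment-stage execution modes such as OriginUpperLeft and
 * PixelCenterInteger are now recorded directly on shader->info.fs instead
 * of being carried around on the vtn_builder. */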
b->shader->info.fs.origin_upper_left = (mode->exec_mode == SpvExecutionModeOriginUpperLeft); break; @@ -3439,6 +3876,11 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, b->shader->info.fs.early_fragment_tests = true; break; + case SpvExecutionModePostDepthCoverage: + vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); + b->shader->info.fs.post_depth_coverage = true; + break; + case SpvExecutionModeInvocations: vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); b->shader->info.gs.invocations = MAX2(1, mode->literals[0]); @@ -3462,12 +3904,20 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, break; case SpvExecutionModeLocalSize: - vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE); + vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage)); b->shader->info.cs.local_size[0] = mode->literals[0]; b->shader->info.cs.local_size[1] = mode->literals[1]; b->shader->info.cs.local_size[2] = mode->literals[2]; break; + + case SpvExecutionModeLocalSizeId: + b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->literals[0]); + b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->literals[1]); + b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->literals[2]); + break; + case SpvExecutionModeLocalSizeHint: + case SpvExecutionModeLocalSizeHintId: break; /* Nothing to do with this */ case SpvExecutionModeOutputVertices: @@ -3495,6 +3945,8 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY); b->shader->info.gs.vertices_in = vertices_in_from_spv_execution_mode(b, mode->exec_mode); + b->shader->info.gs.input_primitive = + gl_primitive_from_spv_execution_mode(b, mode->exec_mode); } break; @@ -3538,17 +3990,29 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point, break; case SpvExecutionModePixelCenterInteger: - b->pixel_center_integer = true; + vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); + b->shader->info.fs.pixel_center_integer = true; break; case SpvExecutionModeXfb: - vtn_fail("Unhandled execution mode"); + b->shader->info.has_transform_feedback_varyings = true; break; case SpvExecutionModeVecTypeHint: - case SpvExecutionModeContractionOff: break; /* OpenCL */ + case SpvExecutionModeContractionOff: + if (b->shader->info.stage != MESA_SHADER_KERNEL) + vtn_warn("ExectionMode only allowed for CL-style kernels: %s", + spirv_executionmode_to_string(mode->exec_mode)); + else + b->exact = true; + break; + + case SpvExecutionModeStencilRefReplacingEXT: + vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT); + break; + default: vtn_fail("Unhandled execution mode"); } @@ -3578,6 +4042,8 @@ vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpMemberDecorate: case SpvOpGroupDecorate: case SpvOpGroupMemberDecorate: + case SpvOpDecorateStringGOOGLE: + case SpvOpMemberDecorateStringGOOGLE: vtn_fail("Invalid opcode types and variables section"); break; @@ -3595,6 +4061,7 @@ vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpTypeStruct: case SpvOpTypeOpaque: case SpvOpTypePointer: + case SpvOpTypeForwardPointer: case SpvOpTypeFunction: case SpvOpTypeEvent: case SpvOpTypeDeviceEvent: @@ -3661,7 +4128,10 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpAccessChain: case SpvOpPtrAccessChain: case SpvOpInBoundsAccessChain: + case SpvOpInBoundsPtrAccessChain: case 
SpvOpArrayLength: + case SpvOpConvertPtrToU: + case SpvOpConvertUToPtr: vtn_handle_variables(b, opcode, w, count); break; @@ -3698,10 +4168,10 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpImageQuerySize: { struct vtn_pointer *image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer; - if (image->mode == vtn_variable_mode_image) { + if (glsl_type_is_image(image->type->type)) { vtn_handle_image(b, opcode, w, count); } else { - vtn_assert(image->mode == vtn_variable_mode_sampler); + vtn_assert(glsl_type_is_sampler(image->type->type)); vtn_handle_texture(b, opcode, w, count); } break; @@ -3727,7 +4197,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, vtn_handle_image(b, opcode, w, count); } else { vtn_assert(pointer->value_type == vtn_value_type_pointer); - vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count); + vtn_handle_atomics(b, opcode, w, count); } break; } @@ -3738,7 +4208,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, vtn_handle_image(b, opcode, w, count); } else { vtn_assert(pointer->value_type == vtn_value_type_pointer); - vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count); + vtn_handle_atomics(b, opcode, w, count); } break; } @@ -3818,8 +4288,6 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, case SpvOpSConvert: case SpvOpFConvert: case SpvOpQuantizeToF16: - case SpvOpConvertPtrToU: - case SpvOpConvertUToPtr: case SpvOpPtrCastToGeneric: case SpvOpGenericCastToPtr: case SpvOpBitcast: @@ -3970,15 +4438,17 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode, return true; } -nir_function * -spirv_to_nir(const uint32_t *words, size_t word_count, - struct nir_spirv_specialization *spec, unsigned num_spec, - gl_shader_stage stage, const char *entry_point_name, - const struct spirv_to_nir_options *options, - const nir_shader_compiler_options *nir_options) +struct vtn_builder* +vtn_create_builder(const uint32_t *words, size_t word_count, + gl_shader_stage stage, const char *entry_point_name, + const struct spirv_to_nir_options *options) { - /* Initialize the stn_builder object */ + /* Initialize the vtn_builder object */ struct vtn_builder *b = rzalloc(NULL, struct vtn_builder); + struct spirv_to_nir_options *dup_options = + ralloc(b, struct spirv_to_nir_options); + *dup_options = *options; + b->spirv = words; b->spirv_word_count = word_count; b->file = NULL; @@ -3987,30 +4457,139 @@ spirv_to_nir(const uint32_t *words, size_t word_count, exec_list_make_empty(&b->functions); b->entry_point_stage = stage; b->entry_point_name = entry_point_name; - b->options = options; + b->options = dup_options; - /* See also _vtn_fail() */ - if (setjmp(b->fail_jump)) { - ralloc_free(b); - return NULL; + /* + * Handle the SPIR-V header (first 5 dwords). + * Can't use vtx_assert() as the setjmp(3) target isn't initialized yet. + */ + if (word_count <= 5) + goto fail; + + if (words[0] != SpvMagicNumber) { + vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber); + goto fail; + } + if (words[1] < 0x10000) { + vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]); + goto fail; } - const uint32_t *word_end = words + word_count; + uint16_t generator_id = words[2] >> 16; + uint16_t generator_version = words[2]; - /* Handle the SPIR-V header (first 4 dwords) */ - vtn_assert(word_count > 5); + /* The first GLSLang version bump actually 1.5 years after #179 was fixed + * but this should at least let us shut the workaround off for modern + * versions of GLSLang. 
+ */ + b->wa_glslang_179 = (generator_id == 8 && generator_version == 1); - vtn_assert(words[0] == SpvMagicNumber); - vtn_assert(words[1] >= 0x10000); /* words[2] == generator magic */ unsigned value_id_bound = words[3]; - vtn_assert(words[4] == 0); - - words+= 5; + if (words[4] != 0) { + vtn_err("words[4] was %u, want 0", words[4]); + goto fail; + } b->value_id_bound = value_id_bound; b->values = rzalloc_array(b, struct vtn_value, value_id_bound); + return b; + fail: + ralloc_free(b); + return NULL; +} + +static nir_function * +vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b, + nir_function *entry_point) +{ + vtn_assert(entry_point == b->entry_point->func->impl->function); + vtn_fail_if(!entry_point->name, "entry points are required to have a name"); + const char *func_name = + ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name); + + /* we shouldn't have any inputs yet */ + vtn_assert(!entry_point->shader->num_inputs); + vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL); + + nir_function *main_entry_point = nir_function_create(b->shader, func_name); + main_entry_point->impl = nir_function_impl_create(main_entry_point); + nir_builder_init(&b->nb, main_entry_point->impl); + b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body); + b->func_param_idx = 0; + + nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point); + + for (unsigned i = 0; i < entry_point->num_params; ++i) { + struct vtn_type *param_type = b->entry_point->func->type->params[i]; + + /* consider all pointers to function memory to be parameters passed + * by value + */ + bool is_by_val = param_type->base_type == vtn_base_type_pointer && + param_type->storage_class == SpvStorageClassFunction; + + /* input variable */ + nir_variable *in_var = rzalloc(b->nb.shader, nir_variable); + in_var->data.mode = nir_var_shader_in; + in_var->data.read_only = true; + in_var->data.location = i; + + if (is_by_val) + in_var->type = param_type->deref->type; + else + in_var->type = param_type->type; + + nir_shader_add_variable(b->nb.shader, in_var); + b->nb.shader->num_inputs++; + + /* we have to copy the entire variable into function memory */ + if (is_by_val) { + nir_variable *copy_var = + nir_local_variable_create(main_entry_point->impl, in_var->type, + "copy_in"); + nir_copy_var(&b->nb, copy_var, in_var); + call->params[i] = + nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa); + } else { + call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var)); + } + } + + nir_builder_instr_insert(&b->nb, &call->instr); + + return main_entry_point; +} + +nir_function * +spirv_to_nir(const uint32_t *words, size_t word_count, + struct nir_spirv_specialization *spec, unsigned num_spec, + gl_shader_stage stage, const char *entry_point_name, + const struct spirv_to_nir_options *options, + const nir_shader_compiler_options *nir_options) + +{ + const uint32_t *word_end = words + word_count; + + struct vtn_builder *b = vtn_create_builder(words, word_count, + stage, entry_point_name, + options); + + if (b == NULL) + return NULL; + + /* See also _vtn_fail() */ + if (setjmp(b->fail_jump)) { + ralloc_free(b); + return NULL; + } + + /* Skip the SPIR-V header, handled at vtn_create_builder */ + words+= 5; + + b->shader = nir_shader_create(b, stage, nir_options, NULL); + /* Handle all the preamble instructions */ words = vtn_foreach_instruction(b, words, word_end, vtn_handle_preamble_instruction); @@ -4021,15 +4600,9 @@ spirv_to_nir(const uint32_t *words, size_t word_count, return NULL; } - b->shader = 
nir_shader_create(b, stage, nir_options, NULL); - /* Set shader info defaults */ b->shader->info.gs.invocations = 1; - /* Parse execution modes */ - vtn_foreach_execution_mode(b, b->entry_point, - vtn_handle_execution_mode, NULL); - b->specializations = spec; b->num_specializations = num_spec; @@ -4037,6 +4610,22 @@ spirv_to_nir(const uint32_t *words, size_t word_count, words = vtn_foreach_instruction(b, words, word_end, vtn_handle_variable_or_type_instruction); + /* Parse execution modes */ + vtn_foreach_execution_mode(b, b->entry_point, + vtn_handle_execution_mode, NULL); + + if (b->workgroup_size_builtin) { + vtn_assert(b->workgroup_size_builtin->type->type == + glsl_vector_type(GLSL_TYPE_UINT, 3)); + + nir_const_value *const_size = + &b->workgroup_size_builtin->constant->values[0]; + + b->shader->info.cs.local_size[0] = const_size->u32[0]; + b->shader->info.cs.local_size[1] = const_size->u32[1]; + b->shader->info.cs.local_size[2] = const_size->u32[2]; + } + /* Set types on all vtn_values */ vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type); @@ -4050,8 +4639,7 @@ spirv_to_nir(const uint32_t *words, size_t word_count, progress = false; foreach_list_typed(struct vtn_function, func, node, &b->functions) { if (func->referenced && !func->emitted) { - b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer, - _mesa_key_pointer_equal); + b->const_table = _mesa_pointer_hash_table_create(b); vtn_function_emit(b, func, vtn_handle_body_instruction); progress = true; @@ -4063,6 +4651,31 @@ spirv_to_nir(const uint32_t *words, size_t word_count, nir_function *entry_point = b->entry_point->func->impl->function; vtn_assert(entry_point); + /* post process entry_points with input params */ + if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL) + entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point); + + entry_point->is_entrypoint = true; + + /* When multiple shader stages exist in the same SPIR-V module, we + * generate input and output variables for every stage, in the same + * NIR program. These dead variables can be invalid NIR. For example, + * TCS outputs must be per-vertex arrays (or decorated 'patch'), while + * VS output variables wouldn't be. + * + * To ensure we have valid NIR, we eliminate any dead inputs and outputs + * right away. In order to do so, we must lower any constant initializers + * on outputs so nir_remove_dead_variables sees that they're written to. + */ + nir_lower_constant_initializers(b->shader, nir_var_shader_out); + nir_remove_dead_variables(b->shader, + nir_var_shader_in | nir_var_shader_out); + + /* We sometimes generate bogus derefs that, while never used, give the + * validator a bit of heartburn. Run dead code to get rid of them. + */ + nir_opt_dce(b->shader); + /* Unparent the shader from the vtn_builder before we delete the builder */ ralloc_steal(NULL, b->shader);
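
The header handling moved into vtn_create_builder() above replaces vtn_assert() with explicit vtn_err() plus a goto fail path, because the setjmp() target that vtn_fail() longjmps to is not armed yet at that point in the builder's life. For reference, the five words being validated are the standard SPIR-V module header: magic, version, generator, id bound, and a reserved schema word. The stand-alone sketch below performs the same checks and splits word 2 into a generator id and version the way the wa_glslang_179 workaround does; spirv_header_info and parse_spirv_header() are hypothetical names used only for this illustration and are not part of Mesa.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SPIRV_MAGIC 0x07230203u    /* SpvMagicNumber */

struct spirv_header_info {
   uint32_t version;         /* word 1: 0x00010000 is SPIR-V 1.0 */
   uint16_t generator_id;    /* word 2, high 16 bits (8 == Khronos glslang) */
   uint16_t generator_ver;   /* word 2, low 16 bits */
   uint32_t id_bound;        /* word 3: every result <id> is < id_bound */
};

/* Mirrors the checks added to vtn_create_builder(): the header is five
 * words (magic, version, generator, bound, schema), must be followed by
 * at least one instruction, and the schema word is required to be zero.
 */
static bool
parse_spirv_header(const uint32_t *words, size_t word_count,
                   struct spirv_header_info *info)
{
   if (word_count <= 5)
      return false;
   if (words[0] != SPIRV_MAGIC)
      return false;                 /* wrong magic (or byte-swapped input) */
   if (words[1] < 0x10000)
      return false;                 /* want at least SPIR-V 1.0 */
   if (words[4] != 0)
      return false;                 /* reserved instruction schema */

   info->version       = words[1];
   info->generator_id  = (uint16_t)(words[2] >> 16);
   info->generator_ver = (uint16_t)(words[2] & 0xffff);
   info->id_bound      = words[3];
   return true;
}

int
main(void)
{
   /* Header plus a single "OpCapability Shader" instruction (opcode 17). */
   const uint32_t module[] = {
      SPIRV_MAGIC, 0x00010000u, (8u << 16) | 1u, 100u, 0u,
      (2u << 16) | 17u, 1u,
   };

   struct spirv_header_info info;
   if (!parse_spirv_header(module, sizeof(module) / sizeof(module[0]), &info))
      return 1;

   printf("SPIR-V %u.%u, generator %u (version %u), id bound %u\n",
          (unsigned)(info.version >> 16),
          (unsigned)((info.version >> 8) & 0xff),
          (unsigned)info.generator_id, (unsigned)info.generator_ver,
          (unsigned)info.id_bound);
   return 0;
}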
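
Earlier in the diff, the inline OpEntryPoint matching is replaced by a call to vtn_handle_entry_point(), which still has to decode the entry point's literal-string name (vtn_string_literal() in the removed code) before it can be compared with b->entry_point_name. A literal string in the SPIR-V word stream is nul-terminated UTF-8 padded with zero bytes to a word boundary, with the first character in the lowest-order byte of the first word. The sketch below shows that layout on a small hand-built OpEntryPoint instruction; read_literal_string() and the example ids are hypothetical, and, like Mesa's own cast-based decoding, it assumes a little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode one literal-string operand and report how many words it occupied,
 * so the caller can locate the operands that follow it (for OpEntryPoint,
 * the interface <id>s).
 */
static const char *
read_literal_string(const uint32_t *w, unsigned avail_words,
                    unsigned *words_used)
{
   const char *str = (const char *)w;
   size_t max_len = (size_t)avail_words * sizeof(uint32_t);
   const char *nul = memchr(str, 0, max_len);

   if (!nul)
      return NULL;              /* no nul terminator: malformed module */

   size_t len = (size_t)(nul - str);
   *words_used = (unsigned)(len / 4 + 1);   /* text + nul, word-aligned */
   return str;
}

int
main(void)
{
   /* OpEntryPoint Fragment %4 "main" %7
    * word 0: (word count 6 << 16) | opcode 15
    * word 1: execution model (Fragment = 4)
    * word 2: entry point <id>
    * words 3..4: literal string "main" (one word of text, one of padding)
    * word 5: one interface <id>
    */
   const uint32_t w[] = {
      (6u << 16) | 15u, 4u, 4u, 0x6e69616du, 0u, 7u,
   };

   unsigned name_words;
   const char *name = read_literal_string(&w[3], 3, &name_words);
   if (!name)
      return 1;

   printf("entry point \"%s\", interface ids start at word %u\n",
          name, 3 + name_words);
   return 0;
}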