+/**
+ * This is the equivalent to compiler/glsl/linker.cpp::link_shaders()
+ * but for SPIR-V programs.
+ *
+ * This method just creates the gl_linked_shader structs with a reference to
+ * the SPIR-V data collected during previous steps.
+ *
+ * The real linking happens later in the driver-specific call LinkShader().
+ * This is so backends can implement different linking strategies for
+ * SPIR-V programs.
+ */
+void
+_mesa_spirv_link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+ /* Assume success; any failure path below flips this and returns early. */
+ prog->data->LinkStatus = LINKING_SUCCESS;
+ prog->data->Validated = false;
+
+ for (unsigned i = 0; i < prog->NumShaders; i++) {
+ struct gl_shader *shader = prog->Shaders[i];
+ gl_shader_stage shader_type = shader->Stage;
+
+ /* We only support one shader per stage. The gl_spirv spec doesn't seem
+ * to prevent this, but the way the API is designed, requiring all shaders
+ * to be specialized with an entry point, makes supporting this quite
+ * undefined.
+ *
+ * TODO: Turn this into a proper error once the spec bug
+ * <https://gitlab.khronos.org/opengl/API/issues/58> is resolved.
+ */
+ if (prog->_LinkedShaders[shader_type]) {
+ ralloc_strcat(&prog->data->InfoLog,
+ "\nError trying to link more than one SPIR-V shader "
+ "per stage.\n");
+ prog->data->LinkStatus = LINKING_FAILURE;
+ return;
+ }
+
+ /* Shaders reaching the SPIR-V link path must already carry the data
+ * gathered by glShaderBinary/glSpecializeShader.
+ */
+ assert(shader->spirv_data);
+
+ struct gl_linked_shader *linked = rzalloc(NULL, struct gl_linked_shader);
+ linked->Stage = shader_type;
+
+ /* Create program and attach it to the linked shader */
+ struct gl_program *gl_prog =
+ ctx->Driver.NewProgram(ctx, shader_type, prog->Name, false);
+ if (!gl_prog) {
+ prog->data->LinkStatus = LINKING_FAILURE;
+ /* Free the just-allocated linked shader; earlier stages stay in
+ * prog->_LinkedShaders and are cleaned up by the caller's usual
+ * program-teardown path.
+ */
+ _mesa_delete_linked_shader(ctx, linked);
+ return;
+ }
+
+ _mesa_reference_shader_program_data(ctx,
+ &gl_prog->sh.data,
+ prog->data);
+
+ /* Don't use _mesa_reference_program() just take ownership */
+ linked->Program = gl_prog;
+
+ /* Reference the SPIR-V data from shader to the linked shader */
+ _mesa_shader_spirv_data_reference(&linked->spirv_data,
+ shader->spirv_data);
+
+ prog->_LinkedShaders[shader_type] = linked;
+ prog->data->linked_stages |= 1 << shader_type;
+ }
+
+ /* Find the last enabled pre-rasterization stage: mask linked_stages down
+ * to the stages up to and including geometry, then take the highest set
+ * bit. util_last_bit() returns bit-index + 1, hence the -1 below.
+ */
+ int last_vert_stage =
+ util_last_bit(prog->data->linked_stages &
+ ((1 << (MESA_SHADER_GEOMETRY + 1)) - 1));
+
+ if (last_vert_stage)
+ prog->last_vert_prog = prog->_LinkedShaders[last_vert_stage - 1]->Program;
+
+ /* Some shaders have to be linked with some other shaders present. */
+ if (!prog->SeparateShader) {
+ /* Each pair means: if stage 'a' is linked, stage 'b' must be too. */
+ static const struct {
+ gl_shader_stage a, b;
+ } stage_pairs[] = {
+ { MESA_SHADER_GEOMETRY, MESA_SHADER_VERTEX },
+ { MESA_SHADER_TESS_EVAL, MESA_SHADER_VERTEX },
+ { MESA_SHADER_TESS_CTRL, MESA_SHADER_VERTEX },
+ { MESA_SHADER_TESS_CTRL, MESA_SHADER_TESS_EVAL },
+ };
+
+ for (unsigned i = 0; i < ARRAY_SIZE(stage_pairs); i++) {
+ gl_shader_stage a = stage_pairs[i].a;
+ gl_shader_stage b = stage_pairs[i].b;
+ /* True exactly when 'a' is linked but 'b' is not. */
+ if ((prog->data->linked_stages & ((1 << a) | (1 << b))) == (1 << a)) {
+ ralloc_asprintf_append(&prog->data->InfoLog,
+ "%s shader must be linked with %s shader\n",
+ _mesa_shader_stage_to_string(a),
+ _mesa_shader_stage_to_string(b));
+ prog->data->LinkStatus = LINKING_FAILURE;
+ return;
+ }
+ }
+ }
+
+ /* Compute shaders have additional restrictions. */
+ if ((prog->data->linked_stages & (1 << MESA_SHADER_COMPUTE)) &&
+ (prog->data->linked_stages & ~(1 << MESA_SHADER_COMPUTE))) {
+ ralloc_asprintf_append(&prog->data->InfoLog,
+ "Compute shaders may not be linked with any other "
+ "type of shader\n");
+ prog->data->LinkStatus = LINKING_FAILURE;
+ return;
+ }
+}
+
+/**
+ * Translate the SPIR-V module attached to the given linked stage of \p prog
+ * into a freshly allocated nir_shader.
+ *
+ * The stage must have been linked by _mesa_spirv_link_shaders() so that
+ * spirv_data, the module and the entry-point name are all present (asserted
+ * below). Runs the early lowering passes needed right after spirv_to_nir
+ * (initializer lowering, return lowering, function inlining, entry-point
+ * pruning, struct splitting) before handing the shader back to the caller.
+ *
+ * Returns the new nir_shader, owned by the caller.
+ */
+nir_shader *
+_mesa_spirv_to_nir(struct gl_context *ctx,
+ const struct gl_shader_program *prog,
+ gl_shader_stage stage,
+ const nir_shader_compiler_options *options)
+{
+ struct gl_linked_shader *linked_shader = prog->_LinkedShaders[stage];
+ assert (linked_shader);
+
+ struct gl_shader_spirv_data *spirv_data = linked_shader->spirv_data;
+ assert(spirv_data);
+
+ struct gl_spirv_module *spirv_module = spirv_data->SpirVModule;
+ assert (spirv_module != NULL);
+
+ const char *entry_point_name = spirv_data->SpirVEntryPoint;
+ assert(entry_point_name);
+
+ /* Repack the specialization constants set via glSpecializeShader into the
+ * array form spirv_to_nir() consumes; freed right after the call.
+ */
+ struct nir_spirv_specialization *spec_entries =
+ calloc(sizeof(*spec_entries),
+ spirv_data->NumSpecializationConstants);
+
+ for (unsigned i = 0; i < spirv_data->NumSpecializationConstants; ++i) {
+ spec_entries[i].id = spirv_data->SpecializationConstantsIndex[i];
+ spec_entries[i].value.u32 = spirv_data->SpecializationConstantsValue[i];
+ spec_entries[i].defined_on_module = false;
+ }
+
+ const struct spirv_to_nir_options spirv_options = {
+ .environment = NIR_SPIRV_OPENGL,
+ .frag_coord_is_sysval = ctx->Const.GLSLFragCoordIsSysVal,
+ .caps = ctx->Const.SpirVCapabilities,
+ .ubo_addr_format = nir_address_format_32bit_index_offset,
+ .ssbo_addr_format = nir_address_format_32bit_index_offset,
+
+ /* TODO: Consider changing this to an address format that has the NULL
+ * pointer equals to 0. That might be a better format to play nice
+ * with certain code / code generators.
+ */
+ .shared_addr_format = nir_address_format_32bit_offset,
+
+ };
+
+ /* Module length is in bytes; spirv_to_nir() takes a count of 32-bit
+ * SPIR-V words, hence the division by 4.
+ */
+ nir_shader *nir =
+ spirv_to_nir((const uint32_t *) &spirv_module->Binary[0],
+ spirv_module->Length / 4,
+ spec_entries, spirv_data->NumSpecializationConstants,
+ stage, entry_point_name,
+ &spirv_options,
+ options);
+ free(spec_entries);
+
+ assert(nir);
+ assert(nir->info.stage == stage);
+
+ nir->options = options;
+
+ /* Give the shader a debug-friendly name: "SPIRV:<stage abbrev>:<prog name>". */
+ nir->info.name =
+ ralloc_asprintf(nir, "SPIRV:%s:%d",
+ _mesa_shader_stage_to_abbrev(nir->info.stage),
+ prog->Name);
+ nir_validate_shader(nir, "after spirv_to_nir");
+
+ nir->info.separate_shader = linked_shader->Program->info.separate_shader;
+
+ /* We have to lower away local constant initializers right before we
+ * inline functions. That way they get properly initialized at the top
+ * of the function and not at the top of its caller.
+ */
+ NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
+ NIR_PASS_V(nir, nir_lower_returns);
+ NIR_PASS_V(nir, nir_inline_functions);
+ NIR_PASS_V(nir, nir_opt_deref);
+
+ /* Pick off the single entrypoint that we want */
+ foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
+ if (!func->is_entrypoint)
+ exec_node_remove(&func->node);
+ }
+ assert(exec_list_length(&nir->functions) == 1);
+
+ /* Split member structs. We do this before lower_io_to_temporaries so that
+ * it doesn't lower system values to temporaries by accident.
+ */
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_split_per_member_structs);
+
+ /* Vertex attributes that occupy two slots (e.g. dvec3/dvec4 — TODO
+ * confirm exact set against nir_remap_dual_slot_attributes) are remapped;
+ * the resulting mapping is recorded in the program's DualSlotInputs.
+ */
+ if (nir->info.stage == MESA_SHADER_VERTEX)
+ nir_remap_dual_slot_attributes(nir, &linked_shader->Program->DualSlotInputs);
+
+ NIR_PASS_V(nir, nir_lower_frexp);
+
+ return nir;
+}
+