struct gl_spirv_module *module;
struct gl_shader_spirv_data *spirv_data;
- assert(length >= 0);
-
module = malloc(sizeof(*module) + length);
if (!module) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glShaderBinary");
/* Create program and attach it to the linked shader */
struct gl_program *gl_prog =
- ctx->Driver.NewProgram(ctx,
- _mesa_shader_stage_to_program(shader_type),
- prog->Name, false);
+ ctx->Driver.NewProgram(ctx, shader_type, prog->Name, false);
if (!gl_prog) {
prog->data->LinkStatus = LINKING_FAILURE;
_mesa_delete_linked_shader(ctx, linked);
if (last_vert_stage)
prog->last_vert_prog = prog->_LinkedShaders[last_vert_stage - 1]->Program;
-}
-static void
-nir_compute_double_inputs(nir_shader *shader,
- const nir_shader_compiler_options *options)
-{
- nir_foreach_variable(var, &shader->inputs) {
- if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
- for (uint i = 0; i < glsl_count_attribute_slots(var->type, true); i++) {
- uint64_t bitfield = BITFIELD64_BIT(var->data.location + i);
- shader->info.vs.double_inputs |= bitfield;
+ /* Some shaders have to be linked with some other shaders present. */
+ if (!prog->SeparateShader) {
+ static const struct {
+ gl_shader_stage a, b;
+ } stage_pairs[] = {
+ { MESA_SHADER_GEOMETRY, MESA_SHADER_VERTEX },
+ { MESA_SHADER_TESS_EVAL, MESA_SHADER_VERTEX },
+ { MESA_SHADER_TESS_CTRL, MESA_SHADER_VERTEX },
+ { MESA_SHADER_TESS_CTRL, MESA_SHADER_TESS_EVAL },
+ };
+
+ for (unsigned i = 0; i < ARRAY_SIZE(stage_pairs); i++) {
+ gl_shader_stage a = stage_pairs[i].a;
+ gl_shader_stage b = stage_pairs[i].b;
+ if ((prog->data->linked_stages & ((1 << a) | (1 << b))) == (1 << a)) {
+ ralloc_asprintf_append(&prog->data->InfoLog,
+ "%s shader must be linked with %s shader\n",
+ _mesa_shader_stage_to_string(a),
+ _mesa_shader_stage_to_string(b));
+ prog->data->LinkStatus = LINKING_FAILURE;
+ return;
}
}
}
+
+ /* Compute shaders have additional restrictions. */
+ if ((prog->data->linked_stages & (1 << MESA_SHADER_COMPUTE)) &&
+ (prog->data->linked_stages & ~(1 << MESA_SHADER_COMPUTE))) {
+ ralloc_asprintf_append(&prog->data->InfoLog,
+ "Compute shaders may not be linked with any other "
+ "type of shader\n");
+ prog->data->LinkStatus = LINKING_FAILURE;
+ return;
+ }
}
nir_shader *
_mesa_spirv_to_nir(struct gl_context *ctx,
                   const struct gl_shader_program *prog,
                   gl_shader_stage stage,
                   const nir_shader_compiler_options *options)
{
- nir_shader *nir = NULL;
-
struct gl_linked_shader *linked_shader = prog->_LinkedShaders[stage];
assert (linked_shader);
for (unsigned i = 0; i < spirv_data->NumSpecializationConstants; ++i) {
spec_entries[i].id = spirv_data->SpecializationConstantsIndex[i];
- spec_entries[i].data32 = spirv_data->SpecializationConstantsValue[i];
+ spec_entries[i].value.u32 = spirv_data->SpecializationConstantsValue[i];
spec_entries[i].defined_on_module = false;
}
const struct spirv_to_nir_options spirv_options = {
- .lower_workgroup_access_to_offsets = true,
- .caps = ctx->Const.SpirVCapabilities
+ .environment = NIR_SPIRV_OPENGL,
+ .frag_coord_is_sysval = ctx->Const.GLSLFragCoordIsSysVal,
+ .caps = ctx->Const.SpirVCapabilities,
+ .ubo_addr_format = nir_address_format_32bit_index_offset,
+ .ssbo_addr_format = nir_address_format_32bit_index_offset,
+
+   /* TODO: Consider changing this to an address format where the NULL
+    * pointer equals 0. That might be a better format to play nicely
+    * with certain code / code generators.
+    */
+ .shared_addr_format = nir_address_format_32bit_offset,
+
};
- nir_function *entry_point =
+ nir_shader *nir =
spirv_to_nir((const uint32_t *) &spirv_module->Binary[0],
spirv_module->Length / 4,
spec_entries, spirv_data->NumSpecializationConstants,
                   stage, "main", &spirv_options,
                   options);
free(spec_entries);
- assert (entry_point);
- nir = entry_point->shader;
+ assert(nir);
assert(nir->info.stage == stage);
nir->options = options;
   nir->info.name =
      ralloc_asprintf(nir, "SPIRV:%s:%d",
_mesa_shader_stage_to_abbrev(nir->info.stage),
prog->Name);
- nir_validate_shader(nir);
+ nir_validate_shader(nir, "after spirv_to_nir");
+
+ nir->info.separate_shader = linked_shader->Program->info.separate_shader;
/* We have to lower away local constant initializers right before we
* inline functions. That way they get properly initialized at the top
* of the function and not at the top of its caller.
*/
- NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
+ NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
NIR_PASS_V(nir, nir_copy_prop);
+ NIR_PASS_V(nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */
foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
- if (func != entry_point)
+ if (!func->is_entrypoint)
exec_node_remove(&func->node);
}
assert(exec_list_length(&nir->functions) == 1);
- entry_point->name = ralloc_strdup(entry_point, "main");
/* Split member structs. We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_split_per_member_structs);
- if (nir->info.stage == MESA_SHADER_VERTEX) {
- nir_compute_double_inputs(nir, options);
- nir_remap_attributes(nir, options);
- }
+ if (nir->info.stage == MESA_SHADER_VERTEX)
+ nir_remap_dual_slot_attributes(nir, &linked_shader->Program->DualSlotInputs);
+
+ NIR_PASS_V(nir, nir_lower_frexp);
return nir;
}
for (unsigned i = 0; i < numSpecializationConstants; ++i) {
spec_entries[i].id = pConstantIndex[i];
- spec_entries[i].data32 = pConstantValue[i];
+ spec_entries[i].value.u32 = pConstantValue[i];
spec_entries[i].defined_on_module = false;
}