if (var->type->contains_subroutine())
continue;
+ /* Don't cross validate interface instances. These are only relevant
+ * inside a shader. The cross validation is done at the Interface Block
+ * name level.
+ */
+ if (var->is_interface_instance())
+ continue;
+
/* Don't cross validate temporaries that are at global scope. These
* will eventually get pulled into the shaders 'main'.
 */
ir_variable *const existing = variables->get_variable(var->name);
if (existing != NULL) {
- /* Check if types match. Interface blocks have some special
- * rules so we handle those elsewhere.
- */
- if (var->type != existing->type &&
- !var->is_interface_instance()) {
+ /* Check if types match. */
+ if (var->type != existing->type) {
if (!validate_intrastage_arrays(prog, var, existing)) {
if (var->type->is_record() && existing->type->is_record()
&& existing->type->record_compare(var->type)) {
for (unsigned k = 0; k <= i; k++) {
delete[] InterfaceBlockStageIndex[k];
}
+
+ /* Reset the block count. This will help avoid various segfaults
+ * from api calls that assume the array exists due to the count
+ * being non-zero.
+ */
+ *num_blks = 0;
return false;
}
}
linked_shader->Program->info.fs.early_fragment_tests |=
- shader->EarlyFragmentTests;
+ shader->EarlyFragmentTests || shader->PostDepthCoverage;
linked_shader->Program->info.fs.inner_coverage |= shader->InnerCoverage;
linked_shader->Program->info.fs.post_depth_coverage |=
shader->PostDepthCoverage;
return NULL;
}
- _mesa_reference_shader_program_data(ctx, &gl_prog->sh.data, prog->data);
+ if (!prog->data->cache_fallback)
+ _mesa_reference_shader_program_data(ctx, &gl_prog->sh.data, prog->data);
/* Don't use _mesa_reference_program() just take ownership */
linked->Program = gl_prog;
v.run(linked->ir);
v.fixup_unnamed_interface_types();
- /* Link up uniform blocks defined within this stage. */
- link_uniform_blocks(mem_ctx, ctx, prog, linked, &ubo_blocks,
- &num_ubo_blocks, &ssbo_blocks, &num_ssbo_blocks);
-
- if (!prog->data->LinkStatus) {
- _mesa_delete_linked_shader(ctx, linked);
- return NULL;
- }
+ if (!prog->data->cache_fallback) {
+ /* Link up uniform blocks defined within this stage. */
+ link_uniform_blocks(mem_ctx, ctx, prog, linked, &ubo_blocks,
+ &num_ubo_blocks, &ssbo_blocks, &num_ssbo_blocks);
- /* Copy ubo blocks to linked shader list */
- linked->Program->sh.UniformBlocks =
- ralloc_array(linked, gl_uniform_block *, num_ubo_blocks);
- ralloc_steal(linked, ubo_blocks);
- for (unsigned i = 0; i < num_ubo_blocks; i++) {
- linked->Program->sh.UniformBlocks[i] = &ubo_blocks[i];
- }
- linked->Program->info.num_ubos = num_ubo_blocks;
+ if (!prog->data->LinkStatus) {
+ _mesa_delete_linked_shader(ctx, linked);
+ return NULL;
+ }
- /* Copy ssbo blocks to linked shader list */
- linked->Program->sh.ShaderStorageBlocks =
- ralloc_array(linked, gl_uniform_block *, num_ssbo_blocks);
- ralloc_steal(linked, ssbo_blocks);
- for (unsigned i = 0; i < num_ssbo_blocks; i++) {
- linked->Program->sh.ShaderStorageBlocks[i] = &ssbo_blocks[i];
+ /* Copy ubo blocks to linked shader list */
+ linked->Program->sh.UniformBlocks =
+ ralloc_array(linked, gl_uniform_block *, num_ubo_blocks);
+ ralloc_steal(linked, ubo_blocks);
+ for (unsigned i = 0; i < num_ubo_blocks; i++) {
+ linked->Program->sh.UniformBlocks[i] = &ubo_blocks[i];
+ }
+ linked->Program->info.num_ubos = num_ubo_blocks;
+
+ /* Copy ssbo blocks to linked shader list */
+ linked->Program->sh.ShaderStorageBlocks =
+ ralloc_array(linked, gl_uniform_block *, num_ssbo_blocks);
+ ralloc_steal(linked, ssbo_blocks);
+ for (unsigned i = 0; i < num_ssbo_blocks; i++) {
+ linked->Program->sh.ShaderStorageBlocks[i] = &ssbo_blocks[i];
+ }
+ linked->Program->info.num_ssbos = num_ssbo_blocks;
}
- linked->Program->info.num_ssbos = num_ssbo_blocks;
/* At this point linked should contain all of the linked IR, so
* validate it to make sure nothing went wrong.
* qualifier, except for vertex shader inputs and fragment shader
* outputs."
*/
- if (in->type->base_type == GLSL_TYPE_ATOMIC_UINT ||
- is_gl_identifier(in->name) ||
+ if (in->type->is_atomic_uint() || is_gl_identifier(in->name) ||
!(in->data.explicit_location || use_implicit_location)) {
out->location = -1;
} else {
update_array_sizes(prog);
link_assign_uniform_locations(prog, ctx);
- link_assign_atomic_counter_resources(ctx, prog);
- link_calculate_subroutine_compat(prog);
- check_resources(ctx, prog);
- check_subroutine_resources(prog);
- check_image_resources(ctx, prog);
- link_check_atomic_counter_resources(ctx, prog);
+ if (!prog->data->cache_fallback) {
+ link_assign_atomic_counter_resources(ctx, prog);
+ link_calculate_subroutine_compat(prog);
+ check_resources(ctx, prog);
+ check_subroutine_resources(prog);
+ check_image_resources(ctx, prog);
+ link_check_atomic_counter_resources(ctx, prog);
+ }
}
static bool
return;
}
+#ifdef ENABLE_SHADER_CACHE
/* If transform feedback used on the program then compile all shaders. */
bool skip_cache = false;
if (prog->TransformFeedback.NumVarying > 0) {
for (unsigned i = 0; i < prog->NumShaders; i++) {
- if (prog->Shaders[i]->ir) {
- continue;
- }
_mesa_glsl_compile_shader(ctx, prog->Shaders[i], false, false, true);
}
skip_cache = true;
if (!skip_cache && shader_cache_read_program_metadata(ctx, prog))
return;
+#endif
void *mem_ctx = ralloc_context(NULL); // temporary linker context
"tessellation evaluation shader\n");
goto done;
}
+
+ if (prog->IsES) {
+ if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
+ num_shaders[MESA_SHADER_TESS_CTRL] == 0) {
+ linker_error(prog, "GLSL ES requires non-separable programs "
+ "containing a tessellation evaluation shader to also "
+ "be linked with a tessellation control shader\n");
+ goto done;
+ }
+ }
}
/* Compute shaders have additional restrictions. */
last = i;
}
- check_explicit_uniform_locations(ctx, prog);
- link_assign_subroutine_types(prog);
+ if (!prog->data->cache_fallback) {
+ check_explicit_uniform_locations(ctx, prog);
+ link_assign_subroutine_types(prog);
+ }
if (!prog->data->LinkStatus)
goto done;
if (prog->SeparateShader)
disable_varying_optimizations_for_sso(prog);
- /* Process UBOs */
- if (!interstage_cross_validate_uniform_blocks(prog, false))
- goto done;
+ if (!prog->data->cache_fallback) {
+ /* Process UBOs */
+ if (!interstage_cross_validate_uniform_blocks(prog, false))
+ goto done;
- /* Process SSBOs */
- if (!interstage_cross_validate_uniform_blocks(prog, true))
- goto done;
+ /* Process SSBOs */
+ if (!interstage_cross_validate_uniform_blocks(prog, true))
+ goto done;
+ }
/* Do common optimization before assigning storage for attributes,
* uniforms, and varyings. Later optimization could possibly make