/* Set default language version and extensions */
this->language_version = 110;
this->forced_language_version = ctx->Const.ForceGLSLVersion;
- this->zero_init = ctx->Const.GLSLZeroInit;
+ if (ctx->Const.GLSLZeroInit == 1) {
+ this->zero_init = (1u << ir_var_auto) | (1u << ir_var_temporary) | (1u << ir_var_shader_out);
+ } else if (ctx->Const.GLSLZeroInit == 2) {
+ this->zero_init = (1u << ir_var_auto) | (1u << ir_var_temporary) | (1u << ir_var_function_out);
+ } else {
+ this->zero_init = 0;
+ }
this->gl_version = 20;
this->compat_shader = true;
this->es_shader = false;
sizeof(this->atomic_counter_offsets));
this->allow_extension_directive_midshader =
ctx->Const.AllowGLSLExtensionDirectiveMidShader;
+ this->allow_glsl_120_subset_in_110 =
+ ctx->Const.AllowGLSL120SubsetIn110;
this->allow_builtin_variable_redeclaration =
ctx->Const.AllowGLSLBuiltinVariableRedeclaration;
this->allow_layout_qualifier_on_function_parameter =
/* Get the offset that the new message will be written to. */
int msg_offset = strlen(state->info_log);
- ralloc_asprintf_append(&state->info_log, "%u:%u(%u): %s: ",
- locp->source,
- locp->first_line,
- locp->first_column,
- error ? "error" : "warning");
+ if (locp->path) {
+ ralloc_asprintf_append(&state->info_log, "\"%s\"", locp->path);
+ } else {
+ ralloc_asprintf_append(&state->info_log, "%u", locp->source);
+ }
+ ralloc_asprintf_append(&state->info_log, ":%u(%u): %s: ",
+ locp->first_line, locp->first_column,
+ error ? "error" : "warning");
+
ralloc_vasprintf_append(&state->info_log, fmt, ap);
const char *const msg = &state->info_log[msg_offset];
EXT(ARB_shader_texture_lod),
EXT(ARB_shader_viewport_layer_array),
EXT(ARB_shading_language_420pack),
+ EXT(ARB_shading_language_include),
EXT(ARB_shading_language_packing),
EXT(ARB_tessellation_shader),
EXT(ARB_texture_cube_map_array),
EXT(EXT_demote_to_helper_invocation),
EXT(EXT_frag_depth),
EXT(EXT_draw_buffers),
+ EXT(EXT_draw_instanced),
EXT(EXT_clip_cull_distance),
EXT(EXT_geometry_point_size),
EXT_AEP(EXT_geometry_shader),
EXT(EXT_separate_shader_objects),
EXT(EXT_shader_framebuffer_fetch),
EXT(EXT_shader_framebuffer_fetch_non_coherent),
+ EXT(EXT_shader_group_vote),
EXT(EXT_shader_image_load_formatted),
EXT(EXT_shader_image_load_store),
EXT(EXT_shader_implicit_conversions),
EXT(EXT_texture_shadow_lod),
EXT(INTEL_conservative_rasterization),
EXT(INTEL_shader_atomic_float_minmax),
+ EXT(INTEL_shader_integer_functions2),
EXT(MESA_shader_integer_functions),
EXT(NV_compute_shader_derivatives),
EXT(NV_fragment_shader_interlock),
EXT(NV_image_formats),
EXT(NV_shader_atomic_float),
+ EXT(NV_viewport_array2),
};
#undef EXT
}
+void
+ast_demote_statement::print(void) const
+{
+   /* "demote" (EXT_demote_to_helper_invocation) takes no operands, so
+    * printing the AST node is just the bare keyword.
+    */
+   printf("demote; ");
+}
+
+
void
ast_selection_statement::print(void) const
{
shader->bindless_image = state->bindless_image_specified;
shader->bound_sampler = state->bound_sampler_specified;
shader->bound_image = state->bound_image_specified;
+ shader->redeclares_gl_layer = state->redeclares_gl_layer;
+ shader->layer_viewport_relative = state->layer_viewport_relative;
}
/* src can be NULL if only the symbols found in the exec_list should be
shader->symbols);
}
-void
-_mesa_glsl_compile_shader(struct gl_context *ctx, struct gl_shader *shader,
- bool dump_ast, bool dump_hir, bool force_recompile)
+static bool
+can_skip_compile(struct gl_context *ctx, struct gl_shader *shader,
+ const char *source, bool force_recompile,
+ bool source_has_shader_include)
{
- const char *source = force_recompile && shader->FallbackSource ?
- shader->FallbackSource : shader->Source;
-
if (!force_recompile) {
if (ctx->Cache) {
char buf[41];
shader->CompileStatus = COMPILE_SKIPPED;
free((void *)shader->FallbackSource);
- shader->FallbackSource = NULL;
- return;
+
+ /* Copy pre-processed shader include to fallback source otherwise
+ * we have no guarantee the shader include source tree has not
+ * changed.
+ */
+ shader->FallbackSource = source_has_shader_include ?
+ strdup(source) : NULL;
+ return true;
}
}
} else {
/* We should only ever end up here if a re-compile has been forced by a
* shader cache miss. In which case we can skip the compile if its
- * already be done by a previous fallback or the initial compile call.
+ * already been done by a previous fallback or the initial compile call.
*/
if (shader->CompileStatus == COMPILE_SUCCESS)
- return;
+ return true;
}
- struct _mesa_glsl_parse_state *state =
+ return false;
+}
+
+void
+_mesa_glsl_compile_shader(struct gl_context *ctx, struct gl_shader *shader,
+ bool dump_ast, bool dump_hir, bool force_recompile)
+{
+ const char *source = force_recompile && shader->FallbackSource ?
+ shader->FallbackSource : shader->Source;
+
+ /* Note this will be true for shaders that have #include inside comments
+ * however that should be rare enough not to worry about.
+ */
+ bool source_has_shader_include =
+ strstr(source, "#include") == NULL ? false : true;
+
+ /* If there was no shader include we can check the shader cache and skip
+ * compilation before we run the preprocessor. We never skip compiling
+ * shaders that use ARB_shading_language_include because we would need to
+ * keep duplicate copies of the shader include source tree and paths.
+ */
+ if (!source_has_shader_include &&
+ can_skip_compile(ctx, shader, source, force_recompile, false))
+ return;
+
+ struct _mesa_glsl_parse_state *state =
new(shader) _mesa_glsl_parse_state(ctx, shader->Stage, shader);
if (ctx->Const.GenerateTemporaryNames)
(void) p_atomic_cmpxchg(&ir_variable::temporaries_allocate_names,
false, true);
- state->error = glcpp_preprocess(state, &source, &state->info_log,
- add_builtin_defines, state, ctx);
+ if (!source_has_shader_include || !force_recompile) {
+ state->error = glcpp_preprocess(state, &source, &state->info_log,
+ add_builtin_defines, state, ctx);
+ }
+
+ /* Now that we have run the preprocessor we can check the shader cache and
+ * skip compilation if possible for those shaders that contained a shader
+ * include.
+ */
+ if (source_has_shader_include &&
+ can_skip_compile(ctx, shader, source, force_recompile, true))
+ return;
if (!state->error) {
_mesa_glsl_lexer_ctor(state, source);
shader->Version = state->language_version;
shader->IsES = state->es_shader;
+ struct gl_shader_compiler_options *options =
+ &ctx->Const.ShaderCompilerOptions[shader->Stage];
+
if (!state->error && !shader->ir->is_empty()) {
+ if (options->LowerPrecisionFloat16 || options->LowerPrecisionInt16)
+ lower_precision(options, shader->ir);
+ lower_builtins(shader->ir);
assign_subroutine_indexes(state);
lower_subroutine(shader->ir, state);
opt_shader_and_create_symbol_table(ctx, state->symbols, shader);
if (!force_recompile) {
free((void *)shader->FallbackSource);
- shader->FallbackSource = NULL;
+
+ /* Copy pre-processed shader include to fallback source otherwise we
+ * have no guarantee the shader include source tree has not changed.
+ */
+ shader->FallbackSource = source_has_shader_include ?
+ strdup(source) : NULL;
}
delete state->symbols;
OPT(lower_vector_insert, ir, false);
OPT(optimize_swizzles, ir);
- OPT(optimize_split_arrays, ir, linked);
+ /* Some drivers only call do_common_optimization() once rather than in a
+ * loop, and split arrays causes each dereference of a constant array
+ * element to get its own copy of the entire array initializer. This IR is
+ * not something that can be generated manually in a shader and is not
+ * accounted for by NIR optimisations; the result is an exponential slow
+ * down in compilation speed as a constant array's element count grows. To
+ * avoid that here we make sure to always clean up the mess split arrays
+ * causes to constant arrays.
+ */
+ bool array_split = optimize_split_arrays(ir, linked);
+ if (array_split)
+ do_constant_propagation(ir);
+ progress |= array_split;
+
OPT(optimize_redundant_jumps, ir);
if (options->MaxUnrollIterations) {