static const unsigned known_desktop_glsl_versions[] =
- { 110, 120, 130, 140, 150, 330, 400, 410, 420, 430, 440, 450 };
+ { 110, 120, 130, 140, 150, 330, 400, 410, 420, 430, 440, 450, 460 };
static const unsigned known_desktop_gl_versions[] =
- { 20, 21, 30, 31, 32, 33, 40, 41, 42, 43, 44, 45 };
+ { 20, 21, 30, 31, 32, 33, 40, 41, 42, 43, 44, 45, 46 };
_mesa_glsl_parse_state::_mesa_glsl_parse_state(struct gl_context *_ctx,
sizeof(this->atomic_counter_offsets));
this->allow_extension_directive_midshader =
ctx->Const.AllowGLSLExtensionDirectiveMidShader;
+ this->allow_builtin_variable_redeclaration =
+ ctx->Const.AllowGLSLBuiltinVariableRedeclaration;
this->cs_input_local_size_variable_specified = false;
+
+ /* ARB_bindless_texture */
+ this->bindless_sampler_specified = false;
+ this->bindless_image_specified = false;
+ this->bound_sampler_specified = false;
+ this->bound_image_specified = false;
}
/**
EXT(ARB_ES3_1_compatibility),
EXT(ARB_ES3_2_compatibility),
EXT(ARB_arrays_of_arrays),
+ EXT(ARB_bindless_texture),
EXT(ARB_compute_shader),
EXT(ARB_compute_variable_group_size),
EXT(ARB_conservative_depth),
EXT(ARB_separate_shader_objects),
EXT(ARB_shader_atomic_counter_ops),
EXT(ARB_shader_atomic_counters),
+ EXT(ARB_shader_ballot),
EXT(ARB_shader_bit_encoding),
EXT(ARB_shader_clock),
EXT(ARB_shader_draw_parameters),
}
}
-void
+static void
_mesa_ast_type_qualifier_print(const struct ast_type_qualifier *q)
{
- if (q->flags.q.subroutine)
+ if (q->is_subroutine_decl())
printf("subroutine ");
- if (q->flags.q.subroutine_def) {
+ if (q->subroutine_list) {
printf("subroutine (");
q->subroutine_list->print();
printf(")");
}
-ast_struct_specifier::ast_struct_specifier(void *lin_ctx, const char *identifier,
+ast_struct_specifier::ast_struct_specifier(const char *identifier,
ast_declarator_list *declarator_list)
+ : name(identifier), layout(NULL), declarations(), is_declaration(true),
+ type(NULL)
{
- if (identifier == NULL) {
- static mtx_t mutex = _MTX_INITIALIZER_NP;
- static unsigned anon_count = 1;
- unsigned count;
-
- mtx_lock(&mutex);
- count = anon_count++;
- mtx_unlock(&mutex);
-
- identifier = linear_asprintf(lin_ctx, "#anon_struct_%04x", count);
- }
- name = identifier;
this->declarations.push_degenerate_list_at_head(&declarator_list->link);
- is_declaration = true;
- layout = NULL;
}
void ast_subroutine_list::print(void) const
/* Nothing to do. */
break;
}
+
+ shader->bindless_sampler = state->bindless_sampler_specified;
+ shader->bindless_image = state->bindless_image_specified;
+ shader->bound_sampler = state->bound_sampler_specified;
+ shader->bound_image = state->bound_image_specified;
+}
+
+/* Copy the functions and non-temporary variables found in "shader_ir" into
+ * the symbol table "dest".
+ *
+ * "src" can be NULL if only the symbols found in the exec_list should be
+ * copied.  When "src" is non-NULL, the gl_PerVertex shader-in and
+ * shader-out interface definitions are also copied from "src" into "dest".
+ */
+void
+_mesa_glsl_copy_symbols_from_table(struct exec_list *shader_ir,
+ struct glsl_symbol_table *src,
+ struct glsl_symbol_table *dest)
+{
+ foreach_in_list (ir_instruction, ir, shader_ir) {
+ switch (ir->ir_type) {
+ case ir_type_function:
+ dest->add_function((ir_function *) ir);
+ break;
+ case ir_type_variable: {
+ ir_variable *const var = (ir_variable *) ir;
+
+ /* Temporaries are compiler-internal and never need to be looked up
+ * by name later, so they are not carried over.
+ */
+ if (var->data.mode != ir_var_temporary)
+ dest->add_variable(var);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (src != NULL) {
+ /* Explicitly copy the gl_PerVertex interface definitions because these
+ * are needed to check they are the same during the interstage link.
+ * They can't necessarily be found via the exec_list because the members
+ * might not be referenced. The GL spec still requires that they match
+ * in that case.
+ */
+ const glsl_type *iface =
+ src->get_interface("gl_PerVertex", ir_var_shader_in);
+ if (iface)
+ dest->add_interface(iface->name, iface, ir_var_shader_in);
+
+ iface = src->get_interface("gl_PerVertex", ir_var_shader_out);
+ if (iface)
+ dest->add_interface(iface->name, iface, ir_var_shader_out);
+ }
+}
extern "C" {
static void
-assign_subroutine_indexes(struct gl_shader *sh,
- struct _mesa_glsl_parse_state *state)
+assign_subroutine_indexes(struct _mesa_glsl_parse_state *state)
{
int j, k;
int index = 0;
}
}
+/* Optimize a shader's IR at compile time and rebuild its symbol table.
+ *
+ * Precondition (asserted): the shader did not fail compilation and its IR
+ * is non-empty.  do_common_optimization() is run once when
+ * GLSLOptimizeConservatively is set, otherwise until it stops reporting
+ * progress.  Dead built-in variables are then removed, the surviving IR is
+ * reparented onto the shader, and shader->symbols is repopulated from that
+ * IR via _mesa_glsl_copy_symbols_from_table() ("source_symbols" may be
+ * NULL).
+ */
+static void
+opt_shader_and_create_symbol_table(struct gl_context *ctx,
+ struct glsl_symbol_table *source_symbols,
+ struct gl_shader *shader)
+{
+ assert(shader->CompileStatus != compile_failure &&
+ !shader->ir->is_empty());
+
+ struct gl_shader_compiler_options *options =
+ &ctx->Const.ShaderCompilerOptions[shader->Stage];
+
+ /* Do some optimization at compile time to reduce shader IR size
+ * and reduce later work if the same shader is linked multiple times
+ */
+ if (ctx->Const.GLSLOptimizeConservatively) {
+ /* Run it just once. */
+ do_common_optimization(shader->ir, false, false, options,
+ ctx->Const.NativeIntegers);
+ } else {
+ /* Repeat it until it stops making changes. */
+ while (do_common_optimization(shader->ir, false, false, options,
+ ctx->Const.NativeIntegers))
+ ;
+ }
+
+ validate_ir_tree(shader->ir);
+
+ /* Pick the variable mode whose dead built-ins must be preserved for this
+ * stage (inputs feed the vertex stage, outputs leave the fragment stage).
+ */
+ enum ir_variable_mode other;
+ switch (shader->Stage) {
+ case MESA_SHADER_VERTEX:
+ other = ir_var_shader_in;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ other = ir_var_shader_out;
+ break;
+ default:
+ /* Something invalid to ensure optimize_dead_builtin_uniforms
+ * doesn't remove anything other than uniforms or constants.
+ */
+ other = ir_var_mode_count;
+ break;
+ }
+
+ optimize_dead_builtin_variables(shader->ir, other);
+
+ validate_ir_tree(shader->ir);
+
+ /* Retain any live IR, but trash the rest. */
+ reparent_ir(shader->ir, shader->ir);
+
+ /* Destroy the symbol table. Create a new symbol table that contains only
+ * the variables and functions that still exist in the IR. The symbol
+ * table will be used later during linking.
+ *
+ * There must NOT be any freed objects still referenced by the symbol
+ * table. That could cause the linker to dereference freed memory.
+ *
+ * We don't have to worry about types or interface-types here because those
+ * are fly-weights that are looked up by glsl_type.
+ */
+ _mesa_glsl_copy_symbols_from_table(shader->ir, source_symbols,
+ shader->symbols);
+}
+
void
_mesa_glsl_compile_shader(struct gl_context *ctx, struct gl_shader *shader,
bool dump_ast, bool dump_hir, bool force_recompile)
{
- struct _mesa_glsl_parse_state *state =
- new(shader) _mesa_glsl_parse_state(ctx, shader->Stage, shader);
const char *source = force_recompile && shader->FallbackSource ?
shader->FallbackSource : shader->Source;
- if (ctx->Const.GenerateTemporaryNames)
- (void) p_atomic_cmpxchg(&ir_variable::temporaries_allocate_names,
- false, true);
-
- state->error = glcpp_preprocess(state, &source, &state->info_log,
- add_builtin_defines, state, ctx);
-
if (!force_recompile) {
- char buf[41];
- _mesa_sha1_compute(source, strlen(source), shader->sha1);
- if (ctx->Cache && disk_cache_has_key(ctx->Cache, shader->sha1)) {
- /* We've seen this shader before and know it compiles */
- if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
- fprintf(stderr, "deferring compile of shader: %s\n",
- _mesa_sha1_format(buf, shader->sha1));
+ if (ctx->Cache) {
+ char buf[41];
+ disk_cache_compute_key(ctx->Cache, source, strlen(source),
+ shader->sha1);
+ if (disk_cache_has_key(ctx->Cache, shader->sha1)) {
+ /* We've seen this shader before and know it compiles */
+ if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
+ _mesa_sha1_format(buf, shader->sha1);
+ fprintf(stderr, "deferring compile of shader: %s\n", buf);
+ }
+ shader->CompileStatus = compile_skipped;
+
+ free((void *)shader->FallbackSource);
+ shader->FallbackSource = NULL;
+ return;
}
- shader->CompileStatus = true;
+ }
+ } else {
+ /* We should only ever end up here if a re-compile has been forced by a
+ * shader cache miss. In that case we can skip the compile if it has
+ * already been done by a previous fallback or the initial compile call.
+ */
+ if (shader->CompileStatus == compile_success)
+ return;
- free((void *)shader->FallbackSource);
- shader->FallbackSource = NULL;
+ if (shader->CompileStatus == compiled_no_opts) {
+ opt_shader_and_create_symbol_table(ctx,
+ NULL, /* source_symbols */
+ shader);
+ shader->CompileStatus = compile_success;
return;
}
}
+ struct _mesa_glsl_parse_state *state =
+ new(shader) _mesa_glsl_parse_state(ctx, shader->Stage, shader);
+
+ if (ctx->Const.GenerateTemporaryNames)
+ (void) p_atomic_cmpxchg(&ir_variable::temporaries_allocate_names,
+ false, true);
+
+ state->error = glcpp_preprocess(state, &source, &state->info_log,
+ add_builtin_defines, state, ctx);
+
if (!state->error) {
_mesa_glsl_lexer_ctor(state, source);
_mesa_glsl_parse(state);
}
}
-
- if (!state->error && !shader->ir->is_empty()) {
- struct gl_shader_compiler_options *options =
- &ctx->Const.ShaderCompilerOptions[shader->Stage];
-
- assign_subroutine_indexes(shader, state);
- lower_subroutine(shader->ir, state);
-
- /* Do some optimization at compile time to reduce shader IR size
- * and reduce later work if the same shader is linked multiple times
- */
- if (ctx->Const.GLSLOptimizeConservatively) {
- /* Run it just once. */
- do_common_optimization(shader->ir, false, false, options,
- ctx->Const.NativeIntegers);
- } else {
- /* Repeat it until it stops making changes. */
- while (do_common_optimization(shader->ir, false, false, options,
- ctx->Const.NativeIntegers))
- ;
- }
-
- validate_ir_tree(shader->ir);
-
- enum ir_variable_mode other;
- switch (shader->Stage) {
- case MESA_SHADER_VERTEX:
- other = ir_var_shader_in;
- break;
- case MESA_SHADER_FRAGMENT:
- other = ir_var_shader_out;
- break;
- default:
- /* Something invalid to ensure optimize_dead_builtin_uniforms
- * doesn't remove anything other than uniforms or constants.
- */
- other = ir_var_mode_count;
- break;
- }
-
- optimize_dead_builtin_variables(shader->ir, other);
-
- validate_ir_tree(shader->ir);
- }
-
if (shader->InfoLog)
ralloc_free(shader->InfoLog);
set_shader_inout_layout(shader, state);
shader->symbols = new(shader->ir) glsl_symbol_table;
- shader->CompileStatus = !state->error;
+ shader->CompileStatus = state->error ? compile_failure : compile_success;
shader->InfoLog = state->info_log;
shader->Version = state->language_version;
shader->IsES = state->es_shader;
- /* Retain any live IR, but trash the rest. */
- reparent_ir(shader->ir, shader->ir);
-
- /* Destroy the symbol table. Create a new symbol table that contains only
- * the variables and functions that still exist in the IR. The symbol
- * table will be used later during linking.
- *
- * There must NOT be any freed objects still referenced by the symbol
- * table. That could cause the linker to dereference freed memory.
- *
- * We don't have to worry about types or interface-types here because those
- * are fly-weights that are looked up by glsl_type.
- */
- foreach_in_list (ir_instruction, ir, shader->ir) {
- switch (ir->ir_type) {
- case ir_type_function:
- shader->symbols->add_function((ir_function *) ir);
- break;
- case ir_type_variable: {
- ir_variable *const var = (ir_variable *) ir;
+ if (!state->error && !shader->ir->is_empty()) {
+ assign_subroutine_indexes(state);
+ lower_subroutine(shader->ir, state);
- if (var->data.mode != ir_var_temporary)
- shader->symbols->add_variable(var);
- break;
- }
- default:
- break;
+ if (!ctx->Cache || force_recompile)
+ opt_shader_and_create_symbol_table(ctx, state->symbols, shader);
+ else {
+ reparent_ir(shader->ir, shader->ir);
+ shader->CompileStatus = compiled_no_opts;
}
}
- _mesa_glsl_initialize_derived_variables(ctx, shader);
-
if (!force_recompile) {
free((void *)shader->FallbackSource);
shader->FallbackSource = NULL;
options->EmitNoCont, options->EmitNoLoops);
OPT(do_vec_index_to_swizzle, ir);
OPT(lower_vector_insert, ir, false);
- OPT(do_swizzle_swizzle, ir);
- OPT(do_noop_swizzle, ir);
+ OPT(optimize_swizzles, ir);
OPT(optimize_split_arrays, ir, linked);
OPT(optimize_redundant_jumps, ir);
if (options->MaxUnrollIterations) {
loop_state *ls = analyze_loop_variables(ir);
if (ls->loop_found) {
- OPT(set_loop_controls, ir, ls);
- OPT(unroll_loops, ir, ls, options);
+ bool loop_progress = unroll_loops(ir, ls, options);
+ while (loop_progress) {
+ loop_progress = false;
+ loop_progress |= do_constant_propagation(ir);
+ loop_progress |= do_if_simplification(ir);
+ }
+ progress |= loop_progress;
}
delete ls;
}