* Flag set if GL_ARB_post_depth_coverage layout qualifier is used.
*/
unsigned post_depth_coverage:1;
+
+ /**
+ * Flags for the layout qualifiers added by GL_ARB_fragment_shader_interlock.
+ */
+
+ unsigned pixel_interlock_ordered:1;
+ unsigned pixel_interlock_unordered:1;
+ unsigned sample_interlock_ordered:1;
+ unsigned sample_interlock_unordered:1;
+
/**
* Flag set if GL_INTEL_conservative_rasterization layout qualifier
* is used.
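(For reference: these four flags map one-to-one onto the layout qualifiers that GL_ARB_fragment_shader_interlock lets a fragment shader place on its input layout declaration. A minimal GLSL sketch, illustrative only and not part of this patch, of how a shader selects a mode:

    #version 450
    #extension GL_ARB_fragment_shader_interlock : require

    /* Request per-pixel, primitive-ordered critical sections; the other
     * three qualifiers pick the unordered and/or per-sample variants.
     */
    layout(pixel_interlock_ordered) in;

Only one of the four modes may be requested per shader, which the single-mode check added further below enforces.)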
if (state->has_bindless())
apply_bindless_qualifier_to_variable(qual, var, state, loc);
+
+ if (qual->flags.q.pixel_interlock_ordered ||
+ qual->flags.q.pixel_interlock_unordered ||
+ qual->flags.q.sample_interlock_ordered ||
+ qual->flags.q.sample_interlock_unordered) {
+ _mesa_glsl_error(loc, state, "interlock layout qualifiers "
+ "(pixel_interlock_ordered, pixel_interlock_unordered, "
+ "sample_interlock_ordered and sample_interlock_unordered) "
+ "are only valid in a fragment shader input layout declaration.");
+ }
}
static void
valid_in_mask.flags.q.early_fragment_tests = 1;
valid_in_mask.flags.q.inner_coverage = 1;
valid_in_mask.flags.q.post_depth_coverage = 1;
+ valid_in_mask.flags.q.pixel_interlock_ordered = 1;
+ valid_in_mask.flags.q.pixel_interlock_unordered = 1;
+ valid_in_mask.flags.q.sample_interlock_ordered = 1;
+ valid_in_mask.flags.q.sample_interlock_unordered = 1;
break;
case MESA_SHADER_COMPUTE:
valid_in_mask.flags.q.local_size = 7;
r = false;
}
+ if (state->in_qualifier->flags.q.pixel_interlock_ordered) {
+ state->fs_pixel_interlock_ordered = true;
+ state->in_qualifier->flags.q.pixel_interlock_ordered = false;
+ }
+
+ if (state->in_qualifier->flags.q.pixel_interlock_unordered) {
+ state->fs_pixel_interlock_unordered = true;
+ state->in_qualifier->flags.q.pixel_interlock_unordered = false;
+ }
+
+ if (state->in_qualifier->flags.q.sample_interlock_ordered) {
+ state->fs_sample_interlock_ordered = true;
+ state->in_qualifier->flags.q.sample_interlock_ordered = false;
+ }
+
+ if (state->in_qualifier->flags.q.sample_interlock_unordered) {
+ state->fs_sample_interlock_unordered = true;
+ state->in_qualifier->flags.q.sample_interlock_unordered = false;
+ }
+
+ if (state->fs_pixel_interlock_ordered +
+ state->fs_pixel_interlock_unordered +
+ state->fs_sample_interlock_ordered +
+ state->fs_sample_interlock_unordered > 1) {
+ _mesa_glsl_error(loc, state,
+ "only one interlock mode can be used at any time.");
+ r = false;
+ }
+
/* We allow the creation of multiple cs_input_layout nodes. Coherence among
* all existing nodes is checked later, when the AST node is transformed
* into HIR.
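(Because the qualifiers accumulate into the state->fs_* flags above, the single-mode check fires whether the conflicting qualifiers appear in one declaration or across several. An illustrative GLSL fragment, not from this patch, that would hit the new error:

    /* Triggers the "only one interlock mode" error above. */
    layout(pixel_interlock_ordered) in;
    layout(sample_interlock_unordered) in;
)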
"%s '%s':"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
message, name,
bad.flags.q.invariant ? " invariant" : "",
bad.flags.q.precise ? " precise" : "",
bad.flags.q.bound_sampler ? " bound_sampler" : "",
bad.flags.q.bound_image ? " bound_image" : "",
bad.flags.q.post_depth_coverage ? " post_depth_coverage" : "",
+ bad.flags.q.pixel_interlock_ordered ? " pixel_interlock_ordered" : "",
+ bad.flags.q.pixel_interlock_unordered ? " pixel_interlock_unordered" : "",
+ bad.flags.q.sample_interlock_ordered ? " sample_interlock_ordered" : "",
+ bad.flags.q.sample_interlock_unordered ? " sample_interlock_unordered" : "",
bad.flags.q.non_coherent ? " noncoherent" : "");
return false;
}
return state->ARB_shader_ballot_enable;
}
+static bool
+supports_arb_fragment_shader_interlock(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_fragment_shader_interlock_enable;
+}
+
static bool
shader_clock(const _mesa_glsl_parse_state *state)
{
ir_function_signature *_read_invocation_intrinsic(const glsl_type *type);
ir_function_signature *_read_invocation(const glsl_type *type);
+
+ ir_function_signature *_invocation_interlock_intrinsic(
+ builtin_available_predicate avail,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_invocation_interlock(
+ const char *intrinsic_name,
+ builtin_available_predicate avail);
+
ir_function_signature *_shader_clock_intrinsic(builtin_available_predicate avail,
const glsl_type *type);
ir_function_signature *_shader_clock(builtin_available_predicate avail,
ir_intrinsic_memory_barrier_shared),
NULL);
+ add_function("__intrinsic_begin_invocation_interlock",
+ _invocation_interlock_intrinsic(
+ supports_arb_fragment_shader_interlock,
+ ir_intrinsic_begin_invocation_interlock), NULL);
+
+ add_function("__intrinsic_end_invocation_interlock",
+ _invocation_interlock_intrinsic(
+ supports_arb_fragment_shader_interlock,
+ ir_intrinsic_end_invocation_interlock), NULL);
+
add_function("__intrinsic_shader_clock",
_shader_clock_intrinsic(shader_clock,
glsl_type::uvec2_type),
glsl_type::uint64_t_type),
NULL);
+ add_function("beginInvocationInterlockARB",
+ _invocation_interlock(
+ "__intrinsic_begin_invocation_interlock",
+ supports_arb_fragment_shader_interlock),
+ NULL);
+
+ add_function("endInvocationInterlockARB",
+ _invocation_interlock(
+ "__intrinsic_end_invocation_interlock",
+ supports_arb_fragment_shader_interlock),
+ NULL);
+
add_function("anyInvocationARB",
_vote("__intrinsic_vote_any", vote),
NULL);
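(These are the built-ins that application shaders call around their critical section; the extension constrains them to main(), paired, with begin before end and outside divergent flow control. A usage sketch in GLSL, illustrative only — the image name, binding and format are arbitrary:

    #version 450
    #extension GL_ARB_fragment_shader_interlock : require

    layout(pixel_interlock_ordered) in;

    layout(binding = 0, rgba8) coherent uniform image2D color_buf;

    void main()
    {
       ivec2 p = ivec2(gl_FragCoord.xy);

       beginInvocationInterlockARB();
       /* Read-modify-write of the image is serialized (and here also
        * ordered) between fragments covering the same pixel.
        */
       vec4 dst = imageLoad(color_buf, p);
       imageStore(color_buf, p, dst * 0.5 + vec4(0.1));
       endInvocationInterlockARB();
    }
)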
return sig;
}
+ir_function_signature *
+builtin_builder::_invocation_interlock_intrinsic(builtin_available_predicate avail,
+ enum ir_intrinsic_id id)
+{
+ MAKE_INTRINSIC(glsl_type::void_type, id, avail, 0);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_invocation_interlock(const char *intrinsic_name,
+ builtin_available_predicate avail)
+{
+ MAKE_SIG(glsl_type::void_type, avail, 0);
+ body.emit(call(shader->symbols->get_function(intrinsic_name),
+ NULL, sig->parameters));
+ return sig;
+}
+
ir_function_signature *
builtin_builder::_shader_clock_intrinsic(builtin_available_predicate avail,
const glsl_type *type)
}
}
+ const bool pixel_interlock_ordered = match_layout_qualifier($1,
+ "pixel_interlock_ordered", state) == 0;
+ const bool pixel_interlock_unordered = match_layout_qualifier($1,
+ "pixel_interlock_unordered", state) == 0;
+ const bool sample_interlock_ordered = match_layout_qualifier($1,
+ "sample_interlock_ordered", state) == 0;
+ const bool sample_interlock_unordered = match_layout_qualifier($1,
+ "sample_interlock_unordered", state) == 0;
+
+ if (pixel_interlock_ordered + pixel_interlock_unordered +
+ sample_interlock_ordered + sample_interlock_unordered > 0 &&
+ state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& @1, state, "interlock layout qualifiers: "
+ "pixel_interlock_ordered, pixel_interlock_unordered, "
+ "sample_interlock_ordered and sample_interlock_unordered, "
+ "only valid in fragment shader input layout declaration.");
+ } else if (pixel_interlock_ordered + pixel_interlock_unordered +
+ sample_interlock_ordered + sample_interlock_unordered > 0 &&
+ !state->ARB_fragment_shader_interlock_enable) {
+ _mesa_glsl_error(& @1, state,
+ "interlock layout qualifier present, but the "
+ "GL_ARB_fragment_shader_interlock extension is not "
+ "enabled.");
+ } else {
+ $$.flags.q.pixel_interlock_ordered = pixel_interlock_ordered;
+ $$.flags.q.pixel_interlock_unordered = pixel_interlock_unordered;
+ $$.flags.q.sample_interlock_ordered = sample_interlock_ordered;
+ $$.flags.q.sample_interlock_unordered = sample_interlock_unordered;
+ }
+
/* Layout qualifiers for tessellation evaluation shaders. */
if (!$$.flags.i) {
static const struct {
this->fs_early_fragment_tests = false;
this->fs_inner_coverage = false;
this->fs_post_depth_coverage = false;
+ this->fs_pixel_interlock_ordered = false;
+ this->fs_pixel_interlock_unordered = false;
+ this->fs_sample_interlock_ordered = false;
+ this->fs_sample_interlock_unordered = false;
this->fs_blend_support = 0;
memset(this->atomic_counter_offsets, 0,
sizeof(this->atomic_counter_offsets));
EXT(ARB_explicit_uniform_location),
EXT(ARB_fragment_coord_conventions),
EXT(ARB_fragment_layer_viewport),
+ EXT(ARB_fragment_shader_interlock),
EXT(ARB_gpu_shader5),
EXT(ARB_gpu_shader_fp64),
EXT(ARB_gpu_shader_int64),
assert(!state->fs_early_fragment_tests);
assert(!state->fs_inner_coverage);
assert(!state->fs_post_depth_coverage);
+ assert(!state->fs_pixel_interlock_ordered);
+ assert(!state->fs_pixel_interlock_unordered);
+ assert(!state->fs_sample_interlock_ordered);
+ assert(!state->fs_sample_interlock_unordered);
}
for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
shader->EarlyFragmentTests = state->fs_early_fragment_tests;
shader->InnerCoverage = state->fs_inner_coverage;
shader->PostDepthCoverage = state->fs_post_depth_coverage;
+ shader->PixelInterlockOrdered = state->fs_pixel_interlock_ordered;
+ shader->PixelInterlockUnordered = state->fs_pixel_interlock_unordered;
+ shader->SampleInterlockOrdered = state->fs_sample_interlock_ordered;
+ shader->SampleInterlockUnordered = state->fs_sample_interlock_unordered;
shader->BlendSupport = state->fs_blend_support;
break;
bool ARB_fragment_coord_conventions_warn;
bool ARB_fragment_layer_viewport_enable;
bool ARB_fragment_layer_viewport_warn;
+ bool ARB_fragment_shader_interlock_enable;
+ bool ARB_fragment_shader_interlock_warn;
bool ARB_gpu_shader5_enable;
bool ARB_gpu_shader5_warn;
bool ARB_gpu_shader_fp64_enable;
bool fs_post_depth_coverage;
+ bool fs_pixel_interlock_ordered;
+ bool fs_pixel_interlock_unordered;
+ bool fs_sample_interlock_ordered;
+ bool fs_sample_interlock_unordered;
+
unsigned fs_blend_support;
/**
case ir_intrinsic_shader_clock:
op = nir_intrinsic_shader_clock;
break;
+ case ir_intrinsic_begin_invocation_interlock:
+ op = nir_intrinsic_begin_invocation_interlock;
+ break;
+ case ir_intrinsic_end_invocation_interlock:
+ op = nir_intrinsic_end_invocation_interlock;
+ break;
case ir_intrinsic_group_memory_barrier:
op = nir_intrinsic_group_memory_barrier;
break;
instr->num_components = 2;
nir_builder_instr_insert(&b, &instr->instr);
break;
+ case nir_intrinsic_begin_invocation_interlock:
+ nir_builder_instr_insert(&b, &instr->instr);
+ break;
+ case nir_intrinsic_end_invocation_interlock:
+ nir_builder_instr_insert(&b, &instr->instr);
+ break;
case nir_intrinsic_store_ssbo: {
exec_node *param = ir->actual_parameters.get_head();
ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();
ir_intrinsic_memory_barrier_buffer,
ir_intrinsic_memory_barrier_image,
ir_intrinsic_memory_barrier_shared,
+ ir_intrinsic_begin_invocation_interlock,
+ ir_intrinsic_end_invocation_interlock,
ir_intrinsic_vote_all,
ir_intrinsic_vote_any,
linked_shader->Program->info.fs.inner_coverage |= shader->InnerCoverage;
linked_shader->Program->info.fs.post_depth_coverage |=
shader->PostDepthCoverage;
+ linked_shader->Program->info.fs.pixel_interlock_ordered |=
+ shader->PixelInterlockOrdered;
+ linked_shader->Program->info.fs.pixel_interlock_unordered |=
+ shader->PixelInterlockUnordered;
+ linked_shader->Program->info.fs.sample_interlock_ordered |=
+ shader->SampleInterlockOrdered;
+ linked_shader->Program->info.fs.sample_interlock_unordered |=
+ shader->SampleInterlockUnordered;
linked_shader->Program->sh.fs.BlendSupport |= shader->BlendSupport;
}
barrier("memory_barrier_buffer")
barrier("memory_barrier_image")
barrier("memory_barrier_shared")
+barrier("begin_invocation_interlock")
+barrier("end_invocation_interlock")
# A conditional discard, with a single boolean source.
intrinsic("discard_if", src_comp=[1])
bool pixel_center_integer;
+ bool pixel_interlock_ordered;
+ bool pixel_interlock_unordered;
+ bool sample_interlock_ordered;
+ bool sample_interlock_unordered;
+
/** gl_FragDepth layout for ARB_conservative_depth. */
enum gl_frag_depth_layout depth_layout;
} fs;
EXT(ARB_fragment_program , ARB_fragment_program , GLL, x , x , x , 2002)
EXT(ARB_fragment_program_shadow , ARB_fragment_program_shadow , GLL, x , x , x , 2003)
EXT(ARB_fragment_shader , ARB_fragment_shader , GLL, GLC, x , x , 2002)
+EXT(ARB_fragment_shader_interlock , ARB_fragment_shader_interlock , GLL, GLC, x , x , 2015)
EXT(ARB_framebuffer_no_attachments , ARB_framebuffer_no_attachments , GLL, GLC, x , x , 2012)
EXT(ARB_framebuffer_object , ARB_framebuffer_object , GLL, GLC, x , x , 2005)
EXT(ARB_framebuffer_sRGB , EXT_framebuffer_sRGB , GLL, GLC, x , x , 1998)
bool uses_gl_fragcoord;
bool PostDepthCoverage;
+ bool PixelInterlockOrdered;
+ bool PixelInterlockUnordered;
+ bool SampleInterlockOrdered;
+ bool SampleInterlockUnordered;
bool InnerCoverage;
/**
GLboolean ARB_fragment_shader;
GLboolean ARB_framebuffer_no_attachments;
GLboolean ARB_framebuffer_object;
+ GLboolean ARB_fragment_shader_interlock;
GLboolean ARB_enhanced_layouts;
GLboolean ARB_explicit_attrib_location;
GLboolean ARB_explicit_uniform_location;