*/
#include "nir.h"
+#include "nir_deref.h"
#include "main/menums.h"
+static void
+get_deref_info(nir_shader *shader, nir_variable *var, nir_deref_instr *deref,
+               bool *cross_invocation, bool *indirect)
+{
+   *cross_invocation = false;
+   *indirect = false;
+
+   const bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
+
+   nir_deref_path path;
+   nir_deref_path_init(&path, deref, NULL);
+   assert(path.path[0]->deref_type == nir_deref_type_var);
+   nir_deref_instr **p = &path.path[1];
+
+   /* Vertex index is the outermost array index, e.g. the "i" in
+    * gl_in[i].gl_Position.
+    */
+   if (per_vertex) {
+      assert((*p)->deref_type == nir_deref_type_array);
+      nir_instr *vertex_index_instr = (*p)->arr.index.ssa->parent_instr;
+      /* Anything that is not a direct load_invocation_id is conservatively
+       * treated as a potential cross-invocation access.
+       */
+      *cross_invocation =
+         vertex_index_instr->type != nir_instr_type_intrinsic ||
+         nir_instr_as_intrinsic(vertex_index_instr)->intrinsic !=
+         nir_intrinsic_load_invocation_id;
+      p++;
+   }
+
+   /* We always lower indirect dereferences for "compact" array vars. */
+   if (!path.path[0]->var->data.compact) {
+      /* Non-compact array vars: find out if they are indirect. */
+      for (; *p; p++) {
+         if ((*p)->deref_type == nir_deref_type_array) {
+            *indirect |= !nir_src_is_const((*p)->arr.index);
+         } else if ((*p)->deref_type == nir_deref_type_struct) {
+            /* Struct indices are always constant. */
+         } else {
+            unreachable("Unsupported deref type");
+         }
+      }
+   }
+
+   nir_deref_path_finish(&path);
+}
+
static void
set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
-            bool is_output_read)
+            nir_deref_instr *deref, bool is_output_read)
{
for (int i = 0; i < len; i++) {
assert(var->data.location != -1);
bitfield = BITFIELD64_BIT(idx);
}
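+      /* Classify this access: does it index the variable indirectly, and
+       * can it touch another invocation's per-vertex slot?
+       */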
+      bool cross_invocation;
+      bool indirect;
+      get_deref_info(shader, var, deref, &cross_invocation, &indirect);
+
if (var->data.mode == nir_var_shader_in) {
-         if (is_patch_generic)
+         if (is_patch_generic) {
shader->info.patch_inputs_read |= bitfield;
-         else
+            if (indirect)
+               shader->info.patch_inputs_read_indirectly |= bitfield;
+         } else {
shader->info.inputs_read |= bitfield;
+            if (indirect)
+               shader->info.inputs_read_indirectly |= bitfield;
+         }
+
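+         /* Reads whose vertex index is anything but the current invocation
+          * ID may touch another vertex's input slot.
+          */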
+         if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
+            shader->info.tess.tcs_cross_invocation_inputs_read |= bitfield;
if (shader->info.stage == MESA_SHADER_FRAGMENT) {
shader->info.fs.uses_sample_qualifier |= var->data.sample;
if (is_output_read) {
if (is_patch_generic) {
shader->info.patch_outputs_read |= bitfield;
+               if (indirect)
+                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
} else {
shader->info.outputs_read |= bitfield;
+               if (indirect)
+                  shader->info.outputs_accessed_indirectly |= bitfield;
}
+
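+            /* Likewise for output reads: a vertex index other than the
+             * current invocation ID reads another invocation's outputs.
+             */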
+            if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
+               shader->info.tess.tcs_cross_invocation_outputs_read |= bitfield;
} else {
-            if (is_patch_generic) {
-               shader->info.patch_outputs_written |= bitfield;
-            } else if (!var->data.read_only) {
-               shader->info.outputs_written |= bitfield;
-            }
-         }
+            if (is_patch_generic) {
+               shader->info.patch_outputs_written |= bitfield;
+               if (indirect)
+                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
+            } else if (!var->data.read_only) {
+               shader->info.outputs_written |= bitfield;
+               if (indirect)
+                  shader->info.outputs_accessed_indirectly |= bitfield;
+            }
+         }
if (var->data.fb_fetch_output)
* represents a shader input or output.
*/
static void
-mark_whole_variable(nir_shader *shader, nir_variable *var, bool is_output_read)
+mark_whole_variable(nir_shader *shader, nir_variable *var,
+                    nir_deref_instr *deref, bool is_output_read)
{
const struct glsl_type *type = var->type;
type = glsl_get_array_element(type);
}
+   if (var->data.per_view) {
+      /* TODO: Per view and Per Vertex are not currently used together. When
+       * they start to be used (e.g. when adding Primitive Replication for GS
+       * on Intel), verify that "peeling" the type twice is correct. This
+       * assert ensures we remember it.
+       */
+      assert(!nir_is_per_vertex_io(var, shader->info.stage));
+      assert(glsl_type_is_array(type));
+      type = glsl_get_array_element(type);
+   }
+
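+   /* Compact variables (e.g. gl_ClipDistance[]) are scalar arrays packed
+    * four components to a vec4 slot, hence the DIV_ROUND_UP below.
+    */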
const unsigned slots =
var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
: glsl_count_attribute_slots(type, false);
-   set_io_mask(shader, var, 0, slots, is_output_read);
+   set_io_mask(shader, var, 0, slots, deref, is_output_read);
}
static unsigned
-get_io_offset(nir_deref_instr *deref, bool is_vertex_input)
+get_io_offset(nir_deref_instr *deref, bool is_vertex_input, bool per_vertex)
{
unsigned offset = 0;
for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
if (d->deref_type == nir_deref_type_array) {
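+         /* For per-vertex I/O the outermost array index selects the vertex,
+          * not a location within the variable, so stop before counting it.
+          */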
+         if (per_vertex && nir_deref_instr_parent(d)->deref_type == nir_deref_type_var)
+            break;
+
if (!nir_src_is_const(d->arr.index))
return -1;
nir_deref_instr *deref, bool is_output_read)
{
const struct glsl_type *type = var->type;
+   bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
-   if (nir_is_per_vertex_io(var, shader->info.stage)) {
+   if (per_vertex) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
+   /* Per-view variables will be considered as a whole. */
+   if (var->data.per_view)
+      return false;
+
/* The code below only handles:
*
* - Indexing into matrices
return false;
}
-   unsigned offset = get_io_offset(deref, false);
+   unsigned offset = get_io_offset(deref, false, per_vertex);
if (offset == -1)
return false;
return false;
}
-   set_io_mask(shader, var, offset, elem_width, is_output_read);
+   set_io_mask(shader, var, offset, elem_width, deref, is_output_read);
return true;
}
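+/* A deref write only counts as a memory write when it lands in externally
+ * visible memory (SSBO or global); scratch and shared stores do not.
+ */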
+static void
+update_memory_written_for_deref(nir_shader *shader, nir_deref_instr *deref)
+{
+   switch (deref->mode) {
+   case nir_var_mem_ssbo:
+   case nir_var_mem_global:
+      shader->info.writes_memory = true;
+      break;
+   default:
+      /* Nothing to do. */
+      break;
+   }
+}
+
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
void *dead_ctx)
switch (instr->intrinsic) {
case nir_intrinsic_demote:
case nir_intrinsic_demote_if:
+      shader->info.fs.uses_demote = true;
+      /* fallthrough - quads with helper lanes only might be discarded entirely */
case nir_intrinsic_discard:
case nir_intrinsic_discard_if:
-      assert(shader->info.stage == MESA_SHADER_FRAGMENT);
-      shader->info.fs.uses_discard = true;
+      /* Freedreno uses the discard_if intrinsic to end GS invocations that
+       * don't produce a vertex, so we only set uses_discard if executing on
+       * a fragment shader.
+       */
+      if (shader->info.stage == MESA_SHADER_FRAGMENT)
+         shader->info.fs.uses_discard = true;
break;
case nir_intrinsic_interp_deref_at_centroid:
case nir_intrinsic_interp_deref_at_sample:
case nir_intrinsic_interp_deref_at_offset:
+   case nir_intrinsic_interp_deref_at_vertex:
case nir_intrinsic_load_deref:
case nir_intrinsic_store_deref:{
nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
is_output_read = true;
if (!try_mask_partial_io(shader, var, deref, is_output_read))
-            mark_whole_variable(shader, var, is_output_read);
+            mark_whole_variable(shader, var, deref, is_output_read);
/* We need to track which input_reads bits correspond to a
* dvec3/dvec4 input attribute */
}
}
}
+      if (instr->intrinsic == nir_intrinsic_store_deref)
+         update_memory_written_for_deref(shader, deref);
break;
}
case nir_intrinsic_load_draw_id:
case nir_intrinsic_load_frag_coord:
case nir_intrinsic_load_point_coord:
+   case nir_intrinsic_load_line_coord:
case nir_intrinsic_load_front_face:
case nir_intrinsic_load_vertex_id:
case nir_intrinsic_load_vertex_id_zero_base:
/* fall through */
case nir_intrinsic_emit_vertex:
+   case nir_intrinsic_emit_vertex_with_counter:
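+      /* Emitting a vertex to any stream other than 0 means the GS really
+       * uses multiple streams.
+       */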
if (nir_intrinsic_stream_id(instr) > 0)
shader->info.gs.uses_streams = true;
break;
+   case nir_intrinsic_atomic_counter_inc:
+   case nir_intrinsic_atomic_counter_inc_deref:
+   case nir_intrinsic_atomic_counter_add:
+   case nir_intrinsic_atomic_counter_add_deref:
+   case nir_intrinsic_atomic_counter_pre_dec:
+   case nir_intrinsic_atomic_counter_pre_dec_deref:
+   case nir_intrinsic_atomic_counter_post_dec:
+   case nir_intrinsic_atomic_counter_post_dec_deref:
+   case nir_intrinsic_atomic_counter_min:
+   case nir_intrinsic_atomic_counter_min_deref:
+   case nir_intrinsic_atomic_counter_max:
+   case nir_intrinsic_atomic_counter_max_deref:
+   case nir_intrinsic_atomic_counter_and:
+   case nir_intrinsic_atomic_counter_and_deref:
+   case nir_intrinsic_atomic_counter_or:
+   case nir_intrinsic_atomic_counter_or_deref:
+   case nir_intrinsic_atomic_counter_xor:
+   case nir_intrinsic_atomic_counter_xor_deref:
+   case nir_intrinsic_atomic_counter_exchange:
+   case nir_intrinsic_atomic_counter_exchange_deref:
+   case nir_intrinsic_atomic_counter_comp_swap:
+   case nir_intrinsic_atomic_counter_comp_swap_deref:
+   case nir_intrinsic_bindless_image_atomic_add:
+   case nir_intrinsic_bindless_image_atomic_and:
+   case nir_intrinsic_bindless_image_atomic_comp_swap:
+   case nir_intrinsic_bindless_image_atomic_dec_wrap:
+   case nir_intrinsic_bindless_image_atomic_exchange:
+   case nir_intrinsic_bindless_image_atomic_fadd:
+   case nir_intrinsic_bindless_image_atomic_imax:
+   case nir_intrinsic_bindless_image_atomic_imin:
+   case nir_intrinsic_bindless_image_atomic_inc_wrap:
+   case nir_intrinsic_bindless_image_atomic_or:
+   case nir_intrinsic_bindless_image_atomic_umax:
+   case nir_intrinsic_bindless_image_atomic_umin:
+   case nir_intrinsic_bindless_image_atomic_xor:
+   case nir_intrinsic_bindless_image_store:
+   case nir_intrinsic_bindless_image_store_raw_intel:
+   case nir_intrinsic_global_atomic_add:
+   case nir_intrinsic_global_atomic_and:
+   case nir_intrinsic_global_atomic_comp_swap:
+   case nir_intrinsic_global_atomic_exchange:
+   case nir_intrinsic_global_atomic_fadd:
+   case nir_intrinsic_global_atomic_fcomp_swap:
+   case nir_intrinsic_global_atomic_fmax:
+   case nir_intrinsic_global_atomic_fmin:
+   case nir_intrinsic_global_atomic_imax:
+   case nir_intrinsic_global_atomic_imin:
+   case nir_intrinsic_global_atomic_or:
+   case nir_intrinsic_global_atomic_umax:
+   case nir_intrinsic_global_atomic_umin:
+   case nir_intrinsic_global_atomic_xor:
+   case nir_intrinsic_image_atomic_add:
+   case nir_intrinsic_image_atomic_and:
+   case nir_intrinsic_image_atomic_comp_swap:
+   case nir_intrinsic_image_atomic_dec_wrap:
+   case nir_intrinsic_image_atomic_exchange:
+   case nir_intrinsic_image_atomic_fadd:
+   case nir_intrinsic_image_atomic_imax:
+   case nir_intrinsic_image_atomic_imin:
+   case nir_intrinsic_image_atomic_inc_wrap:
+   case nir_intrinsic_image_atomic_or:
+   case nir_intrinsic_image_atomic_umax:
+   case nir_intrinsic_image_atomic_umin:
+   case nir_intrinsic_image_atomic_xor:
+   case nir_intrinsic_image_deref_atomic_add:
+   case nir_intrinsic_image_deref_atomic_and:
+   case nir_intrinsic_image_deref_atomic_comp_swap:
+   case nir_intrinsic_image_deref_atomic_dec_wrap:
+   case nir_intrinsic_image_deref_atomic_exchange:
+   case nir_intrinsic_image_deref_atomic_fadd:
+   case nir_intrinsic_image_deref_atomic_imax:
+   case nir_intrinsic_image_deref_atomic_imin:
+   case nir_intrinsic_image_deref_atomic_inc_wrap:
+   case nir_intrinsic_image_deref_atomic_or:
+   case nir_intrinsic_image_deref_atomic_umax:
+   case nir_intrinsic_image_deref_atomic_umin:
+   case nir_intrinsic_image_deref_atomic_xor:
+   case nir_intrinsic_image_deref_store:
+   case nir_intrinsic_image_deref_store_raw_intel:
+   case nir_intrinsic_image_store:
+   case nir_intrinsic_image_store_raw_intel:
+   case nir_intrinsic_ssbo_atomic_add:
+   case nir_intrinsic_ssbo_atomic_add_ir3:
+   case nir_intrinsic_ssbo_atomic_and:
+   case nir_intrinsic_ssbo_atomic_and_ir3:
+   case nir_intrinsic_ssbo_atomic_comp_swap:
+   case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
+   case nir_intrinsic_ssbo_atomic_exchange:
+   case nir_intrinsic_ssbo_atomic_exchange_ir3:
+   case nir_intrinsic_ssbo_atomic_fadd:
+   case nir_intrinsic_ssbo_atomic_fcomp_swap:
+   case nir_intrinsic_ssbo_atomic_fmax:
+   case nir_intrinsic_ssbo_atomic_fmin:
+   case nir_intrinsic_ssbo_atomic_imax:
+   case nir_intrinsic_ssbo_atomic_imax_ir3:
+   case nir_intrinsic_ssbo_atomic_imin:
+   case nir_intrinsic_ssbo_atomic_imin_ir3:
+   case nir_intrinsic_ssbo_atomic_or:
+   case nir_intrinsic_ssbo_atomic_or_ir3:
+   case nir_intrinsic_ssbo_atomic_umax:
+   case nir_intrinsic_ssbo_atomic_umax_ir3:
+   case nir_intrinsic_ssbo_atomic_umin:
+   case nir_intrinsic_ssbo_atomic_umin_ir3:
+   case nir_intrinsic_ssbo_atomic_xor:
+   case nir_intrinsic_ssbo_atomic_xor_ir3:
+   case nir_intrinsic_store_global:
+   case nir_intrinsic_store_global_ir3:
+   case nir_intrinsic_store_ssbo:
+   case nir_intrinsic_store_ssbo_ir3:
+      /* Only set this for globally visible memory, not scratch and not
+       * shared.
+       */
+      shader->info.writes_memory = true;
+      break;
+
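+   /* Variable-based atomics can also target shared or scratch memory, so
+    * only count them when the deref's mode is SSBO or global.
+    */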
+   case nir_intrinsic_deref_atomic_add:
+   case nir_intrinsic_deref_atomic_imin:
+   case nir_intrinsic_deref_atomic_umin:
+   case nir_intrinsic_deref_atomic_imax:
+   case nir_intrinsic_deref_atomic_umax:
+   case nir_intrinsic_deref_atomic_and:
+   case nir_intrinsic_deref_atomic_or:
+   case nir_intrinsic_deref_atomic_xor:
+   case nir_intrinsic_deref_atomic_exchange:
+   case nir_intrinsic_deref_atomic_comp_swap:
+      update_memory_written_for_deref(shader, nir_src_as_deref(instr->src[0]));
+      break;
+
default:
break;
}
{
shader->info.num_textures = 0;
shader->info.num_images = 0;
-   shader->info.last_msaa_image = -1;
-   nir_foreach_variable(var, &shader->uniforms) {
-      /* Bindless textures and images don't use non-bindless slots. */
-      if (var->data.bindless)
+   shader->info.image_buffers = 0;
+   shader->info.msaa_images = 0;
+
+   nir_foreach_uniform_variable(var, shader) {
+      /* Bindless textures and images don't use non-bindless slots.
+       * Interface blocks imply inputs, outputs, UBO, or SSBO, which can only
+       * mean bindless.
+       */
+      if (var->data.bindless || var->interface_type)
continue;
shader->info.num_textures += glsl_type_get_sampler_count(var->type);
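+      /* Image slots for an array of images are consecutive and share the
+       * element type, so mark whole ranges in the buffer/MSAA bitmasks.
+       */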
-      shader->info.num_images += glsl_type_get_image_count(var->type);
-      /* Assuming image slots don't have holes (e.g. OpenGL) */
-      if (glsl_type_is_image(var->type) &&
-          glsl_get_sampler_dim(var->type) == GLSL_SAMPLER_DIM_MS)
-         shader->info.last_msaa_image = shader->info.num_images - 1;
+      unsigned num_image_slots = glsl_type_get_image_count(var->type);
+      if (num_image_slots) {
+         const struct glsl_type *image_type = glsl_without_array(var->type);
+
+         if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_BUF) {
+            shader->info.image_buffers |=
+               BITFIELD_RANGE(shader->info.num_images, num_image_slots);
+         }
+         if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
+            shader->info.msaa_images |=
+               BITFIELD_RANGE(shader->info.num_images, num_image_slots);
+         }
+         shader->info.num_images += num_image_slots;
+      }
}
shader->info.inputs_read = 0;
shader->info.patch_inputs_read = 0;
shader->info.patch_outputs_written = 0;
shader->info.system_values_read = 0;
+   shader->info.inputs_read_indirectly = 0;
+   shader->info.outputs_accessed_indirectly = 0;
+   shader->info.patch_inputs_read_indirectly = 0;
+   shader->info.patch_outputs_accessed_indirectly = 0;
+
if (shader->info.stage == MESA_SHADER_VERTEX) {
shader->info.vs.double_inputs = 0;
}
if (shader->info.stage == MESA_SHADER_FRAGMENT) {
shader->info.fs.uses_sample_qualifier = false;
shader->info.fs.uses_discard = false;
+      shader->info.fs.uses_demote = false;
shader->info.fs.needs_helper_invocations = false;
}
+   if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
+      shader->info.tess.tcs_cross_invocation_inputs_read = 0;
+      shader->info.tess.tcs_cross_invocation_outputs_read = 0;
+   }
+
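+   /* Transform feedback stores vertex outputs to buffer objects, which is
+    * an externally visible memory write.
+    */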
+   shader->info.writes_memory = shader->info.has_transform_feedback_varyings;
void *dead_ctx = ralloc_context(NULL);
nir_foreach_block(block, entrypoint) {