+ *cross_invocation = false;
+ *indirect = false;
+
+ const bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
+
+ nir_deref_path path;
+ nir_deref_path_init(&path, deref, NULL);
+ assert(path.path[0]->deref_type == nir_deref_type_var);
+ nir_deref_instr **p = &path.path[1];
+
+ /* Vertex index is the outermost array index. */
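+ /* E.g. a TCS read of gl_in[i].gl_Position has the deref path
+ * var(gl_in) -> array(i) -> struct(gl_Position); the index of that
+ * first array deref is the vertex index.
+ */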
+ if (per_vertex) {
+ assert((*p)->deref_type == nir_deref_type_array);
+ nir_instr *vertex_index_instr = (*p)->arr.index.ssa->parent_instr;
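+ /* Any vertex index other than load_invocation_id may address another
+ * invocation's vertex, so count the access as cross-invocation.
+ */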
+ *cross_invocation =
+ vertex_index_instr->type != nir_instr_type_intrinsic ||
+ nir_instr_as_intrinsic(vertex_index_instr)->intrinsic !=
+ nir_intrinsic_load_invocation_id;
+ p++;
+ }
+
+ /* We always lower indirect dereferences for "compact" array vars. */
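+ /* Compact vars are scalar arrays such as gl_ClipDistance or the tess
+ * levels, packed four components per slot.
+ */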
+ if (!path.path[0]->var->data.compact) {
+ /* Non-compact array vars: find out if they are indirect. */
+ for (; *p; p++) {
+ if ((*p)->deref_type == nir_deref_type_array) {
+ *indirect |= !nir_src_is_const((*p)->arr.index);
+ } else if ((*p)->deref_type == nir_deref_type_struct) {
+ /* Struct indices are always constant. */
+ } else {
+ unreachable("Unsupported deref type");
+ }
+ }
+ }
+
+ nir_deref_path_finish(&path);
+}
+
+static void
+set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
+ nir_deref_instr *deref, bool is_output_read)
+{
+ for (int i = 0; i < len; i++) {
+ assert(var->data.location != -1);
+
+ int idx = var->data.location + offset + i;
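+ /* Tess levels and bounding box are patch varyings, but they have their
+ * own varying slots rather than a PATCH0-relative slot, so they are
+ * tracked in the regular masks below.
+ */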
+ bool is_patch_generic = var->data.patch &&
+ idx != VARYING_SLOT_TESS_LEVEL_INNER &&
+ idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
+ idx != VARYING_SLOT_BOUNDING_BOX0 &&
+ idx != VARYING_SLOT_BOUNDING_BOX1;
+ uint64_t bitfield;
+
+ if (is_patch_generic) {
+ assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
+ bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
+ } else {
+ assert(idx < VARYING_SLOT_MAX);
+ bitfield = BITFIELD64_BIT(idx);
+ }
+
+ bool cross_invocation;
+ bool indirect;
+ get_deref_info(shader, var, deref, &cross_invocation, &indirect);
+
+ if (var->data.mode == nir_var_shader_in) {
+ if (is_patch_generic) {
+ shader->info.patch_inputs_read |= bitfield;
+ if (indirect)
+ shader->info.patch_inputs_read_indirectly |= bitfield;
+ } else {
+ shader->info.inputs_read |= bitfield;
+ if (indirect)
+ shader->info.inputs_read_indirectly |= bitfield;
+ }
+
+ if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
+ shader->info.tess.tcs_cross_invocation_inputs_read |= bitfield;
+
+ if (shader->info.stage == MESA_SHADER_FRAGMENT) {
+ shader->info.fs.uses_sample_qualifier |= var->data.sample;
+ }
+ } else {
+ assert(var->data.mode == nir_var_shader_out);
+ if (is_output_read) {
+ if (is_patch_generic) {
+ shader->info.patch_outputs_read |= bitfield;
+ if (indirect)
+ shader->info.patch_outputs_accessed_indirectly |= bitfield;
+ } else {
+ shader->info.outputs_read |= bitfield;
+ if (indirect)
+ shader->info.outputs_accessed_indirectly |= bitfield;
+ }
+
+ if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
+ shader->info.tess.tcs_cross_invocation_outputs_read |= bitfield;
+ } else {
+ if (is_patch_generic) {
+ shader->info.patch_outputs_written |= bitfield;
+ if (indirect)
+ shader->info.patch_outputs_accessed_indirectly |= bitfield;
+ } else if (!var->data.read_only) {
+ shader->info.outputs_written |= bitfield;
+ if (indirect)
+ shader->info.outputs_accessed_indirectly |= bitfield;
+ }
+ }
+
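+ /* Outputs consumed through framebuffer fetch also count as read. */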
+ if (var->data.fb_fetch_output)
+ shader->info.outputs_read |= bitfield;
+ }
+ }
+}
+
+/**
+ * Mark an entire variable as used. Caller must ensure that the variable
+ * represents a shader input or output.
+ */
+static void
+mark_whole_variable(nir_shader *shader, nir_variable *var,
+ nir_deref_instr *deref, bool is_output_read)
+{
+ const struct glsl_type *type = var->type;
+
+ if (nir_is_per_vertex_io(var, shader->info.stage)) {
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
+ if (var->data.per_view) {
+ /* TODO: Per view and Per Vertex are not currently used together. When
+ * they start to be used (e.g. when adding Primitive Replication for GS
+ * on Intel), verify that "peeling" the type twice is correct. This
+ * assert ensures we remember it.
+ */
+ assert(!nir_is_per_vertex_io(var, shader->info.stage));
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
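+ /* Compact variables pack four array elements per slot, so round the
+ * element count up to whole slots.
+ */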
+ const unsigned slots =
+ var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
+ : glsl_count_attribute_slots(type, false);
+
+ set_io_mask(shader, var, 0, slots, deref, is_output_read);
+}
+
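+/**
+ * Return the constant slot offset of the given deref chain, or -1 (as
+ * unsigned) if any array index is not known at compile time.
+ */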
+static unsigned
+get_io_offset(nir_deref_instr *deref, bool is_vertex_input, bool per_vertex)
+{
+ unsigned offset = 0;
+
+ for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
+ if (d->deref_type == nir_deref_type_array) {
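+ /* For per-vertex I/O the outermost array index is the vertex index,
+ * not part of the slot offset, so stop walking there.
+ */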
+ if (per_vertex && nir_deref_instr_parent(d)->deref_type == nir_deref_type_var)
+ break;
+
+ if (!nir_src_is_const(d->arr.index))
+ return -1;
+
+ offset += glsl_count_attribute_slots(d->type, is_vertex_input) *
+ nir_src_as_uint(d->arr.index);
+ }
+ /* TODO: we can get the offset for structs here; see nir_lower_io(). */
+ }
+
+ return offset;
+}
+
+/**
+ * Try to mark a portion of the given varying as used. Caller must ensure
+ * that the variable represents a shader input or output.
+ *
+ * If the index can't be interpreted as a constant, or some other problem
+ * occurs, then nothing will be marked and false will be returned.
+ */
+static bool
+try_mask_partial_io(nir_shader *shader, nir_variable *var,
+ nir_deref_instr *deref, bool is_output_read)
+{
+ const struct glsl_type *type = var->type;
+ bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
+
+ if (per_vertex) {
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
+ /* Per-view variables are considered as a whole. */
+ if (var->data.per_view)
+ return false;
+
+ /* The code below only handles:
+ *
+ * - Indexing into matrices
+ * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
+ *
+ * For now, we just give up if we see varying structs or arrays of structs
+ * here, so the entire variable gets marked as used.
+ */
+ if (!(glsl_type_is_matrix(type) ||
+ (glsl_type_is_array(type) && !var->data.compact &&
+ (glsl_type_is_numeric(glsl_without_array(type)) ||
+ glsl_type_is_boolean(glsl_without_array(type)))))) {
+
+ /* If we don't know how to handle this case, give up and let the
+ * caller mark the whole variable as used.
+ */
+ return false;
+ }
+
+ unsigned offset = get_io_offset(deref, false, per_vertex);
+ if (offset == -1)
+ return false;
+
+ unsigned num_elems;
+ unsigned elem_width = 1;
+ unsigned mat_cols = 1;
+ if (glsl_type_is_array(type)) {
+ num_elems = glsl_get_aoa_size(type);
+ if (glsl_type_is_matrix(glsl_without_array(type)))
+ mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
+ } else {
+ num_elems = glsl_get_matrix_columns(type);
+ }
+
+ /* Double the element width for double types, which take two slots. */
+ if (glsl_type_is_dual_slot(glsl_without_array(type)))
+ elem_width *= 2;
+
+ if (offset >= num_elems * elem_width * mat_cols) {
+ /* Constant index outside the bounds of the matrix/array. This could
+ * arise as a result of constant folding of a legal GLSL program.
+ *
+ * Even though the spec says that indexing outside the bounds of a
+ * matrix/array results in undefined behaviour, we don't want to pass
+ * out-of-range values to set_io_mask() (since this could result in
+ * slots that don't exist being marked as used), so just let the caller
+ * mark the whole variable as used.
+ */
+ return false;
+ }
+
+ set_io_mask(shader, var, offset, elem_width, deref, is_output_read);
+ return true;
+}
+
+static void
+update_memory_written_for_deref(nir_shader *shader, nir_deref_instr *deref)
+{
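+ /* Record writes to externally visible memory so drivers know the shader
+ * has side effects.
+ */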
+ switch (deref->mode) {
+ case nir_var_mem_ssbo:
+ case nir_var_mem_global:
+ shader->info.writes_memory = true;
+ break;
+ default:
+ /* Nothing to do. */
+ break;
+ }
+}
+
+static void
+gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
+ void *dead_ctx)
+{
+ uint64_t slot_mask = 0;
+
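+ /* If the intrinsic carries IO semantics, compute the mask of slots it
+ * may access.
+ */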
+ if (nir_intrinsic_infos[instr->intrinsic].index_map[NIR_INTRINSIC_IO_SEMANTICS] > 0) {
+ nir_io_semantics semantics = nir_intrinsic_io_semantics(instr);
+
+ slot_mask = BITFIELD64_RANGE(semantics.location, semantics.num_slots);
+ }
+