+/* Walk a NULL-terminated chain of array/struct derefs (the leading
+ * deref_var has already been consumed by the caller) and OR `mask` into
+ * usage_mask[] for every attribute slot the chain can address.
+ *
+ * location holds the accumulated slot index (driver_location based).
+ * mask carries up to 8 component bits: the low nibble lands in
+ * usage_mask[location], the high nibble in usage_mask[location + 1].
+ */
+static void gather_usage_helper(const nir_deref_instr **deref_ptr,
+ unsigned location,
+ uint8_t mask,
+ uint8_t *usage_mask)
+{
+ for (; *deref_ptr; deref_ptr++) {
+ const nir_deref_instr *deref = *deref_ptr;
+ switch (deref->deref_type) {
+ case nir_deref_type_array: {
+ /* Slots occupied by one array element (deref->type is the
+ * element type of the array being indexed).
+ */
+ unsigned elem_size =
+ glsl_count_attribute_slots(deref->type, false);
+ if (nir_src_is_const(deref->arr.index)) {
+ /* Constant index: just advance the slot offset. */
+ location += elem_size * nir_src_as_uint(deref->arr.index);
+ } else {
+ /* Dynamic index: any element may be addressed, so recurse
+ * once per element (deref_ptr[-1] is the deref that produced
+ * the array) and let each recursion finish the remainder of
+ * the chain; nothing left to do at this level.
+ */
+ unsigned array_elems =
+ glsl_get_length(deref_ptr[-1]->type);
+ for (unsigned i = 0; i < array_elems; i++) {
+ gather_usage_helper(deref_ptr + 1,
+ location + elem_size * i,
+ mask, usage_mask);
+ }
+ return;
+ }
+ break;
+ }
+ case nir_deref_type_struct: {
+ /* Advance past the slots of every field that precedes the
+ * accessed member.
+ */
+ const struct glsl_type *parent_type =
+ deref_ptr[-1]->type;
+ unsigned index = deref->strct.index;
+ for (unsigned i = 0; i < index; i++) {
+ const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
+ location += glsl_count_attribute_slots(ft, false);
+ }
+ break;
+ }
+ default:
+ unreachable("Unhandled deref type in gather_components_used_helper");
+ }
+ }
+
+ /* End of the chain: record the usage.  Component bits 4..7 belong to
+ * the following slot (e.g. the upper half of a 64-bit vec3/vec4 as
+ * widened by gather_usage).
+ */
+ usage_mask[location] |= mask & 0xf;
+ if (mask & 0xf0)
+ usage_mask[location + 1] |= (mask >> 4) & 0xf;
+}
+
+/* Accumulate the component usage of a variable deref into usage_mask
+ * (one 4-bit component mask per attribute slot, indexed by
+ * driver_location).
+ */
+static void gather_usage(const nir_deref_instr *deref,
+ uint8_t mask,
+ uint8_t *usage_mask)
+{
+ nir_deref_path path;
+ nir_deref_path_init(&path, (nir_deref_instr *)deref, NULL);
+
+ /* path.path[0] is the deref_var; the helper walks everything after it. */
+ unsigned location_frac = path.path[0]->var->data.location_frac;
+ if (glsl_type_is_64bit(deref->type)) {
+ /* Each 64-bit component occupies two 32-bit components: widen bit i
+ * of the mask into bits 2i and 2i+1 before applying the component
+ * offset.  The resulting high nibble spills into the next slot
+ * (handled by gather_usage_helper).
+ */
+ uint8_t new_mask = 0;
+ for (unsigned i = 0; i < 4; i++) {
+ if (mask & (1 << i))
+ new_mask |= 0x3 << (2 * i);
+ }
+ mask = new_mask << location_frac;
+ } else {
+ /* 32-bit: shift by the component offset and clamp to one slot. */
+ mask <<= location_frac;
+ mask &= 0xf;
+ }
+
+ gather_usage_helper((const nir_deref_instr **)&path.path[1],
+ path.path[0]->var->data.driver_location,
+ mask, usage_mask);
+
+ nir_deref_path_finish(&path);
+}
+
+/* Gather per-stage info for a load_deref of a shader input.  Only the
+ * vertex stage tracks a per-component input usage mask here; all other
+ * stages do nothing.
+ */
+static void gather_intrinsic_load_deref_input_info(const nir_shader *nir,
+ const nir_intrinsic_instr *instr,
+ const nir_deref_instr *deref,
+ struct tgsi_shader_info *info)
+{
+ switch (nir->info.stage) {
+ case MESA_SHADER_VERTEX:
+ gather_usage(deref, nir_ssa_def_components_read(&instr->dest.ssa),
+ info->input_usage_mask);
+ /* Explicit break instead of the previous implicit fall-through into
+ * the empty default: behavior is unchanged, but this matches the
+ * sibling gather_intrinsic_*_info switches and silences
+ * -Wimplicit-fallthrough.
+ */
+ break;
+ default:;
+ }
+}
+
+/* Gather per-stage info for a load_deref of a shader output. */
+static void gather_intrinsic_load_deref_output_info(const nir_shader *nir,
+ const nir_intrinsic_instr *instr,
+ nir_variable *var,
+ struct tgsi_shader_info *info)
+{
+ assert(var && var->data.mode == nir_var_shader_out);
+
+ if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
+ /* The TCS can read back its own outputs; record which kind. */
+ bool is_tess_factor =
+ var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
+ var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
+ if (is_tess_factor)
+ info->reads_tessfactor_outputs = true;
+ else if (var->data.patch)
+ info->reads_perpatch_outputs = true;
+ else
+ info->reads_pervertex_outputs = true;
+ } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+ /* Reading an FB-fetch output marks the FS as using framebuffer
+ * fetch.
+ */
+ if (var->data.fb_fetch_output)
+ info->uses_fbfetch = true;
+ }
+}
+
+/* Gather per-stage info for a store_deref to a shader output. */
+static void gather_intrinsic_store_deref_output_info(const nir_shader *nir,
+ const nir_intrinsic_instr *instr,
+ const nir_deref_instr *deref,
+ struct tgsi_shader_info *info)
+{
+ /* Only stages whose outputs feed a later stage need the per-component
+ * output usage mask; every other stage is a no-op here.
+ */
+ const gl_shader_stage stage = nir->info.stage;
+ const bool track_usage = stage == MESA_SHADER_VERTEX || /* needed by LS, ES */
+ stage == MESA_SHADER_TESS_EVAL || /* needed by ES */
+ stage == MESA_SHADER_GEOMETRY;
+
+ if (track_usage) {
+ gather_usage(deref, nir_intrinsic_write_mask(instr),
+ info->output_usagemask);
+ }
+}
+
+static void scan_instruction(const struct nir_shader *nir,
+ struct tgsi_shader_info *info,