+ nir_intrinsic_op sysval_op =
+ nir_intrinsic_from_system_value(var->data.location);
+ return nir_load_system_value(b, sysval_op, 0,
+ intrin->dest.ssa.num_components,
+ intrin->dest.ssa.bit_size);
+ }
+
+ default:
+ return NULL;
+ }
+}
+
+/* Replaces loads of nir_var_system_value variables with their dedicated
+ * load_* system-value intrinsics (see lower_system_value_instr above) and
+ * then deletes the variables from the shader.  Returns true if the
+ * instruction-lowering pass made progress.
+ */
+bool
+nir_lower_system_values(nir_shader *shader)
+{
+ bool progress = nir_shader_lower_instructions(shader,
+ lower_system_value_filter,
+ lower_system_value_instr,
+ NULL);
+
+ /* We're going to delete the variables so we need to clean up all those
+ * derefs we left lying around.
+ */
+ if (progress)
+ nir_remove_dead_derefs(shader);
+
+ /* NOTE(review): the variables are unlinked unconditionally, even when no
+ * instruction was lowered — after this pass any remaining system-value
+ * variable is dead either way.
+ */
+ nir_foreach_variable_with_modes_safe(var, shader, nir_var_system_value)
+ exec_node_remove(&var->node);
+
+ return progress;
+}
+
+/* Filter callback for nir_shader_lower_instructions: only intrinsic
+ * instructions are candidates for compute system-value lowering.
+ */
+static bool
+lower_compute_system_value_filter(const nir_instr *instr, const void *_options)
+{
+ if (instr->type != nir_instr_type_intrinsic)
+ return false;
+
+ return true;
+}
+
+static nir_ssa_def *
+lower_compute_system_value_instr(nir_builder *b,
+ nir_instr *instr, void *_options)
+{
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ const nir_lower_compute_system_values_options *options = _options;
+
+ /* All the intrinsics we care about are loads */
+ if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
+ return NULL;
+
+ assert(intrin->dest.is_ssa);
+ const unsigned bit_size = intrin->dest.ssa.bit_size;
+
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_load_local_invocation_id:
+ /* If lower_cs_local_id_from_index is true, then we derive the local
+ * index from the local id.
+ */
+ if (b->shader->options->lower_cs_local_id_from_index) {
+ /* We lower gl_LocalInvocationID from gl_LocalInvocationIndex based
+ * on this formula:
+ *
+ * gl_LocalInvocationID.x =
+ * gl_LocalInvocationIndex % gl_WorkGroupSize.x;
+ * gl_LocalInvocationID.y =
+ * (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
+ * gl_WorkGroupSize.y;
+ * gl_LocalInvocationID.z =
+ * (gl_LocalInvocationIndex /
+ * (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
+ * gl_WorkGroupSize.z;
+ *
+ * However, the final % gl_WorkGroupSize.z does nothing unless we
+ * accidentally end up with a gl_LocalInvocationIndex that is too
+ * large so it can safely be omitted.
+ */
+ nir_ssa_def *local_index = nir_load_local_invocation_index(b);
+ nir_ssa_def *local_size = nir_load_local_group_size(b);
+
+ /* Because no hardware supports a local workgroup size greater than
+ * about 1K, this calculation can be done in 32-bit and can save some
+ * 64-bit arithmetic.
+ */
+ nir_ssa_def *id_x, *id_y, *id_z;
+ id_x = nir_umod(b, local_index,
+ nir_channel(b, local_size, 0));
+ id_y = nir_umod(b, nir_udiv(b, local_index,
+ nir_channel(b, local_size, 0)),
+ nir_channel(b, local_size, 1));
+ id_z = nir_udiv(b, local_index,
+ nir_imul(b, nir_channel(b, local_size, 0),
+ nir_channel(b, local_size, 1)));
+ return nir_u2u(b, nir_vec3(b, id_x, id_y, id_z), bit_size);
+ } else {
+ return NULL;
+ }
+
+ case nir_intrinsic_load_local_invocation_index:
+ /* If lower_cs_local_index_from_id is true, then we derive the local
+ * index from the local id.
+ */
+ if (b->shader->options->lower_cs_local_index_from_id) {