struct lower_intrinsics_state {
nir_shader *nir;
- struct brw_cs_prog_data *prog_data;
nir_function_impl *impl;
bool progress;
nir_builder builder;
- bool cs_thread_id_used;
+ unsigned local_workgroup_size;
};
-static nir_ssa_def *
-read_thread_local_id(struct lower_intrinsics_state *state)
-{
- nir_builder *b = &state->builder;
- nir_shader *nir = state->nir;
- const unsigned *sizes = nir->info.cs.local_size;
- const unsigned group_size = sizes[0] * sizes[1] * sizes[2];
-
- /* Some programs have local_size dimensions so small that the thread local
- * ID will always be 0.
- */
- if (group_size <= 8)
- return nir_imm_int(b, 0);
-
- assert(state->prog_data->thread_local_id_index >= 0);
- state->cs_thread_id_used = true;
- const int id_index = state->prog_data->thread_local_id_index;
-
- nir_intrinsic_instr *load =
- nir_intrinsic_instr_create(nir, nir_intrinsic_load_uniform);
- load->num_components = 1;
- load->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
- nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
- nir_intrinsic_set_base(load, id_index * sizeof(uint32_t));
- nir_intrinsic_set_range(load, sizeof(uint32_t));
- nir_builder_instr_insert(b, &load->instr);
- return &load->dest.ssa;
-}
-
static bool
lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
nir_block *block)
{
nir_builder *b = &state->builder;
nir_shader *nir = state->nir;
+ /* Reuse calculated values inside the block. */
+ nir_ssa_def *local_index = NULL;
+ nir_ssa_def *local_id = NULL;
+
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);

nir_ssa_def *sysval;
switch (intrinsic->intrinsic) {
- case nir_intrinsic_load_local_invocation_index: {
- /* We construct the local invocation index from:
- *
- * gl_LocalInvocationIndex =
- * cs_thread_local_id + subgroup_invocation;
- */
- nir_ssa_def *thread_local_id = read_thread_local_id(state);
- nir_ssa_def *channel = nir_load_subgroup_invocation(b);
- sysval = nir_iadd(b, channel, thread_local_id);
+ case nir_intrinsic_load_local_invocation_index:
+ case nir_intrinsic_load_local_invocation_id: {
+ /* First time these values are used, so calculate them. */
+ if (!local_index) {
+ assert(!local_id);
+
+ nir_ssa_def *subgroup_id = nir_load_subgroup_id(b);
+
+ nir_ssa_def *thread_local_id =
+ nir_imul(b, subgroup_id, nir_load_simd_width_intel(b));
+ nir_ssa_def *channel = nir_load_subgroup_invocation(b);
+ nir_ssa_def *linear = nir_iadd(b, channel, thread_local_id);
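+ /* E.g. in a SIMD16 dispatch, channel 5 of subgroup 2 gets
+ * linear = 2 * 16 + 5 = 37.
+ */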
+
+ nir_ssa_def *size_x;
+ nir_ssa_def *size_y;
+ if (state->nir->info.cs.local_size_variable) {
+ nir_ssa_def *size_xyz = nir_load_local_group_size(b);
+ size_x = nir_channel(b, size_xyz, 0);
+ size_y = nir_channel(b, size_xyz, 1);
+ } else {
+ size_x = nir_imm_int(b, nir->info.cs.local_size[0]);
+ size_y = nir_imm_int(b, nir->info.cs.local_size[1]);
+ }
+
+ /* The local invocation index and ID must respect the following relations:
+ *
+ * gl_LocalInvocationID.x =
+ * gl_LocalInvocationIndex % gl_WorkGroupSize.x;
+ * gl_LocalInvocationID.y =
+ * (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
+ * gl_WorkGroupSize.y;
+ * gl_LocalInvocationID.z =
+ * (gl_LocalInvocationIndex /
+ * (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
+ * gl_WorkGroupSize.z;
+ *
+ * However, the final % gl_WorkGroupSize.z does nothing unless we
+ * accidentally end up with a gl_LocalInvocationIndex that is too
+ * large, so it can safely be omitted.
+ */
+
+ if (state->nir->info.cs.derivative_group != DERIVATIVE_GROUP_QUADS) {
+ /* If we are not grouping in quads, just set the local invocation
+ * index linearly, and calculate the local invocation ID from that.
+ */
+ local_index = linear;
+
+ nir_ssa_def *id_x, *id_y, *id_z;
+ id_x = nir_umod(b, local_index, size_x);
+ id_y = nir_umod(b, nir_udiv(b, local_index, size_x), size_y);
+ id_z = nir_udiv(b, local_index, nir_imul(b, size_x, size_y));
+ local_id = nir_vec3(b, id_x, id_y, id_z);
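+ /* For illustration: with an 8x4x2 workgroup, local_index 37 yields
+ * id = (37 % 8, (37 / 8) % 4, 37 / 32) = (5, 0, 1).
+ */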
+ } else {
+ /* For quads, first we figure out the 2x2 grid the invocation
+ * belongs to -- treating extra Z layers as just more rows.
+ * Then map that into the local invocation ID (trivial) and the
+ * local invocation index. Skipping Z simplifies the index
+ * calculation.
+ */
+
+ nir_ssa_def *one = nir_imm_int(b, 1);
+ nir_ssa_def *double_size_x = nir_ishl(b, size_x, one);
+
+ /* ID within a pair of rows, where each group of 4 is a 2x2 quad. */
+ nir_ssa_def *row_pair_id = nir_umod(b, linear, double_size_x);
+ nir_ssa_def *y_row_pairs = nir_udiv(b, linear, double_size_x);
+
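+ /* Bit 0 of row_pair_id selects the column within the quad, bit 1
+ * selects the row within the quad, and the remaining bits select
+ * which quad along the doubled row.
+ */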
+ nir_ssa_def *x =
+ nir_ior(b,
+ nir_iand(b, row_pair_id, one),
+ nir_iand(b, nir_ishr(b, row_pair_id, one),
+ nir_imm_int(b, 0xfffffffe)));
+ nir_ssa_def *y =
+ nir_ior(b,
+ nir_ishl(b, y_row_pairs, one),
+ nir_iand(b, nir_ishr(b, row_pair_id, one), one));
+
+ local_id = nir_vec3(b, x,
+ nir_umod(b, y, size_y),
+ nir_udiv(b, y, size_y));
+ local_index = nir_iadd(b, x, nir_imul(b, y, size_x));
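+ /* For example, with size_x = 4 the first eight linear values map
+ * to (x, y) = (0,0) (1,0) (0,1) (1,1) (2,0) (3,0) (2,1) (3,1),
+ * so each aligned group of four invocations forms one 2x2 quad.
+ */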
+ }
+ }
+
+ assert(local_id);
+ assert(local_index);
+ if (intrinsic->intrinsic == nir_intrinsic_load_local_invocation_id)
+ sysval = local_id;
+ else
+ sysval = local_index;
break;
}
- case nir_intrinsic_load_local_invocation_id: {
- /* We lower gl_LocalInvocationID from gl_LocalInvocationIndex based
- * on this formula:
- *
- * gl_LocalInvocationID.x =
- * gl_LocalInvocationIndex % gl_WorkGroupSize.x;
- * gl_LocalInvocationID.y =
- * (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
- * gl_WorkGroupSize.y;
- * gl_LocalInvocationID.z =
- * (gl_LocalInvocationIndex /
- * (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
- * gl_WorkGroupSize.z;
- */
- unsigned *size = nir->info.cs.local_size;
-
- nir_ssa_def *local_index = nir_load_local_invocation_index(b);
-
- nir_const_value uvec3;
- uvec3.u32[0] = 1;
- uvec3.u32[1] = size[0];
- uvec3.u32[2] = size[0] * size[1];
- nir_ssa_def *div_val = nir_build_imm(b, 3, 32, uvec3);
- uvec3.u32[0] = size[0];
- uvec3.u32[1] = size[1];
- uvec3.u32[2] = size[2];
- nir_ssa_def *mod_val = nir_build_imm(b, 3, 32, uvec3);
-
- sysval = nir_umod(b, nir_udiv(b, local_index, div_val), mod_val);
+ case nir_intrinsic_load_num_subgroups: {
+ nir_ssa_def *size;
+ if (state->nir->info.cs.local_size_variable) {
+ nir_ssa_def *size_xyz = nir_load_local_group_size(b);
+ nir_ssa_def *size_x = nir_channel(b, size_xyz, 0);
+ nir_ssa_def *size_y = nir_channel(b, size_xyz, 1);
+ nir_ssa_def *size_z = nir_channel(b, size_xyz, 2);
+ size = nir_imul(b, nir_imul(b, size_x, size_y), size_z);
+ } else {
+ size = nir_imm_int(b, nir->info.cs.local_size[0] *
+ nir->info.cs.local_size[1] *
+ nir->info.cs.local_size[2]);
+ }
+
+ /* Calculate the equivalent of DIV_ROUND_UP. */
+ nir_ssa_def *simd_width = nir_load_simd_width_intel(b);
+ sysval =
+ nir_udiv(b, nir_iadd_imm(b, nir_iadd(b, size, simd_width), -1),
+ simd_width);
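+ /* E.g. a 40-invocation workgroup at SIMD16 yields
+ * (40 + 16 - 1) / 16 = 3 subgroups.
+ */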
break;
}
}
bool
-brw_nir_lower_cs_intrinsics(nir_shader *nir,
- struct brw_cs_prog_data *prog_data)
+brw_nir_lower_cs_intrinsics(nir_shader *nir)
{
- assert(nir->stage == MESA_SHADER_COMPUTE);
+ assert(nir->info.stage == MESA_SHADER_COMPUTE);
+
+ struct lower_intrinsics_state state = {
+ .nir = nir,
+ };
+
+ if (!nir->info.cs.local_size_variable) {
+ state.local_workgroup_size = nir->info.cs.local_size[0] *
+ nir->info.cs.local_size[1] *
+ nir->info.cs.local_size[2];
+ } else {
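+ /* Only an upper bound is known at compile time when the workgroup
+ * size is variable.
+ */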
+ state.local_workgroup_size = nir->info.cs.max_variable_local_size;
+ }
- bool progress = false;
- struct lower_intrinsics_state state;
- memset(&state, 0, sizeof(state));
- state.nir = nir;
- state.prog_data = prog_data;
-
- do {
- state.progress = false;
- nir_foreach_function(function, nir) {
- if (function->impl) {
- state.impl = function->impl;
- lower_cs_intrinsics_convert_impl(&state);
- }
- }
- progress |= state.progress;
- } while (state.progress);
+ /* Constraints from NV_compute_shader_derivatives. */
+ if (nir->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS &&
+ !nir->info.cs.local_size_variable) {
+ assert(nir->info.cs.local_size[0] % 2 == 0);
+ assert(nir->info.cs.local_size[1] % 2 == 0);
+ } else if (nir->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR &&
+ !nir->info.cs.local_size_variable) {
+ assert(state.local_workgroup_size % 4 == 0);
+ }
- if (!state.cs_thread_id_used)
- state.prog_data->thread_local_id_index = -1;
+ nir_foreach_function(function, nir) {
+ if (function->impl) {
+ state.impl = function->impl;
+ lower_cs_intrinsics_convert_impl(&state);
+ }
+ }
- return progress;
+ return state.progress;
}